From 5a935501e2adf78e5b6470cba256d21f852c5230 Mon Sep 17 00:00:00 2001
From: markwallace-microsoft
<127216156+markwallace-microsoft@users.noreply.github.com>
Date: Wed, 19 Jun 2024 16:06:58 +0100
Subject: [PATCH 1/7] Adding reasoning to function calling samples
---
.../OpenAI_ReasonedFunctionCalling.cs | 63 +++++++++++++++++++
.../samples/InternalUtilities/BaseTest.cs | 11 ++--
2 files changed, 70 insertions(+), 4 deletions(-)
create mode 100644 dotnet/samples/Concepts/ChatCompletion/OpenAI_ReasonedFunctionCalling.cs
diff --git a/dotnet/samples/Concepts/ChatCompletion/OpenAI_ReasonedFunctionCalling.cs b/dotnet/samples/Concepts/ChatCompletion/OpenAI_ReasonedFunctionCalling.cs
new file mode 100644
index 000000000000..48001a17d2e0
--- /dev/null
+++ b/dotnet/samples/Concepts/ChatCompletion/OpenAI_ReasonedFunctionCalling.cs
@@ -0,0 +1,63 @@
+// Copyright (c) Microsoft. All rights reserved.
+
+using System.ComponentModel;
+using Microsoft.SemanticKernel;
+using Microsoft.SemanticKernel.ChatCompletion;
+using Microsoft.SemanticKernel.Connectors.OpenAI;
+
+namespace ChatCompletion;
+
+///
+///
+///
+public sealed class OpenAI_ReasonedFunctionCalling(ITestOutputHelper output) : BaseTest(output)
+{
+ [Fact]
+ public async Task AskingAssistantToExplainFunctionCallsAsync()
+ {
+ // Create a kernel with MistralAI chat completion and WeatherPlugin
+ Kernel kernel = CreateKernelWithPlugin();
+ var service = kernel.GetRequiredService();
+
+ // Invoke chat prompt with auto invocation of functions enabled
+ var chatHistory = new ChatHistory
+ {
+ new ChatMessageContent(AuthorRole.System, "Always include a description explaining why you want a function to be called when you request a function be called."),
+ new ChatMessageContent(AuthorRole.User, "What is the weather like in Paris?")
+ };
+ var executionSettings = new OpenAIPromptExecutionSettings { ToolCallBehavior = ToolCallBehavior.AutoInvokeKernelFunctions };
+ var result1 = await service.GetChatMessageContentAsync(chatHistory, executionSettings, kernel);
+ chatHistory.Add(result1);
+ Console.WriteLine(result1);
+
+ chatHistory.Add(new ChatMessageContent(AuthorRole.User, "Explain why you called those functions?"));
+ var result2 = await service.GetChatMessageContentAsync(chatHistory, executionSettings, kernel);
+ Console.WriteLine(result2);
+ }
+
+ public sealed class WeatherPlugin
+ {
+ [KernelFunction]
+ [Description("Get the current weather in a given location.")]
+ public string GetWeather(
+ [Description("The city and department, e.g. Marseille, 13")] string location
+ ) => $"12°C\nWind: 11 KMPH\nHumidity: 48%\nMostly cloudy\nLocation: {location}";
+ }
+
+ private Kernel CreateKernelWithPlugin()
+ {
+ // Create a logging handler to output HTTP requests and responses
+ var handler = new LoggingHandler(new HttpClientHandler(), this.Output);
+ HttpClient httpClient = new(handler);
+
+ // Create a kernel with OpenAI chat completion and WeatherPlugin
+ IKernelBuilder kernelBuilder = Kernel.CreateBuilder();
+ kernelBuilder.AddOpenAIChatCompletion(
+ modelId: TestConfiguration.OpenAI.ChatModelId!,
+ apiKey: TestConfiguration.OpenAI.ApiKey!,
+ httpClient: httpClient);
+ kernelBuilder.Plugins.AddFromType();
+ Kernel kernel = kernelBuilder.Build();
+ return kernel;
+ }
+}
diff --git a/dotnet/src/InternalUtilities/samples/InternalUtilities/BaseTest.cs b/dotnet/src/InternalUtilities/samples/InternalUtilities/BaseTest.cs
index 8e65d7dcd88a..f1d84d0eb22b 100644
--- a/dotnet/src/InternalUtilities/samples/InternalUtilities/BaseTest.cs
+++ b/dotnet/src/InternalUtilities/samples/InternalUtilities/BaseTest.cs
@@ -1,5 +1,6 @@
// Copyright (c) Microsoft. All rights reserved.
using System.Reflection;
+using System.Text.Json;
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.Logging;
using Microsoft.SemanticKernel;
@@ -102,6 +103,8 @@ public void Write(object? target = null)
protected sealed class LoggingHandler(HttpMessageHandler innerHandler, ITestOutputHelper output) : DelegatingHandler(innerHandler)
{
+ private static readonly JsonSerializerOptions s_jsonSerializerOptions = new() { WriteIndented = true };
+
private readonly ITestOutputHelper _output = output;
protected override async Task SendAsync(HttpRequestMessage request, CancellationToken cancellationToken)
@@ -110,7 +113,9 @@ protected override async Task SendAsync(HttpRequestMessage
if (request.Content is not null)
{
var content = await request.Content.ReadAsStringAsync(cancellationToken);
- this._output.WriteLine(content);
+ string formattedContent = JsonSerializer.Serialize(JsonSerializer.Deserialize(content), s_jsonSerializerOptions);
+ this._output.WriteLine(formattedContent);
+ this._output.WriteLine(string.Empty);
}
// Call the next handler in the pipeline
@@ -121,11 +126,9 @@ protected override async Task SendAsync(HttpRequestMessage
// Log the response details
var responseContent = await response.Content.ReadAsStringAsync(cancellationToken);
this._output.WriteLine(responseContent);
+ this._output.WriteLine(string.Empty);
}
- // Log the response details
- this._output.WriteLine("");
-
return response;
}
}
From 455626df5864074af805bf39b80d6e8976bdd1eb Mon Sep 17 00:00:00 2001
From: markwallace-microsoft
<127216156+markwallace-microsoft@users.noreply.github.com>
Date: Mon, 24 Jun 2024 11:30:54 +0100
Subject: [PATCH 2/7] Try a few different techniques and compare outcomes
---
.../OpenAI_ReasonedFunctionCalling.cs | 141 +++++++++++++++++-
.../samples/InternalUtilities/BaseTest.cs | 2 +
2 files changed, 140 insertions(+), 3 deletions(-)
diff --git a/dotnet/samples/Concepts/ChatCompletion/OpenAI_ReasonedFunctionCalling.cs b/dotnet/samples/Concepts/ChatCompletion/OpenAI_ReasonedFunctionCalling.cs
index 48001a17d2e0..28d5d8506a1f 100644
--- a/dotnet/samples/Concepts/ChatCompletion/OpenAI_ReasonedFunctionCalling.cs
+++ b/dotnet/samples/Concepts/ChatCompletion/OpenAI_ReasonedFunctionCalling.cs
@@ -8,12 +8,15 @@
namespace ChatCompletion;
///
-///
+/// Samples showing how to get the LLM to provide the reason is using function calling.
///
public sealed class OpenAI_ReasonedFunctionCalling(ITestOutputHelper output) : BaseTest(output)
{
+ ///
+ /// Using the system prompt to explain function calls doesn't work work with gpt-4o.
+ ///
[Fact]
- public async Task AskingAssistantToExplainFunctionCallsAsync()
+ public async Task UseSystemPromptToExplainFunctionCallsAsync()
{
// Create a kernel with MistralAI chat completion and WeatherPlugin
Kernel kernel = CreateKernelWithPlugin();
@@ -26,6 +29,27 @@ public async Task AskingAssistantToExplainFunctionCallsAsync()
new ChatMessageContent(AuthorRole.User, "What is the weather like in Paris?")
};
var executionSettings = new OpenAIPromptExecutionSettings { ToolCallBehavior = ToolCallBehavior.AutoInvokeKernelFunctions };
+ var result = await service.GetChatMessageContentAsync(chatHistory, executionSettings, kernel);
+ chatHistory.Add(result);
+ Console.WriteLine(result);
+ }
+
+ ///
+ /// Asking the model to explain function calls after execution works well but may be too late depending on your use case.
+ ///
+ [Fact]
+ public async Task AskAssistantToExplainFunctionCallsAfterExecutionAsync()
+ {
+ // Create a kernel with MistralAI chat completion and WeatherPlugin
+ Kernel kernel = CreateKernelWithPlugin();
+ var service = kernel.GetRequiredService();
+
+ // Invoke chat prompt with auto invocation of functions enabled
+ var chatHistory = new ChatHistory
+ {
+ new ChatMessageContent(AuthorRole.User, "What is the weather like in Paris?")
+ };
+ var executionSettings = new OpenAIPromptExecutionSettings { ToolCallBehavior = ToolCallBehavior.AutoInvokeKernelFunctions };
var result1 = await service.GetChatMessageContentAsync(chatHistory, executionSettings, kernel);
chatHistory.Add(result1);
Console.WriteLine(result1);
@@ -35,7 +59,118 @@ public async Task AskingAssistantToExplainFunctionCallsAsync()
Console.WriteLine(result2);
}
- public sealed class WeatherPlugin
+ ///
+ /// Asking the model to explain function calls in response to each function call can work but the model may also
+ /// get confused and treat the request to explain the function calls as an error response from the function calls.
+ ///
+ [Fact]
+ public async Task AskAssistantToExplainFunctionCallsBeforeExecutionAsync()
+ {
+ // Create a kernel with MistralAI chat completion and WeatherPlugin
+ Kernel kernel = CreateKernelWithPlugin();
+ kernel.AutoFunctionInvocationFilters.Add(new RespondExplainFunctionInvocationFilter());
+ var service = kernel.GetRequiredService();
+
+ // Invoke chat prompt with auto invocation of functions enabled
+ var chatHistory = new ChatHistory
+ {
+ new ChatMessageContent(AuthorRole.User, "What is the weather like in Paris?")
+ };
+ var executionSettings = new OpenAIPromptExecutionSettings { ToolCallBehavior = ToolCallBehavior.AutoInvokeKernelFunctions };
+ var result = await service.GetChatMessageContentAsync(chatHistory, executionSettings, kernel);
+ chatHistory.Add(result);
+ Console.WriteLine(result);
+ }
+
+ ///
+ /// Asking the model to explain function calls using a separate conversation, i.e. chat history, seems to provide the
+ /// best results. This may be because the model can focus on explaining the function calls without being confused by other
+ /// messages in the chat history.
+ ///
+ [Fact]
+ public async Task QueryAssistantToExplainFunctionCallsBeforeExecutionAsync()
+ {
+ // Create a kernel with MistralAI chat completion and WeatherPlugin
+ Kernel kernel = CreateKernelWithPlugin();
+ kernel.AutoFunctionInvocationFilters.Add(new QueryExplainFunctionInvocationFilter(this.Output));
+ var service = kernel.GetRequiredService();
+
+ // Invoke chat prompt with auto invocation of functions enabled
+ var chatHistory = new ChatHistory
+ {
+ new ChatMessageContent(AuthorRole.User, "What is the weather like in Paris?")
+ };
+ var executionSettings = new OpenAIPromptExecutionSettings { ToolCallBehavior = ToolCallBehavior.AutoInvokeKernelFunctions };
+ var result = await service.GetChatMessageContentAsync(chatHistory, executionSettings, kernel);
+ chatHistory.Add(result);
+ Console.WriteLine(result);
+ }
+
+ ///
+ /// This will respond to function call requests and ask the model to explain why it is
+ /// calling the function(s). It is only suitable for transient use because it stores information about the functions that have been
+ /// called for a single chat history.
+ ///
+ private sealed class RespondExplainFunctionInvocationFilter : IAutoFunctionInvocationFilter
+ {
+ private readonly HashSet _functionNames = [];
+
+ public async Task OnAutoFunctionInvocationAsync(AutoFunctionInvocationContext context, Func next)
+ {
+ // Get the function calls for which we need an explanation
+ var functionCalls = FunctionCallContent.GetFunctionCalls(context.ChatHistory.Last());
+ var needExplanation = 0;
+ foreach (var functionCall in functionCalls)
+ {
+ var functionName = $"{functionCall.PluginName}-{functionCall.FunctionName}";
+ if (_functionNames.Add(functionName))
+ {
+ needExplanation++;
+ }
+ }
+
+ if (needExplanation > 0)
+ {
+ // Create a response asking why these functions are being called
+ context.Result = new FunctionResult(context.Result, $"Provide an explanation why you are calling function {string.Join(',', _functionNames)} and try again");
+ return;
+ }
+
+ // Invoke the functions
+ await next(context);
+ }
+ }
+
+ ///
+ /// This uses the currently available to query the model
+ /// to find out what certain functions are being called.
+ ///
+ private sealed class QueryExplainFunctionInvocationFilter(ITestOutputHelper output) : IAutoFunctionInvocationFilter
+ {
+ private readonly ITestOutputHelper _output = output;
+
+ public async Task OnAutoFunctionInvocationAsync(AutoFunctionInvocationContext context, Func next)
+ {
+ // Invoke the model to explain why the functions are being called
+ var message = context.ChatHistory[^2];
+ var functionCalls = FunctionCallContent.GetFunctionCalls(context.ChatHistory.Last());
+ var functionNames = functionCalls.Select(fc => $"{fc.PluginName}-{fc.FunctionName}").ToList();
+ var service = context.Kernel.GetRequiredService();
+
+ var chatHistory = new ChatHistory
+ {
+ new ChatMessageContent(AuthorRole.User, $"Provide an explanation why these functions: {string.Join(',', functionNames)} need to be called to answer this query: {message.Content}")
+ };
+ var executionSettings = new OpenAIPromptExecutionSettings { ToolCallBehavior = ToolCallBehavior.EnableKernelFunctions };
+ var result = await service.GetChatMessageContentAsync(chatHistory, executionSettings, context.Kernel);
+ this._output.WriteLine(result);
+
+ // Invoke the functions
+ await next(context);
+ }
+ }
+
+ private sealed class WeatherPlugin
{
[KernelFunction]
[Description("Get the current weather in a given location.")]
diff --git a/dotnet/src/InternalUtilities/samples/InternalUtilities/BaseTest.cs b/dotnet/src/InternalUtilities/samples/InternalUtilities/BaseTest.cs
index f1d84d0eb22b..c846fc87c463 100644
--- a/dotnet/src/InternalUtilities/samples/InternalUtilities/BaseTest.cs
+++ b/dotnet/src/InternalUtilities/samples/InternalUtilities/BaseTest.cs
@@ -114,6 +114,7 @@ protected override async Task SendAsync(HttpRequestMessage
{
var content = await request.Content.ReadAsStringAsync(cancellationToken);
string formattedContent = JsonSerializer.Serialize(JsonSerializer.Deserialize(content), s_jsonSerializerOptions);
+ this._output.WriteLine("=== REQUEST ===");
this._output.WriteLine(formattedContent);
this._output.WriteLine(string.Empty);
}
@@ -125,6 +126,7 @@ protected override async Task SendAsync(HttpRequestMessage
{
// Log the response details
var responseContent = await response.Content.ReadAsStringAsync(cancellationToken);
+ this._output.WriteLine("=== RESPONSE ===");
this._output.WriteLine(responseContent);
this._output.WriteLine(string.Empty);
}
From ffb89d7db63bcf9d580f9e797c7f289e75ecd53e Mon Sep 17 00:00:00 2001
From: markwallace-microsoft
<127216156+markwallace-microsoft@users.noreply.github.com>
Date: Mon, 24 Jun 2024 15:32:09 +0100
Subject: [PATCH 3/7] Decorated function example
---
.../OpenAI_ReasonedFunctionCalling.cs | 35 ++++++++++++++++++-
1 file changed, 34 insertions(+), 1 deletion(-)
diff --git a/dotnet/samples/Concepts/ChatCompletion/OpenAI_ReasonedFunctionCalling.cs b/dotnet/samples/Concepts/ChatCompletion/OpenAI_ReasonedFunctionCalling.cs
index 28d5d8506a1f..adcd9e837111 100644
--- a/dotnet/samples/Concepts/ChatCompletion/OpenAI_ReasonedFunctionCalling.cs
+++ b/dotnet/samples/Concepts/ChatCompletion/OpenAI_ReasonedFunctionCalling.cs
@@ -25,7 +25,7 @@ public async Task UseSystemPromptToExplainFunctionCallsAsync()
// Invoke chat prompt with auto invocation of functions enabled
var chatHistory = new ChatHistory
{
- new ChatMessageContent(AuthorRole.System, "Always include a description explaining why you want a function to be called when you request a function be called."),
+ new ChatMessageContent(AuthorRole.System, "Always explain why function tool calls are being used."),
new ChatMessageContent(AuthorRole.User, "What is the weather like in Paris?")
};
var executionSettings = new OpenAIPromptExecutionSettings { ToolCallBehavior = ToolCallBehavior.AutoInvokeKernelFunctions };
@@ -59,6 +59,27 @@ public async Task AskAssistantToExplainFunctionCallsAfterExecutionAsync()
Console.WriteLine(result2);
}
+ ///
+ /// Decorate each function to be called with an extra parameter which includes the reason this function needs to be called.
+ ///
+ [Fact]
+ public async Task UseDecoratedFunctionAsync()
+ {
+ // Create a kernel with MistralAI chat completion and WeatherPlugin
+ Kernel kernel = CreateKernelWithPlugin();
+ var service = kernel.GetRequiredService();
+
+ // Invoke chat prompt with auto invocation of functions enabled
+ var chatHistory = new ChatHistory
+ {
+ new ChatMessageContent(AuthorRole.User, "What is the weather like in Paris?")
+ };
+ var executionSettings = new OpenAIPromptExecutionSettings { ToolCallBehavior = ToolCallBehavior.AutoInvokeKernelFunctions };
+ var result = await service.GetChatMessageContentAsync(chatHistory, executionSettings, kernel);
+ chatHistory.Add(result);
+ Console.WriteLine(result);
+ }
+
///
/// Asking the model to explain function calls in response to each function call can work but the model may also
/// get confused and treat the request to explain the function calls as an error response from the function calls.
@@ -179,6 +200,18 @@ public string GetWeather(
) => $"12°C\nWind: 11 KMPH\nHumidity: 48%\nMostly cloudy\nLocation: {location}";
}
+ private sealed class DecoratedWeatherPlugin
+ {
+ private readonly WeatherPlugin _weatherPlugin = new();
+
+ [KernelFunction]
+ [Description("Get the current weather in a given location.")]
+ public string GetWeather(
+ [Description("A detailed explanation why this function is being called")] string explanation,
+ [Description("The city and department, e.g. Marseille, 13")] string location
+ ) => this._weatherPlugin.GetWeather(location);
+ }
+
private Kernel CreateKernelWithPlugin()
{
// Create a logging handler to output HTTP requests and responses
From cbfe2a1e870c9fba02721828b762a044fb8d0f46 Mon Sep 17 00:00:00 2001
From: markwallace-microsoft
<127216156+markwallace-microsoft@users.noreply.github.com>
Date: Tue, 25 Jun 2024 10:39:52 +0100
Subject: [PATCH 4/7] Address code review feedback
---
.../OpenAI_ReasonedFunctionCalling.cs | 69 +++++++++++--------
dotnet/samples/Concepts/README.md | 3 +-
.../samples/InternalUtilities/BaseTest.cs | 11 ++-
3 files changed, 51 insertions(+), 32 deletions(-)
diff --git a/dotnet/samples/Concepts/ChatCompletion/OpenAI_ReasonedFunctionCalling.cs b/dotnet/samples/Concepts/ChatCompletion/OpenAI_ReasonedFunctionCalling.cs
index adcd9e837111..2039f093099d 100644
--- a/dotnet/samples/Concepts/ChatCompletion/OpenAI_ReasonedFunctionCalling.cs
+++ b/dotnet/samples/Concepts/ChatCompletion/OpenAI_ReasonedFunctionCalling.cs
@@ -8,39 +8,21 @@
namespace ChatCompletion;
///
-/// Samples showing how to get the LLM to provide the reason is using function calling.
+/// Samples showing how to get the LLM to provide the reason it is calling a function
+/// when using automatic function calling.
///
public sealed class OpenAI_ReasonedFunctionCalling(ITestOutputHelper output) : BaseTest(output)
{
///
- /// Using the system prompt to explain function calls doesn't work work with gpt-4o.
+ /// Shows how to ask the model to explain function calls after execution.
///
- [Fact]
- public async Task UseSystemPromptToExplainFunctionCallsAsync()
- {
- // Create a kernel with MistralAI chat completion and WeatherPlugin
- Kernel kernel = CreateKernelWithPlugin();
- var service = kernel.GetRequiredService();
-
- // Invoke chat prompt with auto invocation of functions enabled
- var chatHistory = new ChatHistory
- {
- new ChatMessageContent(AuthorRole.System, "Always explain why function tool calls are being used."),
- new ChatMessageContent(AuthorRole.User, "What is the weather like in Paris?")
- };
- var executionSettings = new OpenAIPromptExecutionSettings { ToolCallBehavior = ToolCallBehavior.AutoInvokeKernelFunctions };
- var result = await service.GetChatMessageContentAsync(chatHistory, executionSettings, kernel);
- chatHistory.Add(result);
- Console.WriteLine(result);
- }
-
- ///
+ ///
/// Asking the model to explain function calls after execution works well but may be too late depending on your use case.
- ///
+ ///
[Fact]
public async Task AskAssistantToExplainFunctionCallsAfterExecutionAsync()
{
- // Create a kernel with MistralAI chat completion and WeatherPlugin
+ // Create a kernel with OpenAI chat completion and WeatherPlugin
Kernel kernel = CreateKernelWithPlugin();
var service = kernel.GetRequiredService();
@@ -60,12 +42,13 @@ public async Task AskAssistantToExplainFunctionCallsAfterExecutionAsync()
}
///
- /// Decorate each function to be called with an extra parameter which includes the reason this function needs to be called.
+ /// Shows how to use a function that has ben decorated with an extra parameter which must be set by the model
+ /// with the reason this function needs to be called.
///
[Fact]
public async Task UseDecoratedFunctionAsync()
{
- // Create a kernel with MistralAI chat completion and WeatherPlugin
+ // Create a kernel with OpenAI chat completion and WeatherPlugin
Kernel kernel = CreateKernelWithPlugin();
var service = kernel.GetRequiredService();
@@ -80,6 +63,26 @@ public async Task UseDecoratedFunctionAsync()
Console.WriteLine(result);
}
+ ///
+ /// Shows how to use a function that has ben decorated with an extra parameter which must be set by the model
+ /// with the reason this function needs to be called.
+ ///
+ [Fact]
+ public async Task UseDecoratedFunctionWithPromptAsync()
+ {
+ // Create a kernel with OpenAI chat completion and WeatherPlugin
+ Kernel kernel = CreateKernelWithPlugin();
+ var service = kernel.GetRequiredService();
+
+ // Invoke chat prompt with auto invocation of functions enabled
+ string chatPrompt = """
+ What is the weather like in Paris?
+ """;
+ var executionSettings = new OpenAIPromptExecutionSettings { ToolCallBehavior = ToolCallBehavior.AutoInvokeKernelFunctions };
+ var result = await kernel.InvokePromptAsync(chatPrompt, new(executionSettings));
+ Console.WriteLine(result);
+ }
+
///
/// Asking the model to explain function calls in response to each function call can work but the model may also
/// get confused and treat the request to explain the function calls as an error response from the function calls.
@@ -87,7 +90,7 @@ public async Task UseDecoratedFunctionAsync()
[Fact]
public async Task AskAssistantToExplainFunctionCallsBeforeExecutionAsync()
{
- // Create a kernel with MistralAI chat completion and WeatherPlugin
+ // Create a kernel with OpenAI chat completion and WeatherPlugin
Kernel kernel = CreateKernelWithPlugin();
kernel.AutoFunctionInvocationFilters.Add(new RespondExplainFunctionInvocationFilter());
var service = kernel.GetRequiredService();
@@ -111,7 +114,7 @@ public async Task AskAssistantToExplainFunctionCallsBeforeExecutionAsync()
[Fact]
public async Task QueryAssistantToExplainFunctionCallsBeforeExecutionAsync()
{
- // Create a kernel with MistralAI chat completion and WeatherPlugin
+ // Create a kernel with OpenAI chat completion and WeatherPlugin
Kernel kernel = CreateKernelWithPlugin();
kernel.AutoFunctionInvocationFilters.Add(new QueryExplainFunctionInvocationFilter(this.Output));
var service = kernel.GetRequiredService();
@@ -129,9 +132,13 @@ public async Task QueryAssistantToExplainFunctionCallsBeforeExecutionAsync()
///
/// This will respond to function call requests and ask the model to explain why it is
- /// calling the function(s). It is only suitable for transient use because it stores information about the functions that have been
+ /// calling the function(s). This filter must be registered transiently because it maintains state for the functions that have been
/// called for a single chat history.
///
+ ///
+ /// This filter implementation is not intended for production use. It is a demonstration of how to use filters to interact with the
+ /// model during automatic function invocation so that the model explains why it is calling a function.
+ ///
private sealed class RespondExplainFunctionInvocationFilter : IAutoFunctionInvocationFilter
{
private readonly HashSet _functionNames = [];
@@ -166,6 +173,10 @@ public async Task OnAutoFunctionInvocationAsync(AutoFunctionInvocationContext co
/// This uses the currently available to query the model
/// to find out what certain functions are being called.
///
+ ///
+ /// This filter implementation is not intended for production use. It is a demonstration of how to use filters to interact with the
+ /// model during automatic function invocation so that the model explains why it is calling a function.
+ ///
private sealed class QueryExplainFunctionInvocationFilter(ITestOutputHelper output) : IAutoFunctionInvocationFilter
{
private readonly ITestOutputHelper _output = output;
diff --git a/dotnet/samples/Concepts/README.md b/dotnet/samples/Concepts/README.md
index 7eaa2a8a7ae6..529bb3ec1566 100644
--- a/dotnet/samples/Concepts/README.md
+++ b/dotnet/samples/Concepts/README.md
@@ -49,7 +49,8 @@ Down below you can find the code snippets that demonstrate the usage of many Sem
- [OpenAI_ChatCompletionWithVision](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/ChatCompletion/OpenAI_ChatCompletionWithVision.cs)
- [OpenAI_CustomAzureOpenAIClient](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/ChatCompletion/OpenAI_CustomAzureOpenAIClient.cs)
- [OpenAI_UsingLogitBias](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/ChatCompletion/OpenAI_UsingLogitBias.cs)
-- [OpenAI_FunctionCalling](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/ChatCompletion/OpenAI_FunctionCalling.cs)
+- [OpenAI_FunctionCalling](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/ChatCompletion/OpenAI_ReasonedFunctionCalling.cs)
+- [OpenAI_ReasonedFunctionCalling](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/ChatCompletion/OpenAI_FunctionCalling.cs)
- [MistralAI_ChatPrompt](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/ChatCompletion/MistralAI_ChatPrompt.cs)
- [MistralAI_FunctionCalling](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/ChatCompletion/MistralAI_FunctionCalling.cs)
- [MistralAI_StreamingFunctionCalling](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/ChatCompletion/MistralAI_StreamingFunctionCalling.cs)
diff --git a/dotnet/src/InternalUtilities/samples/InternalUtilities/BaseTest.cs b/dotnet/src/InternalUtilities/samples/InternalUtilities/BaseTest.cs
index c846fc87c463..d71d3c1f0032 100644
--- a/dotnet/src/InternalUtilities/samples/InternalUtilities/BaseTest.cs
+++ b/dotnet/src/InternalUtilities/samples/InternalUtilities/BaseTest.cs
@@ -113,9 +113,16 @@ protected override async Task SendAsync(HttpRequestMessage
if (request.Content is not null)
{
var content = await request.Content.ReadAsStringAsync(cancellationToken);
- string formattedContent = JsonSerializer.Serialize(JsonSerializer.Deserialize(content), s_jsonSerializerOptions);
this._output.WriteLine("=== REQUEST ===");
- this._output.WriteLine(formattedContent);
+ try
+ {
+ string formattedContent = JsonSerializer.Serialize(JsonSerializer.Deserialize(content), s_jsonSerializerOptions);
+ this._output.WriteLine(formattedContent);
+ }
+ catch (JsonException)
+ {
+ this._output.WriteLine(content);
+ }
this._output.WriteLine(string.Empty);
}
From 787e91feada59658554d1d4fa6b81a2cea9110dd Mon Sep 17 00:00:00 2001
From: markwallace-microsoft
<127216156+markwallace-microsoft@users.noreply.github.com>
Date: Tue, 25 Jun 2024 15:22:34 +0100
Subject: [PATCH 5/7] Add one more sample showing how function call results can
be remembered
---
.../OpenAI_ReasonedFunctionCalling.cs | 1 -
.../OpenAI_RepeatedFunctionCalling.cs | 76 +++++++++++++++++++
dotnet/samples/Concepts/README.md | 4 +-
3 files changed, 78 insertions(+), 3 deletions(-)
create mode 100644 dotnet/samples/Concepts/ChatCompletion/OpenAI_RepeatedFunctionCalling.cs
diff --git a/dotnet/samples/Concepts/ChatCompletion/OpenAI_ReasonedFunctionCalling.cs b/dotnet/samples/Concepts/ChatCompletion/OpenAI_ReasonedFunctionCalling.cs
index 2039f093099d..5d6d105eeceb 100644
--- a/dotnet/samples/Concepts/ChatCompletion/OpenAI_ReasonedFunctionCalling.cs
+++ b/dotnet/samples/Concepts/ChatCompletion/OpenAI_ReasonedFunctionCalling.cs
@@ -201,7 +201,6 @@ public async Task OnAutoFunctionInvocationAsync(AutoFunctionInvocationContext co
await next(context);
}
}
-
private sealed class WeatherPlugin
{
[KernelFunction]
diff --git a/dotnet/samples/Concepts/ChatCompletion/OpenAI_RepeatedFunctionCalling.cs b/dotnet/samples/Concepts/ChatCompletion/OpenAI_RepeatedFunctionCalling.cs
new file mode 100644
index 000000000000..11ea5ab362f9
--- /dev/null
+++ b/dotnet/samples/Concepts/ChatCompletion/OpenAI_RepeatedFunctionCalling.cs
@@ -0,0 +1,76 @@
+// Copyright (c) Microsoft. All rights reserved.
+
+using System.ComponentModel;
+using Microsoft.SemanticKernel;
+using Microsoft.SemanticKernel.ChatCompletion;
+using Microsoft.SemanticKernel.Connectors.OpenAI;
+
+namespace ChatCompletion;
+
+///
+/// Sample shows how the model will reuse a function result from the chat history.
+///
+public sealed class OpenAI_RepeatedFunctionCalling(ITestOutputHelper output) : BaseTest(output)
+{
+ ///
+ /// Sample shows a chat history where each ask requires a function to be called but when
+ /// an ask is repeated the model will reuse the previous function result.
+ ///
+ [Fact]
+ public async Task ReuseFunctionResultExecutionAsync()
+ {
+ // Create a kernel with OpenAI chat completion and WeatherPlugin
+ Kernel kernel = CreateKernelWithPlugin();
+ var service = kernel.GetRequiredService();
+
+ // Invoke chat prompt with auto invocation of functions enabled
+ var chatHistory = new ChatHistory
+ {
+ new ChatMessageContent(AuthorRole.User, "What is the weather like in Boston?")
+ };
+ var executionSettings = new OpenAIPromptExecutionSettings { ToolCallBehavior = ToolCallBehavior.AutoInvokeKernelFunctions };
+ var result1 = await service.GetChatMessageContentAsync(chatHistory, executionSettings, kernel);
+ chatHistory.Add(result1);
+ Console.WriteLine(result1);
+
+ chatHistory.Add(new ChatMessageContent(AuthorRole.User, "What is the weather like in Paris?"));
+ var result2 = await service.GetChatMessageContentAsync(chatHistory, executionSettings, kernel);
+ chatHistory.Add(result2);
+ Console.WriteLine(result2);
+
+ chatHistory.Add(new ChatMessageContent(AuthorRole.User, "What is the weather like in Dublin?"));
+ var result3 = await service.GetChatMessageContentAsync(chatHistory, executionSettings, kernel);
+ chatHistory.Add(result3);
+ Console.WriteLine(result3);
+
+ chatHistory.Add(new ChatMessageContent(AuthorRole.User, "What is the weather like in Boston?"));
+ var result4 = await service.GetChatMessageContentAsync(chatHistory, executionSettings, kernel);
+ chatHistory.Add(result4);
+ Console.WriteLine(result4);
+ }
+ private sealed class WeatherPlugin
+ {
+ [KernelFunction]
+ [Description("Get the current weather in a given location.")]
+ public string GetWeather(
+ [Description("The city and department, e.g. Marseille, 13")] string location
+ ) => $"12°C\nWind: 11 KMPH\nHumidity: 48%\nMostly cloudy\nLocation: {location}";
+ }
+
+ private Kernel CreateKernelWithPlugin()
+ {
+ // Create a logging handler to output HTTP requests and responses
+ var handler = new LoggingHandler(new HttpClientHandler(), this.Output);
+ HttpClient httpClient = new(handler);
+
+ // Create a kernel with OpenAI chat completion and WeatherPlugin
+ IKernelBuilder kernelBuilder = Kernel.CreateBuilder();
+ kernelBuilder.AddOpenAIChatCompletion(
+ modelId: TestConfiguration.OpenAI.ChatModelId!,
+ apiKey: TestConfiguration.OpenAI.ApiKey!,
+ httpClient: httpClient);
+ kernelBuilder.Plugins.AddFromType();
+ Kernel kernel = kernelBuilder.Build();
+ return kernel;
+ }
+}
diff --git a/dotnet/samples/Concepts/README.md b/dotnet/samples/Concepts/README.md
index 529bb3ec1566..45d744bc5b9c 100644
--- a/dotnet/samples/Concepts/README.md
+++ b/dotnet/samples/Concepts/README.md
@@ -49,8 +49,8 @@ Down below you can find the code snippets that demonstrate the usage of many Sem
- [OpenAI_ChatCompletionWithVision](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/ChatCompletion/OpenAI_ChatCompletionWithVision.cs)
- [OpenAI_CustomAzureOpenAIClient](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/ChatCompletion/OpenAI_CustomAzureOpenAIClient.cs)
- [OpenAI_UsingLogitBias](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/ChatCompletion/OpenAI_UsingLogitBias.cs)
-- [OpenAI_FunctionCalling](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/ChatCompletion/OpenAI_ReasonedFunctionCalling.cs)
-- [OpenAI_ReasonedFunctionCalling](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/ChatCompletion/OpenAI_FunctionCalling.cs)
+- [OpenAI_FunctionCalling](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/ChatCompletion/OpenAI_FunctionCalling.cs)
+- [OpenAI_ReasonedFunctionCalling](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/ChatCompletion/OpenAI_ReasonedFunctionCalling.cs)
- [MistralAI_ChatPrompt](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/ChatCompletion/MistralAI_ChatPrompt.cs)
- [MistralAI_FunctionCalling](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/ChatCompletion/MistralAI_FunctionCalling.cs)
- [MistralAI_StreamingFunctionCalling](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/ChatCompletion/MistralAI_StreamingFunctionCalling.cs)
From 4246b8813972c615b8bf88c1d1be9b60af58866c Mon Sep 17 00:00:00 2001
From: Mark Wallace <127216156+markwallace-microsoft@users.noreply.github.com>
Date: Mon, 1 Jul 2024 15:46:12 +0100
Subject: [PATCH 6/7] Update
dotnet/samples/Concepts/ChatCompletion/OpenAI_ReasonedFunctionCalling.cs
Co-authored-by: SergeyMenshykh <68852919+SergeyMenshykh@users.noreply.github.com>
---
.../Concepts/ChatCompletion/OpenAI_ReasonedFunctionCalling.cs | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/dotnet/samples/Concepts/ChatCompletion/OpenAI_ReasonedFunctionCalling.cs b/dotnet/samples/Concepts/ChatCompletion/OpenAI_ReasonedFunctionCalling.cs
index 5d6d105eeceb..5f9b8ea4bdf6 100644
--- a/dotnet/samples/Concepts/ChatCompletion/OpenAI_ReasonedFunctionCalling.cs
+++ b/dotnet/samples/Concepts/ChatCompletion/OpenAI_ReasonedFunctionCalling.cs
@@ -42,7 +42,7 @@ public async Task AskAssistantToExplainFunctionCallsAfterExecutionAsync()
}
///
- /// Shows how to use a function that has ben decorated with an extra parameter which must be set by the model
+ /// Shows how to use a function that has been decorated with an extra parameter which must be set by the model
/// with the reason this function needs to be called.
///
[Fact]
From 65a6a91a8489bb028ee7655caceb3b946b089bab Mon Sep 17 00:00:00 2001
From: markwallace-microsoft
<127216156+markwallace-microsoft@users.noreply.github.com>
Date: Mon, 1 Jul 2024 15:49:05 +0100
Subject: [PATCH 7/7] Fix typo
---
.../Concepts/ChatCompletion/OpenAI_ReasonedFunctionCalling.cs | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/dotnet/samples/Concepts/ChatCompletion/OpenAI_ReasonedFunctionCalling.cs b/dotnet/samples/Concepts/ChatCompletion/OpenAI_ReasonedFunctionCalling.cs
index 5f9b8ea4bdf6..74f3d4bd6a64 100644
--- a/dotnet/samples/Concepts/ChatCompletion/OpenAI_ReasonedFunctionCalling.cs
+++ b/dotnet/samples/Concepts/ChatCompletion/OpenAI_ReasonedFunctionCalling.cs
@@ -64,7 +64,7 @@ public async Task UseDecoratedFunctionAsync()
}
///
- /// Shows how to use a function that has ben decorated with an extra parameter which must be set by the model
+ /// Shows how to use a function that has been decorated with an extra parameter which must be set by the model
/// with the reason this function needs to be called.
///
[Fact]