.Net: Sample showing model thought process for each function call #6917
Merged

markwallace-microsoft merged 8 commits into microsoft:main from markwallace-microsoft:users/markwallace/issue_6782 on Jul 4, 2024

Commits (8)
5a93550  Adding reasoning to function calling samples (markwallace-microsoft)
455626d  Try a few different techniques and compare outcomes (markwallace-microsoft)
ffb89d7  Decorated function example (markwallace-microsoft)
cbfe2a1  Address code review feedback (markwallace-microsoft)
787e91f  Add one more sample showing how function call results can be remembered (markwallace-microsoft)
4246b88  Update dotnet/samples/Concepts/ChatCompletion/OpenAI_ReasonedFunction… (markwallace-microsoft)
f616c1f  Merge branch 'main' into users/markwallace/issue_6782 (markwallace-microsoft)
65a6a91  Fix typo (markwallace-microsoft)
dotnet/samples/Concepts/ChatCompletion/OpenAI_ReasonedFunctionCalling.cs (241 additions, 0 deletions)

@@ -0,0 +1,241 @@
// Copyright (c) Microsoft. All rights reserved.

using System.ComponentModel;
using Microsoft.SemanticKernel;
using Microsoft.SemanticKernel.ChatCompletion;
using Microsoft.SemanticKernel.Connectors.OpenAI;

namespace ChatCompletion;

/// <summary>
/// Samples showing how to get the LLM to provide the reason it is calling a function
/// when using automatic function calling.
/// </summary>
public sealed class OpenAI_ReasonedFunctionCalling(ITestOutputHelper output) : BaseTest(output)
{
    /// <summary>
    /// Shows how to ask the model to explain function calls after execution.
    /// </summary>
    /// <remarks>
    /// Asking the model to explain function calls after execution works well but may be too late depending on your use case.
    /// </remarks>
    [Fact]
    public async Task AskAssistantToExplainFunctionCallsAfterExecutionAsync()
    {
        // Create a kernel with OpenAI chat completion and WeatherPlugin
        Kernel kernel = CreateKernelWithPlugin<WeatherPlugin>();
        var service = kernel.GetRequiredService<IChatCompletionService>();

        // Invoke chat prompt with auto invocation of functions enabled
        var chatHistory = new ChatHistory
        {
            new ChatMessageContent(AuthorRole.User, "What is the weather like in Paris?")
        };
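        // AutoInvokeKernelFunctions lets the connector execute any functions the model requests
        // and feed the results back to the model automatically.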
        var executionSettings = new OpenAIPromptExecutionSettings { ToolCallBehavior = ToolCallBehavior.AutoInvokeKernelFunctions };
        var result1 = await service.GetChatMessageContentAsync(chatHistory, executionSettings, kernel);
        chatHistory.Add(result1);
        Console.WriteLine(result1);

        chatHistory.Add(new ChatMessageContent(AuthorRole.User, "Explain why you called those functions?"));
        var result2 = await service.GetChatMessageContentAsync(chatHistory, executionSettings, kernel);
        Console.WriteLine(result2);
    }

    /// <summary>
    /// Shows how to use a function that has been decorated with an extra parameter which must be set by the model
    /// with the reason this function needs to be called.
    /// </summary>
    [Fact]
    public async Task UseDecoratedFunctionAsync()
    {
        // Create a kernel with OpenAI chat completion and DecoratedWeatherPlugin
        Kernel kernel = CreateKernelWithPlugin<DecoratedWeatherPlugin>();
        var service = kernel.GetRequiredService<IChatCompletionService>();

        // Invoke chat prompt with auto invocation of functions enabled
        var chatHistory = new ChatHistory
        {
            new ChatMessageContent(AuthorRole.User, "What is the weather like in Paris?")
        };
        var executionSettings = new OpenAIPromptExecutionSettings { ToolCallBehavior = ToolCallBehavior.AutoInvokeKernelFunctions };
        var result = await service.GetChatMessageContentAsync(chatHistory, executionSettings, kernel);
        chatHistory.Add(result);
        Console.WriteLine(result);
    }

    /// <summary>
    /// Shows how to use a function that has been decorated with an extra parameter which must be set by the model
    /// with the reason this function needs to be called, this time invoking the model via a chat prompt.
    /// </summary>
    [Fact]
    public async Task UseDecoratedFunctionWithPromptAsync()
    {
        // Create a kernel with OpenAI chat completion and DecoratedWeatherPlugin
        Kernel kernel = CreateKernelWithPlugin<DecoratedWeatherPlugin>();
        var service = kernel.GetRequiredService<IChatCompletionService>();

        // Invoke chat prompt with auto invocation of functions enabled
        string chatPrompt = """
            <message role="user">What is the weather like in Paris?</message>
            """;
        var executionSettings = new OpenAIPromptExecutionSettings { ToolCallBehavior = ToolCallBehavior.AutoInvokeKernelFunctions };
        var result = await kernel.InvokePromptAsync(chatPrompt, new(executionSettings));
        Console.WriteLine(result);
    }

    /// <summary>
    /// Asking the model to explain function calls in response to each function call can work but the model may also
    /// get confused and treat the request to explain the function calls as an error response from the function calls.
    /// </summary>
    [Fact]
    public async Task AskAssistantToExplainFunctionCallsBeforeExecutionAsync()
    {
        // Create a kernel with OpenAI chat completion and WeatherPlugin
        Kernel kernel = CreateKernelWithPlugin<WeatherPlugin>();
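        // Register a filter that intercepts each automatic function invocation and asks the model
        // to explain the calls before the functions are executed.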
        kernel.AutoFunctionInvocationFilters.Add(new RespondExplainFunctionInvocationFilter());
        var service = kernel.GetRequiredService<IChatCompletionService>();

        // Invoke chat prompt with auto invocation of functions enabled
        var chatHistory = new ChatHistory
        {
            new ChatMessageContent(AuthorRole.User, "What is the weather like in Paris?")
        };
        var executionSettings = new OpenAIPromptExecutionSettings { ToolCallBehavior = ToolCallBehavior.AutoInvokeKernelFunctions };
        var result = await service.GetChatMessageContentAsync(chatHistory, executionSettings, kernel);
        chatHistory.Add(result);
        Console.WriteLine(result);
    }

    /// <summary>
    /// Asking the model to explain function calls using a separate conversation (i.e. a separate chat history) seems to provide the
    /// best results. This may be because the model can focus on explaining the function calls without being confused by other
    /// messages in the chat history.
    /// </summary>
    [Fact]
    public async Task QueryAssistantToExplainFunctionCallsBeforeExecutionAsync()
    {
        // Create a kernel with OpenAI chat completion and WeatherPlugin
        Kernel kernel = CreateKernelWithPlugin<WeatherPlugin>();
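        // Register a filter that uses a separate chat to ask the model why the functions are needed,
        // then lets the invocation proceed.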
        kernel.AutoFunctionInvocationFilters.Add(new QueryExplainFunctionInvocationFilter(this.Output));
        var service = kernel.GetRequiredService<IChatCompletionService>();

        // Invoke chat prompt with auto invocation of functions enabled
        var chatHistory = new ChatHistory
        {
            new ChatMessageContent(AuthorRole.User, "What is the weather like in Paris?")
        };
        var executionSettings = new OpenAIPromptExecutionSettings { ToolCallBehavior = ToolCallBehavior.AutoInvokeKernelFunctions };
        var result = await service.GetChatMessageContentAsync(chatHistory, executionSettings, kernel);
        chatHistory.Add(result);
        Console.WriteLine(result);
    }

    /// <summary>
    /// This <see cref="IAutoFunctionInvocationFilter"/> will respond to function call requests and ask the model to explain why it is
    /// calling the function(s). This filter must be registered transiently because it maintains state for the functions that have been
    /// called for a single chat history.
    /// </summary>
    /// <remarks>
    /// This filter implementation is not intended for production use. It is a demonstration of how to use filters to interact with the
    /// model during automatic function invocation so that the model explains why it is calling a function.
    /// </remarks>
    private sealed class RespondExplainFunctionInvocationFilter : IAutoFunctionInvocationFilter
    {
        private readonly HashSet<string> _functionNames = [];

        public async Task OnAutoFunctionInvocationAsync(AutoFunctionInvocationContext context, Func<AutoFunctionInvocationContext, Task> next)
        {
            // Get the function calls for which we need an explanation
            var functionCalls = FunctionCallContent.GetFunctionCalls(context.ChatHistory.Last());
            var needExplanation = 0;
            foreach (var functionCall in functionCalls)
            {
                var functionName = $"{functionCall.PluginName}-{functionCall.FunctionName}";
                if (_functionNames.Add(functionName))
                {
                    needExplanation++;
                }
            }

            if (needExplanation > 0)
            {
                // Create a response asking why these functions are being called
                context.Result = new FunctionResult(context.Result, $"Provide an explanation why you are calling function {string.Join(',', _functionNames)} and try again");
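                // Returning here without calling next(context) skips the function invocation;
                // the message above is sent back to the model as the function result instead.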
                return;
            }

            // Invoke the functions
            await next(context);
        }
    }

    /// <summary>
    /// This <see cref="IAutoFunctionInvocationFilter"/> uses the currently available <see cref="IChatCompletionService"/> to query the model
    /// to find out why certain functions are being called.
    /// </summary>
    /// <remarks>
    /// This filter implementation is not intended for production use. It is a demonstration of how to use filters to interact with the
    /// model during automatic function invocation so that the model explains why it is calling a function.
    /// </remarks>
    private sealed class QueryExplainFunctionInvocationFilter(ITestOutputHelper output) : IAutoFunctionInvocationFilter
    {
        private readonly ITestOutputHelper _output = output;

        public async Task OnAutoFunctionInvocationAsync(AutoFunctionInvocationContext context, Func<AutoFunctionInvocationContext, Task> next)
        {
            // Invoke the model to explain why the functions are being called
            var message = context.ChatHistory[^2];
            var functionCalls = FunctionCallContent.GetFunctionCalls(context.ChatHistory.Last());
            var functionNames = functionCalls.Select(fc => $"{fc.PluginName}-{fc.FunctionName}").ToList();
            var service = context.Kernel.GetRequiredService<IChatCompletionService>();

            var chatHistory = new ChatHistory
            {
                new ChatMessageContent(AuthorRole.User, $"Provide an explanation why these functions: {string.Join(',', functionNames)} need to be called to answer this query: {message.Content}")
            };
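            // EnableKernelFunctions advertises the functions to the model but does not auto-invoke them,
            // so this nested request should come back as a text explanation rather than more function invocations.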
            var executionSettings = new OpenAIPromptExecutionSettings { ToolCallBehavior = ToolCallBehavior.EnableKernelFunctions };
            var result = await service.GetChatMessageContentAsync(chatHistory, executionSettings, context.Kernel);
            this._output.WriteLine(result);

            // Invoke the functions
            await next(context);
        }
    }

    private sealed class WeatherPlugin
    {
        [KernelFunction]
        [Description("Get the current weather in a given location.")]
        public string GetWeather(
            [Description("The city and department, e.g. Marseille, 13")] string location
        ) => $"12°C\nWind: 11 KMPH\nHumidity: 48%\nMostly cloudy\nLocation: {location}";
    }

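    /// <summary>
    /// Weather plugin whose function is decorated with an extra explanation parameter that the model
    /// must populate with the reason for the call. This sample discards the value; a real plugin could
    /// log it or include it in the result.
    /// </summary>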
    private sealed class DecoratedWeatherPlugin
    {
        private readonly WeatherPlugin _weatherPlugin = new();

        [KernelFunction]
        [Description("Get the current weather in a given location.")]
        public string GetWeather(
            [Description("A detailed explanation why this function is being called")] string explanation,
            [Description("The city and department, e.g. Marseille, 13")] string location
        ) => this._weatherPlugin.GetWeather(location);
    }

    private Kernel CreateKernelWithPlugin<T>()
    {
        // Create a logging handler to output HTTP requests and responses
        var handler = new LoggingHandler(new HttpClientHandler(), this.Output);
        HttpClient httpClient = new(handler);

        // Create a kernel with OpenAI chat completion and WeatherPlugin
        IKernelBuilder kernelBuilder = Kernel.CreateBuilder();
        kernelBuilder.AddOpenAIChatCompletion(
            modelId: TestConfiguration.OpenAI.ChatModelId!,
            apiKey: TestConfiguration.OpenAI.ApiKey!,
            httpClient: httpClient);
        kernelBuilder.Plugins.AddFromType<T>();
        Kernel kernel = kernelBuilder.Build();
        return kernel;
    }
}
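Note: DecoratedWeatherPlugin above accepts the model's explanation argument but discards it. A minimal variant (hypothetical, not part of this change; the class name is invented) could surface the explanation so the model's reasoning is visible next to the weather result:

// Hypothetical variant that surfaces the model-supplied explanation instead of discarding it.
private sealed class LoggingDecoratedWeatherPlugin
{
    private readonly WeatherPlugin _weatherPlugin = new();

    [KernelFunction]
    [Description("Get the current weather in a given location.")]
    public string GetWeather(
        [Description("A detailed explanation why this function is being called")] string explanation,
        [Description("The city and department, e.g. Marseille, 13")] string location)
    {
        // Make the model's reasoning visible alongside the function result
        Console.WriteLine($"Model reasoning: {explanation}");
        return this._weatherPlugin.GetWeather(location);
    }
}

Because it has a parameterless constructor, it could be registered the same way as the other plugins, e.g. kernelBuilder.Plugins.AddFromType<LoggingDecoratedWeatherPlugin>().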
dotnet/samples/Concepts/ChatCompletion/OpenAI_RepeatedFunctionCalling.cs (76 additions, 0 deletions)

@@ -0,0 +1,76 @@
// Copyright (c) Microsoft. All rights reserved.

using System.ComponentModel;
using Microsoft.SemanticKernel;
using Microsoft.SemanticKernel.ChatCompletion;
using Microsoft.SemanticKernel.Connectors.OpenAI;

namespace ChatCompletion;

/// <summary>
/// Sample shows how the model will reuse a function result from the chat history.
/// </summary>
public sealed class OpenAI_RepeatedFunctionCalling(ITestOutputHelper output) : BaseTest(output)
{
    /// <summary>
    /// Sample shows a chat history where each ask requires a function to be called but when
    /// an ask is repeated the model will reuse the previous function result.
    /// </summary>
    [Fact]
    public async Task ReuseFunctionResultExecutionAsync()
    {
        // Create a kernel with OpenAI chat completion and WeatherPlugin
        Kernel kernel = CreateKernelWithPlugin<WeatherPlugin>();
        var service = kernel.GetRequiredService<IChatCompletionService>();

        // Invoke chat prompt with auto invocation of functions enabled
        var chatHistory = new ChatHistory
        {
            new ChatMessageContent(AuthorRole.User, "What is the weather like in Boston?")
        };
        var executionSettings = new OpenAIPromptExecutionSettings { ToolCallBehavior = ToolCallBehavior.AutoInvokeKernelFunctions };
        var result1 = await service.GetChatMessageContentAsync(chatHistory, executionSettings, kernel);
        chatHistory.Add(result1);
        Console.WriteLine(result1);

        chatHistory.Add(new ChatMessageContent(AuthorRole.User, "What is the weather like in Paris?"));
        var result2 = await service.GetChatMessageContentAsync(chatHistory, executionSettings, kernel);
        chatHistory.Add(result2);
        Console.WriteLine(result2);

        chatHistory.Add(new ChatMessageContent(AuthorRole.User, "What is the weather like in Dublin?"));
        var result3 = await service.GetChatMessageContentAsync(chatHistory, executionSettings, kernel);
        chatHistory.Add(result3);
        Console.WriteLine(result3);

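        // Boston was already asked about above, so the model can answer from the GetWeather result
        // that is still in the chat history instead of requesting the function again.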
        chatHistory.Add(new ChatMessageContent(AuthorRole.User, "What is the weather like in Boston?"));
        var result4 = await service.GetChatMessageContentAsync(chatHistory, executionSettings, kernel);
        chatHistory.Add(result4);
        Console.WriteLine(result4);
    }

    private sealed class WeatherPlugin
    {
        [KernelFunction]
        [Description("Get the current weather in a given location.")]
        public string GetWeather(
            [Description("The city and department, e.g. Marseille, 13")] string location
        ) => $"12°C\nWind: 11 KMPH\nHumidity: 48%\nMostly cloudy\nLocation: {location}";
    }

    private Kernel CreateKernelWithPlugin<T>()
    {
        // Create a logging handler to output HTTP requests and responses
        var handler = new LoggingHandler(new HttpClientHandler(), this.Output);
        HttpClient httpClient = new(handler);

        // Create a kernel with OpenAI chat completion and WeatherPlugin
        IKernelBuilder kernelBuilder = Kernel.CreateBuilder();
        kernelBuilder.AddOpenAIChatCompletion(
            modelId: TestConfiguration.OpenAI.ChatModelId!,
            apiKey: TestConfiguration.OpenAI.ApiKey!,
            httpClient: httpClient);
        kernelBuilder.Plugins.AddFromType<T>();
        Kernel kernel = kernelBuilder.Build();
        return kernel;
    }
}
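One way to check that the repeated Boston ask is answered from the chat history rather than by calling GetWeather again (a rough sketch with hypothetical names, not part of this change; the exact behaviour depends on the model) is to count invocations inside the plugin and inspect the count after the conversation:

// Hypothetical variant of WeatherPlugin that counts how often the model actually invokes it.
private sealed class CountingWeatherPlugin
{
    public int InvocationCount { get; private set; }

    [KernelFunction]
    [Description("Get the current weather in a given location.")]
    public string GetWeather(
        [Description("The city and department, e.g. Marseille, 13")] string location)
    {
        this.InvocationCount++;
        return $"12°C\nWind: 11 KMPH\nHumidity: 48%\nMostly cloudy\nLocation: {location}";
    }
}

// Usage sketch: register an instance so the count can be inspected after the conversation.
// var plugin = new CountingWeatherPlugin();
// kernelBuilder.Plugins.AddFromObject(plugin);
// ... run the four asks above ...
// Assert.Equal(3, plugin.InvocationCount); // expected to stay at 3 if the repeated Boston ask reuses the earlier result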