diff --git a/.github/actions/azure-functions-integration-setup/action.yml b/.github/actions/azure-functions-integration-setup/action.yml index 357168d92e..28c1c6cd1d 100644 --- a/.github/actions/azure-functions-integration-setup/action.yml +++ b/.github/actions/azure-functions-integration-setup/action.yml @@ -12,7 +12,7 @@ runs: docker rm -f dts-emulator fi echo "Starting Durable Task Scheduler Emulator" - docker run -d --name dts-emulator -p 8080:8080 -p 8082:8082 mcr.microsoft.com/dts/dts-emulator:latest + docker run -d --name dts-emulator -p 8080:8080 -p 8082:8082 -e DTS_USE_DYNAMIC_TASK_HUBS=true mcr.microsoft.com/dts/dts-emulator:latest echo "Waiting for Durable Task Scheduler Emulator to be ready" timeout 30 bash -c 'until curl --silent http://localhost:8080/healthz; do sleep 1; done' echo "Durable Task Scheduler Emulator is ready" diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md index 352d0b22f7..5866f1f895 100644 --- a/.github/copilot-instructions.md +++ b/.github/copilot-instructions.md @@ -14,6 +14,8 @@ Here are some general guidelines that apply to all code. - The top of all *.cs files should have a copyright notice: `// Copyright (c) Microsoft. All rights reserved.` - All public methods and classes should have XML documentation comments. +- After adding, modifying or deleting code, run `dotnet build`, and then fix any reported build errors. +- After adding or modifying code, run `dotnet format` to automatically fix any formatting errors. 
### C# Sample Code Guidelines diff --git a/.github/workflows/dotnet-build-and-test.yml b/.github/workflows/dotnet-build-and-test.yml index 692f3e7c45..31d1420c64 100644 --- a/.github/workflows/dotnet-build-and-test.yml +++ b/.github/workflows/dotnet-build-and-test.yml @@ -95,7 +95,7 @@ jobs: echo "COSMOS_EMULATOR_AVAILABLE=true" >> $env:GITHUB_ENV - name: Setup dotnet - uses: actions/setup-dotnet@v5.0.1 + uses: actions/setup-dotnet@v5.1.0 with: global-json-file: ${{ github.workspace }}/dotnet/global.json - name: Build dotnet solutions diff --git a/.github/workflows/merge-gatekeeper.yml b/.github/workflows/merge-gatekeeper.yml index 49d04183d5..de1a68a78e 100644 --- a/.github/workflows/merge-gatekeeper.yml +++ b/.github/workflows/merge-gatekeeper.yml @@ -29,4 +29,4 @@ jobs: token: ${{ secrets.GITHUB_TOKEN }} timeout: 3600 interval: 30 - ignored: CodeQL + ignored: CodeQL,CodeQL analysis (csharp) diff --git a/.github/workflows/python-test-coverage-report.yml b/.github/workflows/python-test-coverage-report.yml index e09d9c8870..92e13f9168 100644 --- a/.github/workflows/python-test-coverage-report.yml +++ b/.github/workflows/python-test-coverage-report.yml @@ -34,9 +34,16 @@ jobs: # because the workflow_run event does not have access to the PR number # The PR number is needed to post the comment on the PR run: | - PR_NUMBER=$(cat pr_number) - echo "PR number: $PR_NUMBER" - echo "PR_NUMBER=$PR_NUMBER" >> $GITHUB_ENV + if [ ! 
-s pr_number ]; then + echo "PR number file 'pr_number' is missing or empty" + exit 1 + fi + PR_NUMBER=$(head -1 pr_number | tr -dc '0-9') + if [ -z "$PR_NUMBER" ]; then + echo "PR number file 'pr_number' does not contain a valid PR number" + exit 1 + fi + echo "PR_NUMBER=$PR_NUMBER" >> "$GITHUB_ENV" - name: Pytest coverage comment id: coverageComment uses: MishaKav/pytest-coverage-comment@v1.2.0 diff --git a/docs/decisions/0011-python-typeddict-options.md b/docs/decisions/0012-python-typeddict-options.md similarity index 100% rename from docs/decisions/0011-python-typeddict-options.md rename to docs/decisions/0012-python-typeddict-options.md diff --git a/docs/decisions/0012-python-get-response-simplification.md b/docs/decisions/0013-python-get-response-simplification.md similarity index 100% rename from docs/decisions/0012-python-get-response-simplification.md rename to docs/decisions/0013-python-get-response-simplification.md diff --git a/docs/decisions/0014-feature-collections.md b/docs/decisions/0014-feature-collections.md new file mode 100644 index 0000000000..d96ab4ca5a --- /dev/null +++ b/docs/decisions/0014-feature-collections.md @@ -0,0 +1,423 @@ +--- +status: accepted +contact: westey-m +date: 2025-01-21 +deciders: sergeymenshykh, markwallace, rbarreto, westey-m, stephentoub +consulted: reubenbond +informed: +--- + +# Feature Collections + +## Context and Problem Statement + +When using agents, we often have cases where we want to pass some arbitrary services or data to an agent or some component in the agent execution stack. +These services or data are not necessarily known at compile time and can vary by the agent stack that the user has built. +E.g., there may be an agent decorator or chat client decorator that was added to the stack by the user, and an arbitrary payload needs to be passed to that decorator. 
+ +Since these payloads are related to components that are not integral parts of the agent framework, they cannot be added as strongly typed settings to the agent run options. +However, the payloads could be added to the agent run options as loosely typed 'features', that can be retrieved as needed. + +In some cases certain classes of agents may support the same capability, but not all agents do. +Having the configuration for such a capability on the main abstraction would advertise the functionality to all users, even if their chosen agent does not support it. +The user may type test for certain agent types, and call overloads on the appropriate agent types, with the strongly typed configuration. +Having a feature collection though, would be an alternative way of passing such configuration, without needing to type check the agent type. +All agents that support the functionality would be able to check for the configuration and use it, simplifying the user code. +If the agent does not support the capability, that configuration would be ignored. + +### Sample Scenario 1 - Per Run ChatMessageStore Override for hosting Libraries + +We are building an agent hosting library, that can host any agent built using the agent framework. +Where an agent is not built on a service that uses in-service chat history storage, the hosting library wants to force the agent to use +the hosting library's chat history storage implementation. +This chat history storage implementation may be specifically tailored to the type of protocol that the hosting library uses, e.g. conversation id based storage or response id based storage. +The hosting library does not know what type of agent it is hosting, so it cannot provide a strongly typed parameter on the agent. +Instead, it adds the chat history storage implementation to a feature collection, and if the agent supports custom chat history storage, it retrieves the implementation from the feature collection and uses it. 
+ +```csharp +// Pseudo-code for an agent hosting library that supports conversation id based hosting. +public async Task HandleConversationsBasedRequestAsync(AIAgent agent, string conversationId, string userInput) +{ + var thread = await this._threadStore.GetOrCreateThread(conversationId); + + // The hosting library can set a per-run chat message store via Features that only applies for that run. + // This message store will load and save messages under the conversation id provided. + ConversationsChatMessageStore messageStore = new(this._dbClient, conversationId); + var response = await agent.RunAsync( + userInput, + thread, + options: new AgentRunOptions() + { + Features = new AgentFeatureCollection().WithFeature(messageStore) + }); + + await this._threadStore.SaveThreadAsync(conversationId, thread); + return response.Text; +} + +// Pseudo-code for an agent hosting library that supports response id based hosting. +public async Task<(string responseMessage, string responseId)> HandleResponseIdBasedRequestAsync(AIAgent agent, string previousResponseId, string userInput) +{ + var thread = await this._threadStore.GetOrCreateThreadAsync(previousResponseId); + + // The hosting library can set a per-run chat message store via Features that only applies for that run. + // This message store will buffer newly added messages until explicitly saved after the run. + ResponsesChatMessageStore messageStore = new(this._dbClient, previousResponseId); + + var response = await agent.RunAsync( + userInput, + thread, + options: new AgentRunOptions() + { + Features = new AgentFeatureCollection().WithFeature(messageStore) + }); + + // Since the message store may not actually have been used at all (if the agent's underlying chat client requires service-based chat history storage), + // we may not have anything to save back to the database. + // We still want to generate a new response id though, so that we can save the updated thread state under that id. 
+ // We should also use the same id to save any buffered messages in the message store if there are any. + var newResponseId = this.GenerateResponseId(); + if (messageStore.HasBufferedMessages) + { + await messageStore.SaveBufferedMessagesAsync(newResponseId); + } + + // Save the updated thread state under the new response id that was generated by the store. + await this._threadStore.SaveThreadAsync(newResponseId, thread); + return (response.Text, newResponseId); +} +``` + +### Sample Scenario 2 - Structured output + +Currently our base abstraction does not support structured output, since the capability is not supported by all agents. +For those agents that don't support structured output, we could add an agent decorator that takes the response from the underlying agent, and applies structured output parsing on top of it via an additional LLM call. + +If we add structured output configuration as a feature, then any agent that supports structured output could retrieve the configuration from the feature collection and apply it, and where it is not supported, the configuration would simply be ignored. + +We could add a simple StructuredOutputAgentFeature that can be added to the list of features and also be used to return the generated structured output. + +```csharp +internal class StructuredOutputAgentFeature +{ + public Type? OutputType { get; set; } + + public JsonSerializerOptions? SerializerOptions { get; set; } + + public bool? UseJsonSchemaResponseFormat { get; set; } + + // Contains the result of the structured output parsing request. + public ChatResponse? ChatResponse { get; set; } +} +``` + +We can add a simple decorator class that does the chat client invocation. 
+ +```csharp +public class StructuredOutputAgent : DelegatingAIAgent +{ + private readonly IChatClient _chatClient; + public StructuredOutputAgent(AIAgent innerAgent, IChatClient chatClient) + : base(innerAgent) + { + this._chatClient = Throw.IfNull(chatClient); + } + + public override async Task RunAsync( + IEnumerable messages, + AgentThread? thread = null, + AgentRunOptions? options = null, + CancellationToken cancellationToken = default) + { + // Run the inner agent first, to get back the text response we want to convert. + var response = await base.RunAsync(messages, thread, options, cancellationToken).ConfigureAwait(false); + + if (options?.Features?.TryGet(out var responseFormatFeature) is true + && responseFormatFeature.OutputType is not null) + { + // Create the chat options to request structured output. + ChatOptions chatOptions = new() + { + ResponseFormat = ChatResponseFormat.ForJsonSchema(responseFormatFeature.OutputType, responseFormatFeature.SerializerOptions) + }; + + // Invoke the chat client to transform the text output into structured data. + // The feature is updated with the result. + // The code can be simplified by adding a non-generic structured output GetResponseAsync + // overload that takes Type as input. + responseFormatFeature.ChatResponse = await this._chatClient.GetResponseAsync( + messages: new[] + { + new ChatMessage(ChatRole.System, "You are a json expert and when provided with any text, will convert it to the requested json format."), + new ChatMessage(ChatRole.User, response.Text) + }, + options: chatOptions, + cancellationToken: cancellationToken).ConfigureAwait(false); + } + + return response; + } +} +``` + +Finally, we can add an extension method on `AIAgent` that can add the feature to the run options and check the feature for the structured output result and add the deserialized result to the response. + +```csharp +public static async Task> RunAsync( + this AIAgent agent, + IEnumerable messages, + AgentThread? 
thread = null, + JsonSerializerOptions? serializerOptions = null, + AgentRunOptions? options = null, + bool? useJsonSchemaResponseFormat = null, + CancellationToken cancellationToken = default) +{ + // Create the structured output feature. + var structuredOutputFeature = new StructuredOutputAgentFeature(); + structuredOutputFeature.OutputType = typeof(T); + structuredOutputFeature.UseJsonSchemaResponseFormat = useJsonSchemaResponseFormat; + + // Run the agent. + options ??= new AgentRunOptions(); + options.Features ??= new AgentFeatureCollection(); + options.Features.Set(structuredOutputFeature); + + var response = await agent.RunAsync(messages, thread, options, cancellationToken).ConfigureAwait(false); + + // Deserialize the JSON output. + if (structuredOutputFeature.ChatResponse is not null) + { + var typed = new ChatResponse(structuredOutputFeature.ChatResponse, serializerOptions ?? AgentJsonUtilities.DefaultOptions); + return new AgentRunResponse(response, typed.Result); + } + + throw new InvalidOperationException("No structured output response was generated by the agent."); +} +``` + +We can then use the extension method with any agent that supports structured output or that has +been decorated with the `StructuredOutputAgent` decorator. 
+ +```csharp +agent = new StructuredOutputAgent(agent, chatClient); + +AgentRunResponse response = await agent.RunAsync([new ChatMessage( + ChatRole.User, + "Please provide information about John Smith, who is a 35-year-old software engineer.")]); +``` + +## Implementation Options + +Three options were considered for implementing feature collections: + +- **Option 1**: FeatureCollections similar to ASP.NET Core +- **Option 2**: AdditionalProperties Dictionary +- **Option 3**: IServiceProvider + +Here are some comparisons about their suitability for our use case: + +| Criteria | Feature Collection | Additional Properties | IServiceProvider | +|------------------|--------------------|-----------------------|------------------| +|Ease of use |✅ Good |❌ Bad |✅ Good | +|User familiarity |❌ Bad |✅ Good |✅ Good | +|Type safety |✅ Good |❌ Bad |✅ Good | +|Ability to modify registered options when progressing down the stack|✅ Supported|✅ Supported|❌ Not-Supported (IServiceProvider is read-only)| +|Already available in MEAI stack|❌ No|✅ Yes|❌ No| +|Ambiguity with existing AdditionalProperties|❌ Yes|✅ No|❌ Yes| + +## IServiceProvider + +Service Collections and Service Providers provide a very popular way to register and retrieve services by type and could be used as a way to pass features to agents and chat clients. + +However, since IServiceProvider is read-only, it is not possible to modify the registered services when progressing down the execution stack. +E.g. an agent decorator cannot add additional services to the IServiceProvider passed to it when calling into the inner agent. + +IServiceProvider also does not expose a way to list all services contained in it, making it difficult to copy services from one provider to another. + +This lack of mutability makes IServiceProvider unsuitable for our use case, since we will not be able to use it to build sample scenario 2. 
+ +## AdditionalProperties dictionary + +The AdditionalProperties dictionary is already available on various options classes in the agent framework as well as in the MEAI stack and +allows storing arbitrary key/value pairs, where the key is a string and the value is an object. + +While FeatureCollection uses Type as a key, AdditionalProperties uses string keys. +This means that users need to agree on string keys to use for specific features, however it is also possible to use Type.FullName as a key by convention +to avoid key collisions, which is an easy convention to follow. + +Since the value of AdditionalProperties is of type object, users need to cast the value to the expected type when retrieving it, which is also +a drawback, but when using the convention of using Type.FullName as a key, there is at least a clear expectation of what type to cast to. + +```csharp +// Setting a feature +options.AdditionalProperties[typeof(MyFeature).FullName] = new MyFeature(); + +// Retrieving a feature +if (options.AdditionalProperties.TryGetValue(typeof(MyFeature).FullName, out var featureObj) + && featureObj is MyFeature myFeature) +{ + // Use myFeature +} +``` + +It would also be possible to add extension methods to simplify setting and getting features from AdditionalProperties. +Having a base class for features should help make this more feature rich. + +```csharp +// Setting a feature, this can use Type.FullName as the key. +options.AdditionalProperties + .WithFeature(new MyFeature()); + +// Retrieving a feature, this can use Type.FullName as the key. +if (options.AdditionalProperties.TryGetFeature(out var myFeature)) +{ + // Use myFeature +} +``` + +It would also be possible to add extension methods for a feature to simplify setting and getting features from AdditionalProperties. 
+ +```csharp +// Setting a feature +options.AdditionalProperties + .WithMyFeature(new MyFeature()); +// Retrieving a feature +if (options.AdditionalProperties.TryGetMyFeature(out var myFeature)) +{ + // Use myFeature +} +``` + +## Feature Collection + +If we choose the feature collection option, we need to decide on the design of the feature collection itself. + +### Feature Collections extension points + +We need to decide the set of actions that feature collections would be supported for. Here is the suggested list of actions: + +**MAAI.AIAgent:** + +1. GetNewThread + 1. E.g. this would allow passing an already existing storage id for the thread to use, or an initialized custom chat message store to use. +1. DeserializeThread + 1. E.g. this would allow passing an already existing storage id for the thread to use, or an initialized custom chat message store to use. +1. Run / RunStreaming + 1. E.g. this would allow passing an override chat message store just for that run, or a desired schema for a structured output middleware component. + +**MEAI.ChatClient:** + +1. GetResponse / GetStreamingResponse + +### Reconciling with existing AdditionalProperties + +If we decide to add feature collections, separately from the existing AdditionalProperties dictionaries, we need to consider how to explain to users when to use each one. +One possible approach though is to have the one use the other under the hood. +AdditionalProperties could be stored as a feature in the feature collection. + +Users would be able to retrieve additional properties from the feature collection, in addition to retrieving it via a dedicated AdditionalProperties property. +E.g. `features.Get()` + +One challenge with this approach is that when setting a value in the AdditionalProperties dictionary, the feature collection would need to be created first if it does not already exist. + +```csharp +public class AgentRunOptions +{ + public AdditionalPropertiesDictionary? 
AdditionalProperties { get; set; } + public IAgentFeatureCollection? Features { get; set; } +} + +var options = new AgentRunOptions(); +// This would need to create the feature collection first, if it does not already exist. +options.AdditionalProperties ??= new AdditionalPropertiesDictionary(); +``` + +Since IAgentFeatureCollection is an interface, AgentRunOptions would need to have a concrete implementation of the interface to create, meaning that the user cannot decide. +It also means that if the user doesn't realise that AdditionalProperties is implemented using feature collections, they may set a value on AdditionalProperties, and then later overwrite the entire feature collection, losing the AdditionalProperties feature. + +Options to avoid these issues: + +1. Make `Features` readonly. + 1. This would prevent the user from overwriting the feature collection after setting AdditionalProperties. + 1. Since the user cannot set their own implementation of IAgentFeatureCollection, having an interface for it may not be necessary. + +### Feature Collection Implementation + +We have two options for implementing feature collections: + +1. Create our own [IAgentFeatureCollection interface](https://github.com/microsoft/agent-framework/pull/2354/files#diff-9c42f3e60d70a791af9841d9214e038c6de3eebfc10e3997cb4cdffeb2f1246d) and [implementation](https://github.com/microsoft/agent-framework/pull/2354/files#diff-a435cc738baec500b8799f7f58c1538e3bb06c772a208afc2615ff90ada3f4ca). +2. Reuse the asp.net [IFeatureCollection interface](https://github.com/dotnet/aspnetcore/blob/main/src/Extensions/Features/src/IFeatureCollection.cs) and [implementation](https://github.com/dotnet/aspnetcore/blob/main/src/Extensions/Features/src/FeatureCollection.cs). 
+ +#### Roll our own + +Advantages: + +Creating our own IAgentFeatureCollection interface and implementation has the advantage of being more clearly associated with the agent framework and allows us to +improve on some of the design decisions made in asp.net core's IFeatureCollection. + +Drawbacks: + +It would mean a different implementation to maintain and test. + +#### Reuse asp.net IFeatureCollection + +Advantages: + +Reusing the asp.net IFeatureCollection has the advantage of being able to reuse the well-established and tested implementation from asp.net +core. Users who are using agents in an asp.net core application may be able to pass feature collections from asp.net core to the agent framework directly. + +Drawbacks: + +While the package name is `Microsoft.Extensions.Features`, the namespaces of the types are `Microsoft.AspNetCore.Http.Features`, which may create confusion for users of agent framework who are not building web applications or services. +Users may rightly ask: Why do I need to use a class from asp.net core when I'm not building a web application / service? + +The current design has some design issues that would be good to avoid. E.g. it does not distinguish between a feature being "not set" and "null". Get returns both as null and there is no tryget method. +Since the [default implementation](https://github.com/dotnet/aspnetcore/blob/main/src/Extensions/Features/src/FeatureCollection.cs) also supports value types, it throws for null values of value types. +A TryGet method would be more appropriate. + +## Feature Layering + +One possible scenario when adding support for feature collections is to allow layering of features by scope. + +The following levels of scope could be supported: + +1. Application - Application wide features that apply to all agents / chat clients +2. Artifact (Agent / ChatClient) - Features that apply to all runs of a specific agent or chat client instance +3. 
Action (GetNewThread / Run / GetResponse) - Features that apply to a single action only + +When retrieving a feature from the collection, the search would start from the most specific scope (Action) and progress to the least specific scope (Application), returning the first matching feature found. + +Introducing layering adds some challenges: + +- There may be multiple feature collections at the same scope level, e.g. an Agent that uses a ChatClient where both have their own feature collections. + - Do we layer the agent feature collection over the chat client feature collection (Application -> ChatClient -> Agent -> Run), or only use the agent feature collection in the agent (Application -> Agent -> Run), and the chat client feature collection in the chat client (Application -> ChatClient -> Run)? +- The appropriate base feature collection may change when progressing down the stack, e.g. when an Agent calls a ChatClient, the action feature collection stays the same, but the artifact feature collection changes. +- Who creates the feature collection hierarchy? + - Since the hierarchy changes as it progresses down the execution stack, and the caller can only pass in the action level feature collection, the callee needs to combine it with its own artifact level feature collection and the application level feature collection. Each action will need to build the appropriate feature collection hierarchy at the start of its execution. +- For Artifact level features, it seems odd to pass them in as a bag of untyped features, when we are constructing a known artifact type and therefore can have typed settings. + - E.g. today we have a strongly typed setting on ChatClientAgentOptions to configure a ChatMessageStore for the agent. +- To avoid global statics for application level features, the user would need to pass in the application level feature collection to each artifact that they create
 - This would be very odd if the user also already has to set strongly typed settings for each feature that they want to set at the artifact level. + +### Layering Options + +1. No layering - only a single feature collection is supported per action (the caller can still create a layered collection if desired, but the callee does not do any layering automatically). + 1. Fallback is to any features configured on the artifact via strongly typed settings. +1. Full layering - support layering at all levels (Application -> Artifact -> Action). + 1. Only apply applicable artifact level features when calling into that artifact. + 1. Apply upstream artifact features when calling into downstream artifacts, e.g. Feature hierarchy in ChatClientAgent would be `Application -> Agent -> Run` and in ChatClient would be `Application -> ChatClient -> Agent -> Run` or `Application -> Agent -> ChatClient -> Run` + 1. The user needs to provide the application level feature collection to each artifact that they create and artifact features are passed via strongly typed settings. + +### Options for accessing application level features + +We need to consider how application level features would be accessed if supported. + +1. The user provides the application level feature collection to each artifact that the user constructs + 1. Passing the application level feature collection to each artifact is tedious for the user. +1. There is a static application level feature collection that can be accessed globally. + 1. Statics create issues with testing and isolation. + +## Decisions + +- Feature Collections Container: Use AdditionalProperties +- Feature Layering: No layering - only a single collection/dictionary is supported per action. Application layers can be added later if needed.
diff --git a/dotnet/Directory.Packages.props b/dotnet/Directory.Packages.props index c7e53ea256..d721e208ff 100644 --- a/dotnet/Directory.Packages.props +++ b/dotnet/Directory.Packages.props @@ -33,18 +33,18 @@ - + - + - - + + @@ -61,9 +61,9 @@ - - - + + + @@ -71,11 +71,11 @@ - + - + @@ -143,6 +143,7 @@ + diff --git a/dotnet/agent-framework-dotnet.slnx b/dotnet/agent-framework-dotnet.slnx index 002efdbab1..8b1b00fd2b 100644 --- a/dotnet/agent-framework-dotnet.slnx +++ b/dotnet/agent-framework-dotnet.slnx @@ -35,6 +35,18 @@ + + + + + + + + + + + + @@ -81,6 +93,7 @@ + @@ -286,6 +299,11 @@ + + + + + @@ -396,6 +414,7 @@ + @@ -435,6 +454,7 @@ + \ No newline at end of file diff --git a/dotnet/nuget/nuget-package.props b/dotnet/nuget/nuget-package.props index 4982c91ce5..9fd1487c47 100644 --- a/dotnet/nuget/nuget-package.props +++ b/dotnet/nuget/nuget-package.props @@ -2,9 +2,9 @@ 1.0.0 - $(VersionPrefix)-$(VersionSuffix).260108.1 - $(VersionPrefix)-preview.260108.1 - 1.0.0-preview.260108.1 + $(VersionPrefix)-$(VersionSuffix).260121.1 + $(VersionPrefix)-preview.260121.1 + 1.0.0-preview.260121.1 Debug;Release;Publish true diff --git a/dotnet/samples/DurableAgents/ConsoleApps/01_SingleAgent/01_SingleAgent.csproj b/dotnet/samples/DurableAgents/ConsoleApps/01_SingleAgent/01_SingleAgent.csproj new file mode 100644 index 0000000000..6dc2007867 --- /dev/null +++ b/dotnet/samples/DurableAgents/ConsoleApps/01_SingleAgent/01_SingleAgent.csproj @@ -0,0 +1,30 @@ + + + net10.0 + Exe + enable + enable + SingleAgent + SingleAgent + + + + + + + + + + + + + + + + + diff --git a/dotnet/samples/DurableAgents/ConsoleApps/01_SingleAgent/Program.cs b/dotnet/samples/DurableAgents/ConsoleApps/01_SingleAgent/Program.cs new file mode 100644 index 0000000000..9d0fe3359b --- /dev/null +++ b/dotnet/samples/DurableAgents/ConsoleApps/01_SingleAgent/Program.cs @@ -0,0 +1,103 @@ +// Copyright (c) Microsoft. All rights reserved. 
+ +using Azure; +using Azure.AI.OpenAI; +using Azure.Identity; +using Microsoft.Agents.AI; +using Microsoft.Agents.AI.DurableTask; +using Microsoft.DurableTask.Client.AzureManaged; +using Microsoft.DurableTask.Worker.AzureManaged; +using Microsoft.Extensions.AI; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Hosting; +using Microsoft.Extensions.Logging; +using OpenAI.Chat; + +// Get the Azure OpenAI endpoint and deployment name from environment variables. +string endpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT") + ?? throw new InvalidOperationException("AZURE_OPENAI_ENDPOINT is not set."); +string deploymentName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOYMENT") + ?? throw new InvalidOperationException("AZURE_OPENAI_DEPLOYMENT is not set."); + +// Get DTS connection string from environment variable +string dtsConnectionString = Environment.GetEnvironmentVariable("DURABLE_TASK_SCHEDULER_CONNECTION_STRING") + ?? "Endpoint=http://localhost:8080;TaskHub=default;Authentication=None"; + +// Use Azure Key Credential if provided, otherwise use Azure CLI Credential. +string? azureOpenAiKey = Environment.GetEnvironmentVariable("AZURE_OPENAI_KEY"); +AzureOpenAIClient client = !string.IsNullOrEmpty(azureOpenAiKey) + ? new AzureOpenAIClient(new Uri(endpoint), new AzureKeyCredential(azureOpenAiKey)) + : new AzureOpenAIClient(new Uri(endpoint), new AzureCliCredential()); + +// Set up an AI agent following the standard Microsoft Agent Framework pattern. +const string JokerName = "Joker"; +const string JokerInstructions = "You are good at telling jokes."; + +AIAgent agent = client.GetChatClient(deploymentName).AsAIAgent(JokerInstructions, JokerName); + +// Configure the console app to host the AI agent. 
+IHost host = Host.CreateDefaultBuilder(args) + .ConfigureLogging(logging => logging.SetMinimumLevel(LogLevel.Warning)) + .ConfigureServices(services => + { + services.ConfigureDurableAgents( + options => options.AddAIAgent(agent, timeToLive: TimeSpan.FromHours(1)), + workerBuilder: builder => builder.UseDurableTaskScheduler(dtsConnectionString), + clientBuilder: builder => builder.UseDurableTaskScheduler(dtsConnectionString)); + }) + .Build(); + +await host.StartAsync(); + +// Get the agent proxy from services +IServiceProvider services = host.Services; +AIAgent agentProxy = services.GetRequiredKeyedService(JokerName); + +// Console colors for better UX +Console.ForegroundColor = ConsoleColor.Cyan; +Console.WriteLine("=== Single Agent Console Sample ==="); +Console.ResetColor(); +Console.WriteLine("Enter a message for the Joker agent (or 'exit' to quit):"); +Console.WriteLine(); + +// Create a thread for the conversation +AgentThread thread = await agentProxy.GetNewThreadAsync(); + +while (true) +{ + // Read input from stdin + Console.ForegroundColor = ConsoleColor.Yellow; + Console.Write("You: "); + Console.ResetColor(); + + string? 
input = Console.ReadLine(); + if (string.IsNullOrWhiteSpace(input) || input.Equals("exit", StringComparison.OrdinalIgnoreCase)) + { + break; + } + + // Run the agent + Console.ForegroundColor = ConsoleColor.Green; + Console.Write("Joker: "); + Console.ResetColor(); + + try + { + AgentResponse agentResponse = await agentProxy.RunAsync( + message: input, + thread: thread, + cancellationToken: CancellationToken.None); + + Console.WriteLine(agentResponse.Text); + Console.WriteLine(); + } + catch (Exception ex) + { + Console.ForegroundColor = ConsoleColor.Red; + Console.Error.WriteLine($"Error: {ex.Message}"); + Console.ResetColor(); + Console.WriteLine(); + } +} + +await host.StopAsync(); diff --git a/dotnet/samples/DurableAgents/ConsoleApps/01_SingleAgent/README.md b/dotnet/samples/DurableAgents/ConsoleApps/01_SingleAgent/README.md new file mode 100644 index 0000000000..7c921b0d87 --- /dev/null +++ b/dotnet/samples/DurableAgents/ConsoleApps/01_SingleAgent/README.md @@ -0,0 +1,56 @@ +# Single Agent Sample + +This sample demonstrates how to use the durable agents extension to create a simple console app that hosts a single AI agent and provides interactive conversation via stdin/stdout. + +## Key Concepts Demonstrated + +- Using the Microsoft Agent Framework to define a simple AI agent with a name and instructions. +- Registering durable agents with the console app and running them interactively. +- Conversation management (via threads) for isolated interactions. + +## Environment Setup + +See the [README.md](../README.md) file in the parent directory for more information on how to configure the environment, including how to install and run common sample dependencies. + +## Running the Sample + +With the environment setup, you can run the sample: + +```bash +cd dotnet/samples/DurableAgents/ConsoleApps/01_SingleAgent +dotnet run --framework net10.0 +``` + +The app will prompt you for input. 
You can interact with the Joker agent: + +```text +=== Single Agent Console Sample === +Enter a message for the Joker agent (or 'exit' to quit): + +You: Tell me a joke about a pirate. +Joker: Why don't pirates ever learn the alphabet? Because they always get stuck at "C"! + +You: Now explain the joke. +Joker: The joke plays on the word "sea" (C), which pirates are famously associated with... + +You: exit +``` + +## Scriptable Usage + +You can also pipe input to the app for scriptable usage: + +```bash +echo "Tell me a joke about a pirate." | dotnet run +``` + +The app will read from stdin, process the input, and write the response to stdout. + +## Viewing Agent State + +You can view the state of the agent in the Durable Task Scheduler dashboard: + +1. Open your browser and navigate to `http://localhost:8082` +2. In the dashboard, you can view the state of the Joker agent, including its conversation history and current state + +The agent maintains conversation state across multiple interactions, and you can inspect this state in the dashboard to understand how the durable agents extension manages conversation context. 
diff --git a/dotnet/samples/DurableAgents/ConsoleApps/02_AgentOrchestration_Chaining/02_AgentOrchestration_Chaining.csproj b/dotnet/samples/DurableAgents/ConsoleApps/02_AgentOrchestration_Chaining/02_AgentOrchestration_Chaining.csproj new file mode 100644 index 0000000000..ef74da183b --- /dev/null +++ b/dotnet/samples/DurableAgents/ConsoleApps/02_AgentOrchestration_Chaining/02_AgentOrchestration_Chaining.csproj @@ -0,0 +1,30 @@ + + + net10.0 + Exe + enable + enable + AgentOrchestration_Chaining + AgentOrchestration_Chaining + + + + + + + + + + + + + + + + + diff --git a/dotnet/samples/DurableAgents/ConsoleApps/02_AgentOrchestration_Chaining/Models.cs b/dotnet/samples/DurableAgents/ConsoleApps/02_AgentOrchestration_Chaining/Models.cs new file mode 100644 index 0000000000..593b468457 --- /dev/null +++ b/dotnet/samples/DurableAgents/ConsoleApps/02_AgentOrchestration_Chaining/Models.cs @@ -0,0 +1,6 @@ +// Copyright (c) Microsoft. All rights reserved. + +namespace AgentOrchestration_Chaining; + +// Response model +public sealed record TextResponse(string Text); diff --git a/dotnet/samples/DurableAgents/ConsoleApps/02_AgentOrchestration_Chaining/Program.cs b/dotnet/samples/DurableAgents/ConsoleApps/02_AgentOrchestration_Chaining/Program.cs new file mode 100644 index 0000000000..74c299ae0c --- /dev/null +++ b/dotnet/samples/DurableAgents/ConsoleApps/02_AgentOrchestration_Chaining/Program.cs @@ -0,0 +1,148 @@ +// Copyright (c) Microsoft. All rights reserved. 
+ +using AgentOrchestration_Chaining; +using Azure; +using Azure.AI.OpenAI; +using Azure.Identity; +using Microsoft.Agents.AI; +using Microsoft.Agents.AI.DurableTask; +using Microsoft.DurableTask; +using Microsoft.DurableTask.Client; +using Microsoft.DurableTask.Client.AzureManaged; +using Microsoft.DurableTask.Worker; +using Microsoft.DurableTask.Worker.AzureManaged; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Hosting; +using Microsoft.Extensions.Logging; +using OpenAI.Chat; +using Environment = System.Environment; + +// Get the Azure OpenAI endpoint and deployment name from environment variables. +string endpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT") + ?? throw new InvalidOperationException("AZURE_OPENAI_ENDPOINT is not set."); +string deploymentName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOYMENT") + ?? throw new InvalidOperationException("AZURE_OPENAI_DEPLOYMENT is not set."); + +// Get DTS connection string from environment variable +string dtsConnectionString = Environment.GetEnvironmentVariable("DURABLE_TASK_SCHEDULER_CONNECTION_STRING") + ?? "Endpoint=http://localhost:8080;TaskHub=default;Authentication=None"; + +// Use Azure Key Credential if provided, otherwise use Azure CLI Credential. +string? azureOpenAiKey = Environment.GetEnvironmentVariable("AZURE_OPENAI_KEY"); +AzureOpenAIClient client = !string.IsNullOrEmpty(azureOpenAiKey) + ? new AzureOpenAIClient(new Uri(endpoint), new AzureKeyCredential(azureOpenAiKey)) + : new AzureOpenAIClient(new Uri(endpoint), new AzureCliCredential()); + +// Single agent used by the orchestration to demonstrate sequential calls on the same thread. +const string WriterName = "WriterAgent"; +const string WriterInstructions = + """ + You refine short pieces of text. When given an initial sentence you enhance it; + when given an improved sentence you polish it further. 
+ """; + +AIAgent writerAgent = client.GetChatClient(deploymentName).AsAIAgent(WriterInstructions, WriterName); + +// Orchestrator function +static async Task RunOrchestratorAsync(TaskOrchestrationContext context) +{ + DurableAIAgent writer = context.GetAgent("WriterAgent"); + AgentThread writerThread = await writer.GetNewThreadAsync(); + + AgentResponse initial = await writer.RunAsync( + message: "Write a concise inspirational sentence about learning.", + thread: writerThread); + + AgentResponse refined = await writer.RunAsync( + message: $"Improve this further while keeping it under 25 words: {initial.Result.Text}", + thread: writerThread); + + return refined.Result.Text; +} + +// Configure the console app to host the AI agent. +IHost host = Host.CreateDefaultBuilder(args) + .ConfigureLogging(loggingBuilder => loggingBuilder.SetMinimumLevel(LogLevel.Warning)) + .ConfigureServices(services => + { + services.ConfigureDurableAgents( + options => options.AddAIAgent(writerAgent), + workerBuilder: builder => + { + builder.UseDurableTaskScheduler(dtsConnectionString); + builder.AddTasks(registry => registry.AddOrchestratorFunc(nameof(RunOrchestratorAsync), RunOrchestratorAsync)); + }, + clientBuilder: builder => builder.UseDurableTaskScheduler(dtsConnectionString)); + }) + .Build(); + +await host.StartAsync(); + +DurableTaskClient durableClient = host.Services.GetRequiredService(); + +// Console colors for better UX +Console.ForegroundColor = ConsoleColor.Cyan; +Console.WriteLine("=== Single Agent Orchestration Chaining Sample ==="); +Console.ResetColor(); +Console.WriteLine("Starting orchestration..."); +Console.WriteLine(); + +try +{ + // Start the orchestration + string instanceId = await durableClient.ScheduleNewOrchestrationInstanceAsync( + orchestratorName: nameof(RunOrchestratorAsync)); + + Console.ForegroundColor = ConsoleColor.Gray; + Console.WriteLine($"Orchestration started with instance ID: {instanceId}"); + Console.WriteLine("Waiting for completion..."); + 
Console.ResetColor(); + + // Wait for orchestration to complete + OrchestrationMetadata status = await durableClient.WaitForInstanceCompletionAsync( + instanceId, + getInputsAndOutputs: true, + CancellationToken.None); + + Console.WriteLine(); + + if (status.RuntimeStatus == OrchestrationRuntimeStatus.Completed) + { + Console.ForegroundColor = ConsoleColor.Green; + Console.WriteLine("✓ Orchestration completed successfully!"); + Console.ResetColor(); + Console.WriteLine(); + Console.ForegroundColor = ConsoleColor.Yellow; + Console.Write("Result: "); + Console.ResetColor(); + Console.WriteLine(status.ReadOutputAs()); + } + else if (status.RuntimeStatus == OrchestrationRuntimeStatus.Failed) + { + Console.ForegroundColor = ConsoleColor.Red; + Console.WriteLine("✗ Orchestration failed!"); + Console.ResetColor(); + if (status.FailureDetails != null) + { + Console.WriteLine($"Error: {status.FailureDetails.ErrorMessage}"); + } + Environment.Exit(1); + } + else + { + Console.ForegroundColor = ConsoleColor.Yellow; + Console.WriteLine($"Orchestration status: {status.RuntimeStatus}"); + Console.ResetColor(); + } +} +catch (Exception ex) +{ + Console.ForegroundColor = ConsoleColor.Red; + Console.Error.WriteLine($"Error: {ex.Message}"); + Console.ResetColor(); + Environment.Exit(1); +} +finally +{ + await host.StopAsync(); +} diff --git a/dotnet/samples/DurableAgents/ConsoleApps/02_AgentOrchestration_Chaining/README.md b/dotnet/samples/DurableAgents/ConsoleApps/02_AgentOrchestration_Chaining/README.md new file mode 100644 index 0000000000..715a72ada0 --- /dev/null +++ b/dotnet/samples/DurableAgents/ConsoleApps/02_AgentOrchestration_Chaining/README.md @@ -0,0 +1,53 @@ +# Single Agent Orchestration Sample + +This sample demonstrates how to use the durable agents extension to create a simple console app that orchestrates sequential calls to a single AI agent using the same conversation thread for context continuity. 
+ +## Key Concepts Demonstrated + +- Orchestrating multiple interactions with the same agent in a deterministic order +- Using the same `AgentThread` across multiple calls to maintain conversational context +- Durable orchestration with automatic checkpointing and resumption from failures +- Waiting for orchestration completion using `WaitForInstanceCompletionAsync` + +## Environment Setup + +See the [README.md](../README.md) file in the parent directory for more information on how to configure the environment, including how to install and run common sample dependencies. + +## Running the Sample + +With the environment setup, you can run the sample: + +```bash +cd dotnet/samples/DurableAgents/ConsoleApps/02_AgentOrchestration_Chaining +dotnet run --framework net10.0 +``` + +The app will start the orchestration, wait for it to complete, and display the result: + +```text +=== Single Agent Orchestration Chaining Sample === +Starting orchestration... + +Orchestration started with instance ID: 86313f1d45fb42eeb50b1852626bf3ff +Waiting for completion... + +✓ Orchestration completed successfully! + +Result: Learning serves as the key, opening doors to boundless opportunities and a brighter future. +``` + +The orchestration will proceed to run the WriterAgent twice in sequence: + +1. First, it writes an inspirational sentence about learning +2. Then, it refines the initial output using the same conversation thread + +## Viewing Orchestration State + +You can view the state of the orchestration in the Durable Task Scheduler dashboard: + +1. Open your browser and navigate to `http://localhost:8082` +2. In the dashboard, you can see: + - **Orchestrations**: View the orchestration instance, including its runtime status, input, output, and execution history + - **Agents**: View the state of the WriterAgent, including conversation history maintained across the orchestration steps + +The orchestration instance ID is displayed in the console output. 
You can use this ID to find the specific orchestration in the dashboard and inspect its execution details, including the sequence of agent calls and their results. diff --git a/dotnet/samples/DurableAgents/ConsoleApps/03_AgentOrchestration_Concurrency/03_AgentOrchestration_Concurrency.csproj b/dotnet/samples/DurableAgents/ConsoleApps/03_AgentOrchestration_Concurrency/03_AgentOrchestration_Concurrency.csproj new file mode 100644 index 0000000000..017b5fe300 --- /dev/null +++ b/dotnet/samples/DurableAgents/ConsoleApps/03_AgentOrchestration_Concurrency/03_AgentOrchestration_Concurrency.csproj @@ -0,0 +1,30 @@ + + + net10.0 + Exe + enable + enable + AgentOrchestration_Concurrency + AgentOrchestration_Concurrency + + + + + + + + + + + + + + + + + diff --git a/dotnet/samples/DurableAgents/ConsoleApps/03_AgentOrchestration_Concurrency/Models.cs b/dotnet/samples/DurableAgents/ConsoleApps/03_AgentOrchestration_Concurrency/Models.cs new file mode 100644 index 0000000000..042e245f7f --- /dev/null +++ b/dotnet/samples/DurableAgents/ConsoleApps/03_AgentOrchestration_Concurrency/Models.cs @@ -0,0 +1,6 @@ +// Copyright (c) Microsoft. All rights reserved. + +namespace AgentOrchestration_Concurrency; + +// Response model +public sealed record TextResponse(string Text); diff --git a/dotnet/samples/DurableAgents/ConsoleApps/03_AgentOrchestration_Concurrency/Program.cs b/dotnet/samples/DurableAgents/ConsoleApps/03_AgentOrchestration_Concurrency/Program.cs new file mode 100644 index 0000000000..2093cd01f1 --- /dev/null +++ b/dotnet/samples/DurableAgents/ConsoleApps/03_AgentOrchestration_Concurrency/Program.cs @@ -0,0 +1,191 @@ +// Copyright (c) Microsoft. All rights reserved. 
+ +using System.Text.Json; +using AgentOrchestration_Concurrency; +using Azure; +using Azure.AI.OpenAI; +using Azure.Identity; +using Microsoft.Agents.AI; +using Microsoft.Agents.AI.DurableTask; +using Microsoft.DurableTask; +using Microsoft.DurableTask.Client; +using Microsoft.DurableTask.Client.AzureManaged; +using Microsoft.DurableTask.Worker; +using Microsoft.DurableTask.Worker.AzureManaged; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Hosting; +using Microsoft.Extensions.Logging; +using OpenAI.Chat; + +// Get the Azure OpenAI endpoint and deployment name from environment variables. +string endpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT") + ?? throw new InvalidOperationException("AZURE_OPENAI_ENDPOINT is not set."); +string deploymentName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOYMENT") + ?? throw new InvalidOperationException("AZURE_OPENAI_DEPLOYMENT is not set."); + +// Get DTS connection string from environment variable +string dtsConnectionString = Environment.GetEnvironmentVariable("DURABLE_TASK_SCHEDULER_CONNECTION_STRING") + ?? "Endpoint=http://localhost:8080;TaskHub=default;Authentication=None"; + +// Use Azure Key Credential if provided, otherwise use Azure CLI Credential. +string? azureOpenAiKey = Environment.GetEnvironmentVariable("AZURE_OPENAI_KEY"); +AzureOpenAIClient client = !string.IsNullOrEmpty(azureOpenAiKey) + ? new AzureOpenAIClient(new Uri(endpoint), new AzureKeyCredential(azureOpenAiKey)) + : new AzureOpenAIClient(new Uri(endpoint), new AzureCliCredential()); + +// Two agents used by the orchestration to demonstrate concurrent execution. +const string PhysicistName = "PhysicistAgent"; +const string PhysicistInstructions = "You are an expert in physics. You answer questions from a physics perspective."; + +const string ChemistName = "ChemistAgent"; +const string ChemistInstructions = "You are a middle school chemistry teacher. 
You answer questions so that middle school students can understand."; + +AIAgent physicistAgent = client.GetChatClient(deploymentName).AsAIAgent(PhysicistInstructions, PhysicistName); +AIAgent chemistAgent = client.GetChatClient(deploymentName).AsAIAgent(ChemistInstructions, ChemistName); + +// Orchestrator function +static async Task RunOrchestratorAsync(TaskOrchestrationContext context, string prompt) +{ + // Get both agents + DurableAIAgent physicist = context.GetAgent(PhysicistName); + DurableAIAgent chemist = context.GetAgent(ChemistName); + + // Start both agent runs concurrently + Task> physicistTask = physicist.RunAsync(prompt); + Task> chemistTask = chemist.RunAsync(prompt); + + // Wait for both tasks to complete using Task.WhenAll + await Task.WhenAll(physicistTask, chemistTask); + + // Get the results + TextResponse physicistResponse = (await physicistTask).Result; + TextResponse chemistResponse = (await chemistTask).Result; + + // Return the result as a structured, anonymous type + return new + { + physicist = physicistResponse.Text, + chemist = chemistResponse.Text, + }; +} + +// Configure the console app to host the AI agents. 
+IHost host = Host.CreateDefaultBuilder(args) + .ConfigureLogging(loggingBuilder => loggingBuilder.SetMinimumLevel(LogLevel.Warning)) + .ConfigureServices(services => + { + services.ConfigureDurableAgents( + options => + { + options + .AddAIAgent(physicistAgent) + .AddAIAgent(chemistAgent); + }, + workerBuilder: builder => + { + builder.UseDurableTaskScheduler(dtsConnectionString); + builder.AddTasks( + registry => registry.AddOrchestratorFunc(nameof(RunOrchestratorAsync), RunOrchestratorAsync)); + }, + clientBuilder: builder => builder.UseDurableTaskScheduler(dtsConnectionString)); + }) + .Build(); + +await host.StartAsync(); + +DurableTaskClient durableTaskClient = host.Services.GetRequiredService(); + +// Console colors for better UX +Console.ForegroundColor = ConsoleColor.Cyan; +Console.WriteLine("=== Multi-Agent Concurrent Orchestration Sample ==="); +Console.ResetColor(); +Console.WriteLine("Enter a question for the agents:"); +Console.WriteLine(); + +// Read prompt from stdin +string? 
prompt = Console.ReadLine(); +if (string.IsNullOrWhiteSpace(prompt)) +{ + Console.ForegroundColor = ConsoleColor.Red; + Console.Error.WriteLine("Error: Prompt is required."); + Console.ResetColor(); + Environment.Exit(1); + return; +} + +Console.WriteLine(); +Console.ForegroundColor = ConsoleColor.Gray; +Console.WriteLine("Starting orchestration..."); +Console.ResetColor(); + +try +{ + // Start the orchestration + string instanceId = await durableTaskClient.ScheduleNewOrchestrationInstanceAsync( + orchestratorName: nameof(RunOrchestratorAsync), + input: prompt); + + Console.ForegroundColor = ConsoleColor.Gray; + Console.WriteLine($"Orchestration started with instance ID: {instanceId}"); + Console.WriteLine("Waiting for completion..."); + Console.ResetColor(); + + // Wait for orchestration to complete + OrchestrationMetadata status = await durableTaskClient.WaitForInstanceCompletionAsync( + instanceId, + getInputsAndOutputs: true, + CancellationToken.None); + + Console.WriteLine(); + + if (status.RuntimeStatus == OrchestrationRuntimeStatus.Completed) + { + Console.ForegroundColor = ConsoleColor.Green; + Console.WriteLine("✓ Orchestration completed successfully!"); + Console.ResetColor(); + Console.WriteLine(); + + // Parse the output + using JsonDocument doc = JsonDocument.Parse(status.SerializedOutput!); + JsonElement output = doc.RootElement; + + Console.ForegroundColor = ConsoleColor.Yellow; + Console.WriteLine("Physicist's response:"); + Console.ResetColor(); + Console.WriteLine(output.GetProperty("physicist").GetString()); + Console.WriteLine(); + + Console.ForegroundColor = ConsoleColor.Yellow; + Console.WriteLine("Chemist's response:"); + Console.ResetColor(); + Console.WriteLine(output.GetProperty("chemist").GetString()); + } + else if (status.RuntimeStatus == OrchestrationRuntimeStatus.Failed) + { + Console.ForegroundColor = ConsoleColor.Red; + Console.WriteLine("✗ Orchestration failed!"); + Console.ResetColor(); + if (status.FailureDetails != null) + { + 
Console.WriteLine($"Error: {status.FailureDetails.ErrorMessage}"); + } + Environment.Exit(1); + } + else + { + Console.ForegroundColor = ConsoleColor.Yellow; + Console.WriteLine($"Orchestration status: {status.RuntimeStatus}"); + Console.ResetColor(); + } +} +catch (Exception ex) +{ + Console.ForegroundColor = ConsoleColor.Red; + Console.Error.WriteLine($"Error: {ex.Message}"); + Console.ResetColor(); + Environment.Exit(1); +} +finally +{ + await host.StopAsync(); +} diff --git a/dotnet/samples/DurableAgents/ConsoleApps/03_AgentOrchestration_Concurrency/README.md b/dotnet/samples/DurableAgents/ConsoleApps/03_AgentOrchestration_Concurrency/README.md new file mode 100644 index 0000000000..2ac1a504c8 --- /dev/null +++ b/dotnet/samples/DurableAgents/ConsoleApps/03_AgentOrchestration_Concurrency/README.md @@ -0,0 +1,68 @@ +# Multi-Agent Concurrent Orchestration Sample + +This sample demonstrates how to use the durable agents extension to create a console app that orchestrates concurrent execution of multiple AI agents using durable orchestration. + +## Key Concepts Demonstrated + +- Running multiple agents concurrently in a single orchestration +- Using `Task.WhenAll` to wait for concurrent agent executions +- Combining results from multiple agents into a single response +- Waiting for orchestration completion using `WaitForInstanceCompletionAsync` + +## Environment Setup + +See the [README.md](../README.md) file in the parent directory for more information on how to configure the environment, including how to install and run common sample dependencies. + +## Running the Sample + +With the environment setup, you can run the sample: + +```bash +cd dotnet/samples/DurableAgents/ConsoleApps/03_AgentOrchestration_Concurrency +dotnet run --framework net10.0 +``` + +The app will prompt you for a question: + +```text +=== Multi-Agent Concurrent Orchestration Sample === +Enter a question for the agents: + +What is temperature? 
+``` + +The orchestration will run both agents concurrently and display their responses: + +```text +Orchestration started with instance ID: 86313f1d45fb42eeb50b1852626bf3ff +Waiting for completion... + +✓ Orchestration completed successfully! + +Physicist's response: +Temperature is a measure of the average kinetic energy of particles in a system... + +Chemist's response: +From a chemistry perspective, temperature is crucial for chemical reactions... +``` + +Both agents run in parallel, and the orchestration waits for both to complete before returning the combined results. + +## Viewing Orchestration State + +You can view the state of the orchestration in the Durable Task Scheduler dashboard: + +1. Open your browser and navigate to `http://localhost:8082` +2. In the dashboard, you can see: + - **Orchestrations**: View the orchestration instance, including its runtime status, input, output, and execution history + - **Agents**: View the state of both the PhysicistAgent and ChemistAgent, including their individual conversation histories + +The orchestration instance ID is displayed in the console output. You can use this ID to find the specific orchestration in the dashboard and inspect how the concurrent agent executions were coordinated, including the timing of when each agent started and completed. + +## Scriptable Usage + +You can also pipe input to the app: + +```bash +echo "What is temperature?" 
| dotnet run +``` diff --git a/dotnet/samples/DurableAgents/ConsoleApps/04_AgentOrchestration_Conditionals/04_AgentOrchestration_Conditionals.csproj b/dotnet/samples/DurableAgents/ConsoleApps/04_AgentOrchestration_Conditionals/04_AgentOrchestration_Conditionals.csproj new file mode 100644 index 0000000000..46e348dfec --- /dev/null +++ b/dotnet/samples/DurableAgents/ConsoleApps/04_AgentOrchestration_Conditionals/04_AgentOrchestration_Conditionals.csproj @@ -0,0 +1,30 @@ + + + net10.0 + Exe + enable + enable + AgentOrchestration_Conditionals + AgentOrchestration_Conditionals + + + + + + + + + + + + + + + + + diff --git a/dotnet/samples/DurableAgents/ConsoleApps/04_AgentOrchestration_Conditionals/Models.cs b/dotnet/samples/DurableAgents/ConsoleApps/04_AgentOrchestration_Conditionals/Models.cs new file mode 100644 index 0000000000..a39695d7d0 --- /dev/null +++ b/dotnet/samples/DurableAgents/ConsoleApps/04_AgentOrchestration_Conditionals/Models.cs @@ -0,0 +1,38 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Text.Json.Serialization; + +namespace AgentOrchestration_Conditionals; + +/// +/// Represents an email input for spam detection and response generation. +/// +public sealed class Email +{ + [JsonPropertyName("email_id")] + public string EmailId { get; set; } = string.Empty; + + [JsonPropertyName("email_content")] + public string EmailContent { get; set; } = string.Empty; +} + +/// +/// Represents the result of spam detection analysis. +/// +public sealed class DetectionResult +{ + [JsonPropertyName("is_spam")] + public bool IsSpam { get; set; } + + [JsonPropertyName("reason")] + public string Reason { get; set; } = string.Empty; +} + +/// +/// Represents a generated email response. 
+/// +public sealed class EmailResponse +{ + [JsonPropertyName("response")] + public string Response { get; set; } = string.Empty; +} diff --git a/dotnet/samples/DurableAgents/ConsoleApps/04_AgentOrchestration_Conditionals/Program.cs b/dotnet/samples/DurableAgents/ConsoleApps/04_AgentOrchestration_Conditionals/Program.cs new file mode 100644 index 0000000000..9e12f9192f --- /dev/null +++ b/dotnet/samples/DurableAgents/ConsoleApps/04_AgentOrchestration_Conditionals/Program.cs @@ -0,0 +1,228 @@ +// Copyright (c) Microsoft. All rights reserved. + +using AgentOrchestration_Conditionals; +using Azure; +using Azure.AI.OpenAI; +using Azure.Identity; +using Microsoft.Agents.AI; +using Microsoft.Agents.AI.DurableTask; +using Microsoft.DurableTask; +using Microsoft.DurableTask.Client; +using Microsoft.DurableTask.Client.AzureManaged; +using Microsoft.DurableTask.Worker; +using Microsoft.DurableTask.Worker.AzureManaged; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Hosting; +using Microsoft.Extensions.Logging; +using OpenAI.Chat; + +// Get the Azure OpenAI endpoint and deployment name from environment variables. +string endpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT") + ?? throw new InvalidOperationException("AZURE_OPENAI_ENDPOINT is not set."); +string deploymentName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOYMENT") + ?? throw new InvalidOperationException("AZURE_OPENAI_DEPLOYMENT is not set."); + +// Get DTS connection string from environment variable +string dtsConnectionString = Environment.GetEnvironmentVariable("DURABLE_TASK_SCHEDULER_CONNECTION_STRING") + ?? "Endpoint=http://localhost:8080;TaskHub=default;Authentication=None"; + +// Use Azure Key Credential if provided, otherwise use Azure CLI Credential. +string? azureOpenAiKey = Environment.GetEnvironmentVariable("AZURE_OPENAI_KEY"); +AzureOpenAIClient client = !string.IsNullOrEmpty(azureOpenAiKey) + ? 
new AzureOpenAIClient(new Uri(endpoint), new AzureKeyCredential(azureOpenAiKey)) + : new AzureOpenAIClient(new Uri(endpoint), new AzureCliCredential()); + +// Spam detection agent +const string SpamDetectionAgentName = "SpamDetectionAgent"; +const string SpamDetectionAgentInstructions = + """ + You are an expert email spam detection system. Analyze emails and determine if they are spam. + Return your analysis as JSON with 'is_spam' (boolean) and 'reason' (string) fields. + """; + +// Email assistant agent +const string EmailAssistantAgentName = "EmailAssistantAgent"; +const string EmailAssistantAgentInstructions = + """ + You are a professional email assistant. Draft professional, courteous, and helpful email responses. + Return your response as JSON with a 'response' field containing the reply. + """; + +AIAgent spamDetectionAgent = client.GetChatClient(deploymentName).AsAIAgent(SpamDetectionAgentInstructions, SpamDetectionAgentName); +AIAgent emailAssistantAgent = client.GetChatClient(deploymentName).AsAIAgent(EmailAssistantAgentInstructions, EmailAssistantAgentName); + +// Orchestrator function +static async Task RunOrchestratorAsync(TaskOrchestrationContext context, Email email) +{ + // Get the spam detection agent + DurableAIAgent spamDetectionAgent = context.GetAgent(SpamDetectionAgentName); + AgentThread spamThread = await spamDetectionAgent.GetNewThreadAsync(); + + // Step 1: Check if the email is spam + AgentResponse spamDetectionResponse = await spamDetectionAgent.RunAsync( + message: + $""" + Analyze this email for spam content and return a JSON response with 'is_spam' (boolean) and 'reason' (string) fields: + Email ID: {email.EmailId} + Content: {email.EmailContent} + """, + thread: spamThread); + DetectionResult result = spamDetectionResponse.Result; + + // Step 2: Conditional logic based on spam detection result + if (result.IsSpam) + { + // Handle spam email + return await context.CallActivityAsync(nameof(HandleSpamEmail), result.Reason); + } + + // 
Generate and send response for legitimate email + DurableAIAgent emailAssistantAgent = context.GetAgent(EmailAssistantAgentName); + AgentThread emailThread = await emailAssistantAgent.GetNewThreadAsync(); + + AgentResponse emailAssistantResponse = await emailAssistantAgent.RunAsync( + message: + $""" + Draft a professional response to this email. Return a JSON response with a 'response' field containing the reply: + + Email ID: {email.EmailId} + Content: {email.EmailContent} + """, + thread: emailThread); + + EmailResponse emailResponse = emailAssistantResponse.Result; + + return await context.CallActivityAsync(nameof(SendEmail), emailResponse.Response); +} + +// Activity functions +static void HandleSpamEmail(TaskActivityContext context, string reason) +{ + Console.WriteLine($"Email marked as spam: {reason}"); +} + +static void SendEmail(TaskActivityContext context, string message) +{ + Console.WriteLine($"Email sent: {message}"); +} + +// Configure the console app to host the AI agents. 
+IHost host = Host.CreateDefaultBuilder(args) + .ConfigureLogging(loggingBuilder => loggingBuilder.SetMinimumLevel(LogLevel.Warning)) + .ConfigureServices(services => + { + services.ConfigureDurableAgents( + options => + { + options + .AddAIAgent(spamDetectionAgent) + .AddAIAgent(emailAssistantAgent); + }, + workerBuilder: builder => + { + builder.UseDurableTaskScheduler(dtsConnectionString); + builder.AddTasks(registry => + { + registry.AddOrchestratorFunc(nameof(RunOrchestratorAsync), RunOrchestratorAsync); + registry.AddActivityFunc(nameof(HandleSpamEmail), HandleSpamEmail); + registry.AddActivityFunc(nameof(SendEmail), SendEmail); + }); + }, + clientBuilder: builder => builder.UseDurableTaskScheduler(dtsConnectionString)); + }) + .Build(); + +await host.StartAsync(); + +DurableTaskClient durableTaskClient = host.Services.GetRequiredService(); + +// Console colors for better UX +Console.ForegroundColor = ConsoleColor.Cyan; +Console.WriteLine("=== Multi-Agent Conditional Orchestration Sample ==="); +Console.ResetColor(); +Console.WriteLine("Enter email content:"); +Console.WriteLine(); + +// Read email content from stdin +string? 
emailContent = Console.ReadLine(); +if (string.IsNullOrWhiteSpace(emailContent)) +{ + Console.ForegroundColor = ConsoleColor.Red; + Console.Error.WriteLine("Error: Email content is required."); + Console.ResetColor(); + Environment.Exit(1); + return; +} + +// Generate email ID automatically +Email email = new() +{ + EmailId = $"email-{Guid.NewGuid():N}", + EmailContent = emailContent +}; + +Console.WriteLine(); +Console.ForegroundColor = ConsoleColor.Gray; +Console.WriteLine("Starting orchestration..."); +Console.ResetColor(); + +try +{ + // Start the orchestration + string instanceId = await durableTaskClient.ScheduleNewOrchestrationInstanceAsync( + orchestratorName: nameof(RunOrchestratorAsync), + input: email); + + Console.ForegroundColor = ConsoleColor.Gray; + Console.WriteLine($"Orchestration started with instance ID: {instanceId}"); + Console.WriteLine("Waiting for completion..."); + Console.ResetColor(); + + // Wait for orchestration to complete + OrchestrationMetadata status = await durableTaskClient.WaitForInstanceCompletionAsync( + instanceId, + getInputsAndOutputs: true, + CancellationToken.None); + + Console.WriteLine(); + + if (status.RuntimeStatus == OrchestrationRuntimeStatus.Completed) + { + Console.ForegroundColor = ConsoleColor.Green; + Console.WriteLine("✓ Orchestration completed successfully!"); + Console.ResetColor(); + Console.WriteLine(); + Console.ForegroundColor = ConsoleColor.Yellow; + Console.Write("Result: "); + Console.ResetColor(); + Console.WriteLine(status.ReadOutputAs()); + } + else if (status.RuntimeStatus == OrchestrationRuntimeStatus.Failed) + { + Console.ForegroundColor = ConsoleColor.Red; + Console.WriteLine("✗ Orchestration failed!"); + Console.ResetColor(); + if (status.FailureDetails != null) + { + Console.WriteLine($"Error: {status.FailureDetails.ErrorMessage}"); + } + Environment.Exit(1); + } + else + { + Console.ForegroundColor = ConsoleColor.Yellow; + Console.WriteLine($"Orchestration status: {status.RuntimeStatus}"); + 
Console.ResetColor(); + } +} +catch (Exception ex) +{ + Console.ForegroundColor = ConsoleColor.Red; + Console.Error.WriteLine($"Error: {ex.Message}"); + Console.ResetColor(); + Environment.Exit(1); +} +finally +{ + await host.StopAsync(); +} diff --git a/dotnet/samples/DurableAgents/ConsoleApps/04_AgentOrchestration_Conditionals/README.md b/dotnet/samples/DurableAgents/ConsoleApps/04_AgentOrchestration_Conditionals/README.md new file mode 100644 index 0000000000..646e5eda4e --- /dev/null +++ b/dotnet/samples/DurableAgents/ConsoleApps/04_AgentOrchestration_Conditionals/README.md @@ -0,0 +1,95 @@ +# Multi-Agent Conditional Orchestration Sample + +This sample demonstrates how to use the durable agents extension to create a console app that orchestrates multiple AI agents with conditional logic based on the results of previous agent interactions. + +## Key Concepts Demonstrated + +- Multi-agent orchestration with conditional branching +- Using agent responses to determine workflow paths +- Activity functions for non-agent operations +- Waiting for orchestration completion using `WaitForInstanceCompletionAsync` + +## Environment Setup + +See the [README.md](../README.md) file in the parent directory for more information on how to configure the environment, including how to install and run common sample dependencies. + +## Running the Sample + +With the environment setup, you can run the sample: + +```bash +cd dotnet/samples/DurableAgents/ConsoleApps/04_AgentOrchestration_Conditionals +dotnet run --framework net10.0 +``` + +The app will prompt you for email content. You can test both legitimate emails and spam emails: + +### Testing with a Legitimate Email + +```text +=== Multi-Agent Conditional Orchestration Sample === +Enter email content: + +Hi John, I hope you're doing well. I wanted to follow up on our meeting yesterday about the quarterly report. Could you please send me the updated figures by Friday? Thanks! 
+``` + +The orchestration will analyze the email and display the result: + +```text +Orchestration started with instance ID: 86313f1d45fb42eeb50b1852626bf3ff +Waiting for completion... + +✓ Orchestration completed successfully! + +Result: Email sent: Thank you for your email. I'll prepare the updated figures... +``` + +### Testing with a Spam Email + +```text +=== Multi-Agent Conditional Orchestration Sample === +Enter email content: + +URGENT! You've won $1,000,000! Click here now to claim your prize! Limited time offer! Don't miss out! +``` + +The orchestration will detect it as spam and display: + +```text +Orchestration started with instance ID: 86313f1d45fb42eeb50b1852626bf3ff +Waiting for completion... + +✓ Orchestration completed successfully! + +Result: Email marked as spam: Contains suspicious claims about winning money and urgent action requests... +``` + +## Scriptable Usage + +You can also pipe email content to the app: + +```bash +# Test with a legitimate email +echo "Hi John, I hope you're doing well..." | dotnet run + +# Test with a spam email +echo "URGENT! You've won $1,000,000! Click here now!" | dotnet run +``` + +The orchestration will proceed as follows: + +1. The SpamDetectionAgent analyzes the email to determine if it's spam +2. Based on the result: + - If spam: The orchestration calls the `HandleSpamEmail` activity function + - If not spam: The EmailAssistantAgent drafts a response, then the `SendEmail` activity function is called + +## Viewing Orchestration State + +You can view the state of the orchestration in the Durable Task Scheduler dashboard: + +1. Open your browser and navigate to `http://localhost:8082` +2. In the dashboard, you can see: + - **Orchestrations**: View the orchestration instance, including its runtime status, input, output, and execution history + - **Agents**: View the state of both the SpamDetectionAgent and EmailAssistantAgent + +The orchestration instance ID is displayed in the console output. 
You can use this ID to find the specific orchestration in the dashboard and inspect the conditional branching logic, including which path was taken based on the spam detection result. diff --git a/dotnet/samples/DurableAgents/ConsoleApps/05_AgentOrchestration_HITL/05_AgentOrchestration_HITL.csproj b/dotnet/samples/DurableAgents/ConsoleApps/05_AgentOrchestration_HITL/05_AgentOrchestration_HITL.csproj new file mode 100644 index 0000000000..21db94a33f --- /dev/null +++ b/dotnet/samples/DurableAgents/ConsoleApps/05_AgentOrchestration_HITL/05_AgentOrchestration_HITL.csproj @@ -0,0 +1,30 @@ + + + net10.0 + Exe + enable + enable + AgentOrchestration_HITL + AgentOrchestration_HITL + + + + + + + + + + + + + + + + + diff --git a/dotnet/samples/DurableAgents/ConsoleApps/05_AgentOrchestration_HITL/Models.cs b/dotnet/samples/DurableAgents/ConsoleApps/05_AgentOrchestration_HITL/Models.cs new file mode 100644 index 0000000000..1eaf1407eb --- /dev/null +++ b/dotnet/samples/DurableAgents/ConsoleApps/05_AgentOrchestration_HITL/Models.cs @@ -0,0 +1,44 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Text.Json.Serialization; + +namespace AgentOrchestration_HITL; + +/// +/// Represents the input for the Human-in-the-Loop content generation workflow. +/// +public sealed class ContentGenerationInput +{ + [JsonPropertyName("topic")] + public string Topic { get; set; } = string.Empty; + + [JsonPropertyName("max_review_attempts")] + public int MaxReviewAttempts { get; set; } = 3; + + [JsonPropertyName("approval_timeout_hours")] + public float ApprovalTimeoutHours { get; set; } = 72; +} + +/// +/// Represents the content generated by the writer agent. +/// +public sealed class GeneratedContent +{ + [JsonPropertyName("title")] + public string Title { get; set; } = string.Empty; + + [JsonPropertyName("content")] + public string Content { get; set; } = string.Empty; +} + +/// +/// Represents the human approval response. 
+/// +public sealed class HumanApprovalResponse +{ + [JsonPropertyName("approved")] + public bool Approved { get; set; } + + [JsonPropertyName("feedback")] + public string Feedback { get; set; } = string.Empty; +} diff --git a/dotnet/samples/DurableAgents/ConsoleApps/05_AgentOrchestration_HITL/Program.cs b/dotnet/samples/DurableAgents/ConsoleApps/05_AgentOrchestration_HITL/Program.cs new file mode 100644 index 0000000000..2369e6a364 --- /dev/null +++ b/dotnet/samples/DurableAgents/ConsoleApps/05_AgentOrchestration_HITL/Program.cs @@ -0,0 +1,333 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Text.Json; +using AgentOrchestration_HITL; +using Azure; +using Azure.AI.OpenAI; +using Azure.Identity; +using Microsoft.Agents.AI; +using Microsoft.Agents.AI.DurableTask; +using Microsoft.DurableTask; +using Microsoft.DurableTask.Client; +using Microsoft.DurableTask.Client.AzureManaged; +using Microsoft.DurableTask.Worker; +using Microsoft.DurableTask.Worker.AzureManaged; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Hosting; +using Microsoft.Extensions.Logging; +using OpenAI.Chat; + +// Get the Azure OpenAI endpoint and deployment name from environment variables. +string endpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT") + ?? throw new InvalidOperationException("AZURE_OPENAI_ENDPOINT is not set."); +string deploymentName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOYMENT") + ?? throw new InvalidOperationException("AZURE_OPENAI_DEPLOYMENT is not set."); + +// Get DTS connection string from environment variable +string dtsConnectionString = Environment.GetEnvironmentVariable("DURABLE_TASK_SCHEDULER_CONNECTION_STRING") + ?? "Endpoint=http://localhost:8080;TaskHub=default;Authentication=None"; + +// Use Azure Key Credential if provided, otherwise use Azure CLI Credential. +string? 
azureOpenAiKey = Environment.GetEnvironmentVariable("AZURE_OPENAI_KEY"); +AzureOpenAIClient client = !string.IsNullOrEmpty(azureOpenAiKey) + ? new AzureOpenAIClient(new Uri(endpoint), new AzureKeyCredential(azureOpenAiKey)) + : new AzureOpenAIClient(new Uri(endpoint), new AzureCliCredential()); + +// Single agent used by the orchestration to demonstrate human-in-the-loop workflow. +const string WriterName = "WriterAgent"; +const string WriterInstructions = + """ + You are a professional content writer who creates high-quality articles on various topics. + You write engaging, informative, and well-structured content that follows best practices for readability and accuracy. + """; + +AIAgent writerAgent = client.GetChatClient(deploymentName).AsAIAgent(WriterInstructions, WriterName); + +// Orchestrator function +static async Task RunOrchestratorAsync(TaskOrchestrationContext context, ContentGenerationInput input) +{ + // Get the writer agent + DurableAIAgent writerAgent = context.GetAgent("WriterAgent"); + AgentThread writerThread = await writerAgent.GetNewThreadAsync(); + + // Set initial status + context.SetCustomStatus($"Starting content generation for topic: {input.Topic}"); + + // Step 1: Generate initial content + AgentResponse writerResponse = await writerAgent.RunAsync( + message: $"Write a short article about '{input.Topic}' in less than 300 words.", + thread: writerThread); + GeneratedContent content = writerResponse.Result; + + // Human-in-the-loop iteration - we set a maximum number of attempts to avoid infinite loops + int iterationCount = 0; + while (iterationCount++ < input.MaxReviewAttempts) + { + context.SetCustomStatus( + $"Requesting human feedback. Iteration #{iterationCount}. 
Timeout: {input.ApprovalTimeoutHours} hour(s)."); + + // Step 2: Notify user to review the content + await context.CallActivityAsync(nameof(NotifyUserForApproval), content); + + // Step 3: Wait for human feedback with configurable timeout + HumanApprovalResponse humanResponse; + try + { + humanResponse = await context.WaitForExternalEvent( + eventName: "HumanApproval", + timeout: TimeSpan.FromHours(input.ApprovalTimeoutHours)); + } + catch (OperationCanceledException) + { + // Timeout occurred - treat as rejection + context.SetCustomStatus( + $"Human approval timed out after {input.ApprovalTimeoutHours} hour(s). Treating as rejection."); + throw new TimeoutException($"Human approval timed out after {input.ApprovalTimeoutHours} hour(s)."); + } + + if (humanResponse.Approved) + { + context.SetCustomStatus("Content approved by human reviewer. Publishing content..."); + + // Step 4: Publish the approved content + await context.CallActivityAsync(nameof(PublishContent), content); + + context.SetCustomStatus($"Content published successfully at {context.CurrentUtcDateTime:s}"); + return new { content = content.Content }; + } + + context.SetCustomStatus("Content rejected by human reviewer. Incorporating feedback and regenerating..."); + + // Incorporate human feedback and regenerate + writerResponse = await writerAgent.RunAsync( + message: $""" + The content was rejected by a human reviewer. Please rewrite the article incorporating their feedback. + + Human Feedback: {humanResponse.Feedback} + """, + thread: writerThread); + + content = writerResponse.Result; + } + + // If we reach here, it means we exhausted the maximum number of iterations + throw new InvalidOperationException( + $"Content could not be approved after {input.MaxReviewAttempts} iterations."); +} + +// Activity functions +static void NotifyUserForApproval(TaskActivityContext context, GeneratedContent content) +{ + // In a real implementation, this would send notifications via email, SMS, etc. 
+ Console.WriteLine( + $""" + NOTIFICATION: Please review the following content for approval: + Title: {content.Title} + Content: {content.Content} + Use the approval endpoint to approve or reject this content. + """); +} + +static void PublishContent(TaskActivityContext context, GeneratedContent content) +{ + // In a real implementation, this would publish to a CMS, website, etc. + Console.WriteLine( + $""" + PUBLISHING: Content has been published successfully. + Title: {content.Title} + Content: {content.Content} + """); +} + +// Configure the console app to host the AI agent. +IHost host = Host.CreateDefaultBuilder(args) + .ConfigureLogging(loggingBuilder => loggingBuilder.SetMinimumLevel(LogLevel.Warning)) + .ConfigureServices(services => + { + services.ConfigureDurableAgents( + options => options.AddAIAgent(writerAgent), + workerBuilder: builder => + { + builder.UseDurableTaskScheduler(dtsConnectionString); + builder.AddTasks(registry => + { + registry.AddOrchestratorFunc(nameof(RunOrchestratorAsync), RunOrchestratorAsync); + registry.AddActivityFunc(nameof(NotifyUserForApproval), NotifyUserForApproval); + registry.AddActivityFunc(nameof(PublishContent), PublishContent); + }); + }, + clientBuilder: builder => builder.UseDurableTaskScheduler(dtsConnectionString)); + }) + .Build(); + +await host.StartAsync(); + +DurableTaskClient durableTaskClient = host.Services.GetRequiredService(); + +// Console colors for better UX +Console.ForegroundColor = ConsoleColor.Cyan; +Console.WriteLine("=== Human-in-the-Loop Orchestration Sample ==="); +Console.ResetColor(); +Console.WriteLine("Enter topic for content generation:"); +Console.WriteLine(); + +// Read topic from stdin +string? 
topic = Console.ReadLine(); +if (string.IsNullOrWhiteSpace(topic)) +{ + Console.ForegroundColor = ConsoleColor.Red; + Console.Error.WriteLine("Error: Topic is required."); + Console.ResetColor(); + Environment.Exit(1); + return; +} + +// Prompt for optional parameters with defaults +Console.WriteLine(); +Console.WriteLine("Max review attempts (default: 3):"); +string? maxAttemptsInput = Console.ReadLine(); +int maxReviewAttempts = int.TryParse(maxAttemptsInput, out int maxAttempts) && maxAttempts > 0 + ? maxAttempts + : 3; + +Console.WriteLine("Approval timeout in hours (default: 72):"); +string? timeoutInput = Console.ReadLine(); +float approvalTimeoutHours = float.TryParse(timeoutInput, out float timeout) && timeout > 0 + ? timeout + : 72; + +ContentGenerationInput input = new() +{ + Topic = topic, + MaxReviewAttempts = maxReviewAttempts, + ApprovalTimeoutHours = approvalTimeoutHours +}; + +Console.WriteLine(); +Console.ForegroundColor = ConsoleColor.Gray; +Console.WriteLine("Starting orchestration..."); +Console.ResetColor(); + +try +{ + // Start the orchestration + string instanceId = await durableTaskClient.ScheduleNewOrchestrationInstanceAsync( + orchestratorName: nameof(RunOrchestratorAsync), + input: input); + + Console.ForegroundColor = ConsoleColor.Gray; + Console.WriteLine($"Orchestration started with instance ID: {instanceId}"); + Console.WriteLine("Waiting for human approval..."); + Console.ResetColor(); + Console.WriteLine(); + + // Monitor orchestration status and handle approval prompts + using CancellationTokenSource cts = new(); + Task orchestrationTask = Task.Run(async () => + { + while (!cts.Token.IsCancellationRequested) + { + OrchestrationMetadata? 
status = await durableTaskClient.GetInstanceAsync( + instanceId, + getInputsAndOutputs: true, + cts.Token); + + if (status == null) + { + await Task.Delay(TimeSpan.FromSeconds(1), cts.Token); + continue; + } + + // Check if we're waiting for approval + if (status.SerializedCustomStatus != null) + { + string? customStatus = status.ReadCustomStatusAs(); + if (customStatus?.StartsWith("Requesting human feedback", StringComparison.OrdinalIgnoreCase) == true) + { + // Prompt user for approval + Console.ForegroundColor = ConsoleColor.Yellow; + Console.WriteLine("Content is ready for review. Check the logs above for details."); + Console.Write("Approve? (y/n): "); + Console.ResetColor(); + + string? approvalInput = Console.ReadLine(); + bool approved = approvalInput?.Trim().Equals("y", StringComparison.OrdinalIgnoreCase) == true; + + Console.Write("Feedback (optional): "); + string? feedback = Console.ReadLine() ?? ""; + + HumanApprovalResponse approvalResponse = new() + { + Approved = approved, + Feedback = feedback + }; + + await durableTaskClient.RaiseEventAsync(instanceId, "HumanApproval", approvalResponse); + } + } + + if (status.RuntimeStatus is OrchestrationRuntimeStatus.Completed or OrchestrationRuntimeStatus.Failed or OrchestrationRuntimeStatus.Terminated) + { + break; + } + + await Task.Delay(TimeSpan.FromSeconds(1), cts.Token); + } + }, cts.Token); + + // Wait for orchestration to complete + OrchestrationMetadata finalStatus = await durableTaskClient.WaitForInstanceCompletionAsync( + instanceId, + getInputsAndOutputs: true, + CancellationToken.None); + + cts.Cancel(); + await orchestrationTask; + + Console.WriteLine(); + + if (finalStatus.RuntimeStatus == OrchestrationRuntimeStatus.Completed) + { + Console.ForegroundColor = ConsoleColor.Green; + Console.WriteLine("✓ Orchestration completed successfully!"); + Console.ResetColor(); + Console.WriteLine(); + + JsonElement output = finalStatus.ReadOutputAs(); + if (output.TryGetProperty("content", out JsonElement 
contentElement)) + { + Console.ForegroundColor = ConsoleColor.Yellow; + Console.WriteLine("Published content:"); + Console.ResetColor(); + Console.WriteLine(contentElement.GetString()); + } + } + else if (finalStatus.RuntimeStatus == OrchestrationRuntimeStatus.Failed) + { + Console.ForegroundColor = ConsoleColor.Red; + Console.WriteLine("✗ Orchestration failed!"); + Console.ResetColor(); + if (finalStatus.FailureDetails != null) + { + Console.WriteLine($"Error: {finalStatus.FailureDetails.ErrorMessage}"); + } + Environment.Exit(1); + } + else + { + Console.ForegroundColor = ConsoleColor.Yellow; + Console.WriteLine($"Orchestration status: {finalStatus.RuntimeStatus}"); + Console.ResetColor(); + } +} +catch (Exception ex) +{ + Console.ForegroundColor = ConsoleColor.Red; + Console.Error.WriteLine($"Error: {ex.Message}"); + Console.ResetColor(); + Environment.Exit(1); +} +finally +{ + await host.StopAsync(); +} diff --git a/dotnet/samples/DurableAgents/ConsoleApps/05_AgentOrchestration_HITL/README.md b/dotnet/samples/DurableAgents/ConsoleApps/05_AgentOrchestration_HITL/README.md new file mode 100644 index 0000000000..1386dfbcb1 --- /dev/null +++ b/dotnet/samples/DurableAgents/ConsoleApps/05_AgentOrchestration_HITL/README.md @@ -0,0 +1,73 @@ +# Human-in-the-Loop Orchestration Sample + +This sample demonstrates how to use the durable agents extension to create a console app that implements a human-in-the-loop workflow using durable orchestration, including interactive approval prompts. + +## Key Concepts Demonstrated + +- Human-in-the-loop workflows with durable orchestration +- External event handling for human approval/rejection +- Timeout handling for approval requests +- Iterative content refinement based on human feedback + +## Environment Setup + +See the [README.md](../README.md) file in the parent directory for more information on how to configure the environment, including how to install and run common sample dependencies. 
+ +## Running the Sample + +With the environment setup, you can run the sample: + +```bash +cd dotnet/samples/DurableAgents/ConsoleApps/05_AgentOrchestration_HITL +dotnet run --framework net10.0 +``` + +The app will prompt you for input: + +```text +=== Human-in-the-Loop Orchestration Sample === +Enter topic for content generation: + +The Future of Artificial Intelligence + +Max review attempts (default: 3): +3 +Approval timeout in hours (default: 72): +72 +``` + +The orchestration will generate content and prompt you for approval: + +```text +Orchestration started with instance ID: 86313f1d45fb42eeb50b1852626bf3ff + +=== NOTIFICATION: Content Ready for Review === +Title: The Future of Artificial Intelligence + +Content: +[Generated content appears here] + +Please review the content above and provide your approval. + +Content is ready for review. Check the logs above for details. +Approve? (y/n): n +Feedback (optional): Please add more details about the ethical implications. +``` + +The orchestration will incorporate your feedback and regenerate the content. Once approved, it will publish and complete. + +## Viewing Orchestration State + +You can view the state of the orchestration in the Durable Task Scheduler dashboard: + +1. Open your browser and navigate to `http://localhost:8082` +2. In the dashboard, you can see: + - **Orchestrations**: View the orchestration instance, including its runtime status, custom status (which shows approval state), input, output, and execution history + - **Agents**: View the state of the WriterAgent, including conversation history + +The orchestration instance ID is displayed in the console output. 
You can use this ID to find the specific orchestration in the dashboard and inspect: + +- The custom status field, which shows the current state of the approval workflow +- When the orchestration is waiting for external events +- The iteration count and feedback history +- The final published content diff --git a/dotnet/samples/DurableAgents/ConsoleApps/06_LongRunningTools/06_LongRunningTools.csproj b/dotnet/samples/DurableAgents/ConsoleApps/06_LongRunningTools/06_LongRunningTools.csproj new file mode 100644 index 0000000000..d7557dbdfc --- /dev/null +++ b/dotnet/samples/DurableAgents/ConsoleApps/06_LongRunningTools/06_LongRunningTools.csproj @@ -0,0 +1,30 @@ + + + net10.0 + Exe + enable + enable + LongRunningTools + LongRunningTools + + + + + + + + + + + + + + + + + diff --git a/dotnet/samples/DurableAgents/ConsoleApps/06_LongRunningTools/Models.cs b/dotnet/samples/DurableAgents/ConsoleApps/06_LongRunningTools/Models.cs new file mode 100644 index 0000000000..43ab9d99f8 --- /dev/null +++ b/dotnet/samples/DurableAgents/ConsoleApps/06_LongRunningTools/Models.cs @@ -0,0 +1,44 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Text.Json.Serialization; + +namespace LongRunningTools; + +/// +/// Represents the input for the content generation workflow. +/// +public sealed class ContentGenerationInput +{ + [JsonPropertyName("topic")] + public string Topic { get; set; } = string.Empty; + + [JsonPropertyName("max_review_attempts")] + public int MaxReviewAttempts { get; set; } = 3; + + [JsonPropertyName("approval_timeout_hours")] + public float ApprovalTimeoutHours { get; set; } = 72; +} + +/// +/// Represents the content generated by the writer agent. +/// +public sealed class GeneratedContent +{ + [JsonPropertyName("title")] + public string Title { get; set; } = string.Empty; + + [JsonPropertyName("content")] + public string Content { get; set; } = string.Empty; +} + +/// +/// Represents the human feedback response. 
+/// +public sealed class HumanFeedbackResponse +{ + [JsonPropertyName("approved")] + public bool Approved { get; set; } + + [JsonPropertyName("feedback")] + public string Feedback { get; set; } = string.Empty; +} diff --git a/dotnet/samples/DurableAgents/ConsoleApps/06_LongRunningTools/Program.cs b/dotnet/samples/DurableAgents/ConsoleApps/06_LongRunningTools/Program.cs new file mode 100644 index 0000000000..e429d9ca9b --- /dev/null +++ b/dotnet/samples/DurableAgents/ConsoleApps/06_LongRunningTools/Program.cs @@ -0,0 +1,351 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.ComponentModel; +using Azure; +using Azure.AI.OpenAI; +using Azure.Identity; +using LongRunningTools; +using Microsoft.Agents.AI; +using Microsoft.Agents.AI.DurableTask; +using Microsoft.DurableTask; +using Microsoft.DurableTask.Client; +using Microsoft.DurableTask.Client.AzureManaged; +using Microsoft.DurableTask.Worker; +using Microsoft.DurableTask.Worker.AzureManaged; +using Microsoft.Extensions.AI; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Hosting; +using Microsoft.Extensions.Logging; +using OpenAI.Chat; + +// Get the Azure OpenAI endpoint and deployment name from environment variables. +string endpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT") + ?? throw new InvalidOperationException("AZURE_OPENAI_ENDPOINT is not set."); +string deploymentName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOYMENT") + ?? throw new InvalidOperationException("AZURE_OPENAI_DEPLOYMENT is not set."); + +// Get DTS connection string from environment variable +string dtsConnectionString = Environment.GetEnvironmentVariable("DURABLE_TASK_SCHEDULER_CONNECTION_STRING") + ?? "Endpoint=http://localhost:8080;TaskHub=default;Authentication=None"; + +// Use Azure Key Credential if provided, otherwise use Azure CLI Credential. +string? 
azureOpenAiKey = Environment.GetEnvironmentVariable("AZURE_OPENAI_KEY"); +AzureOpenAIClient client = !string.IsNullOrEmpty(azureOpenAiKey) + ? new AzureOpenAIClient(new Uri(endpoint), new AzureKeyCredential(azureOpenAiKey)) + : new AzureOpenAIClient(new Uri(endpoint), new AzureCliCredential()); + +// Agent used by the orchestration to write content. +const string WriterAgentName = "Writer"; +const string WriterAgentInstructions = + """ + You are a professional content writer who creates high-quality articles on various topics. + You write engaging, informative, and well-structured content that follows best practices for readability and accuracy. + """; + +AIAgent writerAgent = client.GetChatClient(deploymentName).AsAIAgent(WriterAgentInstructions, WriterAgentName); + +// Agent that can start content generation workflows using tools +const string PublisherAgentName = "Publisher"; +const string PublisherAgentInstructions = + """ + You are a publishing agent that can manage content generation workflows. + You have access to tools to start, monitor, and raise events for content generation workflows. 
+ """; + +const string HumanFeedbackEventName = "HumanFeedback"; + +// Orchestrator function +static async Task RunOrchestratorAsync(TaskOrchestrationContext context, ContentGenerationInput input) +{ + // Get the writer agent + DurableAIAgent writerAgent = context.GetAgent(WriterAgentName); + AgentThread writerThread = await writerAgent.GetNewThreadAsync(); + + // Set initial status + context.SetCustomStatus($"Starting content generation for topic: {input.Topic}"); + + // Step 1: Generate initial content + AgentResponse writerResponse = await writerAgent.RunAsync( + message: $"Write a short article about '{input.Topic}'.", + thread: writerThread); + GeneratedContent content = writerResponse.Result; + + // Human-in-the-loop iteration - we set a maximum number of attempts to avoid infinite loops + int iterationCount = 0; + while (iterationCount++ < input.MaxReviewAttempts) + { + context.SetCustomStatus( + new + { + message = "Requesting human feedback.", + approvalTimeoutHours = input.ApprovalTimeoutHours, + iterationCount, + content + }); + + // Step 2: Notify user to review the content + await context.CallActivityAsync(nameof(NotifyUserForApproval), content); + + // Step 3: Wait for human feedback with configurable timeout + HumanFeedbackResponse humanResponse; + try + { + humanResponse = await context.WaitForExternalEvent( + eventName: HumanFeedbackEventName, + timeout: TimeSpan.FromHours(input.ApprovalTimeoutHours)); + } + catch (OperationCanceledException) + { + // Timeout occurred - treat as rejection + context.SetCustomStatus( + new + { + message = $"Human approval timed out after {input.ApprovalTimeoutHours} hour(s). Treating as rejection.", + iterationCount, + content + }); + throw new TimeoutException($"Human approval timed out after {input.ApprovalTimeoutHours} hour(s)."); + } + + if (humanResponse.Approved) + { + context.SetCustomStatus(new + { + message = "Content approved by human reviewer. 
Publishing content...", + content + }); + + // Step 4: Publish the approved content + await context.CallActivityAsync(nameof(PublishContent), content); + + context.SetCustomStatus(new + { + message = $"Content published successfully at {context.CurrentUtcDateTime:s}", + humanFeedback = humanResponse, + content + }); + return new { content = content.Content }; + } + + context.SetCustomStatus(new + { + message = "Content rejected by human reviewer. Incorporating feedback and regenerating...", + humanFeedback = humanResponse, + content + }); + + // Incorporate human feedback and regenerate + writerResponse = await writerAgent.RunAsync( + message: $""" + The content was rejected by a human reviewer. Please rewrite the article incorporating their feedback. + + Human Feedback: {humanResponse.Feedback} + """, + thread: writerThread); + + content = writerResponse.Result; + } + + // If we reach here, it means we exhausted the maximum number of iterations + throw new InvalidOperationException( + $"Content could not be approved after {input.MaxReviewAttempts} iterations."); +} + +// Activity functions +static void NotifyUserForApproval(TaskActivityContext context, GeneratedContent content) +{ + // In a real implementation, this would send notifications via email, SMS, etc. + Console.ForegroundColor = ConsoleColor.DarkMagenta; + Console.WriteLine( + $""" + NOTIFICATION: Please review the following content for approval: + Title: {content.Title} + Content: {content.Content} + """); + Console.ResetColor(); +} + +static void PublishContent(TaskActivityContext context, GeneratedContent content) +{ + // In a real implementation, this would publish to a CMS, website, etc. + Console.ForegroundColor = ConsoleColor.DarkMagenta; + Console.WriteLine( + $""" + PUBLISHING: Content has been published successfully. + Title: {content.Title} + Content: {content.Content} + """); + Console.ResetColor(); +} + +// Tools that demonstrate starting orchestrations from agent tool calls. 
+[Description("Starts a content generation workflow and returns the instance ID for tracking.")] +static string StartContentGenerationWorkflow([Description("The topic for content generation")] string topic) +{ + const int MaxReviewAttempts = 3; + const float ApprovalTimeoutHours = 72; + + // Schedule the orchestration, which will start running after the tool call completes. + string instanceId = DurableAgentContext.Current.ScheduleNewOrchestration( + name: nameof(RunOrchestratorAsync), + input: new ContentGenerationInput + { + Topic = topic, + MaxReviewAttempts = MaxReviewAttempts, + ApprovalTimeoutHours = ApprovalTimeoutHours + }); + + return $"Workflow started with instance ID: {instanceId}"; +} + +[Description("Gets the status of a workflow orchestration and returns a summary of the workflow's current status.")] +static async Task GetWorkflowStatusAsync( + [Description("The instance ID of the workflow to check")] string instanceId, + [Description("Whether to include detailed information")] bool includeDetails = true) +{ + // Get the current agent context using the thread-static property + OrchestrationMetadata? status = await DurableAgentContext.Current.GetOrchestrationStatusAsync( + instanceId, + includeDetails); + + if (status is null) + { + return new + { + instanceId, + error = $"Workflow instance '{instanceId}' not found.", + }; + } + + return new + { + instanceId = status.InstanceId, + createdAt = status.CreatedAt, + executionStatus = status.RuntimeStatus, + workflowStatus = status.SerializedCustomStatus, + lastUpdatedAt = status.LastUpdatedAt, + failureDetails = status.FailureDetails + }; +} + +[Description( + "Raises a feedback event for the content generation workflow. If approved, the workflow will be published. 
" + + "If rejected, the workflow will generate new content.")] +static async Task SubmitHumanFeedbackAsync( + [Description("The instance ID of the workflow to submit feedback for")] string instanceId, + [Description("Feedback to submit")] HumanFeedbackResponse feedback) +{ + await DurableAgentContext.Current.RaiseOrchestrationEventAsync(instanceId, HumanFeedbackEventName, feedback); +} + +// Configure the console app to host the AI agents. +IHost host = Host.CreateDefaultBuilder(args) + .ConfigureLogging(loggingBuilder => loggingBuilder.SetMinimumLevel(LogLevel.Warning)) + .ConfigureServices(services => + { + services.ConfigureDurableAgents( + options => + { + // Add the writer agent used by the orchestration + options.AddAIAgent(writerAgent); + + // Define the agent that can start orchestrations from tool calls + options.AddAIAgentFactory(PublisherAgentName, sp => + { + return client.GetChatClient(deploymentName).AsAIAgent( + instructions: PublisherAgentInstructions, + name: PublisherAgentName, + services: sp, + tools: [ + AIFunctionFactory.Create(StartContentGenerationWorkflow), + AIFunctionFactory.Create(GetWorkflowStatusAsync), + AIFunctionFactory.Create(SubmitHumanFeedbackAsync), + ]); + }); + }, + workerBuilder: builder => + { + builder.UseDurableTaskScheduler(dtsConnectionString); + builder.AddTasks(registry => + { + registry.AddOrchestratorFunc(nameof(RunOrchestratorAsync), RunOrchestratorAsync); + registry.AddActivityFunc(nameof(NotifyUserForApproval), NotifyUserForApproval); + registry.AddActivityFunc(nameof(PublishContent), PublishContent); + }); + }, + clientBuilder: builder => builder.UseDurableTaskScheduler(dtsConnectionString)); + }) + .Build(); + +await host.StartAsync(); + +// Get the agent proxy from services +IServiceProvider services = host.Services; +AIAgent? 
agentProxy = services.GetKeyedService(PublisherAgentName); +if (agentProxy == null) +{ + Console.ForegroundColor = ConsoleColor.Red; + Console.Error.WriteLine("Agent 'Publisher' not found."); + Console.ResetColor(); + Environment.Exit(1); + return; +} + +// Console colors for better UX +Console.ForegroundColor = ConsoleColor.Cyan; +Console.WriteLine("=== Long Running Tools Sample ==="); +Console.ResetColor(); +Console.WriteLine("Enter a topic for the Publisher agent to write about (or 'exit' to quit):"); +Console.WriteLine(); + +// Create a thread for the conversation +AgentThread thread = await agentProxy.GetNewThreadAsync(); + +using CancellationTokenSource cts = new(); +Console.CancelKeyPress += (sender, e) => +{ + e.Cancel = true; + cts.Cancel(); +}; + +while (!cts.Token.IsCancellationRequested) +{ + // Read input from stdin + Console.ForegroundColor = ConsoleColor.Yellow; + Console.Write("You: "); + Console.ResetColor(); + + string? input = Console.ReadLine(); + if (string.IsNullOrWhiteSpace(input) || input.Equals("exit", StringComparison.OrdinalIgnoreCase)) + { + break; + } + + // Run the agent + Console.ForegroundColor = ConsoleColor.Green; + Console.Write("Publisher: "); + Console.ResetColor(); + + try + { + AgentResponse agentResponse = await agentProxy.RunAsync( + message: input, + thread: thread, + cancellationToken: cts.Token); + + Console.WriteLine(agentResponse.Text); + Console.WriteLine(); + } + catch (Exception ex) + { + Console.ForegroundColor = ConsoleColor.Red; + Console.Error.WriteLine($"Error: {ex.Message}"); + Console.ResetColor(); + Console.WriteLine(); + } + + Console.WriteLine("(Press Enter to prompt the Publisher agent again)"); + _ = Console.ReadLine(); +} + +await host.StopAsync(); diff --git a/dotnet/samples/DurableAgents/ConsoleApps/06_LongRunningTools/README.md b/dotnet/samples/DurableAgents/ConsoleApps/06_LongRunningTools/README.md new file mode 100644 index 0000000000..b0dd69b129 --- /dev/null +++ 
b/dotnet/samples/DurableAgents/ConsoleApps/06_LongRunningTools/README.md @@ -0,0 +1,90 @@ +# Long Running Tools Sample + +This sample demonstrates how to use the durable agents extension to create a console app with agents that have long running tools. This sample builds on the [05_AgentOrchestration_HITL](../05_AgentOrchestration_HITL) sample by adding a publisher agent that can start and manage content generation workflows. A key difference is that the publisher agent knows the IDs of the workflows it starts, so it can check the status of the workflows and approve or reject them without being explicitly given the context (instance IDs, etc). + +## Key Concepts Demonstrated + +The same key concepts as the [05_AgentOrchestration_HITL](../05_AgentOrchestration_HITL) sample are demonstrated, but with the following additional concepts: + +- **Long running tools**: Using `DurableAgentContext.Current` to start orchestrations from tool calls +- **Multi-agent orchestration**: Agents can start and manage workflows that orchestrate other agents +- **Human-in-the-loop (with delegation)**: The agent acts as an intermediary between the human and the workflow. The human remains in the loop, but delegates to the agent to start the workflow and approve or reject the content. + +## Environment Setup + +See the [README.md](../README.md) file in the parent directory for more information on how to configure the environment, including how to install and run common sample dependencies. + +## Running the Sample + +With the environment setup, you can run the sample: + +```bash +cd dotnet/samples/DurableAgents/ConsoleApps/06_LongRunningTools +dotnet run --framework net10.0 +``` + +The app will prompt you for input. 
You can interact with the Publisher agent: + +```text +=== Long Running Tools Sample === +Enter a topic for the Publisher agent to write about (or 'exit' to quit): + +You: Start a content generation workflow for the topic 'The Future of Artificial Intelligence' +Publisher: The content generation workflow for the topic "The Future of Artificial Intelligence" has been successfully started, and the instance ID is **6a04276e8d824d8d941e1dc4142cc254**. If you need any further assistance or updates on the workflow, feel free to ask! +``` + +Behind the scenes, the publisher agent will: + +1. Start the content generation workflow via a tool call +2. The workflow will generate initial content using the Writer agent and wait for human approval, which will be visible in the terminal + +Once the workflow is waiting for human approval, you can send approval or rejection by prompting the publisher agent accordingly. + +> [!NOTE] +> You must press Enter after each message to continue the conversation. The sample is set up this way because the workflow is running in the background and may write to the console asynchronously. + +To tell the agent to rewrite the content with feedback, you can prompt it to reject the content with feedback. + +```text +You: Reject the content with feedback: The article needs more technical depth and better examples. +Publisher: The content has been successfully rejected with the feedback: "The article needs more technical depth and better examples." The workflow will now generate new content based on this feedback. +``` + +Once you're satisfied with the content, you can approve it for publishing. + +```text +You: Approve the content +Publisher: The content has been successfully approved for publishing. If you need any more assistance or have further requests, feel free to let me know! +``` + +Once the workflow has completed, you can get the status by prompting the publisher agent to give you the status. 
+ +```text +You: Get the status of the workflow you previously started +Publisher: The status of the workflow with instance ID **6a04276e8d824d8d941e1dc4142cc254** is as follows: + +- **Execution Status:** Completed +- **Created At:** December 22, 2025, 23:08:13 UTC +- **Last Updated At:** December 22, 2025, 23:09:59 UTC +- **Workflow Status:** + - Message: Content published successfully at December 22, 2025, 23:09:59 UTC + - Human Feedback: Approved +``` + +## Viewing Agent and Orchestration State + +You can view the state of both the agent and the orchestrations it starts in the Durable Task Scheduler dashboard: + +1. Open your browser and navigate to `http://localhost:8082` +2. In the dashboard, you can see: + - **Agents**: View the state of the Publisher agent, including its conversation history and tool call history + - **Orchestrations**: View the content generation orchestration instances that were started by the agent via tool calls, including their runtime status, custom status, input, output, and execution history + +When the publisher agent starts a workflow, the orchestration instance ID is included in the agent's response. You can use this ID to find the specific orchestration in the dashboard and inspect: + +- The orchestration's execution progress +- When it's waiting for human approval (visible in custom status) +- The content generation workflow state +- The WriterAgent state within the orchestration + +This demonstrates how agents can manage long-running workflows and how you can monitor both the agent's state and the workflows it orchestrates. 
diff --git a/dotnet/samples/DurableAgents/ConsoleApps/07_ReliableStreaming/07_ReliableStreaming.csproj b/dotnet/samples/DurableAgents/ConsoleApps/07_ReliableStreaming/07_ReliableStreaming.csproj new file mode 100644 index 0000000000..09c6a8cdd3 --- /dev/null +++ b/dotnet/samples/DurableAgents/ConsoleApps/07_ReliableStreaming/07_ReliableStreaming.csproj @@ -0,0 +1,31 @@ + + + net10.0 + Exe + enable + enable + ReliableStreaming + ReliableStreaming + + + + + + + + + + + + + + + + + + diff --git a/dotnet/samples/DurableAgents/ConsoleApps/07_ReliableStreaming/Program.cs b/dotnet/samples/DurableAgents/ConsoleApps/07_ReliableStreaming/Program.cs new file mode 100644 index 0000000000..afd00cc200 --- /dev/null +++ b/dotnet/samples/DurableAgents/ConsoleApps/07_ReliableStreaming/Program.cs @@ -0,0 +1,363 @@ +// Copyright (c) Microsoft. All rights reserved. + +// This sample demonstrates how to implement reliable streaming for durable agents using Redis Streams. +// It reads prompts from stdin and streams agent responses to stdout in real-time. + +using System.ComponentModel; +using Azure; +using Azure.AI.OpenAI; +using Azure.Identity; +using Microsoft.Agents.AI; +using Microsoft.Agents.AI.DurableTask; +using Microsoft.DurableTask.Client.AzureManaged; +using Microsoft.DurableTask.Worker.AzureManaged; +using Microsoft.Extensions.AI; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Hosting; +using Microsoft.Extensions.Logging; +using OpenAI.Chat; +using ReliableStreaming; +using StackExchange.Redis; + +// Get the Azure OpenAI endpoint and deployment name from environment variables. +string endpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT") + ?? throw new InvalidOperationException("AZURE_OPENAI_ENDPOINT is not set."); +string deploymentName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOYMENT") + ?? 
throw new InvalidOperationException("AZURE_OPENAI_DEPLOYMENT is not set."); + +// Get Redis connection string from environment variable. +string redisConnectionString = Environment.GetEnvironmentVariable("REDIS_CONNECTION_STRING") + ?? "localhost:6379"; + +// Get the Redis stream TTL from environment variable (default: 10 minutes). +int redisStreamTtlMinutes = int.Parse(Environment.GetEnvironmentVariable("REDIS_STREAM_TTL_MINUTES") ?? "10"); + +// Get DTS connection string from environment variable +string dtsConnectionString = Environment.GetEnvironmentVariable("DURABLE_TASK_SCHEDULER_CONNECTION_STRING") + ?? "Endpoint=http://localhost:8080;TaskHub=default;Authentication=None"; + +// Use Azure Key Credential if provided, otherwise use Azure CLI Credential. +string? azureOpenAiKey = Environment.GetEnvironmentVariable("AZURE_OPENAI_KEY"); +AzureOpenAIClient client = !string.IsNullOrEmpty(azureOpenAiKey) + ? new AzureOpenAIClient(new Uri(endpoint), new AzureKeyCredential(azureOpenAiKey)) + : new AzureOpenAIClient(new Uri(endpoint), new AzureCliCredential()); + +// Travel Planner agent instructions - designed to produce longer responses for demonstrating streaming. +const string TravelPlannerName = "TravelPlanner"; +const string TravelPlannerInstructions = + """ + You are an expert travel planner who creates detailed, personalized travel itineraries. + When asked to plan a trip, you should: + 1. Create a comprehensive day-by-day itinerary + 2. Include specific recommendations for activities, restaurants, and attractions + 3. Provide practical tips for each destination + 4. Consider weather and local events when making recommendations + 5. Include estimated times and logistics between activities + + Always use the available tools to get current weather forecasts and local events + for the destination to make your recommendations more relevant and timely. 
+ + Format your response with clear headings for each day and include emoji icons + to make the itinerary easy to scan and visually appealing. + """; + +// Mock travel tools that return hardcoded data for demonstration purposes. +[Description("Gets the weather forecast for a destination on a specific date. Use this to provide weather-aware recommendations in the itinerary.")] +static string GetWeatherForecast(string destination, string date) +{ + Dictionary weatherByRegion = new(StringComparer.OrdinalIgnoreCase) + { + ["Tokyo"] = ("Partly cloudy with a chance of light rain", 58, 45), + ["Paris"] = ("Overcast with occasional drizzle", 52, 41), + ["New York"] = ("Clear and cold", 42, 28), + ["London"] = ("Foggy morning, clearing in afternoon", 48, 38), + ["Sydney"] = ("Sunny and warm", 82, 68), + ["Rome"] = ("Sunny with light breeze", 62, 48), + ["Barcelona"] = ("Partly sunny", 59, 47), + ["Amsterdam"] = ("Cloudy with light rain", 46, 38), + ["Dubai"] = ("Sunny and hot", 85, 72), + ["Singapore"] = ("Tropical thunderstorms in afternoon", 88, 77), + ["Bangkok"] = ("Hot and humid, afternoon showers", 91, 78), + ["Los Angeles"] = ("Sunny and pleasant", 72, 55), + ["San Francisco"] = ("Morning fog, afternoon sun", 62, 52), + ["Seattle"] = ("Rainy with breaks", 48, 40), + ["Miami"] = ("Warm and sunny", 78, 65), + ["Honolulu"] = ("Tropical paradise weather", 82, 72), + }; + + (string condition, int highF, int lowF) forecast = ("Partly cloudy", 65, 50); + foreach (KeyValuePair entry in weatherByRegion) + { + if (destination.Contains(entry.Key, StringComparison.OrdinalIgnoreCase)) + { + forecast = entry.Value; + break; + } + } + + return $""" + Weather forecast for {destination} on {date}: + Conditions: {forecast.condition} + High: {forecast.highF}°F ({(forecast.highF - 32) * 5 / 9}°C) + Low: {forecast.lowF}°F ({(forecast.lowF - 32) * 5 / 9}°C) + + Recommendation: {GetWeatherRecommendation(forecast.condition)} + """; +} + +[Description("Gets local events and activities 
happening at a destination around a specific date. Use this to suggest timely activities and experiences.")] +static string GetLocalEvents(string destination, string date) +{ + Dictionary eventsByCity = new(StringComparer.OrdinalIgnoreCase) + { + ["Tokyo"] = [ + "🎭 Kabuki Theater Performance at Kabukiza Theatre - Traditional Japanese drama", + "🌸 Winter Illuminations at Yoyogi Park - Spectacular light displays", + "🍜 Ramen Festival at Tokyo Station - Sample ramen from across Japan", + "🎮 Gaming Expo at Tokyo Big Sight - Latest video games and technology", + ], + ["Paris"] = [ + "🎨 Impressionist Exhibition at Musée d'Orsay - Extended evening hours", + "🍷 Wine Tasting Tour in Le Marais - Local sommelier guided", + "🎵 Jazz Night at Le Caveau de la Huchette - Historic jazz club", + "🥐 French Pastry Workshop - Learn from master pâtissiers", + ], + ["New York"] = [ + "🎭 Broadway Show: Hamilton - Limited engagement performances", + "🏀 Knicks vs Lakers at Madison Square Garden", + "🎨 Modern Art Exhibit at MoMA - New installations", + "🍕 Pizza Walking Tour of Brooklyn - Artisan pizzerias", + ], + ["London"] = [ + "👑 Royal Collection Exhibition at Buckingham Palace", + "🎭 West End Musical: The Phantom of the Opera", + "🍺 Craft Beer Festival at Brick Lane", + "🎪 Winter Wonderland at Hyde Park - Rides and markets", + ], + ["Sydney"] = [ + "🏄 Pro Surfing Competition at Bondi Beach", + "🎵 Opera at Sydney Opera House - La Bohème", + "🦘 Wildlife Night Safari at Taronga Zoo", + "🍽️ Harbor Dinner Cruise with fireworks", + ], + ["Rome"] = [ + "🏛️ After-Hours Vatican Tour - Skip the crowds", + "🍝 Pasta Making Class in Trastevere", + "🎵 Classical Concert at Borghese Gallery", + "🍷 Wine Tasting in Roman Cellars", + ], + }; + + string[] events = [ + "🎭 Local theater performance", + "🍽️ Food and wine festival", + "🎨 Art gallery opening", + "🎵 Live music at local venues", + ]; + + foreach (KeyValuePair entry in eventsByCity) + { + if (destination.Contains(entry.Key, 
StringComparison.OrdinalIgnoreCase)) + { + events = entry.Value; + break; + } + } + + string eventList = string.Join("\n• ", events); + return $""" + Local events in {destination} around {date}: + + • {eventList} + + 💡 Tip: Book popular events in advance as they may sell out quickly! + """; +} + +static string GetWeatherRecommendation(string condition) +{ + return condition switch + { + string c when c.Contains("rain", StringComparison.OrdinalIgnoreCase) || c.Contains("drizzle", StringComparison.OrdinalIgnoreCase) => + "Bring an umbrella and waterproof jacket. Consider indoor activities for backup.", + string c when c.Contains("fog", StringComparison.OrdinalIgnoreCase) => + "Morning visibility may be limited. Plan outdoor sightseeing for afternoon.", + string c when c.Contains("cold", StringComparison.OrdinalIgnoreCase) => + "Layer up with warm clothing. Hot drinks and cozy cafés recommended.", + string c when c.Contains("hot", StringComparison.OrdinalIgnoreCase) || c.Contains("warm", StringComparison.OrdinalIgnoreCase) => + "Stay hydrated and use sunscreen. Plan strenuous activities for cooler morning hours.", + string c when c.Contains("thunder", StringComparison.OrdinalIgnoreCase) || c.Contains("storm", StringComparison.OrdinalIgnoreCase) => + "Keep an eye on weather updates. Have indoor alternatives ready.", + _ => "Pleasant conditions expected. Great day for outdoor exploration!" + }; +} + +// Configure the console app to host the AI agent. 
+IHost host = Host.CreateDefaultBuilder(args) + .ConfigureLogging(loggingBuilder => loggingBuilder.SetMinimumLevel(LogLevel.Warning)) + .ConfigureServices(services => + { + services.ConfigureDurableAgents( + options => + { + // Define the Travel Planner agent with tools for weather and events + options.AddAIAgentFactory(TravelPlannerName, sp => + { + return client.GetChatClient(deploymentName).AsAIAgent( + instructions: TravelPlannerInstructions, + name: TravelPlannerName, + services: sp, + tools: [ + AIFunctionFactory.Create(GetWeatherForecast), + AIFunctionFactory.Create(GetLocalEvents), + ]); + }); + }, + workerBuilder: builder => builder.UseDurableTaskScheduler(dtsConnectionString), + clientBuilder: builder => builder.UseDurableTaskScheduler(dtsConnectionString)); + + // Register Redis connection as a singleton + services.AddSingleton(_ => + ConnectionMultiplexer.Connect(redisConnectionString)); + + // Register the Redis stream response handler - this captures agent responses + // and publishes them to Redis Streams for reliable delivery. + services.AddSingleton(sp => + new RedisStreamResponseHandler( + sp.GetRequiredService(), + TimeSpan.FromMinutes(redisStreamTtlMinutes))); + services.AddSingleton(sp => + sp.GetRequiredService()); + }) + .Build(); + +await host.StartAsync(); + +// Get the agent proxy from services +IServiceProvider services = host.Services; +AIAgent? 
agentProxy = services.GetKeyedService(TravelPlannerName); +RedisStreamResponseHandler streamHandler = services.GetRequiredService(); + +if (agentProxy == null) +{ + Console.ForegroundColor = ConsoleColor.Red; + Console.Error.WriteLine($"Agent '{TravelPlannerName}' not found."); + Console.ResetColor(); + Environment.Exit(1); + return; +} + +// Console colors for better UX +Console.ForegroundColor = ConsoleColor.Cyan; +Console.WriteLine("=== Reliable Streaming Sample ==="); +Console.ResetColor(); +Console.WriteLine("Enter a travel planning request (or 'exit' to quit):"); +Console.WriteLine(); + +string? lastCursor = null; + +async Task ReadStreamTask(string conversationId, string? cursor, CancellationToken cancellationToken) +{ + // Initialize lastCursor to the starting cursor position + // This ensures we have a valid cursor even if cancellation happens before any chunks are processed + lastCursor = cursor; + + await foreach (StreamChunk chunk in streamHandler.ReadStreamAsync(conversationId, cursor, cancellationToken)) + { + if (chunk.Error != null) + { + Console.ForegroundColor = ConsoleColor.Red; + Console.Error.WriteLine($"\n[Error: {chunk.Error}]"); + Console.ResetColor(); + break; + } + + if (chunk.IsDone) + { + Console.WriteLine(); + Console.WriteLine(); + break; + } + + if (chunk.Text != null) + { + Console.Write(chunk.Text); + } + + // Always update lastCursor to track the latest entry ID, even if text is null + // This ensures we can resume from the correct position after interruption + if (!string.IsNullOrEmpty(chunk.EntryId)) + { + lastCursor = chunk.EntryId; + } + } +} + +// New conversation: prompt from stdin +Console.ForegroundColor = ConsoleColor.Yellow; +Console.Write("You: "); +Console.ResetColor(); + +string? 
prompt = Console.ReadLine(); +if (string.IsNullOrWhiteSpace(prompt) || prompt.Equals("exit", StringComparison.OrdinalIgnoreCase)) +{ + return; +} + +// Create a new agent thread +AgentThread thread = await agentProxy.GetNewThreadAsync(); +AgentSessionId sessionId = thread.GetService(); +string conversationId = sessionId.ToString(); + +Console.ForegroundColor = ConsoleColor.Green; +Console.WriteLine($"Conversation ID: {conversationId}"); +Console.WriteLine("Press [Enter] to interrupt the stream."); +Console.ResetColor(); + +// Run the agent in the background +DurableAgentRunOptions options = new() { IsFireAndForget = true }; +await agentProxy.RunAsync(prompt, thread, options, CancellationToken.None); + +bool streamCompleted = false; +while (!streamCompleted) +{ + // On a key press, cancel the cancellation token to stop the stream + using CancellationTokenSource userCancellationSource = new(); + _ = Task.Run(() => + { + _ = Console.ReadLine(); + userCancellationSource.Cancel(); + }); + + try + { + // Start reading the stream and wait for it to complete + await ReadStreamTask(conversationId, lastCursor, userCancellationSource.Token); + streamCompleted = true; + } + catch (OperationCanceledException) + { + Console.ForegroundColor = ConsoleColor.Yellow; + Console.WriteLine("Stream cancelled. Press [Enter] to reconnect and resume the stream from the last cursor."); + // Ensure lastCursor is set - if it's still null, we at least have the starting cursor + string cursorValue = lastCursor ?? "(n/a)"; + Console.WriteLine($"Last cursor: {cursorValue}"); + Console.ResetColor(); + // Explicitly flush to ensure the message is written immediately + Console.Out.Flush(); + } + + if (!streamCompleted) + { + Console.ReadLine(); + Console.ForegroundColor = ConsoleColor.Green; + Console.WriteLine($"Resuming conversation: {conversationId} from cursor: {lastCursor ?? 
"(beginning)"}"); + Console.ResetColor(); + } +} + +Console.ForegroundColor = ConsoleColor.Green; +Console.WriteLine("Conversation completed."); +Console.ResetColor(); + +await host.StopAsync(); diff --git a/dotnet/samples/DurableAgents/ConsoleApps/07_ReliableStreaming/README.md b/dotnet/samples/DurableAgents/ConsoleApps/07_ReliableStreaming/README.md new file mode 100644 index 0000000000..c1956157e8 --- /dev/null +++ b/dotnet/samples/DurableAgents/ConsoleApps/07_ReliableStreaming/README.md @@ -0,0 +1,181 @@ +# Reliable Streaming with Redis + +This sample demonstrates how to implement reliable streaming for durable agents using Redis Streams as a message broker. It enables clients to disconnect and reconnect to ongoing agent responses without losing messages, inspired by [OpenAI's background mode](https://platform.openai.com/docs/guides/background) for the Responses API. + +## Key Concepts Demonstrated + +- **Reliable message delivery**: Agent responses are persisted to Redis Streams, allowing clients to resume from any point +- **Real-time streaming**: Chunks are printed to stdout as they arrive (like `tail -f`) +- **Cursor-based resumption**: Each chunk includes an entry ID that can be used to resume the stream +- **Fire-and-forget agent invocation**: The agent runs in the background while the client streams from Redis + +## Environment Setup + +See the [README.md](../README.md) file in the parent directory for more information on how to configure the environment, including how to install and run common sample dependencies. + +### Additional Requirements: Redis + +This sample requires a Redis instance. 
Start a local Redis instance using Docker: + +```bash +docker run -d --name redis -p 6379:6379 redis:latest +``` + +To verify Redis is running: + +```bash +docker ps | grep redis +``` + +## Running the Sample + +With the environment setup, you can run the sample: + +```bash +cd dotnet/samples/DurableAgents/ConsoleApps/07_ReliableStreaming +dotnet run --framework net10.0 +``` + +The app will prompt you for a travel planning request: + +```text +=== Reliable Streaming Sample === +Enter a travel planning request (or 'exit' to quit): + +You: Plan a 7-day trip to Tokyo, Japan for next month. Include daily activities, restaurant recommendations, and tips for getting around. +``` + +The agent's response will stream to your console in real-time as chunks arrive from Redis: + +```text +Starting new conversation: @dafx-travelplanner@a1b2c3d4e5f67890abcdef1234567890 +Press [Enter] to interrupt the stream. + +TravelPlanner: # 7-Day Tokyo Adventure + +## Day 1: Arrival and Exploration +... +``` + +### Demonstrating Stream Interruption and Resumption + +This is the key feature of reliable streaming. Follow these steps to see it in action: + +1. **Start a stream**: Run the app and enter a travel planning request +2. **Note the conversation ID**: The conversation ID is displayed at the start of the stream (e.g., `Starting new conversation: @dafx-travelplanner@a1b2c3d4e5f67890abcdef1234567890`) +3. **Interrupt the stream**: While the agent is still generating text, press **`Enter`** to interrupt. The agent continues running in the background - your messages are being saved to Redis. +4. **Resume the stream**: Press **`Enter`** again to reconnect and resume the stream from the last cursor position. The app will automatically resume from where it left off. + +```text +Starting new conversation: @dafx-travelplanner@a1b2c3d4e5f67890abcdef1234567890 +Press [Enter] to interrupt the stream. 
+ +TravelPlanner: # 7-Day Tokyo Adventure + +## Day 1: Arrival and Exploration +[Streaming content...] + +[Press Enter to interrupt] +Stream cancelled. Press [Enter] to reconnect and resume the stream from the last cursor. +Last cursor: 1734567890123-0 + +[Press Enter to resume] +Resuming conversation: @dafx-travelplanner@a1b2c3d4e5f67890abcdef1234567890 from cursor: 1734567890123-0 + +[Stream continues from where it left off...] +``` + +## Viewing Agent State + +You can view the state of the agent in the Durable Task Scheduler dashboard: + +1. Open your browser and navigate to `http://localhost:8082` +2. In the dashboard, you can see: + - **Agents**: View the state of the TravelPlanner agent, including conversation history and current state + - **Orchestrations**: View any orchestrations that may have been triggered by the agent + +The conversation ID displayed in the console output (shown as "Starting new conversation: {conversationId}") corresponds to the agent's conversation thread. You can use this to identify the agent in the dashboard and inspect: + +- The agent's conversation state +- Tool calls made by the agent (weather and events lookups) +- The streaming response state + +Note that while the console app streams responses from Redis, the agent state in DTS shows the underlying durable agent execution, including all tool calls and conversation context. 
+ +## Architecture Overview + +```text +┌─────────────┐ stdin (prompt) ┌─────────────────────┐ +│ Client │ ─────────────────────► │ Console App │ +│ (stdin) │ │ (Program.cs) │ +└─────────────┘ └──────────────┬──────┘ + ▲ │ + │ stdout (chunks) Signal Entity + │ │ + │ ▼ + │ ┌─────────────────────┐ + │ │ AgentEntity │ + │ │ (Durable Entity) │ + │ └──────────┬──────────┘ + │ │ + │ IAgentResponseHandler + │ │ + │ ▼ + │ ┌─────────────────────┐ + │ │ RedisStreamResponse │ + │ │ Handler │ + │ └──────────┬──────────┘ + │ │ + │ XADD (write) + │ │ + │ ▼ + │ ┌─────────────────────┐ + └─────────── XREAD (poll) ────────── │ Redis Streams │ + │ (Durable Log) │ + └─────────────────────┘ +``` + +### Data Flow + +1. **Client sends prompt**: The console app reads the prompt from stdin and generates a new agent thread. + +2. **Agent invoked**: The durable agent is signaled to run the travel planner agent. This is fire-and-forget from the console app's perspective. + +3. **Responses captured**: As the agent generates responses, the `RedisStreamResponseHandler` (implementing `IAgentResponseHandler`) extracts the text from each `AgentRunResponseUpdate` and publishes it to a Redis Stream keyed by the agent session's conversation ID. + +4. **Client polls Redis**: The console app streams events by polling the Redis Stream and printing chunks to stdout as they arrive. + +5. **Resumption**: If the client interrupts the stream (e.g., by pressing Enter in the sample), it can resume from the last cursor position by providing the conversation ID and cursor to the call to resume the stream. + +## Message Delivery Guarantees + +This sample provides **at-least-once delivery** with the following characteristics: + +- **Durability**: Messages are persisted to Redis Streams with configurable TTL (default: 10 minutes). +- **Ordering**: Messages are delivered in order within a session. +- **Real-time**: Chunks are printed as soon as they arrive from Redis. 
+ +### Important Considerations + +- **No exactly-once delivery**: If a client disconnects exactly when receiving a message, it may receive that message again upon resumption. Clients should handle duplicate messages idempotently. +- **TTL expiration**: Streams expire after the configured TTL. Clients cannot resume streams that have expired. +- **Redis guarantees**: Redis streams are backed by Redis persistence mechanisms (RDB/AOF). Ensure your Redis instance is configured for durability as needed. + +## Configuration + +| Environment Variable | Description | Default | +|---------------------|-------------|---------| +| `REDIS_CONNECTION_STRING` | Redis connection string | `localhost:6379` | +| `REDIS_STREAM_TTL_MINUTES` | How long streams are retained after last write | `10` | +| `AZURE_OPENAI_ENDPOINT` | Azure OpenAI endpoint URL | (required) | +| `AZURE_OPENAI_DEPLOYMENT` | Azure OpenAI deployment name | (required) | +| `AZURE_OPENAI_KEY` | API key (optional, uses Azure CLI auth if not set) | (optional) | + +## Cleanup + +To stop and remove the Redis Docker containers: + +```bash +docker stop redis +docker rm redis +``` diff --git a/dotnet/samples/DurableAgents/ConsoleApps/07_ReliableStreaming/RedisStreamResponseHandler.cs b/dotnet/samples/DurableAgents/ConsoleApps/07_ReliableStreaming/RedisStreamResponseHandler.cs new file mode 100644 index 0000000000..6838583342 --- /dev/null +++ b/dotnet/samples/DurableAgents/ConsoleApps/07_ReliableStreaming/RedisStreamResponseHandler.cs @@ -0,0 +1,216 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Runtime.CompilerServices; +using Microsoft.Agents.AI; +using Microsoft.Agents.AI.DurableTask; +using StackExchange.Redis; + +namespace ReliableStreaming; + +/// +/// Represents a chunk of data read from a Redis stream. +/// +/// The Redis stream entry ID (can be used as a cursor for resumption). +/// The text content of the chunk, or null if this is a completion/error marker. 
+/// True if this chunk marks the end of the stream. +/// An error message if something went wrong, or null otherwise. +public readonly record struct StreamChunk(string EntryId, string? Text, bool IsDone, string? Error); + +/// +/// An implementation of that publishes agent response updates +/// to Redis Streams for reliable delivery. This enables clients to disconnect and reconnect +/// to ongoing agent responses without losing messages. +/// +/// +/// +/// Redis Streams provide a durable, append-only log that supports consumer groups and message +/// acknowledgment. This implementation uses auto-generated IDs (which are timestamp-based) +/// as sequence numbers, allowing clients to resume from any point in the stream. +/// +/// +/// Each agent session gets its own Redis Stream, keyed by session ID. The stream entries +/// contain text chunks extracted from objects. +/// +/// +public sealed class RedisStreamResponseHandler : IAgentResponseHandler +{ + private const int MaxEmptyReads = 300; // 5 minutes at 1 second intervals + private const int PollIntervalMs = 1000; + + private readonly IConnectionMultiplexer _redis; + private readonly TimeSpan _streamTtl; + + /// + /// Initializes a new instance of the class. + /// + /// The Redis connection multiplexer. + /// The time-to-live for stream entries. Streams will expire after this duration of inactivity. + public RedisStreamResponseHandler(IConnectionMultiplexer redis, TimeSpan streamTtl) + { + this._redis = redis; + this._streamTtl = streamTtl; + } + + /// + public async ValueTask OnStreamingResponseUpdateAsync( + IAsyncEnumerable messageStream, + CancellationToken cancellationToken) + { + // Get the current session ID from the DurableAgentContext + // This is set by the AgentEntity before invoking the response handler + DurableAgentContext context = DurableAgentContext.Current + ?? throw new InvalidOperationException("DurableAgentContext.Current is not set. 
This handler must be used within a durable agent context."); + + // Get conversation ID from the current thread context, which is only available in the context of + // a durable agent execution. + string conversationId = context.CurrentThread.GetService().ToString(); + if (string.IsNullOrEmpty(conversationId)) + { + throw new InvalidOperationException("Unable to determine conversation ID from the current thread."); + } + + string streamKey = GetStreamKey(conversationId); + + IDatabase db = this._redis.GetDatabase(); + int sequenceNumber = 0; + + await foreach (AgentResponseUpdate update in messageStream.WithCancellation(cancellationToken)) + { + // Extract just the text content - this avoids serialization round-trip issues + string text = update.Text; + + // Only publish non-empty text chunks + if (!string.IsNullOrEmpty(text)) + { + // Create the stream entry with the text and metadata + NameValueEntry[] entries = + [ + new NameValueEntry("text", text), + new NameValueEntry("sequence", sequenceNumber++), + new NameValueEntry("timestamp", DateTimeOffset.UtcNow.ToUnixTimeMilliseconds()), + ]; + + // Add to the Redis Stream with auto-generated ID (timestamp-based) + await db.StreamAddAsync(streamKey, entries); + + // Refresh the TTL on each write to keep the stream alive during active streaming + await db.KeyExpireAsync(streamKey, this._streamTtl); + } + } + + // Add a sentinel entry to mark the end of the stream + NameValueEntry[] endEntries = + [ + new NameValueEntry("text", ""), + new NameValueEntry("sequence", sequenceNumber), + new NameValueEntry("timestamp", DateTimeOffset.UtcNow.ToUnixTimeMilliseconds()), + new NameValueEntry("done", "true"), + ]; + await db.StreamAddAsync(streamKey, endEntries); + + // Set final TTL - the stream will be cleaned up after this duration + await db.KeyExpireAsync(streamKey, this._streamTtl); + } + + /// + public ValueTask OnAgentResponseAsync(AgentResponse message, CancellationToken cancellationToken) + { + // This handler is 
optimized for streaming responses. + // For non-streaming responses, we don't need to store in Redis since + // the response is returned directly to the caller. + return ValueTask.CompletedTask; + } + + /// + /// Reads chunks from a Redis stream for the given session, yielding them as they become available. + /// + /// The conversation ID to read from. + /// Optional cursor to resume from. If null, reads from the beginning. + /// Cancellation token. + /// An async enumerable of stream chunks. + public async IAsyncEnumerable ReadStreamAsync( + string conversationId, + string? cursor, + [EnumeratorCancellation] CancellationToken cancellationToken) + { + string streamKey = GetStreamKey(conversationId); + + IDatabase db = this._redis.GetDatabase(); + string startId = string.IsNullOrEmpty(cursor) ? "0-0" : cursor; + + int emptyReadCount = 0; + bool hasSeenData = false; + + while (!cancellationToken.IsCancellationRequested) + { + StreamEntry[]? entries = null; + string? errorMessage = null; + + try + { + entries = await db.StreamReadAsync(streamKey, startId, count: 100); + } + catch (Exception ex) + { + errorMessage = ex.Message; + } + + if (errorMessage != null) + { + yield return new StreamChunk(startId, null, false, errorMessage); + yield break; + } + + // entries is guaranteed to be non-null if errorMessage is null + if (entries!.Length == 0) + { + if (!hasSeenData) + { + emptyReadCount++; + if (emptyReadCount >= MaxEmptyReads) + { + yield return new StreamChunk( + startId, + null, + false, + $"Stream not found or timed out after {MaxEmptyReads * PollIntervalMs / 1000} seconds"); + yield break; + } + } + + await Task.Delay(PollIntervalMs, cancellationToken); + continue; + } + + hasSeenData = true; + + foreach (StreamEntry entry in entries) + { + startId = entry.Id.ToString(); + string? text = entry["text"]; + string? 
done = entry["done"]; + + if (done == "true") + { + yield return new StreamChunk(startId, null, true, null); + yield break; + } + + if (!string.IsNullOrEmpty(text)) + { + yield return new StreamChunk(startId, text, false, null); + } + } + } + + // If we exited the loop due to cancellation, throw to signal the caller + cancellationToken.ThrowIfCancellationRequested(); + } + + /// + /// Gets the Redis Stream key for a given conversation ID. + /// + /// The conversation ID. + /// The Redis Stream key. + internal static string GetStreamKey(string conversationId) => $"agent-stream:{conversationId}"; +} diff --git a/dotnet/samples/DurableAgents/ConsoleApps/README.md b/dotnet/samples/DurableAgents/ConsoleApps/README.md new file mode 100644 index 0000000000..1bd2b0d224 --- /dev/null +++ b/dotnet/samples/DurableAgents/ConsoleApps/README.md @@ -0,0 +1,109 @@ +# Console App Samples + +This directory contains samples for console app hosting of durable agents. These samples use standard I/O (stdin/stdout) for interaction, making them both interactive and scriptable. + +- **[01_SingleAgent](01_SingleAgent)**: A sample that demonstrates how to host a single conversational agent in a console app and interact with it via stdin/stdout. +- **[02_AgentOrchestration_Chaining](02_AgentOrchestration_Chaining)**: A sample that demonstrates how to host a single conversational agent in a console app and invoke it using a durable orchestration. +- **[03_AgentOrchestration_Concurrency](03_AgentOrchestration_Concurrency)**: A sample that demonstrates how to host multiple agents in a console app and run them concurrently using a durable orchestration. +- **[04_AgentOrchestration_Conditionals](04_AgentOrchestration_Conditionals)**: A sample that demonstrates how to host multiple agents in a console app and run them sequentially using a durable orchestration with conditionals. 
+- **[05_AgentOrchestration_HITL](05_AgentOrchestration_HITL)**: A sample that demonstrates how to implement a human-in-the-loop workflow using durable orchestration, including interactive approval prompts. +- **[06_LongRunningTools](06_LongRunningTools)**: A sample that demonstrates how agents can start and interact with durable orchestrations from tool calls to enable long-running tool scenarios. +- **[07_ReliableStreaming](07_ReliableStreaming)**: A sample that demonstrates how to implement reliable streaming for durable agents using Redis Streams, enabling clients to disconnect and reconnect without losing messages. + +## Running the Samples + +These samples are designed to be run locally in a cloned repository. + +### Prerequisites + +The following prerequisites are required to run the samples: + +- [.NET 10.0 SDK or later](https://dotnet.microsoft.com/download/dotnet) +- [Azure CLI](https://learn.microsoft.com/cli/azure/install-azure-cli) installed and authenticated (`az login`) or an API key for the Azure OpenAI service +- [Azure OpenAI Service](https://learn.microsoft.com/azure/ai-services/openai/how-to/create-resource) with a deployed model (gpt-4o-mini or better is recommended) +- [Durable Task Scheduler](https://learn.microsoft.com/azure/azure-functions/durable/durable-task-scheduler/develop-with-durable-task-scheduler) (local emulator or Azure-hosted) +- [Docker](https://docs.docker.com/get-docker/) installed if running the Durable Task Scheduler emulator locally +- [Redis](https://redis.io/) (for sample 07 only) - can be run locally using Docker + +### Configuring RBAC Permissions for Azure OpenAI + +These samples are configured to use the Azure OpenAI service with RBAC permissions to access the model. You'll need to configure the RBAC permissions for the Azure OpenAI service to allow the console app to access the model. 
+ +Below is an example of how to configure the RBAC permissions for the Azure OpenAI service to allow the current user to access the model. + +Bash (Linux/macOS/WSL): + +```bash +az role assignment create \ + --assignee "yourname@contoso.com" \ + --role "Cognitive Services OpenAI User" \ + --scope /subscriptions//resourceGroups//providers/Microsoft.CognitiveServices/accounts/ +``` + +PowerShell: + +```powershell +az role assignment create ` + --assignee "yourname@contoso.com" ` + --role "Cognitive Services OpenAI User" ` + --scope /subscriptions//resourceGroups//providers/Microsoft.CognitiveServices/accounts/ +``` + +More information on how to configure RBAC permissions for Azure OpenAI can be found in the [Azure OpenAI documentation](https://learn.microsoft.com/azure/ai-services/openai/how-to/create-resource?pivots=cli). + +### Setting an API key for the Azure OpenAI service + +As an alternative to configuring Azure RBAC permissions, you can set an API key for the Azure OpenAI service by setting the `AZURE_OPENAI_KEY` environment variable. + +Bash (Linux/macOS/WSL): + +```bash +export AZURE_OPENAI_KEY="your-api-key" +``` + +PowerShell: + +```powershell +$env:AZURE_OPENAI_KEY="your-api-key" +``` + +### Start Durable Task Scheduler + +Most samples use the Durable Task Scheduler (DTS) to support hosted agents and durable orchestrations. DTS also allows you to view the status of orchestrations and their inputs and outputs from a web UI. + +To run the Durable Task Scheduler locally, you can use the following `docker` command: + +```bash +docker run -d --name dts-emulator -p 8080:8080 -p 8082:8082 mcr.microsoft.com/dts/dts-emulator:latest +``` + +The DTS dashboard will be available at `http://localhost:8082`. + +### Environment Configuration + +Each sample reads configuration from environment variables. 
You'll need to set the following environment variables: + +```bash +export AZURE_OPENAI_ENDPOINT="https://your-resource.openai.azure.com/" +export AZURE_OPENAI_DEPLOYMENT="your-deployment-name" +``` + +### Running the Console Apps + +Navigate to the sample directory and run the console app: + +```bash +cd dotnet/samples/DurableAgents/ConsoleApps/01_SingleAgent +dotnet run --framework net10.0 +``` + +> [!NOTE] +> The `--framework` option is required to specify the target framework for the console app because the samples are designed to support multiple target frameworks. If you are using a different target framework, you can specify it with the `--framework` option. + +The app will prompt you for input via stdin. + +### Viewing the sample output + +The console app output is displayed directly in the terminal where you ran `dotnet run`. Agent responses are printed to stdout with subtle color coding for better readability. + +You can also see the state of agents and orchestrations in the Durable Task Scheduler dashboard at `http://localhost:8082`. diff --git a/dotnet/samples/DurableAgents/Directory.Build.props b/dotnet/samples/DurableAgents/Directory.Build.props new file mode 100644 index 0000000000..7c4cb7dea2 --- /dev/null +++ b/dotnet/samples/DurableAgents/Directory.Build.props @@ -0,0 +1,9 @@ + + + + + + + + + \ No newline at end of file diff --git a/dotnet/samples/GettingStarted/AgentProviders/Agent_With_AzureAIProject/Program.cs b/dotnet/samples/GettingStarted/AgentProviders/Agent_With_AzureAIProject/Program.cs index 4ca52aa268..ba51c8c0e7 100644 --- a/dotnet/samples/GettingStarted/AgentProviders/Agent_With_AzureAIProject/Program.cs +++ b/dotnet/samples/GettingStarted/AgentProviders/Agent_With_AzureAIProject/Program.cs @@ -26,14 +26,14 @@ // agentVersion.Version = , // agentVersion.Name = -// You can retrieve an AIAgent for an already created server side agent version. +// You can use an AIAgent with an already created server side agent version. 
AIAgent existingJokerAgent = aiProjectClient.AsAIAgent(createdAgentVersion); // You can also create another AIAgent version by providing the same name with a different definition. -AIAgent newJokerAgent = aiProjectClient.CreateAIAgent(name: JokerName, model: deploymentName, instructions: "You are extremely hilarious at telling jokes."); +AIAgent newJokerAgent = await aiProjectClient.CreateAIAgentAsync(name: JokerName, model: deploymentName, instructions: "You are extremely hilarious at telling jokes."); // You can also get the AIAgent latest version just providing its name. -AIAgent jokerAgentLatest = aiProjectClient.GetAIAgent(name: JokerName); +AIAgent jokerAgentLatest = await aiProjectClient.GetAIAgentAsync(name: JokerName); var latestAgentVersion = jokerAgentLatest.GetService()!; // The AIAgent version can be accessed via the GetService method. diff --git a/dotnet/samples/GettingStarted/AgentProviders/Agent_With_CustomImplementation/Program.cs b/dotnet/samples/GettingStarted/AgentProviders/Agent_With_CustomImplementation/Program.cs index 3e52a5f01a..52efda2cf6 100644 --- a/dotnet/samples/GettingStarted/AgentProviders/Agent_With_CustomImplementation/Program.cs +++ b/dotnet/samples/GettingStarted/AgentProviders/Agent_With_CustomImplementation/Program.cs @@ -45,18 +45,18 @@ protected override async Task RunCoreAsync(IEnumerable responseMessages = CloneAndToUpperCase(messages, this.Name).ToList(); // Notify the thread of the input and output messages. 
- var invokedContext = new ChatMessageStore.InvokedContext(messages, storeMessages) + var invokedContext = new ChatHistoryProvider.InvokedContext(messages, storeMessages) { ResponseMessages = responseMessages }; - await typedThread.MessageStore.InvokedAsync(invokedContext, cancellationToken); + await typedThread.ChatHistoryProvider.InvokedAsync(invokedContext, cancellationToken); return new AgentResponse { @@ -77,18 +77,18 @@ protected override async IAsyncEnumerable RunCoreStreamingA } // Get existing messages from the store - var invokingContext = new ChatMessageStore.InvokingContext(messages); - var storeMessages = await typedThread.MessageStore.InvokingAsync(invokingContext, cancellationToken); + var invokingContext = new ChatHistoryProvider.InvokingContext(messages); + var storeMessages = await typedThread.ChatHistoryProvider.InvokingAsync(invokingContext, cancellationToken); // Clone the input messages and turn them into response messages with upper case text. List responseMessages = CloneAndToUpperCase(messages, this.Name).ToList(); // Notify the thread of the input and output messages. 
- var invokedContext = new ChatMessageStore.InvokedContext(messages, storeMessages) + var invokedContext = new ChatHistoryProvider.InvokedContext(messages, storeMessages) { ResponseMessages = responseMessages }; - await typedThread.MessageStore.InvokedAsync(invokedContext, cancellationToken); + await typedThread.ChatHistoryProvider.InvokedAsync(invokedContext, cancellationToken); foreach (var message in responseMessages) { diff --git a/dotnet/samples/GettingStarted/AgentWithRAG/AgentWithRAG_Step01_BasicTextRAG/Program.cs b/dotnet/samples/GettingStarted/AgentWithRAG/AgentWithRAG_Step01_BasicTextRAG/Program.cs index a4904ecf77..1371f07ba7 100644 --- a/dotnet/samples/GettingStarted/AgentWithRAG/AgentWithRAG_Step01_BasicTextRAG/Program.cs +++ b/dotnet/samples/GettingStarted/AgentWithRAG/AgentWithRAG_Step01_BasicTextRAG/Program.cs @@ -66,7 +66,7 @@ // Since we are using ChatCompletion which stores chat history locally, we can also add a message removal policy // that removes messages produced by the TextSearchProvider before they are added to the chat history, so that // we don't bloat chat history with all the search result messages. - ChatMessageStoreFactory = (ctx, ct) => new ValueTask(new InMemoryChatMessageStore(ctx.SerializedState, ctx.JsonSerializerOptions) + ChatHistoryProviderFactory = (ctx, ct) => new ValueTask(new InMemoryChatHistoryProvider(ctx.SerializedState, ctx.JsonSerializerOptions) .WithAIContextProviderMessageRemoval()), }); diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step07_3rdPartyThreadStorage/Program.cs b/dotnet/samples/GettingStarted/Agents/Agent_Step07_3rdPartyThreadStorage/Program.cs index a03b3bb349..50ad22784f 100644 --- a/dotnet/samples/GettingStarted/Agents/Agent_Step07_3rdPartyThreadStorage/Program.cs +++ b/dotnet/samples/GettingStarted/Agents/Agent_Step07_3rdPartyThreadStorage/Program.cs @@ -31,17 +31,17 @@ { ChatOptions = new() { Instructions = "You are good at telling jokes." 
}, Name = "Joker", - ChatMessageStoreFactory = (ctx, ct) => new ValueTask( - // Create a new chat message store for this agent that stores the messages in a vector store. - // Each thread must get its own copy of the VectorChatMessageStore, since the store - // also contains the id that the thread is stored under. - new VectorChatMessageStore(vectorStore, ctx.SerializedState, ctx.JsonSerializerOptions)) + ChatHistoryProviderFactory = (ctx, ct) => new ValueTask( + // Create a new ChatHistoryProvider for this agent that stores chat history in a vector store. + // Each thread must get its own copy of the VectorChatHistoryProvider, since the provider + // also contains the id that the chat history is stored under. + new VectorChatHistoryProvider(vectorStore, ctx.SerializedState, ctx.JsonSerializerOptions)) }); // Start a new thread for the agent conversation. AgentThread thread = await agent.GetNewThreadAsync(); -// Run the agent with the thread that stores conversation history in the vector store. +// Run the agent with the thread that stores chat history in the vector store. Console.WriteLine(await agent.RunAsync("Tell me a joke about a pirate.", thread)); // Serialize the thread state, so it can be stored for later use. @@ -58,30 +58,30 @@ // Deserialize the thread state after loading from storage. AgentThread resumedThread = await agent.DeserializeThreadAsync(serializedThread); -// Run the agent with the thread that stores conversation history in the vector store a second time. +// Run the agent with the thread that stores chat history in the vector store a second time. Console.WriteLine(await agent.RunAsync("Now tell the same joke in the voice of a pirate, and add some emojis to the joke.", resumedThread)); -// We can access the VectorChatMessageStore via the thread's GetService method if we need to read the key under which threads are stored. 
-var messageStore = resumedThread.GetService()!; -Console.WriteLine($"\nThread is stored in vector store under key: {messageStore.ThreadDbKey}"); +// We can access the VectorChatHistoryProvider via the thread's GetService method if we need to read the key under which chat history is stored. +var chatHistoryProvider = resumedThread.GetService()!; +Console.WriteLine($"\nThread is stored in vector store under key: {chatHistoryProvider.ThreadDbKey}"); namespace SampleApp { /// - /// A sample implementation of that stores chat messages in a vector store. + /// A sample implementation of that stores chat history in a vector store. /// - internal sealed class VectorChatMessageStore : ChatMessageStore + internal sealed class VectorChatHistoryProvider : ChatHistoryProvider { private readonly VectorStore _vectorStore; - public VectorChatMessageStore(VectorStore vectorStore, JsonElement serializedStoreState, JsonSerializerOptions? jsonSerializerOptions = null) + public VectorChatHistoryProvider(VectorStore vectorStore, JsonElement serializedState, JsonSerializerOptions? jsonSerializerOptions = null) { this._vectorStore = vectorStore ?? throw new ArgumentNullException(nameof(vectorStore)); - if (serializedStoreState.ValueKind is JsonValueKind.String) + if (serializedState.ValueKind is JsonValueKind.String) { // Here we can deserialize the thread id so that we can access the same messages as before the suspension. - this.ThreadDbKey = serializedStoreState.Deserialize(); + this.ThreadDbKey = serializedState.Deserialize(); } } diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step16_ChatReduction/Program.cs b/dotnet/samples/GettingStarted/Agents/Agent_Step16_ChatReduction/Program.cs index a80dd0fed0..a822f3a423 100644 --- a/dotnet/samples/GettingStarted/Agents/Agent_Step16_ChatReduction/Program.cs +++ b/dotnet/samples/GettingStarted/Agents/Agent_Step16_ChatReduction/Program.cs @@ -24,7 +24,7 @@ { ChatOptions = new() { Instructions = "You are good at telling jokes." 
}, Name = "Joker", - ChatMessageStoreFactory = (ctx, ct) => new ValueTask(new InMemoryChatMessageStore(new MessageCountingChatReducer(2), ctx.SerializedState, ctx.JsonSerializerOptions)) + ChatHistoryProviderFactory = (ctx, ct) => new ValueTask(new InMemoryChatHistoryProvider(new MessageCountingChatReducer(2), ctx.SerializedState, ctx.JsonSerializerOptions)) }); AgentThread thread = await agent.GetNewThreadAsync(); diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step20_AdditionalAIContext/Agent_Step20_AdditionalAIContext.csproj b/dotnet/samples/GettingStarted/Agents/Agent_Step20_AdditionalAIContext/Agent_Step20_AdditionalAIContext.csproj new file mode 100644 index 0000000000..550e1f22cb --- /dev/null +++ b/dotnet/samples/GettingStarted/Agents/Agent_Step20_AdditionalAIContext/Agent_Step20_AdditionalAIContext.csproj @@ -0,0 +1,25 @@ + + + + Exe + net10.0 + + enable + enable + + + + + + + + + + + + + + + + + diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step20_AdditionalAIContext/Program.cs b/dotnet/samples/GettingStarted/Agents/Agent_Step20_AdditionalAIContext/Program.cs new file mode 100644 index 0000000000..63b6f8d4fc --- /dev/null +++ b/dotnet/samples/GettingStarted/Agents/Agent_Step20_AdditionalAIContext/Program.cs @@ -0,0 +1,228 @@ +// Copyright (c) Microsoft. All rights reserved. + +// This sample shows how to inject additional AI context into a ChatClientAgent using a custom AIContextProvider component that is attached to the agent. +// The sample also shows how to combine the results from multiple providers into a single class, in order to attach multiple of these to an agent. +// This mechanism can be used for various purposes, such as injecting RAG search results or memories into the agent's context. +// Also note that Agent Framework already provides built-in AIContextProviders for many of these scenarios. 
+ +#pragma warning disable CA1869 // Cache and reuse 'JsonSerializerOptions' instances + +using System.ComponentModel; +using System.Text; +using System.Text.Json; +using Azure.AI.OpenAI; +using Azure.Identity; +using Microsoft.Agents.AI; +using Microsoft.Extensions.AI; +using OpenAI.Chat; +using SampleApp; +using MEAI = Microsoft.Extensions.AI; + +var endpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT") ?? throw new InvalidOperationException("AZURE_OPENAI_ENDPOINT is not set."); +var deploymentName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOYMENT_NAME") ?? "gpt-5-mini"; + +// A sample function to load the next three calendar events for the user. +Func> loadNextThreeCalendarEvents = async () => +{ + // In a real implementation, this method would connect to a calendar service + return new string[] + { + "Doctor's appointment today at 15:00", + "Team meeting today at 17:00", + "Birthday party today at 20:00" + }; +}; + +// Create an agent with an AI context provider attached that aggregates two other providers: +AIAgent agent = new AzureOpenAIClient( + new Uri(endpoint), + new AzureCliCredential()) + .GetChatClient(deploymentName) + .AsAIAgent(new ChatClientAgentOptions() + { + ChatOptions = new() { Instructions = """ + You are a helpful personal assistant. + You manage a TODO list for the user. When the user has completed one of the tasks it can be removed from the TODO list. Only provide the list of TODO items if asked. + You remind users of upcoming calendar events when the user interacts with you. + """ }, + ChatHistoryProviderFactory = (ctx, ct) => new ValueTask(new InMemoryChatHistoryProvider() + // Use WithAIContextProviderMessageRemoval, so that we don't store the messages from the AI context provider in the chat history. + // You may want to store these messages, depending on their content and your requirements. 
+ .WithAIContextProviderMessageRemoval()), + // Add an AI context provider that maintains a todo list for the agent and one that provides upcoming calendar entries. + // Wrap these in an AI context provider that aggregates the other two. + AIContextProviderFactory = (ctx, ct) => new ValueTask(new AggregatingAIContextProvider([ + AggregatingAIContextProvider.CreateFactory((jsonElement, jsonSerializerOptions) => new TodoListAIContextProvider(jsonElement, jsonSerializerOptions)), + AggregatingAIContextProvider.CreateFactory((_, _) => new CalendarSearchAIContextProvider(loadNextThreeCalendarEvents)) + ], ctx.SerializedState, ctx.JsonSerializerOptions)), + }); + +// Invoke the agent and output the text result. +AgentThread thread = await agent.GetNewThreadAsync(); +Console.WriteLine(await agent.RunAsync("I need to pick up milk from the supermarket.", thread) + "\n"); +Console.WriteLine(await agent.RunAsync("I need to take Sally for soccer practice.", thread) + "\n"); +Console.WriteLine(await agent.RunAsync("I need to make a dentist appointment for Jimmy.", thread) + "\n"); +Console.WriteLine(await agent.RunAsync("I've taken Sally to soccer practice.", thread) + "\n"); + +// We can serialize the thread, and it will contain both the chat history and the data that each AI context provider serialized. +JsonElement serializedThread = thread.Serialize(); +// Let's print it to console to show the contents. +Console.WriteLine(JsonSerializer.Serialize(serializedThread, options: new JsonSerializerOptions() { WriteIndented = true, IndentSize = 2 }) + "\n"); +// The serialized thread can be stored long term in a persistent store, but in this case we will just deserialize again and continue the conversation. 
+thread = await agent.DeserializeThreadAsync(serializedThread); + +Console.WriteLine(await agent.RunAsync("Considering my appointments, can you create a plan for my day that plans out when I should complete the items on my todo list?", thread) + "\n"); + +namespace SampleApp +{ + /// + /// An , which maintains a todo list for the agent. + /// + internal sealed class TodoListAIContextProvider : AIContextProvider + { + private readonly List _todoItems = new(); + + public TodoListAIContextProvider(JsonElement jsonElement, JsonSerializerOptions? jsonSerializerOptions = null) + { + // Only try and restore the state if we got an array, since any other json would be invalid or undefined/null meaning + // it's the first time we are running. + if (jsonElement.ValueKind == JsonValueKind.Array) + { + this._todoItems = JsonSerializer.Deserialize>(jsonElement.GetRawText(), jsonSerializerOptions) ?? new List(); + } + } + + public override ValueTask InvokingAsync(InvokingContext context, CancellationToken cancellationToken = default) + { + StringBuilder outputMessageBuilder = new(); + outputMessageBuilder.AppendLine("Your todo list contains the following items:"); + + if (this._todoItems.Count == 0) + { + outputMessageBuilder.AppendLine(" (no items)"); + } + else + { + for (int i = 0; i < this._todoItems.Count; i++) + { + outputMessageBuilder.AppendLine($"{i}. {this._todoItems[i]}"); + } + } + + return new ValueTask(new AIContext + { + Tools = [AIFunctionFactory.Create(this.AddTodoItem), AIFunctionFactory.Create(this.RemoveTodoItem)], + Messages = [new MEAI.ChatMessage(ChatRole.User, outputMessageBuilder.ToString())] + }); + } + + [Description("Adds an item to the todo list. Index is zero based.")] + private void RemoveTodoItem(int index) => + this._todoItems.RemoveAt(index); + + private void AddTodoItem(string item) => + this._todoItems.Add(string.IsNullOrWhiteSpace(item) ? 
throw new ArgumentException("Item must have a value") : item); + + public override JsonElement Serialize(JsonSerializerOptions? jsonSerializerOptions = null) => + JsonSerializer.SerializeToElement(this._todoItems, jsonSerializerOptions); + } + + /// + /// An which searches for upcoming calendar events and adds them to the AI context. + /// + internal sealed class CalendarSearchAIContextProvider(Func> loadNextThreeCalendarEvents) : AIContextProvider + { + public override async ValueTask InvokingAsync(InvokingContext context, CancellationToken cancellationToken = default) + { + var events = await loadNextThreeCalendarEvents(); + + StringBuilder outputMessageBuilder = new(); + outputMessageBuilder.AppendLine("You have the following upcoming calendar events:"); + foreach (var calendarEvent in events) + { + outputMessageBuilder.AppendLine($" - {calendarEvent}"); + } + + return new() + { + Messages = + [ + new MEAI.ChatMessage(ChatRole.User, outputMessageBuilder.ToString()), + ] + }; + } + } + + /// + /// An which aggregates multiple AI context providers into one. + /// Serialized state for the different providers are stored under their type name. + /// Tools and messages from all providers are combined, and instructions are concatenated. + /// + internal sealed class AggregatingAIContextProvider : AIContextProvider + { + private readonly List _providers = new(); + + public AggregatingAIContextProvider(ProviderFactory[] providerFactories, JsonElement jsonElement, JsonSerializerOptions? jsonSerializerOptions) + { + // We received a json object, so let's check if it has some previously serialized state that we can use. + if (jsonElement.ValueKind == JsonValueKind.Object) + { + this._providers = providerFactories + .Select(factory => factory.FactoryMethod(jsonElement.TryGetProperty(factory.ProviderType.Name, out var prop) ? prop : default, jsonSerializerOptions)) + .ToList(); + return; + } + + // We didn't receive any valid json, so we can just construct fresh providers. 
+ this._providers = providerFactories + .Select(factory => factory.FactoryMethod(default, jsonSerializerOptions)) + .ToList(); + } + + public override async ValueTask InvokingAsync(InvokingContext context, CancellationToken cancellationToken = default) + { + // Invoke all the sub providers. + var tasks = this._providers.Select(provider => provider.InvokingAsync(context, cancellationToken).AsTask()); + var results = await Task.WhenAll(tasks); + + // Combine the results from each sub provider. + return new AIContext + { + Tools = results.SelectMany(r => r.Tools ?? []).ToList(), + Messages = results.SelectMany(r => r.Messages ?? []).ToList(), + Instructions = string.Join("\n", results.Select(r => r.Instructions).Where(s => !string.IsNullOrEmpty(s))) + }; + } + + public override JsonElement Serialize(JsonSerializerOptions? jsonSerializerOptions = null) + { + Dictionary elements = new(); + foreach (var provider in this._providers) + { + JsonElement element = provider.Serialize(jsonSerializerOptions); + + // Don't try to store state for any providers that aren't producing any. 
+ if (element.ValueKind != JsonValueKind.Undefined && element.ValueKind != JsonValueKind.Null) + { + elements[provider.GetType().Name] = element; + } + } + + return JsonSerializer.SerializeToElement(elements, jsonSerializerOptions); + } + + public static ProviderFactory CreateFactory(Func factoryMethod) + where TProviderType : AIContextProvider => new() + { + FactoryMethod = (jsonElement, jsonSerializerOptions) => factoryMethod(jsonElement, jsonSerializerOptions), + ProviderType = typeof(TProviderType) + }; + + public readonly struct ProviderFactory + { + public Func FactoryMethod { get; init; } + + public Type ProviderType { get; init; } + } + } +} diff --git a/dotnet/samples/GettingStarted/Agents/README.md b/dotnet/samples/GettingStarted/Agents/README.md index d023d6455c..032353aea1 100644 --- a/dotnet/samples/GettingStarted/Agents/README.md +++ b/dotnet/samples/GettingStarted/Agents/README.md @@ -46,6 +46,7 @@ Before you begin, ensure you have the following prerequisites: |[Background responses](./Agent_Step17_BackgroundResponses/)|This sample demonstrates how to use background responses for long-running operations with polling and resumption support| |[Deep research with an agent](./Agent_Step18_DeepResearch/)|This sample demonstrates how to use the Deep Research Tool to perform comprehensive research on complex topics| |[Declarative agent](./Agent_Step19_Declarative/)|This sample demonstrates how to declaratively define an agent.| +|[Providing additional AI Context to an agent using multiple AIContextProviders](./Agent_Step20_AdditionalAIContext/)|This sample demonstrates how to inject additional AI context into a ChatClientAgent using multiple custom AIContextProvider components that are attached to the agent.| ## Running the samples from the console diff --git a/dotnet/samples/GettingStarted/FoundryAgents/FoundryAgents_Step01.1_Basics/Program.cs b/dotnet/samples/GettingStarted/FoundryAgents/FoundryAgents_Step01.1_Basics/Program.cs index 
9a7ee0736a..0a10930b75 100644 --- a/dotnet/samples/GettingStarted/FoundryAgents/FoundryAgents_Step01.1_Basics/Program.cs +++ b/dotnet/samples/GettingStarted/FoundryAgents/FoundryAgents_Step01.1_Basics/Program.cs @@ -28,14 +28,14 @@ // agentVersion.Version = , // agentVersion.Name = -// You can retrieve an AIAgent for an already created server side agent version. -AIAgent existingJokerAgent = aiProjectClient.GetAIAgent(createdAgentVersion); +// You can use an AIAgent with an already created server side agent version. +AIAgent existingJokerAgent = aiProjectClient.AsAIAgent(createdAgentVersion); // You can also create another AIAgent version by providing the same name with a different definition/instruction. -AIAgent newJokerAgent = aiProjectClient.CreateAIAgent(name: JokerName, model: deploymentName, instructions: "You are extremely hilarious at telling jokes."); +AIAgent newJokerAgent = await aiProjectClient.CreateAIAgentAsync(name: JokerName, model: deploymentName, instructions: "You are extremely hilarious at telling jokes."); // You can also get the AIAgent latest version by just providing its name. -AIAgent jokerAgentLatest = aiProjectClient.GetAIAgent(name: JokerName); +AIAgent jokerAgentLatest = await aiProjectClient.GetAIAgentAsync(name: JokerName); AgentVersion latestAgentVersion = jokerAgentLatest.GetService()!; // The AIAgent version can be accessed via the GetService method. diff --git a/dotnet/samples/GettingStarted/FoundryAgents/FoundryAgents_Step01.2_Running/Program.cs b/dotnet/samples/GettingStarted/FoundryAgents/FoundryAgents_Step01.2_Running/Program.cs index 8ebceaea26..6da363905e 100644 --- a/dotnet/samples/GettingStarted/FoundryAgents/FoundryAgents_Step01.2_Running/Program.cs +++ b/dotnet/samples/GettingStarted/FoundryAgents/FoundryAgents_Step01.2_Running/Program.cs @@ -23,8 +23,8 @@ // You can create a server side agent version with the Azure.AI.Agents SDK client below. 
AgentVersion agentVersion = aiProjectClient.Agents.CreateAgentVersion(agentName: JokerName, options); -// You can retrieve an AIAgent for a already created server side agent version. -AIAgent jokerAgent = aiProjectClient.GetAIAgent(agentVersion); +// You can use an AIAgent with an already created server side agent version. +AIAgent jokerAgent = aiProjectClient.AsAIAgent(agentVersion); // Invoke the agent with streaming support. await foreach (AgentResponseUpdate update in jokerAgent.RunStreamingAsync("Tell me a joke about a pirate.")) diff --git a/dotnet/samples/GettingStarted/FoundryAgents/FoundryAgents_Step02_MultiturnConversation/Program.cs b/dotnet/samples/GettingStarted/FoundryAgents/FoundryAgents_Step02_MultiturnConversation/Program.cs index 4ad3d86255..07bd1b149b 100644 --- a/dotnet/samples/GettingStarted/FoundryAgents/FoundryAgents_Step02_MultiturnConversation/Program.cs +++ b/dotnet/samples/GettingStarted/FoundryAgents/FoundryAgents_Step02_MultiturnConversation/Program.cs @@ -19,19 +19,23 @@ // Define the agent you want to create. (Prompt Agent in this case) AgentVersionCreationOptions options = new(new PromptAgentDefinition(model: deploymentName) { Instructions = JokerInstructions }); -// Create a server side agent version with the Azure.AI.Agents SDK client. -AgentVersion agentVersion = aiProjectClient.Agents.CreateAgentVersion(agentName: JokerName, options); - // Retrieve an AIAgent for the created server side agent version. -AIAgent jokerAgent = aiProjectClient.GetAIAgent(agentVersion); +ChatClientAgent jokerAgent = await aiProjectClient.CreateAIAgentAsync(name: JokerName, options); // Invoke the agent with a multi-turn conversation, where the context is preserved in the thread object. 
-AgentThread thread = await jokerAgent.GetNewThreadAsync(); +// Create a conversation in the server +ProjectConversationsClient conversationsClient = aiProjectClient.GetProjectOpenAIClient().GetProjectConversationsClient(); +ProjectConversation conversation = await conversationsClient.CreateProjectConversationAsync(); + +// Providing the conversation Id is not strictly necessary, but by not providing it no information will show up in the Foundry Project UI as conversations. +// Threads that doesn't have a conversation Id will work based on the `PreviousResponseId`. +AgentThread thread = await jokerAgent.GetNewThreadAsync(conversation.Id); + Console.WriteLine(await jokerAgent.RunAsync("Tell me a joke about a pirate.", thread)); Console.WriteLine(await jokerAgent.RunAsync("Now add some emojis to the joke and tell it in the voice of a pirate's parrot.", thread)); // Invoke the agent with a multi-turn conversation and streaming, where the context is preserved in the thread object. -thread = await jokerAgent.GetNewThreadAsync(); +thread = await jokerAgent.GetNewThreadAsync(conversation.Id); await foreach (AgentResponseUpdate update in jokerAgent.RunStreamingAsync("Tell me a joke about a pirate.", thread)) { Console.WriteLine(update); @@ -43,3 +47,6 @@ // Cleanup by agent name removes the agent version created. await aiProjectClient.Agents.DeleteAgentAsync(jokerAgent.Name); + +// Cleanup the conversation created. 
+await conversationsClient.DeleteConversationAsync(conversation.Id); diff --git a/dotnet/samples/GettingStarted/FoundryAgents/FoundryAgents_Step02_MultiturnConversation/README.md b/dotnet/samples/GettingStarted/FoundryAgents/FoundryAgents_Step02_MultiturnConversation/README.md index dab9f596db..44c50f7e8a 100644 --- a/dotnet/samples/GettingStarted/FoundryAgents/FoundryAgents_Step02_MultiturnConversation/README.md +++ b/dotnet/samples/GettingStarted/FoundryAgents/FoundryAgents_Step02_MultiturnConversation/README.md @@ -1,14 +1,15 @@ # Multi-turn Conversation with AI Agents -This sample demonstrates how to implement multi-turn conversations with AI agents, where context is preserved across multiple agent runs using threads. +This sample demonstrates how to implement multi-turn conversations with AI agents, where context is preserved across multiple agent runs using threads and conversation IDs. ## What this sample demonstrates - Creating an AI agent with instructions -- Using threads to maintain conversation context +- Creating a project conversation to track conversations in the Foundry UI +- Using threads with conversation IDs to maintain conversation context - Running multi-turn conversations with text output - Running multi-turn conversations with streaming output -- Managing agent lifecycle (creation and deletion) +- Managing agent and conversation lifecycle (creation and deletion) ## Prerequisites @@ -41,10 +42,18 @@ dotnet run --project .\FoundryAgents_Step02_MultiturnConversation The sample will: 1. Create an agent named "JokerAgent" with instructions to tell jokes -2. Create a thread for conversation context -3. Run the agent with a text prompt and display the response -4. Send a follow-up message to the same thread, demonstrating context preservation -5. Create a new thread and run the agent with streaming -6. Send a follow-up streaming message to demonstrate multi-turn streaming -7. Clean up resources by deleting the agent +2. 
Create a project conversation to enable visibility in the Azure Foundry UI +3. Create a thread linked to the conversation ID for context tracking +4. Run the agent with a text prompt and display the response +5. Send a follow-up message to the same thread, demonstrating context preservation +6. Create a new thread sharing the same conversation ID and run the agent with streaming +7. Send a follow-up streaming message to demonstrate multi-turn streaming +8. Clean up resources by deleting the agent and conversation + +## Conversation ID vs PreviousResponseId + +When working with multi-turn conversations, there are two approaches: + +- **With Conversation ID**: By passing a `conversation.Id` to `GetNewThreadAsync()`, the conversation will be visible in the Azure Foundry Project UI. This is useful for tracking and debugging conversations. +- **Without Conversation ID**: Threads created without a conversation ID still work correctly, maintaining context via `PreviousResponseId`. However, these conversations may not appear in the Foundry UI. diff --git a/dotnet/samples/GettingStarted/FoundryAgents/FoundryAgents_Step05_StructuredOutput/Program.cs b/dotnet/samples/GettingStarted/FoundryAgents/FoundryAgents_Step05_StructuredOutput/Program.cs index aaeb90fafa..d252b82b1e 100644 --- a/dotnet/samples/GettingStarted/FoundryAgents/FoundryAgents_Step05_StructuredOutput/Program.cs +++ b/dotnet/samples/GettingStarted/FoundryAgents/FoundryAgents_Step05_StructuredOutput/Program.cs @@ -44,7 +44,7 @@ Console.WriteLine($"Occupation: {response.Result.Occupation}"); // Create the ChatClientAgent with the specified name, instructions, and expected structured output the agent should produce. 
-ChatClientAgent agentWithPersonInfo = aiProjectClient.CreateAIAgent( +ChatClientAgent agentWithPersonInfo = await aiProjectClient.CreateAIAgentAsync( model: deploymentName, new ChatClientAgentOptions() { diff --git a/dotnet/samples/GettingStarted/FoundryAgents/FoundryAgents_Step07_Observability/Program.cs b/dotnet/samples/GettingStarted/FoundryAgents/FoundryAgents_Step07_Observability/Program.cs index 4ba3ee4d34..a247c1d0f7 100644 --- a/dotnet/samples/GettingStarted/FoundryAgents/FoundryAgents_Step07_Observability/Program.cs +++ b/dotnet/samples/GettingStarted/FoundryAgents/FoundryAgents_Step07_Observability/Program.cs @@ -32,7 +32,7 @@ AIProjectClient aiProjectClient = new(new Uri(endpoint), new AzureCliCredential()); // Define the agent you want to create. (Prompt Agent in this case) -AIAgent agent = aiProjectClient.CreateAIAgent(name: JokerName, model: deploymentName, instructions: JokerInstructions) +AIAgent agent = (await aiProjectClient.CreateAIAgentAsync(name: JokerName, model: deploymentName, instructions: JokerInstructions)) .AsBuilder() .UseOpenTelemetry(sourceName: sourceName) .Build(); diff --git a/dotnet/samples/GettingStarted/FoundryAgents/FoundryAgents_Step08_DependencyInjection/Program.cs b/dotnet/samples/GettingStarted/FoundryAgents/FoundryAgents_Step08_DependencyInjection/Program.cs index d6d1dd8d9f..f94acf1e38 100644 --- a/dotnet/samples/GettingStarted/FoundryAgents/FoundryAgents_Step08_DependencyInjection/Program.cs +++ b/dotnet/samples/GettingStarted/FoundryAgents/FoundryAgents_Step08_DependencyInjection/Program.cs @@ -2,6 +2,7 @@ // This sample shows how to use dependency injection to register an AIAgent and use it from a hosted service with a user input chat loop. 
+using System.ClientModel; using Azure.AI.Projects; using Azure.Identity; using Microsoft.Agents.AI; @@ -14,16 +15,27 @@ const string JokerInstructions = "You are good at telling jokes."; const string JokerName = "JokerAgent"; +AIProjectClient aIProjectClient = new(new Uri(endpoint), new AzureCliCredential()); + +// Create a new agent if one doesn't exist already. +ChatClientAgent agent; +try +{ + agent = await aIProjectClient.GetAIAgentAsync(name: JokerName); +} +catch (ClientResultException ex) when (ex.Status == 404) +{ + agent = await aIProjectClient.CreateAIAgentAsync(name: JokerName, model: deploymentName, instructions: JokerInstructions); +} + // Create a host builder that we will register services with and then run. HostApplicationBuilder builder = Host.CreateApplicationBuilder(args); // Add the agents client to the service collection. -builder.Services.AddSingleton((sp) => new AIProjectClient(new Uri(endpoint), new AzureCliCredential())); +builder.Services.AddSingleton((sp) => aIProjectClient); // Add the AI agent to the service collection. -builder.Services.AddSingleton((sp) - => sp.GetRequiredService() - .CreateAIAgent(name: JokerName, model: deploymentName, instructions: JokerInstructions)); +builder.Services.AddSingleton((sp) => agent); // Add a sample service that will use the agent to respond to user input. builder.Services.AddHostedService(); diff --git a/dotnet/samples/GettingStarted/FoundryAgents/FoundryAgents_Step09_UsingMcpClientAsTools/Program.cs b/dotnet/samples/GettingStarted/FoundryAgents/FoundryAgents_Step09_UsingMcpClientAsTools/Program.cs index a821c1194b..cfa4b39534 100644 --- a/dotnet/samples/GettingStarted/FoundryAgents/FoundryAgents_Step09_UsingMcpClientAsTools/Program.cs +++ b/dotnet/samples/GettingStarted/FoundryAgents/FoundryAgents_Step09_UsingMcpClientAsTools/Program.cs @@ -30,7 +30,7 @@ Console.WriteLine($"Creating the agent '{agentName}' ..."); // Define the agent you want to create. 
(Prompt Agent in this case) -AIAgent agent = aiProjectClient.CreateAIAgent( +AIAgent agent = await aiProjectClient.CreateAIAgentAsync( name: agentName, model: deploymentName, instructions: "You answer questions related to GitHub repositories only.", diff --git a/dotnet/samples/GettingStarted/FoundryAgents/FoundryAgents_Step10_UsingImages/Program.cs b/dotnet/samples/GettingStarted/FoundryAgents/FoundryAgents_Step10_UsingImages/Program.cs index fa841ca913..efaab99b8a 100644 --- a/dotnet/samples/GettingStarted/FoundryAgents/FoundryAgents_Step10_UsingImages/Program.cs +++ b/dotnet/samples/GettingStarted/FoundryAgents/FoundryAgents_Step10_UsingImages/Program.cs @@ -17,7 +17,7 @@ AIProjectClient aiProjectClient = new(new Uri(endpoint), new AzureCliCredential()); // Define the agent you want to create. (Prompt Agent in this case) -AIAgent agent = aiProjectClient.CreateAIAgent(name: VisionName, model: deploymentName, instructions: VisionInstructions); +AIAgent agent = await aiProjectClient.CreateAIAgentAsync(name: VisionName, model: deploymentName, instructions: VisionInstructions); ChatMessage message = new(ChatRole.User, [ new TextContent("What do you see in this image?"), diff --git a/dotnet/samples/GettingStarted/FoundryAgents/FoundryAgents_Step11_AsFunctionTool/Program.cs b/dotnet/samples/GettingStarted/FoundryAgents/FoundryAgents_Step11_AsFunctionTool/Program.cs index e00b05d9cb..e29dff2953 100644 --- a/dotnet/samples/GettingStarted/FoundryAgents/FoundryAgents_Step11_AsFunctionTool/Program.cs +++ b/dotnet/samples/GettingStarted/FoundryAgents/FoundryAgents_Step11_AsFunctionTool/Program.cs @@ -25,14 +25,14 @@ static string GetWeather([Description("The location to get the weather for.")] s // Create the weather agent with function tools. 
AITool weatherTool = AIFunctionFactory.Create(GetWeather); -AIAgent weatherAgent = aiProjectClient.CreateAIAgent( +AIAgent weatherAgent = await aiProjectClient.CreateAIAgentAsync( name: WeatherName, model: deploymentName, instructions: WeatherInstructions, tools: [weatherTool]); // Create the main agent, and provide the weather agent as a function tool. -AIAgent agent = aiProjectClient.CreateAIAgent( +AIAgent agent = await aiProjectClient.CreateAIAgentAsync( name: MainName, model: deploymentName, instructions: MainInstructions, diff --git a/dotnet/samples/GettingStarted/FoundryAgents/FoundryAgents_Step12_Middleware/Program.cs b/dotnet/samples/GettingStarted/FoundryAgents/FoundryAgents_Step12_Middleware/Program.cs index 0c6b76612c..c1750e3387 100644 --- a/dotnet/samples/GettingStarted/FoundryAgents/FoundryAgents_Step12_Middleware/Program.cs +++ b/dotnet/samples/GettingStarted/FoundryAgents/FoundryAgents_Step12_Middleware/Program.cs @@ -34,7 +34,7 @@ static string GetDateTime() AITool getWeatherTool = AIFunctionFactory.Create(GetWeather, name: nameof(GetWeather)); // Define the agent you want to create. (Prompt Agent in this case) -AIAgent originalAgent = aiProjectClient.CreateAIAgent( +AIAgent originalAgent = await aiProjectClient.CreateAIAgentAsync( name: AssistantName, model: deploymentName, instructions: AssistantInstructions, @@ -69,7 +69,7 @@ static string GetDateTime() // Special per-request middleware agent. 
Console.WriteLine("\n\n=== Example 4: Middleware with human in the loop function approval ==="); -AIAgent humanInTheLoopAgent = aiProjectClient.CreateAIAgent( +AIAgent humanInTheLoopAgent = await aiProjectClient.CreateAIAgentAsync( name: "HumanInTheLoopAgent", model: deploymentName, instructions: "You are an Human in the loop testing AI assistant that helps people find information.", diff --git a/dotnet/samples/GettingStarted/FoundryAgents/FoundryAgents_Step13_Plugins/Program.cs b/dotnet/samples/GettingStarted/FoundryAgents/FoundryAgents_Step13_Plugins/Program.cs index 0cd9674770..72ec26bdf7 100644 --- a/dotnet/samples/GettingStarted/FoundryAgents/FoundryAgents_Step13_Plugins/Program.cs +++ b/dotnet/samples/GettingStarted/FoundryAgents/FoundryAgents_Step13_Plugins/Program.cs @@ -34,7 +34,7 @@ // Define the agent with plugin tools // Define the agent you want to create. (Prompt Agent in this case) -AIAgent agent = aiProjectClient.CreateAIAgent( +AIAgent agent = await aiProjectClient.CreateAIAgentAsync( name: AssistantName, model: deploymentName, instructions: AssistantInstructions, diff --git a/dotnet/samples/GettingStarted/FoundryAgents/README.md b/dotnet/samples/GettingStarted/FoundryAgents/README.md index daeb2db8df..ba5af8de5a 100644 --- a/dotnet/samples/GettingStarted/FoundryAgents/README.md +++ b/dotnet/samples/GettingStarted/FoundryAgents/README.md @@ -15,6 +15,17 @@ For more information about the previous classic agents and for what's new in Fou For a sample demonstrating how to use classic Foundry Agents, see the following: [Agent with Azure AI Persistent](../AgentProviders/Agent_With_AzureAIAgentsPersistent/README.md). +## Agent Versioning and Static Definitions + +One of the key architectural changes in the new Foundry Agents compared to the classic experience is how agent definitions are handled. In the new architecture, agents have **versions** and their definitions are established at creation time. 
This means that the agent's configuration—including instructions, tools, and options—is fixed when the agent version is created. + +> [!IMPORTANT] +> Agent versions are static and strictly adhere to their original definition. Any attempt to provide or override tools, instructions, or options during an agent run or request will be ignored by the agent, as the API does not support runtime configuration changes. All agent behavior must be defined at agent creation time. + +This design ensures consistency and predictability in agent behavior across all interactions with a specific agent version. + +The Agent Framework intentionally ignores unsupported runtime parameters rather than throwing exceptions. This abstraction-first approach ensures that code written against the unified agent abstraction remains portable across providers (OpenAI, Azure OpenAI, Foundry Agents). It removes the need for provider-specific conditional logic. Teams can adopt Foundry Agents without rewriting existing orchestration code. Configurations that work with other providers will gracefully degrade, rather than fail, when the underlying API does not support them. 
+ ## Getting started with Foundry Agents prerequisites Before you begin, ensure you have the following prerequisites: diff --git a/dotnet/samples/GettingStarted/Workflows/Checkpoint/CheckpointAndRehydrate/Program.cs b/dotnet/samples/GettingStarted/Workflows/Checkpoint/CheckpointAndRehydrate/Program.cs index bfa8741ecb..093024a873 100644 --- a/dotnet/samples/GettingStarted/Workflows/Checkpoint/CheckpointAndRehydrate/Program.cs +++ b/dotnet/samples/GettingStarted/Workflows/Checkpoint/CheckpointAndRehydrate/Program.cs @@ -73,7 +73,7 @@ private static async Task Main() CheckpointInfo savedCheckpoint = checkpoints[CheckpointIndex]; await using Checkpointed newCheckpointedRun = - await InProcessExecution.ResumeStreamAsync(newWorkflow, savedCheckpoint, checkpointManager, checkpointedRun.Run.RunId); + await InProcessExecution.ResumeStreamAsync(newWorkflow, savedCheckpoint, checkpointManager); await foreach (WorkflowEvent evt in newCheckpointedRun.Run.WatchStreamAsync()) { diff --git a/dotnet/samples/GettingStarted/Workflows/Declarative/HostedWorkflow/Program.cs b/dotnet/samples/GettingStarted/Workflows/Declarative/HostedWorkflow/Program.cs index ee5c229f3d..5d76a0e319 100644 --- a/dotnet/samples/GettingStarted/Workflows/Declarative/HostedWorkflow/Program.cs +++ b/dotnet/samples/GettingStarted/Workflows/Declarative/HostedWorkflow/Program.cs @@ -45,7 +45,7 @@ public static async Task Main(string[] args) string workflowInput = GetWorkflowInput(args); - AIAgent agent = aiProjectClient.GetAIAgent(agentVersion); + AIAgent agent = aiProjectClient.AsAIAgent(agentVersion); AgentThread thread = await agent.GetNewThreadAsync(); diff --git a/dotnet/samples/GettingStarted/Workflows/_Foundational/07_MixedWorkflowAgentsAndExecutors/Program.cs b/dotnet/samples/GettingStarted/Workflows/_Foundational/07_MixedWorkflowAgentsAndExecutors/Program.cs index 16250367cd..096471811c 100644 --- a/dotnet/samples/GettingStarted/Workflows/_Foundational/07_MixedWorkflowAgentsAndExecutors/Program.cs +++ 
b/dotnet/samples/GettingStarted/Workflows/_Foundational/07_MixedWorkflowAgentsAndExecutors/Program.cs @@ -129,7 +129,7 @@ private static async Task Main() private static async Task ExecuteWorkflowAsync(Workflow workflow, string input) { // Configure whether to show agent thinking in real-time - const bool ShowAgentThinking = false; + const bool ShowAgentThinking = true; // Execute in streaming mode to see real-time progress await using StreamingRun run = await InProcessExecution.StreamAsync(workflow, input); @@ -230,14 +230,23 @@ public override async ValueTask HandleAsync(string message, IWorkflowContext con /// Executor that synchronizes agent output and prepares it for the next stage. /// This demonstrates how executors can process agent outputs and forward to the next agent. /// -internal sealed class JailbreakSyncExecutor() : Executor("JailbreakSync") +/// +/// The AIAgentHostExecutor sends response.Messages which has runtime type List<ChatMessage>. +/// The message router uses exact type matching via message.GetType(). +/// +internal sealed class JailbreakSyncExecutor() : Executor>("JailbreakSync") { - public override async ValueTask HandleAsync(ChatMessage message, IWorkflowContext context, CancellationToken cancellationToken = default) + public override async ValueTask HandleAsync(List message, IWorkflowContext context, CancellationToken cancellationToken = default) { Console.WriteLine(); // New line after agent streaming Console.ForegroundColor = ConsoleColor.Magenta; - string fullAgentResponse = message.Text?.Trim() ?? "UNKNOWN"; + // Combine all response messages (typically just one for simple agents) + string fullAgentResponse = string.Join("\n", message.Select(m => m.Text?.Trim() ?? 
"")).Trim(); + if (string.IsNullOrEmpty(fullAgentResponse)) + { + fullAgentResponse = "UNKNOWN"; + } Console.WriteLine($"[{this.Id}] Full Agent Response:"); Console.WriteLine(fullAgentResponse); @@ -278,17 +287,24 @@ public override async ValueTask HandleAsync(ChatMessage message, IWorkflowContex /// /// Executor that outputs the final result and marks the end of the workflow. /// -internal sealed class FinalOutputExecutor() : Executor("FinalOutput") +/// +/// The AIAgentHostExecutor sends response.Messages which has runtime type List<ChatMessage>. +/// The message router uses exact type matching via message.GetType(). +/// +internal sealed class FinalOutputExecutor() : Executor, string>("FinalOutput") { - public override ValueTask HandleAsync(ChatMessage message, IWorkflowContext context, CancellationToken cancellationToken = default) + public override ValueTask HandleAsync(List message, IWorkflowContext context, CancellationToken cancellationToken = default) { + // Combine all response messages (typically just one for simple agents) + string combinedText = string.Join("\n", message.Select(m => m.Text ?? "")).Trim(); + Console.WriteLine(); // New line after agent streaming Console.ForegroundColor = ConsoleColor.Green; Console.WriteLine($"\n[{this.Id}] Final Response:"); - Console.WriteLine($"{message.Text}"); + Console.WriteLine($"{combinedText}"); Console.WriteLine("\n[End of Workflow]"); Console.ResetColor(); - return ValueTask.FromResult(message.Text ?? string.Empty); + return ValueTask.FromResult(combinedText); } } diff --git a/dotnet/src/Microsoft.Agents.AI.Abstractions/AIAgent.cs b/dotnet/src/Microsoft.Agents.AI.Abstractions/AIAgent.cs index 10284dc25a..3314177bf1 100644 --- a/dotnet/src/Microsoft.Agents.AI.Abstractions/AIAgent.cs +++ b/dotnet/src/Microsoft.Agents.AI.Abstractions/AIAgent.cs @@ -19,9 +19,13 @@ namespace Microsoft.Agents.AI; /// and process user requests. 
An agent instance may participate in multiple concurrent conversations, and each conversation /// may involve multiple agents working together. /// -[DebuggerDisplay("{DisplayName,nq}")] +[DebuggerDisplay("{DebuggerDisplay,nq}")] public abstract class AIAgent { + [DebuggerBrowsable(DebuggerBrowsableState.Never)] + private string DebuggerDisplay => + this.Name is { } name ? $"Id = {this.Id}, Name = {name}" : $"Id = {this.Id}"; + /// /// Gets the unique identifier for this agent instance. /// diff --git a/dotnet/src/Microsoft.Agents.AI.Abstractions/AdditionalPropertiesExtensions.cs b/dotnet/src/Microsoft.Agents.AI.Abstractions/AdditionalPropertiesExtensions.cs new file mode 100644 index 0000000000..bf11a98c84 --- /dev/null +++ b/dotnet/src/Microsoft.Agents.AI.Abstractions/AdditionalPropertiesExtensions.cs @@ -0,0 +1,99 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Diagnostics.CodeAnalysis; +using Microsoft.Extensions.AI; +using Microsoft.Shared.Diagnostics; + +namespace Microsoft.Agents.AI; + +/// +/// Contains extension methods to allow storing and retrieving properties using the type name of the property as the key. +/// +public static class AdditionalPropertiesExtensions +{ + /// + /// Adds an additional property using the type name of the property as the key. + /// + /// The type of the property to add. + /// The dictionary of additional properties. + /// The value to add. + public static void Add(this AdditionalPropertiesDictionary additionalProperties, T value) + { + _ = Throw.IfNull(additionalProperties); + + additionalProperties.Add(typeof(T).FullName!, value); + } + + /// + /// Attempts to add a property using the type name of the property as the key. + /// + /// + /// This method uses the full name of the type parameter as the key. If the key already exists, + /// the value is not updated and the method returns . + /// + /// The type of the property to add. + /// The dictionary of additional properties. + /// The value to add. 
+ /// + /// if the value was added successfully; if the key already exists. + /// + public static bool TryAdd(this AdditionalPropertiesDictionary additionalProperties, T value) + { + _ = Throw.IfNull(additionalProperties); + + return additionalProperties.TryAdd(typeof(T).FullName!, value); + } + + /// + /// Attempts to retrieve a value from the additional properties dictionary using the type name of the property as the key. + /// + /// + /// This method uses the full name of the type parameter as the key when searching the dictionary. + /// + /// The type of the property to be retrieved. + /// The dictionary containing additional properties. + /// + /// When this method returns, contains the value retrieved from the dictionary, if found and successfully converted to the requested type; + /// otherwise, the default value of . + /// + /// + /// if a non- value was found + /// in the dictionary and converted to the requested type; otherwise, . + /// + public static bool TryGetValue(this AdditionalPropertiesDictionary additionalProperties, [NotNullWhen(true)] out T? value) + { + _ = Throw.IfNull(additionalProperties); + + return additionalProperties.TryGetValue(typeof(T).FullName!, out value); + } + + /// + /// Determines whether the additional properties dictionary contains a property with the name of the provided type as the key. + /// + /// The type of the property to check for. + /// The dictionary of additional properties. + /// + /// if the dictionary contains a property with the name of the provided type as the key; otherwise, . + /// + public static bool Contains(this AdditionalPropertiesDictionary additionalProperties) + { + _ = Throw.IfNull(additionalProperties); + + return additionalProperties.ContainsKey(typeof(T).FullName!); + } + + /// + /// Removes a property from the additional properties dictionary using the name of the provided type as the key. + /// + /// The type of the property to remove. + /// The dictionary of additional properties. 
+ /// + /// if the property was successfully removed; otherwise, . + /// + public static bool Remove(this AdditionalPropertiesDictionary additionalProperties) + { + _ = Throw.IfNull(additionalProperties); + + return additionalProperties.Remove(typeof(T).FullName!); + } +} diff --git a/dotnet/src/Microsoft.Agents.AI.Abstractions/AgentAbstractionsJsonUtilities.cs b/dotnet/src/Microsoft.Agents.AI.Abstractions/AgentAbstractionsJsonUtilities.cs index 937d871c56..3d420fb573 100644 --- a/dotnet/src/Microsoft.Agents.AI.Abstractions/AgentAbstractionsJsonUtilities.cs +++ b/dotnet/src/Microsoft.Agents.AI.Abstractions/AgentAbstractionsJsonUtilities.cs @@ -82,7 +82,7 @@ private static JsonSerializerOptions CreateDefaultOptions() [JsonSerializable(typeof(AgentResponseUpdate[]))] [JsonSerializable(typeof(ServiceIdAgentThread.ServiceIdAgentThreadState))] [JsonSerializable(typeof(InMemoryAgentThread.InMemoryAgentThreadState))] - [JsonSerializable(typeof(InMemoryChatMessageStore.StoreState))] + [JsonSerializable(typeof(InMemoryChatHistoryProvider.State))] [ExcludeFromCodeCoverage] private sealed partial class JsonContext : JsonSerializerContext; diff --git a/dotnet/src/Microsoft.Agents.AI.Abstractions/AgentThread.cs b/dotnet/src/Microsoft.Agents.AI.Abstractions/AgentThread.cs index 318307ec43..579202c368 100644 --- a/dotnet/src/Microsoft.Agents.AI.Abstractions/AgentThread.cs +++ b/dotnet/src/Microsoft.Agents.AI.Abstractions/AgentThread.cs @@ -68,7 +68,7 @@ public virtual JsonElement Serialize(JsonSerializerOptions? jsonSerializerOption /// is . /// /// The purpose of this method is to allow for the retrieval of strongly-typed services that might be provided by the , - /// including itself or any services it might be wrapping. For example, to access a if available for the instance, + /// including itself or any services it might be wrapping. For example, to access a if available for the instance, /// may be used to request it. /// public virtual object? 
GetService(Type serviceType, object? serviceKey = null) diff --git a/dotnet/src/Microsoft.Agents.AI.Abstractions/ChatMessageStore.cs b/dotnet/src/Microsoft.Agents.AI.Abstractions/ChatHistoryProvider.cs similarity index 83% rename from dotnet/src/Microsoft.Agents.AI.Abstractions/ChatMessageStore.cs rename to dotnet/src/Microsoft.Agents.AI.Abstractions/ChatHistoryProvider.cs index 54cee063d7..1b75e45629 100644 --- a/dotnet/src/Microsoft.Agents.AI.Abstractions/ChatMessageStore.cs +++ b/dotnet/src/Microsoft.Agents.AI.Abstractions/ChatHistoryProvider.cs @@ -11,11 +11,12 @@ namespace Microsoft.Agents.AI; /// -/// Provides an abstract base class for storing and managing chat messages associated with agent conversations. +/// Provides an abstract base class for fetching chat messages from, and adding chat messages to, chat history for the purposes of agent execution. /// /// /// -/// defines the contract for persistent storage of chat messages in agent conversations. +/// defines the contract that an can use to retrieve messsages from chat history +/// and provide notification of newly produced messages. /// Implementations are responsible for managing message persistence, retrieval, and any necessary optimization /// strategies such as truncation, summarization, or archival. /// @@ -28,11 +29,15 @@ namespace Microsoft.Agents.AI; /// Supporting serialization for thread persistence and migration /// /// +/// +/// A is only relevant for scenarios where the underlying AI service that the agent is using +/// does not use in-service chat history storage. +/// /// -public abstract class ChatMessageStore +public abstract class ChatHistoryProvider { /// - /// Called at the start of agent invocation to retrieve all messages from the store that should be provided as context for the next agent invocation. + /// Called at the start of agent invocation to provide messages from the chat history as context for the next agent invocation. 
/// /// Contains the request context including the caller provided messages that will be used by the agent for this invocation. /// The to monitor for cancellation requests. The default is . @@ -56,14 +61,14 @@ public abstract class ChatMessageStore /// /// /// - /// Each store instance should be associated with a single conversation thread to ensure proper message isolation + /// Each instance should be associated with a single to ensure proper message isolation /// and context management. /// /// public abstract ValueTask> InvokingAsync(InvokingContext context, CancellationToken cancellationToken = default); /// - /// Called at the end of the agent invocation to add new messages to the store. + /// Called at the end of the agent invocation to add new messages to the chat history. /// /// Contains the invocation context including request messages, response messages, and any exception that occurred. /// The to monitor for cancellation requests. The default is . @@ -71,7 +76,7 @@ public abstract class ChatMessageStore /// /// /// Messages should be added in the order they were generated to maintain proper chronological sequence. - /// The store is responsible for preserving message ordering and ensuring that subsequent calls to + /// The is responsible for preserving message ordering and ensuring that subsequent calls to /// return messages in the correct chronological order. /// /// @@ -80,7 +85,6 @@ public abstract class ChatMessageStore /// Validating message content and metadata /// Applying storage optimizations or compression /// Triggering background maintenance operations - /// Updating indices or search capabilities /// /// /// @@ -97,13 +101,13 @@ public abstract class ChatMessageStore /// A representation of the object's state. public abstract JsonElement Serialize(JsonSerializerOptions? jsonSerializerOptions = null); - /// Asks the for an object of the specified type . + /// Asks the for an object of the specified type . 
/// The type of object being requested. /// An optional key that can be used to help identify the target service. /// The found object, otherwise . /// is . /// - /// The purpose of this method is to allow for the retrieval of strongly-typed services that might be provided by the , + /// The purpose of this method is to allow for the retrieval of strongly-typed services that might be provided by the , /// including itself or any services it might be wrapping. /// public virtual object? GetService(Type serviceType, object? serviceKey = null) @@ -115,12 +119,12 @@ public abstract class ChatMessageStore : null; } - /// Asks the for an object of type . + /// Asks the for an object of type . /// The type of the object to be retrieved. /// An optional key that can be used to help identify the target service. /// The found object, otherwise . /// - /// The purpose of this method is to allow for the retrieval of strongly typed services that may be provided by the , + /// The purpose of this method is to allow for the retrieval of strongly typed services that may be provided by the , /// including itself or any services it might be wrapping. /// public TService? GetService(object? serviceKey = null) @@ -130,9 +134,9 @@ public abstract class ChatMessageStore /// Contains the context information provided to . /// /// - /// This class provides context about the invocation before the messages are retrieved from the store, - /// including the new messages that will be used. Stores can use this information to determine what - /// messages should be retrieved for the invocation. + /// This class provides context about the invocation including the new messages that will be used. + /// A can use this information to determine what messages should be provided + /// for the invocation. /// public sealed class InvokingContext { @@ -169,12 +173,12 @@ public sealed class InvokedContext /// Initializes a new instance of the class with the specified request messages. 
/// /// The caller provided messages that were used by the agent for this invocation. - /// The messages retrieved from the for this invocation. + /// The messages retrieved from the for this invocation. /// is . - public InvokedContext(IEnumerable requestMessages, IEnumerable chatMessageStoreMessages) + public InvokedContext(IEnumerable requestMessages, IEnumerable? chatHistoryProviderMessages) { this.RequestMessages = Throw.IfNull(requestMessages); - this.ChatMessageStoreMessages = Throw.IfNull(chatMessageStoreMessages); + this.ChatHistoryProviderMessages = chatHistoryProviderMessages; } /// @@ -182,18 +186,18 @@ public InvokedContext(IEnumerable requestMessages, IEnumerable /// /// A collection of instances representing new messages that were provided by the caller. - /// This does not include any supplied messages. + /// This does not include any supplied messages. /// public IEnumerable RequestMessages { get; set { field = Throw.IfNull(value); } } /// - /// Gets the messages retrieved from the for this invocation, if any. + /// Gets the messages retrieved from the for this invocation, if any. /// /// - /// A collection of instances that were retrieved from the , + /// A collection of instances that were retrieved from the , /// and were used by the agent as part of the invocation. /// - public IEnumerable ChatMessageStoreMessages { get; set { field = Throw.IfNull(value); } } + public IEnumerable? ChatHistoryProviderMessages { get; set; } /// /// Gets or sets the messages provided by the for this invocation, if any. diff --git a/dotnet/src/Microsoft.Agents.AI.Abstractions/ChatHistoryProviderExtensions.cs b/dotnet/src/Microsoft.Agents.AI.Abstractions/ChatHistoryProviderExtensions.cs new file mode 100644 index 0000000000..0f5d9524cb --- /dev/null +++ b/dotnet/src/Microsoft.Agents.AI.Abstractions/ChatHistoryProviderExtensions.cs @@ -0,0 +1,51 @@ +// Copyright (c) Microsoft. All rights reserved. 
+ +using System; +using System.Collections.Generic; +using Microsoft.Extensions.AI; + +namespace Microsoft.Agents.AI; + +/// +/// Contains extension methods for the class. +/// +public static class ChatHistoryProviderExtensions +{ + /// + /// Adds message filtering to an existing , so that messages passed to the and messages + /// provided by the can be filtered, updated or replaced. + /// + /// The to add the message filter to. + /// An optional filter function to apply to messages produced by the . If null, no filter is applied at this + /// stage. + /// An optional filter function to apply to the invoked context messages before they are passed to the . If null, no + /// filter is applied at this stage. + /// The with filtering applied. + public static ChatHistoryProvider WithMessageFilters( + this ChatHistoryProvider provider, + Func, IEnumerable>? invokingMessagesFilter = null, + Func? invokedMessagesFilter = null) + { + return new ChatHistoryProviderMessageFilter( + innerProvider: provider, + invokingMessagesFilter: invokingMessagesFilter, + invokedMessagesFilter: invokedMessagesFilter); + } + + /// + /// Decorates the provided chat message so that it does not add + /// messages produced by any to chat history. + /// + /// The to add the message filter to. + /// A new instance that filters out messages so they do not get added. 
+ public static ChatHistoryProvider WithAIContextProviderMessageRemoval(this ChatHistoryProvider provider) + { + return new ChatHistoryProviderMessageFilter( + innerProvider: provider, + invokedMessagesFilter: (ctx) => + { + ctx.AIContextProviderMessages = null; + return ctx; + }); + } +} diff --git a/dotnet/src/Microsoft.Agents.AI.Abstractions/ChatMessageStoreMessageFilter.cs b/dotnet/src/Microsoft.Agents.AI.Abstractions/ChatHistoryProviderMessageFilter.cs similarity index 63% rename from dotnet/src/Microsoft.Agents.AI.Abstractions/ChatMessageStoreMessageFilter.cs rename to dotnet/src/Microsoft.Agents.AI.Abstractions/ChatHistoryProviderMessageFilter.cs index e58f233067..df7b536ea2 100644 --- a/dotnet/src/Microsoft.Agents.AI.Abstractions/ChatMessageStoreMessageFilter.cs +++ b/dotnet/src/Microsoft.Agents.AI.Abstractions/ChatHistoryProviderMessageFilter.cs @@ -11,33 +11,33 @@ namespace Microsoft.Agents.AI; /// -/// A decorator that allows filtering the messages -/// passed into and out of an inner . +/// A decorator that allows filtering the messages +/// passed into and out of an inner . /// -public sealed class ChatMessageStoreMessageFilter : ChatMessageStore +public sealed class ChatHistoryProviderMessageFilter : ChatHistoryProvider { - private readonly ChatMessageStore _innerChatMessageStore; + private readonly ChatHistoryProvider _innerProvider; private readonly Func, IEnumerable>? _invokingMessagesFilter; private readonly Func? _invokedMessagesFilter; /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// Use this constructor to customize how messages are filtered before and after invocation by - /// providing appropriate filter functions. If no filters are provided, the message store operates without + /// providing appropriate filter functions. If no filters are provided, the operates without /// additional filtering. - /// The underlying chat message store to be wrapped. Cannot be null. 
- /// An optional filter function to apply to messages before they are invoked. If null, no filter is applied at this - /// stage. - /// An optional filter function to apply to the invocation context after messages have been invoked. If null, no + /// The underlying to be wrapped. Cannot be null. + /// An optional filter function to apply to messages provided by the + /// before they are used by the agent. If null, no filter is applied at this stage. + /// An optional filter function to apply to the invocation context after messages have been produced. If null, no /// filter is applied at this stage. - /// Thrown if innerChatMessageStore is null. - public ChatMessageStoreMessageFilter( - ChatMessageStore innerChatMessageStore, + /// Thrown if is null. + public ChatHistoryProviderMessageFilter( + ChatHistoryProvider innerProvider, Func, IEnumerable>? invokingMessagesFilter = null, Func? invokedMessagesFilter = null) { - this._innerChatMessageStore = Throw.IfNull(innerChatMessageStore); + this._innerProvider = Throw.IfNull(innerProvider); if (invokingMessagesFilter == null && invokedMessagesFilter == null) { @@ -51,7 +51,7 @@ public ChatMessageStoreMessageFilter( /// public override async ValueTask> InvokingAsync(InvokingContext context, CancellationToken cancellationToken = default) { - var messages = await this._innerChatMessageStore.InvokingAsync(context, cancellationToken).ConfigureAwait(false); + var messages = await this._innerProvider.InvokingAsync(context, cancellationToken).ConfigureAwait(false); return this._invokingMessagesFilter != null ? this._invokingMessagesFilter(messages) : messages; } @@ -63,12 +63,12 @@ public override ValueTask InvokedAsync(InvokedContext context, CancellationToken context = this._invokedMessagesFilter(context); } - return this._innerChatMessageStore.InvokedAsync(context, cancellationToken); + return this._innerProvider.InvokedAsync(context, cancellationToken); } /// public override JsonElement Serialize(JsonSerializerOptions? 
jsonSerializerOptions = null) { - return this._innerChatMessageStore.Serialize(jsonSerializerOptions); + return this._innerProvider.Serialize(jsonSerializerOptions); } } diff --git a/dotnet/src/Microsoft.Agents.AI.Abstractions/ChatMessageStoreExtensions.cs b/dotnet/src/Microsoft.Agents.AI.Abstractions/ChatMessageStoreExtensions.cs deleted file mode 100644 index a205fc1d9e..0000000000 --- a/dotnet/src/Microsoft.Agents.AI.Abstractions/ChatMessageStoreExtensions.cs +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright (c) Microsoft. All rights reserved. - -using System; -using System.Collections.Generic; -using Microsoft.Extensions.AI; - -namespace Microsoft.Agents.AI; - -/// -/// Contains extension methods for the class. -/// -public static class ChatMessageStoreExtensions -{ - /// - /// Adds message filtering to an existing store, so that messages passed to the store and messages produced by the store - /// can be filtered, updated or replaced. - /// - /// The store to add the message filter to. - /// An optional filter function to apply to messages produced by the store. If null, no filter is applied at this - /// stage. - /// An optional filter function to apply to the invoked context messages before they are passed to the store. If null, no - /// filter is applied at this stage. - /// The with filtering applied. - public static ChatMessageStore WithMessageFilters( - this ChatMessageStore store, - Func, IEnumerable>? invokingMessagesFilter = null, - Func? invokedMessagesFilter = null) - { - return new ChatMessageStoreMessageFilter( - innerChatMessageStore: store, - invokingMessagesFilter: invokingMessagesFilter, - invokedMessagesFilter: invokedMessagesFilter); - } - - /// - /// Decorates the provided chat message store so that it does not store messages produced by any . - /// - /// The store to add the message filter to. - /// A new instance that filters out messages so they do not get stored. 
- public static ChatMessageStore WithAIContextProviderMessageRemoval(this ChatMessageStore store) - { - return new ChatMessageStoreMessageFilter( - innerChatMessageStore: store, - invokedMessagesFilter: (ctx) => - { - ctx.AIContextProviderMessages = null; - return ctx; - }); - } -} diff --git a/dotnet/src/Microsoft.Agents.AI.Abstractions/InMemoryAgentThread.cs b/dotnet/src/Microsoft.Agents.AI.Abstractions/InMemoryAgentThread.cs index 13fcc134f0..cc909b936c 100644 --- a/dotnet/src/Microsoft.Agents.AI.Abstractions/InMemoryAgentThread.cs +++ b/dotnet/src/Microsoft.Agents.AI.Abstractions/InMemoryAgentThread.cs @@ -9,11 +9,11 @@ namespace Microsoft.Agents.AI; /// -/// Provides an abstract base class for agent threads that maintain all conversation state in local memory. +/// Provides an abstract base class for an that maintain all chat history in local memory. /// /// /// -/// is designed for scenarios where conversation state should be stored locally +/// is designed for scenarios where chat history should be stored locally /// rather than in external services or databases. This approach provides high performance and simplicity while /// maintaining full control over the conversation data. /// @@ -28,17 +28,17 @@ public abstract class InMemoryAgentThread : AgentThread /// /// Initializes a new instance of the class. /// - /// - /// An optional instance to use for storing chat messages. - /// If , a new empty message store will be created. + /// + /// An optional instance to use for storing chat messages. + /// If , a new empty will be created. /// /// - /// This constructor allows sharing of message stores between threads or providing pre-configured - /// message stores with specific reduction or processing logic. + /// This constructor allows sharing of between threads or providing pre-configured + /// with specific reduction or processing logic. /// - protected InMemoryAgentThread(InMemoryChatMessageStore? 
messageStore = null) + protected InMemoryAgentThread(InMemoryChatHistoryProvider? chatHistoryProvider = null) { - this.MessageStore = messageStore ?? []; + this.ChatHistoryProvider = chatHistoryProvider ?? []; } /// @@ -52,7 +52,7 @@ protected InMemoryAgentThread(InMemoryChatMessageStore? messageStore = null) /// protected InMemoryAgentThread(IEnumerable messages) { - this.MessageStore = [.. messages]; + this.ChatHistoryProvider = [.. messages]; } /// @@ -60,9 +60,9 @@ protected InMemoryAgentThread(IEnumerable messages) /// /// A representing the serialized state of the thread. /// Optional settings for customizing the JSON deserialization process. - /// - /// Optional factory function to create the from its serialized state. - /// If not provided, a default factory will be used that creates a basic in-memory store. + /// + /// Optional factory function to create the from its serialized state. + /// If not provided, a default factory will be used that creates a basic . /// /// The is not a JSON object. /// The is invalid or cannot be deserialized to the expected type. @@ -73,7 +73,7 @@ protected InMemoryAgentThread(IEnumerable messages) protected InMemoryAgentThread( JsonElement serializedThreadState, JsonSerializerOptions? jsonSerializerOptions = null, - Func? messageStoreFactory = null) + Func? chatHistoryProviderFactory = null) { if (serializedThreadState.ValueKind != JsonValueKind.Object) { @@ -83,15 +83,15 @@ protected InMemoryAgentThread( var state = serializedThreadState.Deserialize( AgentAbstractionsJsonUtilities.DefaultOptions.GetTypeInfo(typeof(InMemoryAgentThreadState))) as InMemoryAgentThreadState; - this.MessageStore = - messageStoreFactory?.Invoke(state?.StoreState ?? default, jsonSerializerOptions) ?? - new InMemoryChatMessageStore(state?.StoreState ?? default, jsonSerializerOptions); + this.ChatHistoryProvider = + chatHistoryProviderFactory?.Invoke(state?.ChatHistoryProviderState ?? default, jsonSerializerOptions) ?? 
+ new InMemoryChatHistoryProvider(state?.ChatHistoryProviderState ?? default, jsonSerializerOptions); } /// - /// Gets or sets the used by this thread. + /// Gets or sets the used by this thread. /// - public InMemoryChatMessageStore MessageStore { get; } + public InMemoryChatHistoryProvider ChatHistoryProvider { get; } /// /// Serializes the current object's state to a using the specified serialization options. @@ -100,11 +100,11 @@ protected InMemoryAgentThread( /// A representation of the object's state. public override JsonElement Serialize(JsonSerializerOptions? jsonSerializerOptions = null) { - var storeState = this.MessageStore.Serialize(jsonSerializerOptions); + var chatHistoryProviderState = this.ChatHistoryProvider.Serialize(jsonSerializerOptions); var state = new InMemoryAgentThreadState { - StoreState = storeState, + ChatHistoryProviderState = chatHistoryProviderState, }; return JsonSerializer.SerializeToElement(state, AgentAbstractionsJsonUtilities.DefaultOptions.GetTypeInfo(typeof(InMemoryAgentThreadState))); @@ -112,13 +112,13 @@ public override JsonElement Serialize(JsonSerializerOptions? jsonSerializerOptio /// public override object? GetService(Type serviceType, object? serviceKey = null) => - base.GetService(serviceType, serviceKey) ?? this.MessageStore?.GetService(serviceType, serviceKey); + base.GetService(serviceType, serviceKey) ?? this.ChatHistoryProvider?.GetService(serviceType, serviceKey); [DebuggerBrowsable(DebuggerBrowsableState.Never)] - private string DebuggerDisplay => $"Count = {this.MessageStore.Count}"; + private string DebuggerDisplay => $"Count = {this.ChatHistoryProvider.Count}"; internal sealed class InMemoryAgentThreadState { - public JsonElement? StoreState { get; set; } + public JsonElement? 
ChatHistoryProviderState { get; set; } } } diff --git a/dotnet/src/Microsoft.Agents.AI.Abstractions/InMemoryChatMessageStore.cs b/dotnet/src/Microsoft.Agents.AI.Abstractions/InMemoryChatHistoryProvider.cs similarity index 74% rename from dotnet/src/Microsoft.Agents.AI.Abstractions/InMemoryChatMessageStore.cs rename to dotnet/src/Microsoft.Agents.AI.Abstractions/InMemoryChatHistoryProvider.cs index 1fb1b568ae..ab408c6a5e 100644 --- a/dotnet/src/Microsoft.Agents.AI.Abstractions/InMemoryChatMessageStore.cs +++ b/dotnet/src/Microsoft.Agents.AI.Abstractions/InMemoryChatHistoryProvider.cs @@ -14,55 +14,54 @@ namespace Microsoft.Agents.AI; /// -/// Provides an in-memory implementation of with support for message reduction and collection semantics. +/// Provides an in-memory implementation of with support for message reduction and collection semantics. /// /// /// -/// stores chat messages entirely in local memory, providing fast access and manipulation -/// capabilities. It implements both for agent integration and +/// stores chat messages entirely in local memory, providing fast access and manipulation +/// capabilities. It implements both for agent integration and /// for direct collection manipulation. /// /// -/// This store maintains all messages in memory. For long-running conversations or high-volume scenarios, consider using +/// This maintains all messages in memory. For long-running conversations or high-volume scenarios, consider using /// message reduction strategies or alternative storage implementations. /// /// [DebuggerDisplay("Count = {Count}")] [DebuggerTypeProxy(typeof(DebugView))] -public sealed class InMemoryChatMessageStore : ChatMessageStore, IList, IReadOnlyList +public sealed class InMemoryChatHistoryProvider : ChatHistoryProvider, IList, IReadOnlyList { private List _messages; /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. 
/// /// - /// This constructor creates a basic in-memory store without message reduction capabilities. + /// This constructor creates a basic in-memory without message reduction capabilities. /// Messages will be stored exactly as added without any automatic processing or reduction. /// - public InMemoryChatMessageStore() + public InMemoryChatHistoryProvider() { this._messages = []; } /// - /// Initializes a new instance of the class from previously serialized state. + /// Initializes a new instance of the class from previously serialized state. /// - /// A representing the serialized state of the message store. + /// A representing the serialized state of the provider. /// Optional settings for customizing the JSON deserialization process. - /// The is not a valid JSON object or cannot be deserialized. + /// The is not a valid JSON object or cannot be deserialized. /// - /// This constructor enables restoration of message stores from previously saved state, allowing + /// This constructor enables restoration of messages from previously saved state, allowing /// conversation history to be preserved across application restarts or migrated between instances. - /// The store will be configured with default settings and message reduction before retrieval. /// - public InMemoryChatMessageStore(JsonElement serializedStoreState, JsonSerializerOptions? jsonSerializerOptions = null) - : this(null, serializedStoreState, jsonSerializerOptions, ChatReducerTriggerEvent.BeforeMessagesRetrieval) + public InMemoryChatHistoryProvider(JsonElement serializedState, JsonSerializerOptions? jsonSerializerOptions = null) + : this(null, serializedState, jsonSerializerOptions, ChatReducerTriggerEvent.BeforeMessagesRetrieval) { } /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// /// A instance used to process, reduce, or optimize chat messages. 
@@ -77,29 +76,29 @@ public InMemoryChatMessageStore(JsonElement serializedStoreState, JsonSerializer /// Message reducers enable automatic management of message storage by implementing strategies to /// keep memory usage under control while preserving important conversation context. /// - public InMemoryChatMessageStore(IChatReducer chatReducer, ChatReducerTriggerEvent reducerTriggerEvent = ChatReducerTriggerEvent.BeforeMessagesRetrieval) + public InMemoryChatHistoryProvider(IChatReducer chatReducer, ChatReducerTriggerEvent reducerTriggerEvent = ChatReducerTriggerEvent.BeforeMessagesRetrieval) : this(chatReducer, default, null, reducerTriggerEvent) { Throw.IfNull(chatReducer); } /// - /// Initializes a new instance of the class, with an existing state from a serialized JSON element. + /// Initializes a new instance of the class, with an existing state from a serialized JSON element. /// /// An optional instance used to process or reduce chat messages. If null, no reduction logic will be applied. - /// A representing the serialized state of the store. + /// A representing the serialized state of the provider. /// Optional settings for customizing the JSON deserialization process. /// The event that should trigger the reducer invocation. - public InMemoryChatMessageStore(IChatReducer? chatReducer, JsonElement serializedStoreState, JsonSerializerOptions? jsonSerializerOptions = null, ChatReducerTriggerEvent reducerTriggerEvent = ChatReducerTriggerEvent.BeforeMessagesRetrieval) + public InMemoryChatHistoryProvider(IChatReducer? chatReducer, JsonElement serializedState, JsonSerializerOptions? jsonSerializerOptions = null, ChatReducerTriggerEvent reducerTriggerEvent = ChatReducerTriggerEvent.BeforeMessagesRetrieval) { this.ChatReducer = chatReducer; this.ReducerTriggerEvent = reducerTriggerEvent; - if (serializedStoreState.ValueKind is JsonValueKind.Object) + if (serializedState.ValueKind is JsonValueKind.Object) { var jso = jsonSerializerOptions ?? 
AgentAbstractionsJsonUtilities.DefaultOptions; - var state = serializedStoreState.Deserialize( - jso.GetTypeInfo(typeof(StoreState))) as StoreState; + var state = serializedState.Deserialize( + jso.GetTypeInfo(typeof(State))) as State; if (state?.Messages is { } messages) { this._messages = messages; @@ -116,7 +115,7 @@ public InMemoryChatMessageStore(IChatReducer? chatReducer, JsonElement serialize public IChatReducer? ChatReducer { get; } /// - /// Gets the event that triggers the reducer invocation in this store. + /// Gets the event that triggers the reducer invocation in this provider. /// public ChatReducerTriggerEvent ReducerTriggerEvent { get; } @@ -156,7 +155,7 @@ public override async ValueTask InvokedAsync(InvokedContext context, Cancellatio return; } - // Add request, AI context provider, and response messages to the store + // Add request, AI context provider, and response messages to the provider var allNewMessages = context.RequestMessages.Concat(context.AIContextProviderMessages ?? []).Concat(context.ResponseMessages ?? []); this._messages.AddRange(allNewMessages); @@ -169,13 +168,13 @@ public override async ValueTask InvokedAsync(InvokedContext context, Cancellatio /// public override JsonElement Serialize(JsonSerializerOptions? jsonSerializerOptions = null) { - StoreState state = new() + State state = new() { Messages = this._messages, }; var jso = jsonSerializerOptions ?? AgentAbstractionsJsonUtilities.DefaultOptions; - return JsonSerializer.SerializeToElement(state, jso.GetTypeInfo(typeof(StoreState))); + return JsonSerializer.SerializeToElement(state, jso.GetTypeInfo(typeof(State))); } /// @@ -218,13 +217,13 @@ public IEnumerator GetEnumerator() IEnumerator IEnumerable.GetEnumerator() => this.GetEnumerator(); - internal sealed class StoreState + internal sealed class State { public List Messages { get; set; } = []; } /// - /// Defines the events that can trigger a reducer in the . + /// Defines the events that can trigger a reducer in the . 
/// public enum ChatReducerTriggerEvent { @@ -235,15 +234,15 @@ public enum ChatReducerTriggerEvent AfterMessageAdded, /// - /// Trigger the reducer before messages are retrieved from the store. + /// Trigger the reducer before messages are retrieved from the provider. /// The reducer will process the messages before they are returned to the caller. /// BeforeMessagesRetrieval } - private sealed class DebugView(InMemoryChatMessageStore store) + private sealed class DebugView(InMemoryChatHistoryProvider provider) { [DebuggerBrowsable(DebuggerBrowsableState.RootHidden)] - public ChatMessage[] Items => store._messages.ToArray(); + public ChatMessage[] Items => provider._messages.ToArray(); } } diff --git a/dotnet/src/Microsoft.Agents.AI.AzureAI.Persistent/PersistentAgentsClientExtensions.cs b/dotnet/src/Microsoft.Agents.AI.AzureAI.Persistent/PersistentAgentsClientExtensions.cs index 55c3c4f0bf..2058e6760b 100644 --- a/dotnet/src/Microsoft.Agents.AI.AzureAI.Persistent/PersistentAgentsClientExtensions.cs +++ b/dotnet/src/Microsoft.Agents.AI.AzureAI.Persistent/PersistentAgentsClientExtensions.cs @@ -82,39 +82,6 @@ public static ChatClientAgent AsAIAgent( }, services: services); } - /// - /// Retrieves an existing server side agent, wrapped as a using the provided . - /// - /// The to create the with. - /// A for the persistent agent. - /// The ID of the server side agent to create a for. - /// Options that should apply to all runs of the agent. - /// Provides a way to customize the creation of the underlying used by the agent. - /// An optional to use for resolving services required by the instances being invoked. - /// The to monitor for cancellation requests. The default is . - /// A instance that can be used to perform operations on the persistent agent. - public static ChatClientAgent GetAIAgent( - this PersistentAgentsClient persistentAgentsClient, - string agentId, - ChatOptions? chatOptions = null, - Func? clientFactory = null, - IServiceProvider? 
services = null, - CancellationToken cancellationToken = default) - { - if (persistentAgentsClient is null) - { - throw new ArgumentNullException(nameof(persistentAgentsClient)); - } - - if (string.IsNullOrWhiteSpace(agentId)) - { - throw new ArgumentException($"{nameof(agentId)} should not be null or whitespace.", nameof(agentId)); - } - - var persistentAgentResponse = persistentAgentsClient.Administration.GetAgent(agentId, cancellationToken); - return persistentAgentsClient.AsAIAgent(persistentAgentResponse, chatOptions, clientFactory, services); - } - /// /// Retrieves an existing server side agent, wrapped as a using the provided . /// @@ -225,52 +192,13 @@ public static ChatClientAgent AsAIAgent( Description = options.Description ?? persistentAgentMetadata.Description, ChatOptions = options.ChatOptions, AIContextProviderFactory = options.AIContextProviderFactory, - ChatMessageStoreFactory = options.ChatMessageStoreFactory, + ChatHistoryProviderFactory = options.ChatHistoryProviderFactory, UseProvidedChatClientAsIs = options.UseProvidedChatClientAsIs }; return new ChatClientAgent(chatClient, agentOptions, services: services); } - /// - /// Retrieves an existing server side agent, wrapped as a using the provided . - /// - /// The to create the with. - /// The ID of the server side agent to create a for. - /// Full set of options to configure the agent. - /// Provides a way to customize the creation of the underlying used by the agent. - /// An optional to use for resolving services required by the instances being invoked. - /// The to monitor for cancellation requests. The default is . - /// A instance that can be used to perform operations on the persistent agent. - /// Thrown when or is . - /// Thrown when is empty or whitespace. - public static ChatClientAgent GetAIAgent( - this PersistentAgentsClient persistentAgentsClient, - string agentId, - ChatClientAgentOptions options, - Func? clientFactory = null, - IServiceProvider? 
services = null, - CancellationToken cancellationToken = default) - { - if (persistentAgentsClient is null) - { - throw new ArgumentNullException(nameof(persistentAgentsClient)); - } - - if (string.IsNullOrWhiteSpace(agentId)) - { - throw new ArgumentException($"{nameof(agentId)} should not be null or whitespace.", nameof(agentId)); - } - - if (options is null) - { - throw new ArgumentNullException(nameof(options)); - } - - var persistentAgentResponse = persistentAgentsClient.Administration.GetAgent(agentId, cancellationToken); - return persistentAgentsClient.AsAIAgent(persistentAgentResponse, options, clientFactory, services); - } - /// /// Retrieves an existing server side agent, wrapped as a using the provided . /// @@ -366,122 +294,6 @@ public static async Task CreateAIAgentAsync( return await persistentAgentsClient.GetAIAgentAsync(createPersistentAgentResponse.Value.Id, clientFactory: clientFactory, services: services, cancellationToken: cancellationToken).ConfigureAwait(false); } - /// - /// Creates a new server side agent using the provided . - /// - /// The to create the agent with. - /// The model to be used by the agent. - /// The name of the agent. - /// The description of the agent. - /// The instructions for the agent. - /// The tools to be used by the agent. - /// The resources for the tools. - /// The temperature setting for the agent. - /// The top-p setting for the agent. - /// The response format for the agent. - /// The metadata for the agent. - /// Provides a way to customize the creation of the underlying used by the agent. - /// An optional to use for resolving services required by the instances being invoked. - /// The to monitor for cancellation requests. The default is . - /// A instance that can be used to perform operations on the newly created agent. - public static ChatClientAgent CreateAIAgent( - this PersistentAgentsClient persistentAgentsClient, - string model, - string? name = null, - string? description = null, - string? 
instructions = null, - IEnumerable? tools = null, - ToolResources? toolResources = null, - float? temperature = null, - float? topP = null, - BinaryData? responseFormat = null, - IReadOnlyDictionary? metadata = null, - Func? clientFactory = null, - IServiceProvider? services = null, - CancellationToken cancellationToken = default) - { - if (persistentAgentsClient is null) - { - throw new ArgumentNullException(nameof(persistentAgentsClient)); - } - - var createPersistentAgentResponse = persistentAgentsClient.Administration.CreateAgent( - model: model, - name: name, - description: description, - instructions: instructions, - tools: tools, - toolResources: toolResources, - temperature: temperature, - topP: topP, - responseFormat: responseFormat, - metadata: metadata, - cancellationToken: cancellationToken); - - // Get a local proxy for the agent to work with. - return persistentAgentsClient.GetAIAgent(createPersistentAgentResponse.Value.Id, clientFactory: clientFactory, services: services, cancellationToken: cancellationToken); - } - - /// - /// Creates a new server side agent using the provided . - /// - /// The to create the agent with. - /// The model to be used by the agent. - /// Full set of options to configure the agent. - /// Provides a way to customize the creation of the underlying used by the agent. - /// An optional to use for resolving services required by the instances being invoked. - /// The to monitor for cancellation requests. The default is . - /// A instance that can be used to perform operations on the newly created agent. - /// Thrown when or or is . - /// Thrown when is empty or whitespace. - public static ChatClientAgent CreateAIAgent( - this PersistentAgentsClient persistentAgentsClient, - string model, - ChatClientAgentOptions options, - Func? clientFactory = null, - IServiceProvider? 
services = null, - CancellationToken cancellationToken = default) - { - if (persistentAgentsClient is null) - { - throw new ArgumentNullException(nameof(persistentAgentsClient)); - } - - if (string.IsNullOrWhiteSpace(model)) - { - throw new ArgumentException($"{nameof(model)} should not be null or whitespace.", nameof(model)); - } - - if (options is null) - { - throw new ArgumentNullException(nameof(options)); - } - - var toolDefinitionsAndResources = ConvertAIToolsToToolDefinitions(options.ChatOptions?.Tools); - - var createPersistentAgentResponse = persistentAgentsClient.Administration.CreateAgent( - model: model, - name: options.Name, - description: options.Description, - instructions: options.ChatOptions?.Instructions, - tools: toolDefinitionsAndResources.ToolDefinitions, - toolResources: toolDefinitionsAndResources.ToolResources, - temperature: null, - topP: null, - responseFormat: null, - metadata: null, - cancellationToken: cancellationToken); - - if (options.ChatOptions?.Tools is { Count: > 0 } && (toolDefinitionsAndResources.FunctionToolsAndOtherTools is null || options.ChatOptions.Tools.Count != toolDefinitionsAndResources.FunctionToolsAndOtherTools.Count)) - { - options = options.Clone(); - options.ChatOptions!.Tools = toolDefinitionsAndResources.FunctionToolsAndOtherTools; - } - - // Get a local proxy for the agent to work with. - return persistentAgentsClient.GetAIAgent(createPersistentAgentResponse.Value.Id, options, clientFactory: clientFactory, services: services, cancellationToken: cancellationToken); - } - /// /// Creates a new server side agent using the provided . 
/// diff --git a/dotnet/src/Microsoft.Agents.AI.AzureAI/AzureAIProjectChatClientExtensions.cs b/dotnet/src/Microsoft.Agents.AI.AzureAI/AzureAIProjectChatClientExtensions.cs index 8e03a33be3..37ee7fa82c 100644 --- a/dotnet/src/Microsoft.Agents.AI.AzureAI/AzureAIProjectChatClientExtensions.cs +++ b/dotnet/src/Microsoft.Agents.AI.AzureAI/AzureAIProjectChatClientExtensions.cs @@ -27,7 +27,7 @@ namespace Azure.AI.Projects; public static partial class AzureAIProjectChatClientExtensions { /// - /// Retrieves an existing server side agent, wrapped as a using the provided . + /// Uses an existing server side agent, wrapped as a using the provided and . /// /// The to create the with. Cannot be . /// The representing the name and version of the server side agent to create a for. Cannot be . @@ -38,10 +38,10 @@ public static partial class AzureAIProjectChatClientExtensions /// Thrown when or is . /// The agent with the specified name was not found. /// - /// When retrieving an agent by using an , minimal information will be available about the agent in the instance level, and any logic that relies + /// When instantiating a by using an , minimal information will be available about the agent in the instance level, and any logic that relies /// on to retrieve information about the agent like will receive as the result. /// - public static ChatClientAgent GetAIAgent( + public static ChatClientAgent AsAIAgent( this AIProjectClient aiProjectClient, AgentReference agentReference, IList? tools = null, @@ -52,7 +52,7 @@ public static ChatClientAgent GetAIAgent( Throw.IfNull(agentReference); ThrowIfInvalidAgentName(agentReference.Name); - return CreateChatClientAgent( + return AsChatClientAgent( aiProjectClient, agentReference, new ChatClientAgentOptions() @@ -65,40 +65,6 @@ public static ChatClientAgent GetAIAgent( services); } - /// - /// Retrieves an existing server side agent, wrapped as a using the provided . - /// - /// The to create the with. Cannot be . 
- /// The name of the server side agent to create a for. Cannot be or whitespace. - /// The tools to use when interacting with the agent. This is required when using prompt agent definitions with tools. - /// Provides a way to customize the creation of the underlying used by the agent. - /// An optional to use for resolving services required by the instances being invoked. - /// The to monitor for cancellation requests. The default is . - /// A instance that can be used to perform operations based on the latest version of the named Azure AI Agent. - /// Thrown when or is . - /// Thrown when is empty or whitespace, or when the agent with the specified name was not found. - /// The agent with the specified name was not found. - public static ChatClientAgent GetAIAgent( - this AIProjectClient aiProjectClient, - string name, - IList? tools = null, - Func? clientFactory = null, - IServiceProvider? services = null, - CancellationToken cancellationToken = default) - { - Throw.IfNull(aiProjectClient); - ThrowIfInvalidAgentName(name); - - AgentRecord agentRecord = GetAgentRecordByName(aiProjectClient, name, cancellationToken); - - return AsAIAgent( - aiProjectClient, - agentRecord, - tools, - clientFactory, - services); - } - /// /// Asynchronously retrieves an existing server side agent, wrapped as a using the provided . /// @@ -134,7 +100,7 @@ public static async Task GetAIAgentAsync( } /// - /// Gets a runnable agent instance from the provided agent record. + /// Uses an existing server side agent, wrapped as a using the provided and . /// /// The client used to interact with Azure AI Agents. Cannot be . /// The agent record to be converted. The latest version will be used. Cannot be . 
@@ -155,7 +121,7 @@ public static ChatClientAgent AsAIAgent( var allowDeclarativeMode = tools is not { Count: > 0 }; - return CreateChatClientAgent( + return AsChatClientAgent( aiProjectClient, agentRecord, tools, @@ -165,7 +131,7 @@ public static ChatClientAgent AsAIAgent( } /// - /// Gets a runnable agent instance from a containing metadata about an Azure AI Agent. + /// Uses an existing server side agent, wrapped as a using the provided and . /// /// The client used to interact with Azure AI Agents. Cannot be . /// The agent version to be converted. Cannot be . @@ -186,7 +152,7 @@ public static ChatClientAgent AsAIAgent( var allowDeclarativeMode = tools is not { Count: > 0 }; - return CreateChatClientAgent( + return AsChatClientAgent( aiProjectClient, agentVersion, tools, @@ -196,47 +162,7 @@ public static ChatClientAgent AsAIAgent( } /// - /// Creates a new Prompt AI Agent using the provided and options. - /// - /// The client used to manage and interact with AI agents. Cannot be . - /// The options for creating the agent. Cannot be . - /// A factory function to customize the creation of the chat client used by the agent. - /// An optional to use for resolving services required by the instances being invoked. - /// A to cancel the operation if needed. - /// A instance that can be used to perform operations on the newly created agent. - /// Thrown when or is . - public static ChatClientAgent GetAIAgent( - this AIProjectClient aiProjectClient, - ChatClientAgentOptions options, - Func? clientFactory = null, - IServiceProvider? 
services = null, - CancellationToken cancellationToken = default) - { - Throw.IfNull(aiProjectClient); - Throw.IfNull(options); - - if (string.IsNullOrWhiteSpace(options.Name)) - { - throw new ArgumentException("Agent name must be provided in the options.Name property", nameof(options)); - } - - ThrowIfInvalidAgentName(options.Name); - - AgentRecord agentRecord = GetAgentRecordByName(aiProjectClient, options.Name, cancellationToken); - var agentVersion = agentRecord.Versions.Latest; - - var agentOptions = CreateChatClientAgentOptions(agentVersion, options, requireInvocableTools: true); - - return CreateChatClientAgent( - aiProjectClient, - agentVersion, - agentOptions, - clientFactory, - services); - } - - /// - /// Creates a new Prompt AI Agent using the provided and options. + /// Asynchronously retrieves an existing server side agent, wrapped as a using the provided . /// /// The client used to manage and interact with AI agents. Cannot be . /// The options for creating the agent. Cannot be . @@ -267,7 +193,7 @@ public static async Task GetAIAgentAsync( var agentOptions = CreateChatClientAgentOptions(agentVersion, options, requireInvocableTools: true); - return CreateChatClientAgent( + return AsChatClientAgent( aiProjectClient, agentVersion, agentOptions, @@ -276,49 +202,7 @@ public static async Task GetAIAgentAsync( } /// - /// Creates a new Prompt AI agent using the specified configuration parameters. - /// - /// The client used to manage and interact with AI agents. Cannot be . - /// The name for the agent. - /// The name of the model to use for the agent. Cannot be or whitespace. - /// The instructions that guide the agent's behavior. Cannot be or whitespace. - /// The description for the agent. - /// The tools to use when interacting with the agent, this is required when using prompt agent definitions with tools. - /// A factory function to customize the creation of the chat client used by the agent. 
- /// An optional to use for resolving services required by the instances being invoked. - /// A token to monitor for cancellation requests. - /// A instance that can be used to perform operations on the newly created agent. - /// Thrown when , , or is . - /// Thrown when or is empty or whitespace. - /// When using prompt agent definitions with tools the parameter needs to be provided. - public static ChatClientAgent CreateAIAgent( - this AIProjectClient aiProjectClient, - string name, - string model, - string instructions, - string? description = null, - IList? tools = null, - Func? clientFactory = null, - IServiceProvider? services = null, - CancellationToken cancellationToken = default) - { - Throw.IfNull(aiProjectClient); - ThrowIfInvalidAgentName(name); - Throw.IfNullOrWhitespace(model); - Throw.IfNullOrWhitespace(instructions); - - return CreateAIAgent( - aiProjectClient, - name, - tools, - new AgentVersionCreationOptions(new PromptAgentDefinition(model) { Instructions = instructions }) { Description = description }, - clientFactory, - services, - cancellationToken); - } - - /// - /// Creates a new Prompt AI agent using the specified configuration parameters. + /// Creates a new Prompt AI agent in the Foundry service using the specified configuration parameters, and exposes it as a . /// /// The client used to manage and interact with AI agents. Cannot be . /// The name for the agent. @@ -360,73 +244,7 @@ public static Task CreateAIAgentAsync( } /// - /// Creates a new Prompt AI Agent using the provided and options. - /// - /// The client used to manage and interact with AI agents. Cannot be . - /// The name of the model to use for the agent. Cannot be or whitespace. - /// The options for creating the agent. Cannot be . - /// A factory function to customize the creation of the chat client used by the agent. - /// An optional to use for resolving services required by the instances being invoked. - /// A to cancel the operation if needed. 
- /// A instance that can be used to perform operations on the newly created agent. - /// Thrown when or is . - /// Thrown when is empty or whitespace, or when the agent name is not provided in the options. - public static ChatClientAgent CreateAIAgent( - this AIProjectClient aiProjectClient, - string model, - ChatClientAgentOptions options, - Func? clientFactory = null, - IServiceProvider? services = null, - CancellationToken cancellationToken = default) - { - Throw.IfNull(aiProjectClient); - Throw.IfNull(options); - Throw.IfNullOrWhitespace(model); - const bool RequireInvocableTools = true; - - if (string.IsNullOrWhiteSpace(options.Name)) - { - throw new ArgumentException("Agent name must be provided in the options.Name property", nameof(options)); - } - - ThrowIfInvalidAgentName(options.Name); - - PromptAgentDefinition agentDefinition = new(model) - { - Instructions = options.ChatOptions?.Instructions, - Temperature = options.ChatOptions?.Temperature, - TopP = options.ChatOptions?.TopP, - TextOptions = new() { TextFormat = ToOpenAIResponseTextFormat(options.ChatOptions?.ResponseFormat, options.ChatOptions) } - }; - - // Attempt to capture breaking glass options from the raw representation factory that match the agent definition. - if (options.ChatOptions?.RawRepresentationFactory?.Invoke(new NoOpChatClient()) is CreateResponseOptions respCreationOptions) - { - agentDefinition.ReasoningOptions = respCreationOptions.ReasoningOptions; - } - - ApplyToolsToAgentDefinition(agentDefinition, options.ChatOptions?.Tools); - - AgentVersionCreationOptions? 
creationOptions = new(agentDefinition); - if (!string.IsNullOrWhiteSpace(options.Description)) - { - creationOptions.Description = options.Description; - } - - AgentVersion agentVersion = CreateAgentVersionWithProtocol(aiProjectClient, options.Name, creationOptions, cancellationToken); - - var agentOptions = CreateChatClientAgentOptions(agentVersion, options, RequireInvocableTools); - - return CreateChatClientAgent( - aiProjectClient, - agentVersion, - agentOptions, - clientFactory, - services); - } - - /// - /// Creates a new Prompt AI Agent using the provided and options. + /// Creates a new Prompt AI agent in the Foundry service using the specified configuration parameters, and exposes it as a . /// /// The client used to manage and interact with AI agents. Cannot be . /// The name of the model to use for the agent. Cannot be or whitespace. @@ -483,7 +301,7 @@ public static async Task CreateAIAgentAsync( var agentOptions = CreateChatClientAgentOptions(agentVersion, options, RequireInvocableTools); - return CreateChatClientAgent( + return AsChatClientAgent( aiProjectClient, agentVersion, agentOptions, @@ -492,42 +310,7 @@ public static async Task CreateAIAgentAsync( } /// - /// Creates a new AI agent using the specified agent definition and optional configuration parameters. - /// - /// The client used to manage and interact with AI agents. Cannot be . - /// The name for the agent. - /// Settings that control the creation of the agent. - /// A factory function to customize the creation of the chat client used by the agent. - /// A token to monitor for cancellation requests. - /// A instance that can be used to perform operations on the newly created agent. - /// Thrown when or is . - /// - /// When using this extension method with a the tools are only declarative and not invocable. - /// Invocation of any in-process tools will need to be handled manually. 
- /// - public static ChatClientAgent CreateAIAgent( - this AIProjectClient aiProjectClient, - string name, - AgentVersionCreationOptions creationOptions, - Func? clientFactory = null, - CancellationToken cancellationToken = default) - { - Throw.IfNull(aiProjectClient); - ThrowIfInvalidAgentName(name); - Throw.IfNull(creationOptions); - - return CreateAIAgent( - aiProjectClient, - name, - tools: null, - creationOptions, - clientFactory, - services: null, - cancellationToken); - } - - /// - /// Asynchronously creates a new AI agent using the specified agent definition and optional configuration + /// Creates a new Prompt AI agent in the Foundry service using the specified configuration parameters, and exposes it as a . /// parameters. /// /// The client used to manage and interact with AI agents. Cannot be . @@ -566,18 +349,6 @@ public static Task CreateAIAgentAsync( private static readonly ModelReaderWriterOptions s_modelWriterOptionsWire = new("W"); - /// - /// Retrieves an agent record by name using the Protocol method with user-agent header. - /// - private static AgentRecord GetAgentRecordByName(AIProjectClient aiProjectClient, string agentName, CancellationToken cancellationToken) - { - ClientResult protocolResponse = aiProjectClient.Agents.GetAgent(agentName, cancellationToken.ToRequestOptions(false)); - var rawResponse = protocolResponse.GetRawResponse(); - AgentRecord? result = ModelReaderWriter.Read(rawResponse.Content, s_modelWriterOptionsWire, AzureAIProjectsOpenAIContext.Default); - return ClientResult.FromOptionalValue(result, rawResponse).Value! - ?? throw new InvalidOperationException($"Agent with name '{agentName}' not found."); - } - /// /// Asynchronously retrieves an agent record by name using the Protocol method with user-agent header. /// @@ -590,19 +361,6 @@ private static async Task GetAgentRecordByNameAsync(AIProjectClient ?? 
throw new InvalidOperationException($"Agent with name '{agentName}' not found."); } - /// - /// Creates an agent version using the Protocol method with user-agent header. - /// - private static AgentVersion CreateAgentVersionWithProtocol(AIProjectClient aiProjectClient, string agentName, AgentVersionCreationOptions creationOptions, CancellationToken cancellationToken) - { - using BinaryContent protocolRequest = BinaryContent.Create(ModelReaderWriter.Write(creationOptions, ModelReaderWriterOptions.Json, AzureAIProjectsContext.Default)); - ClientResult protocolResponse = aiProjectClient.Agents.CreateAgentVersion(agentName, protocolRequest, cancellationToken.ToRequestOptions(false)); - - var rawResponse = protocolResponse.GetRawResponse(); - AgentVersion? result = ModelReaderWriter.Read(rawResponse.Content, s_modelWriterOptionsWire, AzureAIProjectsOpenAIContext.Default); - return ClientResult.FromValue(result, rawResponse).Value!; - } - /// /// Asynchronously creates an agent version using the Protocol method with user-agent header. /// @@ -616,33 +374,6 @@ private static async Task CreateAgentVersionWithProtocolAsync(AIPr return ClientResult.FromValue(result, rawResponse).Value!; } - private static ChatClientAgent CreateAIAgent( - this AIProjectClient aiProjectClient, - string name, - IList? tools, - AgentVersionCreationOptions creationOptions, - Func? clientFactory, - IServiceProvider? 
services, - CancellationToken cancellationToken) - { - var allowDeclarativeMode = tools is not { Count: > 0 }; - - if (!allowDeclarativeMode) - { - ApplyToolsToAgentDefinition(creationOptions.Definition, tools); - } - - AgentVersion agentVersion = CreateAgentVersionWithProtocol(aiProjectClient, name, creationOptions, cancellationToken); - - return CreateChatClientAgent( - aiProjectClient, - agentVersion, - tools, - clientFactory, - !allowDeclarativeMode, - services); - } - private static async Task CreateAIAgentAsync( this AIProjectClient aiProjectClient, string name, @@ -661,7 +392,7 @@ private static async Task CreateAIAgentAsync( AgentVersion agentVersion = await CreateAgentVersionWithProtocolAsync(aiProjectClient, name, creationOptions, cancellationToken).ConfigureAwait(false); - return CreateChatClientAgent( + return AsChatClientAgent( aiProjectClient, agentVersion, tools, @@ -671,7 +402,7 @@ private static async Task CreateAIAgentAsync( } /// This method creates an with the specified ChatClientAgentOptions. - private static ChatClientAgent CreateChatClientAgent( + private static ChatClientAgent AsChatClientAgent( AIProjectClient aiProjectClient, AgentVersion agentVersion, ChatClientAgentOptions agentOptions, @@ -689,7 +420,7 @@ private static ChatClientAgent CreateChatClientAgent( } /// This method creates an with the specified ChatClientAgentOptions. - private static ChatClientAgent CreateChatClientAgent( + private static ChatClientAgent AsChatClientAgent( AIProjectClient aiProjectClient, AgentRecord agentRecord, ChatClientAgentOptions agentOptions, @@ -707,7 +438,7 @@ private static ChatClientAgent CreateChatClientAgent( } /// This method creates an with the specified ChatClientAgentOptions. 
- private static ChatClientAgent CreateChatClientAgent( + private static ChatClientAgent AsChatClientAgent( AIProjectClient aiProjectClient, AgentReference agentReference, ChatClientAgentOptions agentOptions, @@ -725,14 +456,14 @@ private static ChatClientAgent CreateChatClientAgent( } /// This method creates an with a auto-generated ChatClientAgentOptions from the specified configuration parameters. - private static ChatClientAgent CreateChatClientAgent( + private static ChatClientAgent AsChatClientAgent( AIProjectClient AIProjectClient, AgentVersion agentVersion, IList? tools, Func? clientFactory, bool requireInvocableTools, IServiceProvider? services) - => CreateChatClientAgent( + => AsChatClientAgent( AIProjectClient, agentVersion, CreateChatClientAgentOptions(agentVersion, new ChatOptions() { Tools = tools }, requireInvocableTools), @@ -740,14 +471,14 @@ private static ChatClientAgent CreateChatClientAgent( services); /// This method creates an with a auto-generated ChatClientAgentOptions from the specified configuration parameters. - private static ChatClientAgent CreateChatClientAgent( + private static ChatClientAgent AsChatClientAgent( AIProjectClient AIProjectClient, AgentRecord agentRecord, IList? tools, Func? clientFactory, bool requireInvocableTools, IServiceProvider? 
services) - => CreateChatClientAgent( + => AsChatClientAgent( AIProjectClient, agentRecord, CreateChatClientAgentOptions(agentRecord.Versions.Latest, new ChatOptions() { Tools = tools }, requireInvocableTools), @@ -852,7 +583,7 @@ private static ChatClientAgentOptions CreateChatClientAgentOptions(AgentVersion if (options is not null) { agentOptions.AIContextProviderFactory = options.AIContextProviderFactory; - agentOptions.ChatMessageStoreFactory = options.ChatMessageStoreFactory; + agentOptions.ChatHistoryProviderFactory = options.ChatHistoryProviderFactory; agentOptions.UseProvidedChatClientAsIs = options.UseProvidedChatClientAsIs; } diff --git a/dotnet/src/Microsoft.Agents.AI.CosmosNoSql/CosmosChatMessageStore.cs b/dotnet/src/Microsoft.Agents.AI.CosmosNoSql/CosmosChatHistoryProvider.cs similarity index 86% rename from dotnet/src/Microsoft.Agents.AI.CosmosNoSql/CosmosChatMessageStore.cs rename to dotnet/src/Microsoft.Agents.AI.CosmosNoSql/CosmosChatHistoryProvider.cs index 5c2c23ff9e..41c9a211dc 100644 --- a/dotnet/src/Microsoft.Agents.AI.CosmosNoSql/CosmosChatMessageStore.cs +++ b/dotnet/src/Microsoft.Agents.AI.CosmosNoSql/CosmosChatHistoryProvider.cs @@ -15,11 +15,11 @@ namespace Microsoft.Agents.AI; /// -/// Provides a Cosmos DB implementation of the abstract class. +/// Provides a Cosmos DB implementation of the abstract class. 
/// -[RequiresUnreferencedCode("The CosmosChatMessageStore uses JSON serialization which is incompatible with trimming.")] -[RequiresDynamicCode("The CosmosChatMessageStore uses JSON serialization which is incompatible with NativeAOT.")] -public sealed class CosmosChatMessageStore : ChatMessageStore, IDisposable +[RequiresUnreferencedCode("The CosmosChatHistoryProvider uses JSON serialization which is incompatible with trimming.")] +[RequiresDynamicCode("The CosmosChatHistoryProvider uses JSON serialization which is incompatible with NativeAOT.")] +public sealed class CosmosChatHistoryProvider : ChatHistoryProvider, IDisposable { private readonly CosmosClient _cosmosClient; private readonly Container _container; @@ -60,7 +60,7 @@ private static JsonSerializerOptions CreateDefaultJsonOptions() public int MaxBatchSize { get; set; } = 100; /// - /// Gets or sets the maximum number of messages to retrieve from the store. + /// Gets or sets the maximum number of messages to retrieve from the provider. /// This helps prevent exceeding LLM context windows in long conversations. /// Default is null (no limit). When set, only the most recent messages are returned. /// @@ -73,17 +73,17 @@ private static JsonSerializerOptions CreateDefaultJsonOptions() public int? MessageTtlSeconds { get; set; } = 86400; /// - /// Gets the conversation ID associated with this message store. + /// Gets the conversation ID associated with this provider. /// public string ConversationId { get; init; } /// - /// Gets the database ID associated with this message store. + /// Gets the database ID associated with this provider. /// public string DatabaseId { get; init; } /// - /// Gets the container ID associated with this message store. + /// Gets the container ID associated with this provider. /// public string ContainerId { get; init; } @@ -97,7 +97,7 @@ private static JsonSerializerOptions CreateDefaultJsonOptions() /// Whether this instance owns the CosmosClient and should dispose it. 
/// Optional tenant identifier for hierarchical partitioning. /// Optional user identifier for hierarchical partitioning. - internal CosmosChatMessageStore(CosmosClient cosmosClient, string databaseId, string containerId, string conversationId, bool ownsClient, string? tenantId = null, string? userId = null) + internal CosmosChatHistoryProvider(CosmosClient cosmosClient, string databaseId, string containerId, string conversationId, bool ownsClient, string? tenantId = null, string? userId = null) { this._cosmosClient = Throw.IfNull(cosmosClient); this._container = this._cosmosClient.GetContainer(Throw.IfNullOrWhitespace(databaseId), Throw.IfNullOrWhitespace(containerId)); @@ -121,20 +121,20 @@ internal CosmosChatMessageStore(CosmosClient cosmosClient, string databaseId, st } /// - /// Initializes a new instance of the class using a connection string. + /// Initializes a new instance of the class using a connection string. /// /// The Cosmos DB connection string. /// The identifier of the Cosmos DB database. /// The identifier of the Cosmos DB container. /// Thrown when any required parameter is null. /// Thrown when any string parameter is null or whitespace. - public CosmosChatMessageStore(string connectionString, string databaseId, string containerId) + public CosmosChatHistoryProvider(string connectionString, string databaseId, string containerId) : this(connectionString, databaseId, containerId, Guid.NewGuid().ToString("N")) { } /// - /// Initializes a new instance of the class using a connection string. + /// Initializes a new instance of the class using a connection string. /// /// The Cosmos DB connection string. /// The identifier of the Cosmos DB database. @@ -142,13 +142,13 @@ public CosmosChatMessageStore(string connectionString, string databaseId, string /// The unique identifier for this conversation thread. /// Thrown when any required parameter is null. /// Thrown when any string parameter is null or whitespace. 
- public CosmosChatMessageStore(string connectionString, string databaseId, string containerId, string conversationId) + public CosmosChatHistoryProvider(string connectionString, string databaseId, string containerId, string conversationId) : this(new CosmosClient(Throw.IfNullOrWhitespace(connectionString)), databaseId, containerId, conversationId, ownsClient: true) { } /// - /// Initializes a new instance of the class using TokenCredential for authentication. + /// Initializes a new instance of the class using TokenCredential for authentication. /// /// The Cosmos DB account endpoint URI. /// The TokenCredential to use for authentication (e.g., DefaultAzureCredential, ManagedIdentityCredential). @@ -156,13 +156,13 @@ public CosmosChatMessageStore(string connectionString, string databaseId, string /// The identifier of the Cosmos DB container. /// Thrown when any required parameter is null. /// Thrown when any string parameter is null or whitespace. - public CosmosChatMessageStore(string accountEndpoint, TokenCredential tokenCredential, string databaseId, string containerId) + public CosmosChatHistoryProvider(string accountEndpoint, TokenCredential tokenCredential, string databaseId, string containerId) : this(accountEndpoint, tokenCredential, databaseId, containerId, Guid.NewGuid().ToString("N")) { } /// - /// Initializes a new instance of the class using a TokenCredential for authentication. + /// Initializes a new instance of the class using a TokenCredential for authentication. /// /// The Cosmos DB account endpoint URI. /// The TokenCredential to use for authentication (e.g., DefaultAzureCredential, ManagedIdentityCredential). @@ -171,26 +171,26 @@ public CosmosChatMessageStore(string accountEndpoint, TokenCredential tokenCrede /// The unique identifier for this conversation thread. /// Thrown when any required parameter is null. /// Thrown when any string parameter is null or whitespace. 
- public CosmosChatMessageStore(string accountEndpoint, TokenCredential tokenCredential, string databaseId, string containerId, string conversationId) + public CosmosChatHistoryProvider(string accountEndpoint, TokenCredential tokenCredential, string databaseId, string containerId, string conversationId) : this(new CosmosClient(Throw.IfNullOrWhitespace(accountEndpoint), Throw.IfNull(tokenCredential)), databaseId, containerId, conversationId, ownsClient: true) { } /// - /// Initializes a new instance of the class using an existing . + /// Initializes a new instance of the class using an existing . /// /// The instance to use for Cosmos DB operations. /// The identifier of the Cosmos DB database. /// The identifier of the Cosmos DB container. /// Thrown when is null. /// Thrown when any string parameter is null or whitespace. - public CosmosChatMessageStore(CosmosClient cosmosClient, string databaseId, string containerId) + public CosmosChatHistoryProvider(CosmosClient cosmosClient, string databaseId, string containerId) : this(cosmosClient, databaseId, containerId, Guid.NewGuid().ToString("N")) { } /// - /// Initializes a new instance of the class using an existing . + /// Initializes a new instance of the class using an existing . /// /// The instance to use for Cosmos DB operations. /// The identifier of the Cosmos DB database. @@ -198,13 +198,13 @@ public CosmosChatMessageStore(CosmosClient cosmosClient, string databaseId, stri /// The unique identifier for this conversation thread. /// Thrown when is null. /// Thrown when any string parameter is null or whitespace. 
- public CosmosChatMessageStore(CosmosClient cosmosClient, string databaseId, string containerId, string conversationId) + public CosmosChatHistoryProvider(CosmosClient cosmosClient, string databaseId, string containerId, string conversationId) : this(cosmosClient, databaseId, containerId, conversationId, ownsClient: false) { } /// - /// Initializes a new instance of the class using a connection string with hierarchical partition keys. + /// Initializes a new instance of the class using a connection string with hierarchical partition keys. /// /// The Cosmos DB connection string. /// The identifier of the Cosmos DB database. @@ -214,13 +214,13 @@ public CosmosChatMessageStore(CosmosClient cosmosClient, string databaseId, stri /// The session identifier for hierarchical partitioning. /// Thrown when any required parameter is null. /// Thrown when any string parameter is null or whitespace. - public CosmosChatMessageStore(string connectionString, string databaseId, string containerId, string tenantId, string userId, string sessionId) + public CosmosChatHistoryProvider(string connectionString, string databaseId, string containerId, string tenantId, string userId, string sessionId) : this(new CosmosClient(Throw.IfNullOrWhitespace(connectionString)), databaseId, containerId, Throw.IfNullOrWhitespace(sessionId), ownsClient: true, Throw.IfNullOrWhitespace(tenantId), Throw.IfNullOrWhitespace(userId)) { } /// - /// Initializes a new instance of the class using a TokenCredential for authentication with hierarchical partition keys. + /// Initializes a new instance of the class using a TokenCredential for authentication with hierarchical partition keys. /// /// The Cosmos DB account endpoint URI. /// The TokenCredential to use for authentication (e.g., DefaultAzureCredential, ManagedIdentityCredential). @@ -231,13 +231,13 @@ public CosmosChatMessageStore(string connectionString, string databaseId, string /// The session identifier for hierarchical partitioning. 
/// Thrown when any required parameter is null. /// Thrown when any string parameter is null or whitespace. - public CosmosChatMessageStore(string accountEndpoint, TokenCredential tokenCredential, string databaseId, string containerId, string tenantId, string userId, string sessionId) + public CosmosChatHistoryProvider(string accountEndpoint, TokenCredential tokenCredential, string databaseId, string containerId, string tenantId, string userId, string sessionId) : this(new CosmosClient(Throw.IfNullOrWhitespace(accountEndpoint), Throw.IfNull(tokenCredential)), databaseId, containerId, Throw.IfNullOrWhitespace(sessionId), ownsClient: true, Throw.IfNullOrWhitespace(tenantId), Throw.IfNullOrWhitespace(userId)) { } /// - /// Initializes a new instance of the class using an existing with hierarchical partition keys. + /// Initializes a new instance of the class using an existing with hierarchical partition keys. /// /// The instance to use for Cosmos DB operations. /// The identifier of the Cosmos DB database. @@ -247,43 +247,43 @@ public CosmosChatMessageStore(string accountEndpoint, TokenCredential tokenCrede /// The session identifier for hierarchical partitioning. /// Thrown when is null. /// Thrown when any string parameter is null or whitespace. - public CosmosChatMessageStore(CosmosClient cosmosClient, string databaseId, string containerId, string tenantId, string userId, string sessionId) + public CosmosChatHistoryProvider(CosmosClient cosmosClient, string databaseId, string containerId, string tenantId, string userId, string sessionId) : this(cosmosClient, databaseId, containerId, Throw.IfNullOrWhitespace(sessionId), ownsClient: false, Throw.IfNullOrWhitespace(tenantId), Throw.IfNullOrWhitespace(userId)) { } /// - /// Creates a new instance of the class from previously serialized state. + /// Creates a new instance of the class from previously serialized state. /// /// The instance to use for Cosmos DB operations. 
- /// A representing the serialized state of the message store. + /// A representing the serialized state of the provider. /// The identifier of the Cosmos DB database. /// The identifier of the Cosmos DB container. /// Optional settings for customizing the JSON deserialization process. - /// A new instance of initialized from the serialized state. + /// A new instance of initialized from the serialized state. /// Thrown when is null. /// Thrown when the serialized state cannot be deserialized. - public static CosmosChatMessageStore CreateFromSerializedState(CosmosClient cosmosClient, JsonElement serializedStoreState, string databaseId, string containerId, JsonSerializerOptions? jsonSerializerOptions = null) + public static CosmosChatHistoryProvider CreateFromSerializedState(CosmosClient cosmosClient, JsonElement serializedState, string databaseId, string containerId, JsonSerializerOptions? jsonSerializerOptions = null) { Throw.IfNull(cosmosClient); Throw.IfNullOrWhitespace(databaseId); Throw.IfNullOrWhitespace(containerId); - if (serializedStoreState.ValueKind is not JsonValueKind.Object) + if (serializedState.ValueKind is not JsonValueKind.Object) { - throw new ArgumentException("Invalid serialized state", nameof(serializedStoreState)); + throw new ArgumentException("Invalid serialized state", nameof(serializedState)); } - var state = serializedStoreState.Deserialize(jsonSerializerOptions); + var state = serializedState.Deserialize(jsonSerializerOptions); if (state?.ConversationIdentifier is not { } conversationId) { - throw new ArgumentException("Invalid serialized state", nameof(serializedStoreState)); + throw new ArgumentException("Invalid serialized state", nameof(serializedState)); } // Use the internal constructor with all parameters to ensure partition key logic is centralized return state.UseHierarchicalPartitioning && state.TenantId != null && state.UserId != null - ? 
new CosmosChatMessageStore(cosmosClient, databaseId, containerId, conversationId, ownsClient: false, state.TenantId, state.UserId) - : new CosmosChatMessageStore(cosmosClient, databaseId, containerId, conversationId, ownsClient: false); + ? new CosmosChatHistoryProvider(cosmosClient, databaseId, containerId, conversationId, ownsClient: false, state.TenantId, state.UserId) + : new CosmosChatHistoryProvider(cosmosClient, databaseId, containerId, conversationId, ownsClient: false); } /// @@ -524,7 +524,7 @@ public override JsonElement Serialize(JsonSerializerOptions? jsonSerializerOptio } #pragma warning restore CA1513 - var state = new StoreState + var state = new State { ConversationIdentifier = this.ConversationId, TenantId = this._tenantId, @@ -632,7 +632,7 @@ public void Dispose() } } - private sealed class StoreState + private sealed class State { public string ConversationIdentifier { get; set; } = string.Empty; public string? TenantId { get; set; } diff --git a/dotnet/src/Microsoft.Agents.AI.CosmosNoSql/CosmosDBChatExtensions.cs b/dotnet/src/Microsoft.Agents.AI.CosmosNoSql/CosmosDBChatExtensions.cs index 45c0d09536..3d93e9dd6a 100644 --- a/dotnet/src/Microsoft.Agents.AI.CosmosNoSql/CosmosDBChatExtensions.cs +++ b/dotnet/src/Microsoft.Agents.AI.CosmosNoSql/CosmosDBChatExtensions.cs @@ -3,7 +3,7 @@ using System; using System.Diagnostics.CodeAnalysis; using System.Threading.Tasks; -using Azure.Identity; +using Azure.Core; using Microsoft.Azure.Cosmos; namespace Microsoft.Agents.AI; @@ -23,9 +23,9 @@ public static class CosmosDBChatExtensions /// The configured . /// Thrown when is null. /// Thrown when any string parameter is null or whitespace. 
- [RequiresUnreferencedCode("The CosmosChatMessageStore uses JSON serialization which is incompatible with trimming.")] - [RequiresDynamicCode("The CosmosChatMessageStore uses JSON serialization which is incompatible with NativeAOT.")] - public static ChatClientAgentOptions WithCosmosDBMessageStore( + [RequiresUnreferencedCode("The CosmosChatHistoryProvider uses JSON serialization which is incompatible with trimming.")] + [RequiresDynamicCode("The CosmosChatHistoryProvider uses JSON serialization which is incompatible with NativeAOT.")] + public static ChatClientAgentOptions WithCosmosDBChatHistoryProvider( this ChatClientAgentOptions options, string connectionString, string databaseId, @@ -36,7 +36,7 @@ public static ChatClientAgentOptions WithCosmosDBMessageStore( throw new ArgumentNullException(nameof(options)); } - options.ChatMessageStoreFactory = (context, ct) => new ValueTask(new CosmosChatMessageStore(connectionString, databaseId, containerId)); + options.ChatHistoryProviderFactory = (context, ct) => new ValueTask(new CosmosChatHistoryProvider(connectionString, databaseId, containerId)); return options; } @@ -47,23 +47,30 @@ public static ChatClientAgentOptions WithCosmosDBMessageStore( /// The Cosmos DB account endpoint URI. /// The identifier of the Cosmos DB database. /// The identifier of the Cosmos DB container. + /// The TokenCredential to use for authentication (e.g., DefaultAzureCredential, ManagedIdentityCredential). /// The configured . - /// Thrown when is null. + /// Thrown when or is null. /// Thrown when any string parameter is null or whitespace. 
- [RequiresUnreferencedCode("The CosmosChatMessageStore uses JSON serialization which is incompatible with trimming.")] - [RequiresDynamicCode("The CosmosChatMessageStore uses JSON serialization which is incompatible with NativeAOT.")] - public static ChatClientAgentOptions WithCosmosDBMessageStoreUsingManagedIdentity( + [RequiresUnreferencedCode("The CosmosChatHistoryProvider uses JSON serialization which is incompatible with trimming.")] + [RequiresDynamicCode("The CosmosChatHistoryProvider uses JSON serialization which is incompatible with NativeAOT.")] + public static ChatClientAgentOptions WithCosmosDBChatHistoryProviderUsingManagedIdentity( this ChatClientAgentOptions options, string accountEndpoint, string databaseId, - string containerId) + string containerId, + TokenCredential tokenCredential) { if (options is null) { throw new ArgumentNullException(nameof(options)); } - options.ChatMessageStoreFactory = (context, ct) => new ValueTask(new CosmosChatMessageStore(accountEndpoint, new DefaultAzureCredential(), databaseId, containerId)); + if (tokenCredential is null) + { + throw new ArgumentNullException(nameof(tokenCredential)); + } + + options.ChatHistoryProviderFactory = (context, ct) => new ValueTask(new CosmosChatHistoryProvider(accountEndpoint, tokenCredential, databaseId, containerId)); return options; } @@ -77,9 +84,9 @@ public static ChatClientAgentOptions WithCosmosDBMessageStoreUsingManagedIdentit /// The configured . /// Thrown when any required parameter is null. /// Thrown when any string parameter is null or whitespace. 
- [RequiresUnreferencedCode("The CosmosChatMessageStore uses JSON serialization which is incompatible with trimming.")] - [RequiresDynamicCode("The CosmosChatMessageStore uses JSON serialization which is incompatible with NativeAOT.")] - public static ChatClientAgentOptions WithCosmosDBMessageStore( + [RequiresUnreferencedCode("The CosmosChatHistoryProvider uses JSON serialization which is incompatible with trimming.")] + [RequiresDynamicCode("The CosmosChatHistoryProvider uses JSON serialization which is incompatible with NativeAOT.")] + public static ChatClientAgentOptions WithCosmosDBChatHistoryProvider( this ChatClientAgentOptions options, CosmosClient cosmosClient, string databaseId, @@ -90,7 +97,7 @@ public static ChatClientAgentOptions WithCosmosDBMessageStore( throw new ArgumentNullException(nameof(options)); } - options.ChatMessageStoreFactory = (context, ct) => new ValueTask(new CosmosChatMessageStore(cosmosClient, databaseId, containerId)); + options.ChatHistoryProviderFactory = (context, ct) => new ValueTask(new CosmosChatHistoryProvider(cosmosClient, databaseId, containerId)); return options; } } diff --git a/dotnet/src/Microsoft.Agents.AI.CosmosNoSql/CosmosDBWorkflowExtensions.cs b/dotnet/src/Microsoft.Agents.AI.CosmosNoSql/CosmosDBWorkflowExtensions.cs index 9d8bc52e68..4005808dbe 100644 --- a/dotnet/src/Microsoft.Agents.AI.CosmosNoSql/CosmosDBWorkflowExtensions.cs +++ b/dotnet/src/Microsoft.Agents.AI.CosmosNoSql/CosmosDBWorkflowExtensions.cs @@ -2,7 +2,7 @@ using System; using System.Diagnostics.CodeAnalysis; -using Azure.Identity; +using Azure.Core; using Microsoft.Agents.AI.Workflows.Checkpointing; using Microsoft.Azure.Cosmos; @@ -52,14 +52,17 @@ public static CosmosCheckpointStore CreateCheckpointStore( /// The Cosmos DB account endpoint URI. /// The identifier of the Cosmos DB database. /// The identifier of the Cosmos DB container. + /// The TokenCredential to use for authentication (e.g., DefaultAzureCredential, ManagedIdentityCredential). 
/// A new instance of . /// Thrown when any string parameter is null or whitespace. + /// Thrown when is null. [RequiresUnreferencedCode("The CosmosCheckpointStore uses JSON serialization which is incompatible with trimming.")] [RequiresDynamicCode("The CosmosCheckpointStore uses JSON serialization which is incompatible with NativeAOT.")] public static CosmosCheckpointStore CreateCheckpointStoreUsingManagedIdentity( string accountEndpoint, string databaseId, - string containerId) + string containerId, + TokenCredential tokenCredential) { if (string.IsNullOrWhiteSpace(accountEndpoint)) { @@ -76,7 +79,12 @@ public static CosmosCheckpointStore CreateCheckpointStoreUsingManagedIdentity( throw new ArgumentException("Cannot be null or whitespace", nameof(containerId)); } - return new CosmosCheckpointStore(accountEndpoint, new DefaultAzureCredential(), databaseId, containerId); + if (tokenCredential is null) + { + throw new ArgumentNullException(nameof(tokenCredential)); + } + + return new CosmosCheckpointStore(accountEndpoint, tokenCredential, databaseId, containerId); } /// @@ -154,14 +162,17 @@ public static CosmosCheckpointStore CreateCheckpointStore( /// The Cosmos DB account endpoint URI. /// The identifier of the Cosmos DB database. /// The identifier of the Cosmos DB container. + /// The TokenCredential to use for authentication (e.g., DefaultAzureCredential, ManagedIdentityCredential). /// A new instance of . /// Thrown when any string parameter is null or whitespace. + /// Thrown when is null. 
[RequiresUnreferencedCode("The CosmosCheckpointStore uses JSON serialization which is incompatible with trimming.")] [RequiresDynamicCode("The CosmosCheckpointStore uses JSON serialization which is incompatible with NativeAOT.")] public static CosmosCheckpointStore CreateCheckpointStoreUsingManagedIdentity( string accountEndpoint, string databaseId, - string containerId) + string containerId, + TokenCredential tokenCredential) { if (string.IsNullOrWhiteSpace(accountEndpoint)) { @@ -178,7 +189,12 @@ public static CosmosCheckpointStore CreateCheckpointStoreUsingManagedIdentity throw new ArgumentException("Cannot be null or whitespace", nameof(containerId)); } - return new CosmosCheckpointStore(accountEndpoint, new DefaultAzureCredential(), databaseId, containerId); + if (tokenCredential is null) + { + throw new ArgumentNullException(nameof(tokenCredential)); + } + + return new CosmosCheckpointStore(accountEndpoint, tokenCredential, databaseId, containerId); } /// diff --git a/dotnet/src/Microsoft.Agents.AI.CosmosNoSql/Microsoft.Agents.AI.CosmosNoSql.csproj b/dotnet/src/Microsoft.Agents.AI.CosmosNoSql/Microsoft.Agents.AI.CosmosNoSql.csproj index 7e13ec5998..f6f80b3dea 100644 --- a/dotnet/src/Microsoft.Agents.AI.CosmosNoSql/Microsoft.Agents.AI.CosmosNoSql.csproj +++ b/dotnet/src/Microsoft.Agents.AI.CosmosNoSql/Microsoft.Agents.AI.CosmosNoSql.csproj @@ -21,7 +21,7 @@ Microsoft Agent Framework Cosmos DB NoSQL Integration - Provides Cosmos DB NoSQL implementations for Microsoft Agent Framework storage abstractions including ChatMessageStore and CheckpointStore. + Provides Cosmos DB NoSQL implementations for Microsoft Agent Framework storage abstractions including ChatHistoryProvider and CheckpointStore. 
diff --git a/dotnet/src/Microsoft.Agents.AI.DurableTask/AgentSessionId.cs b/dotnet/src/Microsoft.Agents.AI.DurableTask/AgentSessionId.cs index f183ec84dc..6d603e1491 100644 --- a/dotnet/src/Microsoft.Agents.AI.DurableTask/AgentSessionId.cs +++ b/dotnet/src/Microsoft.Agents.AI.DurableTask/AgentSessionId.cs @@ -26,13 +26,6 @@ public AgentSessionId(string name, string key) this._entityId = new EntityInstanceId(ToEntityName(name), key); } - /// - /// Converts an agent name to its underlying entity name representation. - /// - /// The agent name. - /// The entity name used by Durable Task for this agent. - public static string ToEntityName(string name) => $"{EntityNamePrefix}{name}"; - /// /// Gets the name of the agent that owns the session. Names are case-insensitive. /// @@ -43,6 +36,17 @@ public AgentSessionId(string name, string key) /// public string Key => this._entityId.Key; + /// + /// Converts an agent name to its underlying entity name representation. + /// + /// The agent name. + /// The entity name used by Durable Task for this agent. + internal static string ToEntityName(string name) => $"{EntityNamePrefix}{name}"; + + /// + /// Converts the to an . + /// + /// The representation of the . internal EntityInstanceId ToEntityId() => this._entityId; /// diff --git a/dotnet/src/Microsoft.Agents.AI.OpenAI/Extensions/OpenAIAssistantClientExtensions.cs b/dotnet/src/Microsoft.Agents.AI.OpenAI/Extensions/OpenAIAssistantClientExtensions.cs index 291ff56091..d167d1f0b4 100644 --- a/dotnet/src/Microsoft.Agents.AI.OpenAI/Extensions/OpenAIAssistantClientExtensions.cs +++ b/dotnet/src/Microsoft.Agents.AI.OpenAI/Extensions/OpenAIAssistantClientExtensions.cs @@ -93,39 +93,6 @@ public static ChatClientAgent AsAIAgent( }, services: services); } - /// - /// Retrieves an existing server side agent, wrapped as a using the provided . - /// - /// The to create the with. - /// The ID of the server side agent to create a for. - /// Options that should apply to all runs of the agent. 
- /// Provides a way to customize the creation of the underlying used by the agent. - /// An optional to use for resolving services required by the instances being invoked. - /// The to monitor for cancellation requests. The default is . - /// A instance that can be used to perform operations on the assistant agent. - [Obsolete("The Assistants API has been deprecated. Please use the Responses API instead.")] - public static ChatClientAgent GetAIAgent( - this AssistantClient assistantClient, - string agentId, - ChatOptions? chatOptions = null, - Func? clientFactory = null, - IServiceProvider? services = null, - CancellationToken cancellationToken = default) - { - if (assistantClient is null) - { - throw new ArgumentNullException(nameof(assistantClient)); - } - - if (string.IsNullOrWhiteSpace(agentId)) - { - throw new ArgumentException($"{nameof(agentId)} should not be null or whitespace.", nameof(agentId)); - } - - var assistant = assistantClient.GetAssistant(agentId, cancellationToken); - return assistantClient.AsAIAgent(assistant, chatOptions, clientFactory, services); - } - /// /// Retrieves an existing server side agent, wrapped as a using the provided . /// @@ -238,53 +205,13 @@ public static ChatClientAgent AsAIAgent( Description = options.Description ?? assistantMetadata.Description, ChatOptions = options.ChatOptions, AIContextProviderFactory = options.AIContextProviderFactory, - ChatMessageStoreFactory = options.ChatMessageStoreFactory, + ChatHistoryProviderFactory = options.ChatHistoryProviderFactory, UseProvidedChatClientAsIs = options.UseProvidedChatClientAsIs }; return new ChatClientAgent(chatClient, mergedOptions, services: services); } - /// - /// Retrieves an existing server side agent, wrapped as a using the provided . - /// - /// The to create the with. - /// The ID of the server side agent to create a for. - /// Full set of options to configure the agent. - /// Provides a way to customize the creation of the underlying used by the agent. 
- /// An optional to use for resolving services required by the instances being invoked. - /// The to monitor for cancellation requests. The default is . - /// A instance that can be used to perform operations on the assistant agent. - /// or is . - /// is empty or whitespace. - [Obsolete("The Assistants API has been deprecated. Please use the Responses API instead.")] - public static ChatClientAgent GetAIAgent( - this AssistantClient assistantClient, - string agentId, - ChatClientAgentOptions options, - Func? clientFactory = null, - IServiceProvider? services = null, - CancellationToken cancellationToken = default) - { - if (assistantClient is null) - { - throw new ArgumentNullException(nameof(assistantClient)); - } - - if (string.IsNullOrWhiteSpace(agentId)) - { - throw new ArgumentException($"{nameof(agentId)} should not be null or whitespace.", nameof(agentId)); - } - - if (options is null) - { - throw new ArgumentNullException(nameof(options)); - } - - var assistant = assistantClient.GetAssistant(agentId, cancellationToken); - return assistantClient.AsAIAgent(assistant, options, clientFactory, services); - } - /// /// Retrieves an existing server side agent, wrapped as a using the provided . /// @@ -325,111 +252,6 @@ public static async Task GetAIAgentAsync( return assistantClient.AsAIAgent(assistantResponse, options, clientFactory, services); } - /// - /// Creates an AI agent from an using the OpenAI Assistant API. - /// - /// The OpenAI to use for the agent. - /// The model identifier to use (e.g., "gpt-4"). - /// Optional system instructions that define the agent's behavior and personality. - /// Optional name for the agent for identification purposes. - /// Optional description of the agent's capabilities and purpose. - /// Optional collection of AI tools that the agent can use during conversations. - /// Provides a way to customize the creation of the underlying used by the agent. - /// Optional logger factory for enabling logging within the agent. 
- /// An optional to use for resolving services required by the instances being invoked. - /// An instance backed by the OpenAI Assistant service. - /// Thrown when or is . - /// Thrown when is empty or whitespace. - [Obsolete("The Assistants API has been deprecated. Please use the Responses API instead.")] - public static ChatClientAgent CreateAIAgent( - this AssistantClient client, - string model, - string? instructions = null, - string? name = null, - string? description = null, - IList? tools = null, - Func? clientFactory = null, - ILoggerFactory? loggerFactory = null, - IServiceProvider? services = null) => - client.CreateAIAgent( - model, - new ChatClientAgentOptions() - { - Name = name, - Description = description, - ChatOptions = tools is null && string.IsNullOrWhiteSpace(instructions) ? null : new ChatOptions() - { - Tools = tools, - Instructions = instructions - } - }, - clientFactory, - loggerFactory, - services); - - /// - /// Creates an AI agent from an using the OpenAI Assistant API. - /// - /// The OpenAI to use for the agent. - /// The model identifier to use (e.g., "gpt-4"). - /// Full set of options to configure the agent. - /// Provides a way to customize the creation of the underlying used by the agent. - /// Optional logger factory for enabling logging within the agent. - /// An optional to use for resolving services required by the instances being invoked. - /// An instance backed by the OpenAI Assistant service. - /// Thrown when or or is . - /// Thrown when is empty or whitespace. - [Obsolete("The Assistants API has been deprecated. Please use the Responses API instead.")] - public static ChatClientAgent CreateAIAgent( - this AssistantClient client, - string model, - ChatClientAgentOptions options, - Func? clientFactory = null, - ILoggerFactory? loggerFactory = null, - IServiceProvider? 
services = null) - { - Throw.IfNull(client); - Throw.IfNullOrEmpty(model); - Throw.IfNull(options); - - var assistantOptions = new AssistantCreationOptions() - { - Name = options.Name, - Description = options.Description, - Instructions = options.ChatOptions?.Instructions, - }; - - // Convert AITools to ToolDefinitions and ToolResources - var toolDefinitionsAndResources = ConvertAIToolsToToolDefinitions(options.ChatOptions?.Tools); - if (toolDefinitionsAndResources.ToolDefinitions is { Count: > 0 }) - { - toolDefinitionsAndResources.ToolDefinitions.ForEach(x => assistantOptions.Tools.Add(x)); - } - - if (toolDefinitionsAndResources.ToolResources is not null) - { - assistantOptions.ToolResources = toolDefinitionsAndResources.ToolResources; - } - - // Create the assistant in the assistant service. - var assistantCreateResult = client.CreateAssistant(model, assistantOptions); - var assistantId = assistantCreateResult.Value.Id; - - // Build the local agent object. - var chatClient = client.AsIChatClient(assistantId); - if (clientFactory is not null) - { - chatClient = clientFactory(chatClient); - } - - var agentOptions = options.Clone(); - agentOptions.Id = assistantId; - options.ChatOptions ??= new ChatOptions(); - options.ChatOptions!.Tools = toolDefinitionsAndResources.FunctionToolsAndOtherTools; - - return new ChatClientAgent(chatClient, agentOptions, loggerFactory, services); - } - /// /// Creates an AI agent from an using the OpenAI Assistant API. 
/// diff --git a/dotnet/src/Microsoft.Agents.AI.OpenAI/Microsoft.Agents.AI.OpenAI.csproj b/dotnet/src/Microsoft.Agents.AI.OpenAI/Microsoft.Agents.AI.OpenAI.csproj index bfcf6e5263..3de68137ba 100644 --- a/dotnet/src/Microsoft.Agents.AI.OpenAI/Microsoft.Agents.AI.OpenAI.csproj +++ b/dotnet/src/Microsoft.Agents.AI.OpenAI/Microsoft.Agents.AI.OpenAI.csproj @@ -17,6 +17,10 @@ + + + + Microsoft Agent Framework OpenAI diff --git a/dotnet/src/Microsoft.Agents.AI.Purview/BackgroundJobRunner.cs b/dotnet/src/Microsoft.Agents.AI.Purview/BackgroundJobRunner.cs index efe376718b..85a4fa54c3 100644 --- a/dotnet/src/Microsoft.Agents.AI.Purview/BackgroundJobRunner.cs +++ b/dotnet/src/Microsoft.Agents.AI.Purview/BackgroundJobRunner.cs @@ -12,7 +12,7 @@ namespace Microsoft.Agents.AI.Purview; /// /// Service that runs jobs in background threads. /// -internal sealed class BackgroundJobRunner +internal sealed class BackgroundJobRunner : IBackgroundJobRunner { private readonly IChannelHandler _channelHandler; private readonly IPurviewClient _purviewClient; @@ -70,4 +70,12 @@ private async Task RunJobAsync(BackgroundJobBase job) break; } } + + /// + /// Shutdown the job runners. + /// + public async Task ShutdownAsync() + { + await this._channelHandler.StopAndWaitForCompletionAsync().ConfigureAwait(false); + } } diff --git a/dotnet/src/Microsoft.Agents.AI.Purview/IBackgroundJobRunner.cs b/dotnet/src/Microsoft.Agents.AI.Purview/IBackgroundJobRunner.cs new file mode 100644 index 0000000000..e9c3d0d54e --- /dev/null +++ b/dotnet/src/Microsoft.Agents.AI.Purview/IBackgroundJobRunner.cs @@ -0,0 +1,16 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Threading.Tasks; + +namespace Microsoft.Agents.AI.Purview; + +/// +/// An interface for a class that manages background jobs. +/// +internal interface IBackgroundJobRunner +{ + /// + /// Shutdown the background jobs. 
+ /// + Task ShutdownAsync(); +} diff --git a/dotnet/src/Microsoft.Agents.AI.Purview/PurviewExtensions.cs b/dotnet/src/Microsoft.Agents.AI.Purview/PurviewExtensions.cs index 4095345d99..2458db94a3 100644 --- a/dotnet/src/Microsoft.Agents.AI.Purview/PurviewExtensions.cs +++ b/dotnet/src/Microsoft.Agents.AI.Purview/PurviewExtensions.cs @@ -41,7 +41,7 @@ private static PurviewWrapper CreateWrapper(TokenCredential tokenCredential, Pur services.AddSingleton(); services.AddSingleton(Channel.CreateBounded(purviewSettings.PendingBackgroundJobLimit)); services.AddSingleton(); - services.AddSingleton(); + services.AddSingleton(); ServiceProvider serviceProvider = services.BuildServiceProvider(); return serviceProvider.GetRequiredService(); diff --git a/dotnet/src/Microsoft.Agents.AI.Purview/PurviewWrapper.cs b/dotnet/src/Microsoft.Agents.AI.Purview/PurviewWrapper.cs index 835b0146a8..14cddbe5c4 100644 --- a/dotnet/src/Microsoft.Agents.AI.Purview/PurviewWrapper.cs +++ b/dotnet/src/Microsoft.Agents.AI.Purview/PurviewWrapper.cs @@ -18,7 +18,7 @@ internal sealed class PurviewWrapper : IDisposable private readonly ILogger _logger; private readonly IScopedContentProcessor _scopedProcessor; private readonly PurviewSettings _purviewSettings; - private readonly IChannelHandler _channelHandler; + private readonly IBackgroundJobRunner _backgroundJobRunner; /// /// Creates a new instance. @@ -26,13 +26,13 @@ internal sealed class PurviewWrapper : IDisposable /// The scoped processor used to orchestrate the calls to Purview. /// The settings for Purview integration. /// The logger used for logging. - /// The channel handler used to queue background jobs and add job runners. - public PurviewWrapper(IScopedContentProcessor scopedProcessor, PurviewSettings purviewSettings, ILogger logger, IChannelHandler channelHandler) + /// The runner used to manage background jobs. 
+ public PurviewWrapper(IScopedContentProcessor scopedProcessor, PurviewSettings purviewSettings, ILogger logger, IBackgroundJobRunner backgroundJobRunner) { this._scopedProcessor = scopedProcessor; this._purviewSettings = purviewSettings; this._logger = logger; - this._channelHandler = channelHandler; + this._backgroundJobRunner = backgroundJobRunner; } private static string GetThreadIdFromAgentThread(AgentThread? thread, IEnumerable messages) @@ -203,7 +203,7 @@ public async Task ProcessAgentContentAsync(IEnumerable +/// Provides semantic analysis of executor route candidates. +/// +/// +/// Analysis is split into two phases for efficiency with incremental generators: +/// +/// - Called per method, extracts data and performs method-level validation only. +/// - Groups methods by class and performs class-level validation once. +/// +/// This avoids redundant class validation when multiple handlers exist in the same class. +/// +internal static class SemanticAnalyzer +{ + // Fully-qualified type names used for symbol comparison + private const string ExecutorTypeName = "Microsoft.Agents.AI.Workflows.Executor"; + private const string WorkflowContextTypeName = "Microsoft.Agents.AI.Workflows.IWorkflowContext"; + private const string CancellationTokenTypeName = "System.Threading.CancellationToken"; + private const string ValueTaskTypeName = "System.Threading.Tasks.ValueTask"; + private const string MessageHandlerAttributeName = "Microsoft.Agents.AI.Workflows.MessageHandlerAttribute"; + private const string SendsMessageAttributeName = "Microsoft.Agents.AI.Workflows.SendsMessageAttribute"; + private const string YieldsOutputAttributeName = "Microsoft.Agents.AI.Workflows.YieldsOutputAttribute"; + + /// + /// Analyzes a method with [MessageHandler] attribute found by ForAttributeWithMetadataName. + /// Returns a MethodAnalysisResult containing both method info and class context. + /// + /// + /// This method only extracts raw data and performs method-level validation. 
+ /// Class-level validation is deferred to to avoid + /// redundant validation when a class has multiple handler methods. + /// + public static MethodAnalysisResult AnalyzeHandlerMethod( + GeneratorAttributeSyntaxContext context, + CancellationToken cancellationToken) + { + // The target should be a method + if (context.TargetSymbol is not IMethodSymbol methodSymbol) + { + return MethodAnalysisResult.Empty; + } + + // Get the containing class + INamedTypeSymbol? classSymbol = methodSymbol.ContainingType; + if (classSymbol is null) + { + return MethodAnalysisResult.Empty; + } + + // Get the method syntax for location info + MethodDeclarationSyntax? methodSyntax = context.TargetNode as MethodDeclarationSyntax; + + // Extract class-level info (raw facts, no validation here) + string classKey = GetClassKey(classSymbol); + bool isPartialClass = IsPartialClass(classSymbol, cancellationToken); + bool derivesFromExecutor = DerivesFromExecutor(classSymbol); + bool hasManualConfigureRoutes = HasConfigureRoutesDefined(classSymbol); + + // Extract class metadata + string? @namespace = classSymbol.ContainingNamespace?.IsGlobalNamespace == true + ? null + : classSymbol.ContainingNamespace?.ToDisplayString(); + string className = classSymbol.Name; + string? genericParameters = GetGenericParameters(classSymbol); + bool isNested = classSymbol.ContainingType != null; + string containingTypeChain = GetContainingTypeChain(classSymbol); + bool baseHasConfigureRoutes = BaseHasConfigureRoutes(classSymbol); + ImmutableEquatableArray classSendTypes = GetClassLevelTypes(classSymbol, SendsMessageAttributeName); + ImmutableEquatableArray classYieldTypes = GetClassLevelTypes(classSymbol, YieldsOutputAttributeName); + + // Get class location for class-level diagnostics + DiagnosticLocationInfo? 
classLocation = GetClassLocation(classSymbol, cancellationToken); + + // Analyze the handler method (method-level validation only) + // Skip method analysis if class doesn't derive from Executor (class-level diagnostic will be reported later) + var methodDiagnostics = ImmutableArray.CreateBuilder(); + HandlerInfo? handler = null; + if (derivesFromExecutor) + { + handler = AnalyzeHandler(methodSymbol, methodSyntax, methodDiagnostics); + } + + return new MethodAnalysisResult( + classKey, @namespace, className, genericParameters, isNested, containingTypeChain, + baseHasConfigureRoutes, classSendTypes, classYieldTypes, + isPartialClass, derivesFromExecutor, hasManualConfigureRoutes, + classLocation, + handler, + Diagnostics: new ImmutableEquatableArray(methodDiagnostics.ToImmutable())); + } + + /// + /// Combines multiple MethodAnalysisResults for the same class into an AnalysisResult. + /// Performs class-level validation once (instead of per-method) for efficiency. + /// + public static AnalysisResult CombineHandlerMethodResults(IEnumerable methodResults) + { + List methods = methodResults.ToList(); + if (methods.Count == 0) + { + return AnalysisResult.Empty; + } + + // All methods should have same class info - take from first + MethodAnalysisResult first = methods[0]; + Location classLocation = first.ClassLocation?.ToRoslynLocation() ?? 
Location.None; + + // Collect method-level diagnostics + var allDiagnostics = ImmutableArray.CreateBuilder(); + foreach (var method in methods) + { + foreach (var diag in method.Diagnostics) + { + allDiagnostics.Add(diag.ToRoslynDiagnostic(null)); + } + } + + // Class-level validation (done once, not per-method) + if (!first.DerivesFromExecutor) + { + allDiagnostics.Add(Diagnostic.Create( + DiagnosticDescriptors.NotAnExecutor, + classLocation, + first.ClassName, + first.ClassName)); + return AnalysisResult.WithDiagnostics(allDiagnostics.ToImmutable()); + } + + if (!first.IsPartialClass) + { + allDiagnostics.Add(Diagnostic.Create( + DiagnosticDescriptors.ClassMustBePartial, + classLocation, + first.ClassName)); + return AnalysisResult.WithDiagnostics(allDiagnostics.ToImmutable()); + } + + if (first.HasManualConfigureRoutes) + { + allDiagnostics.Add(Diagnostic.Create( + DiagnosticDescriptors.ConfigureRoutesAlreadyDefined, + classLocation, + first.ClassName)); + return AnalysisResult.WithDiagnostics(allDiagnostics.ToImmutable()); + } + + // Collect valid handlers + ImmutableArray handlers = methods + .Where(m => m.Handler is not null) + .Select(m => m.Handler!) + .ToImmutableArray(); + + if (handlers.Length == 0) + { + return AnalysisResult.WithDiagnostics(allDiagnostics.ToImmutable()); + } + + ExecutorInfo executorInfo = new( + first.Namespace, + first.ClassName, + first.GenericParameters, + first.IsNested, + first.ContainingTypeChain, + first.BaseHasConfigureRoutes, + new ImmutableEquatableArray(handlers), + first.ClassSendTypes, + first.ClassYieldTypes); + + if (allDiagnostics.Count > 0) + { + return AnalysisResult.WithInfoAndDiagnostics(executorInfo, allDiagnostics.ToImmutable()); + } + + return AnalysisResult.Success(executorInfo); + } + + /// + /// Analyzes a class with [SendsMessage] or [YieldsOutput] attribute found by ForAttributeWithMetadataName. + /// Returns ClassProtocolInfo entries for each attribute instance (handles multiple attributes of same type). 
+ /// + /// The generator attribute syntax context. + /// Whether this is a Send or Yield attribute. + /// Cancellation token. + /// The analysis results for the class protocol attributes. + public static ImmutableArray AnalyzeClassProtocolAttribute( + GeneratorAttributeSyntaxContext context, + ProtocolAttributeKind attributeKind, + CancellationToken cancellationToken) + { + // The target should be a class + if (context.TargetSymbol is not INamedTypeSymbol classSymbol) + { + return ImmutableArray.Empty; + } + + // Extract class-level info (same for all attributes) + string classKey = GetClassKey(classSymbol); + bool isPartialClass = IsPartialClass(classSymbol, cancellationToken); + bool derivesFromExecutor = DerivesFromExecutor(classSymbol); + bool hasManualConfigureRoutes = HasConfigureRoutesDefined(classSymbol); + + string? @namespace = classSymbol.ContainingNamespace?.IsGlobalNamespace == true + ? null + : classSymbol.ContainingNamespace?.ToDisplayString(); + string className = classSymbol.Name; + string? genericParameters = GetGenericParameters(classSymbol); + bool isNested = classSymbol.ContainingType != null; + string containingTypeChain = GetContainingTypeChain(classSymbol); + DiagnosticLocationInfo? 
classLocation = GetClassLocation(classSymbol, cancellationToken); + + // Extract a ClassProtocolInfo for each attribute instance + ImmutableArray.Builder results = ImmutableArray.CreateBuilder(); + + foreach (AttributeData attr in context.Attributes) + { + if (attr.ConstructorArguments.Length > 0 && + attr.ConstructorArguments[0].Value is INamedTypeSymbol typeSymbol) + { + string typeName = typeSymbol.ToDisplayString(SymbolDisplayFormat.FullyQualifiedFormat); + results.Add(new ClassProtocolInfo( + classKey, + @namespace, + className, + genericParameters, + isNested, + containingTypeChain, + isPartialClass, + derivesFromExecutor, + hasManualConfigureRoutes, + classLocation, + typeName, + attributeKind)); + } + } + + return results.ToImmutable(); + } + + /// + /// Combines ClassProtocolInfo results into an AnalysisResult for classes that only have protocol attributes + /// (no [MessageHandler] methods). This generates only ConfigureSentTypes/ConfigureYieldTypes overrides. + /// + /// The protocol info entries for the class. + /// The combined analysis result. + public static AnalysisResult CombineProtocolOnlyResults(IEnumerable protocolInfos) + { + List protocols = protocolInfos.ToList(); + if (protocols.Count == 0) + { + return AnalysisResult.Empty; + } + + // All entries should have same class info - take from first + ClassProtocolInfo first = protocols[0]; + Location classLocation = first.ClassLocation?.ToRoslynLocation() ?? 
Location.None; + + ImmutableArray.Builder allDiagnostics = ImmutableArray.CreateBuilder(); + + // Class-level validation + if (!first.DerivesFromExecutor) + { + allDiagnostics.Add(Diagnostic.Create( + DiagnosticDescriptors.NotAnExecutor, + classLocation, + first.ClassName, + first.ClassName)); + return AnalysisResult.WithDiagnostics(allDiagnostics.ToImmutable()); + } + + if (!first.IsPartialClass) + { + allDiagnostics.Add(Diagnostic.Create( + DiagnosticDescriptors.ClassMustBePartial, + classLocation, + first.ClassName)); + return AnalysisResult.WithDiagnostics(allDiagnostics.ToImmutable()); + } + + // Collect send and yield types + ImmutableArray.Builder sendTypes = ImmutableArray.CreateBuilder(); + ImmutableArray.Builder yieldTypes = ImmutableArray.CreateBuilder(); + + foreach (ClassProtocolInfo protocol in protocols) + { + if (protocol.AttributeKind == ProtocolAttributeKind.Send) + { + sendTypes.Add(protocol.TypeName); + } + else + { + yieldTypes.Add(protocol.TypeName); + } + } + + // Sort to ensure consistent ordering for incremental generator caching + sendTypes.Sort(StringComparer.Ordinal); + yieldTypes.Sort(StringComparer.Ordinal); + + // Create ExecutorInfo with no handlers but with protocol types + ExecutorInfo executorInfo = new( + first.Namespace, + first.ClassName, + first.GenericParameters, + first.IsNested, + first.ContainingTypeChain, + BaseHasConfigureRoutes: false, // Not relevant for protocol-only + Handlers: ImmutableEquatableArray.Empty, + ClassSendTypes: new ImmutableEquatableArray(sendTypes.ToImmutable()), + ClassYieldTypes: new ImmutableEquatableArray(yieldTypes.ToImmutable())); + + if (allDiagnostics.Count > 0) + { + return AnalysisResult.WithInfoAndDiagnostics(executorInfo, allDiagnostics.ToImmutable()); + } + + return AnalysisResult.Success(executorInfo); + } + + /// + /// Gets the source location of the class identifier for diagnostic reporting. + /// + private static DiagnosticLocationInfo? 
GetClassLocation(INamedTypeSymbol classSymbol, CancellationToken cancellationToken) + { + foreach (SyntaxReference syntaxRef in classSymbol.DeclaringSyntaxReferences) + { + SyntaxNode syntax = syntaxRef.GetSyntax(cancellationToken); + if (syntax is ClassDeclarationSyntax classDecl) + { + return DiagnosticLocationInfo.FromLocation(classDecl.Identifier.GetLocation()); + } + } + + return null; + } + + /// + /// Returns a unique identifier for the class used to group methods by their containing type. + /// + private static string GetClassKey(INamedTypeSymbol classSymbol) + { + return classSymbol.ToDisplayString(SymbolDisplayFormat.FullyQualifiedFormat); + } + + /// + /// Checks if any declaration of the class has the 'partial' modifier. + /// + private static bool IsPartialClass(INamedTypeSymbol classSymbol, CancellationToken cancellationToken) + { + foreach (SyntaxReference syntaxRef in classSymbol.DeclaringSyntaxReferences) + { + SyntaxNode syntax = syntaxRef.GetSyntax(cancellationToken); + if (syntax is ClassDeclarationSyntax classDecl && + classDecl.Modifiers.Any(SyntaxKind.PartialKeyword)) + { + return true; + } + } + + return false; + } + + /// + /// Walks the inheritance chain to check if the class derives from Executor or Executor<T>. + /// + private static bool DerivesFromExecutor(INamedTypeSymbol classSymbol) + { + INamedTypeSymbol? current = classSymbol.BaseType; + while (current != null) + { + string fullName = current.OriginalDefinition.ToDisplayString(); + if (fullName == ExecutorTypeName || fullName.StartsWith(ExecutorTypeName + "<", StringComparison.Ordinal)) + { + return true; + } + + current = current.BaseType; + } + + return false; + } + + /// + /// Checks if this class directly defines ConfigureRoutes (not inherited). + /// If so, we skip generation to avoid conflicting with user's manual implementation. 
+ /// + private static bool HasConfigureRoutesDefined(INamedTypeSymbol classSymbol) + { + foreach (var member in classSymbol.GetMembers("ConfigureRoutes")) + { + if (member is IMethodSymbol method && !method.IsAbstract && + SymbolEqualityComparer.Default.Equals(method.ContainingType, classSymbol)) + { + return true; + } + } + + return false; + } + + /// + /// Checks if any base class (between this class and Executor) defines ConfigureRoutes. + /// If so, generated code should call base.ConfigureRoutes() to preserve inherited handlers. + /// + private static bool BaseHasConfigureRoutes(INamedTypeSymbol classSymbol) + { + INamedTypeSymbol? baseType = classSymbol.BaseType; + while (baseType != null) + { + string fullName = baseType.OriginalDefinition.ToDisplayString(); + // Stop at Executor - its ConfigureRoutes is abstract/empty + if (fullName == ExecutorTypeName) + { + return false; + } + + foreach (var member in baseType.GetMembers("ConfigureRoutes")) + { + if (member is IMethodSymbol method && !method.IsAbstract) + { + return true; + } + } + + baseType = baseType.BaseType; + } + + return false; + } + + /// + /// Validates a handler method's signature and extracts metadata. + /// + /// + /// Valid signatures: + /// + /// void Handle(TMessage, IWorkflowContext, [CancellationToken]) + /// ValueTask HandleAsync(TMessage, IWorkflowContext, [CancellationToken]) + /// ValueTask<TResult> HandleAsync(TMessage, IWorkflowContext, [CancellationToken]) + /// TResult Handle(TMessage, IWorkflowContext, [CancellationToken]) (sync with result) + /// + /// + private static HandlerInfo? AnalyzeHandler( + IMethodSymbol methodSymbol, + MethodDeclarationSyntax? methodSyntax, + ImmutableArray.Builder diagnostics) + { + Location location = methodSyntax?.Identifier.GetLocation() ?? 
Location.None; + + // Check if static + if (methodSymbol.IsStatic) + { + diagnostics.Add(DiagnosticInfo.Create("MAFGENWF007", location, methodSymbol.Name)); + return null; + } + + // Check parameter count + if (methodSymbol.Parameters.Length < 2) + { + diagnostics.Add(DiagnosticInfo.Create("MAFGENWF005", location, methodSymbol.Name)); + return null; + } + + // Check second parameter is IWorkflowContext + IParameterSymbol secondParam = methodSymbol.Parameters[1]; + if (secondParam.Type.ToDisplayString() != WorkflowContextTypeName) + { + diagnostics.Add(DiagnosticInfo.Create("MAFGENWF001", location, methodSymbol.Name)); + return null; + } + + // Check for optional CancellationToken as third parameter + bool hasCancellationToken = methodSymbol.Parameters.Length >= 3 && + methodSymbol.Parameters[2].Type.ToDisplayString() == CancellationTokenTypeName; + + // Analyze return type + ITypeSymbol returnType = methodSymbol.ReturnType; + HandlerSignatureKind? signatureKind = GetSignatureKind(returnType); + if (signatureKind == null) + { + diagnostics.Add(DiagnosticInfo.Create("MAFGENWF002", location, methodSymbol.Name)); + return null; + } + + // Get input type + ITypeSymbol inputType = methodSymbol.Parameters[0].Type; + string inputTypeName = inputType.ToDisplayString(SymbolDisplayFormat.FullyQualifiedFormat); + + // Get output type + string? 
outputTypeName = null; + if (signatureKind == HandlerSignatureKind.ResultSync) + { + outputTypeName = returnType.ToDisplayString(SymbolDisplayFormat.FullyQualifiedFormat); + } + else if (signatureKind == HandlerSignatureKind.ResultAsync && returnType is INamedTypeSymbol namedReturn) + { + if (namedReturn.TypeArguments.Length == 1) + { + outputTypeName = namedReturn.TypeArguments[0].ToDisplayString(SymbolDisplayFormat.FullyQualifiedFormat); + } + } + + // Get Yield and Send types from attribute + (ImmutableEquatableArray yieldTypes, ImmutableEquatableArray sendTypes) = GetAttributeTypeArrays(methodSymbol); + + return new HandlerInfo( + methodSymbol.Name, + inputTypeName, + outputTypeName, + signatureKind.Value, + hasCancellationToken, + yieldTypes, + sendTypes); + } + + /// + /// Determines the handler signature kind from the return type. + /// + /// The signature kind, or null if the return type is not supported (e.g., Task, Task<T>). + private static HandlerSignatureKind? GetSignatureKind(ITypeSymbol returnType) + { + string returnTypeName = returnType.ToDisplayString(); + + if (returnType.SpecialType == SpecialType.System_Void) + { + return HandlerSignatureKind.VoidSync; + } + + if (returnTypeName == ValueTaskTypeName) + { + return HandlerSignatureKind.VoidAsync; + } + + if (returnType is INamedTypeSymbol namedType && + namedType.OriginalDefinition.ToDisplayString() == "System.Threading.Tasks.ValueTask") + { + return HandlerSignatureKind.ResultAsync; + } + + // Any non-void, non-Task type is treated as a synchronous result + if (returnType.SpecialType != SpecialType.System_Void && + !returnTypeName.StartsWith("System.Threading.Tasks.Task", StringComparison.Ordinal) && + !returnTypeName.StartsWith("System.Threading.Tasks.ValueTask", StringComparison.Ordinal)) + { + return HandlerSignatureKind.ResultSync; + } + + // Task/Task not supported - must use ValueTask + return null; + } + + /// + /// Extracts Yield and Send type arrays from the [MessageHandler] attribute's 
named arguments. + /// + /// + /// [MessageHandler(Yield = new[] { typeof(OutputA), typeof(OutputB) }, Send = new[] { typeof(Request) })] + /// + private static (ImmutableEquatableArray YieldTypes, ImmutableEquatableArray SendTypes) GetAttributeTypeArrays( + IMethodSymbol methodSymbol) + { + var yieldTypes = ImmutableArray.Empty; + var sendTypes = ImmutableArray.Empty; + + foreach (var attr in methodSymbol.GetAttributes()) + { + if (attr.AttributeClass?.ToDisplayString() != MessageHandlerAttributeName) + { + continue; + } + + foreach (var namedArg in attr.NamedArguments) + { + if (namedArg.Key.Equals("Yield", StringComparison.Ordinal) && !namedArg.Value.IsNull) + { + yieldTypes = ExtractTypeArray(namedArg.Value); + } + else if (namedArg.Key.Equals("Send", StringComparison.Ordinal) && !namedArg.Value.IsNull) + { + sendTypes = ExtractTypeArray(namedArg.Value); + } + } + } + + return (new ImmutableEquatableArray(yieldTypes), new ImmutableEquatableArray(sendTypes)); + } + + /// + /// Converts a TypedConstant array (from attribute argument) to fully-qualified type name strings. + /// + /// + /// Results are sorted to ensure consistent ordering for incremental generator caching. + /// + private static ImmutableArray ExtractTypeArray(TypedConstant typedConstant) + { + if (typedConstant.Kind != TypedConstantKind.Array) + { + return ImmutableArray.Empty; + } + + ImmutableArray.Builder builder = ImmutableArray.CreateBuilder(); + foreach (TypedConstant value in typedConstant.Values) + { + if (value.Value is INamedTypeSymbol typeSymbol) + { + builder.Add(typeSymbol.ToDisplayString(SymbolDisplayFormat.FullyQualifiedFormat)); + } + } + + // Sort to ensure consistent ordering for incremental generator caching + builder.Sort(StringComparer.Ordinal); + + return builder.ToImmutable(); + } + + /// + /// Collects types from [SendsMessage] or [YieldsOutput] attributes applied to the class. 
+ /// + /// + /// Results are sorted to ensure consistent ordering for incremental generator caching, + /// since GetAttributes() order is not guaranteed across partial class declarations. + /// + /// + /// [SendsMessage(typeof(Request))] + /// [YieldsOutput(typeof(Response))] + /// public partial class MyExecutor : Executor { } + /// + private static ImmutableEquatableArray GetClassLevelTypes(INamedTypeSymbol classSymbol, string attributeName) + { + ImmutableArray.Builder builder = ImmutableArray.CreateBuilder(); + + foreach (AttributeData attr in classSymbol.GetAttributes()) + { + if (attr.AttributeClass?.ToDisplayString() == attributeName && + attr.ConstructorArguments.Length > 0 && + attr.ConstructorArguments[0].Value is INamedTypeSymbol typeSymbol) + { + builder.Add(typeSymbol.ToDisplayString(SymbolDisplayFormat.FullyQualifiedFormat)); + } + } + + // Sort to ensure consistent ordering for incremental generator caching + builder.Sort(StringComparer.Ordinal); + + return new ImmutableEquatableArray(builder.ToImmutable()); + } + + /// + /// Builds the chain of containing types for nested classes, outermost first. + /// + /// + /// For class Outer.Middle.Inner.MyExecutor, returns "Outer.Middle.Inner" + /// + private static string GetContainingTypeChain(INamedTypeSymbol classSymbol) + { + List chain = new(); + INamedTypeSymbol? current = classSymbol.ContainingType; + + while (current != null) + { + chain.Insert(0, current.Name); + current = current.ContainingType; + } + + return string.Join(".", chain); + } + + /// + /// Returns the generic type parameter clause (e.g., "<T, U>") for generic classes, or null for non-generic. + /// + private static string? 
GetGenericParameters(INamedTypeSymbol classSymbol) + { + if (!classSymbol.IsGenericType) + { + return null; + } + + string parameters = string.Join(", ", classSymbol.TypeParameters.Select(p => p.Name)); + return $"<{parameters}>"; + } +} diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows.Generators/Diagnostics/DiagnosticDescriptors.cs b/dotnet/src/Microsoft.Agents.AI.Workflows.Generators/Diagnostics/DiagnosticDescriptors.cs new file mode 100644 index 0000000000..4afc7a1697 --- /dev/null +++ b/dotnet/src/Microsoft.Agents.AI.Workflows.Generators/Diagnostics/DiagnosticDescriptors.cs @@ -0,0 +1,107 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Collections.Generic; +using Microsoft.CodeAnalysis; + +namespace Microsoft.Agents.AI.Workflows.Generators.Diagnostics; + +/// +/// Diagnostic descriptors for the executor route source generator. +/// +internal static class DiagnosticDescriptors +{ + private const string Category = "Microsoft.Agents.AI.Workflows.Generators"; + + private static readonly Dictionary s_descriptorsById = new(); + + /// + /// Gets a diagnostic descriptor by its ID. + /// + public static DiagnosticDescriptor? GetById(string id) + { + return s_descriptorsById.TryGetValue(id, out var descriptor) ? descriptor : null; + } + + private static DiagnosticDescriptor Register(DiagnosticDescriptor descriptor) + { + s_descriptorsById[descriptor.Id] = descriptor; + return descriptor; + } + + /// + /// MAFGENWF001: Handler method must have IWorkflowContext parameter. + /// + public static readonly DiagnosticDescriptor MissingWorkflowContext = Register(new( + id: "MAFGENWF001", + title: "Handler missing IWorkflowContext parameter", + messageFormat: "Method '{0}' marked with [MessageHandler] must have IWorkflowContext as the second parameter", + category: Category, + defaultSeverity: DiagnosticSeverity.Error, + isEnabledByDefault: true)); + + /// + /// MAFGENWF002: Handler method has invalid return type. 
+ /// + public static readonly DiagnosticDescriptor InvalidReturnType = Register(new( + id: "MAFGENWF002", + title: "Handler has invalid return type", + messageFormat: "Method '{0}' marked with [MessageHandler] must return void, ValueTask, or ValueTask", + category: Category, + defaultSeverity: DiagnosticSeverity.Error, + isEnabledByDefault: true)); + + /// + /// MAFGENWF003: Executor with [MessageHandler] must be partial. + /// + public static readonly DiagnosticDescriptor ClassMustBePartial = Register(new( + id: "MAFGENWF003", + title: "Executor with [MessageHandler] must be partial", + messageFormat: "Class '{0}' contains [MessageHandler] methods but is not declared as partial", + category: Category, + defaultSeverity: DiagnosticSeverity.Error, + isEnabledByDefault: true)); + + /// + /// MAFGENWF004: [MessageHandler] on non-Executor class. + /// + public static readonly DiagnosticDescriptor NotAnExecutor = Register(new( + id: "MAFGENWF004", + title: "[MessageHandler] on non-Executor class", + messageFormat: "Method '{0}' is marked with [MessageHandler] but class '{1}' does not derive from Executor", + category: Category, + defaultSeverity: DiagnosticSeverity.Warning, + isEnabledByDefault: true)); + + /// + /// MAFGENWF005: Handler method has insufficient parameters. + /// + public static readonly DiagnosticDescriptor InsufficientParameters = Register(new( + id: "MAFGENWF005", + title: "Handler has insufficient parameters", + messageFormat: "Method '{0}' marked with [MessageHandler] must have at least 2 parameters (message and IWorkflowContext)", + category: Category, + defaultSeverity: DiagnosticSeverity.Error, + isEnabledByDefault: true)); + + /// + /// MAFGENWF006: ConfigureRoutes already defined. 
+ /// + public static readonly DiagnosticDescriptor ConfigureRoutesAlreadyDefined = Register(new( + id: "MAFGENWF006", + title: "ConfigureRoutes already defined", + messageFormat: "Class '{0}' already defines ConfigureRoutes; [MessageHandler] methods will be ignored", + category: Category, + defaultSeverity: DiagnosticSeverity.Info, + isEnabledByDefault: true)); + + /// + /// MAFGENWF007: Handler method is static. + /// + public static readonly DiagnosticDescriptor HandlerCannotBeStatic = Register(new( + id: "MAFGENWF007", + title: "Handler cannot be static", + messageFormat: "Method '{0}' marked with [MessageHandler] cannot be static", + category: Category, + defaultSeverity: DiagnosticSeverity.Error, + isEnabledByDefault: true)); +} diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows.Generators/Directory.Build.targets b/dotnet/src/Microsoft.Agents.AI.Workflows.Generators/Directory.Build.targets new file mode 100644 index 0000000000..9808af77f0 --- /dev/null +++ b/dotnet/src/Microsoft.Agents.AI.Workflows.Generators/Directory.Build.targets @@ -0,0 +1,18 @@ + + + + <_ParentTargetsPath>$([MSBuild]::GetPathOfFileAbove(Directory.Build.targets, $(MSBuildThisFileDirectory)..)) + + + + + + <_SkipIncompatibleBuild>true + + + true + + + + + diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows.Generators/ExecutorRouteGenerator.cs b/dotnet/src/Microsoft.Agents.AI.Workflows.Generators/ExecutorRouteGenerator.cs new file mode 100644 index 0000000000..181e799ae2 --- /dev/null +++ b/dotnet/src/Microsoft.Agents.AI.Workflows.Generators/ExecutorRouteGenerator.cs @@ -0,0 +1,161 @@ +// Copyright (c) Microsoft. All rights reserved. 
+ +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Linq; +using System.Text; +using Microsoft.Agents.AI.Workflows.Generators.Analysis; +using Microsoft.Agents.AI.Workflows.Generators.Generation; +using Microsoft.Agents.AI.Workflows.Generators.Models; +using Microsoft.CodeAnalysis; +using Microsoft.CodeAnalysis.CSharp.Syntax; +using Microsoft.CodeAnalysis.Text; + +namespace Microsoft.Agents.AI.Workflows.Generators; + +/// +/// Roslyn incremental source generator that generates ConfigureRoutes implementations +/// for executor classes with [MessageHandler] attributed methods, and/or ConfigureSentTypes/ConfigureYieldTypes +/// overrides for classes with [SendsMessage]/[YieldsOutput] attributes. +/// +[Generator] +public sealed class ExecutorRouteGenerator : IIncrementalGenerator +{ + private const string MessageHandlerAttributeFullName = "Microsoft.Agents.AI.Workflows.MessageHandlerAttribute"; + private const string SendsMessageAttributeFullName = "Microsoft.Agents.AI.Workflows.SendsMessageAttribute"; + private const string YieldsOutputAttributeFullName = "Microsoft.Agents.AI.Workflows.YieldsOutputAttribute"; + + /// + public void Initialize(IncrementalGeneratorInitializationContext context) + { + // Pipeline 1: Methods with [MessageHandler] attribute + IncrementalValuesProvider methodAnalysisResults = context.SyntaxProvider + .ForAttributeWithMetadataName( + fullyQualifiedMetadataName: MessageHandlerAttributeFullName, + predicate: static (node, _) => node is MethodDeclarationSyntax, + transform: static (ctx, ct) => SemanticAnalyzer.AnalyzeHandlerMethod(ctx, ct)) + .Where(static result => !string.IsNullOrWhiteSpace(result.ClassKey)); + + // Pipeline 2: Classes with [SendsMessage] attribute + IncrementalValuesProvider sendProtocolResults = context.SyntaxProvider + .ForAttributeWithMetadataName( + fullyQualifiedMetadataName: SendsMessageAttributeFullName, + predicate: static (node, _) => node is ClassDeclarationSyntax, + transform: 
static (ctx, ct) => SemanticAnalyzer.AnalyzeClassProtocolAttribute(ctx, ProtocolAttributeKind.Send, ct)) + .SelectMany(static (results, _) => results); + + // Pipeline 3: Classes with [YieldsOutput] attribute + IncrementalValuesProvider yieldProtocolResults = context.SyntaxProvider + .ForAttributeWithMetadataName( + fullyQualifiedMetadataName: YieldsOutputAttributeFullName, + predicate: static (node, _) => node is ClassDeclarationSyntax, + transform: static (ctx, ct) => SemanticAnalyzer.AnalyzeClassProtocolAttribute(ctx, ProtocolAttributeKind.Yield, ct)) + .SelectMany(static (results, _) => results); + + // Combine all protocol results (Send + Yield) + IncrementalValuesProvider allProtocolResults = sendProtocolResults + .Collect() + .Combine(yieldProtocolResults.Collect()) + .SelectMany(static (tuple, _) => tuple.Left.AddRange(tuple.Right)); + + // Combine all pipelines and produce AnalysisResults grouped by class + IncrementalValuesProvider combinedResults = methodAnalysisResults + .Collect() + .Combine(allProtocolResults.Collect()) + .SelectMany(static (tuple, _) => CombineAllResults(tuple.Left, tuple.Right)); + + // Generate source for valid executors + context.RegisterSourceOutput( + combinedResults.Where(static r => r.ExecutorInfo is not null), + static (ctx, result) => + { + string source = SourceBuilder.Generate(result.ExecutorInfo!); + string hintName = GetHintName(result.ExecutorInfo!); + ctx.AddSource(hintName, SourceText.From(source, Encoding.UTF8)); + }); + + // Report diagnostics + context.RegisterSourceOutput( + combinedResults.Where(static r => !r.Diagnostics.IsEmpty), + static (ctx, result) => + { + foreach (Diagnostic diagnostic in result.Diagnostics) + { + ctx.ReportDiagnostic(diagnostic); + } + }); + } + + /// + /// Combines method analysis results with class protocol results, grouping by class key. + /// Classes with [MessageHandler] methods get full generation; classes with only protocol + /// attributes get protocol-only generation. 
+ /// + private static IEnumerable CombineAllResults( + ImmutableArray methodResults, + ImmutableArray protocolResults) + { + // Group method results by class + Dictionary> methodsByClass = methodResults + .GroupBy(r => r.ClassKey) + .ToDictionary(g => g.Key, g => g.ToList()); + + // Group protocol results by class + Dictionary> protocolsByClass = protocolResults + .GroupBy(r => r.ClassKey) + .ToDictionary(g => g.Key, g => g.ToList()); + + // Track which classes we've processed + HashSet processedClasses = new(); + + // Process classes that have [MessageHandler] methods + foreach (KeyValuePair> kvp in methodsByClass) + { + processedClasses.Add(kvp.Key); + yield return SemanticAnalyzer.CombineHandlerMethodResults(kvp.Value); + } + + // Process classes that only have protocol attributes (no [MessageHandler] methods) + foreach (KeyValuePair> kvp in protocolsByClass) + { + if (!processedClasses.Contains(kvp.Key)) + { + yield return SemanticAnalyzer.CombineProtocolOnlyResults(kvp.Value); + } + } + } + + /// + /// Generates a hint (virtual file) name for the generated source file based on the ExecutorInfo. 
+ /// + private static string GetHintName(ExecutorInfo info) + { + var sb = new StringBuilder(); + + if (!string.IsNullOrWhiteSpace(info.Namespace)) + { + sb.Append(info.Namespace) + .Append('.'); + } + + if (info.IsNested) + { + sb.Append(info.ContainingTypeChain) + .Append('.'); + } + + sb.Append(info.ClassName); + + // Handle generic type parameters in hint name + if (!string.IsNullOrWhiteSpace(info.GenericParameters)) + { + // Replace < > with underscores for valid file name + sb.Append('_') + .Append(info.GenericParameters!.Length - 2); // Number of type params approximation + } + + sb.Append(".g.cs"); + + return sb.ToString(); + } +} diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows.Generators/Generation/SourceBuilder.cs b/dotnet/src/Microsoft.Agents.AI.Workflows.Generators/Generation/SourceBuilder.cs new file mode 100644 index 0000000000..0779a56045 --- /dev/null +++ b/dotnet/src/Microsoft.Agents.AI.Workflows.Generators/Generation/SourceBuilder.cs @@ -0,0 +1,253 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Collections.Generic; +using System.Text; +using Microsoft.Agents.AI.Workflows.Generators.Models; + +namespace Microsoft.Agents.AI.Workflows.Generators.Generation; + +/// +/// Generates source code for executor route configuration. +/// +/// +/// This builder produces a partial class file that overrides ConfigureRoutes to register +/// handlers discovered via [MessageHandler] attributes. It may also generate ConfigureSentTypes +/// and ConfigureYieldTypes overrides when [SendsMessage] or [YieldsOutput] attributes are present. +/// +internal static class SourceBuilder +{ + /// + /// Generates the complete source file for an executor's generated partial class. + /// + /// The analyzed executor information containing class metadata and handler details. + /// The generated C# source code as a string. 
+ public static string Generate(ExecutorInfo info) + { + var sb = new StringBuilder(); + + // File header + sb.AppendLine("// "); + sb.AppendLine("#nullable enable"); + sb.AppendLine(); + + // Using directives + sb.AppendLine("using System;"); + sb.AppendLine("using System.Collections.Generic;"); + sb.AppendLine("using Microsoft.Agents.AI.Workflows;"); + sb.AppendLine(); + + // Namespace + if (!string.IsNullOrWhiteSpace(info.Namespace)) + { + sb.AppendLine($"namespace {info.Namespace};"); + sb.AppendLine(); + } + + // For nested classes, we must emit partial declarations for each containing type. + // Example: if MyExecutor is nested in Outer.Inner, we emit: + // partial class Outer { partial class Inner { partial class MyExecutor { ... } } } + string indent = ""; + if (info.IsNested) + { + foreach (string containingType in info.ContainingTypeChain.Split('.')) + { + sb.AppendLine($"{indent}partial class {containingType}"); + sb.AppendLine($"{indent}{{"); + indent += " "; + } + } + + // Class declaration + sb.AppendLine($"{indent}partial class {info.ClassName}{info.GenericParameters}"); + sb.AppendLine($"{indent}{{"); + + string memberIndent = indent + " "; + bool hasContent = false; + + // Only generate ConfigureRoutes if there are handlers + if (info.Handlers.Count > 0) + { + GenerateConfigureRoutes(sb, info, memberIndent); + hasContent = true; + } + + // Only generate protocol overrides if [SendsMessage] or [YieldsOutput] attributes are present. + // Without these attributes, we rely on the base class defaults. 
+ if (info.ShouldGenerateProtocolOverrides) + { + if (hasContent) + { + sb.AppendLine(); + } + + GenerateConfigureSentTypes(sb, info, memberIndent); + sb.AppendLine(); + GenerateConfigureYieldTypes(sb, info, memberIndent); + } + + // Close class + sb.AppendLine($"{indent}}}"); + + // Close nested classes + if (info.IsNested) + { + string[] containingTypes = info.ContainingTypeChain.Split('.'); + for (int i = containingTypes.Length - 1; i >= 0; i--) + { + indent = new string(' ', i * 4); + sb.AppendLine($"{indent}}}"); + } + } + + return sb.ToString(); + } + + /// + /// Generates the ConfigureRoutes override that registers all [MessageHandler] methods. + /// + private static void GenerateConfigureRoutes(StringBuilder sb, ExecutorInfo info, string indent) + { + sb.AppendLine($"{indent}protected override RouteBuilder ConfigureRoutes(RouteBuilder routeBuilder)"); + sb.AppendLine($"{indent}{{"); + + string bodyIndent = indent + " "; + + // If a base class has its own ConfigureRoutes, chain to it first to preserve inherited handlers. + if (info.BaseHasConfigureRoutes) + { + sb.AppendLine($"{bodyIndent}routeBuilder = base.ConfigureRoutes(routeBuilder);"); + sb.AppendLine(); + } + + // Generate handler registrations using fluent AddHandler calls. + // RouteBuilder.AddHandler registers a void handler; AddHandler registers one with a return value. + if (info.Handlers.Count == 1) + { + HandlerInfo handler = info.Handlers[0]; + sb.AppendLine($"{bodyIndent}return routeBuilder"); + sb.Append($"{bodyIndent} .AddHandler"); + AppendHandlerGenericArgs(sb, handler); + sb.AppendLine($"(this.{handler.MethodName});"); + } + else + { + // Multiple handlers: chain fluent calls, semicolon only on the last one. 
+ sb.AppendLine($"{bodyIndent}return routeBuilder"); + + for (int i = 0; i < info.Handlers.Count; i++) + { + HandlerInfo handler = info.Handlers[i]; + + sb.Append($"{bodyIndent} .AddHandler"); + AppendHandlerGenericArgs(sb, handler); + sb.Append($"(this.{handler.MethodName})"); + sb.AppendLine(); + } + + // Remove last newline without using that System.Environment which is banned from use in analyzers + var newLineLength = new StringBuilder().AppendLine().Length; + sb.Remove(sb.Length - newLineLength, newLineLength); + sb.AppendLine(";"); + } + + sb.AppendLine($"{indent}}}"); + } + + /// + /// Appends generic type arguments for AddHandler based on whether the handler returns a value. + /// + private static void AppendHandlerGenericArgs(StringBuilder sb, HandlerInfo handler) + { + // Handlers returning ValueTask use single type arg; ValueTask uses two. + if (handler.HasOutput && handler.OutputTypeName != null) + { + sb.Append($"<{handler.InputTypeName}, {handler.OutputTypeName}>"); + } + else + { + sb.Append($"<{handler.InputTypeName}>"); + } + } + + /// + /// Generates ConfigureSentTypes override declaring message types this executor sends via context.SendMessageAsync. + /// + /// + /// Types come from [SendsMessage] attributes on the class or individual handler methods. + /// This enables workflow protocol validation at build time. 
+ /// + private static void GenerateConfigureSentTypes(StringBuilder sb, ExecutorInfo info, string indent) + { + sb.AppendLine($"{indent}protected override ISet ConfigureSentTypes()"); + sb.AppendLine($"{indent}{{"); + + string bodyIndent = indent + " "; + + sb.AppendLine($"{bodyIndent}var types = base.ConfigureSentTypes();"); + + foreach (var type in info.ClassSendTypes) + { + sb.AppendLine($"{bodyIndent}types.Add(typeof({type}));"); + } + + foreach (var handler in info.Handlers) + { + foreach (var type in handler.SendTypes) + { + sb.AppendLine($"{bodyIndent}types.Add(typeof({type}));"); + } + } + + sb.AppendLine($"{bodyIndent}return types;"); + sb.AppendLine($"{indent}}}"); + } + + /// + /// Generates ConfigureYieldTypes override declaring message types this executor yields via context.YieldOutputAsync. + /// + /// + /// Types come from [YieldsOutput] attributes and handler return types (ValueTask<T>). + /// This enables workflow protocol validation at build time. + /// + private static void GenerateConfigureYieldTypes(StringBuilder sb, ExecutorInfo info, string indent) + { + sb.AppendLine($"{indent}protected override ISet ConfigureYieldTypes()"); + sb.AppendLine($"{indent}{{"); + + string bodyIndent = indent + " "; + + sb.AppendLine($"{bodyIndent}var types = base.ConfigureYieldTypes();"); + + // Track types to avoid emitting duplicate Add calls (the set handles runtime dedup, + // but cleaner generated code is easier to read). + var addedTypes = new HashSet(); + + foreach (var type in info.ClassYieldTypes) + { + if (addedTypes.Add(type)) + { + sb.AppendLine($"{bodyIndent}types.Add(typeof({type}));"); + } + } + + foreach (var handler in info.Handlers) + { + foreach (var type in handler.YieldTypes) + { + if (addedTypes.Add(type)) + { + sb.AppendLine($"{bodyIndent}types.Add(typeof({type}));"); + } + } + + // Handler return types (ValueTask) are implicitly yielded. 
+ if (handler.HasOutput && handler.OutputTypeName != null && addedTypes.Add(handler.OutputTypeName)) + { + sb.AppendLine($"{bodyIndent}types.Add(typeof({handler.OutputTypeName}));"); + } + } + + sb.AppendLine($"{bodyIndent}return types;"); + sb.AppendLine($"{indent}}}"); + } +} diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows.Generators/Microsoft.Agents.AI.Workflows.Generators.csproj b/dotnet/src/Microsoft.Agents.AI.Workflows.Generators/Microsoft.Agents.AI.Workflows.Generators.csproj new file mode 100644 index 0000000000..82a1b0adef --- /dev/null +++ b/dotnet/src/Microsoft.Agents.AI.Workflows.Generators/Microsoft.Agents.AI.Workflows.Generators.csproj @@ -0,0 +1,65 @@ + + + + + netstandard2.0 + + + + latest + enable + + + true + + + true + true + + + false + true + + + $(NoWarn);nullable + + $(NoWarn);RS2008 + + $(NoWarn);NU5128 + + + + preview + + + + + + + Microsoft Agent Framework Workflows Source Generators + Provides Roslyn source generators for Microsoft Agent Framework Workflows, enabling compile-time route configuration for executors. + true + + + + + + + + + + + + + + + + + + + + diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows.Generators/Models/AnalysisResult.cs b/dotnet/src/Microsoft.Agents.AI.Workflows.Generators/Models/AnalysisResult.cs new file mode 100644 index 0000000000..249b05e5af --- /dev/null +++ b/dotnet/src/Microsoft.Agents.AI.Workflows.Generators/Models/AnalysisResult.cs @@ -0,0 +1,50 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Collections.Immutable; +using Microsoft.CodeAnalysis; + +namespace Microsoft.Agents.AI.Workflows.Generators.Models; + +/// +/// Represents the result of analyzing a class with [MessageHandler] attributed methods. +/// Combines the executor info (if valid) with any diagnostics to report. +/// Note: Instances of this class should not be used within the analyzers caching +/// layer because it directly contains a collection of objects. +/// +/// The executor information. 
+/// Any diagnostics to report. +internal sealed class AnalysisResult(ExecutorInfo? executorInfo, ImmutableArray diagnostics) +{ + /// + /// Gets the executor information. + /// + public ExecutorInfo? ExecutorInfo { get; } = executorInfo; + + /// + /// Gets the diagnostics to report. + /// + public ImmutableArray Diagnostics { get; } = diagnostics.IsDefault ? ImmutableArray.Empty : diagnostics; + + /// + /// Creates a successful result with executor info and no diagnostics. + /// + public static AnalysisResult Success(ExecutorInfo info) => + new(info, ImmutableArray.Empty); + + /// + /// Creates a result with only diagnostics (no valid executor info). + /// + public static AnalysisResult WithDiagnostics(ImmutableArray diagnostics) => + new(null, diagnostics); + + /// + /// Creates a result with executor info and diagnostics. + /// + public static AnalysisResult WithInfoAndDiagnostics(ExecutorInfo info, ImmutableArray diagnostics) => + new(info, diagnostics); + + /// + /// Creates an empty result (no info, no diagnostics). + /// + public static AnalysisResult Empty => new(null, ImmutableArray.Empty); +} diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows.Generators/Models/ClassProtocolInfo.cs b/dotnet/src/Microsoft.Agents.AI.Workflows.Generators/Models/ClassProtocolInfo.cs new file mode 100644 index 0000000000..df9205cc5f --- /dev/null +++ b/dotnet/src/Microsoft.Agents.AI.Workflows.Generators/Models/ClassProtocolInfo.cs @@ -0,0 +1,42 @@ +// Copyright (c) Microsoft. All rights reserved. + +namespace Microsoft.Agents.AI.Workflows.Generators.Models; + +/// +/// Represents protocol type information extracted from class-level [SendsMessage] or [YieldsOutput] attributes. +/// Used by the incremental generator pipeline to capture classes that declare protocol types +/// but may not have [MessageHandler] methods (e.g., when ConfigureRoutes is manually implemented). +/// +/// Unique identifier for the class (fully qualified name). +/// The namespace of the class. 
+/// The name of the class. +/// The generic type parameters (e.g., "<T>"), or null if not generic. +/// Whether the class is nested inside another class. +/// The chain of containing types for nested classes. Empty if not nested. +/// Whether the class is declared as partial. +/// Whether the class derives from Executor. +/// Whether the class has a manually defined ConfigureRoutes method. +/// Location info for diagnostics. +/// The fully qualified type name from the attribute. +/// Whether this is from a SendsMessage or YieldsOutput attribute. +internal sealed record ClassProtocolInfo( + string ClassKey, + string? Namespace, + string ClassName, + string? GenericParameters, + bool IsNested, + string ContainingTypeChain, + bool IsPartialClass, + bool DerivesFromExecutor, + bool HasManualConfigureRoutes, + DiagnosticLocationInfo? ClassLocation, + string TypeName, + ProtocolAttributeKind AttributeKind) +{ + /// + /// Gets an empty result for invalid targets. + /// + public static ClassProtocolInfo Empty { get; } = new( + string.Empty, null, string.Empty, null, false, string.Empty, + false, false, false, null, string.Empty, ProtocolAttributeKind.Send); +} diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows.Generators/Models/DiagnosticInfo.cs b/dotnet/src/Microsoft.Agents.AI.Workflows.Generators/Models/DiagnosticInfo.cs new file mode 100644 index 0000000000..17ea1f7cca --- /dev/null +++ b/dotnet/src/Microsoft.Agents.AI.Workflows.Generators/Models/DiagnosticInfo.cs @@ -0,0 +1,77 @@ +// Copyright (c) Microsoft. All rights reserved. + +using Microsoft.Agents.AI.Workflows.Generators.Diagnostics; +using Microsoft.CodeAnalysis; +using Microsoft.CodeAnalysis.Text; + +namespace Microsoft.Agents.AI.Workflows.Generators.Models; + +/// +/// Represents diagnostic information in a form that supports value equality. +/// Location is stored as file path + span, which can be used to recreate a Location. 
+/// +internal sealed record DiagnosticInfo( + string DiagnosticId, + string FilePath, + TextSpan Span, + LinePositionSpan LineSpan, + ImmutableEquatableArray MessageArgs) +{ + /// + /// Creates a DiagnosticInfo from a location and message arguments. + /// + public static DiagnosticInfo Create(string diagnosticId, Location location, params string[] messageArgs) + { + FileLinePositionSpan lineSpan = location.GetLineSpan(); + return new DiagnosticInfo( + diagnosticId, + lineSpan.Path ?? string.Empty, + location.SourceSpan, + lineSpan.Span, + new ImmutableEquatableArray(System.Collections.Immutable.ImmutableArray.Create(messageArgs))); + } + + /// + /// Converts this info back to a Roslyn Diagnostic. + /// + public Diagnostic ToRoslynDiagnostic(SyntaxTree? syntaxTree) + { + DiagnosticDescriptor? descriptor = DiagnosticDescriptors.GetById(this.DiagnosticId); + if (descriptor is null) + { + // Fallback - should not happen + object[] fallbackArgs = new object[this.MessageArgs.Count]; + for (int i = 0; i < this.MessageArgs.Count; i++) + { + fallbackArgs[i] = this.MessageArgs[i]; + } + + return Diagnostic.Create( + DiagnosticDescriptors.InsufficientParameters, + Location.None, + fallbackArgs); + } + + Location location; + if (syntaxTree is not null) + { + location = Location.Create(syntaxTree, this.Span); + } + else if (!string.IsNullOrWhiteSpace(this.FilePath)) + { + location = Location.Create(this.FilePath, this.Span, this.LineSpan); + } + else + { + location = Location.None; + } + + object[] args = new object[this.MessageArgs.Count]; + for (int i = 0; i < this.MessageArgs.Count; i++) + { + args[i] = this.MessageArgs[i]; + } + + return Diagnostic.Create(descriptor, location, args); + } +} diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows.Generators/Models/DiagnosticLocationInfo.cs b/dotnet/src/Microsoft.Agents.AI.Workflows.Generators/Models/DiagnosticLocationInfo.cs new file mode 100644 index 0000000000..21f55749dd --- /dev/null +++ 
b/dotnet/src/Microsoft.Agents.AI.Workflows.Generators/Models/DiagnosticLocationInfo.cs @@ -0,0 +1,45 @@ +// Copyright (c) Microsoft. All rights reserved. + +using Microsoft.CodeAnalysis; +using Microsoft.CodeAnalysis.Text; + +namespace Microsoft.Agents.AI.Workflows.Generators.Models; + +/// +/// Represents location information in a form that supports value equality making it friendly for source gen caching. +/// +internal sealed record DiagnosticLocationInfo( + string FilePath, + TextSpan Span, + LinePositionSpan LineSpan) +{ + /// + /// Creates a DiagnosticLocationInfo from a Roslyn Location. + /// + public static DiagnosticLocationInfo? FromLocation(Location? location) + { + if (location is null || location == Location.None) + { + return null; + } + + FileLinePositionSpan lineSpan = location.GetLineSpan(); + return new DiagnosticLocationInfo( + lineSpan.Path ?? string.Empty, + location.SourceSpan, + lineSpan.Span); + } + + /// + /// Converts back to a Roslyn Location. + /// + public Location ToRoslynLocation() + { + if (string.IsNullOrWhiteSpace(this.FilePath)) + { + return Location.None; + } + + return Location.Create(this.FilePath, this.Span, this.LineSpan); + } +} diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows.Generators/Models/ExecutorInfo.cs b/dotnet/src/Microsoft.Agents.AI.Workflows.Generators/Models/ExecutorInfo.cs new file mode 100644 index 0000000000..507927d875 --- /dev/null +++ b/dotnet/src/Microsoft.Agents.AI.Workflows.Generators/Models/ExecutorInfo.cs @@ -0,0 +1,80 @@ +// Copyright (c) Microsoft. All rights reserved. + +namespace Microsoft.Agents.AI.Workflows.Generators.Models; + +/// +/// Contains all information needed to generate code for an executor class. +/// Uses record for automatic value equality, which is required for incremental generator caching. +/// +/// The namespace of the executor class. +/// The name of the executor class. +/// The generic type parameters of the class (e.g., "<T, U>"), or null if not generic. 
+/// Whether the class is nested inside another class. +/// The chain of containing types for nested classes (e.g., "OuterClass.InnerClass"). Empty string if not nested. +/// Whether the base class has a ConfigureRoutes method that should be called. +/// The list of handler methods to register. +/// The types declared via class-level [SendsMessage] attributes. +/// The types declared via class-level [YieldsOutput] attributes. +internal sealed record ExecutorInfo( + string? Namespace, + string ClassName, + string? GenericParameters, + bool IsNested, + string ContainingTypeChain, + bool BaseHasConfigureRoutes, + ImmutableEquatableArray Handlers, + ImmutableEquatableArray ClassSendTypes, + ImmutableEquatableArray ClassYieldTypes) +{ + /// + /// Gets whether any protocol type overrides should be generated. + /// + public bool ShouldGenerateProtocolOverrides => + !this.ClassSendTypes.IsEmpty || + !this.ClassYieldTypes.IsEmpty || + this.HasHandlerWithSendTypes || + this.HasHandlerWithYieldTypes; + + /// + /// Gets whether any handler has explicit Send types. + /// + public bool HasHandlerWithSendTypes + { + get + { + foreach (var handler in this.Handlers) + { + if (!handler.SendTypes.IsEmpty) + { + return true; + } + } + + return false; + } + } + + /// + /// Gets whether any handler has explicit Yield types or output types. + /// + public bool HasHandlerWithYieldTypes + { + get + { + foreach (var handler in this.Handlers) + { + if (!handler.YieldTypes.IsEmpty) + { + return true; + } + + if (handler.HasOutput) + { + return true; + } + } + + return false; + } + } +} diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows.Generators/Models/HandlerInfo.cs b/dotnet/src/Microsoft.Agents.AI.Workflows.Generators/Models/HandlerInfo.cs new file mode 100644 index 0000000000..f5d8b5642f --- /dev/null +++ b/dotnet/src/Microsoft.Agents.AI.Workflows.Generators/Models/HandlerInfo.cs @@ -0,0 +1,47 @@ +// Copyright (c) Microsoft. All rights reserved. 
+ +namespace Microsoft.Agents.AI.Workflows.Generators.Models; + +/// +/// Represents the signature kind of a message handler method. +/// +internal enum HandlerSignatureKind +{ + /// Void synchronous: void Handler(T, IWorkflowContext) or void Handler(T, IWorkflowContext, CT) + VoidSync, + + /// Void asynchronous: ValueTask Handler(T, IWorkflowContext[, CT]) + VoidAsync, + + /// Result synchronous: TResult Handler(T, IWorkflowContext[, CT]) + ResultSync, + + /// Result asynchronous: ValueTask<TResult> Handler(T, IWorkflowContext[, CT]) + ResultAsync +} + +/// +/// Contains information about a single message handler method. +/// Uses record for automatic value equality, which is required for incremental generator caching. +/// +/// The name of the handler method. +/// The fully-qualified type name of the input message type. +/// The fully-qualified type name of the output type, or null if the handler is void. +/// The signature kind of the handler. +/// Whether the handler method has a CancellationToken parameter. +/// The types explicitly declared in the Yield property of [MessageHandler]. +/// The types explicitly declared in the Send property of [MessageHandler]. +internal sealed record HandlerInfo( + string MethodName, + string InputTypeName, + string? OutputTypeName, + HandlerSignatureKind SignatureKind, + bool HasCancellationToken, + ImmutableEquatableArray YieldTypes, + ImmutableEquatableArray SendTypes) +{ + /// + /// Gets whether this handler returns a value (either sync or async). 
+ /// + public bool HasOutput => this.SignatureKind == HandlerSignatureKind.ResultSync || this.SignatureKind == HandlerSignatureKind.ResultAsync; +} diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows.Generators/Models/ImmutableEquatableArray.cs b/dotnet/src/Microsoft.Agents.AI.Workflows.Generators/Models/ImmutableEquatableArray.cs new file mode 100644 index 0000000000..f39a36c85e --- /dev/null +++ b/dotnet/src/Microsoft.Agents.AI.Workflows.Generators/Models/ImmutableEquatableArray.cs @@ -0,0 +1,125 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Collections; +using System.Collections.Generic; +using System.Linq; + +namespace Microsoft.Agents.AI.Workflows.Generators.Models; + +/// +/// Provides an immutable list implementation which implements sequence equality. +/// Copied from: https://github.com/dotnet/runtime/blob/main/src/libraries/Common/src/SourceGenerators/ImmutableEquatableArray.cs +/// +internal sealed class ImmutableEquatableArray : IEquatable>, IReadOnlyList + where T : IEquatable +{ + /// + /// Creates a new empty . + /// + public static ImmutableEquatableArray Empty { get; } = new ImmutableEquatableArray(Array.Empty()); + + private readonly T[] _values; + + /// + /// Gets the element at the specified index. + /// + /// + /// + public T this[int index] => this._values[index]; + + /// + /// Gets the number of elements contained in the collection. + /// + public int Count => this._values.Length; + + /// + /// Gets whether the array is empty. + /// + public bool IsEmpty => this._values.Length == 0; + + /// + /// Initializes a new instance of the ImmutableEquatableArray{T} class that contains the elements from the specified + /// collection. + /// + /// The elements from the provided collection are copied into the immutable array. Subsequent + /// changes to the original collection do not affect the contents of this array. + /// The collection of elements to initialize the array with. Cannot be null. 
+ public ImmutableEquatableArray(IEnumerable values) => this._values = values.ToArray(); + + /// + public bool Equals(ImmutableEquatableArray? other) => other != null && ((ReadOnlySpan)this._values).SequenceEqual(other._values); + + /// + public override bool Equals(object? obj) + => obj is ImmutableEquatableArray other && this.Equals(other); + + /// + public override int GetHashCode() + { + int hash = 0; + foreach (T value in this._values) + { + hash = HashHelpers.Combine(hash, value is null ? 0 : value.GetHashCode()); + } + + return hash; + } + + /// + public Enumerator GetEnumerator() => new(this._values); + + IEnumerator IEnumerable.GetEnumerator() => ((IEnumerable)this._values).GetEnumerator(); + + IEnumerator IEnumerable.GetEnumerator() => this._values.GetEnumerator(); + + /// + public struct Enumerator + { + private readonly T[] _values; + private int _index; + + internal Enumerator(T[] values) + { + this._values = values; + this._index = -1; + } + + /// + public bool MoveNext() + { + int newIndex = this._index + 1; + + if ((uint)newIndex < (uint)this._values.Length) + { + this._index = newIndex; + return true; + } + + return false; + } + + /// + /// The element at the current position of the enumerator. 
+ /// + public readonly T Current => this._values[this._index]; + } +} + +internal static class ImmutableEquatableArray +{ + public static ImmutableEquatableArray ToImmutableEquatableArray(this IEnumerable values) where T : IEquatable + => new(values); +} + +// Copied from https://github.com/dotnet/runtime/blob/main/src/libraries/System.Private.CoreLib/src/System/Numerics/Hashing/HashHelpers.cs#L6 +internal static class HashHelpers +{ + public static int Combine(int h1, int h2) + { + // RyuJIT optimizes this to use the ROL instruction + // Related GitHub pull request: https://github.com/dotnet/coreclr/pull/1830 + uint rol5 = ((uint)h1 << 5) | ((uint)h1 >> 27); + return ((int)rol5 + h1) ^ h2; + } +} diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows.Generators/Models/MethodAnalysisResult.cs b/dotnet/src/Microsoft.Agents.AI.Workflows.Generators/Models/MethodAnalysisResult.cs new file mode 100644 index 0000000000..f9493c5d93 --- /dev/null +++ b/dotnet/src/Microsoft.Agents.AI.Workflows.Generators/Models/MethodAnalysisResult.cs @@ -0,0 +1,51 @@ +// Copyright (c) Microsoft. All rights reserved. + +namespace Microsoft.Agents.AI.Workflows.Generators.Models; + +/// +/// Represents the result of analyzing a single method with [MessageHandler]. +/// Contains both the method's handler info and class context for grouping. +/// Uses value-equatable types to support incremental generator caching. +/// +/// +/// Class-level validation (IsPartialClass, DerivesFromExecutor, HasManualConfigureRoutes) +/// is extracted here but validated once per class in CombineMethodResults to avoid +/// redundant validation work when a class has multiple handlers. +/// +internal sealed record MethodAnalysisResult( + // Class identification for grouping + string ClassKey, + + // Class-level info (extracted once per method, will be same for all methods in class) + string? Namespace, + string ClassName, + string? 
GenericParameters, + bool IsNested, + string ContainingTypeChain, + bool BaseHasConfigureRoutes, + ImmutableEquatableArray ClassSendTypes, + ImmutableEquatableArray ClassYieldTypes, + + // Class-level facts (used for validation in CombineMethodResults) + bool IsPartialClass, + bool DerivesFromExecutor, + bool HasManualConfigureRoutes, + + // Class location for diagnostics (value-equatable) + DiagnosticLocationInfo? ClassLocation, + + // Method-level info (null if method validation failed) + HandlerInfo? Handler, + + // Method-level diagnostics only (class-level diagnostics created in CombineMethodResults) + ImmutableEquatableArray Diagnostics) +{ + /// + /// Gets an empty result for invalid targets (e.g., attribute on non-method). + /// + public static MethodAnalysisResult Empty { get; } = new( + string.Empty, null, string.Empty, null, false, string.Empty, + false, ImmutableEquatableArray.Empty, ImmutableEquatableArray.Empty, + false, false, false, + null, null, ImmutableEquatableArray.Empty); +} diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows.Generators/Models/ProtocolAttributeKind.cs b/dotnet/src/Microsoft.Agents.AI.Workflows.Generators/Models/ProtocolAttributeKind.cs new file mode 100644 index 0000000000..68d4e75469 --- /dev/null +++ b/dotnet/src/Microsoft.Agents.AI.Workflows.Generators/Models/ProtocolAttributeKind.cs @@ -0,0 +1,19 @@ +// Copyright (c) Microsoft. All rights reserved. + +namespace Microsoft.Agents.AI.Workflows.Generators.Models; + +/// +/// Identifies the kind of protocol attribute. +/// +internal enum ProtocolAttributeKind +{ + /// + /// The [SendsMessage] attribute. + /// + Send, + + /// + /// The [YieldsOutput] attribute. 
+ /// + Yield +} diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows.Generators/SkipIncompatibleBuild.targets b/dotnet/src/Microsoft.Agents.AI.Workflows.Generators/SkipIncompatibleBuild.targets new file mode 100644 index 0000000000..bd5d7b835f --- /dev/null +++ b/dotnet/src/Microsoft.Agents.AI.Workflows.Generators/SkipIncompatibleBuild.targets @@ -0,0 +1,10 @@ + + + + + + + + + + diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/Attributes/MessageHandlerAttribute.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/Attributes/MessageHandlerAttribute.cs new file mode 100644 index 0000000000..7f40b3573d --- /dev/null +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/Attributes/MessageHandlerAttribute.cs @@ -0,0 +1,70 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; + +namespace Microsoft.Agents.AI.Workflows; + +/// +/// Marks a method as a message handler for source-generated route configuration. +/// The method signature determines the input type and optional output type. +/// +/// +/// +/// Methods marked with this attribute must have a signature matching one of the following patterns: +/// +/// void Handler(TMessage, IWorkflowContext) +/// void Handler(TMessage, IWorkflowContext, CancellationToken) +/// ValueTask Handler(TMessage, IWorkflowContext) +/// ValueTask Handler(TMessage, IWorkflowContext, CancellationToken) +/// TResult Handler(TMessage, IWorkflowContext) +/// TResult Handler(TMessage, IWorkflowContext, CancellationToken) +/// ValueTask<TResult> Handler(TMessage, IWorkflowContext) +/// ValueTask<TResult> Handler(TMessage, IWorkflowContext, CancellationToken) +/// +/// +/// +/// The containing class must be partial and derive from . 
+/// +/// +/// +/// +/// public partial class MyExecutor : Executor +/// { +/// [MessageHandler] +/// private async ValueTask<MyResponse> HandleQueryAsync( +/// MyQuery query, IWorkflowContext ctx, CancellationToken ct) +/// { +/// return new MyResponse(); +/// } +/// +/// [MessageHandler(Yield = [typeof(StreamChunk)], Send = [typeof(InternalMessage)])] +/// private void HandleStream(StreamRequest req, IWorkflowContext ctx) +/// { +/// // Handler with explicit yield and send types +/// } +/// } +/// +/// +[AttributeUsage(AttributeTargets.Method, AllowMultiple = false, Inherited = false)] +public sealed class MessageHandlerAttribute : Attribute +{ + /// + /// Gets or sets the types that this handler may yield as workflow outputs. + /// + /// + /// If not specified, the return type (if any) is used as the default yield type. + /// Use this property to explicitly declare additional output types or to override + /// the default inference from the return type. + /// + public Type[]? Yield { get; set; } + + /// + /// Gets or sets the types that this handler may send as messages to other executors. + /// + /// + /// Use this property to declare the message types that this handler may send + /// via during its execution. + /// This information is used for protocol validation and documentation. + /// + public Type[]? Send { get; set; } +} diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/Attributes/SendsMessageAttribute.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/Attributes/SendsMessageAttribute.cs new file mode 100644 index 0000000000..3b5620fc37 --- /dev/null +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/Attributes/SendsMessageAttribute.cs @@ -0,0 +1,49 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using Microsoft.Shared.Diagnostics; + +namespace Microsoft.Agents.AI.Workflows; + +/// +/// Declares that an executor may send messages of the specified type. 
+/// +/// +/// +/// Apply this attribute to an class to declare the types of messages +/// it may send via . This information is used +/// for protocol validation and documentation. +/// +/// +/// This attribute can be applied multiple times to declare multiple message types. +/// It is inherited by derived classes, allowing base executors to declare common message types. +/// +/// +/// +/// +/// [SendsMessage(typeof(PollToken))] +/// [SendsMessage(typeof(StatusUpdate))] +/// public partial class MyExecutor : Executor +/// { +/// // ... +/// } +/// +/// +[AttributeUsage(AttributeTargets.Class, AllowMultiple = true, Inherited = true)] +public sealed class SendsMessageAttribute : Attribute +{ + /// + /// Gets the type of message that the executor may send. + /// + public Type Type { get; } + + /// + /// Initializes a new instance of the class. + /// + /// The type of message that the executor may send. + /// is . + public SendsMessageAttribute(Type type) + { + this.Type = Throw.IfNull(type); + } +} diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/Attributes/YieldsOutputAttribute.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/Attributes/YieldsOutputAttribute.cs new file mode 100644 index 0000000000..5aad434b1d --- /dev/null +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/Attributes/YieldsOutputAttribute.cs @@ -0,0 +1,49 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using Microsoft.Shared.Diagnostics; + +namespace Microsoft.Agents.AI.Workflows; + +/// +/// Declares that an executor may yield messages of the specified type as workflow outputs. +/// +/// +/// +/// Apply this attribute to an class to declare the types of messages +/// it may yield via . This information is used +/// for protocol validation and documentation. +/// +/// +/// This attribute can be applied multiple times to declare multiple output types. +/// It is inherited by derived classes, allowing base executors to declare common output types. 
+/// +/// +/// +/// +/// [YieldsOutput(typeof(FinalResult))] +/// [YieldsOutput(typeof(StreamChunk))] +/// public partial class MyExecutor : Executor +/// { +/// // ... +/// } +/// +/// +[AttributeUsage(AttributeTargets.Class, AllowMultiple = true, Inherited = true)] +public sealed class YieldsOutputAttribute : Attribute +{ + /// + /// Gets the type of message that the executor may yield. + /// + public Type Type { get; } + + /// + /// Initializes a new instance of the class. + /// + /// The type of message that the executor may yield. + /// is . + public YieldsOutputAttribute(Type type) + { + this.Type = Throw.IfNull(type); + } +} diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/ChatProtocol.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/ChatProtocol.cs index ff8140ee3a..5a328bc8c8 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/ChatProtocol.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/ChatProtocol.cs @@ -20,13 +20,20 @@ public static class ChatProtocolExtensions /// Determines whether the specified protocol descriptor represents the Agent Workflow Chat Protocol. /// /// The protocol descriptor to evaluate. + /// If , will allow protocols handling all inputs to be treated + /// as a Chat Protocol /// if the protocol descriptor represents a supported chat protocol; otherwise, . - public static bool IsChatProtocol(this ProtocolDescriptor descriptor) + public static bool IsChatProtocol(this ProtocolDescriptor descriptor, bool allowCatchAll = false) { bool foundListChatMessageInput = false; bool foundTurnTokenInput = false; + if (allowCatchAll && descriptor.AcceptsAll) + { + return true; + } + // We require that the workflow be a ChatProtocol; right now that is defined as accepting at // least List as input (pending polymorphism/interface-input support), as well as // TurnToken. 
Since output is mediated by events, which we forward, we don't need to validate @@ -50,9 +57,11 @@ public static bool IsChatProtocol(this ProtocolDescriptor descriptor) /// Throws an exception if the specified protocol descriptor does not represent a valid chat protocol. /// /// The protocol descriptor to validate as a chat protocol. Cannot be null. - public static void ThrowIfNotChatProtocol(this ProtocolDescriptor descriptor) + /// If , will allow protocols handling all inputs to be treated + /// as a Chat Protocol + public static void ThrowIfNotChatProtocol(this ProtocolDescriptor descriptor, bool allowCatchAll = false) { - if (!descriptor.IsChatProtocol()) + if (!descriptor.IsChatProtocol(allowCatchAll)) { throw new InvalidOperationException("Workflow does not support ChatProtocol: At least List" + " and TurnToken must be supported as input."); diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/ChatProtocolExecutor.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/ChatProtocolExecutor.cs index 238734b598..8fe11f696f 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/ChatProtocolExecutor.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/ChatProtocolExecutor.cs @@ -29,6 +29,12 @@ public abstract class ChatProtocolExecutor : StatefulExecutor> private static readonly Func> s_initFunction = () => []; private readonly ChatRole? _stringMessageChatRole; + private static readonly StatefulExecutorOptions s_baseExecutorOptions = new() + { + AutoSendMessageHandlerResultObject = false, + AutoYieldOutputHandlerResultObject = false + }; + /// /// Initializes a new instance of the class. /// @@ -36,7 +42,7 @@ public abstract class ChatProtocolExecutor : StatefulExecutor> /// Optional configuration settings for the executor. If null, default options are used. /// Declare that this executor may be used simultaneously by multiple runs safely. protected ChatProtocolExecutor(string id, ChatProtocolExecutorOptions? 
options = null, bool declareCrossRunShareable = false) - : base(id, () => [], declareCrossRunShareable: declareCrossRunShareable) + : base(id, () => [], s_baseExecutorOptions, declareCrossRunShareable) { this._stringMessageChatRole = options?.StringMessageChatRole; } diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/ISuperStepJoinContext.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/ISuperStepJoinContext.cs index f4af19bcfd..8dacca61c0 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/ISuperStepJoinContext.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/Execution/ISuperStepJoinContext.cs @@ -13,6 +13,7 @@ internal interface ISuperStepJoinContext ValueTask ForwardWorkflowEventAsync(WorkflowEvent workflowEvent, CancellationToken cancellationToken = default); ValueTask SendMessageAsync(string senderId, [DisallowNull] TMessage message, CancellationToken cancellationToken = default); + ValueTask YieldOutputAsync(string senderId, [DisallowNull] TOutput output, CancellationToken cancellationToken = default); ValueTask AttachSuperstepAsync(ISuperStepRunner superStepRunner, CancellationToken cancellationToken = default); ValueTask DetachSuperstepAsync(string id); diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/Executor.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/Executor.cs index 647dbcd852..741f49e2ab 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/Executor.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/Executor.cs @@ -210,7 +210,7 @@ public ProtocolDescriptor DescribeProtocol() // TODO: Once burden of annotating yield/output messages becomes easier for the non-Auto case, // we should (1) start checking for validity on output/send side, and (2) add the Yield/Send // types to the ProtocolDescriptor. 
- return new(this.InputTypes); + return new(this.InputTypes, this.Router.HasCatchAll); } /// diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/IWorkflowExecutionEnvironment.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/IWorkflowExecutionEnvironment.cs index b8e8b37fa5..1b82308ae7 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/IWorkflowExecutionEnvironment.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/IWorkflowExecutionEnvironment.cs @@ -75,10 +75,9 @@ public interface IWorkflowExecutionEnvironment /// The workflow to be executed. Must not be null. /// The corresponding to the checkpoint from which to resume. /// The to use with this run. - /// An optional unique identifier for the run. If not provided, a new identifier will be generated. /// The to monitor for cancellation requests. The default is . /// A that provides access to the results of the streaming run. - ValueTask> ResumeStreamAsync(Workflow workflow, CheckpointInfo fromCheckpoint, CheckpointManager checkpointManager, string? runId = null, CancellationToken cancellationToken = default); + ValueTask> ResumeStreamAsync(Workflow workflow, CheckpointInfo fromCheckpoint, CheckpointManager checkpointManager, CancellationToken cancellationToken = default); /// /// Initiates a non-streaming execution of the workflow with the specified input. @@ -117,9 +116,8 @@ public interface IWorkflowExecutionEnvironment /// The workflow to be executed. Must not be null. /// The corresponding to the checkpoint from which to resume. /// The to use with this run. - /// An optional unique identifier for the run. If not provided, a new identifier will be generated. /// The to monitor for cancellation requests. The default is . /// A that represents the asynchronous operation. The result contains a for managing and interacting with the streaming run. - ValueTask> ResumeAsync(Workflow workflow, CheckpointInfo fromCheckpoint, CheckpointManager checkpointManager, string? 
runId = null, CancellationToken cancellationToken = default); + ValueTask> ResumeAsync(Workflow workflow, CheckpointInfo fromCheckpoint, CheckpointManager checkpointManager, CancellationToken cancellationToken = default); } diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/InProc/InProcessExecutionEnvironment.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/InProc/InProcessExecutionEnvironment.cs index a4d40ff127..47dee1e1e0 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/InProc/InProcessExecutionEnvironment.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/InProc/InProcessExecutionEnvironment.cs @@ -30,9 +30,9 @@ internal ValueTask BeginRunAsync(Workflow workflow, ICheckpointM return runner.BeginStreamAsync(this.ExecutionMode, cancellationToken); } - internal ValueTask ResumeRunAsync(Workflow workflow, ICheckpointManager? checkpointManager, string? runId, CheckpointInfo fromCheckpoint, IEnumerable knownValidInputTypes, CancellationToken cancellationToken) + internal ValueTask ResumeRunAsync(Workflow workflow, ICheckpointManager? checkpointManager, CheckpointInfo fromCheckpoint, IEnumerable knownValidInputTypes, CancellationToken cancellationToken) { - InProcessRunner runner = InProcessRunner.CreateTopLevelRunner(workflow, checkpointManager, runId, this.EnableConcurrentRuns, knownValidInputTypes); + InProcessRunner runner = InProcessRunner.CreateTopLevelRunner(workflow, checkpointManager, fromCheckpoint.RunId, this.EnableConcurrentRuns, knownValidInputTypes); return runner.ResumeStreamAsync(this.ExecutionMode, fromCheckpoint, cancellationToken); } @@ -95,10 +95,9 @@ public async ValueTask> ResumeStreamAsync( Workflow workflow, CheckpointInfo fromCheckpoint, CheckpointManager checkpointManager, - string? 
runId = null, CancellationToken cancellationToken = default) { - AsyncRunHandle runHandle = await this.ResumeRunAsync(workflow, checkpointManager, runId: runId, fromCheckpoint, [], cancellationToken) + AsyncRunHandle runHandle = await this.ResumeRunAsync(workflow, checkpointManager, fromCheckpoint, [], cancellationToken) .ConfigureAwait(false); return await runHandle.WithCheckpointingAsync(() => new(new StreamingRun(runHandle))) @@ -172,10 +171,9 @@ public async ValueTask> ResumeAsync( Workflow workflow, CheckpointInfo fromCheckpoint, CheckpointManager checkpointManager, - string? runId = null, CancellationToken cancellationToken = default) { - AsyncRunHandle runHandle = await this.ResumeRunAsync(workflow, checkpointManager, runId: runId, fromCheckpoint, [], cancellationToken) + AsyncRunHandle runHandle = await this.ResumeRunAsync(workflow, checkpointManager, fromCheckpoint, [], cancellationToken) .ConfigureAwait(false); return await runHandle.WithCheckpointingAsync(() => new(new Run(runHandle))) diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/InProc/InProcessRunnerContext.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/InProc/InProcessRunnerContext.cs index 1750f779f2..2f2162b969 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/InProc/InProcessRunnerContext.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/InProc/InProcessRunnerContext.cs @@ -24,6 +24,8 @@ internal sealed class InProcessRunnerContext : IRunnerContext private int _runEnded; private readonly string _runId; private readonly Workflow _workflow; + private readonly object? 
_previousOwnership; + private bool _ownsWorkflow; private readonly EdgeMap _edgeMap; private readonly OutputFilter _outputFilter; @@ -54,7 +56,10 @@ public InProcessRunnerContext( else { workflow.TakeOwnership(this, existingOwnershipSignoff: existingOwnershipSignoff); + this._previousOwnership = existingOwnershipSignoff; + this._ownsWorkflow = true; } + this._workflow = workflow; this._runId = runId; @@ -211,10 +216,27 @@ await this._edgeMap.PrepareDeliveryForEdgeAsync(edge, envelope) } } + private async ValueTask YieldOutputAsync(string sourceId, object output, CancellationToken cancellationToken = default) + { + this.CheckEnded(); + Throw.IfNull(output); + + Executor sourceExecutor = await this.EnsureExecutorAsync(sourceId, tracer: null, cancellationToken).ConfigureAwait(false); + if (!sourceExecutor.CanOutput(output.GetType())) + { + throw new InvalidOperationException($"Cannot output object of type {output.GetType().Name}. Expecting one of [{string.Join(", ", sourceExecutor.OutputTypes)}]."); + } + + if (this._outputFilter.CanOutput(sourceId, output)) + { + await this.AddEventAsync(new WorkflowOutputEvent(output, sourceId), cancellationToken).ConfigureAwait(false); + } + } + public IWorkflowContext Bind(string executorId, Dictionary? traceContext = null) { this.CheckEnded(); - return new BoundContext(this, executorId, this._outputFilter, traceContext); + return new BoundContext(this, executorId, traceContext); } public ValueTask PostAsync(ExternalRequest request) @@ -241,7 +263,6 @@ public bool CompleteRequest(string requestId) private sealed class BoundContext( InProcessRunnerContext RunnerContext, string ExecutorId, - OutputFilter outputFilter, Dictionary? traceContext) : IWorkflowContext { public ValueTask AddEventAsync(WorkflowEvent workflowEvent, CancellationToken cancellationToken = default) => RunnerContext.AddEventAsync(workflowEvent, cancellationToken); @@ -251,21 +272,9 @@ public ValueTask SendMessageAsync(object message, string? 
targetId = null, Cance return RunnerContext.SendMessageAsync(ExecutorId, message, targetId, cancellationToken); } - public async ValueTask YieldOutputAsync(object output, CancellationToken cancellationToken = default) + public ValueTask YieldOutputAsync(object output, CancellationToken cancellationToken = default) { - RunnerContext.CheckEnded(); - Throw.IfNull(output); - - Executor sourceExecutor = await RunnerContext.EnsureExecutorAsync(ExecutorId, tracer: null, cancellationToken).ConfigureAwait(false); - if (!sourceExecutor.CanOutput(output.GetType())) - { - throw new InvalidOperationException($"Cannot output object of type {output.GetType().Name}. Expecting one of [{string.Join(", ", sourceExecutor.OutputTypes)}]."); - } - - if (outputFilter.CanOutput(ExecutorId, output)) - { - await this.AddEventAsync(new WorkflowOutputEvent(output, ExecutorId), cancellationToken).ConfigureAwait(false); - } + return RunnerContext.YieldOutputAsync(ExecutorId, output, cancellationToken); } public ValueTask RequestHaltAsync() => this.AddEventAsync(new RequestHaltEvent()); @@ -389,7 +398,9 @@ public async ValueTask EndRunAsync() { foreach (string executorId in this._executors.Keys) { - Task executor = this._executors[executorId]; + Task executorTask = this._executors[executorId]; + Executor executor = await executorTask.ConfigureAwait(false); + if (executor is IAsyncDisposable asyncDisposable) { await asyncDisposable.DisposeAsync().ConfigureAwait(false); @@ -400,9 +411,10 @@ public async ValueTask EndRunAsync() } } - if (!this.ConcurrentRunsEnabled) + if (this._ownsWorkflow) { - await this._workflow.ReleaseOwnershipAsync(this).ConfigureAwait(false); + await this._workflow.ReleaseOwnershipAsync(this, this._previousOwnership).ConfigureAwait(false); + this._ownsWorkflow = false; } } } @@ -429,4 +441,7 @@ ValueTask ISuperStepJoinContext.ForwardWorkflowEventAsync(WorkflowEvent workflow ValueTask ISuperStepJoinContext.SendMessageAsync(string senderId, [DisallowNull] TMessage message, 
CancellationToken cancellationToken) => this.SendMessageAsync(senderId, Throw.IfNull(message), cancellationToken: cancellationToken); + + ValueTask ISuperStepJoinContext.YieldOutputAsync(string senderId, [DisallowNull] TOutput output, CancellationToken cancellationToken) + => this.YieldOutputAsync(senderId, Throw.IfNull(output), cancellationToken); } diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/InProcessExecution.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/InProcessExecution.cs index dc110e7570..f21117a38a 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/InProcessExecution.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/InProcessExecution.cs @@ -57,9 +57,9 @@ public static ValueTask> StreamAsync(Workflow workflo public static ValueTask> StreamAsync(Workflow workflow, TInput input, CheckpointManager checkpointManager, string? runId = null, CancellationToken cancellationToken = default) where TInput : notnull => Default.StreamAsync(workflow, input, checkpointManager, runId, cancellationToken); - /// - public static ValueTask> ResumeStreamAsync(Workflow workflow, CheckpointInfo fromCheckpoint, CheckpointManager checkpointManager, string? runId = null, CancellationToken cancellationToken = default) - => Default.ResumeStreamAsync(workflow, fromCheckpoint, checkpointManager, runId, cancellationToken); + /// + public static ValueTask> ResumeStreamAsync(Workflow workflow, CheckpointInfo fromCheckpoint, CheckpointManager checkpointManager, CancellationToken cancellationToken = default) + => Default.ResumeStreamAsync(workflow, fromCheckpoint, checkpointManager, cancellationToken); /// public static ValueTask RunAsync(Workflow workflow, TInput input, string? runId = null, CancellationToken cancellationToken = default) where TInput : notnull @@ -69,7 +69,7 @@ public static ValueTask RunAsync(Workflow workflow, TInput input, s public static ValueTask> RunAsync(Workflow workflow, TInput input, CheckpointManager checkpointManager, string? 
runId = null, CancellationToken cancellationToken = default) where TInput : notnull => Default.RunAsync(workflow, input, checkpointManager, runId, cancellationToken); - /// - public static ValueTask> ResumeAsync(Workflow workflow, CheckpointInfo fromCheckpoint, CheckpointManager checkpointManager, string? runId = null, CancellationToken cancellationToken = default) - => Default.ResumeAsync(workflow, fromCheckpoint, checkpointManager, runId, cancellationToken); + /// + public static ValueTask> ResumeAsync(Workflow workflow, CheckpointInfo fromCheckpoint, CheckpointManager checkpointManager, CancellationToken cancellationToken = default) + => Default.ResumeAsync(workflow, fromCheckpoint, checkpointManager, cancellationToken); } diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/Microsoft.Agents.AI.Workflows.csproj b/dotnet/src/Microsoft.Agents.AI.Workflows/Microsoft.Agents.AI.Workflows.csproj index 7379d9a6ac..3ecf31e132 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/Microsoft.Agents.AI.Workflows.csproj +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/Microsoft.Agents.AI.Workflows.csproj @@ -25,6 +25,15 @@ + + + + + + diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/ProtocolDescriptor.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/ProtocolDescriptor.cs index 91adc4dbae..bb2663c100 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/ProtocolDescriptor.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/ProtocolDescriptor.cs @@ -12,12 +12,18 @@ namespace Microsoft.Agents.AI.Workflows; public class ProtocolDescriptor { /// - /// Get the collection of types accepted by the or . + /// Get the collection of types explicitly accepted by the or . /// public IEnumerable Accepts { get; } - internal ProtocolDescriptor(IEnumerable acceptedTypes) + /// + /// Gets a value indicating whether the or has a "catch-all" handler. 
+ /// + public bool AcceptsAll { get; set; } + + internal ProtocolDescriptor(IEnumerable acceptedTypes, bool acceptsAll) { this.Accepts = acceptedTypes.ToArray(); + this.AcceptsAll = acceptsAll; } } diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/Specialized/OutputMessagesExecutor.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/Specialized/OutputMessagesExecutor.cs index e727e30bac..b3c714406d 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/Specialized/OutputMessagesExecutor.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/Specialized/OutputMessagesExecutor.cs @@ -7,17 +7,16 @@ namespace Microsoft.Agents.AI.Workflows; -public static partial class AgentWorkflowBuilder +/// +/// Provides an executor that batches received chat messages that it then publishes as the final result +/// when receiving a . +/// +internal sealed class OutputMessagesExecutor(ChatProtocolExecutorOptions? options = null) : ChatProtocolExecutor(ExecutorId, options, declareCrossRunShareable: true), IResettableExecutor { - /// - /// Provides an executor that batches received chat messages that it then publishes as the final result - /// when receiving a . - /// - internal sealed class OutputMessagesExecutor() : ChatProtocolExecutor("OutputMessages", declareCrossRunShareable: true), IResettableExecutor - { - protected override ValueTask TakeTurnAsync(List messages, IWorkflowContext context, bool? emitEvents, CancellationToken cancellationToken = default) - => context.YieldOutputAsync(messages, cancellationToken); + public const string ExecutorId = "OutputMessages"; - ValueTask IResettableExecutor.ResetAsync() => default; - } + protected override ValueTask TakeTurnAsync(List messages, IWorkflowContext context, bool? 
emitEvents, CancellationToken cancellationToken = default) + => context.YieldOutputAsync(messages, cancellationToken); + + ValueTask IResettableExecutor.ResetAsync() => default; } diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/Specialized/WorkflowHostExecutor.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/Specialized/WorkflowHostExecutor.cs index 409f751107..ab8a499a75 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/Specialized/WorkflowHostExecutor.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/Specialized/WorkflowHostExecutor.cs @@ -79,7 +79,7 @@ internal async ValueTask EnsureRunnerAsync() // serialization because we will be relying on the parent workflow's checkpoint manager to do that, // if needed. For our purposes, all we need is to keep a faithful representation of the checkpointed // objects so we can emit them back to the parent workflow on checkpoint creation. - this._checkpointManager = new InMemoryCheckpointManager(); + this._checkpointManager ??= new InMemoryCheckpointManager(); } this._activeRunner = InProcessRunner.CreateSubworkflowRunner(this._workflow, @@ -124,7 +124,7 @@ internal async ValueTask EnsureRunSendMessageAsync(object? incomin if (incomingMessage != null) { - await runHandle.EnqueueUntypedAndRunAsync(incomingMessage, cancellationToken).ConfigureAwait(false); + await runHandle.EnqueueMessageUntypedAsync(incomingMessage, cancellationToken: cancellationToken).ConfigureAwait(false); } } else if (incomingMessage != null) @@ -132,7 +132,7 @@ internal async ValueTask EnsureRunSendMessageAsync(object? 
incomin runHandle = await activeRunner.BeginStreamAsync(ExecutionMode.Subworkflow, cancellationToken) .ConfigureAwait(false); - await runHandle.EnqueueUntypedAndRunAsync(incomingMessage, cancellationToken).ConfigureAwait(false); + await runHandle.EnqueueMessageUntypedAsync(incomingMessage, cancellationToken: cancellationToken).ConfigureAwait(false); } else { @@ -198,6 +198,13 @@ private async ValueTask ForwardWorkflowEventAsync(object? sender, WorkflowEvent { resultTask = this._joinContext.SendMessageAsync(this.Id, outputEvent.Data).AsTask(); } + + if (this._joinContext != null && + this._options.AutoYieldOutputHandlerResultObject + && outputEvent.Data != null) + { + resultTask = this._joinContext.YieldOutputAsync(this.Id, outputEvent.Data).AsTask(); + } break; case RequestHaltEvent requestHaltEvent: resultTask = this._joinContext?.ForwardWorkflowEventAsync(new RequestHaltEvent()).AsTask() ?? Task.CompletedTask; @@ -231,9 +238,10 @@ internal async ValueTask AttachSuperStepContextAsync(ISuperStepJoinContext joinC this._joinContext = Throw.IfNull(joinContext); } + private const string CheckpointManagerStateKey = nameof(CheckpointManager); protected internal override async ValueTask OnCheckpointingAsync(IWorkflowContext context, CancellationToken cancellationToken = default) { - await context.QueueStateUpdateAsync(nameof(CheckpointManager), this._checkpointManager, cancellationToken: cancellationToken).ConfigureAwait(false); + await context.QueueStateUpdateAsync(CheckpointManagerStateKey, this._checkpointManager, cancellationToken: cancellationToken).ConfigureAwait(false); await base.OnCheckpointingAsync(context, cancellationToken).ConfigureAwait(false); } @@ -242,7 +250,7 @@ protected internal override async ValueTask OnCheckpointRestoredAsync(IWorkflowC { await base.OnCheckpointRestoredAsync(context, cancellationToken).ConfigureAwait(false); - InMemoryCheckpointManager manager = await context.ReadStateAsync(nameof(InMemoryCheckpointManager), cancellationToken: 
cancellationToken).ConfigureAwait(false) ?? new(); + InMemoryCheckpointManager manager = await context.ReadStateAsync(CheckpointManagerStateKey, cancellationToken: cancellationToken).ConfigureAwait(false) ?? new(); if (this._checkpointManager == manager) { // We are restoring in the context of the same run; not need to rebuild the entire execution stack. @@ -254,7 +262,7 @@ protected internal override async ValueTask OnCheckpointRestoredAsync(IWorkflowC await this.ResetAsync().ConfigureAwait(false); } - StreamingRun run = await this.EnsureRunSendMessageAsync(cancellationToken: cancellationToken).ConfigureAwait(false); + await this.EnsureRunSendMessageAsync(resume: true, cancellationToken: cancellationToken).ConfigureAwait(false); } private async ValueTask ResetAsync() @@ -273,15 +281,10 @@ private async ValueTask ResetAsync() this._activeRunner = null; } - if (this._joinContext != null) + if (this._joinContext != null && this._joinId != null) { - if (this._joinId != null) - { - await this._joinContext.DetachSuperstepAsync(this._joinId).ConfigureAwait(false); - this._joinId = null; - } - - this._joinContext = null; + await this._joinContext.DetachSuperstepAsync(this._joinId).ConfigureAwait(false); + this._joinId = null; } } diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/SubworkflowBinding.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/SubworkflowBinding.cs index 1f29ffe426..389aa19afc 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/SubworkflowBinding.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/SubworkflowBinding.cs @@ -16,9 +16,9 @@ namespace Microsoft.Agents.AI.Workflows; /// public record SubworkflowBinding(Workflow WorkflowInstance, string Id, ExecutorOptions? 
ExecutorOptions = null) : ExecutorBinding(Throw.IfNull(Id), - CreateWorkflowExecutorFactory(WorkflowInstance, Id, ExecutorOptions), - typeof(WorkflowHostExecutor), - WorkflowInstance) + CreateWorkflowExecutorFactory(WorkflowInstance, Id, ExecutorOptions), + typeof(WorkflowHostExecutor), + WorkflowInstance) { private static Func> CreateWorkflowExecutorFactory(Workflow workflow, string id, ExecutorOptions? options) { diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/Workflow.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/Workflow.cs index 456838b9eb..7486c54914 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/Workflow.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/Workflow.cs @@ -51,6 +51,15 @@ public Dictionary ReflectPorts() ); } + /// + /// Gets the collection of executor bindings, keyed by their ID. + /// + /// A copy of the executor bindings dictionary. Modifications do not affect the workflow. + public Dictionary ReflectExecutors() + { + return new Dictionary(this.ExecutorBindings); + } + /// /// Gets the identifier of the starting executor of the workflow. /// @@ -166,9 +175,9 @@ internal void TakeOwnership(object ownerToken, bool subworkflow = false, object? [System.Diagnostics.CodeAnalysis.SuppressMessage("Maintainability", "CA1513:Use ObjectDisposedException throw helper", Justification = "Does not exist in NetFx 4.7.2")] - internal async ValueTask ReleaseOwnershipAsync(object ownerToken) + internal async ValueTask ReleaseOwnershipAsync(object ownerToken, object? targetOwnerToken) { - object? originalToken = Interlocked.CompareExchange(ref this._ownerToken, null, ownerToken) ?? + object? originalToken = Interlocked.CompareExchange(ref this._ownerToken, targetOwnerToken, ownerToken) ?? 
throw new InvalidOperationException("Attempting to release ownership of a Workflow that is not owned."); if (!ReferenceEquals(originalToken, ownerToken)) diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowMessageStore.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowChatHistoryProvider.cs similarity index 93% rename from dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowMessageStore.cs rename to dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowChatHistoryProvider.cs index 87cef04e76..afe6706553 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowMessageStore.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowChatHistoryProvider.cs @@ -10,16 +10,16 @@ namespace Microsoft.Agents.AI.Workflows; -internal sealed class WorkflowMessageStore : ChatMessageStore +internal sealed class WorkflowChatHistoryProvider : ChatHistoryProvider { private int _bookmark; private readonly List _chatMessages = []; - public WorkflowMessageStore() + public WorkflowChatHistoryProvider() { } - public WorkflowMessageStore(StoreState state) + public WorkflowChatHistoryProvider(StoreState state) { this.ImportStoreState(Throw.IfNull(state)); } diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowHostAgent.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowHostAgent.cs index f20660bc51..fe597c17f3 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowHostAgent.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowHostAgent.cs @@ -19,11 +19,12 @@ internal sealed class WorkflowHostAgent : AIAgent private readonly CheckpointManager? _checkpointManager; private readonly IWorkflowExecutionEnvironment _executionEnvironment; private readonly bool _includeExceptionDetails; + private readonly bool _includeWorkflowOutputsInResponse; private readonly Task _describeTask; private readonly ConcurrentDictionary _assignedRunIds = []; - public WorkflowHostAgent(Workflow workflow, string? id = null, string? name = null, string? 
description = null, CheckpointManager? checkpointManager = null, IWorkflowExecutionEnvironment? executionEnvironment = null, bool includeExceptionDetails = false) + public WorkflowHostAgent(Workflow workflow, string? id = null, string? name = null, string? description = null, CheckpointManager? checkpointManager = null, IWorkflowExecutionEnvironment? executionEnvironment = null, bool includeExceptionDetails = false, bool includeWorkflowOutputsInResponse = false) { this._workflow = Throw.IfNull(workflow); @@ -32,6 +33,7 @@ public WorkflowHostAgent(Workflow workflow, string? id = null, string? name = nu : InProcessExecution.OffThread); this._checkpointManager = checkpointManager; this._includeExceptionDetails = includeExceptionDetails; + this._includeWorkflowOutputsInResponse = includeWorkflowOutputsInResponse; this._id = id; this.Name = name; @@ -60,14 +62,14 @@ private string GenerateNewId() private async ValueTask ValidateWorkflowAsync() { ProtocolDescriptor protocol = await this._describeTask.ConfigureAwait(false); - protocol.ThrowIfNotChatProtocol(); + protocol.ThrowIfNotChatProtocol(allowCatchAll: true); } public override ValueTask GetNewThreadAsync(CancellationToken cancellationToken = default) - => new(new WorkflowThread(this._workflow, this.GenerateNewId(), this._executionEnvironment, this._checkpointManager, this._includeExceptionDetails)); + => new(new WorkflowThread(this._workflow, this.GenerateNewId(), this._executionEnvironment, this._checkpointManager, this._includeExceptionDetails, this._includeWorkflowOutputsInResponse)); public override ValueTask DeserializeThreadAsync(JsonElement serializedThread, JsonSerializerOptions? 
jsonSerializerOptions = null, CancellationToken cancellationToken = default) - => new(new WorkflowThread(this._workflow, serializedThread, this._executionEnvironment, this._checkpointManager, this._includeExceptionDetails, jsonSerializerOptions)); + => new(new WorkflowThread(this._workflow, serializedThread, this._executionEnvironment, this._checkpointManager, this._includeExceptionDetails, this._includeWorkflowOutputsInResponse, jsonSerializerOptions)); private async ValueTask UpdateThreadAsync(IEnumerable messages, AgentThread? thread = null, CancellationToken cancellationToken = default) { @@ -80,7 +82,7 @@ private async ValueTask UpdateThreadAsync(IEnumerable for the in-process environments. /// If , will include /// in the representing the workflow error. + /// If , will transform outgoing workflow outputs + /// into into content in s or the as appropriate. /// public static AIAgent AsAgent( this Workflow workflow, @@ -31,9 +33,10 @@ public static AIAgent AsAgent( string? description = null, CheckpointManager? checkpointManager = null, IWorkflowExecutionEnvironment? 
executionEnvironment = null, - bool includeExceptionDetails = false) + bool includeExceptionDetails = false, + bool includeWorkflowOutputsInResponse = false) { - return new WorkflowHostAgent(workflow, id, name, description, checkpointManager, executionEnvironment, includeExceptionDetails); + return new WorkflowHostAgent(workflow, id, name, description, checkpointManager, executionEnvironment, includeExceptionDetails, includeWorkflowOutputsInResponse); } internal static FunctionCallContent ToFunctionCall(this ExternalRequest request) diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowThread.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowThread.cs index 6f00566b95..270ec02149 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowThread.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowThread.cs @@ -19,15 +19,17 @@ internal sealed class WorkflowThread : AgentThread private readonly Workflow _workflow; private readonly IWorkflowExecutionEnvironment _executionEnvironment; private readonly bool _includeExceptionDetails; + private readonly bool _includeWorkflowOutputsInResponse; private readonly CheckpointManager _checkpointManager; private readonly InMemoryCheckpointManager? _inMemoryCheckpointManager; - public WorkflowThread(Workflow workflow, string runId, IWorkflowExecutionEnvironment executionEnvironment, CheckpointManager? checkpointManager = null, bool includeExceptionDetails = false) + public WorkflowThread(Workflow workflow, string runId, IWorkflowExecutionEnvironment executionEnvironment, CheckpointManager? 
checkpointManager = null, bool includeExceptionDetails = false, bool includeWorkflowOutputsInResponse = false) { this._workflow = Throw.IfNull(workflow); this._executionEnvironment = Throw.IfNull(executionEnvironment); this._includeExceptionDetails = includeExceptionDetails; + this._includeWorkflowOutputsInResponse = includeWorkflowOutputsInResponse; // If the user provided an external checkpoint manager, use that, otherwise rely on an in-memory one. // TODO: Implement persist-only-last functionality for in-memory checkpoint manager, to avoid unbounded @@ -35,13 +37,15 @@ public WorkflowThread(Workflow workflow, string runId, IWorkflowExecutionEnviron this._checkpointManager = checkpointManager ?? new(this._inMemoryCheckpointManager = new()); this.RunId = Throw.IfNullOrEmpty(runId); - this.MessageStore = new WorkflowMessageStore(); + this.ChatHistoryProvider = new WorkflowChatHistoryProvider(); } - public WorkflowThread(Workflow workflow, JsonElement serializedThread, IWorkflowExecutionEnvironment executionEnvironment, CheckpointManager? checkpointManager = null, bool includeExceptionDetails = false, JsonSerializerOptions? jsonSerializerOptions = null) + public WorkflowThread(Workflow workflow, JsonElement serializedThread, IWorkflowExecutionEnvironment executionEnvironment, CheckpointManager? checkpointManager = null, bool includeExceptionDetails = false, bool includeWorkflowOutputsInResponse = false, JsonSerializerOptions? 
jsonSerializerOptions = null) { this._workflow = Throw.IfNull(workflow); this._executionEnvironment = Throw.IfNull(executionEnvironment); + this._includeExceptionDetails = includeExceptionDetails; + this._includeWorkflowOutputsInResponse = includeWorkflowOutputsInResponse; JsonMarshaller marshaller = new(jsonSerializerOptions); ThreadState threadState = marshaller.Marshal(serializedThread); @@ -66,7 +70,7 @@ public WorkflowThread(Workflow workflow, JsonElement serializedThread, IWorkflow this.RunId = threadState.RunId; this.LastCheckpoint = threadState.LastCheckpoint; - this.MessageStore = new WorkflowMessageStore(threadState.MessageStoreState); + this.ChatHistoryProvider = new WorkflowChatHistoryProvider(threadState.ChatHistoryProviderState); } public CheckpointInfo? LastCheckpoint { get; set; } @@ -77,7 +81,7 @@ public override JsonElement Serialize(JsonSerializerOptions? jsonSerializerOptio ThreadState info = new( this.RunId, this.LastCheckpoint, - this.MessageStore.ExportStoreState(), + this.ChatHistoryProvider.ExportStoreState(), this._inMemoryCheckpointManager); return marshaller.Marshal(info); @@ -96,7 +100,24 @@ public AgentResponseUpdate CreateUpdate(string responseId, object raw, params AI RawRepresentation = raw }; - this.MessageStore.AddMessages(update.ToChatMessage()); + this.ChatHistoryProvider.AddMessages(update.ToChatMessage()); + + return update; + } + + public AgentResponseUpdate CreateUpdate(string responseId, object raw, ChatMessage message) + { + Throw.IfNull(message); + + AgentResponseUpdate update = new(message.Role, message.Contents) + { + CreatedAt = message.CreatedAt ?? DateTimeOffset.UtcNow, + MessageId = message.MessageId ?? 
Guid.NewGuid().ToString("N"), + ResponseId = responseId, + RawRepresentation = raw + }; + + this.ChatHistoryProvider.AddMessages(update.ToChatMessage()); return update; } @@ -112,7 +133,6 @@ await this._executionEnvironment .ResumeStreamAsync(this._workflow, this.LastCheckpoint, this._checkpointManager, - this.RunId, cancellationToken) .ConfigureAwait(false); @@ -136,7 +156,7 @@ IAsyncEnumerable InvokeStageAsync( try { this.LastResponseId = Guid.NewGuid().ToString("N"); - List messages = this.MessageStore.GetFromBookmark().ToList(); + List messages = this.ChatHistoryProvider.GetFromBookmark().ToList(); #pragma warning disable CA2007 // Analyzer misfiring and not seeing .ConfigureAwait(false) below. await using Checkpointed checkpointed = @@ -184,6 +204,25 @@ IAsyncEnumerable InvokeStageAsync( this.LastCheckpoint = stepCompleted.CompletionInfo?.Checkpoint; goto default; + case WorkflowOutputEvent output: + IEnumerable? updateMessages = output.Data switch + { + IEnumerable chatMessages => chatMessages, + ChatMessage chatMessage => [chatMessage], + _ => null + }; + + if (!this._includeWorkflowOutputsInResponse || updateMessages == null) + { + goto default; + } + + foreach (ChatMessage message in updateMessages) + { + yield return this.CreateUpdate(this.LastResponseId, evt, message); + } + break; + default: // Emit all other workflow events for observability (DevUI, logging, etc.) yield return new AgentResponseUpdate(ChatRole.Assistant, []) @@ -201,7 +240,7 @@ IAsyncEnumerable InvokeStageAsync( finally { // Do we want to try to undo the step, and not update the bookmark? - this.MessageStore.UpdateBookmark(); + this.ChatHistoryProvider.UpdateBookmark(); } } @@ -210,17 +249,17 @@ IAsyncEnumerable InvokeStageAsync( public string RunId { get; } /// - public WorkflowMessageStore MessageStore { get; } + public WorkflowChatHistoryProvider ChatHistoryProvider { get; } internal sealed class ThreadState( string runId, CheckpointInfo? 
lastCheckpoint, - WorkflowMessageStore.StoreState messageStoreState, + WorkflowChatHistoryProvider.StoreState chatHistoryProviderState, InMemoryCheckpointManager? checkpointManager = null) { public string RunId { get; } = runId; public CheckpointInfo? LastCheckpoint { get; } = lastCheckpoint; - public WorkflowMessageStore.StoreState MessageStoreState { get; } = messageStoreState; + public WorkflowChatHistoryProvider.StoreState ChatHistoryProviderState { get; } = chatHistoryProviderState; public InMemoryCheckpointManager? CheckpointManager { get; } = checkpointManager; } } diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowsJsonUtilities.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowsJsonUtilities.cs index d8241f4681..f27b2d9299 100644 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowsJsonUtilities.cs +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/WorkflowsJsonUtilities.cs @@ -83,7 +83,7 @@ private static JsonSerializerOptions CreateDefaultOptions() [JsonSerializable(typeof(EdgeConnection))] // Workflow-as-Agent - [JsonSerializable(typeof(WorkflowMessageStore.StoreState))] + [JsonSerializable(typeof(WorkflowChatHistoryProvider.StoreState))] [JsonSerializable(typeof(WorkflowThread.ThreadState))] // Message Types diff --git a/dotnet/src/Microsoft.Agents.AI/AgentExtensions.cs b/dotnet/src/Microsoft.Agents.AI/AgentExtensions.cs index 097b789a84..07247b059d 100644 --- a/dotnet/src/Microsoft.Agents.AI/AgentExtensions.cs +++ b/dotnet/src/Microsoft.Agents.AI/AgentExtensions.cs @@ -73,7 +73,12 @@ async Task InvokeAgentAsync( [Description("Input query to invoke the agent.")] string query, CancellationToken cancellationToken) { - var response = await agent.RunAsync(query, thread: thread, cancellationToken: cancellationToken).ConfigureAwait(false); + // Propagate any additional properties from the parent agent's run to the child agent if the parent is using a FunctionInvokingChatClient. + AgentRunOptions? 
agentRunOptions = FunctionInvokingChatClient.CurrentContext?.Options?.AdditionalProperties is AdditionalPropertiesDictionary dict + ? new AgentRunOptions { AdditionalProperties = dict } + : null; + + var response = await agent.RunAsync(query, thread: thread, options: agentRunOptions, cancellationToken: cancellationToken).ConfigureAwait(false); return response.Text; } diff --git a/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgent.cs b/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgent.cs index 4a42241b3c..3b416c8979 100644 --- a/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgent.cs +++ b/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgent.cs @@ -78,7 +78,7 @@ public ChatClientAgent(IChatClient chatClient, string? instructions = null, stri /// The chat client to use when running the agent. /// /// Configuration options that control all aspects of the agent's behavior, including chat settings, - /// message store factories, context provider factories, and other advanced configurations. + /// chat history provider factories, context provider factories, and other advanced configurations. /// /// /// Optional logger factory for creating loggers used by the agent and its components. @@ -208,7 +208,7 @@ protected override async IAsyncEnumerable RunCoreStreamingA ChatOptions? chatOptions, List inputMessagesForChatClient, IList? aiContextProviderMessages, - IList? chatMessageStoreMessages, + IList? chatHistoryProviderMessages, ChatClientAgentContinuationToken? 
continuationToken) = await this.PrepareThreadAndMessagesAsync(thread, inputMessages, options, cancellationToken).ConfigureAwait(false); @@ -231,7 +231,7 @@ protected override async IAsyncEnumerable RunCoreStreamingA } catch (Exception ex) { - await NotifyMessageStoreOfFailureAsync(safeThread, ex, GetInputMessages(inputMessages, continuationToken), chatMessageStoreMessages, aiContextProviderMessages, cancellationToken).ConfigureAwait(false); + await NotifyChatHistoryProviderOfFailureAsync(safeThread, ex, GetInputMessages(inputMessages, continuationToken), chatHistoryProviderMessages, aiContextProviderMessages, chatOptions, cancellationToken).ConfigureAwait(false); await NotifyAIContextProviderOfFailureAsync(safeThread, ex, GetInputMessages(inputMessages, continuationToken), aiContextProviderMessages, cancellationToken).ConfigureAwait(false); throw; } @@ -246,7 +246,7 @@ protected override async IAsyncEnumerable RunCoreStreamingA } catch (Exception ex) { - await NotifyMessageStoreOfFailureAsync(safeThread, ex, GetInputMessages(inputMessages, continuationToken), chatMessageStoreMessages, aiContextProviderMessages, cancellationToken).ConfigureAwait(false); + await NotifyChatHistoryProviderOfFailureAsync(safeThread, ex, GetInputMessages(inputMessages, continuationToken), chatHistoryProviderMessages, aiContextProviderMessages, chatOptions, cancellationToken).ConfigureAwait(false); await NotifyAIContextProviderOfFailureAsync(safeThread, ex, GetInputMessages(inputMessages, continuationToken), aiContextProviderMessages, cancellationToken).ConfigureAwait(false); throw; } @@ -273,7 +273,7 @@ protected override async IAsyncEnumerable RunCoreStreamingA } catch (Exception ex) { - await NotifyMessageStoreOfFailureAsync(safeThread, ex, GetInputMessages(inputMessages, continuationToken), chatMessageStoreMessages, aiContextProviderMessages, cancellationToken).ConfigureAwait(false); + await NotifyChatHistoryProviderOfFailureAsync(safeThread, ex, GetInputMessages(inputMessages, 
continuationToken), chatHistoryProviderMessages, aiContextProviderMessages, chatOptions, cancellationToken).ConfigureAwait(false); await NotifyAIContextProviderOfFailureAsync(safeThread, ex, GetInputMessages(inputMessages, continuationToken), aiContextProviderMessages, cancellationToken).ConfigureAwait(false); throw; } @@ -286,7 +286,7 @@ protected override async IAsyncEnumerable RunCoreStreamingA await this.UpdateThreadWithTypeAndConversationIdAsync(safeThread, chatResponse.ConversationId, cancellationToken).ConfigureAwait(false); // To avoid inconsistent state we only notify the thread of the input messages if no error occurs after the initial request. - await NotifyMessageStoreOfNewMessagesAsync(safeThread, GetInputMessages(inputMessages, continuationToken), chatMessageStoreMessages, aiContextProviderMessages, chatResponse.Messages, cancellationToken).ConfigureAwait(false); + await NotifyChatHistoryProviderOfNewMessagesAsync(safeThread, GetInputMessages(inputMessages, continuationToken), chatHistoryProviderMessages, aiContextProviderMessages, chatResponse.Messages, chatOptions, cancellationToken).ConfigureAwait(false); // Notify the AIContextProvider of all new messages. await NotifyAIContextProviderOfSuccessAsync(safeThread, GetInputMessages(inputMessages, continuationToken), aiContextProviderMessages, chatResponse.Messages, cancellationToken).ConfigureAwait(false); @@ -304,8 +304,8 @@ protected override async IAsyncEnumerable RunCoreStreamingA /// public override async ValueTask GetNewThreadAsync(CancellationToken cancellationToken = default) { - ChatMessageStore? messageStore = this._agentOptions?.ChatMessageStoreFactory is not null - ? await this._agentOptions.ChatMessageStoreFactory.Invoke(new() { SerializedState = default, JsonSerializerOptions = null }, cancellationToken).ConfigureAwait(false) + ChatHistoryProvider? chatHistoryProvider = this._agentOptions?.ChatHistoryProviderFactory is not null + ? 
await this._agentOptions.ChatHistoryProviderFactory.Invoke(new() { SerializedState = default, JsonSerializerOptions = null }, cancellationToken).ConfigureAwait(false) : null; AIContextProvider? contextProvider = this._agentOptions?.AIContextProviderFactory is not null @@ -314,7 +314,7 @@ public override async ValueTask GetNewThreadAsync(CancellationToken return new ChatClientAgentThread { - MessageStore = messageStore, + ChatHistoryProvider = chatHistoryProvider, AIContextProvider = contextProvider }; } @@ -329,8 +329,8 @@ public override async ValueTask GetNewThreadAsync(CancellationToken /// /// /// - /// This method creates threads that rely on server-side conversation storage, where the chat history - /// is maintained by the underlying AI service rather than in local message stores. + /// This method creates an that relies on server-side chat history storage, where the chat history + /// is maintained by the underlying AI service rather than by a local . /// /// /// Agent threads created with this method will only work with @@ -351,28 +351,28 @@ public async ValueTask GetNewThreadAsync(string conversationId, Can } /// - /// Creates a new agent thread instance using an existing to continue a conversation. + /// Creates a new agent thread instance using an existing to continue a conversation. /// - /// The instance to use for managing the conversation's message history. + /// The instance to use for managing the conversation's message history. /// The to monitor for cancellation requests. /// - /// A value task representing the asynchronous operation. The task result contains a new instance configured to work with the provided . + /// A value task representing the asynchronous operation. The task result contains a new instance configured to work with the provided . /// /// /// /// This method creates threads that do not support server-side conversation storage. 
/// Some AI services require server-side conversation storage to function properly, and creating a thread - /// with a may not be compatible with these services. + /// with a may not be compatible with these services. /// /// /// Where a service requires server-side conversation storage, use . /// /// /// If the agent detects, during the first run, that the underlying AI service requires server-side conversation storage, - /// the thread will throw an exception to indicate that it cannot continue using the provided . + /// the thread will throw an exception to indicate that it cannot continue using the provided . /// /// - public async ValueTask GetNewThreadAsync(ChatMessageStore chatMessageStore, CancellationToken cancellationToken = default) + public async ValueTask GetNewThreadAsync(ChatHistoryProvider chatHistoryProvider, CancellationToken cancellationToken = default) { AIContextProvider? contextProvider = this._agentOptions?.AIContextProviderFactory is not null ? await this._agentOptions.AIContextProviderFactory.Invoke(new() { SerializedState = default, JsonSerializerOptions = null }, cancellationToken).ConfigureAwait(false) @@ -380,7 +380,7 @@ public async ValueTask GetNewThreadAsync(ChatMessageStore chatMessa return new ChatClientAgentThread() { - MessageStore = Throw.IfNull(chatMessageStore), + ChatHistoryProvider = Throw.IfNull(chatHistoryProvider), AIContextProvider = contextProvider }; } @@ -388,9 +388,9 @@ public async ValueTask GetNewThreadAsync(ChatMessageStore chatMessa /// public override async ValueTask DeserializeThreadAsync(JsonElement serializedThread, JsonSerializerOptions? jsonSerializerOptions = null, CancellationToken cancellationToken = default) { - Func>? chatMessageStoreFactory = this._agentOptions?.ChatMessageStoreFactory is null ? + Func>? chatHistoryProviderFactory = this._agentOptions?.ChatHistoryProviderFactory is null ? 
null : - (jse, jso, ct) => this._agentOptions.ChatMessageStoreFactory.Invoke(new() { SerializedState = jse, JsonSerializerOptions = jso }, ct); + (jse, jso, ct) => this._agentOptions.ChatHistoryProviderFactory.Invoke(new() { SerializedState = jse, JsonSerializerOptions = jso }, ct); Func>? aiContextProviderFactory = this._agentOptions?.AIContextProviderFactory is null ? null : @@ -399,7 +399,7 @@ public override async ValueTask DeserializeThreadAsync(JsonElement return await ChatClientAgentThread.DeserializeAsync( serializedThread, jsonSerializerOptions, - chatMessageStoreFactory, + chatHistoryProviderFactory, aiContextProviderFactory, cancellationToken).ConfigureAwait(false); } @@ -422,7 +422,7 @@ private async Task RunCoreAsync inputMessagesForChatClient, IList? aiContextProviderMessages, - IList? chatMessageStoreMessages, + IList? chatHistoryProviderMessages, ChatClientAgentContinuationToken? _) = await this.PrepareThreadAndMessagesAsync(thread, inputMessages, options, cancellationToken).ConfigureAwait(false); @@ -442,7 +442,7 @@ private async Task RunCoreAsync RunCoreAsync InputMessagesForChatClient, IList? AIContextProviderMessages, - IList? ChatMessageStoreMessages, + IList? ChatHistoryProviderMessages, ChatClientAgentContinuationToken? ContinuationToken )> PrepareThreadAndMessagesAsync( AgentThread? thread, @@ -703,18 +703,20 @@ private async Task List inputMessagesForChatClient = []; IList? aiContextProviderMessages = null; - IList? chatMessageStoreMessages = []; + IList? chatHistoryProviderMessages = null; // Populate the thread messages only if we are not continuing an existing response as it's not allowed if (chatOptions?.ContinuationToken is null) { + ChatHistoryProvider? chatHistoryProvider = ResolveChatHistoryProvider(typedThread, chatOptions); + // Add any existing messages from the thread to the messages to be sent to the chat client. 
- if (typedThread.MessageStore is not null) + if (chatHistoryProvider is not null) { - var invokingContext = new ChatMessageStore.InvokingContext(inputMessages); - var storeMessages = await typedThread.MessageStore.InvokingAsync(invokingContext, cancellationToken).ConfigureAwait(false); - inputMessagesForChatClient.AddRange(storeMessages); - chatMessageStoreMessages = storeMessages as IList ?? storeMessages.ToList(); + var invokingContext = new ChatHistoryProvider.InvokingContext(inputMessages); + var providerMessages = await chatHistoryProvider.InvokingAsync(invokingContext, cancellationToken).ConfigureAwait(false); + inputMessagesForChatClient.AddRange(providerMessages); + chatHistoryProviderMessages = providerMessages as IList ?? providerMessages.ToList(); } // Add the input messages before getting context from AIContextProvider. @@ -768,7 +770,7 @@ private async Task chatOptions.ConversationId = typedThread.ConversationId; } - return (typedThread, chatOptions, inputMessagesForChatClient, aiContextProviderMessages, chatMessageStoreMessages, continuationToken); + return (typedThread, chatOptions, inputMessagesForChatClient, aiContextProviderMessages, chatHistoryProviderMessages, continuationToken); } private async Task UpdateThreadWithTypeAndConversationIdAsync(ChatClientAgentThread thread, string? responseConversationId, CancellationToken cancellationToken) @@ -789,65 +791,80 @@ private async Task UpdateThreadWithTypeAndConversationIdAsync(ChatClientAgentThr else { // If the service doesn't use service side chat history storage (i.e. we got no id back from invocation), and - // the thread has no MessageStore yet, we should update the thread with the custom MessageStore or - // default InMemoryMessageStore so that it has somewhere to store the chat history. - thread.MessageStore ??= this._agentOptions?.ChatMessageStoreFactory is not null - ? 
await this._agentOptions.ChatMessageStoreFactory.Invoke(new() { SerializedState = default, JsonSerializerOptions = null }, cancellationToken).ConfigureAwait(false) - : new InMemoryChatMessageStore(); + // the thread has no ChatHistoryProvider yet, we should update the thread with the custom ChatHistoryProvider or + // default InMemoryChatHistoryProvider so that it has somewhere to store the chat history. + thread.ChatHistoryProvider ??= this._agentOptions?.ChatHistoryProviderFactory is not null + ? await this._agentOptions.ChatHistoryProviderFactory.Invoke(new() { SerializedState = default, JsonSerializerOptions = null }, cancellationToken).ConfigureAwait(false) + : new InMemoryChatHistoryProvider(); } } - private static Task NotifyMessageStoreOfFailureAsync( + private static Task NotifyChatHistoryProviderOfFailureAsync( ChatClientAgentThread thread, Exception ex, IEnumerable requestMessages, - IEnumerable? chatMessageStoreMessages, + IEnumerable? chatHistoryProviderMessages, IEnumerable? aiContextProviderMessages, + ChatOptions? chatOptions, CancellationToken cancellationToken) { - var messageStore = thread.MessageStore; + ChatHistoryProvider? provider = ResolveChatHistoryProvider(thread, chatOptions); - // Only notify the message store if we have one. + // Only notify the provider if we have one. // If we don't have one, it means that the chat history is service managed and the underlying service is responsible for storing messages. - if (messageStore is not null) + if (provider is not null) { - var invokedContext = new ChatMessageStore.InvokedContext(requestMessages, chatMessageStoreMessages!) + var invokedContext = new ChatHistoryProvider.InvokedContext(requestMessages, chatHistoryProviderMessages!) 
{ AIContextProviderMessages = aiContextProviderMessages, InvokeException = ex }; - return messageStore.InvokedAsync(invokedContext, cancellationToken).AsTask(); + return provider.InvokedAsync(invokedContext, cancellationToken).AsTask(); } return Task.CompletedTask; } - private static Task NotifyMessageStoreOfNewMessagesAsync( + private static Task NotifyChatHistoryProviderOfNewMessagesAsync( ChatClientAgentThread thread, IEnumerable requestMessages, - IEnumerable? chatMessageStoreMessages, + IEnumerable? chatHistoryProviderMessages, IEnumerable? aiContextProviderMessages, IEnumerable responseMessages, + ChatOptions? chatOptions, CancellationToken cancellationToken) { - var messageStore = thread.MessageStore; + ChatHistoryProvider? provider = ResolveChatHistoryProvider(thread, chatOptions); - // Only notify the message store if we have one. + // Only notify the provider if we have one. // If we don't have one, it means that the chat history is service managed and the underlying service is responsible for storing messages. - if (messageStore is not null) + if (provider is not null) { - var invokedContext = new ChatMessageStore.InvokedContext(requestMessages, chatMessageStoreMessages!) + var invokedContext = new ChatHistoryProvider.InvokedContext(requestMessages, chatHistoryProviderMessages!) { AIContextProviderMessages = aiContextProviderMessages, ResponseMessages = responseMessages }; - return messageStore.InvokedAsync(invokedContext, cancellationToken).AsTask(); + return provider.InvokedAsync(invokedContext, cancellationToken).AsTask(); } return Task.CompletedTask; } + private static ChatHistoryProvider? ResolveChatHistoryProvider(ChatClientAgentThread thread, ChatOptions? chatOptions) + { + ChatHistoryProvider? provider = thread.ChatHistoryProvider; + + // If someone provided an override ChatHistoryProvider via AdditionalProperties, we should use that instead of the one on the thread. + if (chatOptions?.AdditionalProperties?.TryGetValue(out ChatHistoryProvider? 
overrideProvider) is true) + { + provider = overrideProvider; + } + + return provider; + } + private static ChatClientAgentContinuationToken? WrapContinuationToken(ResponseContinuationToken? continuationToken, IEnumerable? inputMessages = null, List? responseUpdates = null) { if (continuationToken is null) diff --git a/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgentOptions.cs b/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgentOptions.cs index 719e863f0c..6f8451e2b8 100644 --- a/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgentOptions.cs +++ b/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgentOptions.cs @@ -39,10 +39,10 @@ public sealed class ChatClientAgentOptions public ChatOptions? ChatOptions { get; set; } /// - /// Gets or sets a factory function to create an instance of - /// which will be used to store chat messages for this agent. + /// Gets or sets a factory function to create an instance of + /// which will be used to provide chat history for this agent. /// - public Func>? ChatMessageStoreFactory { get; set; } + public Func>? ChatHistoryProviderFactory { get; set; } /// /// Gets or sets a factory function to create an instance of @@ -75,7 +75,7 @@ public ChatClientAgentOptions Clone() Name = this.Name, Description = this.Description, ChatOptions = this.ChatOptions?.Clone(), - ChatMessageStoreFactory = this.ChatMessageStoreFactory, + ChatHistoryProviderFactory = this.ChatHistoryProviderFactory, AIContextProviderFactory = this.AIContextProviderFactory, }; @@ -97,14 +97,14 @@ public sealed class AIContextProviderFactoryContext } /// - /// Context object passed to the to create a new instance of . + /// Context object passed to the to create a new instance of . /// - public sealed class ChatMessageStoreFactoryContext + public sealed class ChatHistoryProviderFactoryContext { /// - /// Gets or sets the serialized state of the chat message store, if any. + /// Gets or sets the serialized state of the , if any. 
/// - /// if there is no state, e.g. when the is first created. + /// if there is no state, e.g. when the is first created. public JsonElement SerializedState { get; set; } /// diff --git a/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgentThread.cs b/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgentThread.cs index 06326d1ed2..a604a0d085 100644 --- a/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgentThread.cs +++ b/dotnet/src/Microsoft.Agents.AI/ChatClient/ChatClientAgentThread.cs @@ -15,7 +15,7 @@ namespace Microsoft.Agents.AI; [DebuggerDisplay("{DebuggerDisplay,nq}")] public sealed class ChatClientAgentThread : AgentThread { - private ChatMessageStore? _messageStore; + private ChatHistoryProvider? _chatHistoryProvider; /// /// Initializes a new instance of the class. @@ -29,14 +29,14 @@ internal ChatClientAgentThread() /// /// /// - /// Note that either or may be set, but not both. - /// If is not null, setting will throw an + /// Note that either or may be set, but not both. + /// If is not null, setting will throw an /// exception. /// /// /// This property may be null in the following cases: /// - /// The thread stores messages via the and not in the agent service. + /// The thread stores messages via the and not in the agent service. /// This thread object is new and a server managed thread has not yet been created in the agent service. /// /// @@ -46,7 +46,7 @@ internal ChatClientAgentThread() /// to fork the thread with each iteration. /// /// - /// Attempted to set a conversation ID but a is already set. + /// Attempted to set a conversation ID but a is already set. public string? 
ConversationId { get; @@ -57,12 +57,12 @@ internal set return; } - if (this._messageStore is not null) + if (this._chatHistoryProvider is not null) { - // If we have a message store already, we shouldn't switch the thread to use a conversation id + // If we have a ChatHistoryProvider already, we shouldn't switch the thread to use a conversation id // since it means that the thread contents will essentially be deleted, and the thread will not work // with the original agent anymore. - throw new InvalidOperationException("Only the ConversationId or MessageStore may be set, but not both and switching from one to another is not supported."); + throw new InvalidOperationException("Only the ConversationId or ChatHistoryProvider may be set, but not both and switching from one to another is not supported."); } field = Throw.IfNullOrWhitespace(value); @@ -70,40 +70,40 @@ internal set } /// - /// Gets or sets the used by this thread, for cases where messages should be stored in a custom location. + /// Gets or sets the used by this thread, for cases where messages should be stored in a custom location. /// /// /// - /// Note that either or may be set, but not both. - /// If is not null, and is set, + /// Note that either or may be set, but not both. + /// If is not null, and is set, /// will be reverted to null, and vice versa. /// /// /// This property may be null in the following cases: /// - /// The thread stores messages in the agent service and just has an id to the remove thread, instead of in an . - /// This thread object is new it is not yet clear whether it will be backed by a server managed thread or an . + /// The thread stores messages in the agent service and just has an id to the remove thread, instead of in an . + /// This thread object is new it is not yet clear whether it will be backed by a server managed thread or an . /// /// /// - public ChatMessageStore? MessageStore + public ChatHistoryProvider? 
ChatHistoryProvider { - get => this._messageStore; + get => this._chatHistoryProvider; internal set { - if (this._messageStore is null && value is null) + if (this._chatHistoryProvider is null && value is null) { return; } if (!string.IsNullOrWhiteSpace(this.ConversationId)) { - // If we have a conversation id already, we shouldn't switch the thread to use a message store + // If we have a conversation id already, we shouldn't switch the thread to use a ChatHistoryProvider // since it means that the thread will not work with the original agent anymore. - throw new InvalidOperationException("Only the ConversationId or MessageStore may be set, but not both and switching from one to another is not supported."); + throw new InvalidOperationException("Only the ConversationId or ChatHistoryProvider may be set, but not both and switching from one to another is not supported."); } - this._messageStore = Throw.IfNull(value); + this._chatHistoryProvider = Throw.IfNull(value); } } @@ -117,9 +117,9 @@ internal set /// /// A representing the serialized state of the thread. /// Optional settings for customizing the JSON deserialization process. - /// - /// An optional factory function to create a custom from its serialized state. - /// If not provided, the default in-memory message store will be used. + /// + /// An optional factory function to create a custom from its serialized state. + /// If not provided, the default will be used. /// /// /// An optional factory function to create a custom from its serialized state. @@ -130,7 +130,7 @@ internal set internal static async Task DeserializeAsync( JsonElement serializedThreadState, JsonSerializerOptions? jsonSerializerOptions = null, - Func>? chatMessageStoreFactory = null, + Func>? chatHistoryProviderFactory = null, Func>? 
aiContextProviderFactory = null, CancellationToken cancellationToken = default) { @@ -152,14 +152,14 @@ internal static async Task DeserializeAsync( { thread.ConversationId = threadId; - // Since we have an ID, we should not have a chat message store and we can return here. + // Since we have an ID, we should not have a ChatHistoryProvider and we can return here. return thread; } - thread._messageStore = - chatMessageStoreFactory is not null - ? await chatMessageStoreFactory.Invoke(state?.StoreState ?? default, jsonSerializerOptions, cancellationToken).ConfigureAwait(false) - : new InMemoryChatMessageStore(state?.StoreState ?? default, jsonSerializerOptions); // default to an in-memory store + thread._chatHistoryProvider = + chatHistoryProviderFactory is not null + ? await chatHistoryProviderFactory.Invoke(state?.ChatHistoryProviderState ?? default, jsonSerializerOptions, cancellationToken).ConfigureAwait(false) + : new InMemoryChatHistoryProvider(state?.ChatHistoryProviderState ?? default, jsonSerializerOptions); // default to an in-memory ChatHistoryProvider return thread; } @@ -167,14 +167,14 @@ chatMessageStoreFactory is not null /// public override JsonElement Serialize(JsonSerializerOptions? jsonSerializerOptions = null) { - JsonElement? storeState = this._messageStore?.Serialize(jsonSerializerOptions); + JsonElement? chatHistoryProviderState = this._chatHistoryProvider?.Serialize(jsonSerializerOptions); JsonElement? aiContextProviderState = this.AIContextProvider?.Serialize(jsonSerializerOptions); var state = new ThreadState { ConversationId = this.ConversationId, - StoreState = storeState is { ValueKind: not JsonValueKind.Undefined } ? storeState : null, + ChatHistoryProviderState = chatHistoryProviderState is { ValueKind: not JsonValueKind.Undefined } ? chatHistoryProviderState : null, AIContextProviderState = aiContextProviderState is { ValueKind: not JsonValueKind.Undefined } ? 
aiContextProviderState : null, }; @@ -185,20 +185,20 @@ public override JsonElement Serialize(JsonSerializerOptions? jsonSerializerOptio public override object? GetService(Type serviceType, object? serviceKey = null) => base.GetService(serviceType, serviceKey) ?? this.AIContextProvider?.GetService(serviceType, serviceKey) - ?? this.MessageStore?.GetService(serviceType, serviceKey); + ?? this.ChatHistoryProvider?.GetService(serviceType, serviceKey); [DebuggerBrowsable(DebuggerBrowsableState.Never)] private string DebuggerDisplay => this.ConversationId is { } conversationId ? $"ConversationId = {conversationId}" : - this._messageStore is InMemoryChatMessageStore inMemoryStore ? $"Count = {inMemoryStore.Count}" : - this._messageStore is { } store ? $"Store = {store.GetType().Name}" : + this._chatHistoryProvider is InMemoryChatHistoryProvider inMemoryChatHistoryProvider ? $"Count = {inMemoryChatHistoryProvider.Count}" : + this._chatHistoryProvider is { } chatHistoryProvider ? $"ChatHistoryProvider = {chatHistoryProvider.GetType().Name}" : "Count = 0"; internal sealed class ThreadState { public string? ConversationId { get; set; } - public JsonElement? StoreState { get; set; } + public JsonElement? ChatHistoryProviderState { get; set; } public JsonElement? 
AIContextProviderState { get; set; } } diff --git a/dotnet/src/Shared/Workflows/Execution/WorkflowRunner.cs b/dotnet/src/Shared/Workflows/Execution/WorkflowRunner.cs index b8666451f6..380ea5eaeb 100644 --- a/dotnet/src/Shared/Workflows/Execution/WorkflowRunner.cs +++ b/dotnet/src/Shared/Workflows/Execution/WorkflowRunner.cs @@ -95,7 +95,7 @@ public async Task ExecuteAsync(Func workflowProvider, string input) Debug.WriteLine($"RESTORE #{this.LastCheckpoint.CheckpointId}"); Notify("WORKFLOW: Restore", ConsoleColor.DarkYellow); - run = await InProcessExecution.ResumeStreamAsync(workflow, this.LastCheckpoint, checkpointManager, run.Run.RunId).ConfigureAwait(false); + run = await InProcessExecution.ResumeStreamAsync(workflow, this.LastCheckpoint, checkpointManager).ConfigureAwait(false); } else { diff --git a/dotnet/tests/AnthropicChatCompletion.IntegrationTests/AnthropicChatCompletionFixture.cs b/dotnet/tests/AnthropicChatCompletion.IntegrationTests/AnthropicChatCompletionFixture.cs index 2bec0b366e..e9a3cba95e 100644 --- a/dotnet/tests/AnthropicChatCompletion.IntegrationTests/AnthropicChatCompletionFixture.cs +++ b/dotnet/tests/AnthropicChatCompletion.IntegrationTests/AnthropicChatCompletionFixture.cs @@ -39,12 +39,12 @@ public async Task> GetChatHistoryAsync(AgentThread thread) { var typedThread = (ChatClientAgentThread)thread; - if (typedThread.MessageStore is null) + if (typedThread.ChatHistoryProvider is null) { return []; } - return (await typedThread.MessageStore.InvokingAsync(new([]))).ToList(); + return (await typedThread.ChatHistoryProvider.InvokingAsync(new([]))).ToList(); } public Task CreateChatClientAgentAsync( diff --git a/dotnet/tests/AzureAI.IntegrationTests/AIProjectClientCreateTests.cs b/dotnet/tests/AzureAI.IntegrationTests/AIProjectClientCreateTests.cs index f626736418..d70d3d949d 100644 --- a/dotnet/tests/AzureAI.IntegrationTests/AIProjectClientCreateTests.cs +++ b/dotnet/tests/AzureAI.IntegrationTests/AIProjectClientCreateTests.cs @@ -22,9 +22,7 
@@ public class AIProjectClientCreateTests [Theory] [InlineData("CreateWithChatClientAgentOptionsAsync")] - [InlineData("CreateWithChatClientAgentOptionsSync")] [InlineData("CreateWithFoundryOptionsAsync")] - [InlineData("CreateWithFoundryOptionsSync")] public async Task CreateAgent_CreatesAgentWithCorrectMetadataAsync(string createMechanism) { // Arrange. @@ -43,20 +41,9 @@ public async Task CreateAgent_CreatesAgentWithCorrectMetadataAsync(string create Description = AgentDescription, ChatOptions = new() { Instructions = AgentInstructions } }), - "CreateWithChatClientAgentOptionsSync" => this._client.CreateAIAgent( - model: s_config.DeploymentName, - options: new ChatClientAgentOptions() - { - Name = AgentName, - Description = AgentDescription, - ChatOptions = new() { Instructions = AgentInstructions } - }), "CreateWithFoundryOptionsAsync" => await this._client.CreateAIAgentAsync( name: AgentName, creationOptions: new AgentVersionCreationOptions(new PromptAgentDefinition(s_config.DeploymentName) { Instructions = AgentInstructions }) { Description = AgentDescription }), - "CreateWithFoundryOptionsSync" => this._client.CreateAIAgent( - name: AgentName, - creationOptions: new AgentVersionCreationOptions(new PromptAgentDefinition(s_config.DeploymentName) { Instructions = AgentInstructions }) { Description = AgentDescription }), _ => throw new InvalidOperationException($"Unknown create mechanism: {createMechanism}") }; @@ -84,9 +71,7 @@ public async Task CreateAgent_CreatesAgentWithCorrectMetadataAsync(string create [Theory(Skip = "For manual testing only")] [InlineData("CreateWithChatClientAgentOptionsAsync")] - [InlineData("CreateWithChatClientAgentOptionsSync")] [InlineData("CreateWithFoundryOptionsAsync")] - [InlineData("CreateWithFoundryOptionsSync")] public async Task CreateAgent_CreatesAgentWithVectorStoresAsync(string createMechanism) { // Arrange. @@ -120,21 +105,11 @@ You are a helpful agent that can help fetch data from files you know about. 
name: AgentName, instructions: AgentInstructions, tools: [new HostedFileSearchTool() { Inputs = [new HostedVectorStoreContent(vectorStoreMetadata.Value.Id)] }]), - "CreateWithChatClientAgentOptionsSync" => this._client.CreateAIAgent( - model: s_config.DeploymentName, - name: AgentName, - instructions: AgentInstructions, - tools: [new HostedFileSearchTool() { Inputs = [new HostedVectorStoreContent(vectorStoreMetadata.Value.Id)] }]), "CreateWithFoundryOptionsAsync" => await this._client.CreateAIAgentAsync( model: s_config.DeploymentName, name: AgentName, instructions: AgentInstructions, tools: [ResponseTool.CreateFileSearchTool(vectorStoreIds: [vectorStoreMetadata.Value.Id]).AsAITool()]), - "CreateWithFoundryOptionsSync" => this._client.CreateAIAgent( - model: s_config.DeploymentName, - name: AgentName, - instructions: AgentInstructions, - tools: [ResponseTool.CreateFileSearchTool(vectorStoreIds: [vectorStoreMetadata.Value.Id]).AsAITool()]), _ => throw new InvalidOperationException($"Unknown create mechanism: {createMechanism}") }; @@ -157,9 +132,7 @@ You are a helpful agent that can help fetch data from files you know about. [Theory] [InlineData("CreateWithChatClientAgentOptionsAsync")] - [InlineData("CreateWithChatClientAgentOptionsSync")] [InlineData("CreateWithFoundryOptionsAsync")] - [InlineData("CreateWithFoundryOptionsSync")] public async Task CreateAgent_CreatesAgentWithCodeInterpreterAsync(string createMechanism) { // Arrange. @@ -192,22 +165,12 @@ and report the SECRET_NUMBER value it prints. Respond only with the number. 
name: AgentName, instructions: AgentInstructions, tools: [new HostedCodeInterpreterTool() { Inputs = [new HostedFileContent(uploadedCodeFile.Id)] }]), - "CreateWithChatClientAgentOptionsSync" => this._client.CreateAIAgent( - model: s_config.DeploymentName, - name: AgentName, - instructions: AgentInstructions, - tools: [new HostedCodeInterpreterTool() { Inputs = [new HostedFileContent(uploadedCodeFile.Id)] }]), // Foundry (definitions + resources provided directly) "CreateWithFoundryOptionsAsync" => await this._client.CreateAIAgentAsync( model: s_config.DeploymentName, name: AgentName, instructions: AgentInstructions, tools: [ResponseTool.CreateCodeInterpreterTool(new CodeInterpreterToolContainer(CodeInterpreterToolContainerConfiguration.CreateAutomaticContainerConfiguration([uploadedCodeFile.Id]))).AsAITool()]), - "CreateWithFoundryOptionsSync" => this._client.CreateAIAgent( - model: s_config.DeploymentName, - name: AgentName, - instructions: AgentInstructions, - tools: [ResponseTool.CreateCodeInterpreterTool(new CodeInterpreterToolContainer(CodeInterpreterToolContainerConfiguration.CreateAutomaticContainerConfiguration([uploadedCodeFile.Id]))).AsAITool()]), _ => throw new InvalidOperationException($"Unknown create mechanism: {createMechanism}") }; @@ -229,7 +192,6 @@ and report the SECRET_NUMBER value it prints. Respond only with the number. [Theory] [InlineData("CreateWithChatClientAgentOptionsAsync")] - [InlineData("CreateWithChatClientAgentOptionsSync")] public async Task CreateAgent_CreatesAgentWithAIFunctionToolsAsync(string createMechanism) { // Arrange. 
@@ -248,13 +210,6 @@ public async Task CreateAgent_CreatesAgentWithAIFunctionToolsAsync(string create Name = AgentName, ChatOptions = new() { Instructions = AgentInstructions, Tools = [weatherFunction] } }), - "CreateWithChatClientAgentOptionsSync" => this._client.CreateAIAgent( - s_config.DeploymentName, - options: new ChatClientAgentOptions() - { - Name = AgentName, - ChatOptions = new() { Instructions = AgentInstructions, Tools = [weatherFunction] } - }), _ => throw new InvalidOperationException($"Unknown create mechanism: {createMechanism}") }; diff --git a/dotnet/tests/AzureAI.IntegrationTests/AIProjectClientFixture.cs b/dotnet/tests/AzureAI.IntegrationTests/AIProjectClientFixture.cs index ddb015eb17..41bbe82c5d 100644 --- a/dotnet/tests/AzureAI.IntegrationTests/AIProjectClientFixture.cs +++ b/dotnet/tests/AzureAI.IntegrationTests/AIProjectClientFixture.cs @@ -48,12 +48,12 @@ public async Task> GetChatHistoryAsync(AgentThread thread) return await this.GetChatHistoryFromResponsesChainAsync(chatClientThread.ConversationId); } - if (chatClientThread.MessageStore is null) + if (chatClientThread.ChatHistoryProvider is null) { return []; } - return (await chatClientThread.MessageStore.InvokingAsync(new([]))).ToList(); + return (await chatClientThread.ChatHistoryProvider.InvokingAsync(new([]))).ToList(); } private async Task> GetChatHistoryFromResponsesChainAsync(string conversationId) diff --git a/dotnet/tests/AzureAIAgentsPersistent.IntegrationTests/AzureAIAgentsPersistentCreateTests.cs b/dotnet/tests/AzureAIAgentsPersistent.IntegrationTests/AzureAIAgentsPersistentCreateTests.cs index a10cc11d79..05b87539da 100644 --- a/dotnet/tests/AzureAIAgentsPersistent.IntegrationTests/AzureAIAgentsPersistentCreateTests.cs +++ b/dotnet/tests/AzureAIAgentsPersistent.IntegrationTests/AzureAIAgentsPersistentCreateTests.cs @@ -20,9 +20,7 @@ public class AzureAIAgentsPersistentCreateTests [Theory] [InlineData("CreateWithChatClientAgentOptionsAsync")] - 
[InlineData("CreateWithChatClientAgentOptionsSync")] [InlineData("CreateWithFoundryOptionsAsync")] - [InlineData("CreateWithFoundryOptionsSync")] public async Task CreateAgent_CreatesAgentWithCorrectMetadataAsync(string createMechanism) { // Arrange. @@ -41,24 +39,11 @@ public async Task CreateAgent_CreatesAgentWithCorrectMetadataAsync(string create Name = AgentName, Description = AgentDescription }), - "CreateWithChatClientAgentOptionsSync" => this._persistentAgentsClient.CreateAIAgent( - s_config.DeploymentName, - options: new ChatClientAgentOptions() - { - ChatOptions = new() { Instructions = AgentInstructions }, - Name = AgentName, - Description = AgentDescription - }), "CreateWithFoundryOptionsAsync" => await this._persistentAgentsClient.CreateAIAgentAsync( s_config.DeploymentName, instructions: AgentInstructions, name: AgentName, description: AgentDescription), - "CreateWithFoundryOptionsSync" => this._persistentAgentsClient.CreateAIAgent( - s_config.DeploymentName, - instructions: AgentInstructions, - name: AgentName, - description: AgentDescription), _ => throw new InvalidOperationException($"Unknown create mechanism: {createMechanism}") }; @@ -85,9 +70,7 @@ public async Task CreateAgent_CreatesAgentWithCorrectMetadataAsync(string create [Theory(Skip = "For manual testing only")] [InlineData("CreateWithChatClientAgentOptionsAsync")] - [InlineData("CreateWithChatClientAgentOptionsSync")] [InlineData("CreateWithFoundryOptionsAsync")] - [InlineData("CreateWithFoundryOptionsSync")] public async Task CreateAgent_CreatesAgentWithVectorStoresAsync(string createMechanism) { // Arrange. @@ -125,26 +108,11 @@ You are a helpful agent that can help fetch data from files you know about. 
Tools = [new HostedFileSearchTool() { Inputs = [new HostedVectorStoreContent(vectorStoreMetadata.Value.Id)] }] } }), - "CreateWithChatClientAgentOptionsSync" => this._persistentAgentsClient.CreateAIAgent( - s_config.DeploymentName, - options: new ChatClientAgentOptions() - { - ChatOptions = new() - { - Instructions = AgentInstructions, - Tools = [new HostedFileSearchTool() { Inputs = [new HostedVectorStoreContent(vectorStoreMetadata.Value.Id)] }] - } - }), "CreateWithFoundryOptionsAsync" => await this._persistentAgentsClient.CreateAIAgentAsync( s_config.DeploymentName, instructions: AgentInstructions, tools: [new FileSearchToolDefinition()], toolResources: new ToolResources() { FileSearch = new([vectorStoreMetadata.Value.Id], null) }), - "CreateWithFoundryOptionsSync" => this._persistentAgentsClient.CreateAIAgent( - s_config.DeploymentName, - instructions: AgentInstructions, - tools: [new FileSearchToolDefinition()], - toolResources: new ToolResources() { FileSearch = new([vectorStoreMetadata.Value.Id], null) }), _ => throw new InvalidOperationException($"Unknown create mechanism: {createMechanism}") }; @@ -167,9 +135,7 @@ You are a helpful agent that can help fetch data from files you know about. [Theory] [InlineData("CreateWithChatClientAgentOptionsAsync")] - [InlineData("CreateWithChatClientAgentOptionsSync")] [InlineData("CreateWithFoundryOptionsAsync")] - [InlineData("CreateWithFoundryOptionsSync")] public async Task CreateAgent_CreatesAgentWithCodeInterpreterAsync(string createMechanism) { // Arrange. @@ -205,26 +171,11 @@ and report the SECRET_NUMBER value it prints. Respond only with the number. 
Tools = [new HostedCodeInterpreterTool() { Inputs = [new HostedFileContent(uploadedCodeFile.Id)] }] } }), - "CreateWithChatClientAgentOptionsSync" => this._persistentAgentsClient.CreateAIAgent( - s_config.DeploymentName, - options: new ChatClientAgentOptions() - { - ChatOptions = new() - { - Instructions = AgentInstructions, - Tools = [new HostedCodeInterpreterTool() { Inputs = [new HostedFileContent(uploadedCodeFile.Id)] }] - } - }), "CreateWithFoundryOptionsAsync" => await this._persistentAgentsClient.CreateAIAgentAsync( s_config.DeploymentName, instructions: AgentInstructions, tools: [new CodeInterpreterToolDefinition()], toolResources: new ToolResources() { CodeInterpreter = toolResource }), - "CreateWithFoundryOptionsSync" => this._persistentAgentsClient.CreateAIAgent( - s_config.DeploymentName, - instructions: AgentInstructions, - tools: [new CodeInterpreterToolDefinition()], - toolResources: new ToolResources() { CodeInterpreter = toolResource }), _ => throw new InvalidOperationException($"Unknown create mechanism: {createMechanism}") }; @@ -246,7 +197,6 @@ and report the SECRET_NUMBER value it prints. Respond only with the number. [Theory] [InlineData("CreateWithChatClientAgentOptionsAsync")] - [InlineData("CreateWithChatClientAgentOptionsSync")] public async Task CreateAgent_CreatesAgentWithAIFunctionToolsAsync(string createMechanism) { // Arrange. 
@@ -267,16 +217,6 @@ public async Task CreateAgent_CreatesAgentWithAIFunctionToolsAsync(string create Tools = [weatherFunction] } }), - "CreateWithChatClientAgentOptionsSync" => this._persistentAgentsClient.CreateAIAgent( - s_config.DeploymentName, - options: new ChatClientAgentOptions() - { - ChatOptions = new() - { - Instructions = AgentInstructions, - Tools = [weatherFunction] - } - }), _ => throw new InvalidOperationException($"Unknown create mechanism: {createMechanism}") }; diff --git a/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/AdditionalPropertiesExtensionsTests.cs b/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/AdditionalPropertiesExtensionsTests.cs new file mode 100644 index 0000000000..86ce4f187e --- /dev/null +++ b/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/AdditionalPropertiesExtensionsTests.cs @@ -0,0 +1,490 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using Microsoft.Extensions.AI; + +namespace Microsoft.Agents.AI.Abstractions.UnitTests; + +/// +/// Contains tests for the class. +/// +public sealed class AdditionalPropertiesExtensionsTests +{ + #region Add Method Tests + + [Fact] + public void Add_WithValidValue_StoresValueUsingTypeName() + { + // Arrange + AdditionalPropertiesDictionary additionalProperties = new(); + TestClass value = new() { Name = "Test" }; + + // Act + additionalProperties.Add(value); + + // Assert + Assert.True(additionalProperties.ContainsKey(typeof(TestClass).FullName!)); + Assert.Same(value, additionalProperties[typeof(TestClass).FullName!]); + } + + [Fact] + public void Add_WithNullDictionary_ThrowsArgumentNullException() + { + // Arrange + AdditionalPropertiesDictionary? 
additionalProperties = null; + TestClass value = new() { Name = "Test" }; + + // Act & Assert + Assert.Throws(() => additionalProperties!.Add(value)); + } + + [Fact] + public void Add_WithStringValue_StoresValueCorrectly() + { + // Arrange + AdditionalPropertiesDictionary additionalProperties = new(); + const string Value = "test string"; + + // Act + additionalProperties.Add(Value); + + // Assert + Assert.True(additionalProperties.ContainsKey(typeof(string).FullName!)); + Assert.Equal(Value, additionalProperties[typeof(string).FullName!]); + } + + [Fact] + public void Add_WithIntValue_StoresValueCorrectly() + { + // Arrange + AdditionalPropertiesDictionary additionalProperties = new(); + const int Value = 42; + + // Act + additionalProperties.Add(Value); + + // Assert + Assert.True(additionalProperties.ContainsKey(typeof(int).FullName!)); + Assert.Equal(Value, additionalProperties[typeof(int).FullName!]); + } + + [Fact] + public void Add_ThrowsArgumentException_WhenSameTypeAddedTwice() + { + // Arrange + AdditionalPropertiesDictionary additionalProperties = new(); + TestClass firstValue = new() { Name = "First" }; + TestClass secondValue = new() { Name = "Second" }; + additionalProperties.Add(firstValue); + + // Act & Assert + Assert.Throws(() => additionalProperties.Add(secondValue)); + } + + [Fact] + public void Add_WithMultipleDifferentTypes_StoresAllValues() + { + // Arrange + AdditionalPropertiesDictionary additionalProperties = new(); + TestClass testClassValue = new() { Name = "Test" }; + AnotherTestClass anotherValue = new() { Id = 123 }; + const string StringValue = "test"; + + // Act + additionalProperties.Add(testClassValue); + additionalProperties.Add(anotherValue); + additionalProperties.Add(StringValue); + + // Assert + Assert.Equal(3, additionalProperties.Count); + Assert.Same(testClassValue, additionalProperties[typeof(TestClass).FullName!]); + Assert.Same(anotherValue, additionalProperties[typeof(AnotherTestClass).FullName!]); + 
Assert.Equal(StringValue, additionalProperties[typeof(string).FullName!]); + } + + #endregion + + #region TryAdd Method Tests + + [Fact] + public void TryAdd_WithValidValue_ReturnsTrueAndStoresValue() + { + // Arrange + AdditionalPropertiesDictionary additionalProperties = new(); + TestClass value = new() { Name = "Test" }; + + // Act + bool result = additionalProperties.TryAdd(value); + + // Assert + Assert.True(result); + Assert.True(additionalProperties.ContainsKey(typeof(TestClass).FullName!)); + Assert.Same(value, additionalProperties[typeof(TestClass).FullName!]); + } + + [Fact] + public void TryAdd_WithNullDictionary_ThrowsArgumentNullException() + { + // Arrange + AdditionalPropertiesDictionary? additionalProperties = null; + TestClass value = new() { Name = "Test" }; + + // Act & Assert + Assert.Throws(() => additionalProperties!.TryAdd(value)); + } + + [Fact] + public void TryAdd_WithExistingType_ReturnsFalseAndKeepsOriginalValue() + { + // Arrange + AdditionalPropertiesDictionary additionalProperties = new(); + TestClass firstValue = new() { Name = "First" }; + TestClass secondValue = new() { Name = "Second" }; + additionalProperties.Add(firstValue); + + // Act + bool result = additionalProperties.TryAdd(secondValue); + + // Assert + Assert.False(result); + Assert.Single(additionalProperties); + Assert.Same(firstValue, additionalProperties[typeof(TestClass).FullName!]); + } + + [Fact] + public void TryAdd_WithStringValue_ReturnsTrueAndStoresValue() + { + // Arrange + AdditionalPropertiesDictionary additionalProperties = new(); + const string Value = "test string"; + + // Act + bool result = additionalProperties.TryAdd(Value); + + // Assert + Assert.True(result); + Assert.True(additionalProperties.ContainsKey(typeof(string).FullName!)); + Assert.Equal(Value, additionalProperties[typeof(string).FullName!]); + } + + [Fact] + public void TryAdd_WithIntValue_ReturnsTrueAndStoresValue() + { + // Arrange + AdditionalPropertiesDictionary additionalProperties = 
new(); + const int Value = 42; + + // Act + bool result = additionalProperties.TryAdd(Value); + + // Assert + Assert.True(result); + Assert.True(additionalProperties.ContainsKey(typeof(int).FullName!)); + Assert.Equal(Value, additionalProperties[typeof(int).FullName!]); + } + + [Fact] + public void TryAdd_WithMultipleDifferentTypes_StoresAllValues() + { + // Arrange + AdditionalPropertiesDictionary additionalProperties = new(); + TestClass testClassValue = new() { Name = "Test" }; + AnotherTestClass anotherValue = new() { Id = 123 }; + const string StringValue = "test"; + + // Act + bool result1 = additionalProperties.TryAdd(testClassValue); + bool result2 = additionalProperties.TryAdd(anotherValue); + bool result3 = additionalProperties.TryAdd(StringValue); + + // Assert + Assert.True(result1); + Assert.True(result2); + Assert.True(result3); + Assert.Equal(3, additionalProperties.Count); + Assert.Same(testClassValue, additionalProperties[typeof(TestClass).FullName!]); + Assert.Same(anotherValue, additionalProperties[typeof(AnotherTestClass).FullName!]); + Assert.Equal(StringValue, additionalProperties[typeof(string).FullName!]); + } + + #endregion + + #region TryGetValue Method Tests + + [Fact] + public void TryGetValue_WithExistingValue_ReturnsTrueAndValue() + { + // Arrange + AdditionalPropertiesDictionary additionalProperties = new(); + TestClass expectedValue = new() { Name = "Test" }; + additionalProperties.Add(expectedValue); + + // Act + bool result = additionalProperties.TryGetValue(out TestClass? actualValue); + + // Assert + Assert.True(result); + Assert.NotNull(actualValue); + Assert.Same(expectedValue, actualValue); + } + + [Fact] + public void TryGetValue_WithNonExistingValue_ReturnsFalseAndNull() + { + // Arrange + AdditionalPropertiesDictionary additionalProperties = new(); + + // Act + bool result = additionalProperties.TryGetValue(out TestClass? 
actualValue); + + // Assert + Assert.False(result); + Assert.Null(actualValue); + } + + [Fact] + public void TryGetValue_WithNullDictionary_ThrowsArgumentNullException() + { + // Arrange + AdditionalPropertiesDictionary? additionalProperties = null; + + // Act & Assert + Assert.Throws(() => additionalProperties!.TryGetValue(out _)); + } + + [Fact] + public void TryGetValue_WithStringValue_ReturnsCorrectValue() + { + // Arrange + AdditionalPropertiesDictionary additionalProperties = new(); + const string ExpectedValue = "test string"; + additionalProperties.Add(ExpectedValue); + + // Act + bool result = additionalProperties.TryGetValue(out string? actualValue); + + // Assert + Assert.True(result); + Assert.Equal(ExpectedValue, actualValue); + } + + [Fact] + public void TryGetValue_WithIntValue_ReturnsCorrectValue() + { + // Arrange + AdditionalPropertiesDictionary additionalProperties = new(); + const int ExpectedValue = 42; + additionalProperties.Add(ExpectedValue); + + // Act + bool result = additionalProperties.TryGetValue(out int actualValue); + + // Assert + Assert.True(result); + Assert.Equal(ExpectedValue, actualValue); + } + + [Fact] + public void TryGetValue_WithWrongType_ReturnsFalse() + { + // Arrange + AdditionalPropertiesDictionary additionalProperties = new(); + TestClass testValue = new() { Name = "Test" }; + additionalProperties.Add(testValue); + + // Act + bool result = additionalProperties.TryGetValue(out AnotherTestClass? actualValue); + + // Assert + Assert.False(result); + Assert.Null(actualValue); + } + + [Fact] + public void TryGetValue_AfterTryAddFails_ReturnsOriginalValue() + { + // Arrange + AdditionalPropertiesDictionary additionalProperties = new(); + TestClass firstValue = new() { Name = "First" }; + TestClass secondValue = new() { Name = "Second" }; + additionalProperties.Add(firstValue); + additionalProperties.TryAdd(secondValue); + + // Act + bool result = additionalProperties.TryGetValue(out TestClass? 
actualValue); + + // Assert + Assert.Single(additionalProperties); + Assert.True(result); + Assert.Same(firstValue, actualValue); + } + + #endregion + + #region Contains Method Tests + + [Fact] + public void Contains_WithExistingType_ReturnsTrue() + { + // Arrange + AdditionalPropertiesDictionary additionalProperties = new(); + TestClass value = new() { Name = "Test" }; + additionalProperties.Add(value); + + // Act + bool result = additionalProperties.Contains(); + + // Assert + Assert.True(result); + } + + [Fact] + public void Contains_WithNonExistingType_ReturnsFalse() + { + // Arrange + AdditionalPropertiesDictionary additionalProperties = new(); + + // Act + bool result = additionalProperties.Contains(); + + // Assert + Assert.False(result); + } + + [Fact] + public void Contains_WithNullDictionary_ThrowsArgumentNullException() + { + // Arrange + AdditionalPropertiesDictionary? additionalProperties = null; + + // Act & Assert + Assert.Throws(() => additionalProperties!.Contains()); + } + + [Fact] + public void Contains_WithDifferentType_ReturnsFalse() + { + // Arrange + AdditionalPropertiesDictionary additionalProperties = new(); + TestClass value = new() { Name = "Test" }; + additionalProperties.Add(value); + + // Act + bool result = additionalProperties.Contains(); + + // Assert + Assert.False(result); + } + + [Fact] + public void Contains_AfterRemove_ReturnsFalse() + { + // Arrange + AdditionalPropertiesDictionary additionalProperties = new(); + TestClass value = new() { Name = "Test" }; + additionalProperties.Add(value); + additionalProperties.Remove(); + + // Act + bool result = additionalProperties.Contains(); + + // Assert + Assert.False(result); + } + + #endregion + + #region Remove Method Tests + + [Fact] + public void Remove_WithExistingType_ReturnsTrueAndRemovesValue() + { + // Arrange + AdditionalPropertiesDictionary additionalProperties = new(); + TestClass value = new() { Name = "Test" }; + additionalProperties.Add(value); + + // Act + bool result 
= additionalProperties.Remove(); + + // Assert + Assert.True(result); + Assert.Empty(additionalProperties); + } + + [Fact] + public void Remove_WithNonExistingType_ReturnsFalse() + { + // Arrange + AdditionalPropertiesDictionary additionalProperties = new(); + + // Act + bool result = additionalProperties.Remove(); + + // Assert + Assert.False(result); + } + + [Fact] + public void Remove_WithNullDictionary_ThrowsArgumentNullException() + { + // Arrange + AdditionalPropertiesDictionary? additionalProperties = null; + + // Act & Assert + Assert.Throws(() => additionalProperties!.Remove()); + } + + [Fact] + public void Remove_OnlyRemovesSpecifiedType() + { + // Arrange + AdditionalPropertiesDictionary additionalProperties = new(); + TestClass testValue = new() { Name = "Test" }; + AnotherTestClass anotherValue = new() { Id = 123 }; + additionalProperties.Add(testValue); + additionalProperties.Add(anotherValue); + + // Act + bool result = additionalProperties.Remove(); + + // Assert + Assert.True(result); + Assert.Single(additionalProperties); + Assert.False(additionalProperties.Contains()); + Assert.True(additionalProperties.Contains()); + } + + [Fact] + public void Remove_CalledTwice_ReturnsFalseOnSecondCall() + { + // Arrange + AdditionalPropertiesDictionary additionalProperties = new(); + TestClass value = new() { Name = "Test" }; + additionalProperties.Add(value); + + // Act + bool firstResult = additionalProperties.Remove(); + bool secondResult = additionalProperties.Remove(); + + // Assert + Assert.True(firstResult); + Assert.False(secondResult); + } + + #endregion + + #region Test Helper Classes + + private sealed class TestClass + { + public string Name { get; set; } = string.Empty; + } + + private sealed class AnotherTestClass + { + public int Id { get; set; } + } + + #endregion +} diff --git a/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/AgentResponseUpdateExtensionsTests.cs 
b/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/AgentResponseUpdateExtensionsTests.cs index 2f136066e4..2723ed081a 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/AgentResponseUpdateExtensionsTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/AgentResponseUpdateExtensionsTests.cs @@ -42,9 +42,9 @@ public async Task ToAgentResponseSuccessfullyCreatesResponseAsync(bool useAsync) { AgentResponseUpdate[] updates = [ - new(ChatRole.Assistant, "Hello") { ResponseId = "someResponse", MessageId = "12345", CreatedAt = new DateTimeOffset(1, 2, 3, 4, 5, 6, TimeSpan.Zero), AgentId = "agentId" }, + new(ChatRole.Assistant, "Hello") { ResponseId = "someResponse", MessageId = "12345", CreatedAt = new DateTimeOffset(2024, 2, 3, 4, 5, 6, TimeSpan.Zero), AgentId = "agentId" }, new(new("human"), ", ") { AuthorName = "Someone", AdditionalProperties = new() { ["a"] = "b" } }, - new(null, "world!") { CreatedAt = new DateTimeOffset(2, 2, 3, 4, 5, 6, TimeSpan.Zero), AdditionalProperties = new() { ["c"] = "d" } }, + new(null, "world!") { CreatedAt = new DateTimeOffset(2025, 2, 3, 4, 5, 6, TimeSpan.Zero), AdditionalProperties = new() { ["c"] = "d" } }, new() { Contents = [new UsageContent(new() { InputTokenCount = 1, OutputTokenCount = 2 })] }, new() { Contents = [new UsageContent(new() { InputTokenCount = 4, OutputTokenCount = 5 })] }, @@ -62,7 +62,7 @@ public async Task ToAgentResponseSuccessfullyCreatesResponseAsync(bool useAsync) Assert.Equal(7, response.Usage.OutputTokenCount); Assert.Equal("someResponse", response.ResponseId); - Assert.Equal(new DateTimeOffset(2, 2, 3, 4, 5, 6, TimeSpan.Zero), response.CreatedAt); + Assert.Equal(new DateTimeOffset(2024, 2, 3, 4, 5, 6, TimeSpan.Zero), response.CreatedAt); Assert.Equal(2, response.Messages.Count); @@ -226,13 +226,13 @@ public async Task ToAgentResponse_AlternativeTimestampsAsync(bool useAsync) // Unix epoch (as "null") should not overwrite new(null, "b") { CreatedAt = unixEpoch }, - // 
Newer timestamp should overwrite + // Newer timestamp should not overwrite (first timestamp wins) new(null, "c") { CreatedAt = middle }, // Older timestamp should not overwrite new(null, "d") { CreatedAt = early }, - // Even newer timestamp should overwrite + // Even newer timestamp should not overwrite (first timestamp wins) new(null, "e") { CreatedAt = late }, // Unix epoch should not overwrite again @@ -249,20 +249,20 @@ public async Task ToAgentResponse_AlternativeTimestampsAsync(bool useAsync) Assert.Equal("abcdefg", response.Messages[0].Text); Assert.Equal(ChatRole.Tool, response.Messages[0].Role); - Assert.Equal(late, response.Messages[0].CreatedAt); - Assert.Equal(late, response.CreatedAt); + Assert.Equal(early, response.Messages[0].CreatedAt); + Assert.Equal(early, response.CreatedAt); } public static IEnumerable ToAgentResponse_TimestampFolding_MemberData() { - // Base test cases + // Base test cases - first non-null valid timestamp wins var testCases = new (string? timestamp1, string? timestamp2, string? 
expectedTimestamp)[] { (null, null, null), ("2024-01-01T10:00:00Z", null, "2024-01-01T10:00:00Z"), (null, "2024-01-01T10:00:00Z", "2024-01-01T10:00:00Z"), - ("2024-01-01T10:00:00Z", "2024-01-01T11:00:00Z", "2024-01-01T11:00:00Z"), - ("2024-01-01T11:00:00Z", "2024-01-01T10:00:00Z", "2024-01-01T11:00:00Z"), + ("2024-01-01T10:00:00Z", "2024-01-01T11:00:00Z", "2024-01-01T10:00:00Z"), // First timestamp wins + ("2024-01-01T11:00:00Z", "2024-01-01T10:00:00Z", "2024-01-01T11:00:00Z"), // First timestamp wins ("2024-01-01T10:00:00Z", "1970-01-01T00:00:00Z", "2024-01-01T10:00:00Z"), ("1970-01-01T00:00:00Z", "2024-01-01T10:00:00Z", "2024-01-01T10:00:00Z"), }; diff --git a/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/ChatHistoryProviderExtensionsTests.cs b/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/ChatHistoryProviderExtensionsTests.cs new file mode 100644 index 0000000000..84a0242320 --- /dev/null +++ b/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/ChatHistoryProviderExtensionsTests.cs @@ -0,0 +1,129 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Collections.Generic; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.Extensions.AI; +using Moq; + +namespace Microsoft.Agents.AI.Abstractions.UnitTests; + +/// +/// Contains tests for the class. 
+/// +public sealed class ChatHistoryProviderExtensionsTests +{ + [Fact] + public void WithMessageFilters_ReturnsChatHistoryProviderMessageFilter() + { + // Arrange + Mock providerMock = new(); + + // Act + ChatHistoryProvider result = providerMock.Object.WithMessageFilters( + invokingMessagesFilter: msgs => msgs, + invokedMessagesFilter: ctx => ctx); + + // Assert + Assert.IsType(result); + } + + [Fact] + public async Task WithMessageFilters_InvokingFilter_IsAppliedAsync() + { + // Arrange + Mock providerMock = new(); + List innerMessages = [new(ChatRole.User, "Hello"), new(ChatRole.Assistant, "Hi")]; + ChatHistoryProvider.InvokingContext context = new([new ChatMessage(ChatRole.User, "Test")]); + + providerMock + .Setup(p => p.InvokingAsync(context, It.IsAny())) + .ReturnsAsync(innerMessages); + + ChatHistoryProvider filtered = providerMock.Object.WithMessageFilters( + invokingMessagesFilter: msgs => msgs.Where(m => m.Role == ChatRole.User)); + + // Act + List result = (await filtered.InvokingAsync(context, CancellationToken.None)).ToList(); + + // Assert + Assert.Single(result); + Assert.Equal(ChatRole.User, result[0].Role); + } + + [Fact] + public async Task WithMessageFilters_InvokedFilter_IsAppliedAsync() + { + // Arrange + Mock providerMock = new(); + List requestMessages = [new(ChatRole.User, "Hello")]; + List chatHistoryProviderMessages = [new(ChatRole.System, "System")]; + ChatHistoryProvider.InvokedContext context = new(requestMessages, chatHistoryProviderMessages) + { + ResponseMessages = [new ChatMessage(ChatRole.Assistant, "Response")] + }; + + ChatHistoryProvider.InvokedContext? 
capturedContext = null; + providerMock + .Setup(p => p.InvokedAsync(It.IsAny(), It.IsAny())) + .Callback((ctx, _) => capturedContext = ctx) + .Returns(default(ValueTask)); + + ChatHistoryProvider filtered = providerMock.Object.WithMessageFilters( + invokedMessagesFilter: ctx => + { + ctx.ResponseMessages = null; + return ctx; + }); + + // Act + await filtered.InvokedAsync(context, CancellationToken.None); + + // Assert + Assert.NotNull(capturedContext); + Assert.Null(capturedContext.ResponseMessages); + } + + [Fact] + public void WithAIContextProviderMessageRemoval_ReturnsChatHistoryProviderMessageFilter() + { + // Arrange + Mock providerMock = new(); + + // Act + ChatHistoryProvider result = providerMock.Object.WithAIContextProviderMessageRemoval(); + + // Assert + Assert.IsType(result); + } + + [Fact] + public async Task WithAIContextProviderMessageRemoval_RemovesAIContextProviderMessagesAsync() + { + // Arrange + Mock providerMock = new(); + List requestMessages = [new(ChatRole.User, "Hello")]; + List chatHistoryProviderMessages = [new(ChatRole.System, "System")]; + List aiContextProviderMessages = [new(ChatRole.System, "Context")]; + ChatHistoryProvider.InvokedContext context = new(requestMessages, chatHistoryProviderMessages) + { + AIContextProviderMessages = aiContextProviderMessages + }; + + ChatHistoryProvider.InvokedContext? 
capturedContext = null; + providerMock + .Setup(p => p.InvokedAsync(It.IsAny(), It.IsAny())) + .Callback((ctx, _) => capturedContext = ctx) + .Returns(default(ValueTask)); + + ChatHistoryProvider filtered = providerMock.Object.WithAIContextProviderMessageRemoval(); + + // Act + await filtered.InvokedAsync(context, CancellationToken.None); + + // Assert + Assert.NotNull(capturedContext); + Assert.Null(capturedContext.AIContextProviderMessages); + } +} diff --git a/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/ChatMessageStoreMessageFilterTests.cs b/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/ChatHistoryProviderMessageFilterTests.cs similarity index 58% rename from dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/ChatMessageStoreMessageFilterTests.cs rename to dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/ChatHistoryProviderMessageFilterTests.cs index ab10c377ae..43a3e78f10 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/ChatMessageStoreMessageFilterTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/ChatHistoryProviderMessageFilterTests.cs @@ -12,60 +12,60 @@ namespace Microsoft.Agents.AI.Abstractions.UnitTests; /// -/// Contains tests for the class. +/// Contains tests for the class. 
/// -public sealed class ChatMessageStoreMessageFilterTests +public sealed class ChatHistoryProviderMessageFilterTests { [Fact] - public void Constructor_WithNullInnerStore_ThrowsArgumentNullException() + public void Constructor_WithNullInnerProvider_ThrowsArgumentNullException() { // Arrange, Act & Assert - Assert.Throws(() => new ChatMessageStoreMessageFilter(null!)); + Assert.Throws(() => new ChatHistoryProviderMessageFilter(null!)); } [Fact] - public void Constructor_WithOnlyInnerStore_Throws() + public void Constructor_WithOnlyInnerProvider_Throws() { // Arrange - var innerStoreMock = new Mock(); + var innerProviderMock = new Mock(); // Act & Assert - Assert.Throws(() => new ChatMessageStoreMessageFilter(innerStoreMock.Object)); + Assert.Throws(() => new ChatHistoryProviderMessageFilter(innerProviderMock.Object)); } [Fact] public void Constructor_WithAllParameters_CreatesInstance() { // Arrange - var innerStoreMock = new Mock(); + var innerProviderMock = new Mock(); IEnumerable InvokingFilter(IEnumerable msgs) => msgs; - ChatMessageStore.InvokedContext InvokedFilter(ChatMessageStore.InvokedContext ctx) => ctx; + ChatHistoryProvider.InvokedContext InvokedFilter(ChatHistoryProvider.InvokedContext ctx) => ctx; // Act - var filter = new ChatMessageStoreMessageFilter(innerStoreMock.Object, InvokingFilter, InvokedFilter); + var filter = new ChatHistoryProviderMessageFilter(innerProviderMock.Object, InvokingFilter, InvokedFilter); // Assert Assert.NotNull(filter); } [Fact] - public async Task InvokingAsync_WithNoOpFilters_ReturnsInnerStoreMessagesAsync() + public async Task InvokingAsync_WithNoOpFilters_ReturnsInnerProviderMessagesAsync() { // Arrange - var innerStoreMock = new Mock(); + var innerProviderMock = new Mock(); var expectedMessages = new List { new(ChatRole.User, "Hello"), new(ChatRole.Assistant, "Hi there!") }; - var context = new ChatMessageStore.InvokingContext([new ChatMessage(ChatRole.User, "Test")]); + var context = new 
ChatHistoryProvider.InvokingContext([new ChatMessage(ChatRole.User, "Test")]); - innerStoreMock + innerProviderMock .Setup(s => s.InvokingAsync(context, It.IsAny())) .ReturnsAsync(expectedMessages); - var filter = new ChatMessageStoreMessageFilter(innerStoreMock.Object, x => x, x => x); + var filter = new ChatHistoryProviderMessageFilter(innerProviderMock.Object, x => x, x => x); // Act var result = (await filter.InvokingAsync(context, CancellationToken.None)).ToList(); @@ -74,30 +74,30 @@ public async Task InvokingAsync_WithNoOpFilters_ReturnsInnerStoreMessagesAsync() Assert.Equal(2, result.Count); Assert.Equal("Hello", result[0].Text); Assert.Equal("Hi there!", result[1].Text); - innerStoreMock.Verify(s => s.InvokingAsync(context, It.IsAny()), Times.Once); + innerProviderMock.Verify(s => s.InvokingAsync(context, It.IsAny()), Times.Once); } [Fact] public async Task InvokingAsync_WithInvokingFilter_AppliesFilterAsync() { // Arrange - var innerStoreMock = new Mock(); + var innerProviderMock = new Mock(); var innerMessages = new List { new(ChatRole.User, "Hello"), new(ChatRole.Assistant, "Hi there!"), new(ChatRole.User, "How are you?") }; - var context = new ChatMessageStore.InvokingContext([new ChatMessage(ChatRole.User, "Test")]); + var context = new ChatHistoryProvider.InvokingContext([new ChatMessage(ChatRole.User, "Test")]); - innerStoreMock + innerProviderMock .Setup(s => s.InvokingAsync(context, It.IsAny())) .ReturnsAsync(innerMessages); // Filter to only user messages IEnumerable InvokingFilter(IEnumerable msgs) => msgs.Where(m => m.Role == ChatRole.User); - var filter = new ChatMessageStoreMessageFilter(innerStoreMock.Object, InvokingFilter); + var filter = new ChatHistoryProviderMessageFilter(innerProviderMock.Object, InvokingFilter); // Act var result = (await filter.InvokingAsync(context, CancellationToken.None)).ToList(); @@ -105,22 +105,22 @@ public async Task InvokingAsync_WithInvokingFilter_AppliesFilterAsync() // Assert Assert.Equal(2, result.Count); 
Assert.All(result, msg => Assert.Equal(ChatRole.User, msg.Role)); - innerStoreMock.Verify(s => s.InvokingAsync(context, It.IsAny()), Times.Once); + innerProviderMock.Verify(s => s.InvokingAsync(context, It.IsAny()), Times.Once); } [Fact] public async Task InvokingAsync_WithInvokingFilter_CanModifyMessagesAsync() { // Arrange - var innerStoreMock = new Mock(); + var innerProviderMock = new Mock(); var innerMessages = new List { new(ChatRole.User, "Hello"), new(ChatRole.Assistant, "Hi there!") }; - var context = new ChatMessageStore.InvokingContext([new ChatMessage(ChatRole.User, "Test")]); + var context = new ChatHistoryProvider.InvokingContext([new ChatMessage(ChatRole.User, "Test")]); - innerStoreMock + innerProviderMock .Setup(s => s.InvokingAsync(context, It.IsAny())) .ReturnsAsync(innerMessages); @@ -128,7 +128,7 @@ public async Task InvokingAsync_WithInvokingFilter_CanModifyMessagesAsync() IEnumerable InvokingFilter(IEnumerable msgs) => msgs.Select(m => new ChatMessage(m.Role, $"[FILTERED] {m.Text}")); - var filter = new ChatMessageStoreMessageFilter(innerStoreMock.Object, InvokingFilter); + var filter = new ChatHistoryProviderMessageFilter(innerProviderMock.Object, InvokingFilter); // Act var result = (await filter.InvokingAsync(context, CancellationToken.None)).ToList(); @@ -143,26 +143,26 @@ IEnumerable InvokingFilter(IEnumerable msgs) => public async Task InvokedAsync_WithInvokedFilter_AppliesFilterAsync() { // Arrange - var innerStoreMock = new Mock(); + var innerProviderMock = new Mock(); var requestMessages = new List { new(ChatRole.User, "Hello") }; - var chatMessageStoreMessages = new List { new(ChatRole.System, "System") }; + var chatHistoryProviderMessages = new List { new(ChatRole.System, "System") }; var responseMessages = new List { new(ChatRole.Assistant, "Response") }; - var context = new ChatMessageStore.InvokedContext(requestMessages, chatMessageStoreMessages) + var context = new ChatHistoryProvider.InvokedContext(requestMessages, 
chatHistoryProviderMessages) { ResponseMessages = responseMessages }; - ChatMessageStore.InvokedContext? capturedContext = null; - innerStoreMock - .Setup(s => s.InvokedAsync(It.IsAny(), It.IsAny())) - .Callback((ctx, ct) => capturedContext = ctx) + ChatHistoryProvider.InvokedContext? capturedContext = null; + innerProviderMock + .Setup(s => s.InvokedAsync(It.IsAny(), It.IsAny())) + .Callback((ctx, ct) => capturedContext = ctx) .Returns(default(ValueTask)); // Filter that modifies the context - ChatMessageStore.InvokedContext InvokedFilter(ChatMessageStore.InvokedContext ctx) + ChatHistoryProvider.InvokedContext InvokedFilter(ChatHistoryProvider.InvokedContext ctx) { var modifiedRequestMessages = ctx.RequestMessages.Select(m => new ChatMessage(m.Role, $"[FILTERED] {m.Text}")).ToList(); - return new ChatMessageStore.InvokedContext(modifiedRequestMessages, ctx.ChatMessageStoreMessages) + return new ChatHistoryProvider.InvokedContext(modifiedRequestMessages, ctx.ChatHistoryProviderMessages) { ResponseMessages = ctx.ResponseMessages, AIContextProviderMessages = ctx.AIContextProviderMessages, @@ -170,7 +170,7 @@ ChatMessageStore.InvokedContext InvokedFilter(ChatMessageStore.InvokedContext ct }; } - var filter = new ChatMessageStoreMessageFilter(innerStoreMock.Object, invokedMessagesFilter: InvokedFilter); + var filter = new ChatHistoryProviderMessageFilter(innerProviderMock.Object, invokedMessagesFilter: InvokedFilter); // Act await filter.InvokedAsync(context, CancellationToken.None); @@ -179,27 +179,27 @@ ChatMessageStore.InvokedContext InvokedFilter(ChatMessageStore.InvokedContext ct Assert.NotNull(capturedContext); Assert.Single(capturedContext.RequestMessages); Assert.Equal("[FILTERED] Hello", capturedContext.RequestMessages.First().Text); - innerStoreMock.Verify(s => s.InvokedAsync(It.IsAny(), It.IsAny()), Times.Once); + innerProviderMock.Verify(s => s.InvokedAsync(It.IsAny(), It.IsAny()), Times.Once); } [Fact] - public void Serialize_DelegatesToInnerStore() + 
public void Serialize_DelegatesToInnerProvider() { // Arrange - var innerStoreMock = new Mock(); + var innerProviderMock = new Mock(); var expectedJson = JsonSerializer.SerializeToElement("data", TestJsonSerializerContext.Default.String); - innerStoreMock + innerProviderMock .Setup(s => s.Serialize(It.IsAny())) .Returns(expectedJson); - var filter = new ChatMessageStoreMessageFilter(innerStoreMock.Object, x => x, x => x); + var filter = new ChatHistoryProviderMessageFilter(innerProviderMock.Object, x => x, x => x); // Act var result = filter.Serialize(); // Assert Assert.Equal(expectedJson.GetRawText(), result.GetRawText()); - innerStoreMock.Verify(s => s.Serialize(null), Times.Once); + innerProviderMock.Verify(s => s.Serialize(null), Times.Once); } } diff --git a/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/ChatMessageStoreTests.cs b/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/ChatHistoryProviderTests.cs similarity index 53% rename from dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/ChatMessageStoreTests.cs rename to dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/ChatHistoryProviderTests.cs index 883941458c..02955f4a25 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/ChatMessageStoreTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/ChatHistoryProviderTests.cs @@ -10,73 +10,73 @@ namespace Microsoft.Agents.AI.Abstractions.UnitTests; /// -/// Contains tests for the class. +/// Contains tests for the class. 
/// -public class ChatMessageStoreTests +public class ChatHistoryProviderTests { #region GetService Method Tests [Fact] - public void GetService_RequestingExactStoreType_ReturnsStore() + public void GetService_RequestingExactProviderType_ReturnsProvider() { - var store = new TestChatMessageStore(); - var result = store.GetService(typeof(TestChatMessageStore)); + var provider = new TestChatHistoryProvider(); + var result = provider.GetService(typeof(TestChatHistoryProvider)); Assert.NotNull(result); - Assert.Same(store, result); + Assert.Same(provider, result); } [Fact] - public void GetService_RequestingBaseStoreType_ReturnsStore() + public void GetService_RequestingBaseProviderType_ReturnsProvider() { - var store = new TestChatMessageStore(); - var result = store.GetService(typeof(ChatMessageStore)); + var provider = new TestChatHistoryProvider(); + var result = provider.GetService(typeof(ChatHistoryProvider)); Assert.NotNull(result); - Assert.Same(store, result); + Assert.Same(provider, result); } [Fact] public void GetService_RequestingUnrelatedType_ReturnsNull() { - var store = new TestChatMessageStore(); - var result = store.GetService(typeof(string)); + var provider = new TestChatHistoryProvider(); + var result = provider.GetService(typeof(string)); Assert.Null(result); } [Fact] public void GetService_WithServiceKey_ReturnsNull() { - var store = new TestChatMessageStore(); - var result = store.GetService(typeof(TestChatMessageStore), "some-key"); + var provider = new TestChatHistoryProvider(); + var result = provider.GetService(typeof(TestChatHistoryProvider), "some-key"); Assert.Null(result); } [Fact] public void GetService_WithNullServiceType_ThrowsArgumentNullException() { - var store = new TestChatMessageStore(); - Assert.Throws(() => store.GetService(null!)); + var provider = new TestChatHistoryProvider(); + Assert.Throws(() => provider.GetService(null!)); } [Fact] public void GetService_Generic_ReturnsCorrectType() { - var store = new 
TestChatMessageStore(); - var result = store.GetService(); + var provider = new TestChatHistoryProvider(); + var result = provider.GetService(); Assert.NotNull(result); - Assert.Same(store, result); + Assert.Same(provider, result); } [Fact] public void GetService_Generic_ReturnsNullForUnrelatedType() { - var store = new TestChatMessageStore(); - var result = store.GetService(); + var provider = new TestChatHistoryProvider(); + var result = provider.GetService(); Assert.Null(result); } #endregion - private sealed class TestChatMessageStore : ChatMessageStore + private sealed class TestChatHistoryProvider : ChatHistoryProvider { public override ValueTask> InvokingAsync(InvokingContext context, CancellationToken cancellationToken = default) => new(Array.Empty()); diff --git a/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/InMemoryAgentThreadTests.cs b/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/InMemoryAgentThreadTests.cs index 906db4d30c..c35ff98711 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/InMemoryAgentThreadTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/InMemoryAgentThreadTests.cs @@ -16,29 +16,29 @@ public class InMemoryAgentThreadTests #region Constructor and Property Tests [Fact] - public void Constructor_SetsDefaultMessageStore() + public void Constructor_SetsDefaultChatHistoryProvider() { // Arrange & Act var thread = new TestInMemoryAgentThread(); // Assert - Assert.NotNull(thread.GetMessageStore()); - Assert.Empty(thread.GetMessageStore()); + Assert.NotNull(thread.GetChatHistoryProvider()); + Assert.Empty(thread.GetChatHistoryProvider()); } [Fact] - public void Constructor_WithMessageStore_SetsProperty() + public void Constructor_WithChatHistoryProvider_SetsProperty() { // Arrange - InMemoryChatMessageStore store = [new(ChatRole.User, "Hello")]; + InMemoryChatHistoryProvider provider = [new(ChatRole.User, "Hello")]; // Act - var thread = new TestInMemoryAgentThread(store); + var thread = 
new TestInMemoryAgentThread(provider); // Assert - Assert.Same(store, thread.GetMessageStore()); - Assert.Single(thread.GetMessageStore()); - Assert.Equal("Hello", thread.GetMessageStore()[0].Text); + Assert.Same(provider, thread.GetChatHistoryProvider()); + Assert.Single(thread.GetChatHistoryProvider()); + Assert.Equal("Hello", thread.GetChatHistoryProvider()[0].Text); } [Fact] @@ -51,27 +51,27 @@ public void Constructor_WithMessages_SetsProperty() var thread = new TestInMemoryAgentThread(messages); // Assert - Assert.NotNull(thread.GetMessageStore()); - Assert.Single(thread.GetMessageStore()); - Assert.Equal("Hi", thread.GetMessageStore()[0].Text); + Assert.NotNull(thread.GetChatHistoryProvider()); + Assert.Single(thread.GetChatHistoryProvider()); + Assert.Equal("Hi", thread.GetChatHistoryProvider()[0].Text); } [Fact] public void Constructor_WithSerializedState_SetsProperty() { // Arrange - InMemoryChatMessageStore store = [new(ChatRole.User, "TestMsg")]; - var storeState = store.Serialize(); - var threadStateWrapper = new InMemoryAgentThread.InMemoryAgentThreadState { StoreState = storeState }; + InMemoryChatHistoryProvider provider = [new(ChatRole.User, "TestMsg")]; + var providerState = provider.Serialize(); + var threadStateWrapper = new InMemoryAgentThread.InMemoryAgentThreadState { ChatHistoryProviderState = providerState }; var json = JsonSerializer.SerializeToElement(threadStateWrapper, TestJsonSerializerContext.Default.InMemoryAgentThreadState); // Act var thread = new TestInMemoryAgentThread(json); // Assert - Assert.NotNull(thread.GetMessageStore()); - Assert.Single(thread.GetMessageStore()); - Assert.Equal("TestMsg", thread.GetMessageStore()[0].Text); + Assert.NotNull(thread.GetChatHistoryProvider()); + Assert.Single(thread.GetChatHistoryProvider()); + Assert.Equal("TestMsg", thread.GetChatHistoryProvider()[0].Text); } [Fact] @@ -99,9 +99,9 @@ public void Serialize_ReturnsCorrectJson_WhenMessagesExist() // Assert Assert.Equal(JsonValueKind.Object, 
json.ValueKind); - Assert.True(json.TryGetProperty("storeState", out var storeStateProperty)); - Assert.Equal(JsonValueKind.Object, storeStateProperty.ValueKind); - Assert.True(storeStateProperty.TryGetProperty("messages", out var messagesProperty)); + Assert.True(json.TryGetProperty("chatHistoryProviderState", out var providerStateProperty)); + Assert.Equal(JsonValueKind.Object, providerStateProperty.ValueKind); + Assert.True(providerStateProperty.TryGetProperty("messages", out var messagesProperty)); Assert.Equal(JsonValueKind.Array, messagesProperty.ValueKind); var messagesList = messagesProperty.EnumerateArray().ToList(); Assert.Single(messagesList); @@ -118,9 +118,9 @@ public void Serialize_ReturnsEmptyMessages_WhenNoMessages() // Assert Assert.Equal(JsonValueKind.Object, json.ValueKind); - Assert.True(json.TryGetProperty("storeState", out var storeStateProperty)); - Assert.Equal(JsonValueKind.Object, storeStateProperty.ValueKind); - Assert.True(storeStateProperty.TryGetProperty("messages", out var messagesProperty)); + Assert.True(json.TryGetProperty("chatHistoryProviderState", out var providerStateProperty)); + Assert.Equal(JsonValueKind.Object, providerStateProperty.ValueKind); + Assert.True(providerStateProperty.TryGetProperty("messages", out var messagesProperty)); Assert.Equal(JsonValueKind.Array, messagesProperty.ValueKind); Assert.Empty(messagesProperty.EnumerateArray()); } @@ -130,15 +130,15 @@ public void Serialize_ReturnsEmptyMessages_WhenNoMessages() #region GetService Tests [Fact] - public void GetService_RequestingChatMessageStore_ReturnsChatMessageStore() + public void GetService_RequestingChatHistoryProvider_ReturnsChatHistoryProvider() { // Arrange var thread = new TestInMemoryAgentThread(); // Act & Assert - Assert.NotNull(thread.GetService(typeof(ChatMessageStore))); - Assert.Same(thread.GetMessageStore(), thread.GetService(typeof(ChatMessageStore))); - Assert.Same(thread.GetMessageStore(), 
thread.GetService(typeof(InMemoryChatMessageStore))); + Assert.NotNull(thread.GetService(typeof(ChatHistoryProvider))); + Assert.Same(thread.GetChatHistoryProvider(), thread.GetService(typeof(ChatHistoryProvider))); + Assert.Same(thread.GetChatHistoryProvider(), thread.GetService(typeof(InMemoryChatHistoryProvider))); } #endregion @@ -147,9 +147,9 @@ public void GetService_RequestingChatMessageStore_ReturnsChatMessageStore() private sealed class TestInMemoryAgentThread : InMemoryAgentThread { public TestInMemoryAgentThread() { } - public TestInMemoryAgentThread(InMemoryChatMessageStore? store) : base(store) { } + public TestInMemoryAgentThread(InMemoryChatHistoryProvider? provider) : base(provider) { } public TestInMemoryAgentThread(IEnumerable messages) : base(messages) { } public TestInMemoryAgentThread(JsonElement serializedThreadState) : base(serializedThreadState) { } - public InMemoryChatMessageStore GetMessageStore() => this.MessageStore; + public InMemoryChatHistoryProvider GetChatHistoryProvider() => this.ChatHistoryProvider; } } diff --git a/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/InMemoryChatMessageStoreTests.cs b/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/InMemoryChatHistoryProviderTests.cs similarity index 60% rename from dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/InMemoryChatMessageStoreTests.cs rename to dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/InMemoryChatHistoryProviderTests.cs index 43bfacca79..debaff73ef 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/InMemoryChatMessageStoreTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/InMemoryChatHistoryProviderTests.cs @@ -14,24 +14,24 @@ namespace Microsoft.Agents.AI.Abstractions.UnitTests; /// -/// Contains tests for the class. +/// Contains tests for the class. 
/// -public class InMemoryChatMessageStoreTests +public class InMemoryChatHistoryProviderTests { [Fact] public void Constructor_Throws_ForNullReducer() => // Arrange & Act & Assert - Assert.Throws(() => new InMemoryChatMessageStore(null!)); + Assert.Throws(() => new InMemoryChatHistoryProvider(null!)); [Fact] public void Constructor_DefaultsToBeforeMessageRetrieval_ForNotProvidedTriggerEvent() { // Arrange & Act var reducerMock = new Mock(); - var store = new InMemoryChatMessageStore(reducerMock.Object); + var provider = new InMemoryChatHistoryProvider(reducerMock.Object); // Assert - Assert.Equal(InMemoryChatMessageStore.ChatReducerTriggerEvent.BeforeMessagesRetrieval, store.ReducerTriggerEvent); + Assert.Equal(InMemoryChatHistoryProvider.ChatReducerTriggerEvent.BeforeMessagesRetrieval, provider.ReducerTriggerEvent); } [Fact] @@ -39,11 +39,11 @@ public void Constructor_Arguments_SetOnPropertiesCorrectly() { // Arrange & Act var reducerMock = new Mock(); - var store = new InMemoryChatMessageStore(reducerMock.Object, InMemoryChatMessageStore.ChatReducerTriggerEvent.AfterMessageAdded); + var provider = new InMemoryChatHistoryProvider(reducerMock.Object, InMemoryChatHistoryProvider.ChatReducerTriggerEvent.AfterMessageAdded); // Assert - Assert.Same(reducerMock.Object, store.ChatReducer); - Assert.Equal(InMemoryChatMessageStore.ChatReducerTriggerEvent.AfterMessageAdded, store.ReducerTriggerEvent); + Assert.Same(reducerMock.Object, provider.ChatReducer); + Assert.Equal(InMemoryChatHistoryProvider.ChatReducerTriggerEvent.AfterMessageAdded, provider.ReducerTriggerEvent); } [Fact] @@ -57,7 +57,7 @@ public async Task InvokedAsyncAddsMessagesAsync() { new(ChatRole.Assistant, "Hi there!") }; - var messageStoreMessages = new List() + var providerMessages = new List() { new(ChatRole.System, "original instructions") }; @@ -66,44 +66,44 @@ public async Task InvokedAsyncAddsMessagesAsync() new(ChatRole.System, "additional context") }; - var store = new InMemoryChatMessageStore(); 
- store.Add(messageStoreMessages[0]); - var context = new ChatMessageStore.InvokedContext(requestMessages, messageStoreMessages) + var provider = new InMemoryChatHistoryProvider(); + provider.Add(providerMessages[0]); + var context = new ChatHistoryProvider.InvokedContext(requestMessages, providerMessages) { AIContextProviderMessages = aiContextProviderMessages, ResponseMessages = responseMessages }; - await store.InvokedAsync(context, CancellationToken.None); + await provider.InvokedAsync(context, CancellationToken.None); - Assert.Equal(4, store.Count); - Assert.Equal("original instructions", store[0].Text); - Assert.Equal("Hello", store[1].Text); - Assert.Equal("additional context", store[2].Text); - Assert.Equal("Hi there!", store[3].Text); + Assert.Equal(4, provider.Count); + Assert.Equal("original instructions", provider[0].Text); + Assert.Equal("Hello", provider[1].Text); + Assert.Equal("additional context", provider[2].Text); + Assert.Equal("Hi there!", provider[3].Text); } [Fact] public async Task InvokedAsyncWithEmptyDoesNotFailAsync() { - var store = new InMemoryChatMessageStore(); + var provider = new InMemoryChatHistoryProvider(); - var context = new ChatMessageStore.InvokedContext([], []); - await store.InvokedAsync(context, CancellationToken.None); + var context = new ChatHistoryProvider.InvokedContext([], []); + await provider.InvokedAsync(context, CancellationToken.None); - Assert.Empty(store); + Assert.Empty(provider); } [Fact] public async Task InvokingAsyncReturnsAllMessagesAsync() { - var store = new InMemoryChatMessageStore + var provider = new InMemoryChatHistoryProvider { new ChatMessage(ChatRole.User, "Test1"), new ChatMessage(ChatRole.Assistant, "Test2") }; - var context = new ChatMessageStore.InvokingContext([]); - var result = (await store.InvokingAsync(context, CancellationToken.None)).ToList(); + var context = new ChatHistoryProvider.InvokingContext([]); + var result = (await provider.InvokingAsync(context, 
CancellationToken.None)).ToList(); Assert.Equal(2, result.Count); Assert.Contains(result, m => m.Text == "Test1"); @@ -115,26 +115,26 @@ public async Task DeserializeConstructorWithEmptyElementAsync() { var emptyObject = JsonSerializer.Deserialize("{}", TestJsonSerializerContext.Default.JsonElement); - var newStore = new InMemoryChatMessageStore(emptyObject); + var newProvider = new InMemoryChatHistoryProvider(emptyObject); - Assert.Empty(newStore); + Assert.Empty(newProvider); } [Fact] public async Task SerializeAndDeserializeConstructorRoundtripsAsync() { - var store = new InMemoryChatMessageStore + var provider = new InMemoryChatHistoryProvider { new ChatMessage(ChatRole.User, "A"), new ChatMessage(ChatRole.Assistant, "B") }; - var jsonElement = store.Serialize(); - var newStore = new InMemoryChatMessageStore(jsonElement); + var jsonElement = provider.Serialize(); + var newProvider = new InMemoryChatHistoryProvider(jsonElement); - Assert.Equal(2, newStore.Count); - Assert.Equal("A", newStore[0].Text); - Assert.Equal("B", newStore[1].Text); + Assert.Equal(2, newProvider.Count); + Assert.Equal("A", newProvider[0].Text); + Assert.Equal("B", newProvider[1].Text); } [Fact] @@ -147,66 +147,66 @@ public async Task SerializeAndDeserializeConstructorRoundtripsWithCustomAIConten }; options.AddAIContentType(typeDiscriminatorId: "testContent"); - var store = new InMemoryChatMessageStore + var provider = new InMemoryChatHistoryProvider { new ChatMessage(ChatRole.User, [new TestAIContent("foo data")]), }; - var jsonElement = store.Serialize(options); - var newStore = new InMemoryChatMessageStore(jsonElement, options); + var jsonElement = provider.Serialize(options); + var newProvider = new InMemoryChatHistoryProvider(jsonElement, options); - Assert.Single(newStore); - var actualTestAIContent = Assert.IsType(newStore[0].Contents[0]); + Assert.Single(newProvider); + var actualTestAIContent = Assert.IsType(newProvider[0].Contents[0]); Assert.Equal("foo data", 
actualTestAIContent.TestData); } [Fact] public async Task SerializeAndDeserializeWorksWithExperimentalContentTypesAsync() { - var store = new InMemoryChatMessageStore + var provider = new InMemoryChatHistoryProvider { new ChatMessage(ChatRole.User, [new FunctionApprovalRequestContent("call123", new FunctionCallContent("call123", "some_func"))]), new ChatMessage(ChatRole.Assistant, [new FunctionApprovalResponseContent("call123", true, new FunctionCallContent("call123", "some_func"))]) }; - var jsonElement = store.Serialize(); - var newStore = new InMemoryChatMessageStore(jsonElement); + var jsonElement = provider.Serialize(); + var newProvider = new InMemoryChatHistoryProvider(jsonElement); - Assert.Equal(2, newStore.Count); - Assert.IsType(newStore[0].Contents[0]); - Assert.IsType(newStore[1].Contents[0]); + Assert.Equal(2, newProvider.Count); + Assert.IsType(newProvider[0].Contents[0]); + Assert.IsType(newProvider[1].Contents[0]); } [Fact] - public async Task InvokedAsyncWithEmptyMessagesDoesNotChangeStoreAsync() + public async Task InvokedAsyncWithEmptyMessagesDoesNotChangeProviderAsync() { - var store = new InMemoryChatMessageStore(); + var provider = new InMemoryChatHistoryProvider(); var messages = new List(); - var context = new ChatMessageStore.InvokedContext(messages, []); - await store.InvokedAsync(context, CancellationToken.None); + var context = new ChatHistoryProvider.InvokedContext(messages, []); + await provider.InvokedAsync(context, CancellationToken.None); - Assert.Empty(store); + Assert.Empty(provider); } [Fact] public async Task InvokedAsync_WithNullContext_ThrowsArgumentNullExceptionAsync() { // Arrange - var store = new InMemoryChatMessageStore(); + var provider = new InMemoryChatHistoryProvider(); // Act & Assert - await Assert.ThrowsAsync(() => store.InvokedAsync(null!, CancellationToken.None).AsTask()); + await Assert.ThrowsAsync(() => provider.InvokedAsync(null!, CancellationToken.None).AsTask()); } [Fact] - public void 
DeserializeContructor_WithNullSerializedState_CreatesEmptyStore() + public void DeserializeContructor_WithNullSerializedState_CreatesEmptyProvider() { // Act - var store = new InMemoryChatMessageStore(new JsonElement()); + var provider = new InMemoryChatHistoryProvider(new JsonElement()); // Assert - Assert.Empty(store); + Assert.Empty(provider); } [Fact] @@ -218,10 +218,10 @@ public async Task DeserializeContructor_WithEmptyMessages_DoesNotAddMessagesAsyn TestJsonSerializerContext.Default.IDictionaryStringObject); // Act - var store = new InMemoryChatMessageStore(stateWithEmptyMessages); + var provider = new InMemoryChatHistoryProvider(stateWithEmptyMessages); // Assert - Assert.Empty(store); + Assert.Empty(provider); } [Fact] @@ -233,10 +233,10 @@ public async Task DeserializeConstructor_WithNullMessages_DoesNotAddMessagesAsyn TestJsonSerializerContext.Default.DictionaryStringObject); // Act - var store = new InMemoryChatMessageStore(stateWithNullMessages); + var provider = new InMemoryChatHistoryProvider(stateWithNullMessages); // Assert - Assert.Empty(store); + Assert.Empty(provider); } [Fact] @@ -254,159 +254,159 @@ public async Task DeserializeConstructor_WithValidMessages_AddsMessagesAsync() TestJsonSerializerContext.Default.DictionaryStringObject); // Act - var store = new InMemoryChatMessageStore(serializedState); + var provider = new InMemoryChatHistoryProvider(serializedState); // Assert - Assert.Equal(2, store.Count); - Assert.Equal("User message", store[0].Text); - Assert.Equal("Assistant message", store[1].Text); + Assert.Equal(2, provider.Count); + Assert.Equal("User message", provider[0].Text); + Assert.Equal("Assistant message", provider[1].Text); } [Fact] public void IndexerGet_ReturnsCorrectMessage() { // Arrange - var store = new InMemoryChatMessageStore(); + var provider = new InMemoryChatHistoryProvider(); var message1 = new ChatMessage(ChatRole.User, "First"); var message2 = new ChatMessage(ChatRole.Assistant, "Second"); - 
store.Add(message1); - store.Add(message2); + provider.Add(message1); + provider.Add(message2); // Act & Assert - Assert.Same(message1, store[0]); - Assert.Same(message2, store[1]); + Assert.Same(message1, provider[0]); + Assert.Same(message2, provider[1]); } [Fact] public void IndexerSet_UpdatesMessage() { // Arrange - var store = new InMemoryChatMessageStore(); + var provider = new InMemoryChatHistoryProvider(); var originalMessage = new ChatMessage(ChatRole.User, "Original"); var newMessage = new ChatMessage(ChatRole.User, "Updated"); - store.Add(originalMessage); + provider.Add(originalMessage); // Act - store[0] = newMessage; + provider[0] = newMessage; // Assert - Assert.Same(newMessage, store[0]); - Assert.Equal("Updated", store[0].Text); + Assert.Same(newMessage, provider[0]); + Assert.Equal("Updated", provider[0].Text); } [Fact] public void IsReadOnly_ReturnsFalse() { // Arrange - var store = new InMemoryChatMessageStore(); + var provider = new InMemoryChatHistoryProvider(); // Act & Assert - Assert.False(store.IsReadOnly); + Assert.False(provider.IsReadOnly); } [Fact] public void IndexOf_ReturnsCorrectIndex() { // Arrange - var store = new InMemoryChatMessageStore(); + var provider = new InMemoryChatHistoryProvider(); var message1 = new ChatMessage(ChatRole.User, "First"); var message2 = new ChatMessage(ChatRole.Assistant, "Second"); var message3 = new ChatMessage(ChatRole.User, "Third"); - store.Add(message1); - store.Add(message2); + provider.Add(message1); + provider.Add(message2); // Act & Assert - Assert.Equal(0, store.IndexOf(message1)); - Assert.Equal(1, store.IndexOf(message2)); - Assert.Equal(-1, store.IndexOf(message3)); // Not in store + Assert.Equal(0, provider.IndexOf(message1)); + Assert.Equal(1, provider.IndexOf(message2)); + Assert.Equal(-1, provider.IndexOf(message3)); // Not in provider } [Fact] public void Insert_InsertsMessageAtCorrectIndex() { // Arrange - var store = new InMemoryChatMessageStore(); + var provider = new 
InMemoryChatHistoryProvider(); var message1 = new ChatMessage(ChatRole.User, "First"); var message2 = new ChatMessage(ChatRole.Assistant, "Second"); var insertMessage = new ChatMessage(ChatRole.User, "Inserted"); - store.Add(message1); - store.Add(message2); + provider.Add(message1); + provider.Add(message2); // Act - store.Insert(1, insertMessage); + provider.Insert(1, insertMessage); // Assert - Assert.Equal(3, store.Count); - Assert.Same(message1, store[0]); - Assert.Same(insertMessage, store[1]); - Assert.Same(message2, store[2]); + Assert.Equal(3, provider.Count); + Assert.Same(message1, provider[0]); + Assert.Same(insertMessage, provider[1]); + Assert.Same(message2, provider[2]); } [Fact] public void RemoveAt_RemovesMessageAtIndex() { // Arrange - var store = new InMemoryChatMessageStore(); + var provider = new InMemoryChatHistoryProvider(); var message1 = new ChatMessage(ChatRole.User, "First"); var message2 = new ChatMessage(ChatRole.Assistant, "Second"); var message3 = new ChatMessage(ChatRole.User, "Third"); - store.Add(message1); - store.Add(message2); - store.Add(message3); + provider.Add(message1); + provider.Add(message2); + provider.Add(message3); // Act - store.RemoveAt(1); + provider.RemoveAt(1); // Assert - Assert.Equal(2, store.Count); - Assert.Same(message1, store[0]); - Assert.Same(message3, store[1]); + Assert.Equal(2, provider.Count); + Assert.Same(message1, provider[0]); + Assert.Same(message3, provider[1]); } [Fact] public void Clear_RemovesAllMessages() { // Arrange - var store = new InMemoryChatMessageStore + var provider = new InMemoryChatHistoryProvider { new ChatMessage(ChatRole.User, "First"), new ChatMessage(ChatRole.Assistant, "Second") }; // Act - store.Clear(); + provider.Clear(); // Assert - Assert.Empty(store); + Assert.Empty(provider); } [Fact] public void Contains_ReturnsTrueForExistingMessage() { // Arrange - var store = new InMemoryChatMessageStore(); + var provider = new InMemoryChatHistoryProvider(); var message1 = new 
ChatMessage(ChatRole.User, "First"); var message2 = new ChatMessage(ChatRole.Assistant, "Second"); - store.Add(message1); + provider.Add(message1); // Act & Assert - Assert.Contains(message1, store); - Assert.DoesNotContain(message2, store); + Assert.Contains(message1, provider); + Assert.DoesNotContain(message2, provider); } [Fact] public void CopyTo_CopiesMessagesToArray() { // Arrange - var store = new InMemoryChatMessageStore(); + var provider = new InMemoryChatHistoryProvider(); var message1 = new ChatMessage(ChatRole.User, "First"); var message2 = new ChatMessage(ChatRole.Assistant, "Second"); - store.Add(message1); - store.Add(message2); + provider.Add(message1); + provider.Add(message2); var array = new ChatMessage[4]; // Act - store.CopyTo(array, 1); + provider.CopyTo(array, 1); // Assert Assert.Null(array[0]); @@ -419,54 +419,54 @@ public void CopyTo_CopiesMessagesToArray() public void Remove_RemovesSpecificMessage() { // Arrange - var store = new InMemoryChatMessageStore(); + var provider = new InMemoryChatHistoryProvider(); var message1 = new ChatMessage(ChatRole.User, "First"); var message2 = new ChatMessage(ChatRole.Assistant, "Second"); var message3 = new ChatMessage(ChatRole.User, "Third"); - store.Add(message1); - store.Add(message2); - store.Add(message3); + provider.Add(message1); + provider.Add(message2); + provider.Add(message3); // Act - var removed = store.Remove(message2); + var removed = provider.Remove(message2); // Assert Assert.True(removed); - Assert.Equal(2, store.Count); - Assert.Same(message1, store[0]); - Assert.Same(message3, store[1]); + Assert.Equal(2, provider.Count); + Assert.Same(message1, provider[0]); + Assert.Same(message3, provider[1]); } [Fact] public void Remove_ReturnsFalseForNonExistentMessage() { // Arrange - var store = new InMemoryChatMessageStore(); + var provider = new InMemoryChatHistoryProvider(); var message1 = new ChatMessage(ChatRole.User, "First"); var message2 = new ChatMessage(ChatRole.Assistant, 
"Second"); - store.Add(message1); + provider.Add(message1); // Act - var removed = store.Remove(message2); + var removed = provider.Remove(message2); // Assert Assert.False(removed); - Assert.Single(store); + Assert.Single(provider); } [Fact] public void GetEnumerator_Generic_ReturnsAllMessages() { // Arrange - var store = new InMemoryChatMessageStore(); + var provider = new InMemoryChatHistoryProvider(); var message1 = new ChatMessage(ChatRole.User, "First"); var message2 = new ChatMessage(ChatRole.Assistant, "Second"); - store.Add(message1); - store.Add(message2); + provider.Add(message1); + provider.Add(message2); // Act var messages = new List(); - messages.AddRange(store); + messages.AddRange(provider); // Assert Assert.Equal(2, messages.Count); @@ -478,15 +478,15 @@ public void GetEnumerator_Generic_ReturnsAllMessages() public void GetEnumerator_NonGeneric_ReturnsAllMessages() { // Arrange - var store = new InMemoryChatMessageStore(); + var provider = new InMemoryChatHistoryProvider(); var message1 = new ChatMessage(ChatRole.User, "First"); var message2 = new ChatMessage(ChatRole.Assistant, "Second"); - store.Add(message1); - store.Add(message2); + provider.Add(message1); + provider.Add(message2); // Act var messages = new List(); - var enumerator = ((System.Collections.IEnumerable)store).GetEnumerator(); + var enumerator = ((System.Collections.IEnumerable)provider).GetEnumerator(); while (enumerator.MoveNext()) { messages.Add((ChatMessage)enumerator.Current); @@ -517,15 +517,15 @@ public async Task AddMessagesAsync_WithReducer_AfterMessageAdded_InvokesReducerA .Setup(r => r.ReduceAsync(It.Is>(x => x.SequenceEqual(originalMessages)), It.IsAny())) .ReturnsAsync(reducedMessages); - var store = new InMemoryChatMessageStore(reducerMock.Object, InMemoryChatMessageStore.ChatReducerTriggerEvent.AfterMessageAdded); + var provider = new InMemoryChatHistoryProvider(reducerMock.Object, InMemoryChatHistoryProvider.ChatReducerTriggerEvent.AfterMessageAdded); // Act - var 
context = new ChatMessageStore.InvokedContext(originalMessages, []); - await store.InvokedAsync(context, CancellationToken.None); + var context = new ChatHistoryProvider.InvokedContext(originalMessages, []); + await provider.InvokedAsync(context, CancellationToken.None); // Assert - Assert.Single(store); - Assert.Equal("Reduced", store[0].Text); + Assert.Single(provider); + Assert.Equal("Reduced", provider[0].Text); reducerMock.Verify(r => r.ReduceAsync(It.Is>(x => x.SequenceEqual(originalMessages)), It.IsAny()), Times.Once); } @@ -548,16 +548,16 @@ public async Task GetMessagesAsync_WithReducer_BeforeMessagesRetrieval_InvokesRe .Setup(r => r.ReduceAsync(It.Is>(x => x.SequenceEqual(originalMessages)), It.IsAny())) .ReturnsAsync(reducedMessages); - var store = new InMemoryChatMessageStore(reducerMock.Object, InMemoryChatMessageStore.ChatReducerTriggerEvent.BeforeMessagesRetrieval); - // Add messages directly to the store for this test + var provider = new InMemoryChatHistoryProvider(reducerMock.Object, InMemoryChatHistoryProvider.ChatReducerTriggerEvent.BeforeMessagesRetrieval); + // Add messages directly to the provider for this test foreach (var msg in originalMessages) { - store.Add(msg); + provider.Add(msg); } // Act - var invokingContext = new ChatMessageStore.InvokingContext(Array.Empty()); - var result = (await store.InvokingAsync(invokingContext, CancellationToken.None)).ToList(); + var invokingContext = new ChatHistoryProvider.InvokingContext(Array.Empty()); + var result = (await provider.InvokingAsync(invokingContext, CancellationToken.None)).ToList(); // Assert Assert.Single(result); @@ -576,15 +576,15 @@ public async Task AddMessagesAsync_WithReducer_ButWrongTrigger_DoesNotInvokeRedu var reducerMock = new Mock(); - var store = new InMemoryChatMessageStore(reducerMock.Object, InMemoryChatMessageStore.ChatReducerTriggerEvent.BeforeMessagesRetrieval); + var provider = new InMemoryChatHistoryProvider(reducerMock.Object, 
InMemoryChatHistoryProvider.ChatReducerTriggerEvent.BeforeMessagesRetrieval); // Act - var context = new ChatMessageStore.InvokedContext(originalMessages, []); - await store.InvokedAsync(context, CancellationToken.None); + var context = new ChatHistoryProvider.InvokedContext(originalMessages, []); + await provider.InvokedAsync(context, CancellationToken.None); // Assert - Assert.Single(store); - Assert.Equal("Hello", store[0].Text); + Assert.Single(provider); + Assert.Equal("Hello", provider[0].Text); reducerMock.Verify(r => r.ReduceAsync(It.IsAny>(), It.IsAny()), Times.Never); } @@ -599,14 +599,14 @@ public async Task GetMessagesAsync_WithReducer_ButWrongTrigger_DoesNotInvokeRedu var reducerMock = new Mock(); - var store = new InMemoryChatMessageStore(reducerMock.Object, InMemoryChatMessageStore.ChatReducerTriggerEvent.AfterMessageAdded) + var provider = new InMemoryChatHistoryProvider(reducerMock.Object, InMemoryChatHistoryProvider.ChatReducerTriggerEvent.AfterMessageAdded) { originalMessages[0] }; // Act - var invokingContext = new ChatMessageStore.InvokingContext(Array.Empty()); - var result = (await store.InvokingAsync(invokingContext, CancellationToken.None)).ToList(); + var invokingContext = new ChatHistoryProvider.InvokingContext(Array.Empty()); + var result = (await provider.InvokingAsync(invokingContext, CancellationToken.None)).ToList(); // Assert Assert.Single(result); diff --git a/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/TestJsonSerializerContext.cs b/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/TestJsonSerializerContext.cs index 1f6f9bb578..397cd48c86 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/TestJsonSerializerContext.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/TestJsonSerializerContext.cs @@ -22,5 +22,5 @@ namespace Microsoft.Agents.AI.Abstractions.UnitTests; [JsonSerializable(typeof(InMemoryAgentThread.InMemoryAgentThreadState))] 
[JsonSerializable(typeof(ServiceIdAgentThread.ServiceIdAgentThreadState))] [JsonSerializable(typeof(ServiceIdAgentThreadTests.EmptyObject))] -[JsonSerializable(typeof(InMemoryChatMessageStoreTests.TestAIContent))] +[JsonSerializable(typeof(InMemoryChatHistoryProviderTests.TestAIContent))] internal sealed partial class TestJsonSerializerContext : JsonSerializerContext; diff --git a/dotnet/tests/Microsoft.Agents.AI.AzureAI.Persistent.UnitTests/Extensions/PersistentAgentsClientExtensionsTests.cs b/dotnet/tests/Microsoft.Agents.AI.AzureAI.Persistent.UnitTests/Extensions/PersistentAgentsClientExtensionsTests.cs index a3d3be27fe..51de9ac64e 100644 --- a/dotnet/tests/Microsoft.Agents.AI.AzureAI.Persistent.UnitTests/Extensions/PersistentAgentsClientExtensionsTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.AzureAI.Persistent.UnitTests/Extensions/PersistentAgentsClientExtensionsTests.cs @@ -18,44 +18,6 @@ namespace Microsoft.Agents.AI.AzureAI.Persistent.UnitTests.Extensions; public sealed class PersistentAgentsClientExtensionsTests { - /// - /// Verify that GetAIAgent throws ArgumentNullException when client is null. - /// - [Fact] - public void GetAIAgent_WithNullClient_ThrowsArgumentNullException() - { - // Act & Assert - var exception = Assert.Throws(() => - ((PersistentAgentsClient)null!).GetAIAgent("test-agent")); - - Assert.Equal("persistentAgentsClient", exception.ParamName); - } - - /// - /// Verify that GetAIAgent throws ArgumentException when agentId is null or whitespace. 
- /// - [Fact] - public void GetAIAgent_WithNullOrWhitespaceAgentId_ThrowsArgumentException() - { - // Arrange - var mockClient = new Mock(); - - // Act & Assert - null agentId - var exception1 = Assert.Throws(() => - mockClient.Object.GetAIAgent(null!)); - Assert.Equal("agentId", exception1.ParamName); - - // Act & Assert - empty agentId - var exception2 = Assert.Throws(() => - mockClient.Object.GetAIAgent("")); - Assert.Equal("agentId", exception2.ParamName); - - // Act & Assert - whitespace agentId - var exception3 = Assert.Throws(() => - mockClient.Object.GetAIAgent(" ")); - Assert.Equal("agentId", exception3.ParamName); - } - /// /// Verify that GetAIAgentAsync throws ArgumentNullException when client is null. /// @@ -94,19 +56,6 @@ public async Task GetAIAgentAsync_WithNullOrWhitespaceAgentId_ThrowsArgumentExce Assert.Equal("agentId", exception3.ParamName); } - /// - /// Verify that CreateAIAgent throws ArgumentNullException when client is null. - /// - [Fact] - public void CreateAIAgent_WithNullClient_ThrowsArgumentNullException() - { - // Act & Assert - var exception = Assert.Throws(() => - ((PersistentAgentsClient)null!).CreateAIAgent("test-model")); - - Assert.Equal("persistentAgentsClient", exception.ParamName); - } - /// /// Verify that CreateAIAgentAsync throws ArgumentNullException when client is null. /// @@ -124,14 +73,14 @@ public async Task CreateAIAgentAsync_WithNullClient_ThrowsArgumentNullExceptionA /// Verify that GetAIAgent with clientFactory parameter correctly applies the factory. /// [Fact] - public void GetAIAgent_WithClientFactory_AppliesFactoryCorrectly() + public async Task GetAIAgentAsync_WithClientFactory_AppliesFactoryCorrectlyAsync() { // Arrange var client = CreateFakePersistentAgentsClient(); TestChatClient? 
testChatClient = null; // Act - var agent = client.GetAIAgent( + var agent = await client.GetAIAgentAsync( agentId: "test-agent-id", clientFactory: (innerClient) => testChatClient = new TestChatClient(innerClient)); @@ -146,13 +95,13 @@ public void GetAIAgent_WithClientFactory_AppliesFactoryCorrectly() /// Verify that GetAIAgent without clientFactory works normally. /// [Fact] - public void GetAIAgent_WithoutClientFactory_WorksNormally() + public async Task GetAIAgentAsync_WithoutClientFactory_WorksNormallyAsync() { // Arrange var client = CreateFakePersistentAgentsClient(); // Act - var agent = client.GetAIAgent(agentId: "test-agent-id"); + var agent = await client.GetAIAgentAsync(agentId: "test-agent-id"); // Assert Assert.NotNull(agent); @@ -164,13 +113,13 @@ public void GetAIAgent_WithoutClientFactory_WorksNormally() /// Verify that GetAIAgent with null clientFactory works normally. /// [Fact] - public void GetAIAgent_WithNullClientFactory_WorksNormally() + public async Task GetAIAgentAsync_WithNullClientFactory_WorksNormallyAsync() { // Arrange PersistentAgentsClient client = CreateFakePersistentAgentsClient(); // Act - var agent = client.GetAIAgent(agentId: "test-agent-id", clientFactory: null); + var agent = await client.GetAIAgentAsync(agentId: "test-agent-id", clientFactory: null); // Assert Assert.NotNull(agent); @@ -178,29 +127,6 @@ public void GetAIAgent_WithNullClientFactory_WorksNormally() Assert.Null(retrievedTestClient); } - /// - /// Verify that CreateAIAgent with clientFactory parameter correctly applies the factory. - /// - [Fact] - public void CreateAIAgent_WithClientFactory_AppliesFactoryCorrectly() - { - // Arrange - // Arrange - var client = CreateFakePersistentAgentsClient(); - TestChatClient? 
testChatClient = null; - - // Act - var agent = client.CreateAIAgent( - model: "test-model", - clientFactory: (innerClient) => testChatClient = new TestChatClient(innerClient)); - - // Assert - Assert.NotNull(agent); - var retrievedTestClient = agent.GetService(); - Assert.NotNull(retrievedTestClient); - Assert.Same(testChatClient, retrievedTestClient); - } - /// /// Verify that CreateAIAgentAsync with clientFactory parameter correctly applies the factory. /// @@ -223,42 +149,6 @@ public async Task CreateAIAgentAsync_WithClientFactory_AppliesFactoryCorrectlyAs Assert.Same(testChatClient, retrievedTestClient); } - /// - /// Verify that CreateAIAgent without clientFactory works normally. - /// - [Fact] - public void CreateAIAgent_WithoutClientFactory_WorksNormally() - { - // Arrange - var client = CreateFakePersistentAgentsClient(); - - // Act - var agent = client.CreateAIAgent(model: "test-model"); - - // Assert - Assert.NotNull(agent); - var retrievedTestClient = agent.GetService(); - Assert.Null(retrievedTestClient); - } - - /// - /// Verify that CreateAIAgent with null clientFactory works normally. - /// - [Fact] - public void CreateAIAgent_WithNullClientFactory_WorksNormally() - { - // Arrange - var client = CreateFakePersistentAgentsClient(); - - // Act - var agent = client.CreateAIAgent(model: "test-model", clientFactory: null); - - // Assert - Assert.NotNull(agent); - var retrievedTestClient = agent.GetService(); - Assert.Null(retrievedTestClient); - } - /// /// Verify that CreateAIAgent without clientFactory works normally. /// @@ -372,33 +262,6 @@ public void GetAIAgent_WithPersistentAgentAndOptionsWithNullFields_FallsBackToAg Assert.Equal("Original Instructions", agent.Instructions); } - /// - /// Verify that GetAIAgent with agentId and options works correctly. 
- /// - [Fact] - public void GetAIAgent_WithAgentIdAndOptions_WorksCorrectly() - { - // Arrange - var client = CreateFakePersistentAgentsClient(); - const string AgentId = "agent_abc123"; - - var options = new ChatClientAgentOptions - { - Name = "Override Name", - Description = "Override Description", - ChatOptions = new() { Instructions = "Override Instructions" } - }; - - // Act - var agent = client.GetAIAgent(AgentId, options); - - // Assert - Assert.NotNull(agent); - Assert.Equal("Override Name", agent.Name); - Assert.Equal("Override Description", agent.Description); - Assert.Equal("Override Instructions", agent.Instructions); - } - /// /// Verify that GetAIAgentAsync with agentId and options works correctly. /// @@ -509,23 +372,6 @@ public void GetAIAgent_WithNullOptions_ThrowsArgumentNullException() Assert.Equal("options", exception.ParamName); } - /// - /// Verify that GetAIAgent throws ArgumentException when agentId is empty. - /// - [Fact] - public void GetAIAgent_WithOptionsAndEmptyAgentId_ThrowsArgumentException() - { - // Arrange - var client = CreateFakePersistentAgentsClient(); - var options = new ChatClientAgentOptions(); - - // Act & Assert - var exception = Assert.Throws(() => - client.GetAIAgent(string.Empty, options)); - - Assert.Equal("agentId", exception.ParamName); - } - /// /// Verify that GetAIAgentAsync throws ArgumentException when agentId is empty. /// @@ -543,33 +389,6 @@ public async Task GetAIAgentAsync_WithOptionsAndEmptyAgentId_ThrowsArgumentExcep Assert.Equal("agentId", exception.ParamName); } - /// - /// Verify that CreateAIAgent with options works correctly. 
- /// - [Fact] - public void CreateAIAgent_WithOptions_WorksCorrectly() - { - // Arrange - var client = CreateFakePersistentAgentsClient(); - const string Model = "test-model"; - - var options = new ChatClientAgentOptions - { - Name = "Test Agent", - Description = "Test description", - ChatOptions = new() { Instructions = "Test instructions" } - }; - - // Act - var agent = client.CreateAIAgent(Model, options); - - // Assert - Assert.NotNull(agent); - Assert.Equal("Test Agent", agent.Name); - Assert.Equal("Test description", agent.Description); - Assert.Equal("Test instructions", agent.Instructions); - } - /// /// Verify that CreateAIAgentAsync with options works correctly. /// @@ -597,38 +416,6 @@ public async Task CreateAIAgentAsync_WithOptions_WorksCorrectlyAsync() Assert.Equal("Test instructions", agent.Instructions); } - /// - /// Verify that CreateAIAgent with options and clientFactory applies the factory correctly. - /// - [Fact] - public void CreateAIAgent_WithOptionsAndClientFactory_AppliesFactoryCorrectly() - { - // Arrange - var client = CreateFakePersistentAgentsClient(); - TestChatClient? testChatClient = null; - const string Model = "test-model"; - - var options = new ChatClientAgentOptions - { - Name = "Test Agent" - }; - - // Act - var agent = client.CreateAIAgent( - Model, - options, - clientFactory: (innerClient) => testChatClient = new TestChatClient(innerClient)); - - // Assert - Assert.NotNull(agent); - Assert.Equal("Test Agent", agent.Name); - - // Verify that the custom chat client can be retrieved from the agent's service collection - var retrievedTestClient = agent.GetService(); - Assert.NotNull(retrievedTestClient); - Assert.Same(testChatClient, retrievedTestClient); - } - /// /// Verify that CreateAIAgentAsync with options and clientFactory applies the factory correctly. 
/// @@ -661,22 +448,6 @@ public async Task CreateAIAgentAsync_WithOptionsAndClientFactory_AppliesFactoryC Assert.Same(testChatClient, retrievedTestClient); } - /// - /// Verify that CreateAIAgent throws ArgumentNullException when options is null. - /// - [Fact] - public void CreateAIAgent_WithNullOptions_ThrowsArgumentNullException() - { - // Arrange - var client = CreateFakePersistentAgentsClient(); - - // Act & Assert - var exception = Assert.Throws(() => - client.CreateAIAgent("test-model", (ChatClientAgentOptions)null!)); - - Assert.Equal("options", exception.ParamName); - } - /// /// Verify that CreateAIAgentAsync throws ArgumentNullException when options is null. /// @@ -693,23 +464,6 @@ public async Task CreateAIAgentAsync_WithNullOptions_ThrowsArgumentNullException Assert.Equal("options", exception.ParamName); } - /// - /// Verify that CreateAIAgent throws ArgumentException when model is empty. - /// - [Fact] - public void CreateAIAgent_WithEmptyModel_ThrowsArgumentException() - { - // Arrange - var client = CreateFakePersistentAgentsClient(); - var options = new ChatClientAgentOptions(); - - // Act & Assert - var exception = Assert.Throws(() => - client.CreateAIAgent(string.Empty, options)); - - Assert.Equal("model", exception.ParamName); - } - /// /// Verify that CreateAIAgentAsync throws ArgumentException when model is empty. /// @@ -727,35 +481,6 @@ public async Task CreateAIAgentAsync_WithEmptyModel_ThrowsArgumentExceptionAsync Assert.Equal("model", exception.ParamName); } - /// - /// Verify that CreateAIAgent with services parameter correctly passes it through to the ChatClientAgent. 
- /// - [Fact] - public void CreateAIAgent_WithServices_PassesServicesToAgent() - { - // Arrange - var client = CreateFakePersistentAgentsClient(); - var serviceProvider = new TestServiceProvider(); - const string Model = "test-model"; - - // Act - var agent = client.CreateAIAgent( - Model, - instructions: "Test instructions", - name: "Test Agent", - services: serviceProvider); - - // Assert - Assert.NotNull(agent); - - // Verify the IServiceProvider was passed through to the FunctionInvokingChatClient - var chatClient = agent.GetService(); - Assert.NotNull(chatClient); - var functionInvokingClient = chatClient.GetService(); - Assert.NotNull(functionInvokingClient); - Assert.Same(serviceProvider, GetFunctionInvocationServices(functionInvokingClient)); - } - /// /// Verify that CreateAIAgentAsync with services parameter correctly passes it through to the ChatClientAgent. /// @@ -785,30 +510,6 @@ public async Task CreateAIAgentAsync_WithServices_PassesServicesToAgentAsync() Assert.Same(serviceProvider, GetFunctionInvocationServices(functionInvokingClient)); } - /// - /// Verify that GetAIAgent with services parameter correctly passes it through to the ChatClientAgent. - /// - [Fact] - public void GetAIAgent_WithServices_PassesServicesToAgent() - { - // Arrange - var client = CreateFakePersistentAgentsClient(); - var serviceProvider = new TestServiceProvider(); - - // Act - var agent = client.GetAIAgent("agent_abc123", services: serviceProvider); - - // Assert - Assert.NotNull(agent); - - // Verify the IServiceProvider was passed through to the FunctionInvokingChatClient - var chatClient = agent.GetService(); - Assert.NotNull(chatClient); - var functionInvokingClient = chatClient.GetService(); - Assert.NotNull(functionInvokingClient); - Assert.Same(serviceProvider, GetFunctionInvocationServices(functionInvokingClient)); - } - /// /// Verify that GetAIAgentAsync with services parameter correctly passes it through to the ChatClientAgent. 
/// @@ -837,7 +538,7 @@ public async Task GetAIAgentAsync_WithServices_PassesServicesToAgentAsync() /// Verify that CreateAIAgent with both clientFactory and services works correctly. /// [Fact] - public void CreateAIAgent_WithClientFactoryAndServices_AppliesBothCorrectly() + public async Task CreateAIAgentAsync_WithClientFactoryAndServices_AppliesBothCorrectlyAsync() { // Arrange var client = CreateFakePersistentAgentsClient(); @@ -846,7 +547,7 @@ public void CreateAIAgent_WithClientFactoryAndServices_AppliesBothCorrectly() const string Model = "test-model"; // Act - var agent = client.CreateAIAgent( + var agent = await client.CreateAIAgentAsync( Model, instructions: "Test instructions", name: "Test Agent", diff --git a/dotnet/tests/Microsoft.Agents.AI.AzureAI.UnitTests/AzureAIProjectChatClientExtensionsTests.cs b/dotnet/tests/Microsoft.Agents.AI.AzureAI.UnitTests/AzureAIProjectChatClientExtensionsTests.cs index 528dc323af..eb2ea449e6 100644 --- a/dotnet/tests/Microsoft.Agents.AI.AzureAI.UnitTests/AzureAIProjectChatClientExtensionsTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.AzureAI.UnitTests/AzureAIProjectChatClientExtensionsTests.cs @@ -180,7 +180,7 @@ public void AsAIAgent_WithAgentVersion_WithClientFactory_AppliesFactoryCorrectly } /// - /// Verify that GetAIAgent with requireInvocableTools=true enforces invocable tools. + /// Verify that AsAIAgent with requireInvocableTools=true enforces invocable tools. /// [Fact] public void AsAIAgent_WithAgentVersion_WithRequireInvocableToolsTrue_EnforcesInvocableTools() @@ -202,7 +202,7 @@ public void AsAIAgent_WithAgentVersion_WithRequireInvocableToolsTrue_EnforcesInv } /// - /// Verify that GetAIAgent with requireInvocableTools=false allows declarative functions. + /// Verify that AsAIAgent with requireInvocableTools=false allows declarative functions. 
/// [Fact] public void AsAIAgent_WithAgentVersion_WithRequireInvocableToolsFalse_AllowsDeclarativeFunctions() @@ -221,101 +221,6 @@ public void AsAIAgent_WithAgentVersion_WithRequireInvocableToolsFalse_AllowsDecl #endregion - #region GetAIAgent(AIProjectClient, ChatClientAgentOptions) Tests - - /// - /// Verify that GetAIAgent with ChatClientAgentOptions throws ArgumentNullException when client is null. - /// - [Fact] - public void GetAIAgent_WithOptions_WithNullClient_ThrowsArgumentNullException() - { - // Arrange - AIProjectClient? client = null; - var options = new ChatClientAgentOptions { Name = "test-agent" }; - - // Act & Assert - var exception = Assert.Throws(() => - client!.GetAIAgent(options)); - - Assert.Equal("aiProjectClient", exception.ParamName); - } - - /// - /// Verify that GetAIAgent with ChatClientAgentOptions throws ArgumentNullException when options is null. - /// - [Fact] - public void GetAIAgent_WithOptions_WithNullOptions_ThrowsArgumentNullException() - { - // Arrange - var mockClient = new Mock(); - - // Act & Assert - var exception = Assert.Throws(() => - mockClient.Object.GetAIAgent((ChatClientAgentOptions)null!)); - - Assert.Equal("options", exception.ParamName); - } - - /// - /// Verify that GetAIAgent with ChatClientAgentOptions throws ArgumentException when options.Name is null. - /// - [Fact] - public void GetAIAgent_WithOptions_WithoutName_ThrowsArgumentException() - { - // Arrange - AIProjectClient client = this.CreateTestAgentClient(); - var options = new ChatClientAgentOptions(); - - // Act & Assert - var exception = Assert.Throws(() => - client.GetAIAgent(options)); - - Assert.Contains("Agent name must be provided", exception.Message); - } - - /// - /// Verify that GetAIAgent with ChatClientAgentOptions creates a valid agent. 
- /// - [Fact] - public void GetAIAgent_WithOptions_CreatesValidAgent() - { - // Arrange - AIProjectClient client = this.CreateTestAgentClient(agentName: "test-agent"); - var options = new ChatClientAgentOptions { Name = "test-agent" }; - - // Act - var agent = client.GetAIAgent(options); - - // Assert - Assert.NotNull(agent); - Assert.Equal("test-agent", agent.Name); - } - - /// - /// Verify that GetAIAgent with ChatClientAgentOptions and clientFactory applies the factory. - /// - [Fact] - public void GetAIAgent_WithOptions_WithClientFactory_AppliesFactoryCorrectly() - { - // Arrange - AIProjectClient client = this.CreateTestAgentClient(agentName: "test-agent"); - var options = new ChatClientAgentOptions { Name = "test-agent" }; - TestChatClient? testChatClient = null; - - // Act - var agent = client.GetAIAgent( - options, - clientFactory: (innerClient) => testChatClient = new TestChatClient(innerClient)); - - // Assert - Assert.NotNull(agent); - var retrievedTestClient = agent.GetService(); - Assert.NotNull(retrievedTestClient); - Assert.Same(testChatClient, retrievedTestClient); - } - - #endregion - #region GetAIAgentAsync(AIProjectClient, ChatClientAgentOptions) Tests /// @@ -371,20 +276,20 @@ public async Task GetAIAgentAsync_WithOptions_CreatesValidAgentAsync() #endregion - #region GetAIAgent(AIProjectClient, string) Tests + #region AsAIAgent(AIProjectClient, string) Tests /// /// Verify that AsAIAgent throws ArgumentNullException when AIProjectClient is null. /// [Fact] - public void GetAIAgent_ByName_WithNullClient_ThrowsArgumentNullException() + public void AsAIAgent_ByName_WithNullClient_ThrowsArgumentNullException() { // Arrange AIProjectClient? 
client = null; // Act & Assert var exception = Assert.Throws(() => - client!.GetAIAgent("test-agent")); + client!.AsAIAgent("test-agent")); Assert.Equal("aiProjectClient", exception.ParamName); } @@ -393,14 +298,14 @@ public void GetAIAgent_ByName_WithNullClient_ThrowsArgumentNullException() /// Verify that AsAIAgent throws ArgumentNullException when name is null. /// [Fact] - public void GetAIAgent_ByName_WithNullName_ThrowsArgumentNullException() + public void AsAIAgent_ByName_WithNullName_ThrowsArgumentNullException() { // Arrange var mockClient = new Mock(); // Act & Assert var exception = Assert.Throws(() => - mockClient.Object.GetAIAgent((string)null!)); + mockClient.Object.AsAIAgent((string)null!)); Assert.Equal("name", exception.ParamName); } @@ -409,41 +314,18 @@ public void GetAIAgent_ByName_WithNullName_ThrowsArgumentNullException() /// Verify that AsAIAgent throws ArgumentException when name is empty. /// [Fact] - public void GetAIAgent_ByName_WithEmptyName_ThrowsArgumentException() + public void AsAIAgent_ByName_WithEmptyName_ThrowsArgumentException() { // Arrange var mockClient = new Mock(); // Act & Assert var exception = Assert.Throws(() => - mockClient.Object.GetAIAgent(string.Empty)); + mockClient.Object.AsAIAgent(string.Empty)); Assert.Equal("name", exception.ParamName); } - /// - /// Verify that AsAIAgent throws InvalidOperationException when agent is not found. 
- /// - [Fact] - public void GetAIAgent_ByName_WithNonExistentAgent_ThrowsInvalidOperationException() - { - // Arrange - var mockAgentOperations = new Mock(); - mockAgentOperations - .Setup(c => c.GetAgent(It.IsAny(), It.IsAny())) - .Returns(ClientResult.FromOptionalValue((AgentRecord)null!, new MockPipelineResponse(200, BinaryData.FromString("null")))); - - var mockClient = new Mock(); - mockClient.SetupGet(x => x.Agents).Returns(mockAgentOperations.Object); - mockClient.Setup(x => x.GetConnection(It.IsAny())).Returns(new ClientConnection("fake-connection-id", "http://localhost", ClientPipeline.Create(), CredentialKind.None)); - - // Act & Assert - var exception = Assert.Throws(() => - mockClient.Object.GetAIAgent("non-existent-agent")); - - Assert.Contains("not found", exception.Message); - } - #endregion #region GetAIAgentAsync(AIProjectClient, string) Tests @@ -578,220 +460,6 @@ public async Task GetAIAgentAsync_WithNameAndTools_CreatesAgentAsync() Assert.IsType(agent); } - #endregion - - #region CreateAIAgent(AIProjectClient, string, string) Tests - - /// - /// Verify that CreateAIAgent throws ArgumentNullException when AIProjectClient is null. - /// - [Fact] - public void CreateAIAgent_WithBasicParams_WithNullClient_ThrowsArgumentNullException() - { - // Arrange - AIProjectClient? client = null; - - // Act & Assert - var exception = Assert.Throws(() => - client!.CreateAIAgent("test-agent", "model", "instructions")); - - Assert.Equal("aiProjectClient", exception.ParamName); - } - - /// - /// Verify that CreateAIAgent throws ArgumentNullException when name is null. 
- /// - [Fact] - public void CreateAIAgent_WithBasicParams_WithNullName_ThrowsArgumentNullException() - { - // Arrange - var mockClient = new Mock(); - - // Act & Assert - var exception = Assert.Throws(() => - mockClient.Object.CreateAIAgent(null!, "model", "instructions")); - - Assert.Equal("name", exception.ParamName); - } - - #endregion - - #region CreateAIAgent(AIProjectClient, string, AgentDefinition) Tests - - /// - /// Verify that CreateAIAgent throws ArgumentNullException when AIProjectClient is null. - /// - [Fact] - public void CreateAIAgent_WithAgentDefinition_WithNullClient_ThrowsArgumentNullException() - { - // Arrange - AIProjectClient? client = null; - var definition = new PromptAgentDefinition("test-model"); - var options = new AgentVersionCreationOptions(definition); - - // Act & Assert - var exception = Assert.Throws(() => - client!.CreateAIAgent("test-agent", options)); - - Assert.Equal("aiProjectClient", exception.ParamName); - } - - /// - /// Verify that CreateAIAgent throws ArgumentNullException when name is null. - /// - [Fact] - public void CreateAIAgent_WithAgentDefinition_WithNullName_ThrowsArgumentNullException() - { - // Arrange - var mockClient = new Mock(); - var definition = new PromptAgentDefinition("test-model"); - var options = new AgentVersionCreationOptions(definition); - - // Act & Assert - var exception = Assert.Throws(() => - mockClient.Object.CreateAIAgent(null!, options)); - - Assert.Equal("name", exception.ParamName); - } - - /// - /// Verify that CreateAIAgent throws ArgumentNullException when creationOptions is null. 
- /// - [Fact] - public void CreateAIAgent_WithAgentDefinition_WithNullDefinition_ThrowsArgumentNullException() - { - // Arrange - var mockClient = new Mock(); - - // Act & Assert - var exception = Assert.Throws(() => - mockClient.Object.CreateAIAgent("test-agent", (AgentVersionCreationOptions)null!)); - - Assert.Equal("creationOptions", exception.ParamName); - } - - #endregion - - #region CreateAIAgent(AIProjectClient, ChatClientAgentOptions, string) Tests - - /// - /// Verify that CreateAIAgent throws ArgumentNullException when AIProjectClient is null. - /// - [Fact] - public void CreateAIAgent_WithOptions_WithNullClient_ThrowsArgumentNullException() - { - // Arrange - AIProjectClient? client = null; - var options = new ChatClientAgentOptions { Name = "test-agent" }; - - // Act & Assert - var exception = Assert.Throws(() => - client!.CreateAIAgent("model", options)); - - Assert.Equal("aiProjectClient", exception.ParamName); - } - - /// - /// Verify that CreateAIAgent throws ArgumentNullException when options is null. - /// - [Fact] - public void CreateAIAgent_WithOptions_WithNullOptions_ThrowsArgumentNullException() - { - // Arrange - var mockClient = new Mock(); - - // Act & Assert - var exception = Assert.Throws(() => - mockClient.Object.CreateAIAgent("model", (ChatClientAgentOptions)null!)); - - Assert.Equal("options", exception.ParamName); - } - - /// - /// Verify that CreateAIAgent throws ArgumentNullException when model is null. - /// - [Fact] - public void CreateAIAgent_WithOptions_WithNullModel_ThrowsArgumentNullException() - { - // Arrange - var mockClient = new Mock(); - var options = new ChatClientAgentOptions { Name = "test-agent" }; - - // Act & Assert - var exception = Assert.Throws(() => - mockClient.Object.CreateAIAgent(null!, options)); - - Assert.Equal("model", exception.ParamName); - } - - /// - /// Verify that CreateAIAgent throws ArgumentNullException when options.Name is null. 
- /// - [Fact] - public void CreateAIAgent_WithOptions_WithoutName_ThrowsException() - { - // Arrange - AIProjectClient client = this.CreateTestAgentClient(); - var options = new ChatClientAgentOptions(); - - // Act & Assert - var exception = Assert.Throws(() => - client.CreateAIAgent("test-model", options)); - - Assert.Contains("Agent name must be provided", exception.Message); - } - - /// - /// Verify that CreateAIAgent with model and options creates a valid agent. - /// - [Fact] - public void CreateAIAgent_WithModelAndOptions_CreatesValidAgent() - { - // Arrange - AIProjectClient client = this.CreateTestAgentClient(agentName: "test-agent", instructions: "Test instructions"); - var options = new ChatClientAgentOptions - { - Name = "test-agent", - ChatOptions = new() { Instructions = "Test instructions" } - }; - - // Act - var agent = client.CreateAIAgent("test-model", options); - - // Assert - Assert.NotNull(agent); - Assert.Equal("test-agent", agent.Name); - Assert.Equal("Test instructions", agent.Instructions); - } - - /// - /// Verify that CreateAIAgent with model and options and clientFactory applies the factory. - /// - [Fact] - public void CreateAIAgent_WithModelAndOptions_WithClientFactory_AppliesFactoryCorrectly() - { - // Arrange - AIProjectClient client = this.CreateTestAgentClient(agentName: "test-agent", instructions: "Test instructions"); - var options = new ChatClientAgentOptions - { - Name = "test-agent", - ChatOptions = new() { Instructions = "Test instructions" } - }; - TestChatClient? testChatClient = null; - - // Act - var agent = client.CreateAIAgent( - "test-model", - options, - clientFactory: (innerClient) => testChatClient = new TestChatClient(innerClient)); - - // Assert - Assert.NotNull(agent); - var retrievedTestClient = agent.GetService(); - Assert.NotNull(retrievedTestClient); - Assert.Same(testChatClient, retrievedTestClient); - } - /// /// Verify that CreateAIAgentAsync with model and options creates a valid agent. 
/// @@ -889,7 +557,7 @@ public async Task CreateAIAgentAsync_WithAgentDefinition_WithNullDefinition_Thro /// Verify that CreateAIAgent creates an agent successfully. /// [Fact] - public void CreateAIAgent_WithDefinition_CreatesAgentSuccessfully() + public async Task CreateAIAgentAsync_WithDefinition_CreatesAgentSuccessfullyAsync() { // Arrange AIProjectClient client = this.CreateTestAgentClient(); @@ -897,7 +565,7 @@ public void CreateAIAgent_WithDefinition_CreatesAgentSuccessfully() var options = new AgentVersionCreationOptions(definition); // Act - var agent = client.CreateAIAgent("test-agent", options); + var agent = await client.CreateAIAgentAsync("test-agent", options); // Assert Assert.NotNull(agent); @@ -908,7 +576,7 @@ public void CreateAIAgent_WithDefinition_CreatesAgentSuccessfully() /// Verify that CreateAIAgent without tools parameter creates an agent successfully. /// [Fact] - public void CreateAIAgent_WithoutToolsParameter_CreatesAgentSuccessfully() + public async Task CreateAIAgentAsync_WithoutToolsParameter_CreatesAgentSuccessfullyAsync() { // Arrange var definition = new PromptAgentDefinition("test-model") { Instructions = "Test" }; @@ -919,7 +587,7 @@ public void CreateAIAgent_WithoutToolsParameter_CreatesAgentSuccessfully() var options = new AgentVersionCreationOptions(definition); // Act - var agent = client.CreateAIAgent("test-agent", options); + var agent = await client.CreateAIAgentAsync("test-agent", options); // Assert Assert.NotNull(agent); @@ -930,7 +598,7 @@ public void CreateAIAgent_WithoutToolsParameter_CreatesAgentSuccessfully() /// Verify that CreateAIAgent without tools in definition creates an agent successfully. 
/// [Fact] - public void CreateAIAgent_WithoutToolsInDefinition_CreatesAgentSuccessfully() + public async Task CreateAIAgentAsync_WithoutToolsInDefinition_CreatesAgentSuccessfullyAsync() { // Arrange var definition = new PromptAgentDefinition("test-model") { Instructions = "Test" }; @@ -939,7 +607,7 @@ public void CreateAIAgent_WithoutToolsInDefinition_CreatesAgentSuccessfully() var options = new AgentVersionCreationOptions(definition); // Act - var agent = client.CreateAIAgent("test-agent", options); + var agent = await client.CreateAIAgentAsync("test-agent", options); // Assert Assert.NotNull(agent); @@ -950,7 +618,7 @@ public void CreateAIAgent_WithoutToolsInDefinition_CreatesAgentSuccessfully() /// Verify that CreateAIAgent uses tools from the definition when no separate tools parameter is provided. /// [Fact] - public void CreateAIAgent_WithDefinitionTools_UsesDefinitionTools() + public async Task CreateAIAgentAsync_WithDefinitionTools_UsesDefinitionToolsAsync() { // Arrange var definition = new PromptAgentDefinition("test-model") { Instructions = "Test" }; @@ -965,7 +633,7 @@ public void CreateAIAgent_WithDefinitionTools_UsesDefinitionTools() var options = new AgentVersionCreationOptions(definition); // Act - var agent = client.CreateAIAgent("test-agent", options); + var agent = await client.CreateAIAgentAsync("test-agent", options); // Assert Assert.NotNull(agent); @@ -981,59 +649,55 @@ public void CreateAIAgent_WithDefinitionTools_UsesDefinitionTools() } /// - /// Verify that CreateAIAgentAsync when AI Tools are provided, uses them for the definition via http request. + /// Verify that CreateAIAgent creates an agent successfully when definition has a mix of custom and hosted tools. 
/// [Fact] - public async Task CreateAIAgentAsync_WithNameAndAITools_SendsToolDefinitionViaHttpAsync() + public async Task CreateAIAgentAsync_WithMixedToolsInDefinition_CreatesAgentSuccessfullyAsync() { // Arrange - using var httpHandler = new HttpHandlerAssert(async (request) => - { - if (request.Content is not null) - { - var requestBody = await request.Content.ReadAsStringAsync().ConfigureAwait(false); - - Assert.Contains("required_tool", requestBody); - } + var definition = new PromptAgentDefinition("test-model") { Instructions = "Test instructions" }; + definition.Tools.Add(ResponseTool.CreateFunctionTool("create_tool", BinaryData.FromString("{}"), strictModeEnabled: false)); + definition.Tools.Add(new HostedWebSearchTool().GetService() ?? new HostedWebSearchTool().AsOpenAIResponseTool()); + definition.Tools.Add(new HostedFileSearchTool().GetService() ?? new HostedFileSearchTool().AsOpenAIResponseTool()); - return new HttpResponseMessage(HttpStatusCode.OK) { Content = new StringContent(TestDataUtil.GetAgentVersionResponseJson(), Encoding.UTF8, "application/json") }; - }); + // Simulate agent definition response with the tools + var definitionResponse = new PromptAgentDefinition("test-model") { Instructions = "Test instructions" }; + foreach (var tool in definition.Tools) + { + definitionResponse.Tools.Add(tool); + } -#pragma warning disable CA5399 - using var httpClient = new HttpClient(httpHandler); -#pragma warning restore CA5399 + AIProjectClient client = this.CreateTestAgentClient(agentDefinitionResponse: definitionResponse); - var client = new AIProjectClient(new Uri("https://test.openai.azure.com/"), new FakeAuthenticationTokenProvider(), new() { Transport = new HttpClientPipelineTransport(httpClient) }); + var options = new AgentVersionCreationOptions(definition); // Act - var agent = await client.CreateAIAgentAsync( - name: "test-agent", - model: "test-model", - instructions: "Test", - tools: [AIFunctionFactory.Create(() => true, "required_tool")]); + 
var agent = await client.CreateAIAgentAsync("test-agent", options); // Assert Assert.NotNull(agent); Assert.IsType(agent); var agentVersion = agent.GetService(); Assert.NotNull(agentVersion); - Assert.IsType(agentVersion.Definition); + if (agentVersion.Definition is PromptAgentDefinition promptDef) + { + Assert.NotEmpty(promptDef.Tools); + Assert.Equal(3, promptDef.Tools.Count); + } } /// - /// Verify that CreateAIAgent when AI Tools are provided, uses them for the definition via http request. + /// Verify that CreateAIAgentAsync when AI Tools are provided, uses them for the definition via http request. /// [Fact] - public void CreateAIAgent_WithNameAndAITools_SendsToolDefinitionViaHttp() + public async Task CreateAIAgentAsync_WithNameAndAITools_SendsToolDefinitionViaHttpAsync() { // Arrange - using var httpHandler = new HttpHandlerAssert((request) => + using var httpHandler = new HttpHandlerAssert(async (request) => { if (request.Content is not null) { -#pragma warning disable VSTHRD002 // Avoid problematic synchronous waits - var requestBody = request.Content.ReadAsStringAsync().GetAwaiter().GetResult(); -#pragma warning restore VSTHRD002 // Avoid problematic synchronous waits + var requestBody = await request.Content.ReadAsStringAsync().ConfigureAwait(false); Assert.Contains("required_tool", requestBody); } @@ -1048,7 +712,7 @@ public void CreateAIAgent_WithNameAndAITools_SendsToolDefinitionViaHttp() var client = new AIProjectClient(new Uri("https://test.openai.azure.com/"), new FakeAuthenticationTokenProvider(), new() { Transport = new HttpClientPipelineTransport(httpClient) }); // Act - var agent = client.CreateAIAgent( + var agent = await client.CreateAIAgentAsync( name: "test-agent", model: "test-model", instructions: "Test", @@ -1063,32 +727,10 @@ public void CreateAIAgent_WithNameAndAITools_SendsToolDefinitionViaHttp() } /// - /// Verify that CreateAIAgent without tools creates an agent successfully. 
- /// - [Fact] - public void CreateAIAgent_WithoutTools_CreatesAgentSuccessfully() - { - // Arrange - var definition = new PromptAgentDefinition("test-model"); - - var agentDefinitionResponse = GeneratePromptDefinitionResponse(definition, null); - AIProjectClient client = this.CreateTestAgentClient(agentName: "test-agent", agentDefinitionResponse: agentDefinitionResponse); - - var options = new AgentVersionCreationOptions(definition); - - // Act - var agent = client.CreateAIAgent("test-agent", options); - - // Assert - Assert.NotNull(agent); - Assert.IsType(agent); - } - - /// - /// Verify that when providing AITools with GetAIAgent, any additional tool that doesn't match the tools in agent definition are ignored. + /// Verify that when providing AITools with AsAIAgent, any additional tool that doesn't match the tools in agent definition are ignored. /// [Fact] - public void GetAIAgent_AdditionalAITools_WhenNotInTheDefinitionAreIgnored() + public void AsAIAgent_AdditionalAITools_WhenNotInTheDefinitionAreIgnored() { // Arrange AIProjectClient client = this.CreateTestAgentClient(); @@ -1121,158 +763,37 @@ public void GetAIAgent_AdditionalAITools_WhenNotInTheDefinitionAreIgnored() #region Inline Tools vs Parameter Tools Tests /// - /// Verify that tools passed as parameters are accepted by GetAIAgent. + /// Verify that tools passed as parameters are accepted by AsAIAgent. 
/// - [Fact] - public void GetAIAgent_WithParameterTools_AcceptsTools() - { - // Arrange - AIProjectClient client = this.CreateTestAgentClient(); - AgentRecord agentRecord = this.CreateTestAgentRecord(); - var tools = new List - { - AIFunctionFactory.Create(() => "tool1", "param_tool_1", "First parameter tool"), - AIFunctionFactory.Create(() => "tool2", "param_tool_2", "Second parameter tool") - }; - - // Act - var agent = client.AsAIAgent(agentRecord, tools: tools); - - // Assert - Assert.NotNull(agent); - Assert.IsType(agent); - var chatClient = agent.GetService(); - Assert.NotNull(chatClient); - var agentVersion = chatClient.GetService(); - Assert.NotNull(agentVersion); - } - - /// - /// Verify that CreateAIAgent with tools in definition creates an agent successfully. - /// - [Fact] - public void CreateAIAgent_WithDefinitionTools_CreatesAgentSuccessfully() - { - // Arrange - var definition = new PromptAgentDefinition("test-model") { Instructions = "Test instructions" }; - definition.Tools.Add(ResponseTool.CreateFunctionTool("create_tool", BinaryData.FromString("{}"), strictModeEnabled: false)); - - // Simulate agent definition response with the tools - var definitionResponse = GeneratePromptDefinitionResponse(definition, definition.Tools.Select(t => t.AsAITool()).ToList()); - - AIProjectClient client = this.CreateTestAgentClient(agentDefinitionResponse: definitionResponse); - - var options = new AgentVersionCreationOptions(definition); - - // Act - var agent = client.CreateAIAgent("test-agent", options); - - // Assert - Assert.NotNull(agent); - Assert.IsType(agent); - var agentVersion = agent.GetService(); - Assert.NotNull(agentVersion); - if (agentVersion.Definition is PromptAgentDefinition promptDef) - { - Assert.NotEmpty(promptDef.Tools); - Assert.Single(promptDef.Tools); - } - } - - /// - /// Verify that CreateAIAgent creates an agent successfully when definition has a mix of custom and hosted tools. 
- /// - [Fact] - public void CreateAIAgent_WithMixedToolsInDefinition_CreatesAgentSuccessfully() - { - // Arrange - var definition = new PromptAgentDefinition("test-model") { Instructions = "Test instructions" }; - definition.Tools.Add(ResponseTool.CreateFunctionTool("create_tool", BinaryData.FromString("{}"), strictModeEnabled: false)); - definition.Tools.Add(new HostedWebSearchTool().GetService() ?? new HostedWebSearchTool().AsOpenAIResponseTool()); - definition.Tools.Add(new HostedFileSearchTool().GetService() ?? new HostedFileSearchTool().AsOpenAIResponseTool()); - - // Simulate agent definition response with the tools - var definitionResponse = new PromptAgentDefinition("test-model") { Instructions = "Test instructions" }; - foreach (var tool in definition.Tools) - { - definitionResponse.Tools.Add(tool); - } - - AIProjectClient client = this.CreateTestAgentClient(agentDefinitionResponse: definitionResponse); - - var options = new AgentVersionCreationOptions(definition); - - // Act - var agent = client.CreateAIAgent("test-agent", options); - - // Assert - Assert.NotNull(agent); - Assert.IsType(agent); - var agentVersion = agent.GetService(); - Assert.NotNull(agentVersion); - if (agentVersion.Definition is PromptAgentDefinition promptDef) - { - Assert.NotEmpty(promptDef.Tools); - Assert.Equal(3, promptDef.Tools.Count); - } - } - - /// - /// Verifies that CreateAIAgent uses tools from definition when they are ResponseTool instances, resulting in successful agent creation. 
- /// - [Fact] - public void CreateAIAgent_WithResponseToolsInDefinition_CreatesAgentSuccessfully() - { - // Arrange - var definition = new PromptAgentDefinition("test-model") { Instructions = "Test instructions" }; - - var fabricToolOptions = new FabricDataAgentToolOptions(); - fabricToolOptions.ProjectConnections.Add(new ToolProjectConnection("connection-id")); - - var sharepointOptions = new SharePointGroundingToolOptions(); - sharepointOptions.ProjectConnections.Add(new ToolProjectConnection("connection-id")); - - var structuredOutputs = new StructuredOutputDefinition("name", "description", BinaryData.FromString(AIJsonUtilities.CreateJsonSchema(new { id = "test" }.GetType()).ToString()), false); - - // Add tools to the definition - definition.Tools.Add(ResponseTool.CreateFunctionTool("create_tool", BinaryData.FromString("{}"), strictModeEnabled: false)); - definition.Tools.Add((ResponseTool)AgentTool.CreateBingCustomSearchTool(new BingCustomSearchToolParameters([new BingCustomSearchConfiguration("connection-id", "instance-name")]))); - definition.Tools.Add((ResponseTool)AgentTool.CreateBrowserAutomationTool(new BrowserAutomationToolParameters(new BrowserAutomationToolConnectionParameters("id")))); - definition.Tools.Add(AgentTool.CreateA2ATool(new Uri("https://test-uri.microsoft.com"))); - definition.Tools.Add((ResponseTool)AgentTool.CreateBingGroundingTool(new BingGroundingSearchToolOptions([new BingGroundingSearchConfiguration("connection-id")]))); - definition.Tools.Add((ResponseTool)AgentTool.CreateMicrosoftFabricTool(fabricToolOptions)); - definition.Tools.Add((ResponseTool)AgentTool.CreateOpenApiTool(new OpenAPIFunctionDefinition("name", BinaryData.FromString(OpenAPISpec), new OpenAPIAnonymousAuthenticationDetails()))); - definition.Tools.Add((ResponseTool)AgentTool.CreateSharepointTool(sharepointOptions)); - definition.Tools.Add((ResponseTool)AgentTool.CreateStructuredOutputsTool(structuredOutputs)); - 
definition.Tools.Add((ResponseTool)AgentTool.CreateAzureAISearchTool(new AzureAISearchToolOptions([new AzureAISearchToolIndex() { IndexName = "name" }]))); - - // Generate agent definition response with the tools - var definitionResponse = GeneratePromptDefinitionResponse(definition, definition.Tools.Select(t => t.AsAITool()).ToList()); - - AIProjectClient client = this.CreateTestAgentClient(agentDefinitionResponse: definitionResponse); - - var options = new AgentVersionCreationOptions(definition); + [Fact] + public void AsAIAgent_WithParameterTools_AcceptsTools() + { + // Arrange + AIProjectClient client = this.CreateTestAgentClient(); + AgentRecord agentRecord = this.CreateTestAgentRecord(); + var tools = new List + { + AIFunctionFactory.Create(() => "tool1", "param_tool_1", "First parameter tool"), + AIFunctionFactory.Create(() => "tool2", "param_tool_2", "Second parameter tool") + }; // Act - var agent = client.CreateAIAgent("test-agent", options); + var agent = client.AsAIAgent(agentRecord, tools: tools); // Assert Assert.NotNull(agent); Assert.IsType(agent); - var agentVersion = agent.GetService(); + var chatClient = agent.GetService(); + Assert.NotNull(chatClient); + var agentVersion = chatClient.GetService(); Assert.NotNull(agentVersion); - if (agentVersion.Definition is PromptAgentDefinition promptDef) - { - Assert.NotEmpty(promptDef.Tools); - Assert.Equal(10, promptDef.Tools.Count); - } } /// /// Verify that CreateAIAgent with string parameters and tools creates an agent. 
/// [Fact] - public void CreateAIAgent_WithStringParamsAndTools_CreatesAgent() + public async Task CreateAIAgentAsync_WithStringParamsAndTools_CreatesAgentAsync() { // Arrange var tools = new List @@ -1285,7 +806,7 @@ public void CreateAIAgent_WithStringParamsAndTools_CreatesAgent() AIProjectClient client = this.CreateTestAgentClient(agentName: "test-agent", agentDefinitionResponse: definitionResponse); // Act - var agent = client.CreateAIAgent( + var agent = await client.CreateAIAgentAsync( "test-agent", "test-model", "Test instructions", @@ -1350,97 +871,54 @@ public async Task GetAIAgentAsync_WithToolsParameter_CreatesAgentAsync() #region Declarative Function Handling Tests /// - /// Verify that CreateAIAgent accepts declarative functions from definition. - /// - [Fact] - public void CreateAIAgent_WithDeclarativeFunctionInDefinition_AcceptsDeclarativeFunction() - { - // Arrange - AIProjectClient client = this.CreateTestAgentClient(); - var definition = new PromptAgentDefinition("test-model") { Instructions = "Test" }; - - // Create a declarative function (not invocable) using AIFunctionFactory.CreateDeclaration - using var doc = JsonDocument.Parse("{}"); - var declarativeFunction = AIFunctionFactory.CreateDeclaration("test_function", "A test function", doc.RootElement); - - // Add to definition - definition.Tools.Add(declarativeFunction.AsOpenAIResponseTool() ?? throw new InvalidOperationException()); - - var options = new AgentVersionCreationOptions(definition); - - // Act - var agent = client.CreateAIAgent("test-agent", options); - - // Assert - Assert.NotNull(agent); - Assert.IsType(agent); - } - - /// - /// Verify that CreateAIAgent accepts declarative functions from definition. + /// Verifies that CreateAIAgent uses tools from definition when they are ResponseTool instances, resulting in successful agent creation. 
/// [Fact] - public void CreateAIAgent_WithDeclarativeFunctionFromDefinition_AcceptsDeclarativeFunction() + public async Task CreateAIAgentAsync_WithResponseToolsInDefinition_CreatesAgentSuccessfullyAsync() { // Arrange - var definition = new PromptAgentDefinition("test-model") { Instructions = "Test" }; - - // Create a declarative function (not invocable) using AIFunctionFactory.CreateDeclaration - using var doc = JsonDocument.Parse("{}"); - var declarativeFunction = AIFunctionFactory.CreateDeclaration("test_function", "A test function", doc.RootElement); - - // Add to definition - definition.Tools.Add(declarativeFunction.AsOpenAIResponseTool() ?? throw new InvalidOperationException()); - - // Generate response with the declarative function - var definitionResponse = new PromptAgentDefinition("test-model") { Instructions = "Test" }; - definitionResponse.Tools.Add(declarativeFunction.AsOpenAIResponseTool() ?? throw new InvalidOperationException()); - - AIProjectClient client = this.CreateTestAgentClient(agentName: "test-agent", agentDefinitionResponse: definitionResponse); - - var options = new AgentVersionCreationOptions(definition); + var definition = new PromptAgentDefinition("test-model") { Instructions = "Test instructions" }; - // Act - var agent = client.CreateAIAgent("test-agent", options); + var fabricToolOptions = new FabricDataAgentToolOptions(); + fabricToolOptions.ProjectConnections.Add(new ToolProjectConnection("connection-id")); - // Assert - Assert.NotNull(agent); - Assert.IsType(agent); - } + var sharepointOptions = new SharePointGroundingToolOptions(); + sharepointOptions.ProjectConnections.Add(new ToolProjectConnection("connection-id")); - /// - /// Verify that CreateAIAgent accepts FunctionTools from definition. 
- /// - [Fact] - public void CreateAIAgent_WithFunctionToolsInDefinition_AcceptsDeclarativeFunction() - { - // Arrange - var functionTool = ResponseTool.CreateFunctionTool( - functionName: "get_user_name", - functionParameters: BinaryData.FromString("{}"), - strictModeEnabled: false, - functionDescription: "Gets the user's name, as used for friendly address." - ); + var structuredOutputs = new StructuredOutputDefinition("name", "description", BinaryData.FromString(AIJsonUtilities.CreateJsonSchema(new { id = "test" }.GetType()).ToString()), false); - var definition = new PromptAgentDefinition("test-model") { Instructions = "Test" }; - definition.Tools.Add(functionTool); + // Add tools to the definition + definition.Tools.Add(ResponseTool.CreateFunctionTool("create_tool", BinaryData.FromString("{}"), strictModeEnabled: false)); + definition.Tools.Add((ResponseTool)AgentTool.CreateBingCustomSearchTool(new BingCustomSearchToolParameters([new BingCustomSearchConfiguration("connection-id", "instance-name")]))); + definition.Tools.Add((ResponseTool)AgentTool.CreateBrowserAutomationTool(new BrowserAutomationToolParameters(new BrowserAutomationToolConnectionParameters("id")))); + definition.Tools.Add(AgentTool.CreateA2ATool(new Uri("https://test-uri.microsoft.com"))); + definition.Tools.Add((ResponseTool)AgentTool.CreateBingGroundingTool(new BingGroundingSearchToolOptions([new BingGroundingSearchConfiguration("connection-id")]))); + definition.Tools.Add((ResponseTool)AgentTool.CreateMicrosoftFabricTool(fabricToolOptions)); + definition.Tools.Add((ResponseTool)AgentTool.CreateOpenApiTool(new OpenAPIFunctionDefinition("name", BinaryData.FromString(OpenAPISpec), new OpenAPIAnonymousAuthenticationDetails()))); + definition.Tools.Add((ResponseTool)AgentTool.CreateSharepointTool(sharepointOptions)); + definition.Tools.Add((ResponseTool)AgentTool.CreateStructuredOutputsTool(structuredOutputs)); + definition.Tools.Add((ResponseTool)AgentTool.CreateAzureAISearchTool(new 
AzureAISearchToolOptions([new AzureAISearchToolIndex() { IndexName = "name" }]))); - // Generate response with the declarative function - var definitionResponse = new PromptAgentDefinition("test-model") { Instructions = "Test" }; - definitionResponse.Tools.Add(functionTool); + // Generate agent definition response with the tools + var definitionResponse = GeneratePromptDefinitionResponse(definition, definition.Tools.Select(t => t.AsAITool()).ToList()); - AIProjectClient client = this.CreateTestAgentClient(agentName: "test-agent", agentDefinitionResponse: definitionResponse); + AIProjectClient client = this.CreateTestAgentClient(agentDefinitionResponse: definitionResponse); var options = new AgentVersionCreationOptions(definition); // Act - var agent = client.CreateAIAgent("test-agent", options); + var agent = await client.CreateAIAgentAsync("test-agent", options); // Assert Assert.NotNull(agent); Assert.IsType(agent); - var definitionFromAgent = Assert.IsType(agent.GetService()?.Definition); - Assert.Single(definitionFromAgent.Tools); + var agentVersion = agent.GetService(); + Assert.NotNull(agentVersion); + if (agentVersion.Definition is PromptAgentDefinition promptDef) + { + Assert.NotEmpty(promptDef.Tools); + Assert.Equal(10, promptDef.Tools.Count); + } } /// @@ -1543,7 +1021,7 @@ public async Task CreateAIAgentAsync_WithDeclarativeFunctionInDefinition_Accepts /// Verify that ChatClientAgentOptions are generated correctly without tools. 
/// [Fact] - public void CreateAIAgent_GeneratesCorrectChatClientAgentOptions() + public async Task CreateAIAgentAsync_GeneratesCorrectChatClientAgentOptionsAsync() { // Arrange var definition = new PromptAgentDefinition("test-model") { Instructions = "Test instructions" }; @@ -1554,7 +1032,7 @@ public void CreateAIAgent_GeneratesCorrectChatClientAgentOptions() var options = new AgentVersionCreationOptions(definition); // Act - var agent = client.CreateAIAgent("test-agent", options); + var agent = await client.CreateAIAgentAsync("test-agent", options); // Assert Assert.NotNull(agent); @@ -1565,10 +1043,10 @@ public void CreateAIAgent_GeneratesCorrectChatClientAgentOptions() } /// - /// Verify that ChatClientAgentOptions preserve custom properties from input options. + /// Verify that GetAIAgentAsync with options preserves custom properties from input options. /// [Fact] - public void GetAIAgent_WithOptions_PreservesCustomProperties() + public async Task GetAIAgentAsync_WithOptions_PreservesCustomPropertiesAsync() { // Arrange AIProjectClient client = this.CreateTestAgentClient(agentName: "test-agent", instructions: "Custom instructions", description: "Custom description"); @@ -1580,7 +1058,7 @@ public void GetAIAgent_WithOptions_PreservesCustomProperties() }; // Act - var agent = client.GetAIAgent(options); + var agent = await client.GetAIAgentAsync(options); // Assert Assert.NotNull(agent); @@ -1590,10 +1068,10 @@ public void GetAIAgent_WithOptions_PreservesCustomProperties() } /// - /// Verify that CreateAIAgent with options generates correct ChatClientAgentOptions with tools. + /// Verify that CreateAIAgentAsync with options and tools generates correct ChatClientAgentOptions. 
/// [Fact] - public void CreateAIAgent_WithOptionsAndTools_GeneratesCorrectOptions() + public async Task CreateAIAgentAsync_WithOptionsAndTools_GeneratesCorrectOptionsAsync() { // Arrange var tools = new List @@ -1614,7 +1092,7 @@ public void CreateAIAgent_WithOptionsAndTools_GeneratesCorrectOptions() }; // Act - var agent = client.CreateAIAgent("test-model", options); + var agent = await client.CreateAIAgentAsync("test-model", options); // Assert Assert.NotNull(agent); @@ -1636,14 +1114,14 @@ public void CreateAIAgent_WithOptionsAndTools_GeneratesCorrectOptions() /// [Theory] [MemberData(nameof(InvalidAgentNameTestData.GetInvalidAgentNames), MemberType = typeof(InvalidAgentNameTestData))] - public void GetAIAgent_ByName_WithInvalidAgentName_ThrowsArgumentException(string invalidName) + public void AsAIAgent_ByName_WithInvalidAgentName_ThrowsArgumentException(string invalidName) { // Arrange var mockClient = new Mock(); // Act & Assert var exception = Assert.Throws(() => - mockClient.Object.GetAIAgent(invalidName)); + mockClient.Object.AsAIAgent(invalidName)); Assert.Equal("name", exception.ParamName); Assert.Contains("Agent name must be 1-63 characters long", exception.Message); @@ -1667,25 +1145,6 @@ public async Task GetAIAgentAsync_ByName_WithInvalidAgentName_ThrowsArgumentExce Assert.Contains("Agent name must be 1-63 characters long", exception.Message); } - /// - /// Verify that GetAIAgent with ChatClientAgentOptions throws ArgumentException when agent name is invalid. 
- /// - [Theory] - [MemberData(nameof(InvalidAgentNameTestData.GetInvalidAgentNames), MemberType = typeof(InvalidAgentNameTestData))] - public void GetAIAgent_WithOptions_WithInvalidAgentName_ThrowsArgumentException(string invalidName) - { - // Arrange - AIProjectClient client = this.CreateTestAgentClient(); - var options = new ChatClientAgentOptions { Name = invalidName }; - - // Act & Assert - var exception = Assert.Throws(() => - client.GetAIAgent(options)); - - Assert.Equal("name", exception.ParamName); - Assert.Contains("Agent name must be 1-63 characters long", exception.Message); - } - /// /// Verify that GetAIAgentAsync with ChatClientAgentOptions throws ArgumentException when agent name is invalid. /// @@ -1705,24 +1164,6 @@ public async Task GetAIAgentAsync_WithOptions_WithInvalidAgentName_ThrowsArgumen Assert.Contains("Agent name must be 1-63 characters long", exception.Message); } - /// - /// Verify that CreateAIAgent throws ArgumentException when agent name is invalid. - /// - [Theory] - [MemberData(nameof(InvalidAgentNameTestData.GetInvalidAgentNames), MemberType = typeof(InvalidAgentNameTestData))] - public void CreateAIAgent_WithBasicParams_WithInvalidAgentName_ThrowsArgumentException(string invalidName) - { - // Arrange - var mockClient = new Mock(); - - // Act & Assert - var exception = Assert.Throws(() => - mockClient.Object.CreateAIAgent(invalidName, "model", "instructions")); - - Assert.Equal("name", exception.ParamName); - Assert.Contains("Agent name must be 1-63 characters long", exception.Message); - } - /// /// Verify that CreateAIAgentAsync throws ArgumentException when agent name is invalid. /// @@ -1741,26 +1182,6 @@ public async Task CreateAIAgentAsync_WithBasicParams_WithInvalidAgentName_Throws Assert.Contains("Agent name must be 1-63 characters long", exception.Message); } - /// - /// Verify that CreateAIAgent with AgentVersionCreationOptions throws ArgumentException when agent name is invalid. 
- /// - [Theory] - [MemberData(nameof(InvalidAgentNameTestData.GetInvalidAgentNames), MemberType = typeof(InvalidAgentNameTestData))] - public void CreateAIAgent_WithAgentDefinition_WithInvalidAgentName_ThrowsArgumentException(string invalidName) - { - // Arrange - var mockClient = new Mock(); - var definition = new PromptAgentDefinition("test-model"); - var options = new AgentVersionCreationOptions(definition); - - // Act & Assert - var exception = Assert.Throws(() => - mockClient.Object.CreateAIAgent(invalidName, options)); - - Assert.Equal("name", exception.ParamName); - Assert.Contains("Agent name must be 1-63 characters long", exception.Message); - } - /// /// Verify that CreateAIAgentAsync with AgentVersionCreationOptions throws ArgumentException when agent name is invalid. /// @@ -1781,25 +1202,6 @@ public async Task CreateAIAgentAsync_WithAgentDefinition_WithInvalidAgentName_Th Assert.Contains("Agent name must be 1-63 characters long", exception.Message); } - /// - /// Verify that CreateAIAgent with ChatClientAgentOptions throws ArgumentException when agent name is invalid. - /// - [Theory] - [MemberData(nameof(InvalidAgentNameTestData.GetInvalidAgentNames), MemberType = typeof(InvalidAgentNameTestData))] - public void CreateAIAgent_WithOptions_WithInvalidAgentName_ThrowsArgumentException(string invalidName) - { - // Arrange - AIProjectClient client = this.CreateTestAgentClient(); - var options = new ChatClientAgentOptions { Name = invalidName }; - - // Act & Assert - var exception = Assert.Throws(() => - client.CreateAIAgent("test-model", options)); - - Assert.Equal("name", exception.ParamName); - Assert.Contains("Agent name must be 1-63 characters long", exception.Message); - } - /// /// Verify that CreateAIAgentAsync with ChatClientAgentOptions throws ArgumentException when agent name is invalid. 
/// @@ -1820,11 +1222,11 @@ public async Task CreateAIAgentAsync_WithOptions_WithInvalidAgentName_ThrowsArgu } /// - /// Verify that GetAIAgent with AgentReference throws ArgumentException when agent name is invalid. + /// Verify that AsAIAgent with AgentReference throws ArgumentException when agent name is invalid. /// [Theory] [MemberData(nameof(InvalidAgentNameTestData.GetInvalidAgentNames), MemberType = typeof(InvalidAgentNameTestData))] - public void GetAIAgent_WithAgentReference_WithInvalidAgentName_ThrowsArgumentException(string invalidName) + public void AsAIAgent_WithAgentReference_WithInvalidAgentName_ThrowsArgumentException(string invalidName) { // Arrange var mockClient = new Mock(); @@ -1832,7 +1234,7 @@ public void GetAIAgent_WithAgentReference_WithInvalidAgentName_ThrowsArgumentExc // Act & Assert var exception = Assert.Throws(() => - mockClient.Object.GetAIAgent(agentReference)); + mockClient.Object.AsAIAgent(agentReference)); Assert.Equal("name", exception.ParamName); Assert.Contains("Agent name must be 1-63 characters long", exception.Message); @@ -1873,7 +1275,7 @@ public void AsAIAgent_WithClientFactory_WrapsUnderlyingChatClient() /// Verify that clientFactory is called with the correct underlying chat client. /// [Fact] - public void CreateAIAgent_WithClientFactory_ReceivesCorrectUnderlyingClient() + public async Task CreateAIAgentAsync_WithClientFactory_ReceivesCorrectUnderlyingClientAsync() { // Arrange AIProjectClient client = this.CreateTestAgentClient(); @@ -1883,7 +1285,7 @@ public void CreateAIAgent_WithClientFactory_ReceivesCorrectUnderlyingClient() var options = new AgentVersionCreationOptions(definition); // Act - var agent = client.CreateAIAgent( + var agent = await client.CreateAIAgentAsync( "test-agent", options, clientFactory: (innerClient) => @@ -1932,7 +1334,7 @@ public void AsAIAgent_MultipleCallsWithClientFactory_CreatesIndependentClients() /// Verify that agent created with clientFactory maintains agent properties. 
/// [Fact] - public void CreateAIAgent_WithClientFactory_PreservesAgentProperties() + public async Task CreateAIAgentAsync_WithClientFactory_PreservesAgentPropertiesAsync() { // Arrange const string AgentName = "test-agent"; @@ -1941,7 +1343,7 @@ public void CreateAIAgent_WithClientFactory_PreservesAgentProperties() AIProjectClient client = this.CreateTestAgentClient(AgentName, Instructions); // Act - var agent = client.CreateAIAgent( + var agent = await client.CreateAIAgentAsync( AgentName, Model, Instructions, @@ -1959,7 +1361,7 @@ public void CreateAIAgent_WithClientFactory_PreservesAgentProperties() /// Verify that agent created with clientFactory is created successfully. /// [Fact] - public void CreateAIAgent_WithClientFactory_CreatesAgentSuccessfully() + public async Task CreateAIAgentAsync_WithClientFactory_CreatesAgentSuccessfullyAsync() { // Arrange var definition = new PromptAgentDefinition("test-model") { Instructions = "Test" }; @@ -1970,7 +1372,7 @@ public void CreateAIAgent_WithClientFactory_CreatesAgentSuccessfully() var options = new AgentVersionCreationOptions(definition); // Act - var agent = client.CreateAIAgent( + var agent = await client.CreateAIAgentAsync( "test-agent", options, clientFactory: (innerClient) => new TestChatClient(innerClient)); @@ -1987,122 +1389,11 @@ public void CreateAIAgent_WithClientFactory_CreatesAgentSuccessfully() #region User-Agent Header Tests - /// - /// Verify that GetAIAgent(string name) passes RequestOptions to the Protocol method. - /// - [Fact] - public void GetAIAgent_WithStringName_PassesRequestOptionsToProtocol() - { - // Arrange - RequestOptions? 
capturedRequestOptions = null; - - var mockAgentOperations = new Mock(); - mockAgentOperations - .Setup(x => x.GetAgent(It.IsAny(), It.IsAny())) - .Callback((name, options) => capturedRequestOptions = options) - .Returns(ClientResult.FromResponse(new MockPipelineResponse(200, BinaryData.FromString(TestDataUtil.GetAgentResponseJson())))); - - var mockAgentClient = new Mock(new Uri("https://test.openai.azure.com/"), new FakeAuthenticationTokenProvider()); - mockAgentClient.SetupGet(x => x.Agents).Returns(mockAgentOperations.Object); - mockAgentClient.Setup(x => x.GetConnection(It.IsAny())).Returns(new ClientConnection("fake-connection-id", "http://localhost", ClientPipeline.Create(), CredentialKind.None)); - - // Act - var agent = mockAgentClient.Object.GetAIAgent("test-agent"); - - // Assert - Assert.NotNull(agent); - Assert.NotNull(capturedRequestOptions); - } - - /// - /// Verify that GetAIAgentAsync(string name) passes RequestOptions to the Protocol method. - /// - [Fact] - public async Task GetAIAgentAsync_WithStringName_PassesRequestOptionsToProtocolAsync() - { - // Arrange - RequestOptions? 
capturedRequestOptions = null; - - var mockAgentOperations = new Mock(); - mockAgentOperations - .Setup(x => x.GetAgentAsync(It.IsAny(), It.IsAny())) - .Callback((name, options) => capturedRequestOptions = options) - .Returns(Task.FromResult(ClientResult.FromResponse(new MockPipelineResponse(200, BinaryData.FromString(TestDataUtil.GetAgentResponseJson()))))); - - var mockAgentClient = new Mock(new Uri("https://test.openai.azure.com/"), new FakeAuthenticationTokenProvider()); - mockAgentClient.SetupGet(x => x.Agents).Returns(mockAgentOperations.Object); - mockAgentClient.Setup(x => x.GetConnection(It.IsAny())).Returns(new ClientConnection("fake-connection-id", "http://localhost", ClientPipeline.Create(), CredentialKind.None)); - // Act - var agent = await mockAgentClient.Object.GetAIAgentAsync("test-agent"); - - // Assert - Assert.NotNull(agent); - Assert.NotNull(capturedRequestOptions); - } - - /// - /// Verify that CreateAIAgent(string model, ChatClientAgentOptions options) passes RequestOptions to the Protocol method. - /// - [Fact] - public void CreateAIAgent_WithChatClientAgentOptions_PassesRequestOptionsToProtocol() - { - // Arrange - RequestOptions? 
capturedRequestOptions = null; - - var mockAgentOperations = new Mock(); - mockAgentOperations - .Setup(x => x.CreateAgentVersion(It.IsAny(), It.IsAny(), It.IsAny())) - .Callback((name, content, options) => capturedRequestOptions = options) - .Returns(ClientResult.FromResponse(new MockPipelineResponse(200, BinaryData.FromString(TestDataUtil.GetAgentVersionResponseJson())))); - - var mockAgentClient = new Mock(new Uri("https://test.openai.azure.com/"), new FakeAuthenticationTokenProvider()); - mockAgentClient.SetupGet(x => x.Agents).Returns(mockAgentOperations.Object); - mockAgentClient.Setup(x => x.GetConnection(It.IsAny())).Returns(new ClientConnection("fake-connection-id", "http://localhost", ClientPipeline.Create(), CredentialKind.None)); - - var agentOptions = new ChatClientAgentOptions { Name = "test-agent" }; - - // Act - var agent = mockAgentClient.Object.CreateAIAgent("gpt-4", agentOptions); - - // Assert - Assert.NotNull(agent); - Assert.NotNull(capturedRequestOptions); - } - - /// - /// Verify that CreateAIAgentAsync(string model, ChatClientAgentOptions options) passes RequestOptions to the Protocol method. - /// - [Fact] - public async Task CreateAIAgentAsync_WithChatClientAgentOptions_PassesRequestOptionsToProtocolAsync() - { - // Arrange - RequestOptions? 
capturedRequestOptions = null; - - var mockAgentOperations = new Mock(); - mockAgentOperations - .Setup(x => x.CreateAgentVersionAsync(It.IsAny(), It.IsAny(), It.IsAny())) - .Callback((name, content, options) => capturedRequestOptions = options) - .Returns(Task.FromResult(ClientResult.FromResponse(new MockPipelineResponse(200, BinaryData.FromString(TestDataUtil.GetAgentVersionResponseJson()))))); - - var mockAgentClient = new Mock(new Uri("https://test.openai.azure.com/"), new FakeAuthenticationTokenProvider()); - mockAgentClient.SetupGet(x => x.Agents).Returns(mockAgentOperations.Object); - mockAgentClient.Setup(x => x.GetConnection(It.IsAny())).Returns(new ClientConnection("fake-connection-id", "http://localhost", ClientPipeline.Create(), CredentialKind.None)); - - var agentOptions = new ChatClientAgentOptions { Name = "test-agent" }; - - // Act - var agent = await mockAgentClient.Object.CreateAIAgentAsync("gpt-4", agentOptions); - - // Assert - Assert.NotNull(agent); - Assert.NotNull(capturedRequestOptions); - } - /// /// Verifies that the user-agent header is added to both synchronous and asynchronous requests made by agent creation methods. /// [Fact] - public async Task CreateAIAgent_UserAgentHeaderAddedToRequestsAsync() + public async Task CreateAIAgentAsync_UserAgentHeaderAddedToRequestsAsync() { using var httpHandler = new HttpHandlerAssert(request => { @@ -2122,16 +1413,14 @@ public async Task CreateAIAgent_UserAgentHeaderAddedToRequestsAsync() var agentOptions = new ChatClientAgentOptions { Name = "test-agent" }; // Act - var agent1 = aiProjectClient.CreateAIAgent("test", agentOptions); - var agent2 = await aiProjectClient.CreateAIAgentAsync("test", agentOptions); + var agent = await aiProjectClient.CreateAIAgentAsync("test", agentOptions); // Assert - Assert.NotNull(agent1); - Assert.NotNull(agent2); + Assert.NotNull(agent); } /// - /// Verifies that the user-agent header is added to both synchronous and asynchronous GetAIAgent requests. 
+ /// Verifies that the user-agent header is added to asynchronous GetAIAgentAsync requests. /// [Fact] public async Task GetAIAgent_UserAgentHeaderAddedToRequestsAsync() @@ -2152,12 +1441,10 @@ public async Task GetAIAgent_UserAgentHeaderAddedToRequestsAsync() var aiProjectClient = new AIProjectClient(new Uri("https://test.openai.azure.com/"), new FakeAuthenticationTokenProvider(), new() { Transport = new HttpClientPipelineTransport(httpClient) }); // Act - var agent1 = aiProjectClient.GetAIAgent("test"); - var agent2 = await aiProjectClient.GetAIAgentAsync("test"); + var agent = await aiProjectClient.GetAIAgentAsync("test"); // Assert - Assert.NotNull(agent1); - Assert.NotNull(agent2); + Assert.NotNull(agent); } #endregion @@ -2168,7 +1455,7 @@ public async Task GetAIAgent_UserAgentHeaderAddedToRequestsAsync() /// Verify that AsAIAgent throws ArgumentNullException when AIProjectClient is null. /// [Fact] - public void GetAIAgent_WithAgentReference_WithNullClient_ThrowsArgumentNullException() + public void AsAIAgent_WithAgentReference_WithNullClient_ThrowsArgumentNullException() { // Arrange AIProjectClient? client = null; @@ -2176,7 +1463,7 @@ public void GetAIAgent_WithAgentReference_WithNullClient_ThrowsArgumentNullExcep // Act & Assert var exception = Assert.Throws(() => - client!.GetAIAgent(agentReference)); + client!.AsAIAgent(agentReference)); Assert.Equal("aiProjectClient", exception.ParamName); } @@ -2185,30 +1472,30 @@ public void GetAIAgent_WithAgentReference_WithNullClient_ThrowsArgumentNullExcep /// Verify that AsAIAgent throws ArgumentNullException when agentReference is null. 
/// [Fact] - public void GetAIAgent_WithAgentReference_WithNullAgentReference_ThrowsArgumentNullException() + public void AsAIAgent_WithAgentReference_WithNullAgentReference_ThrowsArgumentNullException() { // Arrange var mockClient = new Mock(); // Act & Assert var exception = Assert.Throws(() => - mockClient.Object.GetAIAgent((AgentReference)null!)); + mockClient.Object.AsAIAgent((AgentReference)null!)); Assert.Equal("agentReference", exception.ParamName); } /// - /// Verify that GetAIAgent with AgentReference creates a valid agent. + /// Verify that AsAIAgent with AgentReference creates a valid agent. /// [Fact] - public void GetAIAgent_WithAgentReference_CreatesValidAgent() + public void AsAIAgent_WithAgentReference_CreatesValidAgent() { // Arrange AIProjectClient client = this.CreateTestAgentClient(); var agentReference = new AgentReference("test-name", "1"); // Act - var agent = client.GetAIAgent(agentReference); + var agent = client.AsAIAgent(agentReference); // Assert Assert.NotNull(agent); @@ -2217,10 +1504,10 @@ public void GetAIAgent_WithAgentReference_CreatesValidAgent() } /// - /// Verify that GetAIAgent with AgentReference and clientFactory applies the factory. + /// Verify that AsAIAgent with AgentReference and clientFactory applies the factory. /// [Fact] - public void GetAIAgent_WithAgentReference_WithClientFactory_AppliesFactoryCorrectly() + public void AsAIAgent_WithAgentReference_WithClientFactory_AppliesFactoryCorrectly() { // Arrange AIProjectClient client = this.CreateTestAgentClient(); @@ -2228,7 +1515,7 @@ public void GetAIAgent_WithAgentReference_WithClientFactory_AppliesFactoryCorrec TestChatClient? 
testChatClient = null; // Act - var agent = client.GetAIAgent( + var agent = client.AsAIAgent( agentReference, clientFactory: (innerClient) => testChatClient = new TestChatClient(innerClient)); @@ -2240,17 +1527,17 @@ public void GetAIAgent_WithAgentReference_WithClientFactory_AppliesFactoryCorrec } /// - /// Verify that GetAIAgent with AgentReference sets the agent ID correctly. + /// Verify that AsAIAgent with AgentReference sets the agent ID correctly. /// [Fact] - public void GetAIAgent_WithAgentReference_SetsAgentIdCorrectly() + public void AsAIAgent_WithAgentReference_SetsAgentIdCorrectly() { // Arrange AIProjectClient client = this.CreateTestAgentClient(); var agentReference = new AgentReference("test-name", "2"); // Act - var agent = client.GetAIAgent(agentReference); + var agent = client.AsAIAgent(agentReference); // Assert Assert.NotNull(agent); @@ -2258,10 +1545,10 @@ public void GetAIAgent_WithAgentReference_SetsAgentIdCorrectly() } /// - /// Verify that GetAIAgent with AgentReference and tools includes the tools in ChatOptions. + /// Verify that AsAIAgent with AgentReference and tools includes the tools in ChatOptions. 
/// [Fact] - public void GetAIAgent_WithAgentReference_WithTools_IncludesToolsInChatOptions() + public void AsAIAgent_WithAgentReference_WithTools_IncludesToolsInChatOptions() { // Arrange AIProjectClient client = this.CreateTestAgentClient(); @@ -2272,7 +1559,7 @@ public void GetAIAgent_WithAgentReference_WithTools_IncludesToolsInChatOptions() }; // Act - var agent = client.GetAIAgent(agentReference, tools: tools); + var agent = client.AsAIAgent(agentReference, tools: tools); // Assert Assert.NotNull(agent); @@ -2316,7 +1603,7 @@ public void GetService_WithAgentReference_ReturnsNullForAgentRecord() var agentReference = new AgentReference("test-name", "1"); // Act - var agent = client.GetAIAgent(agentReference); + var agent = client.AsAIAgent(agentReference); var retrievedRecord = agent.GetService(); // Assert @@ -2357,7 +1644,7 @@ public void GetService_WithAgentReference_ReturnsNullForAgentVersion() var agentReference = new AgentReference("test-name", "1"); // Act - var agent = client.GetAIAgent(agentReference); + var agent = client.AsAIAgent(agentReference); var retrievedVersion = agent.GetService(); // Assert @@ -2447,7 +1734,7 @@ public void GetService_WithAgentReference_ReturnsAgentReference() var agentReference = new AgentReference("test-agent", "1.0"); // Act - var agent = client.GetAIAgent(agentReference); + var agent = client.AsAIAgent(agentReference); var retrievedReference = agent.GetService(); // Assert @@ -2505,7 +1792,7 @@ public void GetService_WithAgentReference_ReturnsCorrectVersionInformation() var agentReference = new AgentReference("versioned-agent", "3.5"); // Act - var agent = client.GetAIAgent(agentReference); + var agent = client.AsAIAgent(agentReference); var retrievedReference = agent.GetService(); // Assert diff --git a/dotnet/tests/Microsoft.Agents.AI.CosmosNoSql.UnitTests/CosmosChatMessageStoreTests.cs b/dotnet/tests/Microsoft.Agents.AI.CosmosNoSql.UnitTests/CosmosChatHistoryProviderTests.cs similarity index 72% rename from 
dotnet/tests/Microsoft.Agents.AI.CosmosNoSql.UnitTests/CosmosChatMessageStoreTests.cs rename to dotnet/tests/Microsoft.Agents.AI.CosmosNoSql.UnitTests/CosmosChatHistoryProviderTests.cs index 9410e68f1b..ab2f58dfd5 100644 --- a/dotnet/tests/Microsoft.Agents.AI.CosmosNoSql.UnitTests/CosmosChatMessageStoreTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.CosmosNoSql.UnitTests/CosmosChatHistoryProviderTests.cs @@ -14,7 +14,7 @@ namespace Microsoft.Agents.AI.CosmosNoSql.UnitTests; /// -/// Contains tests for . +/// Contains tests for . /// /// Test Modes: /// - Default Mode: Cleans up all test data after each test run (deletes database) @@ -39,7 +39,7 @@ namespace Microsoft.Agents.AI.CosmosNoSql.UnitTests; /// - Reset to cleanup mode: $env:COSMOS_PRESERVE_CONTAINERS=""; dotnet test tests/Microsoft.Agents.AI.CosmosNoSql.UnitTests/ /// [Collection("CosmosDB")] -public sealed class CosmosChatMessageStoreTests : IAsyncLifetime, IDisposable +public sealed class CosmosChatHistoryProviderTests : IAsyncLifetime, IDisposable { // Cosmos DB Emulator connection settings private const string EmulatorEndpoint = "https://localhost:8081"; @@ -154,13 +154,13 @@ public void Constructor_WithConnectionString_ShouldCreateInstance() this.SkipIfEmulatorNotAvailable(); // Act - using var store = new CosmosChatMessageStore(this._connectionString, s_testDatabaseId, TestContainerId, "test-conversation"); + using var provider = new CosmosChatHistoryProvider(this._connectionString, s_testDatabaseId, TestContainerId, "test-conversation"); // Assert - Assert.NotNull(store); - Assert.Equal("test-conversation", store.ConversationId); - Assert.Equal(s_testDatabaseId, store.DatabaseId); - Assert.Equal(TestContainerId, store.ContainerId); + Assert.NotNull(provider); + Assert.Equal("test-conversation", provider.ConversationId); + Assert.Equal(s_testDatabaseId, provider.DatabaseId); + Assert.Equal(TestContainerId, provider.ContainerId); } [SkippableFact] @@ -171,13 +171,13 @@ public void 
Constructor_WithConnectionStringNoConversationId_ShouldCreateInstanc this.SkipIfEmulatorNotAvailable(); // Act - using var store = new CosmosChatMessageStore(this._connectionString, s_testDatabaseId, TestContainerId); + using var provider = new CosmosChatHistoryProvider(this._connectionString, s_testDatabaseId, TestContainerId); // Assert - Assert.NotNull(store); - Assert.NotNull(store.ConversationId); - Assert.Equal(s_testDatabaseId, store.DatabaseId); - Assert.Equal(TestContainerId, store.ContainerId); + Assert.NotNull(provider); + Assert.NotNull(provider.ConversationId); + Assert.Equal(s_testDatabaseId, provider.DatabaseId); + Assert.Equal(TestContainerId, provider.ContainerId); } [SkippableFact] @@ -186,7 +186,7 @@ public void Constructor_WithNullConnectionString_ShouldThrowArgumentException() { // Arrange & Act & Assert Assert.Throws(() => - new CosmosChatMessageStore((string)null!, s_testDatabaseId, TestContainerId, "test-conversation")); + new CosmosChatHistoryProvider((string)null!, s_testDatabaseId, TestContainerId, "test-conversation")); } [SkippableFact] @@ -197,7 +197,7 @@ public void Constructor_WithEmptyConversationId_ShouldThrowArgumentException() this.SkipIfEmulatorNotAvailable(); Assert.Throws(() => - new CosmosChatMessageStore(this._connectionString, s_testDatabaseId, TestContainerId, "")); + new CosmosChatHistoryProvider(this._connectionString, s_testDatabaseId, TestContainerId, "")); } #endregion @@ -211,23 +211,23 @@ public async Task InvokedAsync_WithSingleMessage_ShouldAddMessageAsync() // Arrange this.SkipIfEmulatorNotAvailable(); var conversationId = Guid.NewGuid().ToString(); - using var store = new CosmosChatMessageStore(this._connectionString, s_testDatabaseId, TestContainerId, conversationId); + using var provider = new CosmosChatHistoryProvider(this._connectionString, s_testDatabaseId, TestContainerId, conversationId); var message = new ChatMessage(ChatRole.User, "Hello, world!"); - var context = new 
ChatMessageStore.InvokedContext([message], []) + var context = new ChatHistoryProvider.InvokedContext([message], []) { ResponseMessages = [] }; // Act - await store.InvokedAsync(context); + await provider.InvokedAsync(context); // Wait a moment for eventual consistency await Task.Delay(100); // Assert - var invokingContext = new ChatMessageStore.InvokingContext([]); - var messages = await store.InvokingAsync(invokingContext); + var invokingContext = new ChatHistoryProvider.InvokingContext([]); + var messages = await provider.InvokingAsync(invokingContext); var messageList = messages.ToList(); // Simple assertion - if this fails, we know the deserialization is the issue @@ -277,7 +277,7 @@ public async Task InvokedAsync_WithMultipleMessages_ShouldAddAllMessagesAsync() // Arrange this.SkipIfEmulatorNotAvailable(); var conversationId = Guid.NewGuid().ToString(); - using var store = new CosmosChatMessageStore(this._connectionString, s_testDatabaseId, TestContainerId, conversationId); + using var provider = new CosmosChatHistoryProvider(this._connectionString, s_testDatabaseId, TestContainerId, conversationId); var requestMessages = new[] { new ChatMessage(ChatRole.User, "First message"), @@ -293,18 +293,18 @@ public async Task InvokedAsync_WithMultipleMessages_ShouldAddAllMessagesAsync() new ChatMessage(ChatRole.Assistant, "Response message") }; - var context = new ChatMessageStore.InvokedContext(requestMessages, []) + var context = new ChatHistoryProvider.InvokedContext(requestMessages, []) { AIContextProviderMessages = aiContextProviderMessages, ResponseMessages = responseMessages }; // Act - await store.InvokedAsync(context); + await provider.InvokedAsync(context); // Assert - var invokingContext = new ChatMessageStore.InvokingContext([]); - var retrievedMessages = await store.InvokingAsync(invokingContext); + var invokingContext = new ChatHistoryProvider.InvokingContext([]); + var retrievedMessages = await provider.InvokingAsync(invokingContext); var messageList = 
retrievedMessages.ToList(); Assert.Equal(5, messageList.Count); Assert.Equal("First message", messageList[0].Text); @@ -324,11 +324,11 @@ public async Task InvokingAsync_WithNoMessages_ShouldReturnEmptyAsync() { // Arrange this.SkipIfEmulatorNotAvailable(); - using var store = new CosmosChatMessageStore(this._connectionString, s_testDatabaseId, TestContainerId, Guid.NewGuid().ToString()); + using var provider = new CosmosChatHistoryProvider(this._connectionString, s_testDatabaseId, TestContainerId, Guid.NewGuid().ToString()); // Act - var invokingContext = new ChatMessageStore.InvokingContext([]); - var messages = await store.InvokingAsync(invokingContext); + var invokingContext = new ChatHistoryProvider.InvokingContext([]); + var messages = await provider.InvokingAsync(invokingContext); // Assert Assert.Empty(messages); @@ -343,18 +343,18 @@ public async Task InvokingAsync_WithConversationIsolation_ShouldOnlyReturnMessag var conversation1 = Guid.NewGuid().ToString(); var conversation2 = Guid.NewGuid().ToString(); - using var store1 = new CosmosChatMessageStore(this._connectionString, s_testDatabaseId, TestContainerId, conversation1); - using var store2 = new CosmosChatMessageStore(this._connectionString, s_testDatabaseId, TestContainerId, conversation2); + using var store1 = new CosmosChatHistoryProvider(this._connectionString, s_testDatabaseId, TestContainerId, conversation1); + using var store2 = new CosmosChatHistoryProvider(this._connectionString, s_testDatabaseId, TestContainerId, conversation2); - var context1 = new ChatMessageStore.InvokedContext([new ChatMessage(ChatRole.User, "Message for conversation 1")], []); - var context2 = new ChatMessageStore.InvokedContext([new ChatMessage(ChatRole.User, "Message for conversation 2")], []); + var context1 = new ChatHistoryProvider.InvokedContext([new ChatMessage(ChatRole.User, "Message for conversation 1")], []); + var context2 = new ChatHistoryProvider.InvokedContext([new ChatMessage(ChatRole.User, "Message for 
conversation 2")], []); await store1.InvokedAsync(context1); await store2.InvokedAsync(context2); // Act - var invokingContext1 = new ChatMessageStore.InvokingContext([]); - var invokingContext2 = new ChatMessageStore.InvokingContext([]); + var invokingContext1 = new ChatHistoryProvider.InvokingContext([]); + var invokingContext2 = new ChatHistoryProvider.InvokingContext([]); var messages1 = await store1.InvokingAsync(invokingContext1); var messages2 = await store2.InvokingAsync(invokingContext2); @@ -379,7 +379,7 @@ public async Task FullWorkflow_AddAndGet_ShouldWorkCorrectlyAsync() // Arrange this.SkipIfEmulatorNotAvailable(); var conversationId = $"test-conversation-{Guid.NewGuid():N}"; // Use unique conversation ID - using var originalStore = new CosmosChatMessageStore(this._connectionString, s_testDatabaseId, TestContainerId, conversationId); + using var originalStore = new CosmosChatHistoryProvider(this._connectionString, s_testDatabaseId, TestContainerId, conversationId); var messages = new[] { @@ -391,18 +391,18 @@ public async Task FullWorkflow_AddAndGet_ShouldWorkCorrectlyAsync() }; // Act 1: Add messages - var invokedContext = new ChatMessageStore.InvokedContext(messages, []); + var invokedContext = new ChatHistoryProvider.InvokedContext(messages, []); await originalStore.InvokedAsync(invokedContext); // Act 2: Verify messages were added - var invokingContext = new ChatMessageStore.InvokingContext([]); + var invokingContext = new ChatHistoryProvider.InvokingContext([]); var retrievedMessages = await originalStore.InvokingAsync(invokingContext); var retrievedList = retrievedMessages.ToList(); Assert.Equal(5, retrievedList.Count); - // Act 3: Create new store instance for same conversation (test persistence) - using var newStore = new CosmosChatMessageStore(this._connectionString, s_testDatabaseId, TestContainerId, conversationId); - var persistedMessages = await newStore.InvokingAsync(invokingContext); + // Act 3: Create new provider instance for same 
conversation (test persistence) + using var newProvider = new CosmosChatHistoryProvider(this._connectionString, s_testDatabaseId, TestContainerId, conversationId); + var persistedMessages = await newProvider.InvokingAsync(invokingContext); var persistedList = persistedMessages.ToList(); // Assert final state @@ -424,10 +424,10 @@ public void Dispose_AfterUse_ShouldNotThrow() { // Arrange this.SkipIfEmulatorNotAvailable(); - var store = new CosmosChatMessageStore(this._connectionString, s_testDatabaseId, TestContainerId, Guid.NewGuid().ToString()); + var provider = new CosmosChatHistoryProvider(this._connectionString, s_testDatabaseId, TestContainerId, Guid.NewGuid().ToString()); // Act & Assert - store.Dispose(); // Should not throw + provider.Dispose(); // Should not throw } [SkippableFact] @@ -436,11 +436,11 @@ public void Dispose_MultipleCalls_ShouldNotThrow() { // Arrange this.SkipIfEmulatorNotAvailable(); - var store = new CosmosChatMessageStore(this._connectionString, s_testDatabaseId, TestContainerId, Guid.NewGuid().ToString()); + var provider = new CosmosChatHistoryProvider(this._connectionString, s_testDatabaseId, TestContainerId, Guid.NewGuid().ToString()); // Act & Assert - store.Dispose(); // First call - store.Dispose(); // Second call - should not throw + provider.Dispose(); // First call + provider.Dispose(); // Second call - should not throw } #endregion @@ -455,13 +455,13 @@ public void Constructor_WithHierarchicalConnectionString_ShouldCreateInstance() this.SkipIfEmulatorNotAvailable(); // Act - using var store = new CosmosChatMessageStore(this._connectionString, s_testDatabaseId, HierarchicalTestContainerId, "tenant-123", "user-456", "session-789"); + using var provider = new CosmosChatHistoryProvider(this._connectionString, s_testDatabaseId, HierarchicalTestContainerId, "tenant-123", "user-456", "session-789"); // Assert - Assert.NotNull(store); - Assert.Equal("session-789", store.ConversationId); - Assert.Equal(s_testDatabaseId, 
store.DatabaseId); - Assert.Equal(HierarchicalTestContainerId, store.ContainerId); + Assert.NotNull(provider); + Assert.Equal("session-789", provider.ConversationId); + Assert.Equal(s_testDatabaseId, provider.DatabaseId); + Assert.Equal(HierarchicalTestContainerId, provider.ContainerId); } [SkippableFact] @@ -473,13 +473,13 @@ public void Constructor_WithHierarchicalEndpoint_ShouldCreateInstance() // Act TokenCredential credential = new DefaultAzureCredential(); - using var store = new CosmosChatMessageStore(EmulatorEndpoint, credential, s_testDatabaseId, HierarchicalTestContainerId, "tenant-123", "user-456", "session-789"); + using var provider = new CosmosChatHistoryProvider(EmulatorEndpoint, credential, s_testDatabaseId, HierarchicalTestContainerId, "tenant-123", "user-456", "session-789"); // Assert - Assert.NotNull(store); - Assert.Equal("session-789", store.ConversationId); - Assert.Equal(s_testDatabaseId, store.DatabaseId); - Assert.Equal(HierarchicalTestContainerId, store.ContainerId); + Assert.NotNull(provider); + Assert.Equal("session-789", provider.ConversationId); + Assert.Equal(s_testDatabaseId, provider.DatabaseId); + Assert.Equal(HierarchicalTestContainerId, provider.ContainerId); } [SkippableFact] @@ -490,13 +490,13 @@ public void Constructor_WithHierarchicalCosmosClient_ShouldCreateInstance() this.SkipIfEmulatorNotAvailable(); using var cosmosClient = new CosmosClient(EmulatorEndpoint, EmulatorKey); - using var store = new CosmosChatMessageStore(cosmosClient, s_testDatabaseId, HierarchicalTestContainerId, "tenant-123", "user-456", "session-789"); + using var provider = new CosmosChatHistoryProvider(cosmosClient, s_testDatabaseId, HierarchicalTestContainerId, "tenant-123", "user-456", "session-789"); // Assert - Assert.NotNull(store); - Assert.Equal("session-789", store.ConversationId); - Assert.Equal(s_testDatabaseId, store.DatabaseId); - Assert.Equal(HierarchicalTestContainerId, store.ContainerId); + Assert.NotNull(provider); + 
Assert.Equal("session-789", provider.ConversationId); + Assert.Equal(s_testDatabaseId, provider.DatabaseId); + Assert.Equal(HierarchicalTestContainerId, provider.ContainerId); } [SkippableFact] @@ -507,7 +507,7 @@ public void Constructor_WithHierarchicalNullTenantId_ShouldThrowArgumentExceptio this.SkipIfEmulatorNotAvailable(); Assert.Throws(() => - new CosmosChatMessageStore(this._connectionString, s_testDatabaseId, TestContainerId, null!, "user-456", "session-789")); + new CosmosChatHistoryProvider(this._connectionString, s_testDatabaseId, TestContainerId, null!, "user-456", "session-789")); } [SkippableFact] @@ -518,7 +518,7 @@ public void Constructor_WithHierarchicalEmptyUserId_ShouldThrowArgumentException this.SkipIfEmulatorNotAvailable(); Assert.Throws(() => - new CosmosChatMessageStore(this._connectionString, s_testDatabaseId, HierarchicalTestContainerId, "tenant-123", "", "session-789")); + new CosmosChatHistoryProvider(this._connectionString, s_testDatabaseId, HierarchicalTestContainerId, "tenant-123", "", "session-789")); } [SkippableFact] @@ -529,7 +529,7 @@ public void Constructor_WithHierarchicalWhitespaceSessionId_ShouldThrowArgumentE this.SkipIfEmulatorNotAvailable(); Assert.Throws(() => - new CosmosChatMessageStore(this._connectionString, s_testDatabaseId, HierarchicalTestContainerId, "tenant-123", "user-456", " ")); + new CosmosChatHistoryProvider(this._connectionString, s_testDatabaseId, HierarchicalTestContainerId, "tenant-123", "user-456", " ")); } [SkippableFact] @@ -542,20 +542,20 @@ public async Task InvokedAsync_WithHierarchicalPartitioning_ShouldAddMessageWith const string UserId = "user-456"; const string SessionId = "session-789"; // Test hierarchical partitioning constructor with connection string - using var store = new CosmosChatMessageStore(this._connectionString, s_testDatabaseId, HierarchicalTestContainerId, TenantId, UserId, SessionId); + using var provider = new CosmosChatHistoryProvider(this._connectionString, s_testDatabaseId, 
HierarchicalTestContainerId, TenantId, UserId, SessionId); var message = new ChatMessage(ChatRole.User, "Hello from hierarchical partitioning!"); - var context = new ChatMessageStore.InvokedContext([message], []); + var context = new ChatHistoryProvider.InvokedContext([message], []); // Act - await store.InvokedAsync(context); + await provider.InvokedAsync(context); // Wait a moment for eventual consistency await Task.Delay(100); // Assert - var invokingContext = new ChatMessageStore.InvokingContext([]); - var messages = await store.InvokingAsync(invokingContext); + var invokingContext = new ChatHistoryProvider.InvokingContext([]); + var messages = await provider.InvokingAsync(invokingContext); var messageList = messages.ToList(); Assert.Single(messageList); @@ -594,7 +594,7 @@ public async Task InvokedAsync_WithHierarchicalMultipleMessages_ShouldAddAllMess const string UserId = "user-batch"; const string SessionId = "session-batch"; // Test hierarchical partitioning constructor with connection string - using var store = new CosmosChatMessageStore(this._connectionString, s_testDatabaseId, HierarchicalTestContainerId, TenantId, UserId, SessionId); + using var provider = new CosmosChatHistoryProvider(this._connectionString, s_testDatabaseId, HierarchicalTestContainerId, TenantId, UserId, SessionId); var messages = new[] { new ChatMessage(ChatRole.User, "First hierarchical message"), @@ -602,17 +602,17 @@ public async Task InvokedAsync_WithHierarchicalMultipleMessages_ShouldAddAllMess new ChatMessage(ChatRole.User, "Third hierarchical message") }; - var context = new ChatMessageStore.InvokedContext(messages, []); + var context = new ChatHistoryProvider.InvokedContext(messages, []); // Act - await store.InvokedAsync(context); + await provider.InvokedAsync(context); // Wait a moment for eventual consistency await Task.Delay(100); // Assert - var invokingContext = new ChatMessageStore.InvokingContext([]); - var retrievedMessages = await 
store.InvokingAsync(invokingContext); + var invokingContext = new ChatHistoryProvider.InvokingContext([]); + var retrievedMessages = await provider.InvokingAsync(invokingContext); var messageList = retrievedMessages.ToList(); Assert.Equal(3, messageList.Count); @@ -633,12 +633,12 @@ public async Task InvokingAsync_WithHierarchicalPartitionIsolation_ShouldIsolate const string SessionId = "session-isolation"; // Different userIds create different hierarchical partitions, providing proper isolation - using var store1 = new CosmosChatMessageStore(this._connectionString, s_testDatabaseId, HierarchicalTestContainerId, TenantId, UserId1, SessionId); - using var store2 = new CosmosChatMessageStore(this._connectionString, s_testDatabaseId, HierarchicalTestContainerId, TenantId, UserId2, SessionId); + using var store1 = new CosmosChatHistoryProvider(this._connectionString, s_testDatabaseId, HierarchicalTestContainerId, TenantId, UserId1, SessionId); + using var store2 = new CosmosChatHistoryProvider(this._connectionString, s_testDatabaseId, HierarchicalTestContainerId, TenantId, UserId2, SessionId); // Add messages to both stores - var context1 = new ChatMessageStore.InvokedContext([new ChatMessage(ChatRole.User, "Message from user 1")], []); - var context2 = new ChatMessageStore.InvokedContext([new ChatMessage(ChatRole.User, "Message from user 2")], []); + var context1 = new ChatHistoryProvider.InvokedContext([new ChatMessage(ChatRole.User, "Message from user 1")], []); + var context2 = new ChatHistoryProvider.InvokedContext([new ChatMessage(ChatRole.User, "Message from user 2")], []); await store1.InvokedAsync(context1); await store2.InvokedAsync(context2); @@ -647,8 +647,8 @@ public async Task InvokingAsync_WithHierarchicalPartitionIsolation_ShouldIsolate await Task.Delay(100); // Act & Assert - var invokingContext1 = new ChatMessageStore.InvokingContext([]); - var invokingContext2 = new ChatMessageStore.InvokingContext([]); + var invokingContext1 = new 
ChatHistoryProvider.InvokingContext([]); + var invokingContext2 = new ChatHistoryProvider.InvokingContext([]); var messages1 = await store1.InvokingAsync(invokingContext1); var messageList1 = messages1.ToList(); @@ -673,27 +673,27 @@ public async Task SerializeDeserialize_WithHierarchicalPartitioning_ShouldPreser const string UserId = "user-serialize"; const string SessionId = "session-serialize"; - using var originalStore = new CosmosChatMessageStore(this._connectionString, s_testDatabaseId, HierarchicalTestContainerId, TenantId, UserId, SessionId); + using var originalStore = new CosmosChatHistoryProvider(this._connectionString, s_testDatabaseId, HierarchicalTestContainerId, TenantId, UserId, SessionId); - var context = new ChatMessageStore.InvokedContext([new ChatMessage(ChatRole.User, "Test serialization message")], []); + var context = new ChatHistoryProvider.InvokedContext([new ChatMessage(ChatRole.User, "Test serialization message")], []); await originalStore.InvokedAsync(context); - // Act - Serialize the store state + // Act - Serialize the provider state var serializedState = originalStore.Serialize(); - // Create a new store from the serialized state + // Create a new provider from the serialized state using var cosmosClient = new CosmosClient(EmulatorEndpoint, EmulatorKey); var serializerOptions = new JsonSerializerOptions { TypeInfoResolver = new DefaultJsonTypeInfoResolver() }; - using var deserializedStore = CosmosChatMessageStore.CreateFromSerializedState(cosmosClient, serializedState, s_testDatabaseId, HierarchicalTestContainerId, serializerOptions); + using var deserializedStore = CosmosChatHistoryProvider.CreateFromSerializedState(cosmosClient, serializedState, s_testDatabaseId, HierarchicalTestContainerId, serializerOptions); // Wait a moment for eventual consistency await Task.Delay(100); - // Assert - The deserialized store should have the same functionality - var invokingContext = new ChatMessageStore.InvokingContext([]); + // Assert - The 
deserialized provider should have the same functionality + var invokingContext = new ChatHistoryProvider.InvokingContext([]); var messages = await deserializedStore.InvokingAsync(invokingContext); var messageList = messages.ToList(); @@ -712,27 +712,27 @@ public async Task HierarchicalAndSimplePartitioning_ShouldCoexistAsync() this.SkipIfEmulatorNotAvailable(); const string SessionId = "coexist-session"; - // Create simple store using simple partitioning container and hierarchical store using hierarchical container - using var simpleStore = new CosmosChatMessageStore(this._connectionString, s_testDatabaseId, TestContainerId, SessionId); - using var hierarchicalStore = new CosmosChatMessageStore(this._connectionString, s_testDatabaseId, HierarchicalTestContainerId, "tenant-coexist", "user-coexist", SessionId); + // Create simple provider using simple partitioning container and hierarchical provider using hierarchical container + using var simpleProvider = new CosmosChatHistoryProvider(this._connectionString, s_testDatabaseId, TestContainerId, SessionId); + using var hierarchicalProvider = new CosmosChatHistoryProvider(this._connectionString, s_testDatabaseId, HierarchicalTestContainerId, "tenant-coexist", "user-coexist", SessionId); // Add messages to both - var simpleContext = new ChatMessageStore.InvokedContext([new ChatMessage(ChatRole.User, "Simple partitioning message")], []); - var hierarchicalContext = new ChatMessageStore.InvokedContext([new ChatMessage(ChatRole.User, "Hierarchical partitioning message")], []); + var simpleContext = new ChatHistoryProvider.InvokedContext([new ChatMessage(ChatRole.User, "Simple partitioning message")], []); + var hierarchicalContext = new ChatHistoryProvider.InvokedContext([new ChatMessage(ChatRole.User, "Hierarchical partitioning message")], []); - await simpleStore.InvokedAsync(simpleContext); - await hierarchicalStore.InvokedAsync(hierarchicalContext); + await simpleProvider.InvokedAsync(simpleContext); + await 
hierarchicalProvider.InvokedAsync(hierarchicalContext); // Wait a moment for eventual consistency await Task.Delay(100); // Act & Assert - var invokingContext = new ChatMessageStore.InvokingContext([]); + var invokingContext = new ChatHistoryProvider.InvokingContext([]); - var simpleMessages = await simpleStore.InvokingAsync(invokingContext); + var simpleMessages = await simpleProvider.InvokingAsync(invokingContext); var simpleMessageList = simpleMessages.ToList(); - var hierarchicalMessages = await hierarchicalStore.InvokingAsync(invokingContext); + var hierarchicalMessages = await hierarchicalProvider.InvokingAsync(invokingContext); var hierarchicalMessageList = hierarchicalMessages.ToList(); // Each should only see its own messages since they use different containers @@ -750,7 +750,7 @@ public async Task MaxMessagesToRetrieve_ShouldLimitAndReturnMostRecentAsync() this.SkipIfEmulatorNotAvailable(); const string ConversationId = "max-messages-test"; - using var store = new CosmosChatMessageStore(this._connectionString, s_testDatabaseId, TestContainerId, ConversationId); + using var provider = new CosmosChatHistoryProvider(this._connectionString, s_testDatabaseId, TestContainerId, ConversationId); // Add 10 messages var messages = new List(); @@ -760,16 +760,16 @@ public async Task MaxMessagesToRetrieve_ShouldLimitAndReturnMostRecentAsync() await Task.Delay(10); // Small delay to ensure different timestamps } - var context = new ChatMessageStore.InvokedContext(messages, []); - await store.InvokedAsync(context); + var context = new ChatHistoryProvider.InvokedContext(messages, []); + await provider.InvokedAsync(context); // Wait for eventual consistency await Task.Delay(100); // Act - Set max to 5 and retrieve - store.MaxMessagesToRetrieve = 5; - var invokingContext = new ChatMessageStore.InvokingContext([]); - var retrievedMessages = await store.InvokingAsync(invokingContext); + provider.MaxMessagesToRetrieve = 5; + var invokingContext = new 
ChatHistoryProvider.InvokingContext([]); + var retrievedMessages = await provider.InvokingAsync(invokingContext); var messageList = retrievedMessages.ToList(); // Assert - Should get the 5 most recent messages (6-10) in ascending order @@ -789,7 +789,7 @@ public async Task MaxMessagesToRetrieve_Null_ShouldReturnAllMessagesAsync() this.SkipIfEmulatorNotAvailable(); const string ConversationId = "max-messages-null-test"; - using var store = new CosmosChatMessageStore(this._connectionString, s_testDatabaseId, TestContainerId, ConversationId); + using var provider = new CosmosChatHistoryProvider(this._connectionString, s_testDatabaseId, TestContainerId, ConversationId); // Add 10 messages var messages = new List(); @@ -798,15 +798,15 @@ public async Task MaxMessagesToRetrieve_Null_ShouldReturnAllMessagesAsync() messages.Add(new ChatMessage(ChatRole.User, $"Message {i}")); } - var context = new ChatMessageStore.InvokedContext(messages, []); - await store.InvokedAsync(context); + var context = new ChatHistoryProvider.InvokedContext(messages, []); + await provider.InvokedAsync(context); // Wait for eventual consistency await Task.Delay(100); // Act - No limit set (default null) - var invokingContext = new ChatMessageStore.InvokingContext([]); - var retrievedMessages = await store.InvokingAsync(invokingContext); + var invokingContext = new ChatHistoryProvider.InvokingContext([]); + var retrievedMessages = await provider.InvokingAsync(invokingContext); var messageList = retrievedMessages.ToList(); // Assert - Should get all 10 messages diff --git a/dotnet/tests/Microsoft.Agents.AI.DurableTask.IntegrationTests/ConsoleAppSamplesValidation.cs b/dotnet/tests/Microsoft.Agents.AI.DurableTask.IntegrationTests/ConsoleAppSamplesValidation.cs new file mode 100644 index 0000000000..9a6159dbb6 --- /dev/null +++ b/dotnet/tests/Microsoft.Agents.AI.DurableTask.IntegrationTests/ConsoleAppSamplesValidation.cs @@ -0,0 +1,960 @@ +// Copyright (c) Microsoft. All rights reserved. 
+ +using System.Collections.Concurrent; +using System.Diagnostics; +using System.Reflection; +using System.Text; +using Microsoft.Extensions.Configuration; +using Microsoft.Extensions.Logging; +using Xunit.Abstractions; + +namespace Microsoft.Agents.AI.DurableTask.IntegrationTests; + +[Collection("Samples")] +[Trait("Category", "SampleValidation")] +public sealed class ConsoleAppSamplesValidation(ITestOutputHelper outputHelper) : IAsyncLifetime +{ + private const string DtsPort = "8080"; + private const string RedisPort = "6379"; + + private static readonly string s_dotnetTargetFramework = GetTargetFramework(); + private static readonly IConfiguration s_configuration = + new ConfigurationBuilder() + .AddUserSecrets(Assembly.GetExecutingAssembly()) + .AddEnvironmentVariables() + .Build(); + + private static bool s_infrastructureStarted; + private static readonly string s_samplesPath = Path.GetFullPath( + Path.Combine(AppDomain.CurrentDomain.BaseDirectory, "..", "..", "..", "..", "..", "samples", "DurableAgents", "ConsoleApps")); + + private readonly ITestOutputHelper _outputHelper = outputHelper; + + async Task IAsyncLifetime.InitializeAsync() + { + if (!s_infrastructureStarted) + { + await this.StartSharedInfrastructureAsync(); + s_infrastructureStarted = true; + } + } + + async Task IAsyncLifetime.DisposeAsync() + { + // Nothing to clean up + await Task.CompletedTask; + } + + [Fact] + public async Task SingleAgentSampleValidationAsync() + { + using CancellationTokenSource testTimeoutCts = this.CreateTestTimeoutCts(); + string samplePath = Path.Combine(s_samplesPath, "01_SingleAgent"); + await this.RunSampleTestAsync(samplePath, async (process, logs) => + { + string agentResponse = string.Empty; + bool inputSent = false; + + // Read output from logs queue + string? line; + while ((line = this.ReadLogLine(logs, testTimeoutCts.Token)) != null) + { + // Look for the agent's response. 
Unlike the interactive mode, we won't actually see a line + // that starts with "Joker: ". Instead, we'll see a line that looks like "You: Joker: ..." because + // the standard input is *not* echoed back to standard output. + if (line.Contains("Joker: ", StringComparison.OrdinalIgnoreCase)) + { + // This will give us the first line of the agent's response, which is all we need to verify that the agent is working. + agentResponse = line.Substring("Joker: ".Length).Trim(); + break; + } + else if (!inputSent) + { + // Send input to stdin after we've started seeing output from the app + await this.WriteInputAsync(process, "Tell me a joke about a pirate.", testTimeoutCts.Token); + inputSent = true; + } + } + + Assert.True(inputSent, "Input was not sent to the agent"); + Assert.NotEmpty(agentResponse); + + // Send exit command + await this.WriteInputAsync(process, "exit", testTimeoutCts.Token); + }); + } + + [Fact] + public async Task SingleAgentOrchestrationChainingSampleValidationAsync() + { + using CancellationTokenSource testTimeoutCts = this.CreateTestTimeoutCts(); + string samplePath = Path.Combine(s_samplesPath, "02_AgentOrchestration_Chaining"); + await this.RunSampleTestAsync(samplePath, async (process, logs) => + { + // Console app runs automatically, just wait for completion + string? 
line; + bool foundSuccess = false; + + while ((line = this.ReadLogLine(logs, testTimeoutCts.Token)) != null) + { + if (line.Contains("Orchestration completed successfully!", StringComparison.OrdinalIgnoreCase)) + { + foundSuccess = true; + } + + if (line.Contains("Result:", StringComparison.OrdinalIgnoreCase)) + { + string result = line.Substring("Result:".Length).Trim(); + Assert.NotEmpty(result); + break; + } + + // Check for failure + if (line.Contains("Orchestration failed!", StringComparison.OrdinalIgnoreCase)) + { + Assert.Fail("Orchestration failed."); + } + } + + Assert.True(foundSuccess, "Orchestration did not complete successfully."); + }); + } + + [Fact] + public async Task MultiAgentConcurrencySampleValidationAsync() + { + using CancellationTokenSource testTimeoutCts = this.CreateTestTimeoutCts(); + string samplePath = Path.Combine(s_samplesPath, "03_AgentOrchestration_Concurrency"); + await this.RunSampleTestAsync(samplePath, async (process, logs) => + { + // Send input to stdin + await this.WriteInputAsync(process, "What is temperature?", testTimeoutCts.Token); + + // Read output from logs queue + StringBuilder output = new(); + string? 
line; + bool foundSuccess = false; + bool foundPhysicist = false; + bool foundChemist = false; + + while ((line = this.ReadLogLine(logs, testTimeoutCts.Token)) != null) + { + output.AppendLine(line); + + if (line.Contains("Orchestration completed successfully!", StringComparison.OrdinalIgnoreCase)) + { + foundSuccess = true; + } + + if (line.Contains("Physicist's response:", StringComparison.OrdinalIgnoreCase)) + { + foundPhysicist = true; + } + + if (line.Contains("Chemist's response:", StringComparison.OrdinalIgnoreCase)) + { + foundChemist = true; + } + + // Check for failure + if (line.Contains("Orchestration failed!", StringComparison.OrdinalIgnoreCase)) + { + Assert.Fail("Orchestration failed."); + } + + // Stop reading once we have both responses + if (foundSuccess && foundPhysicist && foundChemist) + { + break; + } + } + + Assert.True(foundSuccess, "Orchestration did not complete successfully."); + Assert.True(foundPhysicist, "Physicist response not found."); + Assert.True(foundChemist, "Chemist response not found."); + }); + } + + [Fact] + public async Task MultiAgentConditionalSampleValidationAsync() + { + using CancellationTokenSource testTimeoutCts = this.CreateTestTimeoutCts(); + string samplePath = Path.Combine(s_samplesPath, "04_AgentOrchestration_Conditionals"); + await this.RunSampleTestAsync(samplePath, async (process, logs) => + { + // Test with legitimate email + await this.TestSpamDetectionAsync( + process: process, + logs: logs, + emailId: "email-001", + emailContent: "Hi John. I wanted to follow up on our meeting yesterday about the quarterly report. Could you please send me the updated figures by Friday? 
Thanks!", + expectedSpam: false, + testTimeoutCts.Token); + + // Restart the process for the second test + await process.WaitForExitAsync(); + }); + + // Run second test with spam email + using CancellationTokenSource testTimeoutCts2 = this.CreateTestTimeoutCts(); + await this.RunSampleTestAsync(samplePath, async (process, logs) => + { + await this.TestSpamDetectionAsync( + process, + logs, + emailId: "email-002", + emailContent: "URGENT! You've won $1,000,000! Click here now to claim your prize! Limited time offer! Don't miss out!", + expectedSpam: true, + testTimeoutCts2.Token); + }); + } + + private async Task TestSpamDetectionAsync( + Process process, + BlockingCollection logs, + string emailId, + string emailContent, + bool expectedSpam, + CancellationToken cancellationToken) + { + // Send email content to stdin + await this.WriteInputAsync(process, emailContent, cancellationToken); + + // Read output from logs queue + string? line; + bool foundSuccess = false; + + while ((line = this.ReadLogLine(logs, cancellationToken)) != null) + { + if (line.Contains("Email sent", StringComparison.OrdinalIgnoreCase)) + { + Assert.False(expectedSpam, "Email was sent, but was expected to be marked as spam."); + } + + if (line.Contains("Email marked as spam", StringComparison.OrdinalIgnoreCase)) + { + Assert.True(expectedSpam, "Email was marked as spam, but was expected to be sent."); + } + + if (line.Contains("Orchestration completed successfully!", StringComparison.OrdinalIgnoreCase)) + { + foundSuccess = true; + break; + } + + // Check for failure + if (line.Contains("Orchestration failed!", StringComparison.OrdinalIgnoreCase)) + { + Assert.Fail("Orchestration failed."); + } + } + + Assert.True(foundSuccess, "Orchestration did not complete successfully."); + } + + [Fact] + public async Task SingleAgentOrchestrationHITLSampleValidationAsync() + { + string samplePath = Path.Combine(s_samplesPath, "05_AgentOrchestration_HITL"); + + await this.RunSampleTestAsync(samplePath, 
async (process, logs) => + { + using CancellationTokenSource testTimeoutCts = this.CreateTestTimeoutCts(); + + // Start the HITL orchestration following the happy path from README + await this.WriteInputAsync(process, "The Future of Artificial Intelligence", testTimeoutCts.Token); + await this.WriteInputAsync(process, "3", testTimeoutCts.Token); + await this.WriteInputAsync(process, "72", testTimeoutCts.Token); + + // Read output from logs queue + string? line; + bool rejectionSent = false; + bool approvalSent = false; + bool contentPublished = false; + + while ((line = this.ReadLogLine(logs, testTimeoutCts.Token)) != null) + { + // Look for notification that content is ready. The first time we see this, we should send a rejection. + // The second time we see this, we should send approval. + if (line.Contains("Content is ready for review", StringComparison.OrdinalIgnoreCase)) + { + if (!rejectionSent) + { + // Prompt: Approve? (y/n): + await this.WriteInputAsync(process, "n", testTimeoutCts.Token); + + // Prompt: Feedback (optional): + await this.WriteInputAsync( + process, + "The article needs more technical depth and better examples. Rewrite it with less than 300 words.", + testTimeoutCts.Token); + rejectionSent = true; + } + else if (!approvalSent) + { + // Prompt: Approve? 
(y/n): + await this.WriteInputAsync(process, "y", testTimeoutCts.Token); + + // Prompt: Feedback (optional): + await this.WriteInputAsync(process, "Looks good!", testTimeoutCts.Token); + approvalSent = true; + } + else + { + // This should never happen + Assert.Fail("Unexpected message found."); + } + } + + // Look for success message + if (line.Contains("PUBLISHING: Content has been published", StringComparison.OrdinalIgnoreCase)) + { + contentPublished = true; + break; + } + + // Check for failure + if (line.Contains("Orchestration failed", StringComparison.OrdinalIgnoreCase)) + { + Assert.Fail("Orchestration failed."); + } + } + + Assert.True(rejectionSent, "Wasn't prompted with the first draft."); + Assert.True(approvalSent, "Wasn't prompted with the second draft."); + Assert.True(contentPublished, "Content was not published."); + }); + } + + [Fact] + public async Task LongRunningToolsSampleValidationAsync() + { + string samplePath = Path.Combine(s_samplesPath, "06_LongRunningTools"); + await this.RunSampleTestAsync(samplePath, async (process, logs) => + { + // This test takes a bit longer to run due to the multiple agent interactions and the lengthy content generation. + using CancellationTokenSource testTimeoutCts = this.CreateTestTimeoutCts(TimeSpan.FromSeconds(90)); + + // Test starting an agent that schedules a content generation orchestration + await this.WriteInputAsync( + process, + "Start a content generation workflow for the topic 'The Future of Artificial Intelligence'. Keep it less than 300 words.", + testTimeoutCts.Token); + + // Read output from logs queue + bool rejectionSent = false; + bool approvalSent = false; + bool contentPublished = false; + + string? line; + while ((line = this.ReadLogLine(logs, testTimeoutCts.Token)) != null) + { + // Look for notification that content is ready. The first time we see this, we should send a rejection. + // The second time we see this, we should send approval. 
+ if (line.Contains("NOTIFICATION: Please review the following content for approval", StringComparison.OrdinalIgnoreCase)) + { + // Wait for the notification to be fully written to the console + await Task.Delay(TimeSpan.FromSeconds(1), testTimeoutCts.Token); + + if (!rejectionSent) + { + // Reject the content with feedback. Note that we need to send a newline character to the console first before sending the input. + await this.WriteInputAsync( + process, + "\nReject the content with feedback: Make it even shorter.", + testTimeoutCts.Token); + rejectionSent = true; + } + else if (!approvalSent) + { + // Approve the content. Note that we need to send a newline character to the console first before sending the input. + await this.WriteInputAsync( + process, + "\nApprove the content", + testTimeoutCts.Token); + approvalSent = true; + } + else + { + // This should never happen + Assert.Fail("Unexpected message found."); + } + } + + // Look for success message + if (line.Contains("PUBLISHING: Content has been published successfully", StringComparison.OrdinalIgnoreCase)) + { + contentPublished = true; + + // Ask for the status of the workflow to confirm that it completed successfully. 
+ await Task.Delay(TimeSpan.FromSeconds(1), testTimeoutCts.Token); + await this.WriteInputAsync(process, "\nGet the status of the workflow you previously started", testTimeoutCts.Token); + } + + // Check for workflow completion or failure + if (contentPublished) + { + if (line.Contains("Completed", StringComparison.OrdinalIgnoreCase)) + { + break; + } + else if (line.Contains("Failed", StringComparison.OrdinalIgnoreCase)) + { + Assert.Fail("Workflow failed."); + } + } + } + + Assert.True(rejectionSent, "Wasn't prompted with the first draft."); + Assert.True(approvalSent, "Wasn't prompted with the second draft."); + Assert.True(contentPublished, "Content was not published."); + }); + } + + [Fact] + public async Task ReliableStreamingSampleValidationAsync() + { + string samplePath = Path.Combine(s_samplesPath, "07_ReliableStreaming"); + await this.RunSampleTestAsync(samplePath, async (process, logs) => + { + // This test takes a bit longer to run due to the multiple agent interactions and the lengthy content generation. + using CancellationTokenSource testTimeoutCts = this.CreateTestTimeoutCts(TimeSpan.FromSeconds(90)); + + // Test the agent endpoint with a simple prompt + await this.WriteInputAsync(process, "Plan a 5-day trip to Seattle. Include daily activities.", testTimeoutCts.Token); + + // Read output from stdout - should stream in real-time + // NOTE: The sample uses Console.Write() for streaming chunks, which means content may not be line-buffered. + // We test the interrupt/resume flow by: + // 1. Waiting for at least 10 lines of content + // 2. Sending Enter to interrupt + // 3. Verifying we get "Last cursor" output + // 4. Sending Enter again to resume + // 5. Verifying we get more content and that we're not restarting from the beginning + string? 
line; + bool foundConversationStart = false; + int contentLinesBeforeInterrupt = 0; + int contentLinesAfterResume = 0; + bool foundLastCursor = false; + bool foundResumeMessage = false; + bool interrupted = false; + bool resumed = false; + + // Read output with a reasonable timeout + using CancellationTokenSource readTimeoutCts = this.CreateTestTimeoutCts(); + DateTime? interruptTime = null; + try + { + while ((line = this.ReadLogLine(logs, readTimeoutCts.Token)) != null) + { + // Look for the conversation start message (updated format) + if (line.Contains("Conversation ID", StringComparison.OrdinalIgnoreCase)) + { + foundConversationStart = true; + continue; + } + + // Check if this is a content line (not prompts or status messages) + bool isContentLine = !string.IsNullOrWhiteSpace(line) && + !line.Contains("Conversation ID", StringComparison.OrdinalIgnoreCase) && + !line.Contains("Press [Enter]", StringComparison.OrdinalIgnoreCase) && + !line.Contains("You:", StringComparison.OrdinalIgnoreCase) && + !line.Contains("exit", StringComparison.OrdinalIgnoreCase) && + !line.Contains("Stream cancelled", StringComparison.OrdinalIgnoreCase) && + !line.Contains("Resuming conversation", StringComparison.OrdinalIgnoreCase) && + !line.Contains("Last cursor", StringComparison.OrdinalIgnoreCase); + + // Phase 1: Collect content before interrupt + if (foundConversationStart && !interrupted && isContentLine) + { + contentLinesBeforeInterrupt++; + } + + // Phase 2: Wait for enough content, then interrupt + // Interrupt after 2 lines to maximize chance of catching stream while active + // (streams can complete very quickly, so we need to interrupt early) + if (foundConversationStart && !interrupted && contentLinesBeforeInterrupt >= 2) + { + this._outputHelper.WriteLine($"Interrupting stream after {contentLinesBeforeInterrupt} content lines"); + interrupted = true; + interruptTime = DateTime.Now; + + // Send Enter to interrupt the stream + await this.WriteInputAsync(process, 
string.Empty, testTimeoutCts.Token); + + // Give the cancellation token a moment to be processed + // Use a longer delay to ensure cancellation propagates + await Task.Delay(TimeSpan.FromMilliseconds(300), testTimeoutCts.Token); + } + + // Phase 3: Look for "Last cursor" message after interrupt + if (interrupted && !resumed && line.Contains("Last cursor", StringComparison.OrdinalIgnoreCase)) + { + foundLastCursor = true; + + // Send Enter again to resume + this._outputHelper.WriteLine("Resuming stream from last cursor"); + await this.WriteInputAsync(process, string.Empty, testTimeoutCts.Token); + resumed = true; + } + + // Phase 4: Look for resume message + if (resumed && line.Contains("Resuming conversation", StringComparison.OrdinalIgnoreCase)) + { + foundResumeMessage = true; + } + + // Phase 5: Collect content after resume + if (resumed && isContentLine) + { + contentLinesAfterResume++; + } + + // Look for completion message - but don't break if we interrupted and haven't found Last cursor yet + // Allow some time after interrupt for the cancellation message to appear + if (line.Contains("Conversation completed", StringComparison.OrdinalIgnoreCase)) + { + // If we interrupted but haven't found Last cursor, wait a bit more + if (interrupted && !foundLastCursor && interruptTime.HasValue) + { + TimeSpan timeSinceInterrupt = DateTime.Now - interruptTime.Value; + if (timeSinceInterrupt < TimeSpan.FromSeconds(2)) + { + // Continue reading for a bit more to catch the cancellation message + this._outputHelper.WriteLine("Stream completed naturally, but waiting for Last cursor message after interrupt..."); + continue; + } + } + + // Only break if we've completed the test or if stream completed without interruption + if (!interrupted || (resumed && foundResumeMessage && contentLinesAfterResume >= 5)) + { + break; + } + } + + // Stop once we've verified the interrupt/resume flow works + if (resumed && foundResumeMessage && contentLinesAfterResume >= 5) + { + 
this._outputHelper.WriteLine($"Successfully verified interrupt/resume: {contentLinesBeforeInterrupt} lines before, {contentLinesAfterResume} lines after"); + break; + } + } + + // If we interrupted but didn't find Last cursor, wait a bit more for it to appear + if (interrupted && !foundLastCursor && interruptTime.HasValue) + { + TimeSpan timeSinceInterrupt = DateTime.Now - interruptTime.Value; + if (timeSinceInterrupt < TimeSpan.FromSeconds(3)) + { + this._outputHelper.WriteLine("Waiting for Last cursor message after interrupt..."); + using CancellationTokenSource waitCts = new(TimeSpan.FromSeconds(2)); + try + { + while ((line = this.ReadLogLine(logs, waitCts.Token)) != null) + { + if (line.Contains("Last cursor", StringComparison.OrdinalIgnoreCase)) + { + foundLastCursor = true; + if (!resumed) + { + this._outputHelper.WriteLine("Resuming stream from last cursor"); + await this.WriteInputAsync(process, string.Empty, testTimeoutCts.Token); + resumed = true; + } + break; + } + } + } + catch (OperationCanceledException) + { + // Timeout waiting for Last cursor + } + } + } + } + catch (OperationCanceledException) + { + // Timeout - check if we got enough to verify the flow + this._outputHelper.WriteLine($"Read timeout reached. Interrupted: {interrupted}, Resumed: {resumed}, Content before: {contentLinesBeforeInterrupt}, Content after: {contentLinesAfterResume}"); + } + + Assert.True(foundConversationStart, "Conversation start message not found."); + Assert.True(contentLinesBeforeInterrupt >= 2, $"Not enough content before interrupt (got {contentLinesBeforeInterrupt})."); + + // If stream completed before interrupt could take effect, that's a timing issue + // but we should still verify we got the conversation started + if (!interrupted) + { + this._outputHelper.WriteLine("WARNING: Stream completed before interrupt could be sent. 
This may indicate the stream is too fast."); + } + + Assert.True(interrupted, "Stream was not interrupted (may have completed too quickly)."); + Assert.True(foundLastCursor, "'Last cursor' message not found after interrupt."); + Assert.True(resumed, "Stream was not resumed."); + Assert.True(foundResumeMessage, "Resume message not found."); + Assert.True(contentLinesAfterResume > 0, "No content received after resume (expected to continue from cursor, not restart)."); + }); + } + + private static string GetTargetFramework() + { + string filePath = new Uri(typeof(ConsoleAppSamplesValidation).Assembly.Location).LocalPath; + string directory = Path.GetDirectoryName(filePath)!; + string tfm = Path.GetFileName(directory); + if (tfm.StartsWith("net", StringComparison.OrdinalIgnoreCase)) + { + return tfm; + } + + throw new InvalidOperationException($"Unable to find target framework in path: {filePath}"); + } + + private async Task StartSharedInfrastructureAsync() + { + this._outputHelper.WriteLine("Starting shared infrastructure for console app samples..."); + + // Start DTS emulator + await this.StartDtsEmulatorAsync(); + + // Start Redis + await this.StartRedisAsync(); + + // Wait for infrastructure to be ready + await Task.Delay(TimeSpan.FromSeconds(5)); + } + + private async Task StartDtsEmulatorAsync() + { + // Start DTS emulator if it's not already running + if (!await this.IsDtsEmulatorRunningAsync()) + { + this._outputHelper.WriteLine("Starting DTS emulator..."); + await this.RunCommandAsync("docker", [ + "run", "-d", + "--name", "dts-emulator", + "-p", $"{DtsPort}:8080", + "-e", "DTS_USE_DYNAMIC_TASK_HUBS=true", + "mcr.microsoft.com/dts/dts-emulator:latest" + ]); + } + } + + private async Task StartRedisAsync() + { + if (!await this.IsRedisRunningAsync()) + { + this._outputHelper.WriteLine("Starting Redis..."); + await this.RunCommandAsync("docker", [ + "run", "-d", + "--name", "redis", + "-p", $"{RedisPort}:6379", + "redis:latest" + ]); + } + } + + private async 
Task IsDtsEmulatorRunningAsync() + { + this._outputHelper.WriteLine($"Checking if DTS emulator is running at http://localhost:{DtsPort}/healthz..."); + + // DTS emulator doesn't support HTTP/1.1, so we need to use HTTP/2.0 + using HttpClient http2Client = new() + { + DefaultRequestVersion = new Version(2, 0), + DefaultVersionPolicy = HttpVersionPolicy.RequestVersionExact + }; + + try + { + using CancellationTokenSource timeoutCts = new(TimeSpan.FromSeconds(30)); + using HttpResponseMessage response = await http2Client.GetAsync(new Uri($"http://localhost:{DtsPort}/healthz"), timeoutCts.Token); + if (response.Content.Headers.ContentLength > 0) + { + string content = await response.Content.ReadAsStringAsync(timeoutCts.Token); + this._outputHelper.WriteLine($"DTS emulator health check response: {content}"); + } + + if (response.IsSuccessStatusCode) + { + this._outputHelper.WriteLine("DTS emulator is running"); + return true; + } + + this._outputHelper.WriteLine($"DTS emulator is not running. 
Status code: {response.StatusCode}"); + return false; + } + catch (HttpRequestException ex) + { + this._outputHelper.WriteLine($"DTS emulator is not running: {ex.Message}"); + return false; + } + } + + private async Task IsRedisRunningAsync() + { + this._outputHelper.WriteLine($"Checking if Redis is running at localhost:{RedisPort}..."); + + try + { + using CancellationTokenSource timeoutCts = new(TimeSpan.FromSeconds(30)); + ProcessStartInfo startInfo = new() + { + FileName = "docker", + Arguments = "exec redis redis-cli ping", + UseShellExecute = false, + RedirectStandardOutput = true, + RedirectStandardError = true, + CreateNoWindow = true + }; + + using Process process = new() { StartInfo = startInfo }; + if (!process.Start()) + { + this._outputHelper.WriteLine("Failed to start docker exec command"); + return false; + } + + string output = await process.StandardOutput.ReadToEndAsync(timeoutCts.Token); + await process.WaitForExitAsync(timeoutCts.Token); + + if (process.ExitCode == 0 && output.Contains("PONG", StringComparison.OrdinalIgnoreCase)) + { + this._outputHelper.WriteLine("Redis is running"); + return true; + } + + this._outputHelper.WriteLine($"Redis is not running. Exit code: {process.ExitCode}, Output: {output}"); + return false; + } + catch (Exception ex) + { + this._outputHelper.WriteLine($"Redis is not running: {ex.Message}"); + return false; + } + } + + private async Task RunSampleTestAsync(string samplePath, Func, Task> testAction) + { + // Generate a unique TaskHub name for this sample test to prevent cross-test interference + // when multiple tests run together and share the same DTS emulator. 
+ string uniqueTaskHubName = $"sample-{Guid.NewGuid().ToString("N").Substring(0, 6)}"; + + // Start the console app + // Use BlockingCollection to safely read logs asynchronously captured from the process + using BlockingCollection logsContainer = []; + using Process appProcess = this.StartConsoleApp(samplePath, logsContainer, uniqueTaskHubName); + try + { + // Run the test + await testAction(appProcess, logsContainer); + } + catch (OperationCanceledException e) + { + throw new TimeoutException("Core test logic timed out!", e); + } + finally + { + logsContainer.CompleteAdding(); + await this.StopProcessAsync(appProcess); + } + } + + private sealed record OutputLog(DateTime Timestamp, LogLevel Level, string Message); + + /// + /// Writes a line to the process's stdin and flushes it. + /// Logs the input being sent for debugging purposes. + /// + private async Task WriteInputAsync(Process process, string input, CancellationToken cancellationToken) + { + this._outputHelper.WriteLine($"{DateTime.Now:HH:mm:ss.fff} [{process.ProcessName}(in)]: {input}"); + await process.StandardInput.WriteLineAsync(input); + await process.StandardInput.FlushAsync(cancellationToken); + } + + /// + /// Reads a line from the logs queue, filtering for Information level logs (stdout). + /// Returns null if the collection is completed and empty, or if cancellation is requested. + /// + private string? 
ReadLogLine(BlockingCollection logs, CancellationToken cancellationToken) + { + try + { + while (!cancellationToken.IsCancellationRequested) + { + // Block until a log entry is available or cancellation is requested + // Take will throw OperationCanceledException if cancelled, or InvalidOperationException if collection is completed + OutputLog log = logs.Take(cancellationToken); + + // Check for unhandled exceptions in the logs, which are never expected (but can happen) + if (log.Message.Contains("Unhandled exception")) + { + Assert.Fail("Console app encountered an unhandled exception."); + } + + // Only return Information level logs (stdout), skip Error logs (stderr) + if (log.Level == LogLevel.Information) + { + return log.Message; + } + } + } + catch (OperationCanceledException) + { + // Cancellation requested + return null; + } + catch (InvalidOperationException) + { + // Collection is completed and empty + return null; + } + + return null; + } + + private Process StartConsoleApp(string samplePath, BlockingCollection logs, string taskHubName) + { + ProcessStartInfo startInfo = new() + { + FileName = "dotnet", + Arguments = $"run --framework {s_dotnetTargetFramework}", + WorkingDirectory = samplePath, + UseShellExecute = false, + RedirectStandardOutput = true, + RedirectStandardError = true, + RedirectStandardInput = true, + }; + + string openAiEndpoint = s_configuration["AZURE_OPENAI_ENDPOINT"] ?? + throw new InvalidOperationException("The required AZURE_OPENAI_ENDPOINT env variable is not set."); + string openAiDeployment = s_configuration["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"] ?? 
+ throw new InvalidOperationException("The required AZURE_OPENAI_CHAT_DEPLOYMENT_NAME env variable is not set."); + + void SetAndLogEnvironmentVariable(string key, string value) + { + this._outputHelper.WriteLine($"Setting environment variable for {startInfo.FileName} sub-process: {key}={value}"); + startInfo.EnvironmentVariables[key] = value; + } + + // Set required environment variables for the app + SetAndLogEnvironmentVariable("AZURE_OPENAI_ENDPOINT", openAiEndpoint); + SetAndLogEnvironmentVariable("AZURE_OPENAI_DEPLOYMENT", openAiDeployment); + SetAndLogEnvironmentVariable("DURABLE_TASK_SCHEDULER_CONNECTION_STRING", + $"Endpoint=http://localhost:{DtsPort};TaskHub={taskHubName};Authentication=None"); + SetAndLogEnvironmentVariable("REDIS_CONNECTION_STRING", $"localhost:{RedisPort}"); + + Process process = new() { StartInfo = startInfo }; + + // Capture the output and error streams asynchronously + // These events fire asynchronously, so we add to the blocking collection which is thread-safe + process.ErrorDataReceived += (sender, e) => + { + if (e.Data != null) + { + string logMessage = $"{DateTime.Now:HH:mm:ss.fff} [{startInfo.FileName}(err)]: {e.Data}"; + this._outputHelper.WriteLine(logMessage); + Debug.WriteLine(logMessage); + try + { + logs.Add(new OutputLog(DateTime.Now, LogLevel.Error, e.Data)); + } + catch (InvalidOperationException) + { + // Collection is completed, ignore + } + } + }; + + process.OutputDataReceived += (sender, e) => + { + if (e.Data != null) + { + string logMessage = $"{DateTime.Now:HH:mm:ss.fff} [{startInfo.FileName}(out)]: {e.Data}"; + this._outputHelper.WriteLine(logMessage); + Debug.WriteLine(logMessage); + try + { + logs.Add(new OutputLog(DateTime.Now, LogLevel.Information, e.Data)); + } + catch (InvalidOperationException) + { + // Collection is completed, ignore + } + } + }; + + if (!process.Start()) + { + throw new InvalidOperationException("Failed to start the console app"); + } + + process.BeginErrorReadLine(); + 
process.BeginOutputReadLine(); + + return process; + } + + private async Task RunCommandAsync(string command, string[] args) + { + await this.RunCommandAsync(command, workingDirectory: null, args: args); + } + + private async Task RunCommandAsync(string command, string? workingDirectory, string[] args) + { + ProcessStartInfo startInfo = new() + { + FileName = command, + Arguments = string.Join(" ", args), + WorkingDirectory = workingDirectory, + UseShellExecute = false, + RedirectStandardOutput = true, + RedirectStandardError = true, + CreateNoWindow = true + }; + + this._outputHelper.WriteLine($"Running command: {command} {string.Join(" ", args)}"); + + using Process process = new() { StartInfo = startInfo }; + process.ErrorDataReceived += (sender, e) => this._outputHelper.WriteLine($"[{command}(err)]: {e.Data}"); + process.OutputDataReceived += (sender, e) => this._outputHelper.WriteLine($"[{command}(out)]: {e.Data}"); + if (!process.Start()) + { + throw new InvalidOperationException("Failed to start the command"); + } + process.BeginErrorReadLine(); + process.BeginOutputReadLine(); + + using CancellationTokenSource cancellationTokenSource = new(TimeSpan.FromMinutes(1)); + await process.WaitForExitAsync(cancellationTokenSource.Token); + + this._outputHelper.WriteLine($"Command completed with exit code: {process.ExitCode}"); + } + + private async Task StopProcessAsync(Process process) + { + try + { + if (!process.HasExited) + { + this._outputHelper.WriteLine($"{DateTime.Now:HH:mm:ss.fff} Killing process {process.ProcessName}#{process.Id}"); + process.Kill(entireProcessTree: true); + + using CancellationTokenSource timeoutCts = new(TimeSpan.FromSeconds(10)); + await process.WaitForExitAsync(timeoutCts.Token); + this._outputHelper.WriteLine($"{DateTime.Now:HH:mm:ss.fff} Process exited: {process.Id}"); + } + } + catch (Exception ex) + { + this._outputHelper.WriteLine($"{DateTime.Now:HH:mm:ss.fff} Failed to stop process: {ex.Message}"); + } + } + + private 
CancellationTokenSource CreateTestTimeoutCts(TimeSpan? timeout = null) + { + TimeSpan testTimeout = Debugger.IsAttached ? TimeSpan.FromMinutes(5) : timeout ?? TimeSpan.FromSeconds(60); + return new CancellationTokenSource(testTimeout); + } +} diff --git a/dotnet/tests/Microsoft.Agents.AI.DurableTask.IntegrationTests/TestHelper.cs b/dotnet/tests/Microsoft.Agents.AI.DurableTask.IntegrationTests/TestHelper.cs index 15526621d1..8022e71119 100644 --- a/dotnet/tests/Microsoft.Agents.AI.DurableTask.IntegrationTests/TestHelper.cs +++ b/dotnet/tests/Microsoft.Agents.AI.DurableTask.IntegrationTests/TestHelper.cs @@ -76,10 +76,14 @@ private static TestHelper BuildAndStartTestHelper( { TestLoggerProvider loggerProvider = new(outputHelper); + // Generate a unique TaskHub name for this test instance to prevent cross-test interference + // when multiple tests run together and share the same DTS emulator. + string uniqueTaskHubName = $"test-{Guid.NewGuid().ToString("N").Substring(0, 6)}"; + IHost host = Host.CreateDefaultBuilder() .ConfigureServices((ctx, services) => { - string dtsConnectionString = GetDurableTaskSchedulerConnectionString(ctx.Configuration); + string dtsConnectionString = GetDurableTaskSchedulerConnectionString(ctx.Configuration, uniqueTaskHubName); // Register durable agents using the caller-supplied registration action and // apply the default chat client for agents that don't supply one themselves. @@ -107,11 +111,46 @@ private static TestHelper BuildAndStartTestHelper( return new TestHelper(loggerProvider, host, client); } - private static string GetDurableTaskSchedulerConnectionString(IConfiguration configuration) + private static string GetDurableTaskSchedulerConnectionString(IConfiguration configuration, string? taskHubName = null) { // The default value is for local development using the Durable Task Scheduler emulator. - return configuration["DURABLE_TASK_SCHEDULER_CONNECTION_STRING"] - ?? 
"Endpoint=http://localhost:8080;TaskHub=default;Authentication=None"; + string? connectionString = configuration["DURABLE_TASK_SCHEDULER_CONNECTION_STRING"]; + + if (connectionString != null) + { + // If a connection string is provided, replace the TaskHub name if a custom one is specified + if (taskHubName != null) + { + // Replace TaskHub in the connection string + if (connectionString.Contains("TaskHub=", StringComparison.OrdinalIgnoreCase)) + { + // Find and replace the TaskHub value + int taskHubIndex = connectionString.IndexOf("TaskHub=", StringComparison.OrdinalIgnoreCase); + int taskHubValueStart = taskHubIndex + "TaskHub=".Length; + int taskHubValueEnd = connectionString.IndexOf(';', taskHubValueStart); + if (taskHubValueEnd == -1) + { + taskHubValueEnd = connectionString.Length; + } + + connectionString = string.Concat( + connectionString.AsSpan(0, taskHubValueStart), + taskHubName, + connectionString.AsSpan(taskHubValueEnd)); + } + else + { + // Append TaskHub if it doesn't exist + connectionString += $";TaskHub={taskHubName}"; + } + } + + return connectionString; + } + + // Default connection string with unique TaskHub name + string defaultTaskHub = taskHubName ?? "default"; + return $"Endpoint=http://localhost:8080;TaskHub={defaultTaskHub};Authentication=None"; } internal static ChatClient GetAzureOpenAIChatClient(IConfiguration configuration) diff --git a/dotnet/tests/Microsoft.Agents.AI.OpenAI.UnitTests/ChatClient/AsyncStreamingChatCompletionUpdateCollectionResultTests.cs b/dotnet/tests/Microsoft.Agents.AI.OpenAI.UnitTests/ChatClient/AsyncStreamingChatCompletionUpdateCollectionResultTests.cs new file mode 100644 index 0000000000..899d329d5b --- /dev/null +++ b/dotnet/tests/Microsoft.Agents.AI.OpenAI.UnitTests/ChatClient/AsyncStreamingChatCompletionUpdateCollectionResultTests.cs @@ -0,0 +1,109 @@ +// Copyright (c) Microsoft. All rights reserved. 
+ +using System.ClientModel; +using System.Collections.Generic; +using System.Threading.Tasks; +using Microsoft.Extensions.AI; +using OpenAI.Chat; + +namespace Microsoft.Agents.AI.OpenAI.UnitTests.ChatClient; + +/// +/// Unit tests for the class. +/// +public sealed class AsyncStreamingChatCompletionUpdateCollectionResultTests +{ + /// + /// Verify that GetContinuationToken returns null. + /// + [Fact] + public void GetContinuationToken_ReturnsNull() + { + // Arrange + IAsyncEnumerable updates = CreateTestUpdatesAsync(); + AsyncCollectionResult collectionResult = new AsyncStreamingChatCompletionUpdateCollectionResult(updates); + + // Act + ContinuationToken? token = collectionResult.GetContinuationToken(null!); + + // Assert + Assert.Null(token); + } + + /// + /// Verify that GetRawPagesAsync returns a single page. + /// + [Fact] + public async Task GetRawPagesAsync_ReturnsSinglePageAsync() + { + // Arrange + IAsyncEnumerable updates = CreateTestUpdatesAsync(); + AsyncCollectionResult collectionResult = new AsyncStreamingChatCompletionUpdateCollectionResult(updates); + + // Act + List pages = []; + await foreach (ClientResult page in collectionResult.GetRawPagesAsync()) + { + pages.Add(page); + } + + // Assert + Assert.Single(pages); + } + + /// + /// Verify that iterating through the collection yields streaming updates. + /// + [Fact] + public async Task IterateCollection_YieldsUpdatesAsync() + { + // Arrange + IAsyncEnumerable updates = CreateTestUpdatesAsync(); + AsyncCollectionResult collectionResult = new AsyncStreamingChatCompletionUpdateCollectionResult(updates); + + // Act + List results = []; + await foreach (StreamingChatCompletionUpdate update in collectionResult) + { + results.Add(update); + } + + // Assert + Assert.Single(results); + } + + /// + /// Verify that iterating through the collection with multiple updates yields all updates. 
+ /// + [Fact] + public async Task IterateCollection_WithMultipleUpdates_YieldsAllUpdatesAsync() + { + // Arrange + IAsyncEnumerable updates = CreateMultipleTestUpdatesAsync(); + AsyncCollectionResult collectionResult = new AsyncStreamingChatCompletionUpdateCollectionResult(updates); + + // Act + List results = []; + await foreach (StreamingChatCompletionUpdate update in collectionResult) + { + results.Add(update); + } + + // Assert + Assert.Equal(3, results.Count); + } + + private static async IAsyncEnumerable CreateTestUpdatesAsync() + { + yield return new AgentResponseUpdate(ChatRole.Assistant, "test"); + await Task.CompletedTask; + } + + private static async IAsyncEnumerable CreateMultipleTestUpdatesAsync() + { + yield return new AgentResponseUpdate(ChatRole.Assistant, "first"); + yield return new AgentResponseUpdate(ChatRole.Assistant, "second"); + yield return new AgentResponseUpdate(ChatRole.Assistant, "third"); + await Task.CompletedTask; + } +} diff --git a/dotnet/tests/Microsoft.Agents.AI.OpenAI.UnitTests/ChatClient/AsyncStreamingResponseUpdateCollectionResultTests.cs b/dotnet/tests/Microsoft.Agents.AI.OpenAI.UnitTests/ChatClient/AsyncStreamingResponseUpdateCollectionResultTests.cs new file mode 100644 index 0000000000..d6bb87596c --- /dev/null +++ b/dotnet/tests/Microsoft.Agents.AI.OpenAI.UnitTests/ChatClient/AsyncStreamingResponseUpdateCollectionResultTests.cs @@ -0,0 +1,190 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.ClientModel; +using System.Collections.Generic; +using System.Threading.Tasks; +using Microsoft.Extensions.AI; +using OpenAI.Responses; + +namespace Microsoft.Agents.AI.OpenAI.UnitTests.ChatClient; + +/// +/// Unit tests for the class. +/// +public sealed class AsyncStreamingResponseUpdateCollectionResultTests +{ + /// + /// Verify that GetContinuationToken returns null. 
+ /// + [Fact] + public void GetContinuationToken_ReturnsNull() + { + // Arrange + IAsyncEnumerable updates = CreateTestUpdatesAsync(); + AsyncCollectionResult collectionResult = new AsyncStreamingResponseUpdateCollectionResult(updates); + + // Act + ContinuationToken? token = collectionResult.GetContinuationToken(null!); + + // Assert + Assert.Null(token); + } + + /// + /// Verify that GetRawPagesAsync returns a single page. + /// + [Fact] + public async Task GetRawPagesAsync_ReturnsSinglePageAsync() + { + // Arrange + IAsyncEnumerable updates = CreateTestUpdatesAsync(); + AsyncCollectionResult collectionResult = new AsyncStreamingResponseUpdateCollectionResult(updates); + + // Act + List pages = []; + await foreach (ClientResult page in collectionResult.GetRawPagesAsync()) + { + pages.Add(page); + } + + // Assert + Assert.Single(pages); + } + + /// + /// Verify that iterating through the collection yields streaming updates when RawRepresentation is a StreamingResponseUpdate. + /// + [Fact] + public async Task IterateCollection_WithStreamingResponseUpdateRawRepresentation_YieldsUpdatesAsync() + { + // Arrange + StreamingResponseUpdate rawUpdate = CreateStreamingResponseUpdate(); + IAsyncEnumerable updates = CreateTestUpdatesWithRawRepresentationAsync(rawUpdate); + AsyncCollectionResult collectionResult = new AsyncStreamingResponseUpdateCollectionResult(updates); + + // Act + List results = []; + await foreach (StreamingResponseUpdate update in collectionResult) + { + results.Add(update); + } + + // Assert + Assert.Single(results); + Assert.Same(rawUpdate, results[0]); + } + + /// + /// Verify that iterating through the collection yields updates when RawRepresentation is a ChatResponseUpdate containing a StreamingResponseUpdate. 
+ /// + [Fact] + public async Task IterateCollection_WithChatResponseUpdateContainingStreamingResponseUpdate_YieldsUpdatesAsync() + { + // Arrange + StreamingResponseUpdate rawUpdate = CreateStreamingResponseUpdate(); + ChatResponseUpdate chatResponseUpdate = new() { RawRepresentation = rawUpdate }; + IAsyncEnumerable updates = CreateTestUpdatesWithChatResponseUpdateAsync(chatResponseUpdate); + AsyncCollectionResult collectionResult = new AsyncStreamingResponseUpdateCollectionResult(updates); + + // Act + List results = []; + await foreach (StreamingResponseUpdate update in collectionResult) + { + results.Add(update); + } + + // Assert + Assert.Single(results); + Assert.Same(rawUpdate, results[0]); + } + + /// + /// Verify that iterating through the collection skips updates when RawRepresentation is not a StreamingResponseUpdate. + /// + [Fact] + public async Task IterateCollection_WithNonStreamingResponseUpdateRawRepresentation_SkipsUpdateAsync() + { + // Arrange + IAsyncEnumerable updates = CreateTestUpdatesAsync(); + AsyncCollectionResult collectionResult = new AsyncStreamingResponseUpdateCollectionResult(updates); + + // Act + List results = []; + await foreach (StreamingResponseUpdate update in collectionResult) + { + results.Add(update); + } + + // Assert + Assert.Empty(results); + } + + /// + /// Verify that iterating through the collection skips updates when RawRepresentation is a ChatResponseUpdate without StreamingResponseUpdate. 
+ /// + [Fact] + public async Task IterateCollection_WithChatResponseUpdateWithoutStreamingResponseUpdate_SkipsUpdateAsync() + { + // Arrange + ChatResponseUpdate chatResponseUpdate = new() { RawRepresentation = "not a streaming update" }; + IAsyncEnumerable updates = CreateTestUpdatesWithChatResponseUpdateAsync(chatResponseUpdate); + AsyncCollectionResult collectionResult = new AsyncStreamingResponseUpdateCollectionResult(updates); + + // Act + List results = []; + await foreach (StreamingResponseUpdate update in collectionResult) + { + results.Add(update); + } + + // Assert + Assert.Empty(results); + } + + private static async IAsyncEnumerable CreateTestUpdatesAsync() + { + yield return new AgentResponseUpdate(ChatRole.Assistant, "test"); + await Task.CompletedTask; + } + + private static async IAsyncEnumerable CreateTestUpdatesWithRawRepresentationAsync(object rawRepresentation) + { + AgentResponseUpdate update = new(ChatRole.Assistant, "test") + { + RawRepresentation = rawRepresentation + }; + yield return update; + await Task.CompletedTask; + } + + private static async IAsyncEnumerable CreateTestUpdatesWithChatResponseUpdateAsync(ChatResponseUpdate chatResponseUpdate) + { + AgentResponseUpdate update = new(ChatRole.Assistant, "test") + { + RawRepresentation = chatResponseUpdate + }; + yield return update; + await Task.CompletedTask; + } + + private static StreamingResponseUpdate CreateStreamingResponseUpdate() + { + const string Json = """ + { + "type": "response.output_item.added", + "sequence_number": 1, + "output_index": 0, + "item": { + "id": "item_abc123", + "type": "message", + "status": "in_progress", + "role": "assistant", + "content": [] + } + } + """; + + return System.ClientModel.Primitives.ModelReaderWriter.Read(BinaryData.FromString(Json))!; + } +} diff --git a/dotnet/tests/Microsoft.Agents.AI.OpenAI.UnitTests/ChatClient/StreamingUpdatePipelineResponseTests.cs 
b/dotnet/tests/Microsoft.Agents.AI.OpenAI.UnitTests/ChatClient/StreamingUpdatePipelineResponseTests.cs new file mode 100644 index 0000000000..866bba5700 --- /dev/null +++ b/dotnet/tests/Microsoft.Agents.AI.OpenAI.UnitTests/ChatClient/StreamingUpdatePipelineResponseTests.cs @@ -0,0 +1,154 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Threading.Tasks; + +namespace Microsoft.Agents.AI.OpenAI.UnitTests.ChatClient; + +/// +/// Unit tests for the class. +/// +public sealed class StreamingUpdatePipelineResponseTests +{ + /// + /// Verify that Status property returns 200. + /// + [Fact] + public void Status_ReturnsOkStatus() + { + // Arrange + IAsyncEnumerable updates = CreateTestUpdatesAsync(); + PipelineResponse response = new StreamingUpdatePipelineResponse(updates); + + // Act + int status = response.Status; + + // Assert + Assert.Equal(200, status); + } + + /// + /// Verify that ReasonPhrase property returns "OK". + /// + [Fact] + public void ReasonPhrase_ReturnsOk() + { + // Arrange + IAsyncEnumerable updates = CreateTestUpdatesAsync(); + PipelineResponse response = new StreamingUpdatePipelineResponse(updates); + + // Act + string reasonPhrase = response.ReasonPhrase; + + // Assert + Assert.Equal("OK", reasonPhrase); + } + + /// + /// Verify that ContentStream getter returns null. + /// + [Fact] + public void ContentStream_Get_ReturnsNull() + { + // Arrange + IAsyncEnumerable updates = CreateTestUpdatesAsync(); + PipelineResponse response = new StreamingUpdatePipelineResponse(updates); + + // Act + System.IO.Stream? contentStream = response.ContentStream; + + // Assert + Assert.Null(contentStream); + } + + /// + /// Verify that ContentStream setter is a no-op. 
+ /// + [Fact] + public void ContentStream_Set_IsNoOp() + { + // Arrange + IAsyncEnumerable updates = CreateTestUpdatesAsync(); + PipelineResponse response = new StreamingUpdatePipelineResponse(updates); + var testStream = new System.IO.MemoryStream(); + + // Act + response.ContentStream = testStream; + + // Assert + Assert.Null(response.ContentStream); + + testStream.Dispose(); + } + + /// + /// Verify that Content property returns empty BinaryData. + /// + [Fact] + public void Content_ReturnsEmptyBinaryData() + { + // Arrange + IAsyncEnumerable updates = CreateTestUpdatesAsync(); + PipelineResponse response = new StreamingUpdatePipelineResponse(updates); + + // Act + BinaryData content = response.Content; + + // Assert + Assert.NotNull(content); + Assert.Equal(string.Empty, content.ToString()); + } + + /// + /// Verify that BufferContent throws NotSupportedException. + /// + [Fact] + public void BufferContent_ThrowsNotSupportedException() + { + // Arrange + IAsyncEnumerable updates = CreateTestUpdatesAsync(); + PipelineResponse response = new StreamingUpdatePipelineResponse(updates); + + // Act & Assert + var exception = Assert.Throws(() => response.BufferContent()); + Assert.Contains("Buffering content is not supported", exception.Message); + } + + /// + /// Verify that BufferContentAsync throws NotSupportedException. + /// + [Fact] + public async Task BufferContentAsync_ThrowsNotSupportedExceptionAsync() + { + // Arrange + IAsyncEnumerable updates = CreateTestUpdatesAsync(); + PipelineResponse response = new StreamingUpdatePipelineResponse(updates); + + // Act & Assert + var exception = await Assert.ThrowsAsync( + async () => await response.BufferContentAsync()); + Assert.Contains("Buffering content asynchronously is not supported", exception.Message); + } + + /// + /// Verify that Dispose does not throw. 
+ /// + [Fact] + public void Dispose_DoesNotThrow() + { + // Arrange + IAsyncEnumerable updates = CreateTestUpdatesAsync(); + PipelineResponse response = new StreamingUpdatePipelineResponse(updates); + + // Act & Assert + response.Dispose(); + } + + private static async IAsyncEnumerable CreateTestUpdatesAsync() + { + yield return new AgentResponseUpdate(Microsoft.Extensions.AI.ChatRole.Assistant, "test"); + await Task.CompletedTask; + } +} diff --git a/dotnet/tests/Microsoft.Agents.AI.OpenAI.UnitTests/Extensions/AIAgentWithOpenAIExtensionsTests.cs b/dotnet/tests/Microsoft.Agents.AI.OpenAI.UnitTests/Extensions/AIAgentWithOpenAIExtensionsTests.cs index d29535eddb..db22ab090b 100644 --- a/dotnet/tests/Microsoft.Agents.AI.OpenAI.UnitTests/Extensions/AIAgentWithOpenAIExtensionsTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.OpenAI.UnitTests/Extensions/AIAgentWithOpenAIExtensionsTests.cs @@ -7,6 +7,7 @@ using System.Threading.Tasks; using Moq; using Moq.Protected; +using OpenAI.Responses; using ChatMessage = Microsoft.Extensions.AI.ChatMessage; using ChatRole = Microsoft.Extensions.AI.ChatRole; using OpenAIChatMessage = OpenAI.Chat.ChatMessage; @@ -208,4 +209,167 @@ private static async IAsyncEnumerable ToAsyncEnumerableAsyn yield return await Task.FromResult(update); } } + + #region ResponseItem overload tests + + /// + /// Verify that RunAsync with ResponseItem throws ArgumentNullException when agent is null. + /// + [Fact] + public async Task RunAsync_ResponseItem_WithNullAgent_ThrowsArgumentNullExceptionAsync() + { + // Arrange + AIAgent? agent = null; + IEnumerable messages = [ResponseItem.CreateUserMessageItem("Test message")]; + + // Act & Assert + var exception = await Assert.ThrowsAsync( + () => agent!.RunAsync(messages)); + + Assert.Equal("agent", exception.ParamName); + } + + /// + /// Verify that RunAsync with ResponseItem throws ArgumentNullException when messages is null. 
+ /// + [Fact] + public async Task RunAsync_ResponseItem_WithNullMessages_ThrowsArgumentNullExceptionAsync() + { + // Arrange + var mockAgent = new Mock(); + IEnumerable? messages = null; + + // Act & Assert + var exception = await Assert.ThrowsAsync( + () => mockAgent.Object.RunAsync(messages!)); + + Assert.Equal("messages", exception.ParamName); + } + + /// + /// Verify that the RunAsync with ResponseItem extension method calls the underlying agent's RunAsync with converted messages and parameters. + /// + [Fact] + public async Task RunAsync_ResponseItem_CallsUnderlyingAgentAsync() + { + // Arrange + var mockAgent = new Mock(); + var mockThread = new Mock(); + var options = new AgentRunOptions(); + var cancellationToken = new CancellationToken(false); + const string TestMessageText = "Hello, assistant!"; + const string ResponseText = "This is the assistant's response."; + IEnumerable responseItemMessages = [ResponseItem.CreateUserMessageItem(TestMessageText)]; + + var responseMessage = new ChatMessage(ChatRole.Assistant, [new TextContent(ResponseText)]); + + mockAgent + .Protected() + .Setup>("RunCoreAsync", + ItExpr.IsAny>(), + ItExpr.IsAny(), + ItExpr.IsAny(), + ItExpr.IsAny()) + .ReturnsAsync(new AgentResponse([responseMessage])); + + // Act + ResponseResult result = await mockAgent.Object.RunAsync(responseItemMessages, mockThread.Object, options, cancellationToken); + + // Assert + mockAgent.Protected() + .Verify("RunCoreAsync", + Times.Once(), + ItExpr.IsAny>(), + mockThread.Object, + options, + cancellationToken + ); + + Assert.NotNull(result); + } + + /// + /// Verify that RunStreamingAsync with ResponseItem throws ArgumentNullException when agent is null. + /// + [Fact] + public void RunStreamingAsync_ResponseItem_WithNullAgent_ThrowsArgumentNullException() + { + // Arrange + AIAgent? 
agent = null; + IEnumerable messages = [ResponseItem.CreateUserMessageItem("Test message")]; + + // Act & Assert + Assert.Throws( + "agent", + () => agent!.RunStreamingAsync(messages)); + } + + /// + /// Verify that RunStreamingAsync with ResponseItem throws ArgumentNullException when messages is null. + /// + [Fact] + public void RunStreamingAsync_ResponseItem_WithNullMessages_ThrowsArgumentNullException() + { + // Arrange + var mockAgent = new Mock(); + IEnumerable? messages = null; + + // Act & Assert + var exception = Assert.Throws( + () => mockAgent.Object.RunStreamingAsync(messages!)); + + Assert.Equal("messages", exception.ParamName); + } + + /// + /// Verify that the RunStreamingAsync with ResponseItem extension method calls the underlying agent's RunStreamingAsync with converted messages and parameters. + /// + [Fact] + public async Task RunStreamingAsync_ResponseItem_CallsUnderlyingAgentAsync() + { + // Arrange + var mockAgent = new Mock(); + var mockThread = new Mock(); + var options = new AgentRunOptions(); + var cancellationToken = new CancellationToken(false); + const string TestMessageText = "Hello, assistant!"; + const string ResponseText1 = "This is "; + const string ResponseText2 = "the assistant's response."; + IEnumerable responseItemMessages = [ResponseItem.CreateUserMessageItem(TestMessageText)]; + + var responseUpdates = new List + { + new(ChatRole.Assistant, ResponseText1), + new(ChatRole.Assistant, ResponseText2) + }; + + mockAgent + .Protected() + .Setup>("RunCoreStreamingAsync", + ItExpr.IsAny>(), + ItExpr.IsAny(), + ItExpr.IsAny(), + ItExpr.IsAny()) + .Returns(ToAsyncEnumerableAsync(responseUpdates)); + + // Act + var result = mockAgent.Object.RunStreamingAsync(responseItemMessages, mockThread.Object, options, cancellationToken); + var updateCount = 0; + await foreach (var update in result) + { + updateCount++; + } + + // Assert + mockAgent.Protected() + .Verify("RunCoreStreamingAsync", + Times.Once(), + ItExpr.IsAny>(), + 
mockThread.Object, + options, + cancellationToken + ); + } + + #endregion } diff --git a/dotnet/tests/Microsoft.Agents.AI.OpenAI.UnitTests/Extensions/AgentResponseExtensionsTests.cs b/dotnet/tests/Microsoft.Agents.AI.OpenAI.UnitTests/Extensions/AgentResponseExtensionsTests.cs new file mode 100644 index 0000000000..b2b0a99002 --- /dev/null +++ b/dotnet/tests/Microsoft.Agents.AI.OpenAI.UnitTests/Extensions/AgentResponseExtensionsTests.cs @@ -0,0 +1,142 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using OpenAI.Chat; +using ChatMessage = Microsoft.Extensions.AI.ChatMessage; +using ChatRole = Microsoft.Extensions.AI.ChatRole; +using TextContent = Microsoft.Extensions.AI.TextContent; + +namespace Microsoft.Agents.AI.OpenAI.UnitTests.Extensions; + +/// +/// Unit tests for the AgentResponseExtensions class that provides OpenAI extension methods. +/// +public sealed class AgentResponseExtensionsTests +{ + /// + /// Verify that AsOpenAIChatCompletion throws ArgumentNullException when response is null. + /// + [Fact] + public void AsOpenAIChatCompletion_WithNullResponse_ThrowsArgumentNullException() + { + // Arrange + AgentResponse? response = null; + + // Act & Assert + var exception = Assert.Throws( + () => response!.AsOpenAIChatCompletion()); + + Assert.Equal("response", exception.ParamName); + } + + /// + /// Verify that AsOpenAIChatCompletion returns the RawRepresentation when it is a ChatCompletion. 
+ /// + [Fact] + public void AsOpenAIChatCompletion_WithChatCompletionRawRepresentation_ReturnsChatCompletion() + { + // Arrange + ChatCompletion chatCompletion = ModelReaderWriterHelper.CreateChatCompletion("assistant_id", "Hello"); + var responseMessage = new ChatMessage(ChatRole.Assistant, [new TextContent("Hello")]); + var agentResponse = new AgentResponse([responseMessage]) + { + RawRepresentation = chatCompletion + }; + + // Act + ChatCompletion result = agentResponse.AsOpenAIChatCompletion(); + + // Assert + Assert.NotNull(result); + Assert.Same(chatCompletion, result); + } + + /// + /// Verify that AsOpenAIChatCompletion converts a ChatResponse when RawRepresentation is not a ChatCompletion. + /// + [Fact] + public void AsOpenAIChatCompletion_WithNonChatCompletionRawRepresentation_ConvertsChatResponse() + { + // Arrange + const string ResponseText = "This is a test response."; + var responseMessage = new ChatMessage(ChatRole.Assistant, [new TextContent(ResponseText)]); + var agentResponse = new AgentResponse([responseMessage]); + + // Act + ChatCompletion result = agentResponse.AsOpenAIChatCompletion(); + + // Assert + Assert.NotNull(result); + Assert.Single(result.Content); + Assert.Equal(ResponseText, result.Content[0].Text); + } + + /// + /// Verify that AsOpenAIResponse throws ArgumentNullException when response is null. + /// + [Fact] + public void AsOpenAIResponse_WithNullResponse_ThrowsArgumentNullException() + { + // Arrange + AgentResponse? response = null; + + // Act & Assert + var exception = Assert.Throws( + () => response!.AsOpenAIResponse()); + + Assert.Equal("response", exception.ParamName); + } + + /// + /// Verify that AsOpenAIResponse converts a ChatResponse when RawRepresentation is not a ResponseResult. 
+ /// + [Fact] + public void AsOpenAIResponse_WithNonResponseResultRawRepresentation_ConvertsChatResponse() + { + // Arrange + const string ResponseText = "This is a test response."; + var responseMessage = new ChatMessage(ChatRole.Assistant, [new TextContent(ResponseText)]); + var agentResponse = new AgentResponse([responseMessage]); + + // Act + var result = agentResponse.AsOpenAIResponse(); + + // Assert + Assert.NotNull(result); + } +} + +/// +/// Helper class for creating OpenAI model objects using ModelReaderWriter. +/// +internal static class ModelReaderWriterHelper +{ + public static ChatCompletion CreateChatCompletion(string id, string contentText) + { + string json = $$""" + { + "id": "{{id}}", + "object": "chat.completion", + "created": 1700000000, + "model": "gpt-4", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "{{contentText}}" + }, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 10, + "completion_tokens": 10, + "total_tokens": 20 + } + } + """; + + return System.ClientModel.Primitives.ModelReaderWriter.Read(BinaryData.FromString(json))!; + } +} diff --git a/dotnet/tests/Microsoft.Agents.AI.OpenAI.UnitTests/Extensions/OpenAIAssistantClientExtensionsTests.cs b/dotnet/tests/Microsoft.Agents.AI.OpenAI.UnitTests/Extensions/OpenAIAssistantClientExtensionsTests.cs index 8400adfbcc..44a4b73b52 100644 --- a/dotnet/tests/Microsoft.Agents.AI.OpenAI.UnitTests/Extensions/OpenAIAssistantClientExtensionsTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.OpenAI.UnitTests/Extensions/OpenAIAssistantClientExtensionsTests.cs @@ -23,7 +23,7 @@ public sealed class OpenAIAssistantClientExtensionsTests /// Verify that CreateAIAgent with clientFactory parameter correctly applies the factory. 
/// [Fact] - public void CreateAIAgent_WithClientFactory_AppliesFactoryCorrectly() + public async Task CreateAIAgentAsync_WithClientFactory_AppliesFactoryCorrectlyAsync() { // Arrange var assistantClient = new TestAssistantClient(); @@ -31,7 +31,7 @@ public void CreateAIAgent_WithClientFactory_AppliesFactoryCorrectly() const string ModelId = "test-model"; // Act - var agent = assistantClient.CreateAIAgent( + var agent = await assistantClient.CreateAIAgentAsync( ModelId, instructions: "Test instructions", name: "Test Agent", @@ -53,7 +53,7 @@ public void CreateAIAgent_WithClientFactory_AppliesFactoryCorrectly() /// Verify that CreateAIAgent with clientFactory using AsBuilder pattern works correctly. /// [Fact] - public void CreateAIAgent_WithClientFactoryUsingAsBuilder_AppliesFactoryCorrectly() + public async Task CreateAIAgentAsync_WithClientFactoryUsingAsBuilder_AppliesFactoryCorrectlyAsync() { // Arrange var assistantClient = new TestAssistantClient(); @@ -62,7 +62,7 @@ public void CreateAIAgent_WithClientFactoryUsingAsBuilder_AppliesFactoryCorrectl const string ModelId = "test-model"; // Act - var agent = assistantClient.CreateAIAgent( + var agent = await assistantClient.CreateAIAgentAsync( ModelId, instructions: "Test instructions", clientFactory: (innerClient) => @@ -83,7 +83,7 @@ public void CreateAIAgent_WithClientFactoryUsingAsBuilder_AppliesFactoryCorrectl /// Verify that CreateAIAgent with options and clientFactory parameter correctly applies the factory. 
/// [Fact] - public void CreateAIAgent_WithOptionsAndClientFactory_AppliesFactoryCorrectly() + public async Task CreateAIAgentAsync_WithOptionsAndClientFactory_AppliesFactoryCorrectlyAsync() { // Arrange var assistantClient = new TestAssistantClient(); @@ -97,7 +97,7 @@ public void CreateAIAgent_WithOptionsAndClientFactory_AppliesFactoryCorrectly() }; // Act - var agent = assistantClient.CreateAIAgent( + var agent = await assistantClient.CreateAIAgentAsync( ModelId, options, clientFactory: (innerClient) => testChatClient); @@ -117,14 +117,14 @@ public void CreateAIAgent_WithOptionsAndClientFactory_AppliesFactoryCorrectly() /// Verify that CreateAIAgent without clientFactory works normally. /// [Fact] - public void CreateAIAgent_WithoutClientFactory_WorksNormally() + public async Task CreateAIAgentAsync_WithoutClientFactory_WorksNormallyAsync() { // Arrange var assistantClient = new TestAssistantClient(); const string ModelId = "test-model"; // Act - var agent = assistantClient.CreateAIAgent( + var agent = await assistantClient.CreateAIAgentAsync( ModelId, instructions: "Test instructions", name: "Test Agent"); @@ -142,14 +142,14 @@ public void CreateAIAgent_WithoutClientFactory_WorksNormally() /// Verify that CreateAIAgent with null clientFactory works normally. /// [Fact] - public void CreateAIAgent_WithNullClientFactory_WorksNormally() + public async Task CreateAIAgentAsync_WithNullClientFactory_WorksNormallyAsync() { // Arrange var assistantClient = new TestAssistantClient(); const string ModelId = "test-model"; // Act - var agent = assistantClient.CreateAIAgent( + var agent = await assistantClient.CreateAIAgentAsync( ModelId, instructions: "Test instructions", name: "Test Agent", @@ -168,11 +168,11 @@ public void CreateAIAgent_WithNullClientFactory_WorksNormally() /// Verify that CreateAIAgent throws ArgumentNullException when client is null. 
/// [Fact] - public void CreateAIAgent_WithNullClient_ThrowsArgumentNullException() + public async Task CreateAIAgentAsync_WithNullClient_ThrowsArgumentNullExceptionAsync() { // Act & Assert - var exception = Assert.Throws(() => - ((AssistantClient)null!).CreateAIAgent("test-model")); + var exception = await Assert.ThrowsAsync(() => + ((AssistantClient)null!).CreateAIAgentAsync("test-model")); Assert.Equal("client", exception.ParamName); } @@ -181,14 +181,14 @@ public void CreateAIAgent_WithNullClient_ThrowsArgumentNullException() /// Verify that CreateAIAgent throws ArgumentNullException when model is null. /// [Fact] - public void CreateAIAgent_WithNullModel_ThrowsArgumentNullException() + public async Task CreateAIAgentAsync_WithNullModel_ThrowsArgumentNullExceptionAsync() { // Arrange var assistantClient = new TestAssistantClient(); // Act & Assert - var exception = Assert.Throws(() => - assistantClient.CreateAIAgent(null!)); + var exception = await Assert.ThrowsAsync(() => + assistantClient.CreateAIAgentAsync(null!)); Assert.Equal("model", exception.ParamName); } @@ -197,14 +197,14 @@ public void CreateAIAgent_WithNullModel_ThrowsArgumentNullException() /// Verify that CreateAIAgent with options throws ArgumentNullException when options is null. 
/// [Fact] - public void CreateAIAgent_WithNullOptions_ThrowsArgumentNullException() + public async Task CreateAIAgentAsync_WithNullOptions_ThrowsArgumentNullExceptionAsync() { // Arrange var assistantClient = new TestAssistantClient(); // Act & Assert - var exception = Assert.Throws(() => - assistantClient.CreateAIAgent("test-model", (ChatClientAgentOptions)null!)); + var exception = await Assert.ThrowsAsync(() => + assistantClient.CreateAIAgentAsync("test-model", (ChatClientAgentOptions)null!)); Assert.Equal("options", exception.ParamName); } @@ -286,33 +286,6 @@ public void AsAIAgent_WithAssistantAndOptionsWithNullFields_FallsBackToAssistant Assert.Equal("Original Instructions", agent.Instructions); } - /// - /// Verify that GetAIAgent with agentId and options works correctly. - /// - [Fact] - public void GetAIAgent_WithAgentIdAndOptions_WorksCorrectly() - { - // Arrange - var assistantClient = new TestAssistantClient(); - const string AgentId = "asst_abc123"; - - var options = new ChatClientAgentOptions - { - Name = "Override Name", - Description = "Override Description", - ChatOptions = new() { Instructions = "Override Instructions" } - }; - - // Act - var agent = assistantClient.GetAIAgent(AgentId, options); - - // Assert - Assert.NotNull(agent); - Assert.Equal("Override Name", agent.Name); - Assert.Equal("Override Description", agent.Description); - Assert.Equal("Override Instructions", agent.Instructions); - } - /// /// Verify that GetAIAgentAsync with agentId and options works correctly. /// @@ -423,23 +396,6 @@ public void AsAIAgent_WithNullOptions_ThrowsArgumentNullException() Assert.Equal("options", exception.ParamName); } - /// - /// Verify that GetAIAgent throws ArgumentException when agentId is empty. 
- /// - [Fact] - public void GetAIAgent_WithEmptyAgentId_ThrowsArgumentException() - { - // Arrange - var assistantClient = new TestAssistantClient(); - var options = new ChatClientAgentOptions(); - - // Act & Assert - var exception = Assert.Throws(() => - assistantClient.GetAIAgent(string.Empty, options)); - - Assert.Equal("agentId", exception.ParamName); - } - /// /// Verify that GetAIAgentAsync throws ArgumentException when agentId is empty. /// @@ -461,7 +417,7 @@ public async Task GetAIAgentAsync_WithEmptyAgentId_ThrowsArgumentExceptionAsync( /// Verify that CreateAIAgent with services parameter correctly passes it through to the ChatClientAgent. /// [Fact] - public void CreateAIAgent_WithServices_PassesServicesToAgent() + public async Task CreateAIAgentAsync_WithServices_PassesServicesToAgentAsync() { // Arrange var assistantClient = new TestAssistantClient(); @@ -469,7 +425,7 @@ public void CreateAIAgent_WithServices_PassesServicesToAgent() const string ModelId = "test-model"; // Act - var agent = assistantClient.CreateAIAgent( + var agent = await assistantClient.CreateAIAgentAsync( ModelId, instructions: "Test instructions", name: "Test Agent", @@ -490,7 +446,7 @@ public void CreateAIAgent_WithServices_PassesServicesToAgent() /// Verify that CreateAIAgent with options and services parameter correctly passes it through to the ChatClientAgent. 
/// [Fact] - public void CreateAIAgent_WithOptionsAndServices_PassesServicesToAgent() + public async Task CreateAIAgentAsync_WithOptionsAndServices_PassesServicesToAgentAsync() { // Arrange var assistantClient = new TestAssistantClient(); @@ -503,7 +459,7 @@ public void CreateAIAgent_WithOptionsAndServices_PassesServicesToAgent() }; // Act - var agent = assistantClient.CreateAIAgent(ModelId, options, services: serviceProvider); + var agent = await assistantClient.CreateAIAgentAsync(ModelId, options, services: serviceProvider); // Assert Assert.NotNull(agent); @@ -570,7 +526,7 @@ public async Task GetAIAgentAsync_WithServices_PassesServicesToAgentAsync() /// Verify that CreateAIAgent with both clientFactory and services works correctly. /// [Fact] - public void CreateAIAgent_WithClientFactoryAndServices_AppliesBothCorrectly() + public async Task CreateAIAgentAsync_WithClientFactoryAndServices_AppliesBothCorrectlyAsync() { // Arrange var assistantClient = new TestAssistantClient(); @@ -579,7 +535,7 @@ public void CreateAIAgent_WithClientFactoryAndServices_AppliesBothCorrectly() const string ModelId = "test-model"; // Act - var agent = assistantClient.CreateAIAgent( + var agent = await assistantClient.CreateAIAgentAsync( ModelId, instructions: "Test instructions", name: "Test Agent", @@ -613,6 +569,387 @@ public void CreateAIAgent_WithClientFactoryAndServices_AppliesBothCorrectly() return property?.GetValue(client) as IServiceProvider; } + /// + /// Verify that CreateAIAgentAsync with HostedCodeInterpreterTool properly adds CodeInterpreter tool definition. 
+ /// + [Fact] + public async Task CreateAIAgentAsync_WithHostedCodeInterpreterTool_CreatesAgentWithToolAsync() + { + // Arrange + var assistantClient = new TestAssistantClient(); + const string ModelId = "test-model"; + var options = new ChatClientAgentOptions + { + Name = "Test Agent", + ChatOptions = new ChatOptions + { + Instructions = "Test instructions", + Tools = [new HostedCodeInterpreterTool()] + } + }; + + // Act + var agent = await assistantClient.CreateAIAgentAsync(ModelId, options); + + // Assert + Assert.NotNull(agent); + Assert.Equal("Test Agent", agent.Name); + } + + /// + /// Verify that CreateAIAgentAsync with HostedCodeInterpreterTool with HostedFileContent input properly creates agent. + /// + [Fact] + public async Task CreateAIAgentAsync_WithHostedCodeInterpreterToolAndHostedFileContent_CreatesAgentWithToolResourcesAsync() + { + // Arrange + var assistantClient = new TestAssistantClient(); + const string ModelId = "test-model"; + var codeInterpreterTool = new HostedCodeInterpreterTool + { + Inputs = [new HostedFileContent("test-file-id")] + }; + var options = new ChatClientAgentOptions + { + Name = "Test Agent", + ChatOptions = new ChatOptions + { + Instructions = "Test instructions", + Tools = [codeInterpreterTool] + } + }; + + // Act + var agent = await assistantClient.CreateAIAgentAsync(ModelId, options); + + // Assert + Assert.NotNull(agent); + Assert.Equal("Test Agent", agent.Name); + } + + /// + /// Verify that CreateAIAgentAsync with HostedFileSearchTool properly adds FileSearch tool definition. 
+ /// + [Fact] + public async Task CreateAIAgentAsync_WithHostedFileSearchTool_CreatesAgentWithToolAsync() + { + // Arrange + var assistantClient = new TestAssistantClient(); + const string ModelId = "test-model"; + var options = new ChatClientAgentOptions + { + Name = "Test Agent", + ChatOptions = new ChatOptions + { + Instructions = "Test instructions", + Tools = [new HostedFileSearchTool()] + } + }; + + // Act + var agent = await assistantClient.CreateAIAgentAsync(ModelId, options); + + // Assert + Assert.NotNull(agent); + Assert.Equal("Test Agent", agent.Name); + } + + /// + /// Verify that CreateAIAgentAsync with HostedFileSearchTool with HostedVectorStoreContent input properly creates agent. + /// + [Fact] + public async Task CreateAIAgentAsync_WithHostedFileSearchToolAndHostedVectorStoreContent_CreatesAgentWithToolResourcesAsync() + { + // Arrange + var assistantClient = new TestAssistantClient(); + const string ModelId = "test-model"; + var fileSearchTool = new HostedFileSearchTool + { + MaximumResultCount = 10, + Inputs = [new HostedVectorStoreContent("test-vector-store-id")] + }; + var options = new ChatClientAgentOptions + { + Name = "Test Agent", + ChatOptions = new ChatOptions + { + Instructions = "Test instructions", + Tools = [fileSearchTool] + } + }; + + // Act + var agent = await assistantClient.CreateAIAgentAsync(ModelId, options); + + // Assert + Assert.NotNull(agent); + Assert.Equal("Test Agent", agent.Name); + } + + /// + /// Verify that CreateAIAgentAsync with multiple tools including functions properly creates agent. 
+ /// + [Fact] + public async Task CreateAIAgentAsync_WithMixedTools_CreatesAgentWithAllToolsAsync() + { + // Arrange + var assistantClient = new TestAssistantClient(); + const string ModelId = "test-model"; + var testFunction = AIFunctionFactory.Create(() => "test", "TestFunction", "A test function"); + var options = new ChatClientAgentOptions + { + Name = "Test Agent", + ChatOptions = new ChatOptions + { + Instructions = "Test instructions", + Tools = [new HostedCodeInterpreterTool(), new HostedFileSearchTool(), testFunction] + } + }; + + // Act + var agent = await assistantClient.CreateAIAgentAsync(ModelId, options); + + // Assert + Assert.NotNull(agent); + Assert.Equal("Test Agent", agent.Name); + } + + /// + /// Verify that CreateAIAgentAsync with function tools properly categorizes them as other tools. + /// + [Fact] + public async Task CreateAIAgentAsync_WithFunctionTools_CategorizesAsOtherToolsAsync() + { + // Arrange + var assistantClient = new TestAssistantClient(); + const string ModelId = "test-model"; + var testFunction = AIFunctionFactory.Create(() => "test", "TestFunction", "A test function"); + var options = new ChatClientAgentOptions + { + Name = "Test Agent", + ChatOptions = new ChatOptions + { + Instructions = "Test instructions", + Tools = [testFunction] + } + }; + + // Act + var agent = await assistantClient.CreateAIAgentAsync(ModelId, options); + + // Assert + Assert.NotNull(agent); + Assert.Equal("Test Agent", agent.Name); + } + + /// + /// Verify that AsAIAgent with legacy overload works correctly when assistant instructions are set. 
+ /// + [Fact] + public void AsAIAgent_LegacyOverload_WithAssistantInstructions_SetsInstructions() + { + // Arrange + var assistantClient = new TestAssistantClient(); + var assistant = ModelReaderWriter.Read(BinaryData.FromString("""{"id": "asst_abc123", "name": "Test Agent", "instructions": "Original Instructions"}"""))!; + + // Act + var agent = assistantClient.AsAIAgent(assistant); + + // Assert + Assert.NotNull(agent); + Assert.Equal("Test Agent", agent.Name); + Assert.Equal("Original Instructions", agent.Instructions); + } + + /// + /// Verify that AsAIAgent with legacy overload works correctly when chatOptions with instructions is provided. + /// + [Fact] + public void AsAIAgent_LegacyOverload_WithChatOptionsInstructions_UsesChatOptionsInstructions() + { + // Arrange + var assistantClient = new TestAssistantClient(); + var assistant = ModelReaderWriter.Read(BinaryData.FromString("""{"id": "asst_abc123", "name": "Test Agent", "instructions": "Original Instructions"}"""))!; + var chatOptions = new ChatOptions { Instructions = "Override Instructions" }; + + // Act + var agent = assistantClient.AsAIAgent(assistant, chatOptions); + + // Assert + Assert.NotNull(agent); + Assert.Equal("Test Agent", agent.Name); + Assert.Equal("Override Instructions", agent.Instructions); + } + + /// + /// Verify that AsAIAgent with legacy overload and ClientResult works correctly. 
+ /// + [Fact] + public void AsAIAgent_LegacyOverload_WithClientResult_WorksCorrectly() + { + // Arrange + var assistantClient = new TestAssistantClient(); + var assistant = ModelReaderWriter.Read(BinaryData.FromString("""{"id": "asst_abc123", "name": "Test Agent", "instructions": "Original Instructions"}"""))!; + var clientResult = ClientResult.FromValue(assistant, new FakePipelineResponse()); + + // Act + var agent = assistantClient.AsAIAgent(clientResult); + + // Assert + Assert.NotNull(agent); + Assert.Equal("Test Agent", agent.Name); + } + + /// + /// Verify that AsAIAgent with legacy overload throws ArgumentNullException when assistant client is null. + /// + [Fact] + public void AsAIAgent_LegacyOverload_WithNullAssistantClient_ThrowsArgumentNullException() + { + // Arrange + AssistantClient? assistantClient = null; + var assistant = ModelReaderWriter.Read(BinaryData.FromString("""{"id": "asst_abc123"}"""))!; + + // Act & Assert + var exception = Assert.Throws(() => + assistantClient!.AsAIAgent(assistant)); + + Assert.Equal("assistantClient", exception.ParamName); + } + + /// + /// Verify that AsAIAgent with legacy overload throws ArgumentNullException when assistantMetadata is null. + /// + [Fact] + public void AsAIAgent_LegacyOverload_WithNullAssistantMetadata_ThrowsArgumentNullException() + { + // Arrange + var assistantClient = new TestAssistantClient(); + + // Act & Assert + var exception = Assert.Throws(() => + assistantClient.AsAIAgent((Assistant)null!)); + + Assert.Equal("assistantMetadata", exception.ParamName); + } + + /// + /// Verify that AsAIAgent with legacy overload throws ArgumentNullException when clientResult is null. 
+ /// + [Fact] + public void AsAIAgent_LegacyOverload_WithNullClientResult_ThrowsArgumentNullException() + { + // Arrange + var assistantClient = new TestAssistantClient(); + + // Act & Assert + var exception = Assert.Throws(() => + assistantClient.AsAIAgent(null!, chatOptions: null)); + + Assert.Equal("assistantClientResult", exception.ParamName); + } + + /// + /// Verify that GetAIAgentAsync with legacy overload works correctly. + /// + [Fact] + public async Task GetAIAgentAsync_LegacyOverload_WorksCorrectlyAsync() + { + // Arrange + var assistantClient = new TestAssistantClient(); + const string AgentId = "asst_abc123"; + + // Act + var agent = await assistantClient.GetAIAgentAsync(AgentId); + + // Assert + Assert.NotNull(agent); + Assert.Equal("Original Name", agent.Name); + } + + /// + /// Verify that GetAIAgentAsync with legacy overload throws ArgumentNullException when assistantClient is null. + /// + [Fact] + public async Task GetAIAgentAsync_LegacyOverload_WithNullAssistantClient_ThrowsArgumentNullExceptionAsync() + { + // Arrange + AssistantClient? assistantClient = null; + + // Act & Assert + var exception = await Assert.ThrowsAsync(() => + assistantClient!.GetAIAgentAsync("asst_abc123")); + + Assert.Equal("assistantClient", exception.ParamName); + } + + /// + /// Verify that GetAIAgentAsync with legacy overload throws ArgumentException when agentId is empty. + /// + [Fact] + public async Task GetAIAgentAsync_LegacyOverload_WithEmptyAgentId_ThrowsArgumentExceptionAsync() + { + // Arrange + var assistantClient = new TestAssistantClient(); + + // Act & Assert + var exception = await Assert.ThrowsAsync(() => + assistantClient.GetAIAgentAsync(string.Empty)); + + Assert.Equal("agentId", exception.ParamName); + } + + /// + /// Verify that GetAIAgentAsync with options throws ArgumentNullException when assistantClient is null. 
+ /// + [Fact] + public async Task GetAIAgentAsync_WithOptions_WithNullAssistantClient_ThrowsArgumentNullExceptionAsync() + { + // Arrange + AssistantClient? assistantClient = null; + var options = new ChatClientAgentOptions(); + + // Act & Assert + var exception = await Assert.ThrowsAsync(() => + assistantClient!.GetAIAgentAsync("asst_abc123", options)); + + Assert.Equal("assistantClient", exception.ParamName); + } + + /// + /// Verify that GetAIAgentAsync with options throws ArgumentNullException when options is null. + /// + [Fact] + public async Task GetAIAgentAsync_WithOptions_WithNullOptions_ThrowsArgumentNullExceptionAsync() + { + // Arrange + var assistantClient = new TestAssistantClient(); + + // Act & Assert + var exception = await Assert.ThrowsAsync(() => + assistantClient.GetAIAgentAsync("asst_abc123", (ChatClientAgentOptions)null!)); + + Assert.Equal("options", exception.ParamName); + } + + /// + /// Verify that AsAIAgent with options throws ArgumentNullException when assistantClient is null. + /// + [Fact] + public void AsAIAgent_WithOptions_WithNullAssistantClient_ThrowsArgumentNullException() + { + // Arrange + AssistantClient? assistantClient = null; + var assistant = ModelReaderWriter.Read(BinaryData.FromString("""{"id": "asst_abc123"}"""))!; + var options = new ChatClientAgentOptions(); + + // Act & Assert + var exception = Assert.Throws(() => + assistantClient!.AsAIAgent(assistant, options)); + + Assert.Equal("assistantClient", exception.ParamName); + } + /// /// Creates a test AssistantClient implementation for testing. /// @@ -622,14 +959,9 @@ public TestAssistantClient() { } - public override ClientResult CreateAssistant(string model, AssistantCreationOptions? options = null, CancellationToken cancellationToken = default) + public override Task> CreateAssistantAsync(string model, AssistantCreationOptions? 
options = null, CancellationToken cancellationToken = default) { - return ClientResult.FromValue(ModelReaderWriter.Read(BinaryData.FromString("""{"id": "asst_abc123"}""")), new FakePipelineResponse())!; - } - - public override ClientResult GetAssistant(string assistantId, CancellationToken cancellationToken = default) - { - return ClientResult.FromValue(ModelReaderWriter.Read(BinaryData.FromString("""{"id": "asst_abc123", "name": "Original Name", "description": "Original Description", "instructions": "Original Instructions"}""")), new FakePipelineResponse())!; + return Task.FromResult>(ClientResult.FromValue(ModelReaderWriter.Read(BinaryData.FromString("""{"id": "asst_abc123"}""")), new FakePipelineResponse())!); } public override async Task> GetAssistantAsync(string assistantId, CancellationToken cancellationToken = default) diff --git a/dotnet/tests/Microsoft.Agents.AI.Purview.UnitTests/PurviewWrapperTests.cs b/dotnet/tests/Microsoft.Agents.AI.Purview.UnitTests/PurviewWrapperTests.cs index ed012669d6..316ea14057 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Purview.UnitTests/PurviewWrapperTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Purview.UnitTests/PurviewWrapperTests.cs @@ -18,14 +18,13 @@ namespace Microsoft.Agents.AI.Purview.UnitTests; public sealed class PurviewWrapperTests : IDisposable { private readonly Mock _mockProcessor; - private readonly IChannelHandler _channelHandler; + private readonly IBackgroundJobRunner _backgroundJobRunner; private readonly PurviewSettings _settings; private readonly PurviewWrapper _wrapper; public PurviewWrapperTests() { this._mockProcessor = new Mock(); - this._channelHandler = Mock.Of(); this._settings = new PurviewSettings("TestApp") { TenantId = "tenant-123", @@ -33,7 +32,8 @@ public PurviewWrapperTests() BlockedPromptMessage = "Prompt blocked by policy", BlockedResponseMessage = "Response blocked by policy" }; - this._wrapper = new PurviewWrapper(this._mockProcessor.Object, this._settings, NullLogger.Instance, 
this._channelHandler); + this._backgroundJobRunner = Mock.Of(); + this._wrapper = new PurviewWrapper(this._mockProcessor.Object, this._settings, NullLogger.Instance, this._backgroundJobRunner); } #region ProcessChatContentAsync Tests @@ -151,7 +151,7 @@ public async Task ProcessChatContentAsync_WithIgnoreExceptions_ContinuesOnPrompt IgnoreExceptions = true, PurviewAppLocation = new PurviewAppLocation(PurviewLocationType.Application, "app-123") }; - var wrapper = new PurviewWrapper(this._mockProcessor.Object, settingsWithIgnore, NullLogger.Instance, this._channelHandler); + var wrapper = new PurviewWrapper(this._mockProcessor.Object, settingsWithIgnore, NullLogger.Instance, this._backgroundJobRunner); var messages = new List { @@ -371,7 +371,7 @@ public async Task ProcessAgentContentAsync_WithIgnoreExceptions_ContinuesOnError IgnoreExceptions = true, PurviewAppLocation = new PurviewAppLocation(PurviewLocationType.Application, "app-123") }; - var wrapper = new PurviewWrapper(this._mockProcessor.Object, settingsWithIgnore, NullLogger.Instance, this._channelHandler); + var wrapper = new PurviewWrapper(this._mockProcessor.Object, settingsWithIgnore, NullLogger.Instance, this._backgroundJobRunner); var messages = new List { diff --git a/dotnet/tests/Microsoft.Agents.AI.UnitTests/AgentExtensionsTests.cs b/dotnet/tests/Microsoft.Agents.AI.UnitTests/AgentExtensionsTests.cs index 43039a7b76..e0c5417674 100644 --- a/dotnet/tests/Microsoft.Agents.AI.UnitTests/AgentExtensionsTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.UnitTests/AgentExtensionsTests.cs @@ -277,6 +277,48 @@ public async Task CreateFromAgent_InvokeWithComplexResponseFromAgentAsync_Return Assert.Equal("Complex response", result.ToString()); } + [Fact] + public async Task CreateFromAgent_InvokeWithAdditionalProperties_PropagatesAdditionalPropertiesToChildAgentAsync() + { + // Arrange + var expectedResponse = new AgentResponse + { + AgentId = "agent-123", + ResponseId = "response-456", + CreatedAt = 
DateTimeOffset.UtcNow, + Messages = { new ChatMessage(ChatRole.Assistant, "Complex response") } + }; + + var testAgent = new TestAgent("TestAgent", "Test description", expectedResponse); + var aiFunction = testAgent.AsAIFunction(); + + // Use reflection to set the protected CurrentContext property + var context = new FunctionInvocationContext() + { + Options = new() + { + AdditionalProperties = new AdditionalPropertiesDictionary + { + { "customProperty1", "value1" }, + { "customProperty2", 42 } + } + } + }; + SetFunctionInvokingChatClientCurrentContext(context); + + // Act + var arguments = new AIFunctionArguments() { ["query"] = "Test query" }; + var result = await aiFunction.InvokeAsync(arguments); + + // Assert + Assert.NotNull(result); + Assert.Equal("Complex response", result.ToString()); + Assert.NotNull(testAgent.ReceivedAgentRunOptions); + Assert.NotNull(testAgent.ReceivedAgentRunOptions!.AdditionalProperties); + Assert.Equal("value1", testAgent.ReceivedAgentRunOptions!.AdditionalProperties["customProperty1"]); + Assert.Equal(42, testAgent.ReceivedAgentRunOptions!.AdditionalProperties["customProperty2"]); + } + [Theory] [InlineData("MyAgent", "MyAgent")] [InlineData("Agent123", "Agent123")] @@ -302,6 +344,22 @@ public void CreateFromAgent_SanitizesAgentName(string agentName, string expected Assert.Equal(expectedFunctionName, result.Name); } + /// + /// Uses reflection to set the protected static CurrentContext property on FunctionInvokingChatClient. + /// + private static void SetFunctionInvokingChatClientCurrentContext(FunctionInvocationContext? 
context) + { + // Access the private static field _currentContext which is an AsyncLocal + var currentContextField = typeof(FunctionInvokingChatClient).GetField( + "_currentContext", + System.Reflection.BindingFlags.NonPublic | System.Reflection.BindingFlags.Static); + + if (currentContextField?.GetValue(null) is AsyncLocal asyncLocal) + { + asyncLocal.Value = context; + } + } + /// /// Test implementation of AIAgent for testing purposes. /// @@ -334,6 +392,7 @@ public override ValueTask DeserializeThreadAsync(JsonElement serial public override string? Description { get; } public List ReceivedMessages { get; } = []; + public AgentRunOptions? ReceivedAgentRunOptions { get; private set; } public CancellationToken LastCancellationToken { get; private set; } public int RunAsyncCallCount { get; private set; } @@ -346,6 +405,7 @@ protected override Task RunCoreAsync( this.RunAsyncCallCount++; this.LastCancellationToken = cancellationToken; this.ReceivedMessages.AddRange(messages); + this.ReceivedAgentRunOptions = options; if (this._exceptionToThrow is not null) { diff --git a/dotnet/tests/Microsoft.Agents.AI.UnitTests/ChatClient/ChatClientAgentContinuationTokenTests.cs b/dotnet/tests/Microsoft.Agents.AI.UnitTests/ChatClient/ChatClientAgentContinuationTokenTests.cs index a2add9634b..080fd18a95 100644 --- a/dotnet/tests/Microsoft.Agents.AI.UnitTests/ChatClient/ChatClientAgentContinuationTokenTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.UnitTests/ChatClient/ChatClientAgentContinuationTokenTests.cs @@ -5,7 +5,7 @@ using System.Text.Json; using Microsoft.Extensions.AI; -namespace Microsoft.Agents.AI.UnitTests.ChatClient; +namespace Microsoft.Agents.AI.UnitTests; public class ChatClientAgentContinuationTokenTests { diff --git a/dotnet/tests/Microsoft.Agents.AI.UnitTests/ChatClient/ChatClientAgentOptionsTests.cs b/dotnet/tests/Microsoft.Agents.AI.UnitTests/ChatClient/ChatClientAgentOptionsTests.cs index 896a4ceba5..8502550d2c 100644 --- 
a/dotnet/tests/Microsoft.Agents.AI.UnitTests/ChatClient/ChatClientAgentOptionsTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.UnitTests/ChatClient/ChatClientAgentOptionsTests.cs @@ -23,7 +23,7 @@ public void DefaultConstructor_InitializesWithNullValues() Assert.Null(options.Name); Assert.Null(options.Description); Assert.Null(options.ChatOptions); - Assert.Null(options.ChatMessageStoreFactory); + Assert.Null(options.ChatHistoryProviderFactory); Assert.Null(options.AIContextProviderFactory); } @@ -37,7 +37,7 @@ public void Constructor_WithNullValues_SetsPropertiesCorrectly() Assert.Null(options.Name); Assert.Null(options.Description); Assert.Null(options.AIContextProviderFactory); - Assert.Null(options.ChatMessageStoreFactory); + Assert.Null(options.ChatHistoryProviderFactory); Assert.NotNull(options.ChatOptions); Assert.Null(options.ChatOptions.Instructions); Assert.Null(options.ChatOptions.Tools); @@ -117,8 +117,8 @@ public void Clone_CreatesDeepCopyWithSameValues() const string Description = "Test description"; var tools = new List { AIFunctionFactory.Create(() => "test") }; - static ValueTask ChatMessageStoreFactoryAsync( - ChatClientAgentOptions.ChatMessageStoreFactoryContext ctx, CancellationToken ct) => new(new Mock().Object); + static ValueTask ChatHistoryProviderFactoryAsync( + ChatClientAgentOptions.ChatHistoryProviderFactoryContext ctx, CancellationToken ct) => new(new Mock().Object); static ValueTask AIContextProviderFactoryAsync( ChatClientAgentOptions.AIContextProviderFactoryContext ctx, CancellationToken ct) => new(new Mock().Object); @@ -129,7 +129,7 @@ static ValueTask AIContextProviderFactoryAsync( Description = Description, ChatOptions = new() { Tools = tools }, Id = "test-id", - ChatMessageStoreFactory = ChatMessageStoreFactoryAsync, + ChatHistoryProviderFactory = ChatHistoryProviderFactoryAsync, AIContextProviderFactory = AIContextProviderFactoryAsync }; @@ -141,7 +141,7 @@ static ValueTask AIContextProviderFactoryAsync( 
Assert.Equal(original.Id, clone.Id); Assert.Equal(original.Name, clone.Name); Assert.Equal(original.Description, clone.Description); - Assert.Same(original.ChatMessageStoreFactory, clone.ChatMessageStoreFactory); + Assert.Same(original.ChatHistoryProviderFactory, clone.ChatHistoryProviderFactory); Assert.Same(original.AIContextProviderFactory, clone.AIContextProviderFactory); // ChatOptions should be cloned, not the same reference @@ -170,7 +170,7 @@ public void Clone_WithoutProvidingChatOptions_ClonesCorrectly() Assert.Equal(original.Name, clone.Name); Assert.Equal(original.Description, clone.Description); Assert.Null(original.ChatOptions); - Assert.Null(clone.ChatMessageStoreFactory); + Assert.Null(clone.ChatHistoryProviderFactory); Assert.Null(clone.AIContextProviderFactory); } diff --git a/dotnet/tests/Microsoft.Agents.AI.UnitTests/ChatClient/ChatClientAgentTests.cs b/dotnet/tests/Microsoft.Agents.AI.UnitTests/ChatClient/ChatClientAgentTests.cs index 546dc258cd..7395b22ebf 100644 --- a/dotnet/tests/Microsoft.Agents.AI.UnitTests/ChatClient/ChatClientAgentTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.UnitTests/ChatClient/ChatClientAgentTests.cs @@ -222,10 +222,10 @@ public async Task RunAsyncSetsAuthorNameOnAllResponseMessagesAsync(string? autho } /// - /// Verify that RunAsync works with existing thread and can retreive messages if the thread has a MessageStore. + /// Verify that RunAsync works with existing thread and can retreive messages if the thread has a ChatHistoryProvider. 
/// [Fact] - public async Task RunAsyncRetrievesMessagesFromThreadWhenThreadStoresMessagesThreadAsync() + public async Task RunAsyncRetrievesMessagesFromThreadWhenThreadHasChatHistoryProviderAsync() { // Arrange Mock mockService = new(); @@ -310,302 +310,6 @@ public async Task RunAsyncWorksWithEmptyMessagesWhenNoMessagesProvidedAsync() Assert.Empty(capturedMessages); } - /// - /// Verify that RunAsync does not throw when providing a thread with a ThreadId and a Conversationid - /// via ChatOptions and the two are the same. - /// - [Fact] - public async Task RunAsyncDoesNotThrowWhenSpecifyingTwoSameThreadIdsAsync() - { - // Arrange - var chatOptions = new ChatOptions { ConversationId = "ConvId" }; - Mock mockService = new(); - mockService.Setup( - s => s.GetResponseAsync( - It.IsAny>(), - It.Is(opts => opts.ConversationId == "ConvId"), - It.IsAny())).ReturnsAsync(new ChatResponse([new(ChatRole.Assistant, "response")]) { ConversationId = "ConvId" }); - - ChatClientAgent agent = new(mockService.Object, options: new() { ChatOptions = new() { Instructions = "test instructions" } }); - - ChatClientAgentThread thread = new() { ConversationId = "ConvId" }; - - // Act & Assert - var response = await agent.RunAsync([new(ChatRole.User, "test")], thread, options: new ChatClientAgentRunOptions(chatOptions)); - Assert.NotNull(response); - } - - /// - /// Verify that RunAsync throws when providing a thread with a ThreadId and a Conversationid - /// via ChatOptions and the two are different. 
- /// - [Fact] - public async Task RunAsyncThrowsWhenSpecifyingTwoDifferentThreadIdsAsync() - { - // Arrange - var chatOptions = new ChatOptions { ConversationId = "ConvId" }; - Mock mockService = new(); - - ChatClientAgent agent = new(mockService.Object, options: new() { ChatOptions = new() { Instructions = "test instructions" } }); - - ChatClientAgentThread thread = new() { ConversationId = "ThreadId" }; - - // Act & Assert - await Assert.ThrowsAsync(() => agent.RunAsync([new(ChatRole.User, "test")], thread, options: new ChatClientAgentRunOptions(chatOptions))); - } - - /// - /// Verify that RunAsync clones the ChatOptions when providing a thread with a ThreadId and a ChatOptions. - /// - [Fact] - public async Task RunAsyncClonesChatOptionsToAddThreadIdAsync() - { - // Arrange - var chatOptions = new ChatOptions { MaxOutputTokens = 100 }; - Mock mockService = new(); - mockService.Setup( - s => s.GetResponseAsync( - It.IsAny>(), - It.Is(opts => opts.MaxOutputTokens == 100 && opts.ConversationId == "ConvId"), - It.IsAny())).ReturnsAsync(new ChatResponse([new(ChatRole.Assistant, "response")]) { ConversationId = "ConvId" }); - - ChatClientAgent agent = new(mockService.Object, options: new() { ChatOptions = new() { Instructions = "test instructions" } }); - - ChatClientAgentThread thread = new() { ConversationId = "ConvId" }; - - // Act - await agent.RunAsync([new(ChatRole.User, "test")], thread, options: new ChatClientAgentRunOptions(chatOptions)); - - // Assert - Assert.Null(chatOptions.ConversationId); - } - - /// - /// Verify that RunAsync throws if a thread is provided that uses a conversation id already, but the service does not return one on invoke. 
- /// - [Fact] - public async Task RunAsyncThrowsForMissingConversationIdWithConversationIdThreadAsync() - { - // Arrange - Mock mockService = new(); - mockService.Setup( - s => s.GetResponseAsync( - It.IsAny>(), - It.IsAny(), - It.IsAny())).ReturnsAsync(new ChatResponse([new(ChatRole.Assistant, "response")])); - - ChatClientAgent agent = new(mockService.Object, options: new() { ChatOptions = new() { Instructions = "test instructions" } }); - - ChatClientAgentThread thread = new() { ConversationId = "ConvId" }; - - // Act & Assert - await Assert.ThrowsAsync(() => agent.RunAsync([new(ChatRole.User, "test")], thread)); - } - - /// - /// Verify that RunAsync sets the ConversationId on the thread when the service returns one. - /// - [Fact] - public async Task RunAsyncSetsConversationIdOnThreadWhenReturnedByChatClientAsync() - { - // Arrange - Mock mockService = new(); - mockService.Setup( - s => s.GetResponseAsync( - It.IsAny>(), - It.IsAny(), - It.IsAny())).ReturnsAsync(new ChatResponse([new(ChatRole.Assistant, "response")]) { ConversationId = "ConvId" }); - ChatClientAgent agent = new(mockService.Object, options: new() { ChatOptions = new() { Instructions = "test instructions" } }); - ChatClientAgentThread thread = new(); - - // Act - await agent.RunAsync([new(ChatRole.User, "test")], thread); - - // Assert - Assert.Equal("ConvId", thread.ConversationId); - } - - /// - /// Verify that RunAsync uses the ChatMessageStore factory when the chat client returns no conversation id. 
- /// - [Fact] - public async Task RunAsyncUsesChatMessageStoreWhenNoConversationIdReturnedByChatClientAsync() - { - // Arrange - Mock mockService = new(); - mockService.Setup( - s => s.GetResponseAsync( - It.IsAny>(), - It.IsAny(), - It.IsAny())).ReturnsAsync(new ChatResponse([new(ChatRole.Assistant, "response")])); - Mock>> mockFactory = new(); - mockFactory.Setup(f => f(It.IsAny(), It.IsAny())).ReturnsAsync(new InMemoryChatMessageStore()); - ChatClientAgent agent = new(mockService.Object, options: new() - { - ChatOptions = new() { Instructions = "test instructions" }, - ChatMessageStoreFactory = mockFactory.Object - }); - - // Act - ChatClientAgentThread? thread = await agent.GetNewThreadAsync() as ChatClientAgentThread; - await agent.RunAsync([new(ChatRole.User, "test")], thread); - - // Assert - var messageStore = Assert.IsType(thread!.MessageStore); - Assert.Equal(2, messageStore.Count); - Assert.Equal("test", messageStore[0].Text); - Assert.Equal("response", messageStore[1].Text); - mockFactory.Verify(f => f(It.IsAny(), It.IsAny()), Times.Once); - } - - /// - /// Verify that RunAsync uses the default InMemoryChatMessageStore when the chat client returns no conversation id. - /// - [Fact] - public async Task RunAsyncUsesDefaultInMemoryChatMessageStoreWhenNoConversationIdReturnedByChatClientAsync() - { - // Arrange - Mock mockService = new(); - mockService.Setup( - s => s.GetResponseAsync( - It.IsAny>(), - It.IsAny(), - It.IsAny())).ReturnsAsync(new ChatResponse([new(ChatRole.Assistant, "response")])); - ChatClientAgent agent = new(mockService.Object, options: new() - { - ChatOptions = new() { Instructions = "test instructions" }, - }); - - // Act - ChatClientAgentThread? 
thread = await agent.GetNewThreadAsync() as ChatClientAgentThread; - await agent.RunAsync([new(ChatRole.User, "test")], thread); - - // Assert - var messageStore = Assert.IsType(thread!.MessageStore); - Assert.Equal(2, messageStore.Count); - Assert.Equal("test", messageStore[0].Text); - Assert.Equal("response", messageStore[1].Text); - } - - /// - /// Verify that RunAsync uses the ChatMessageStore factory when the chat client returns no conversation id. - /// - [Fact] - public async Task RunAsyncUsesChatMessageStoreFactoryWhenProvidedAndNoConversationIdReturnedByChatClientAsync() - { - // Arrange - Mock mockService = new(); - mockService.Setup( - s => s.GetResponseAsync( - It.IsAny>(), - It.IsAny(), - It.IsAny())).ReturnsAsync(new ChatResponse([new(ChatRole.Assistant, "response")])); - - Mock mockChatMessageStore = new(); - mockChatMessageStore.Setup(s => s.InvokingAsync( - It.IsAny(), - It.IsAny())).ReturnsAsync([new ChatMessage(ChatRole.User, "Existing Chat History")]); - mockChatMessageStore.Setup(s => s.InvokedAsync( - It.IsAny(), - It.IsAny())).Returns(new ValueTask()); - - Mock>> mockFactory = new(); - mockFactory.Setup(f => f(It.IsAny(), It.IsAny())).ReturnsAsync(mockChatMessageStore.Object); - - ChatClientAgent agent = new(mockService.Object, options: new() - { - ChatOptions = new() { Instructions = "test instructions" }, - ChatMessageStoreFactory = mockFactory.Object - }); - - // Act - ChatClientAgentThread? 
thread = await agent.GetNewThreadAsync() as ChatClientAgentThread; - await agent.RunAsync([new(ChatRole.User, "test")], thread); - - // Assert - Assert.IsType(thread!.MessageStore, exactMatch: false); - mockService.Verify( - x => x.GetResponseAsync( - It.Is>(msgs => msgs.Count() == 2 && msgs.Any(m => m.Text == "Existing Chat History") && msgs.Any(m => m.Text == "test")), - It.IsAny(), - It.IsAny()), - Times.Once); - mockChatMessageStore.Verify(s => s.InvokingAsync( - It.Is(x => x.RequestMessages.Count() == 1), - It.IsAny()), - Times.Once); - mockChatMessageStore.Verify(s => s.InvokedAsync( - It.Is(x => x.RequestMessages.Count() == 1 && x.ChatMessageStoreMessages.Count() == 1 && x.ResponseMessages!.Count() == 1), - It.IsAny()), - Times.Once); - mockFactory.Verify(f => f(It.IsAny(), It.IsAny()), Times.Once); - } - - /// - /// Verify that RunAsync notifies the ChatMessageStore on failure. - /// - [Fact] - public async Task RunAsyncNotifiesChatMessageStoreOnFailureAsync() - { - // Arrange - Mock mockService = new(); - mockService.Setup( - s => s.GetResponseAsync( - It.IsAny>(), - It.IsAny(), - It.IsAny())).Throws(new InvalidOperationException("Test Error")); - - Mock mockChatMessageStore = new(); - - Mock>> mockFactory = new(); - mockFactory.Setup(f => f(It.IsAny(), It.IsAny())).ReturnsAsync(mockChatMessageStore.Object); - - ChatClientAgent agent = new(mockService.Object, options: new() - { - ChatOptions = new() { Instructions = "test instructions" }, - ChatMessageStoreFactory = mockFactory.Object - }); - - // Act - ChatClientAgentThread? 
thread = await agent.GetNewThreadAsync() as ChatClientAgentThread; - await Assert.ThrowsAsync(() => agent.RunAsync([new(ChatRole.User, "test")], thread)); - - // Assert - Assert.IsType(thread!.MessageStore, exactMatch: false); - mockChatMessageStore.Verify(s => s.InvokedAsync( - It.Is(x => x.RequestMessages.Count() == 1 && x.ResponseMessages == null && x.InvokeException!.Message == "Test Error"), - It.IsAny()), - Times.Once); - mockFactory.Verify(f => f(It.IsAny(), It.IsAny()), Times.Once); - } - - /// - /// Verify that RunAsync throws when a ChatMessageStore Factory is provided and the chat client returns a conversation id. - /// - [Fact] - public async Task RunAsyncThrowsWhenChatMessageStoreFactoryProvidedAndConversationIdReturnedByChatClientAsync() - { - // Arrange - Mock mockService = new(); - mockService.Setup( - s => s.GetResponseAsync( - It.IsAny>(), - It.IsAny(), - It.IsAny())).ReturnsAsync(new ChatResponse([new(ChatRole.Assistant, "response")]) { ConversationId = "ConvId" }); - Mock>> mockFactory = new(); - mockFactory.Setup(f => f(It.IsAny(), It.IsAny())).ReturnsAsync(new InMemoryChatMessageStore()); - ChatClientAgent agent = new(mockService.Object, options: new() - { - ChatOptions = new() { Instructions = "test instructions" }, - ChatMessageStoreFactory = mockFactory.Object - }); - - // Act & Assert - ChatClientAgentThread? thread = await agent.GetNewThreadAsync() as ChatClientAgentThread; - var exception = await Assert.ThrowsAsync(() => agent.RunAsync([new(ChatRole.User, "test")], thread)); - Assert.Equal("Only the ConversationId or MessageStore may be set, but not both and switching from one to another is not supported.", exception.Message); - } - /// /// Verify that RunAsync invokes any provided AIContextProvider and uses the result. 
/// @@ -668,11 +372,11 @@ public async Task RunAsyncInvokesAIContextProviderAndUsesResultAsync() Assert.Contains(capturedTools, t => t.Name == "context provider function"); // Verify that the thread was updated with the ai context provider, input and response messages - var messageStore = Assert.IsType(thread!.MessageStore); - Assert.Equal(3, messageStore.Count); - Assert.Equal("user message", messageStore[0].Text); - Assert.Equal("context provider message", messageStore[1].Text); - Assert.Equal("response", messageStore[2].Text); + var chatHistoryProvider = Assert.IsType(thread!.ChatHistoryProvider); + Assert.Equal(3, chatHistoryProvider.Count); + Assert.Equal("user message", chatHistoryProvider[0].Text); + Assert.Equal("context provider message", chatHistoryProvider[1].Text); + Assert.Equal("response", chatHistoryProvider[2].Text); mockProvider.Verify(p => p.InvokingAsync(It.IsAny(), It.IsAny()), Times.Once); mockProvider.Verify(p => p.InvokedAsync(It.Is(x => @@ -1566,10 +1270,10 @@ public async Task VerifyChatClientAgentStreamingAsync() } /// - /// Verify that RunStreamingAsync uses the ChatMessageStore factory when the chat client returns no conversation id. + /// Verify that RunStreamingAsync uses the ChatHistoryProvider factory when the chat client returns no conversation id. 
/// [Fact] - public async Task RunStreamingAsyncUsesChatMessageStoreWhenNoConversationIdReturnedByChatClientAsync() + public async Task RunStreamingAsyncUsesChatHistoryProviderWhenNoConversationIdReturnedByChatClientAsync() { // Arrange Mock mockService = new(); @@ -1583,12 +1287,12 @@ public async Task RunStreamingAsyncUsesChatMessageStoreWhenNoConversationIdRetur It.IsAny>(), It.IsAny(), It.IsAny())).Returns(ToAsyncEnumerableAsync(returnUpdates)); - Mock>> mockFactory = new(); - mockFactory.Setup(f => f(It.IsAny(), It.IsAny())).ReturnsAsync(new InMemoryChatMessageStore()); + Mock>> mockFactory = new(); + mockFactory.Setup(f => f(It.IsAny(), It.IsAny())).ReturnsAsync(new InMemoryChatHistoryProvider()); ChatClientAgent agent = new(mockService.Object, options: new() { ChatOptions = new() { Instructions = "test instructions" }, - ChatMessageStoreFactory = mockFactory.Object + ChatHistoryProviderFactory = mockFactory.Object }); // Act @@ -1596,18 +1300,18 @@ public async Task RunStreamingAsyncUsesChatMessageStoreWhenNoConversationIdRetur await agent.RunStreamingAsync([new(ChatRole.User, "test")], thread).ToListAsync(); // Assert - var messageStore = Assert.IsType(thread!.MessageStore); - Assert.Equal(2, messageStore.Count); - Assert.Equal("test", messageStore[0].Text); - Assert.Equal("what?", messageStore[1].Text); - mockFactory.Verify(f => f(It.IsAny(), It.IsAny()), Times.Once); + var chatHistoryProvider = Assert.IsType(thread!.ChatHistoryProvider); + Assert.Equal(2, chatHistoryProvider.Count); + Assert.Equal("test", chatHistoryProvider[0].Text); + Assert.Equal("what?", chatHistoryProvider[1].Text); + mockFactory.Verify(f => f(It.IsAny(), It.IsAny()), Times.Once); } /// - /// Verify that RunStreamingAsync throws when a ChatMessageStore factory is provided and the chat client returns a conversation id. + /// Verify that RunStreamingAsync throws when a factory is provided and the chat client returns a conversation id. 
/// [Fact] - public async Task RunStreamingAsyncThrowsWhenChatMessageStoreFactoryProvidedAndConversationIdReturnedByChatClientAsync() + public async Task RunStreamingAsyncThrowsWhenChatHistoryProviderFactoryProvidedAndConversationIdReturnedByChatClientAsync() { // Arrange Mock mockService = new(); @@ -1621,18 +1325,18 @@ public async Task RunStreamingAsyncThrowsWhenChatMessageStoreFactoryProvidedAndC It.IsAny>(), It.IsAny(), It.IsAny())).Returns(ToAsyncEnumerableAsync(returnUpdates)); - Mock>> mockFactory = new(); - mockFactory.Setup(f => f(It.IsAny(), It.IsAny())).ReturnsAsync(new InMemoryChatMessageStore()); + Mock>> mockFactory = new(); + mockFactory.Setup(f => f(It.IsAny(), It.IsAny())).ReturnsAsync(new InMemoryChatHistoryProvider()); ChatClientAgent agent = new(mockService.Object, options: new() { ChatOptions = new() { Instructions = "test instructions" }, - ChatMessageStoreFactory = mockFactory.Object + ChatHistoryProviderFactory = mockFactory.Object }); // Act & Assert ChatClientAgentThread? 
thread = await agent.GetNewThreadAsync() as ChatClientAgentThread; var exception = await Assert.ThrowsAsync(async () => await agent.RunStreamingAsync([new(ChatRole.User, "test")], thread).ToListAsync()); - Assert.Equal("Only the ConversationId or MessageStore may be set, but not both and switching from one to another is not supported.", exception.Message); + Assert.Equal("Only the ConversationId or ChatHistoryProvider may be set, but not both and switching from one to another is not supported.", exception.Message); } /// @@ -1704,11 +1408,11 @@ public async Task RunStreamingAsyncInvokesAIContextProviderAndUsesResultAsync() Assert.Contains(capturedTools, t => t.Name == "context provider function"); // Verify that the thread was updated with the input, ai context provider, and response messages - var messageStore = Assert.IsType(thread!.MessageStore); - Assert.Equal(3, messageStore.Count); - Assert.Equal("user message", messageStore[0].Text); - Assert.Equal("context provider message", messageStore[1].Text); - Assert.Equal("response", messageStore[2].Text); + var chatHistoryProvider = Assert.IsType(thread!.ChatHistoryProvider); + Assert.Equal(3, chatHistoryProvider.Count); + Assert.Equal("user message", chatHistoryProvider[0].Text); + Assert.Equal("context provider message", chatHistoryProvider[1].Text); + Assert.Equal("response", chatHistoryProvider[2].Text); mockProvider.Verify(p => p.InvokingAsync(It.IsAny(), It.IsAny()), Times.Once); mockProvider.Verify(p => p.InvokedAsync(It.Is(x => diff --git a/dotnet/tests/Microsoft.Agents.AI.UnitTests/ChatClient/ChatClientAgentThreadTests.cs b/dotnet/tests/Microsoft.Agents.AI.UnitTests/ChatClient/ChatClientAgentThreadTests.cs index 57af3b6449..ef5eb19c37 100644 --- a/dotnet/tests/Microsoft.Agents.AI.UnitTests/ChatClient/ChatClientAgentThreadTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.UnitTests/ChatClient/ChatClientAgentThreadTests.cs @@ -24,7 +24,7 @@ public void ConstructorSetsDefaults() // Assert 
Assert.Null(thread.ConversationId); - Assert.Null(thread.MessageStore); + Assert.Null(thread.ChatHistoryProvider); } [Fact] @@ -39,52 +39,52 @@ public void SetConversationIdRoundtrips() // Assert Assert.Equal(ConversationId, thread.ConversationId); - Assert.Null(thread.MessageStore); + Assert.Null(thread.ChatHistoryProvider); } [Fact] - public void SetChatMessageStoreRoundtrips() + public void SetChatHistoryProviderRoundtrips() { // Arrange var thread = new ChatClientAgentThread(); - var messageStore = new InMemoryChatMessageStore(); + var chatHistoryProvider = new InMemoryChatHistoryProvider(); // Act - thread.MessageStore = messageStore; + thread.ChatHistoryProvider = chatHistoryProvider; // Assert - Assert.Same(messageStore, thread.MessageStore); + Assert.Same(chatHistoryProvider, thread.ChatHistoryProvider); Assert.Null(thread.ConversationId); } [Fact] - public void SetConversationIdThrowsWhenMessageStoreIsSet() + public void SetConversationIdThrowsWhenChatHistoryProviderIsSet() { // Arrange var thread = new ChatClientAgentThread { - MessageStore = new InMemoryChatMessageStore() + ChatHistoryProvider = new InMemoryChatHistoryProvider() }; // Act & Assert var exception = Assert.Throws(() => thread.ConversationId = "new-thread-id"); - Assert.Equal("Only the ConversationId or MessageStore may be set, but not both and switching from one to another is not supported.", exception.Message); - Assert.NotNull(thread.MessageStore); + Assert.Equal("Only the ConversationId or ChatHistoryProvider may be set, but not both and switching from one to another is not supported.", exception.Message); + Assert.NotNull(thread.ChatHistoryProvider); } [Fact] - public void SetChatMessageStoreThrowsWhenConversationIdIsSet() + public void SetChatHistoryProviderThrowsWhenConversationIdIsSet() { // Arrange var thread = new ChatClientAgentThread { ConversationId = "existing-thread-id" }; - var store = new InMemoryChatMessageStore(); + var provider = new InMemoryChatHistoryProvider(); // Act 
& Assert - var exception = Assert.Throws(() => thread.MessageStore = store); - Assert.Equal("Only the ConversationId or MessageStore may be set, but not both and switching from one to another is not supported.", exception.Message); + var exception = Assert.Throws(() => thread.ChatHistoryProvider = provider); + Assert.Equal("Only the ConversationId or ChatHistoryProvider may be set, but not both and switching from one to another is not supported.", exception.Message); Assert.NotNull(thread.ConversationId); } @@ -98,7 +98,7 @@ public async Task VerifyDeserializeWithMessagesAsync() // Arrange var json = JsonSerializer.Deserialize(""" { - "storeState": { "messages": [{"authorName": "testAuthor"}] } + "chatHistoryProviderState": { "messages": [{"authorName": "testAuthor"}] } } """, TestJsonSerializerContext.Default.JsonElement); @@ -108,10 +108,10 @@ public async Task VerifyDeserializeWithMessagesAsync() // Assert Assert.Null(thread.ConversationId); - var messageStore = thread.MessageStore as InMemoryChatMessageStore; - Assert.NotNull(messageStore); - Assert.Single(messageStore); - Assert.Equal("testAuthor", messageStore[0].AuthorName); + var chatHistoryProvider = thread.ChatHistoryProvider as InMemoryChatHistoryProvider; + Assert.NotNull(chatHistoryProvider); + Assert.Single(chatHistoryProvider); + Assert.Equal("testAuthor", chatHistoryProvider[0].AuthorName); } [Fact] @@ -129,7 +129,7 @@ public async Task VerifyDeserializeWithIdAsync() // Assert Assert.Equal("TestConvId", thread.ConversationId); - Assert.Null(thread.MessageStore); + Assert.Null(thread.ChatHistoryProvider); } [Fact] @@ -148,7 +148,7 @@ public async Task VerifyDeserializeWithAIContextProviderAsync() var thread = await ChatClientAgentThread.DeserializeAsync(json, aiContextProviderFactory: (_, _, _) => new(mockProvider.Object)); // Assert - Assert.Null(thread.MessageStore); + Assert.Null(thread.ChatHistoryProvider); Assert.Same(thread.AIContextProvider, mockProvider.Object); } @@ -185,7 +185,7 @@ public 
void VerifyThreadSerializationWithId() Assert.True(json.TryGetProperty("conversationId", out var idProperty)); Assert.Equal("TestConvId", idProperty.GetString()); - Assert.False(json.TryGetProperty("storeState", out _)); + Assert.False(json.TryGetProperty("chatHistoryProviderState", out _)); } /// @@ -195,8 +195,8 @@ public void VerifyThreadSerializationWithId() public void VerifyThreadSerializationWithMessages() { // Arrange - InMemoryChatMessageStore store = [new(ChatRole.User, "TestContent") { AuthorName = "TestAuthor" }]; - var thread = new ChatClientAgentThread { MessageStore = store }; + InMemoryChatHistoryProvider provider = [new(ChatRole.User, "TestContent") { AuthorName = "TestAuthor" }]; + var thread = new ChatClientAgentThread { ChatHistoryProvider = provider }; // Act var json = thread.Serialize(); @@ -206,10 +206,10 @@ public void VerifyThreadSerializationWithMessages() Assert.False(json.TryGetProperty("conversationId", out _)); - Assert.True(json.TryGetProperty("storeState", out var storeStateProperty)); - Assert.Equal(JsonValueKind.Object, storeStateProperty.ValueKind); + Assert.True(json.TryGetProperty("chatHistoryProviderState", out var chatHistoryProviderStateProperty)); + Assert.Equal(JsonValueKind.Object, chatHistoryProviderStateProperty.ValueKind); - Assert.True(storeStateProperty.TryGetProperty("messages", out var messagesProperty)); + Assert.True(chatHistoryProviderStateProperty.TryGetProperty("messages", out var messagesProperty)); Assert.Equal(JsonValueKind.Array, messagesProperty.ValueKind); Assert.Single(messagesProperty.EnumerateArray()); @@ -260,15 +260,15 @@ public void VerifyThreadSerializationWithCustomOptions() JsonSerializerOptions options = new() { PropertyNamingPolicy = JsonNamingPolicy.SnakeCaseLower }; options.TypeInfoResolverChain.Add(AgentAbstractionsJsonUtilities.DefaultOptions.TypeInfoResolver!); - var storeStateElement = JsonSerializer.SerializeToElement( + var chatHistoryProviderStateElement = 
JsonSerializer.SerializeToElement( new Dictionary { ["Key"] = "TestValue" }, TestJsonSerializerContext.Default.DictionaryStringObject); - var messageStoreMock = new Mock(); - messageStoreMock + var chatHistoryProviderMock = new Mock(); + chatHistoryProviderMock .Setup(m => m.Serialize(options)) - .Returns(storeStateElement); - thread.MessageStore = messageStoreMock.Object; + .Returns(chatHistoryProviderStateElement); + thread.ChatHistoryProvider = chatHistoryProviderMock.Object; // Act var json = thread.Serialize(options); @@ -278,13 +278,13 @@ public void VerifyThreadSerializationWithCustomOptions() Assert.False(json.TryGetProperty("conversationId", out var idProperty)); - Assert.True(json.TryGetProperty("storeState", out var storeStateProperty)); - Assert.Equal(JsonValueKind.Object, storeStateProperty.ValueKind); + Assert.True(json.TryGetProperty("chatHistoryProviderState", out var chatHistoryProviderStateProperty)); + Assert.Equal(JsonValueKind.Object, chatHistoryProviderStateProperty.ValueKind); - Assert.True(storeStateProperty.TryGetProperty("Key", out var keyProperty)); + Assert.True(chatHistoryProviderStateProperty.TryGetProperty("Key", out var keyProperty)); Assert.Equal("TestValue", keyProperty.GetString()); - messageStoreMock.Verify(m => m.Serialize(options), Times.Once); + chatHistoryProviderMock.Verify(m => m.Serialize(options), Times.Once); } #endregion Serialize Tests @@ -311,19 +311,19 @@ public void GetService_RequestingAIContextProvider_ReturnsAIContextProvider() } [Fact] - public void GetService_RequestingChatMessageStore_ReturnsChatMessageStore() + public void GetService_RequestingChatHistoryProvider_ReturnsChatHistoryProvider() { // Arrange var thread = new ChatClientAgentThread(); - var messageStore = new InMemoryChatMessageStore(); - thread.MessageStore = messageStore; + var chatHistoryProvider = new InMemoryChatHistoryProvider(); + thread.ChatHistoryProvider = chatHistoryProvider; // Act - var result = 
thread.GetService(typeof(ChatMessageStore)); + var result = thread.GetService(typeof(ChatHistoryProvider)); // Assert Assert.NotNull(result); - Assert.Same(messageStore, result); + Assert.Same(chatHistoryProvider, result); } #endregion diff --git a/dotnet/tests/Microsoft.Agents.AI.UnitTests/ChatClient/ChatClientAgent_BackgroundResponsesTests.cs b/dotnet/tests/Microsoft.Agents.AI.UnitTests/ChatClient/ChatClientAgent_BackgroundResponsesTests.cs index 79af3add1d..018da2b6db 100644 --- a/dotnet/tests/Microsoft.Agents.AI.UnitTests/ChatClient/ChatClientAgent_BackgroundResponsesTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.UnitTests/ChatClient/ChatClientAgent_BackgroundResponsesTests.cs @@ -336,11 +336,11 @@ public async Task RunAsync_WhenContinuationTokenProvided_SkipsThreadMessagePopul // Arrange List capturedMessages = []; - // Create a mock message store that would normally provide messages - var mockMessageStore = new Mock(); - mockMessageStore - .Setup(ms => ms.InvokingAsync(It.IsAny(), It.IsAny())) - .ReturnsAsync([new(ChatRole.User, "Message from message store")]); + // Create a mock chat history provider that would normally provide messages + var mockChatHistoryProvider = new Mock(); + mockChatHistoryProvider + .Setup(ms => ms.InvokingAsync(It.IsAny(), It.IsAny())) + .ReturnsAsync([new(ChatRole.User, "Message from chat history provider")]); // Create a mock AI context provider that would normally provide context var mockContextProvider = new Mock(); @@ -364,10 +364,10 @@ public async Task RunAsync_WhenContinuationTokenProvided_SkipsThreadMessagePopul ChatClientAgent agent = new(mockChatClient.Object); - // Create a thread with both message store and AI context provider + // Create a thread with both chat history provider and AI context provider ChatClientAgentThread thread = new() { - MessageStore = mockMessageStore.Object, + ChatHistoryProvider = mockChatHistoryProvider.Object, AIContextProvider = mockContextProvider.Object }; @@ -384,9 +384,9 @@ public async 
Task RunAsync_WhenContinuationTokenProvided_SkipsThreadMessagePopul // With continuation token, thread message population should be skipped Assert.Empty(capturedMessages); - // Verify that message store was never called due to continuation token - mockMessageStore.Verify( - ms => ms.InvokingAsync(It.IsAny(), It.IsAny()), + // Verify that chat history provider was never called due to continuation token + mockChatHistoryProvider.Verify( + ms => ms.InvokingAsync(It.IsAny(), It.IsAny()), Times.Never); // Verify that AI context provider was never called due to continuation token @@ -401,11 +401,11 @@ public async Task RunStreamingAsync_WhenContinuationTokenProvided_SkipsThreadMes // Arrange List capturedMessages = []; - // Create a mock message store that would normally provide messages - var mockMessageStore = new Mock(); - mockMessageStore - .Setup(ms => ms.InvokingAsync(It.IsAny(), It.IsAny())) - .ReturnsAsync([new(ChatRole.User, "Message from message store")]); + // Create a mock chat history provider that would normally provide messages + var mockChatHistoryProvider = new Mock(); + mockChatHistoryProvider + .Setup(ms => ms.InvokingAsync(It.IsAny(), It.IsAny())) + .ReturnsAsync([new(ChatRole.User, "Message from chat history provider")]); // Create a mock AI context provider that would normally provide context var mockContextProvider = new Mock(); @@ -429,10 +429,10 @@ public async Task RunStreamingAsync_WhenContinuationTokenProvided_SkipsThreadMes ChatClientAgent agent = new(mockChatClient.Object); - // Create a thread with both message store and AI context provider + // Create a thread with both chat history provider and AI context provider ChatClientAgentThread thread = new() { - MessageStore = mockMessageStore.Object, + ChatHistoryProvider = mockChatHistoryProvider.Object, AIContextProvider = mockContextProvider.Object }; @@ -448,9 +448,9 @@ public async Task RunStreamingAsync_WhenContinuationTokenProvided_SkipsThreadMes // With continuation token, thread message 
population should be skipped Assert.Empty(capturedMessages); - // Verify that message store was never called due to continuation token - mockMessageStore.Verify( - ms => ms.InvokingAsync(It.IsAny(), It.IsAny()), + // Verify that chat history provider was never called due to continuation token + mockChatHistoryProvider.Verify( + ms => ms.InvokingAsync(It.IsAny(), It.IsAny()), Times.Never); // Verify that AI context provider was never called due to continuation token @@ -610,7 +610,7 @@ public async Task RunStreamingAsync_WhenResponseUpdatesPresentInContinuationToke } [Fact] - public async Task RunStreamingAsync_WhenResumingStreaming_UsesUpdatesFromInitialRunForContextProviderAndMessageStoreAsync() + public async Task RunStreamingAsync_WhenResumingStreaming_UsesUpdatesFromInitialRunForContextProviderAndChatHistoryProviderAsync() { // Arrange ChatResponseUpdate[] returnUpdates = @@ -630,11 +630,11 @@ public async Task RunStreamingAsync_WhenResumingStreaming_UsesUpdatesFromInitial ChatClientAgent agent = new(mockChatClient.Object); - List capturedMessagesAddedToStore = []; - var mockMessageStore = new Mock(); - mockMessageStore - .Setup(ms => ms.InvokedAsync(It.IsAny(), It.IsAny())) - .Callback((ctx, ct) => capturedMessagesAddedToStore.AddRange(ctx.ResponseMessages ?? [])) + List capturedMessagesAddedToProvider = []; + var mockChatHistoryProvider = new Mock(); + mockChatHistoryProvider + .Setup(ms => ms.InvokedAsync(It.IsAny(), It.IsAny())) + .Callback((ctx, ct) => capturedMessagesAddedToProvider.AddRange(ctx.ResponseMessages ?? [])) .Returns(new ValueTask()); AIContextProvider.InvokedContext? 
capturedInvokedContext = null; @@ -646,7 +646,7 @@ public async Task RunStreamingAsync_WhenResumingStreaming_UsesUpdatesFromInitial ChatClientAgentThread thread = new() { - MessageStore = mockMessageStore.Object, + ChatHistoryProvider = mockChatHistoryProvider.Object, AIContextProvider = mockContextProvider.Object }; @@ -662,9 +662,9 @@ public async Task RunStreamingAsync_WhenResumingStreaming_UsesUpdatesFromInitial await agent.RunStreamingAsync(thread, options: runOptions).ToListAsync(); // Assert - mockMessageStore.Verify(ms => ms.InvokedAsync(It.IsAny(), It.IsAny()), Times.Once); - Assert.Single(capturedMessagesAddedToStore); - Assert.Contains("once upon a time", capturedMessagesAddedToStore[0].Text); + mockChatHistoryProvider.Verify(ms => ms.InvokedAsync(It.IsAny(), It.IsAny()), Times.Once); + Assert.Single(capturedMessagesAddedToProvider); + Assert.Contains("once upon a time", capturedMessagesAddedToProvider[0].Text); mockContextProvider.Verify(cp => cp.InvokedAsync(It.IsAny(), It.IsAny()), Times.Once); Assert.NotNull(capturedInvokedContext?.ResponseMessages); @@ -673,7 +673,7 @@ public async Task RunStreamingAsync_WhenResumingStreaming_UsesUpdatesFromInitial } [Fact] - public async Task RunStreamingAsync_WhenResumingStreaming_UsesInputMessagesFromInitialRunForContextProviderAndMessageStoreAsync() + public async Task RunStreamingAsync_WhenResumingStreaming_UsesInputMessagesFromInitialRunForContextProviderAndChatHistoryProviderAsync() { // Arrange Mock mockChatClient = new(); @@ -686,11 +686,11 @@ public async Task RunStreamingAsync_WhenResumingStreaming_UsesInputMessagesFromI ChatClientAgent agent = new(mockChatClient.Object); - List capturedMessagesAddedToStore = []; - var mockMessageStore = new Mock(); - mockMessageStore - .Setup(ms => ms.InvokedAsync(It.IsAny(), It.IsAny())) - .Callback((ctx, ct) => capturedMessagesAddedToStore.AddRange(ctx.RequestMessages)) + List capturedMessagesAddedToProvider = []; + var mockChatHistoryProvider = new Mock(); + 
mockChatHistoryProvider + .Setup(ms => ms.InvokedAsync(It.IsAny(), It.IsAny())) + .Callback((ctx, ct) => capturedMessagesAddedToProvider.AddRange(ctx.RequestMessages)) .Returns(new ValueTask()); AIContextProvider.InvokedContext? capturedInvokedContext = null; @@ -702,7 +702,7 @@ public async Task RunStreamingAsync_WhenResumingStreaming_UsesInputMessagesFromI ChatClientAgentThread thread = new() { - MessageStore = mockMessageStore.Object, + ChatHistoryProvider = mockChatHistoryProvider.Object, AIContextProvider = mockContextProvider.Object }; @@ -718,9 +718,9 @@ public async Task RunStreamingAsync_WhenResumingStreaming_UsesInputMessagesFromI await agent.RunStreamingAsync(thread, options: runOptions).ToListAsync(); // Assert - mockMessageStore.Verify(ms => ms.InvokedAsync(It.IsAny(), It.IsAny()), Times.Once); - Assert.Single(capturedMessagesAddedToStore); - Assert.Contains("Tell me a story", capturedMessagesAddedToStore[0].Text); + mockChatHistoryProvider.Verify(ms => ms.InvokedAsync(It.IsAny(), It.IsAny()), Times.Once); + Assert.Single(capturedMessagesAddedToProvider); + Assert.Contains("Tell me a story", capturedMessagesAddedToProvider[0].Text); mockContextProvider.Verify(cp => cp.InvokedAsync(It.IsAny(), It.IsAny()), Times.Once); Assert.NotNull(capturedInvokedContext?.RequestMessages); diff --git a/dotnet/tests/Microsoft.Agents.AI.UnitTests/ChatClient/ChatClientAgent_ChatHistoryManagementTests.cs b/dotnet/tests/Microsoft.Agents.AI.UnitTests/ChatClient/ChatClientAgent_ChatHistoryManagementTests.cs new file mode 100644 index 0000000000..3e995b3cee --- /dev/null +++ b/dotnet/tests/Microsoft.Agents.AI.UnitTests/ChatClient/ChatClientAgent_ChatHistoryManagementTests.cs @@ -0,0 +1,371 @@ +// Copyright (c) Microsoft. All rights reserved. 
+ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.Extensions.AI; +using Moq; +using Xunit.Sdk; + +namespace Microsoft.Agents.AI.UnitTests; + +/// +/// Contains unit tests that verify the chat history management functionality of the class, +/// e.g. that it correctly reads and updates chat history in any available or that +/// it uses conversation id correctly for service managed chat history. +/// +public class ChatClientAgent_ChatHistoryManagementTests +{ + #region ConversationId Tests + + /// + /// Verify that RunAsync does not throw when providing a ConversationId via both AgentThread and + /// via ChatOptions and the two are the same. + /// + [Fact] + public async Task RunAsync_DoesNotThrow_WhenSpecifyingTwoSameConversationIdsAsync() + { + // Arrange + var chatOptions = new ChatOptions { ConversationId = "ConvId" }; + Mock mockService = new(); + mockService.Setup( + s => s.GetResponseAsync( + It.IsAny>(), + It.Is(opts => opts.ConversationId == "ConvId"), + It.IsAny())).ReturnsAsync(new ChatResponse([new(ChatRole.Assistant, "response")]) { ConversationId = "ConvId" }); + + ChatClientAgent agent = new(mockService.Object, options: new() { ChatOptions = new() { Instructions = "test instructions" } }); + + ChatClientAgentThread thread = new() { ConversationId = "ConvId" }; + + // Act & Assert + var response = await agent.RunAsync([new(ChatRole.User, "test")], thread, options: new ChatClientAgentRunOptions(chatOptions)); + Assert.NotNull(response); + } + + /// + /// Verify that RunAsync throws when providing a ConversationId via both AgentThread and + /// via ChatOptions and the two are different. 
+ /// + [Fact] + public async Task RunAsync_Throws_WhenSpecifyingTwoDifferentConversationIdsAsync() + { + // Arrange + var chatOptions = new ChatOptions { ConversationId = "ConvId" }; + Mock mockService = new(); + + ChatClientAgent agent = new(mockService.Object, options: new() { ChatOptions = new() { Instructions = "test instructions" } }); + + ChatClientAgentThread thread = new() { ConversationId = "ThreadId" }; + + // Act & Assert + await Assert.ThrowsAsync(() => agent.RunAsync([new(ChatRole.User, "test")], thread, options: new ChatClientAgentRunOptions(chatOptions))); + } + + /// + /// Verify that RunAsync clones the ChatOptions when providing a thread with a ConversationId and a ChatOptions. + /// + [Fact] + public async Task RunAsync_ClonesChatOptions_ToAddConversationIdAsync() + { + // Arrange + var chatOptions = new ChatOptions { MaxOutputTokens = 100 }; + Mock mockService = new(); + mockService.Setup( + s => s.GetResponseAsync( + It.IsAny>(), + It.Is(opts => opts.MaxOutputTokens == 100 && opts.ConversationId == "ConvId"), + It.IsAny())).ReturnsAsync(new ChatResponse([new(ChatRole.Assistant, "response")]) { ConversationId = "ConvId" }); + + ChatClientAgent agent = new(mockService.Object, options: new() { ChatOptions = new() { Instructions = "test instructions" } }); + + ChatClientAgentThread thread = new() { ConversationId = "ConvId" }; + + // Act + await agent.RunAsync([new(ChatRole.User, "test")], thread, options: new ChatClientAgentRunOptions(chatOptions)); + + // Assert + Assert.Null(chatOptions.ConversationId); + } + + /// + /// Verify that RunAsync throws if a thread is provided that uses a conversation id already, but the service does not return one on invoke. 
+ /// + [Fact] + public async Task RunAsync_Throws_ForMissingConversationIdWithConversationIdThreadAsync() + { + // Arrange + Mock mockService = new(); + mockService.Setup( + s => s.GetResponseAsync( + It.IsAny>(), + It.IsAny(), + It.IsAny())).ReturnsAsync(new ChatResponse([new(ChatRole.Assistant, "response")])); + + ChatClientAgent agent = new(mockService.Object, options: new() { ChatOptions = new() { Instructions = "test instructions" } }); + + ChatClientAgentThread thread = new() { ConversationId = "ConvId" }; + + // Act & Assert + await Assert.ThrowsAsync(() => agent.RunAsync([new(ChatRole.User, "test")], thread)); + } + + /// + /// Verify that RunAsync sets the ConversationId on the thread when the service returns one. + /// + [Fact] + public async Task RunAsync_SetsConversationIdOnThread_WhenReturnedByChatClientAsync() + { + // Arrange + Mock mockService = new(); + mockService.Setup( + s => s.GetResponseAsync( + It.IsAny>(), + It.IsAny(), + It.IsAny())).ReturnsAsync(new ChatResponse([new(ChatRole.Assistant, "response")]) { ConversationId = "ConvId" }); + ChatClientAgent agent = new(mockService.Object, options: new() { ChatOptions = new() { Instructions = "test instructions" } }); + ChatClientAgentThread thread = new(); + + // Act + await agent.RunAsync([new(ChatRole.User, "test")], thread); + + // Assert + Assert.Equal("ConvId", thread.ConversationId); + } + + #endregion + + #region ChatHistoryProvider Tests + + /// + /// Verify that RunAsync uses the default InMemoryChatHistoryProvider when the chat client returns no conversation id. 
+ /// + [Fact] + public async Task RunAsync_UsesDefaultInMemoryChatHistoryProvider_WhenNoConversationIdReturnedByChatClientAsync() + { + // Arrange + Mock mockService = new(); + mockService.Setup( + s => s.GetResponseAsync( + It.IsAny>(), + It.IsAny(), + It.IsAny())).ReturnsAsync(new ChatResponse([new(ChatRole.Assistant, "response")])); + ChatClientAgent agent = new(mockService.Object, options: new() + { + ChatOptions = new() { Instructions = "test instructions" }, + }); + + // Act + ChatClientAgentThread? thread = await agent.GetNewThreadAsync() as ChatClientAgentThread; + await agent.RunAsync([new(ChatRole.User, "test")], thread); + + // Assert + InMemoryChatHistoryProvider chatHistoryProvider = Assert.IsType(thread!.ChatHistoryProvider); + Assert.Equal(2, chatHistoryProvider.Count); + Assert.Equal("test", chatHistoryProvider[0].Text); + Assert.Equal("response", chatHistoryProvider[1].Text); + } + + /// + /// Verify that RunAsync uses the ChatHistoryProvider factory when the chat client returns no conversation id. 
+ /// + [Fact] + public async Task RunAsync_UsesChatHistoryProviderFactory_WhenProvidedAndNoConversationIdReturnedByChatClientAsync() + { + // Arrange + Mock mockService = new(); + mockService.Setup( + s => s.GetResponseAsync( + It.IsAny>(), + It.IsAny(), + It.IsAny())).ReturnsAsync(new ChatResponse([new(ChatRole.Assistant, "response")])); + + Mock mockChatHistoryProvider = new(); + mockChatHistoryProvider.Setup(s => s.InvokingAsync( + It.IsAny(), + It.IsAny())).ReturnsAsync([new ChatMessage(ChatRole.User, "Existing Chat History")]); + mockChatHistoryProvider.Setup(s => s.InvokedAsync( + It.IsAny(), + It.IsAny())).Returns(new ValueTask()); + + Mock>> mockFactory = new(); + mockFactory.Setup(f => f(It.IsAny(), It.IsAny())).ReturnsAsync(mockChatHistoryProvider.Object); + + ChatClientAgent agent = new(mockService.Object, options: new() + { + ChatOptions = new() { Instructions = "test instructions" }, + ChatHistoryProviderFactory = mockFactory.Object + }); + + // Act + ChatClientAgentThread? thread = await agent.GetNewThreadAsync() as ChatClientAgentThread; + await agent.RunAsync([new(ChatRole.User, "test")], thread); + + // Assert + Assert.IsType(thread!.ChatHistoryProvider, exactMatch: false); + mockService.Verify( + x => x.GetResponseAsync( + It.Is>(msgs => msgs.Count() == 2 && msgs.Any(m => m.Text == "Existing Chat History") && msgs.Any(m => m.Text == "test")), + It.IsAny(), + It.IsAny()), + Times.Once); + mockChatHistoryProvider.Verify(s => s.InvokingAsync( + It.Is(x => x.RequestMessages.Count() == 1), + It.IsAny()), + Times.Once); + mockChatHistoryProvider.Verify(s => s.InvokedAsync( + It.Is(x => x.RequestMessages.Count() == 1 && x.ChatHistoryProviderMessages != null && x.ChatHistoryProviderMessages.Count() == 1 && x.ResponseMessages!.Count() == 1), + It.IsAny()), + Times.Once); + mockFactory.Verify(f => f(It.IsAny(), It.IsAny()), Times.Once); + } + + /// + /// Verify that RunAsync notifies the ChatHistoryProvider on failure. 
+ /// + [Fact] + public async Task RunAsync_NotifiesChatHistoryProvider_OnFailureAsync() + { + // Arrange + Mock mockService = new(); + mockService.Setup( + s => s.GetResponseAsync( + It.IsAny>(), + It.IsAny(), + It.IsAny())).Throws(new InvalidOperationException("Test Error")); + + Mock mockChatHistoryProvider = new(); + + Mock>> mockFactory = new(); + mockFactory.Setup(f => f(It.IsAny(), It.IsAny())).ReturnsAsync(mockChatHistoryProvider.Object); + + ChatClientAgent agent = new(mockService.Object, options: new() + { + ChatOptions = new() { Instructions = "test instructions" }, + ChatHistoryProviderFactory = mockFactory.Object + }); + + // Act + ChatClientAgentThread? thread = await agent.GetNewThreadAsync() as ChatClientAgentThread; + await Assert.ThrowsAsync(() => agent.RunAsync([new(ChatRole.User, "test")], thread)); + + // Assert + Assert.IsType(thread!.ChatHistoryProvider, exactMatch: false); + mockChatHistoryProvider.Verify(s => s.InvokedAsync( + It.Is(x => x.RequestMessages.Count() == 1 && x.ResponseMessages == null && x.InvokeException!.Message == "Test Error"), + It.IsAny()), + Times.Once); + mockFactory.Verify(f => f(It.IsAny(), It.IsAny()), Times.Once); + } + + /// + /// Verify that RunAsync throws when a ChatHistoryProvider Factory is provided and the chat client returns a conversation id. 
+ /// + [Fact] + public async Task RunAsync_Throws_WhenChatHistoryProviderFactoryProvidedAndConversationIdReturnedByChatClientAsync() + { + // Arrange + Mock mockService = new(); + mockService.Setup( + s => s.GetResponseAsync( + It.IsAny>(), + It.IsAny(), + It.IsAny())).ReturnsAsync(new ChatResponse([new(ChatRole.Assistant, "response")]) { ConversationId = "ConvId" }); + Mock>> mockFactory = new(); + mockFactory.Setup(f => f(It.IsAny(), It.IsAny())).ReturnsAsync(new InMemoryChatHistoryProvider()); + ChatClientAgent agent = new(mockService.Object, options: new() + { + ChatOptions = new() { Instructions = "test instructions" }, + ChatHistoryProviderFactory = mockFactory.Object + }); + + // Act & Assert + ChatClientAgentThread? thread = await agent.GetNewThreadAsync() as ChatClientAgentThread; + InvalidOperationException exception = await Assert.ThrowsAsync(() => agent.RunAsync([new(ChatRole.User, "test")], thread)); + Assert.Equal("Only the ConversationId or ChatHistoryProvider may be set, but not both and switching from one to another is not supported.", exception.Message); + } + + #endregion + + #region ChatHistoryProvider Override Tests + + /// + /// Tests that RunAsync uses an override ChatHistoryProvider provided via AdditionalProperties instead of the provider from a factory + /// if one is supplied. + /// + [Fact] + public async Task RunAsync_UsesOverrideChatHistoryProvider_WhenProvidedViaAdditionalPropertiesAsync() + { + // Arrange + Mock mockService = new(); + mockService.Setup( + s => s.GetResponseAsync( + It.IsAny>(), + It.IsAny(), + It.IsAny())).ReturnsAsync(new ChatResponse([new(ChatRole.Assistant, "response")])); + + // Arrange a chat history provider to override the factory provided one. 
+ Mock mockOverrideChatHistoryProvider = new(); + mockOverrideChatHistoryProvider.Setup(s => s.InvokingAsync( + It.IsAny(), + It.IsAny())).ReturnsAsync([new ChatMessage(ChatRole.User, "Existing Chat History")]); + mockOverrideChatHistoryProvider.Setup(s => s.InvokedAsync( + It.IsAny(), + It.IsAny())).Returns(new ValueTask()); + + // Arrange a chat history provider to provide to the agent via a factory at construction time. + // This one shouldn't be used since it is being overridden. + Mock mockFactoryChatHistoryProvider = new(); + mockFactoryChatHistoryProvider.Setup(s => s.InvokingAsync( + It.IsAny(), + It.IsAny())).ThrowsAsync(FailException.ForFailure("Base ChatHistoryProvider shouldn't be used.")); + mockFactoryChatHistoryProvider.Setup(s => s.InvokedAsync( + It.IsAny(), + It.IsAny())).Throws(FailException.ForFailure("Base ChatHistoryProvider shouldn't be used.")); + + Mock>> mockFactory = new(); + mockFactory.Setup(f => f(It.IsAny(), It.IsAny())).ReturnsAsync(mockFactoryChatHistoryProvider.Object); + + ChatClientAgent agent = new(mockService.Object, options: new() + { + ChatOptions = new() { Instructions = "test instructions" }, + ChatHistoryProviderFactory = mockFactory.Object + }); + + // Act + ChatClientAgentThread? 
thread = await agent.GetNewThreadAsync() as ChatClientAgentThread; + AdditionalPropertiesDictionary additionalProperties = new(); + additionalProperties.Add(mockOverrideChatHistoryProvider.Object); + await agent.RunAsync([new(ChatRole.User, "test")], thread, options: new AgentRunOptions { AdditionalProperties = additionalProperties }); + + // Assert + Assert.Same(mockFactoryChatHistoryProvider.Object, thread!.ChatHistoryProvider); + mockService.Verify( + x => x.GetResponseAsync( + It.Is>(msgs => msgs.Count() == 2 && msgs.Any(m => m.Text == "Existing Chat History") && msgs.Any(m => m.Text == "test")), + It.IsAny(), + It.IsAny()), + Times.Once); + mockOverrideChatHistoryProvider.Verify(s => s.InvokingAsync( + It.Is(x => x.RequestMessages.Count() == 1), + It.IsAny()), + Times.Once); + mockOverrideChatHistoryProvider.Verify(s => s.InvokedAsync( + It.Is(x => x.RequestMessages.Count() == 1 && x.ChatHistoryProviderMessages != null && x.ChatHistoryProviderMessages.Count() == 1 && x.ResponseMessages!.Count() == 1), + It.IsAny()), + Times.Once); + + mockFactoryChatHistoryProvider.Verify(s => s.InvokingAsync( + It.IsAny(), + It.IsAny()), + Times.Never); + mockFactoryChatHistoryProvider.Verify(s => s.InvokedAsync( + It.IsAny(), + It.IsAny()), + Times.Never); + } + + #endregion +} diff --git a/dotnet/tests/Microsoft.Agents.AI.UnitTests/ChatClient/ChatClientAgent_DeserializeThreadTests.cs b/dotnet/tests/Microsoft.Agents.AI.UnitTests/ChatClient/ChatClientAgent_DeserializeThreadTests.cs index 98e5b0ed1a..6b9a4c89e2 100644 --- a/dotnet/tests/Microsoft.Agents.AI.UnitTests/ChatClient/ChatClientAgent_DeserializeThreadTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.UnitTests/ChatClient/ChatClientAgent_DeserializeThreadTests.cs @@ -5,7 +5,7 @@ using Microsoft.Extensions.AI; using Moq; -namespace Microsoft.Agents.AI.UnitTests.ChatClient; +namespace Microsoft.Agents.AI.UnitTests; /// /// Contains unit tests for the ChatClientAgent.DeserializeThread methods. 
@@ -46,25 +46,25 @@ public async Task DeserializeThread_UsesAIContextProviderFactory_IfProvidedAsync } [Fact] - public async Task DeserializeThread_UsesChatMessageStoreFactory_IfProvidedAsync() + public async Task DeserializeThread_UsesChatHistoryProviderFactory_IfProvidedAsync() { // Arrange var mockChatClient = new Mock(); - var mockMessageStore = new Mock(); + var mockChatHistoryProvider = new Mock(); var factoryCalled = false; var agent = new ChatClientAgent(mockChatClient.Object, new ChatClientAgentOptions { ChatOptions = new() { Instructions = "Test instructions" }, - ChatMessageStoreFactory = (_, _) => + ChatHistoryProviderFactory = (_, _) => { factoryCalled = true; - return new ValueTask(mockMessageStore.Object); + return new ValueTask(mockChatHistoryProvider.Object); } }); var json = JsonSerializer.Deserialize(""" { - "storeState": { } + "chatHistoryProviderState": { } } """, TestJsonSerializerContext.Default.JsonElement); @@ -72,9 +72,9 @@ public async Task DeserializeThread_UsesChatMessageStoreFactory_IfProvidedAsync( var thread = await agent.DeserializeThreadAsync(json); // Assert - Assert.True(factoryCalled, "ChatMessageStoreFactory was not called."); + Assert.True(factoryCalled, "ChatHistoryProviderFactory was not called."); Assert.IsType(thread); var typedThread = (ChatClientAgentThread)thread; - Assert.Same(mockMessageStore.Object, typedThread.MessageStore); + Assert.Same(mockChatHistoryProvider.Object, typedThread.ChatHistoryProvider); } } diff --git a/dotnet/tests/Microsoft.Agents.AI.UnitTests/ChatClient/ChatClientAgent_GetNewThreadTests.cs b/dotnet/tests/Microsoft.Agents.AI.UnitTests/ChatClient/ChatClientAgent_GetNewThreadTests.cs index e6cc7e90e9..b582465a4d 100644 --- a/dotnet/tests/Microsoft.Agents.AI.UnitTests/ChatClient/ChatClientAgent_GetNewThreadTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.UnitTests/ChatClient/ChatClientAgent_GetNewThreadTests.cs @@ -4,7 +4,7 @@ using Microsoft.Extensions.AI; using Moq; -namespace 
Microsoft.Agents.AI.UnitTests.ChatClient; +namespace Microsoft.Agents.AI.UnitTests; /// /// Contains unit tests for the ChatClientAgent.GetNewThreadAsync methods. @@ -39,19 +39,19 @@ public async Task GetNewThread_UsesAIContextProviderFactory_IfProvidedAsync() } [Fact] - public async Task GetNewThread_UsesChatMessageStoreFactory_IfProvidedAsync() + public async Task GetNewThread_UsesChatHistoryProviderFactory_IfProvidedAsync() { // Arrange var mockChatClient = new Mock(); - var mockMessageStore = new Mock(); + var mockChatHistoryProvider = new Mock(); var factoryCalled = false; var agent = new ChatClientAgent(mockChatClient.Object, new ChatClientAgentOptions { ChatOptions = new() { Instructions = "Test instructions" }, - ChatMessageStoreFactory = (_, _) => + ChatHistoryProviderFactory = (_, _) => { factoryCalled = true; - return new ValueTask(mockMessageStore.Object); + return new ValueTask(mockChatHistoryProvider.Object); } }); @@ -59,27 +59,27 @@ public async Task GetNewThread_UsesChatMessageStoreFactory_IfProvidedAsync() var thread = await agent.GetNewThreadAsync(); // Assert - Assert.True(factoryCalled, "ChatMessageStoreFactory was not called."); + Assert.True(factoryCalled, "ChatHistoryProviderFactory was not called."); Assert.IsType(thread); var typedThread = (ChatClientAgentThread)thread; - Assert.Same(mockMessageStore.Object, typedThread.MessageStore); + Assert.Same(mockChatHistoryProvider.Object, typedThread.ChatHistoryProvider); } [Fact] - public async Task GetNewThread_UsesChatMessageStore_FromTypedOverloadAsync() + public async Task GetNewThread_UsesChatHistoryProvider_FromTypedOverloadAsync() { // Arrange var mockChatClient = new Mock(); - var mockMessageStore = new Mock(); + var mockChatHistoryProvider = new Mock(); var agent = new ChatClientAgent(mockChatClient.Object); // Act - var thread = await agent.GetNewThreadAsync(mockMessageStore.Object); + var thread = await agent.GetNewThreadAsync(mockChatHistoryProvider.Object); // Assert 
Assert.IsType(thread); var typedThread = (ChatClientAgentThread)thread; - Assert.Same(mockMessageStore.Object, typedThread.MessageStore); + Assert.Same(mockChatHistoryProvider.Object, typedThread.ChatHistoryProvider); } [Fact] diff --git a/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests/Framework/WorkflowHarness.cs b/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests/Framework/WorkflowHarness.cs index afd9b18fb9..80d4c57da8 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests/Framework/WorkflowHarness.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.IntegrationTests/Framework/WorkflowHarness.cs @@ -55,7 +55,7 @@ public async Task ResumeAsync(ExternalResponse response) { Console.WriteLine("\nRESUMING WORKFLOW..."); Assert.NotNull(this._lastCheckpoint); - Checkpointed run = await InProcessExecution.ResumeStreamAsync(workflow, this._lastCheckpoint, this.GetCheckpointManager(), runId); + Checkpointed run = await InProcessExecution.ResumeStreamAsync(workflow, this._lastCheckpoint, this.GetCheckpointManager()); IReadOnlyList workflowEvents = await MonitorAndDisposeWorkflowRunAsync(run, response).ToArrayAsync(); return new WorkflowEvents(workflowEvents); } diff --git a/dotnet/tests/Microsoft.Agents.AI.Workflows.Generators.UnitTests/ExecutorRouteGeneratorTests.cs b/dotnet/tests/Microsoft.Agents.AI.Workflows.Generators.UnitTests/ExecutorRouteGeneratorTests.cs new file mode 100644 index 0000000000..c48ba9ffdf --- /dev/null +++ b/dotnet/tests/Microsoft.Agents.AI.Workflows.Generators.UnitTests/ExecutorRouteGeneratorTests.cs @@ -0,0 +1,1287 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Linq; +using FluentAssertions; + +namespace Microsoft.Agents.AI.Workflows.Generators.UnitTests; + +/// +/// Tests for the ExecutorRouteGenerator source generator. 
+/// +public class ExecutorRouteGeneratorTests +{ + #region Single Handler Tests + + [Fact] + public void SingleHandler_VoidReturn_GeneratesCorrectRoute() + { + var source = """ + using System.Threading; + using System.Threading.Tasks; + using Microsoft.Agents.AI.Workflows; + + namespace TestNamespace; + + public partial class TestExecutor : Executor + { + public TestExecutor() : base("test") { } + + [MessageHandler] + private void HandleMessage(string message, IWorkflowContext context) + { + } + } + """; + + var result = GeneratorTestHelper.RunGenerator(source); + + result.RunResult.GeneratedTrees.Should().HaveCount(1); + + var generated = result.RunResult.GeneratedTrees[0].ToString(); + generated.Should().Contain("protected override RouteBuilder ConfigureRoutes(RouteBuilder routeBuilder)"); + generated.Should().Contain(".AddHandler(this.HandleMessage)"); + } + + [Fact] + public void SingleHandler_ValueTaskReturn_GeneratesCorrectRoute() + { + var source = """ + using System.Threading; + using System.Threading.Tasks; + using Microsoft.Agents.AI.Workflows; + + namespace TestNamespace; + + public partial class TestExecutor : Executor + { + public TestExecutor() : base("test") { } + + [MessageHandler] + private ValueTask HandleMessageAsync(string message, IWorkflowContext context) + { + return default; + } + } + """; + + var result = GeneratorTestHelper.RunGenerator(source); + + result.RunResult.GeneratedTrees.Should().HaveCount(1); + + var generated = result.RunResult.GeneratedTrees[0].ToString(); + generated.Should().Contain(".AddHandler(this.HandleMessageAsync)"); + } + + [Fact] + public void SingleHandler_WithOutput_GeneratesCorrectRoute() + { + var source = """ + using System.Threading; + using System.Threading.Tasks; + using Microsoft.Agents.AI.Workflows; + + namespace TestNamespace; + + public partial class TestExecutor : Executor + { + public TestExecutor() : base("test") { } + + [MessageHandler] + private ValueTask HandleMessageAsync(string message, 
IWorkflowContext context) + { + return new ValueTask(42); + } + } + """; + + var result = GeneratorTestHelper.RunGenerator(source); + + result.RunResult.GeneratedTrees.Should().HaveCount(1); + + var generated = result.RunResult.GeneratedTrees[0].ToString(); + generated.Should().Contain(".AddHandler(this.HandleMessageAsync)"); + } + + [Fact] + public void SingleHandler_WithCancellationToken_GeneratesCorrectRoute() + { + var source = """ + using System.Threading; + using System.Threading.Tasks; + using Microsoft.Agents.AI.Workflows; + + namespace TestNamespace; + + public partial class TestExecutor : Executor + { + public TestExecutor() : base("test") { } + + [MessageHandler] + private ValueTask HandleMessageAsync(string message, IWorkflowContext context, CancellationToken ct) + { + return default; + } + } + """; + + var result = GeneratorTestHelper.RunGenerator(source); + + result.RunResult.GeneratedTrees.Should().HaveCount(1); + + var generated = result.RunResult.GeneratedTrees[0].ToString(); + generated.Should().Contain(".AddHandler(this.HandleMessageAsync)"); + } + + #endregion + + #region Multiple Handler Tests + + [Fact] + public void MultipleHandlers_GeneratesAllRoutes() + { + var source = """ + using System.Threading; + using System.Threading.Tasks; + using Microsoft.Agents.AI.Workflows; + + namespace TestNamespace; + + public partial class TestExecutor : Executor + { + public TestExecutor() : base("test") { } + + [MessageHandler] + private void HandleString(string message, IWorkflowContext context) { } + + [MessageHandler] + private void HandleInt(int message, IWorkflowContext context) { } + + [MessageHandler] + private ValueTask HandleDoubleAsync(double message, IWorkflowContext context) + { + return new ValueTask("result"); + } + } + """; + + var result = GeneratorTestHelper.RunGenerator(source); + + result.RunResult.GeneratedTrees.Should().HaveCount(1); + + var generated = result.RunResult.GeneratedTrees[0].ToString(); + 
generated.Should().Contain(".AddHandler(this.HandleString)"); + generated.Should().Contain(".AddHandler(this.HandleInt)"); + generated.Should().Contain(".AddHandler(this.HandleDoubleAsync)"); + } + + #endregion + + #region Yield and Send Type Tests + + [Fact] + public void Handler_WithYieldTypes_GeneratesConfigureYieldTypes() + { + var source = """ + using System; + using System.Threading; + using System.Threading.Tasks; + using Microsoft.Agents.AI.Workflows; + + namespace TestNamespace; + + public class OutputMessage { } + + public partial class TestExecutor : Executor + { + public TestExecutor() : base("test") { } + + [MessageHandler(Yield = new[] { typeof(OutputMessage) })] + private void HandleMessage(string message, IWorkflowContext context) { } + } + """; + + var result = GeneratorTestHelper.RunGenerator(source); + + result.RunResult.GeneratedTrees.Should().HaveCount(1); + + var generated = result.RunResult.GeneratedTrees[0].ToString(); + generated.Should().Contain("protected override ISet ConfigureYieldTypes()"); + generated.Should().Contain("types.Add(typeof(global::TestNamespace.OutputMessage))"); + } + + [Fact] + public void Handler_WithSendTypes_GeneratesConfigureSentTypes() + { + var source = """ + using System; + using System.Threading; + using System.Threading.Tasks; + using Microsoft.Agents.AI.Workflows; + + namespace TestNamespace; + + public class SendMessage { } + + public partial class TestExecutor : Executor + { + public TestExecutor() : base("test") { } + + [MessageHandler(Send = new[] { typeof(SendMessage) })] + private void HandleMessage(string message, IWorkflowContext context) { } + } + """; + + var result = GeneratorTestHelper.RunGenerator(source); + + result.RunResult.GeneratedTrees.Should().HaveCount(1); + + var generated = result.RunResult.GeneratedTrees[0].ToString(); + generated.Should().Contain("protected override ISet ConfigureSentTypes()"); + generated.Should().Contain("types.Add(typeof(global::TestNamespace.SendMessage))"); + } + 
+ [Fact] + public void ClassLevel_SendsMessageAttribute_GeneratesConfigureSentTypes() + { + var source = """ + using System; + using System.Threading; + using System.Threading.Tasks; + using Microsoft.Agents.AI.Workflows; + + namespace TestNamespace; + + public class BroadcastMessage { } + + [SendsMessage(typeof(BroadcastMessage))] + public partial class TestExecutor : Executor + { + public TestExecutor() : base("test") { } + + [MessageHandler] + private void HandleMessage(string message, IWorkflowContext context) { } + } + """; + + var result = GeneratorTestHelper.RunGenerator(source); + + result.RunResult.GeneratedTrees.Should().HaveCount(1); + + var generated = result.RunResult.GeneratedTrees[0].ToString(); + generated.Should().Contain("protected override ISet ConfigureSentTypes()"); + generated.Should().Contain("types.Add(typeof(global::TestNamespace.BroadcastMessage))"); + } + + [Fact] + public void ClassLevel_YieldsOutputAttribute_GeneratesConfigureYieldTypes() + { + var source = """ + using System; + using System.Threading; + using System.Threading.Tasks; + using Microsoft.Agents.AI.Workflows; + + namespace TestNamespace; + + public class YieldedMessage { } + + [YieldsOutput(typeof(YieldedMessage))] + public partial class TestExecutor : Executor + { + public TestExecutor() : base("test") { } + + [MessageHandler] + private void HandleMessage(string message, IWorkflowContext context) { } + } + """; + + var result = GeneratorTestHelper.RunGenerator(source); + + result.RunResult.GeneratedTrees.Should().HaveCount(1); + + var generated = result.RunResult.GeneratedTrees[0].ToString(); + generated.Should().Contain("protected override ISet ConfigureYieldTypes()"); + generated.Should().Contain("types.Add(typeof(global::TestNamespace.YieldedMessage))"); + } + + #endregion + + #region Nested Class Tests + + [Fact] + public void NestedClass_SingleLevel_GeneratesCorrectPartialHierarchy() + { + var source = """ + using System.Threading; + using System.Threading.Tasks; + 
using Microsoft.Agents.AI.Workflows; + + namespace TestNamespace; + + public partial class OuterClass + { + public partial class TestExecutor : Executor + { + public TestExecutor() : base("test") { } + + [MessageHandler] + private void HandleMessage(string message, IWorkflowContext context) { } + } + } + """; + + var result = GeneratorTestHelper.RunGenerator(source); + + result.RunResult.GeneratedTrees.Should().HaveCount(1); + result.RunResult.Diagnostics.Should().BeEmpty(); + + var generated = result.RunResult.GeneratedTrees[0].ToString(); + + // Verify partial declarations are present + generated.Should().Contain("partial class OuterClass"); + generated.Should().Contain("partial class TestExecutor"); + + // Verify proper nesting structure with braces + // The outer class should open before the inner class + var outerIndex = generated.IndexOf("partial class OuterClass", StringComparison.Ordinal); + var innerIndex = generated.IndexOf("partial class TestExecutor", StringComparison.Ordinal); + outerIndex.Should().BeLessThan(innerIndex, "outer class should appear before inner class"); + + // Verify handler registration is present + generated.Should().Contain(".AddHandler(this.HandleMessage)"); + } + + [Fact] + public void NestedClass_TwoLevels_GeneratesCorrectPartialHierarchy() + { + var source = """ + using System.Threading; + using System.Threading.Tasks; + using Microsoft.Agents.AI.Workflows; + + namespace TestNamespace; + + public partial class Outer + { + public partial class Inner + { + public partial class TestExecutor : Executor + { + public TestExecutor() : base("test") { } + + [MessageHandler] + private void HandleMessage(string message, IWorkflowContext context) { } + } + } + } + """; + + var result = GeneratorTestHelper.RunGenerator(source); + + result.RunResult.GeneratedTrees.Should().HaveCount(1); + result.RunResult.Diagnostics.Should().BeEmpty(); + + var generated = result.RunResult.GeneratedTrees[0].ToString(); + + // Verify all three partial 
declarations are present in correct order + generated.Should().Contain("partial class Outer"); + generated.Should().Contain("partial class Inner"); + generated.Should().Contain("partial class TestExecutor"); + + var outerIndex = generated.IndexOf("partial class Outer", StringComparison.Ordinal); + var innerIndex = generated.IndexOf("partial class Inner", StringComparison.Ordinal); + var executorIndex = generated.IndexOf("partial class TestExecutor", StringComparison.Ordinal); + + outerIndex.Should().BeLessThan(innerIndex, "Outer should appear before Inner"); + innerIndex.Should().BeLessThan(executorIndex, "Inner should appear before TestExecutor"); + + // Verify handler registration + generated.Should().Contain(".AddHandler(this.HandleMessage)"); + } + + [Fact] + public void NestedClass_ThreeLevels_GeneratesCorrectPartialHierarchy() + { + var source = """ + using System.Threading; + using System.Threading.Tasks; + using Microsoft.Agents.AI.Workflows; + + namespace TestNamespace; + + public partial class Level1 + { + public partial class Level2 + { + public partial class Level3 + { + public partial class TestExecutor : Executor + { + public TestExecutor() : base("test") { } + + [MessageHandler] + private void HandleMessage(int message, IWorkflowContext context) { } + } + } + } + } + """; + + var result = GeneratorTestHelper.RunGenerator(source); + + result.RunResult.GeneratedTrees.Should().HaveCount(1); + result.RunResult.Diagnostics.Should().BeEmpty(); + + var generated = result.RunResult.GeneratedTrees[0].ToString(); + + // All four partial class declarations should be present + generated.Should().Contain("partial class Level1"); + generated.Should().Contain("partial class Level2"); + generated.Should().Contain("partial class Level3"); + generated.Should().Contain("partial class TestExecutor"); + + // Verify correct ordering + var level1Index = generated.IndexOf("partial class Level1", StringComparison.Ordinal); + var level2Index = generated.IndexOf("partial class 
Level2", StringComparison.Ordinal); + var level3Index = generated.IndexOf("partial class Level3", StringComparison.Ordinal); + var executorIndex = generated.IndexOf("partial class TestExecutor", StringComparison.Ordinal); + + level1Index.Should().BeLessThan(level2Index); + level2Index.Should().BeLessThan(level3Index); + level3Index.Should().BeLessThan(executorIndex); + + // Verify handler registration + generated.Should().Contain(".AddHandler(this.HandleMessage)"); + } + + [Fact] + public void NestedClass_WithoutNamespace_GeneratesCorrectly() + { + var source = """ + using System.Threading; + using System.Threading.Tasks; + using Microsoft.Agents.AI.Workflows; + + public partial class OuterClass + { + public partial class TestExecutor : Executor + { + public TestExecutor() : base("test") { } + + [MessageHandler] + private void HandleMessage(string message, IWorkflowContext context) { } + } + } + """; + + var result = GeneratorTestHelper.RunGenerator(source); + + result.RunResult.GeneratedTrees.Should().HaveCount(1); + result.RunResult.Diagnostics.Should().BeEmpty(); + + var generated = result.RunResult.GeneratedTrees[0].ToString(); + + // Should not contain namespace declaration + generated.Should().NotContain("namespace "); + + // Should still have proper partial hierarchy + generated.Should().Contain("partial class OuterClass"); + generated.Should().Contain("partial class TestExecutor"); + generated.Should().Contain(".AddHandler(this.HandleMessage)"); + } + + [Fact] + public void NestedClass_GeneratedCodeCompiles() + { + // This test verifies that the generated code actually compiles by checking + // for compilation errors in the output (beyond our generator diagnostics) + var source = """ + using System.Threading; + using System.Threading.Tasks; + using Microsoft.Agents.AI.Workflows; + + namespace TestNamespace; + + public partial class Outer + { + public partial class Inner + { + public partial class TestExecutor : Executor + { + public TestExecutor() : 
base("test") { } + + [MessageHandler] + private ValueTask HandleMessage(int message, IWorkflowContext context) + { + return new ValueTask("result"); + } + } + } + } + """; + + var result = GeneratorTestHelper.RunGenerator(source); + + // No generator diagnostics + result.RunResult.Diagnostics.Should().BeEmpty(); + + // Check that the combined compilation (source + generated) has no errors + var compilationDiagnostics = result.OutputCompilation.GetDiagnostics() + .Where(d => d.Severity == CodeAnalysis.DiagnosticSeverity.Error) + .ToList(); + + compilationDiagnostics.Should().BeEmpty( + "generated code for nested classes should compile without errors"); + } + + [Fact] + public void NestedClass_BraceBalancing_IsCorrect() + { + var source = """ + using System.Threading; + using System.Threading.Tasks; + using Microsoft.Agents.AI.Workflows; + + namespace TestNamespace; + + public partial class Outer + { + public partial class Inner + { + public partial class TestExecutor : Executor + { + public TestExecutor() : base("test") { } + + [MessageHandler] + private void HandleMessage(string message, IWorkflowContext context) { } + } + } + } + """; + + var result = GeneratorTestHelper.RunGenerator(source); + + result.RunResult.GeneratedTrees.Should().HaveCount(1); + + var generated = result.RunResult.GeneratedTrees[0].ToString(); + + // Count braces - they should be balanced + var openBraces = generated.Count(c => c == '{'); + var closeBraces = generated.Count(c => c == '}'); + + openBraces.Should().Be(closeBraces, "generated code should have balanced braces"); + + // For Outer.Inner.TestExecutor, we expect: + // - 1 for Outer class + // - 1 for Inner class + // - 1 for TestExecutor class + // - 1 for ConfigureRoutes method + // = 4 pairs minimum + openBraces.Should().BeGreaterThanOrEqualTo(4, "should have braces for all nested classes and method"); + } + + #endregion + + #region Multi-File Partial Class Tests + + [Fact] + public void 
PartialClass_SplitAcrossFiles_GeneratesCorrectly() + { + // File 1: The "main" partial with constructor and base class + var file1 = """ + using System.Threading; + using System.Threading.Tasks; + using Microsoft.Agents.AI.Workflows; + + namespace TestNamespace; + + public partial class TestExecutor : Executor + { + public TestExecutor() : base("test") { } + + // Some other business logic could be here + public void DoSomething() { } + } + """; + + // File 2: Another partial with [MessageHandler] methods + var file2 = """ + using System.Threading; + using System.Threading.Tasks; + using Microsoft.Agents.AI.Workflows; + + namespace TestNamespace; + + public partial class TestExecutor + { + [MessageHandler] + private void HandleString(string message, IWorkflowContext context) { } + + [MessageHandler] + private ValueTask HandleIntAsync(int message, IWorkflowContext context) + { + return default; + } + } + """; + + // Run generator with both files + var result = GeneratorTestHelper.RunGenerator(file1, file2); + + // Should generate one file for the executor + result.RunResult.GeneratedTrees.Should().HaveCount(1); + result.RunResult.Diagnostics.Should().BeEmpty(); + + var generated = result.RunResult.GeneratedTrees[0].ToString(); + + // Should have both handlers registered + generated.Should().Contain(".AddHandler(this.HandleString)"); + generated.Should().Contain(".AddHandler(this.HandleIntAsync)"); + + // Verify the generated code compiles with all three partials combined + var compilationErrors = result.OutputCompilation.GetDiagnostics() + .Where(d => d.Severity == CodeAnalysis.DiagnosticSeverity.Error) + .ToList(); + + compilationErrors.Should().BeEmpty( + "generated partial should compile correctly with the other partial files"); + } + + [Fact] + public void PartialClass_HandlersInBothFiles_GeneratesAllHandlers() + { + // File 1: Partial with one handler + var file1 = """ + using System.Threading; + using System.Threading.Tasks; + using 
Microsoft.Agents.AI.Workflows; + + namespace TestNamespace; + + public partial class TestExecutor : Executor + { + public TestExecutor() : base("test") { } + + [MessageHandler] + private void HandleFromFile1(string message, IWorkflowContext context) { } + } + """; + + // File 2: Another partial with another handler + var file2 = """ + using System.Threading; + using System.Threading.Tasks; + using Microsoft.Agents.AI.Workflows; + + namespace TestNamespace; + + public partial class TestExecutor + { + [MessageHandler] + private void HandleFromFile2(int message, IWorkflowContext context) { } + } + """; + + var result = GeneratorTestHelper.RunGenerator(file1, file2); + + result.RunResult.GeneratedTrees.Should().HaveCount(1); + result.RunResult.Diagnostics.Should().BeEmpty(); + + var generated = result.RunResult.GeneratedTrees[0].ToString(); + + // Both handlers from different files should be registered + generated.Should().Contain(".AddHandler(this.HandleFromFile1)"); + generated.Should().Contain(".AddHandler(this.HandleFromFile2)"); + } + + [Fact] + public void PartialClass_SendsYieldsInBothFiles_GeneratesAlOverrides() + { + // File 1: Partial with one handler + var file1 = """ + using System.Threading; + using System.Threading.Tasks; + using Microsoft.Agents.AI.Workflows; + + namespace TestNamespace; + + [YieldsOutput(typeof(string))] + [SendsMessage(typeof(int))] + public partial class TestExecutor : Executor + { + public TestExecutor() : base("test") { } + + [MessageHandler] + private void HandleFromFile1(string message, IWorkflowContext context) { } + } + """; + + // File 2: Another partial with another handler + var file2 = """ + using System.Threading; + using System.Threading.Tasks; + using Microsoft.Agents.AI.Workflows; + + namespace TestNamespace; + + [YieldsOutput(typeof(int))] + [SendsMessage(typeof(string))] + public partial class TestExecutor + { + [MessageHandler] + private void HandleFromFile2(int message, IWorkflowContext context) { } + } + """; + + 
var result = GeneratorTestHelper.RunGenerator(file1, file2); + + result.RunResult.GeneratedTrees.Should().HaveCount(1); + result.RunResult.Diagnostics.Should().BeEmpty(); + + var generated = result.RunResult.GeneratedTrees[0].ToString(); + + // Verify ConfigureSentTypes override + var sendsStart = generated.IndexOf("protected override ISet ConfigureSentTypes()", StringComparison.Ordinal); + sendsStart.Should().NotBe(-1, "should generate ConfigureSentTypes override"); + + var sendsEnd = generated.IndexOf("}", sendsStart, StringComparison.Ordinal); + sendsEnd.Should().NotBe(-1, "should close ConfigureSentTypes override"); + + generated.Substring(sendsStart, sendsEnd - sendsStart).Should().ContainAll( + "types.Add(typeof(string));", + "types.Add(typeof(int));"); + + // Verify ConfigureYieldTypes override + var yieldsStart = generated.IndexOf("protected override ISet ConfigureYieldTypes()", StringComparison.Ordinal); + yieldsStart.Should().NotBe(-1, "should generate ConfigureYieldTypes override"); + + var yieldsEnd = generated.IndexOf("}", yieldsStart, StringComparison.Ordinal); + yieldsEnd.Should().NotBe(-1, "should close ConfigureYieldTypes override"); + + generated.Substring(yieldsStart, yieldsEnd - yieldsStart).Should().ContainAll( + "types.Add(typeof(string));", + "types.Add(typeof(int));"); + } + + #endregion + + #region Diagnostic Tests + + [Fact] + public void NonPartialClass_ProducesDiagnosticAndNoSource() + { + var source = """ + using System.Threading; + using System.Threading.Tasks; + using Microsoft.Agents.AI.Workflows; + + namespace TestNamespace; + + public class TestExecutor : Executor + { + public TestExecutor() : base("test") { } + + [MessageHandler] + private void HandleMessage(string message, IWorkflowContext context) { } + } + """; + + var result = GeneratorTestHelper.RunGenerator(source); + + // Should produce MAFGENWF003 diagnostic + result.RunResult.Diagnostics.Should().Contain(d => d.Id == "MAFGENWF003"); + + // Should NOT generate any source 
(to avoid CS0260) + result.RunResult.GeneratedTrees.Should().BeEmpty( + "non-partial classes should not have source generated to avoid CS0260 compiler error"); + } + + [Fact] + public void NonExecutorClass_ProducesDiagnostic() + { + var source = """ + using System.Threading; + using System.Threading.Tasks; + using Microsoft.Agents.AI.Workflows; + + namespace TestNamespace; + + public partial class NotAnExecutor + { + [MessageHandler] + private void HandleMessage(string message, IWorkflowContext context) { } + } + """; + + var result = GeneratorTestHelper.RunGenerator(source); + + result.RunResult.Diagnostics.Should().Contain(d => d.Id == "MAFGENWF004"); + } + + [Fact] + public void StaticHandler_ProducesDiagnostic() + { + var source = """ + using System.Threading; + using System.Threading.Tasks; + using Microsoft.Agents.AI.Workflows; + + namespace TestNamespace; + + public partial class TestExecutor : Executor + { + public TestExecutor() : base("test") { } + + [MessageHandler] + private static void HandleMessage(string message, IWorkflowContext context) { } + } + """; + + var result = GeneratorTestHelper.RunGenerator(source); + + result.RunResult.Diagnostics.Should().Contain(d => d.Id == "MAFGENWF007"); + } + + [Fact] + public void MissingWorkflowContext_ProducesDiagnostic() + { + var source = """ + using System.Threading; + using System.Threading.Tasks; + using Microsoft.Agents.AI.Workflows; + + namespace TestNamespace; + + public partial class TestExecutor : Executor + { + public TestExecutor() : base("test") { } + + [MessageHandler] + private void HandleMessage(string message) { } + } + """; + + var result = GeneratorTestHelper.RunGenerator(source); + + result.RunResult.Diagnostics.Should().Contain(d => d.Id == "MAFGENWF005"); + } + + [Fact] + public void WrongSecondParameter_ProducesDiagnostic() + { + var source = """ + using System.Threading; + using System.Threading.Tasks; + using Microsoft.Agents.AI.Workflows; + + namespace TestNamespace; + + public partial 
class TestExecutor : Executor + { + public TestExecutor() : base("test") { } + + [MessageHandler] + private void HandleMessage(string message, string notContext) { } + } + """; + + var result = GeneratorTestHelper.RunGenerator(source); + + result.RunResult.Diagnostics.Should().Contain(d => d.Id == "MAFGENWF001"); + } + + #endregion + + #region No Generation Tests + + [Fact] + public void ClassWithManualConfigureRoutes_DoesNotGenerate() + { + var source = """ + using System.Threading; + using System.Threading.Tasks; + using Microsoft.Agents.AI.Workflows; + + namespace TestNamespace; + + public partial class TestExecutor : Executor + { + public TestExecutor() : base("test") { } + + protected override RouteBuilder ConfigureRoutes(RouteBuilder routeBuilder) + { + return routeBuilder; + } + + [MessageHandler] + private void HandleMessage(string message, IWorkflowContext context) { } + } + """; + + var result = GeneratorTestHelper.RunGenerator(source); + + // Should produce diagnostic but not generate code + result.RunResult.Diagnostics.Should().Contain(d => d.Id == "MAFGENWF006"); + result.RunResult.GeneratedTrees.Should().BeEmpty(); + } + + [Fact] + public void ClassWithNoMessageHandlers_DoesNotGenerate() + { + var source = """ + using System.Threading; + using System.Threading.Tasks; + using Microsoft.Agents.AI.Workflows; + + namespace TestNamespace; + + public partial class TestExecutor : Executor + { + public TestExecutor() : base("test") { } + + private void SomeOtherMethod(string message, IWorkflowContext context) { } + } + """; + + var result = GeneratorTestHelper.RunGenerator(source); + + result.RunResult.GeneratedTrees.Should().BeEmpty(); + } + + #endregion + + #region Protocol-Only Generation Tests + + [Fact] + public void ProtocolOnly_SendsMessage_WithManualRoutes_GeneratesConfigureSentTypes() + { + var source = """ + using System; + using System.Threading; + using System.Threading.Tasks; + using Microsoft.Agents.AI.Workflows; + + namespace TestNamespace; + + 
public class BroadcastMessage { } + + [SendsMessage(typeof(BroadcastMessage))] + public partial class TestExecutor : Executor + { + public TestExecutor() : base("test") { } + + protected override RouteBuilder ConfigureRoutes(RouteBuilder routeBuilder) + { + return routeBuilder; + } + } + """; + + var result = GeneratorTestHelper.RunGenerator(source); + + result.RunResult.GeneratedTrees.Should().HaveCount(1); + result.RunResult.Diagnostics.Should().BeEmpty(); + + var generated = result.RunResult.GeneratedTrees[0].ToString(); + + // Should NOT generate ConfigureRoutes (user has manual implementation) + generated.Should().NotContain("protected override RouteBuilder ConfigureRoutes"); + + // Should generate ConfigureSentTypes + generated.Should().Contain("protected override ISet ConfigureSentTypes()"); + generated.Should().Contain("types.Add(typeof(global::TestNamespace.BroadcastMessage))"); + } + + [Fact] + public void ProtocolOnly_YieldsOutput_WithManualRoutes_GeneratesConfigureYieldTypes() + { + var source = """ + using System; + using System.Threading; + using System.Threading.Tasks; + using Microsoft.Agents.AI.Workflows; + + namespace TestNamespace; + + public class OutputMessage { } + + [YieldsOutput(typeof(OutputMessage))] + public partial class TestExecutor : Executor + { + public TestExecutor() : base("test") { } + + protected override RouteBuilder ConfigureRoutes(RouteBuilder routeBuilder) + { + return routeBuilder; + } + } + """; + + var result = GeneratorTestHelper.RunGenerator(source); + + result.RunResult.GeneratedTrees.Should().HaveCount(1); + result.RunResult.Diagnostics.Should().BeEmpty(); + + var generated = result.RunResult.GeneratedTrees[0].ToString(); + + // Should NOT generate ConfigureRoutes (user has manual implementation) + generated.Should().NotContain("protected override RouteBuilder ConfigureRoutes"); + + // Should generate ConfigureYieldTypes + generated.Should().Contain("protected override ISet ConfigureYieldTypes()"); + 
generated.Should().Contain("types.Add(typeof(global::TestNamespace.OutputMessage))"); + } + + [Fact] + public void ProtocolOnly_BothAttributes_WithManualRoutes_GeneratesBothOverrides() + { + var source = """ + using System; + using System.Threading; + using System.Threading.Tasks; + using Microsoft.Agents.AI.Workflows; + + namespace TestNamespace; + + public class SendMessage { } + public class YieldMessage { } + + [SendsMessage(typeof(SendMessage))] + [YieldsOutput(typeof(YieldMessage))] + public partial class TestExecutor : Executor + { + public TestExecutor() : base("test") { } + + protected override RouteBuilder ConfigureRoutes(RouteBuilder routeBuilder) + { + return routeBuilder; + } + } + """; + + var result = GeneratorTestHelper.RunGenerator(source); + + result.RunResult.GeneratedTrees.Should().HaveCount(1); + result.RunResult.Diagnostics.Should().BeEmpty(); + + var generated = result.RunResult.GeneratedTrees[0].ToString(); + + // Should NOT generate ConfigureRoutes + generated.Should().NotContain("protected override RouteBuilder ConfigureRoutes"); + + // Should generate both protocol overrides + generated.Should().Contain("protected override ISet ConfigureSentTypes()"); + generated.Should().Contain("types.Add(typeof(global::TestNamespace.SendMessage))"); + generated.Should().Contain("protected override ISet ConfigureYieldTypes()"); + generated.Should().Contain("types.Add(typeof(global::TestNamespace.YieldMessage))"); + } + + [Fact] + public void ProtocolOnly_MultipleSendsMessageAttributes_GeneratesAllTypes() + { + var source = """ + using System; + using System.Threading; + using System.Threading.Tasks; + using Microsoft.Agents.AI.Workflows; + + namespace TestNamespace; + + public class MessageA { } + public class MessageB { } + public class MessageC { } + + [SendsMessage(typeof(MessageA))] + [SendsMessage(typeof(MessageB))] + [SendsMessage(typeof(MessageC))] + public partial class TestExecutor : Executor + { + public TestExecutor() : base("test") { } + + 
protected override RouteBuilder ConfigureRoutes(RouteBuilder routeBuilder) + { + return routeBuilder; + } + } + """; + + var result = GeneratorTestHelper.RunGenerator(source); + + result.RunResult.GeneratedTrees.Should().HaveCount(1); + + var generated = result.RunResult.GeneratedTrees[0].ToString(); + generated.Should().Contain("types.Add(typeof(global::TestNamespace.MessageA))"); + generated.Should().Contain("types.Add(typeof(global::TestNamespace.MessageB))"); + generated.Should().Contain("types.Add(typeof(global::TestNamespace.MessageC))"); + } + + [Fact] + public void ProtocolOnly_NonPartialClass_ProducesDiagnostic() + { + var source = """ + using System; + using System.Threading; + using System.Threading.Tasks; + using Microsoft.Agents.AI.Workflows; + + namespace TestNamespace; + + public class BroadcastMessage { } + + [SendsMessage(typeof(BroadcastMessage))] + public class TestExecutor : Executor + { + public TestExecutor() : base("test") { } + + protected override RouteBuilder ConfigureRoutes(RouteBuilder routeBuilder) + { + return routeBuilder; + } + } + """; + + var result = GeneratorTestHelper.RunGenerator(source); + + // Should produce MAFGENWF003 diagnostic (class must be partial) + result.RunResult.Diagnostics.Should().Contain(d => d.Id == "MAFGENWF003"); + result.RunResult.GeneratedTrees.Should().BeEmpty(); + } + + [Fact] + public void ProtocolOnly_NonExecutorClass_ProducesDiagnostic() + { + var source = """ + using System; + using System.Threading; + using System.Threading.Tasks; + using Microsoft.Agents.AI.Workflows; + + namespace TestNamespace; + + public class BroadcastMessage { } + + [SendsMessage(typeof(BroadcastMessage))] + public partial class NotAnExecutor + { + } + """; + + var result = GeneratorTestHelper.RunGenerator(source); + + // Should produce MAFGENWF004 diagnostic (must derive from Executor) + result.RunResult.Diagnostics.Should().Contain(d => d.Id == "MAFGENWF004"); + result.RunResult.GeneratedTrees.Should().BeEmpty(); + } + + 
[Fact] + public void ProtocolOnly_NestedClass_GeneratesCorrectPartialHierarchy() + { + var source = """ + using System; + using System.Threading; + using System.Threading.Tasks; + using Microsoft.Agents.AI.Workflows; + + namespace TestNamespace; + + public class BroadcastMessage { } + + public partial class OuterClass + { + [SendsMessage(typeof(BroadcastMessage))] + public partial class TestExecutor : Executor + { + public TestExecutor() : base("test") { } + + protected override RouteBuilder ConfigureRoutes(RouteBuilder routeBuilder) + { + return routeBuilder; + } + } + } + """; + + var result = GeneratorTestHelper.RunGenerator(source); + + result.RunResult.GeneratedTrees.Should().HaveCount(1); + result.RunResult.Diagnostics.Should().BeEmpty(); + + var generated = result.RunResult.GeneratedTrees[0].ToString(); + + // Verify partial declarations are present + generated.Should().Contain("partial class OuterClass"); + generated.Should().Contain("partial class TestExecutor"); + + // Verify protocol types are generated + generated.Should().Contain("types.Add(typeof(global::TestNamespace.BroadcastMessage))"); + } + + [Fact] + public void ProtocolOnly_GenericExecutor_GeneratesCorrectly() + { + var source = """ + using System; + using System.Threading; + using System.Threading.Tasks; + using Microsoft.Agents.AI.Workflows; + + namespace TestNamespace; + + public class BroadcastMessage { } + + [SendsMessage(typeof(BroadcastMessage))] + public partial class GenericExecutor : Executor where T : class + { + public GenericExecutor() : base("generic") { } + + protected override RouteBuilder ConfigureRoutes(RouteBuilder routeBuilder) + { + return routeBuilder; + } + } + """; + + var result = GeneratorTestHelper.RunGenerator(source); + + result.RunResult.GeneratedTrees.Should().HaveCount(1); + + var generated = result.RunResult.GeneratedTrees[0].ToString(); + generated.Should().Contain("partial class GenericExecutor"); + 
generated.Should().Contain("types.Add(typeof(global::TestNamespace.BroadcastMessage))"); + } + + #endregion + + #region Generic Executor Tests + + [Fact] + public void GenericExecutor_GeneratesCorrectly() + { + var source = """ + using System.Threading; + using System.Threading.Tasks; + using Microsoft.Agents.AI.Workflows; + + namespace TestNamespace; + + public partial class GenericExecutor : Executor where T : class + { + public GenericExecutor() : base("generic") { } + + [MessageHandler] + private void HandleMessage(T message, IWorkflowContext context) { } + } + """; + + var result = GeneratorTestHelper.RunGenerator(source); + + result.RunResult.GeneratedTrees.Should().HaveCount(1); + + var generated = result.RunResult.GeneratedTrees[0].ToString(); + generated.Should().Contain("partial class GenericExecutor"); + generated.Should().Contain(".AddHandler(this.HandleMessage)"); + } + + #endregion +} diff --git a/dotnet/tests/Microsoft.Agents.AI.Workflows.Generators.UnitTests/GeneratorTestHelper.cs b/dotnet/tests/Microsoft.Agents.AI.Workflows.Generators.UnitTests/GeneratorTestHelper.cs new file mode 100644 index 0000000000..f631fc8551 --- /dev/null +++ b/dotnet/tests/Microsoft.Agents.AI.Workflows.Generators.UnitTests/GeneratorTestHelper.cs @@ -0,0 +1,145 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Collections.Generic; +using System.Collections.Immutable; +using System.IO; +using System.Linq; +using System.Reflection; +using System.Threading; +using System.Threading.Tasks; +using FluentAssertions; +using Microsoft.CodeAnalysis; +using Microsoft.CodeAnalysis.CSharp; + +namespace Microsoft.Agents.AI.Workflows.Generators.UnitTests; + +/// +/// Helper class for testing the ExecutorRouteGenerator. +/// +public static class GeneratorTestHelper +{ + /// + /// Runs the ExecutorRouteGenerator on the provided source code and returns the result. 
+ /// + public static GeneratorRunResult RunGenerator(string source) => RunGenerator([source]); + + /// + /// Runs the ExecutorRouteGenerator on multiple source files and returns the result. + /// Use this to test scenarios with partial classes split across files. + /// + public static GeneratorRunResult RunGenerator(params string[] sources) + { + var syntaxTrees = sources.Select(s => CSharpSyntaxTree.ParseText(s)).ToArray(); + + var references = GetMetadataReferences(); + + var compilation = CSharpCompilation.Create( + assemblyName: "TestAssembly", + syntaxTrees: syntaxTrees, + references: references, + options: new CSharpCompilationOptions(OutputKind.DynamicallyLinkedLibrary)); + + var generator = new ExecutorRouteGenerator(); + + GeneratorDriver driver = CSharpGeneratorDriver.Create(generator); + driver = driver.RunGeneratorsAndUpdateCompilation(compilation, out var outputCompilation, out var diagnostics); + + var runResult = driver.GetRunResult(); + + return new GeneratorRunResult( + runResult, + outputCompilation, + diagnostics); + } + + /// + /// Runs the generator and asserts that it produces exactly one generated file with the expected content. + /// + public static void AssertGeneratesSource(string source, string expectedGeneratedSource) + { + var result = RunGenerator(source); + + result.RunResult.GeneratedTrees.Should().HaveCount(1, "expected exactly one generated file"); + + var generatedSource = result.RunResult.GeneratedTrees[0].ToString(); + generatedSource.Should().Contain(expectedGeneratedSource); + } + + /// + /// Runs the generator and asserts that no source is generated. + /// + public static void AssertGeneratesNoSource(string source) + { + var result = RunGenerator(source); + result.RunResult.GeneratedTrees.Should().BeEmpty("expected no generated files"); + } + + /// + /// Runs the generator and asserts that a specific diagnostic is produced. 
+ /// + public static void AssertProducesDiagnostic(string source, string diagnosticId) + { + var result = RunGenerator(source); + + var generatorDiagnostics = result.RunResult.Diagnostics; + generatorDiagnostics.Should().Contain(d => d.Id == diagnosticId, + $"expected diagnostic {diagnosticId} to be produced"); + } + + /// + /// Runs the generator and asserts that compilation succeeds with no errors. + /// + public static void AssertCompilationSucceeds(string source) + { + var result = RunGenerator(source); + + var errors = result.OutputCompilation.GetDiagnostics() + .Where(d => d.Severity == DiagnosticSeverity.Error) + .ToList(); + + errors.Should().BeEmpty("compilation should succeed without errors"); + } + + private static ImmutableArray GetMetadataReferences() + { + var assemblies = new[] + { + typeof(object).Assembly, // System.Runtime + typeof(Attribute).Assembly, // System.Runtime + typeof(ValueTask).Assembly, // System.Threading.Tasks.Extensions + typeof(CancellationToken).Assembly, // System.Threading + typeof(ISet<>).Assembly, // System.Collections + typeof(Executor).Assembly, // Microsoft.Agents.AI.Workflows + }; + + var references = new List(); + + foreach (var assembly in assemblies) + { + references.Add(MetadataReference.CreateFromFile(assembly.Location)); + } + + // Add netstandard reference + var netstandardAssembly = Assembly.Load("netstandard, Version=2.0.0.0"); + references.Add(MetadataReference.CreateFromFile(netstandardAssembly.Location)); + + // Add System.Runtime reference for core types + var runtimeAssemblyPath = Path.GetDirectoryName(typeof(object).Assembly.Location)!; + var systemRuntimePath = Path.Combine(runtimeAssemblyPath, "System.Runtime.dll"); + if (File.Exists(systemRuntimePath)) + { + references.Add(MetadataReference.CreateFromFile(systemRuntimePath)); + } + + return [.. references.Distinct()]; + } +} + +/// +/// Contains the results of running the generator. 
+/// +public record GeneratorRunResult( + GeneratorDriverRunResult RunResult, + Compilation OutputCompilation, + ImmutableArray Diagnostics); diff --git a/dotnet/tests/Microsoft.Agents.AI.Workflows.Generators.UnitTests/Microsoft.Agents.AI.Workflows.Generators.UnitTests.csproj b/dotnet/tests/Microsoft.Agents.AI.Workflows.Generators.UnitTests/Microsoft.Agents.AI.Workflows.Generators.UnitTests.csproj new file mode 100644 index 0000000000..81b91bf17d --- /dev/null +++ b/dotnet/tests/Microsoft.Agents.AI.Workflows.Generators.UnitTests/Microsoft.Agents.AI.Workflows.Generators.UnitTests.csproj @@ -0,0 +1,23 @@ + + + + + net10.0 + + $(NoWarn);RCS1118 + + + + + + + + + + + + + + diff --git a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/Sample/05_Simple_Workflow_Checkpointing.cs b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/Sample/05_Simple_Workflow_Checkpointing.cs index aab5fd0958..7216d44208 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/Sample/05_Simple_Workflow_Checkpointing.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/Sample/05_Simple_Workflow_Checkpointing.cs @@ -42,7 +42,7 @@ await environment.StreamAsync(workflow, NumberSignal.Init, checkpointManager) { await handle.DisposeAsync().ConfigureAwait(false); - checkpointed = await environment.ResumeStreamAsync(workflow, targetCheckpoint, checkpointManager, runId: handle.RunId, cancellationToken: CancellationToken.None) + checkpointed = await environment.ResumeStreamAsync(workflow, targetCheckpoint, checkpointManager, cancellationToken: CancellationToken.None) .ConfigureAwait(false); handle = checkpointed.Run; } diff --git a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/Sample/13_Subworkflow_Checkpointing.cs b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/Sample/13_Subworkflow_Checkpointing.cs new file mode 100644 index 0000000000..113731e679 --- /dev/null +++ b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/Sample/13_Subworkflow_Checkpointing.cs @@ 
-0,0 +1,95 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Collections.Generic; +using System.Diagnostics; +using System.IO; +using System.Threading.Tasks; +using Microsoft.Extensions.AI; + +namespace Microsoft.Agents.AI.Workflows.Sample; + +internal static class Step13EntryPoint +{ + public static Workflow SubworkflowInstance + { + get + { + OutputMessagesExecutor output = new(new ChatProtocolExecutorOptions() { StringMessageChatRole = ChatRole.User }); + return new WorkflowBuilder(output).WithOutputFrom(output).Build(); + } + } + + public static Workflow WorkflowInstance + { + get + { + ExecutorBinding subworkflow = SubworkflowInstance.BindAsExecutor("EchoSubworkflow"); + return new WorkflowBuilder(subworkflow).WithOutputFrom(subworkflow).Build(); + } + } + + public static async ValueTask RunAsAgentAsync(TextWriter writer, string input, IWorkflowExecutionEnvironment environment, AgentThread? thread) + { + AIAgent hostAgent = WorkflowInstance.AsAgent("echo-workflow", "EchoW", executionEnvironment: environment, includeWorkflowOutputsInResponse: true); + + thread ??= await hostAgent.GetNewThreadAsync(); + AgentResponse response; + ResponseContinuationToken? continuationToken = null; + do + { + response = await hostAgent.RunAsync(input, thread, new AgentRunOptions { ContinuationToken = continuationToken }); + } while ((continuationToken = response.ContinuationToken) is { }); + + foreach (ChatMessage message in response.Messages) + { + writer.WriteLine($"{message.AuthorName}: {message.Text}"); + } + + return thread; + } + + public static async ValueTask RunAsync(TextWriter writer, string input, IWorkflowExecutionEnvironment environment, CheckpointManager checkpointManager, CheckpointInfo? resumeFrom) + { + await using Checkpointed checkpointed = await BeginAsync(); + StreamingRun run = checkpointed.Run; + + await run.TrySendMessageAsync(new TurnToken()); + + CheckpointInfo? 
lastCheckpoint = null; + await foreach (WorkflowEvent evt in run.WatchStreamAsync()) + { + if (evt is WorkflowOutputEvent output) + { + if (output.Data is List messages) + { + foreach (ChatMessage message in messages) + { + writer.WriteLine($"{output.SourceId}: {message.Text}"); + } + } + else + { + Debug.Fail($"Unexpected output type: {(output.Data == null ? "null" : output.Data?.GetType().Name)}"); + } + } + else if (evt is SuperStepCompletedEvent stepCompleted) + { + lastCheckpoint = stepCompleted.CompletionInfo?.Checkpoint; + } + } + + return lastCheckpoint!; + + async ValueTask> BeginAsync() + { + if (resumeFrom == null) + { + return await environment.StreamAsync(WorkflowInstance, input, checkpointManager); + } + + Checkpointed checkpointed = await environment.ResumeStreamAsync(WorkflowInstance, resumeFrom, checkpointManager); + await checkpointed.Run.TrySendMessageAsync(input); + return checkpointed; + } + } +} diff --git a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/SampleSmokeTest.cs b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/SampleSmokeTest.cs index dbe3a56d06..214333f11e 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/SampleSmokeTest.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/SampleSmokeTest.cs @@ -371,6 +371,72 @@ IEnumerable EchoesForInput(string input) Action CreateValidator(string expected) => actual => actual.Should().Be(expected); } + + [Theory] + [InlineData(ExecutionEnvironment.InProcess_Lockstep)] + [InlineData(ExecutionEnvironment.InProcess_OffThread)] + [InlineData(ExecutionEnvironment.InProcess_Concurrent)] + internal async Task Test_RunSample_Step13Async(ExecutionEnvironment environment) + { + IWorkflowExecutionEnvironment executionEnvironment = environment.ToWorkflowExecutionEnvironment(); + + CheckpointManager checkpointManager = CheckpointManager.CreateInMemory(); + CheckpointInfo? 
resumeFrom = null; + + await RunAndValidateAsync(1); + + // this should crash before fix + await RunAndValidateAsync(2); + + async ValueTask RunAndValidateAsync(int step) + { + using StringWriter writer = new(); + string input = $"[{step}] Hello, World!"; + + resumeFrom = await Step13EntryPoint.RunAsync(writer, input, executionEnvironment, checkpointManager, resumeFrom); + + string result = writer.ToString(); + string[] lines = result.Split([Environment.NewLine], StringSplitOptions.RemoveEmptyEntries); + + const string ExpectedSource = "EchoSubworkflow"; + Assert.Collection(lines, + line => Assert.Contains($"{ExpectedSource}: {input}", line) + ); + } + } + + [Theory] + [InlineData(ExecutionEnvironment.InProcess_Lockstep)] + [InlineData(ExecutionEnvironment.InProcess_OffThread)] + [InlineData(ExecutionEnvironment.InProcess_Concurrent)] + internal async Task Test_RunSample_Step13aAsync(ExecutionEnvironment environment) + { + IWorkflowExecutionEnvironment executionEnvironment = environment.ToWorkflowExecutionEnvironment(); + AgentThread? thread = null; + + await RunAndValidateAsync(1); + + // this should crash before fix + await RunAndValidateAsync(2); + + async ValueTask RunAndValidateAsync(int step) + { + using StringWriter writer = new(); + string input = $"[{step}] Hello, World!"; + + thread = await Step13EntryPoint.RunAsAgentAsync(writer, input, executionEnvironment, thread); + + string result = writer.ToString(); + string[] lines = result.Split([Environment.NewLine], StringSplitOptions.RemoveEmptyEntries); + + // We expect to get the message that was passed in directly; since we are passing it in as a string, there is no associated + // author information. The ExpectedSource is empty string. 
+ const string ExpectedSource = ""; + Assert.Collection(lines, + line => Assert.Contains($"{ExpectedSource}: {input}", line) + ); + } + } } internal sealed class VerifyingPlaybackResponder diff --git a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/TestEchoAgent.cs b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/TestEchoAgent.cs index b971736b74..cf63843def 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/TestEchoAgent.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/TestEchoAgent.cs @@ -26,7 +26,7 @@ public override ValueTask GetNewThreadAsync(CancellationToken cance private static ChatMessage UpdateThread(ChatMessage message, InMemoryAgentThread? thread = null) { - thread?.MessageStore.Add(message); + thread?.ChatHistoryProvider.Add(message); return message; } diff --git a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/TestRunContext.cs b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/TestRunContext.cs index 57375b8341..b90bd30c54 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/TestRunContext.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Workflows.UnitTests/TestRunContext.cs @@ -68,6 +68,9 @@ public ValueTask PostAsync(ExternalRequest request) } internal Dictionary> QueuedMessages { get; } = []; + + internal Dictionary> QueuedOutputs { get; } = []; + public ValueTask SendMessageAsync(string sourceId, object message, string? targetId = null, CancellationToken cancellationToken = default) { if (!this.QueuedMessages.TryGetValue(sourceId, out List? deliveryQueue)) @@ -79,6 +82,17 @@ public ValueTask SendMessageAsync(string sourceId, object message, string? targe return default; } + public ValueTask YieldOutputAsync(string sourceId, object output, CancellationToken cancellationToken = default) + { + if (!this.QueuedOutputs.TryGetValue(sourceId, out List? 
outputQueue)) + { + this.QueuedOutputs[sourceId] = outputQueue = []; + } + + outputQueue.Add(output); + return default; + } + ValueTask IRunnerContext.AdvanceAsync(CancellationToken cancellationToken) => throw new NotImplementedException(); @@ -104,8 +118,11 @@ public ValueTask> GetStartingExecutorInputTypesAsync(Cancellat public ValueTask ForwardWorkflowEventAsync(WorkflowEvent workflowEvent, CancellationToken cancellationToken = default) => this.AddEventAsync(workflowEvent, cancellationToken); - public ValueTask SendMessageAsync(string senderId, [System.Diagnostics.CodeAnalysis.DisallowNull] TMessage message, CancellationToken cancellationToken = default) - => this.SendMessageAsync(senderId, message, cancellationToken); + ValueTask ISuperStepJoinContext.SendMessageAsync(string senderId, [System.Diagnostics.CodeAnalysis.DisallowNull] TMessage message, CancellationToken cancellationToken) + => this.SendMessageAsync(senderId, message, cancellationToken: cancellationToken); + + ValueTask ISuperStepJoinContext.YieldOutputAsync(string senderId, [System.Diagnostics.CodeAnalysis.DisallowNull] TOutput output, CancellationToken cancellationToken) + => this.YieldOutputAsync(senderId, output, cancellationToken); ValueTask ISuperStepJoinContext.AttachSuperstepAsync(ISuperStepRunner superStepRunner, CancellationToken cancellationToken) => new(string.Empty); ValueTask ISuperStepJoinContext.DetachSuperstepAsync(string joinId) => new(false); diff --git a/dotnet/tests/OpenAIAssistant.IntegrationTests/OpenAIAssistantClientExtensionsTests.cs b/dotnet/tests/OpenAIAssistant.IntegrationTests/OpenAIAssistantClientExtensionsTests.cs index 0c8d6b80dd..02f5f36a76 100644 --- a/dotnet/tests/OpenAIAssistant.IntegrationTests/OpenAIAssistantClientExtensionsTests.cs +++ b/dotnet/tests/OpenAIAssistant.IntegrationTests/OpenAIAssistantClientExtensionsTests.cs @@ -48,7 +48,7 @@ public async Task CreateAIAgentAsync_WithAIFunctionTool_InvokesFunctionAsync(str Tools = [weatherFunction] } }), - 
"CreateWithChatClientAgentOptionsSync" => this._assistantClient.CreateAIAgent( + "CreateWithChatClientAgentOptionsSync" => await this._assistantClient.CreateAIAgentAsync( model: s_config.ChatModelId!, options: new ChatClientAgentOptions() { @@ -115,7 +115,7 @@ public async Task CreateAIAgentAsync_WithHostedCodeInterpreter_RunsCodeAsync(str Tools = [codeInterpreterTool] } }), - "CreateWithChatClientAgentOptionsSync" => this._assistantClient.CreateAIAgent( + "CreateWithChatClientAgentOptionsSync" => await this._assistantClient.CreateAIAgentAsync( model: s_config.ChatModelId!, options: new ChatClientAgentOptions() { @@ -193,7 +193,7 @@ You are a helpful agent that can help fetch data from files you know about. Tools = [fileSearchTool] } }), - "CreateWithChatClientAgentOptionsSync" => this._assistantClient.CreateAIAgent( + "CreateWithChatClientAgentOptionsSync" => await this._assistantClient.CreateAIAgentAsync( model: s_config.ChatModelId!, options: new ChatClientAgentOptions() { diff --git a/dotnet/tests/OpenAIChatCompletion.IntegrationTests/OpenAIChatCompletionFixture.cs b/dotnet/tests/OpenAIChatCompletion.IntegrationTests/OpenAIChatCompletionFixture.cs index 0fb9745d2d..f085187a21 100644 --- a/dotnet/tests/OpenAIChatCompletion.IntegrationTests/OpenAIChatCompletionFixture.cs +++ b/dotnet/tests/OpenAIChatCompletion.IntegrationTests/OpenAIChatCompletionFixture.cs @@ -32,12 +32,12 @@ public async Task> GetChatHistoryAsync(AgentThread thread) { var typedThread = (ChatClientAgentThread)thread; - if (typedThread.MessageStore is null) + if (typedThread.ChatHistoryProvider is null) { return []; } - return (await typedThread.MessageStore.InvokingAsync(new([]))).ToList(); + return (await typedThread.ChatHistoryProvider.InvokingAsync(new([]))).ToList(); } public Task CreateChatClientAgentAsync( diff --git a/dotnet/tests/OpenAIResponse.IntegrationTests/OpenAIResponseFixture.cs b/dotnet/tests/OpenAIResponse.IntegrationTests/OpenAIResponseFixture.cs index c57e1c460d..9e8db6fb21 
100644 --- a/dotnet/tests/OpenAIResponse.IntegrationTests/OpenAIResponseFixture.cs +++ b/dotnet/tests/OpenAIResponse.IntegrationTests/OpenAIResponseFixture.cs @@ -50,12 +50,12 @@ public async Task> GetChatHistoryAsync(AgentThread thread) return [.. previousMessages, responseMessage]; } - if (typedThread.MessageStore is null) + if (typedThread.ChatHistoryProvider is null) { return []; } - return (await typedThread.MessageStore.InvokingAsync(new([]))).ToList(); + return (await typedThread.ChatHistoryProvider.InvokingAsync(new([]))).ToList(); } private static ChatMessage ConvertToChatMessage(ResponseItem item) diff --git a/python/CHANGELOG.md b/python/CHANGELOG.md index a05b0a72a5..dab06792e0 100644 --- a/python/CHANGELOG.md +++ b/python/CHANGELOG.md @@ -7,20 +7,42 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [1.0.0b260116] - 2026-01-16 + +### Added + +- **agent-framework-azure-ai**: Create/Get Agent API for Azure V1 ([#3192](https://github.com/microsoft/agent-framework/pull/3192)) +- **agent-framework-core**: Create/Get Agent API for OpenAI Assistants ([#3208](https://github.com/microsoft/agent-framework/pull/3208)) +- **agent-framework-ag-ui**: Support service-managed thread on AG-UI ([#3136](https://github.com/microsoft/agent-framework/pull/3136)) +- **agent-framework-ag-ui**: Add MCP tool support for AG-UI approval flows ([#3212](https://github.com/microsoft/agent-framework/pull/3212)) +- **samples**: Add AzureAI sample for downloading code interpreter generated files ([#3189](https://github.com/microsoft/agent-framework/pull/3189)) + +### Changed + +- **agent-framework-core**: [BREAKING] Rename `create_agent` to `as_agent` ([#3249](https://github.com/microsoft/agent-framework/pull/3249)) +- **agent-framework-core**: [BREAKING] Rename `WorkflowOutputEvent.source_executor_id` to `executor_id` for API consistency ([#3166](https://github.com/microsoft/agent-framework/pull/3166)) + +### Fixed + +- 
**agent-framework-core**: Properly configure structured outputs based on new options dict ([#3213](https://github.com/microsoft/agent-framework/pull/3213)) +- **agent-framework-core**: Correct `FunctionResultContent` ordering in `WorkflowAgent.merge_updates` ([#3168](https://github.com/microsoft/agent-framework/pull/3168)) +- **agent-framework-azurefunctions**: Update `DurableAIAgent` and fix integration tests ([#3241](https://github.com/microsoft/agent-framework/pull/3241)) +- **agent-framework-azure-ai**: Create/Get Agent API fixes and example improvements ([#3246](https://github.com/microsoft/agent-framework/pull/3246)) + ## [1.0.0b260114] - 2026-01-14 ### Added - **agent-framework-azure-ai**: Create/Get Agent API for Azure V2 ([#3059](https://github.com/microsoft/agent-framework/pull/3059)) by @moonbox3 -- **agent-framework-declarative**: Add declarative workflow runtime ([#2815](https://github.com/microsoft/agent-framework/pull/2815)) by @emattson -- **agent-framework-ag-ui**: Add dependencies param to ag-ui FastAPI endpoint ([#3191](https://github.com/microsoft/agent-framework/pull/3191)) by @emattson +- **agent-framework-declarative**: Add declarative workflow runtime ([#2815](https://github.com/microsoft/agent-framework/pull/2815)) by @moonbox3 +- **agent-framework-ag-ui**: Add dependencies param to ag-ui FastAPI endpoint ([#3191](https://github.com/microsoft/agent-framework/pull/3191)) by @moonbox3 - **agent-framework-ag-ui**: Add Pydantic request model and OpenAPI tags support to AG-UI FastAPI endpoint ([#2522](https://github.com/microsoft/agent-framework/pull/2522)) by @claude89757 - **agent-framework-core**: Add tool call/result content types and update connectors and samples ([#2971](https://github.com/microsoft/agent-framework/pull/2971)) by @moonbox3 -- **agent-framework-core**: Add more specific exceptions to Workflow ([#3188](https://github.com/microsoft/agent-framework/pull/3188)) by @taochenms +- **agent-framework-core**: Add more specific 
exceptions to Workflow ([#3188](https://github.com/microsoft/agent-framework/pull/3188)) by @TaoChenOSU ### Changed -- **agent-framework-core**: [BREAKING] Refactor orchestrations ([#3023](https://github.com/microsoft/agent-framework/pull/3023)) by @taochenms +- **agent-framework-core**: [BREAKING] Refactor orchestrations ([#3023](https://github.com/microsoft/agent-framework/pull/3023)) by @TaoChenOSU - **agent-framework-core**: [BREAKING] Introducing Options as TypedDict and Generic ([#3140](https://github.com/microsoft/agent-framework/pull/3140)) by @eavanvalkenburg - **agent-framework-core**: [BREAKING] Removed display_name, renamed context_providers, middleware and AggregateContextProvider ([#3139](https://github.com/microsoft/agent-framework/pull/3139)) by @eavanvalkenburg - **agent-framework-core**: MCP Improvements: improved connection loss behavior, pagination for loading and a param to control representation ([#3154](https://github.com/microsoft/agent-framework/pull/3154)) by @eavanvalkenburg @@ -28,10 +50,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Fixed -- **agent-framework-anthropic**: Fix duplicate ToolCallStartEvent in streaming tool calls ([#3051](https://github.com/microsoft/agent-framework/pull/3051)) by @emattson +- **agent-framework-anthropic**: Fix duplicate ToolCallStartEvent in streaming tool calls ([#3051](https://github.com/microsoft/agent-framework/pull/3051)) by @moonbox3 - **agent-framework-anthropic**: Fix Anthropic streaming response bugs ([#3141](https://github.com/microsoft/agent-framework/pull/3141)) by @eavanvalkenburg -- **agent-framework-ag-ui**: Execute tools with approval_mode, fix shared state, code cleanup ([#3079](https://github.com/microsoft/agent-framework/pull/3079)) by @emattson -- **agent-framework-azure-ai**: Fix AzureAIClient tool call bug for AG-UI use ([#3148](https://github.com/microsoft/agent-framework/pull/3148)) by @emattson +- **agent-framework-ag-ui**: Execute 
tools with approval_mode, fix shared state, code cleanup ([#3079](https://github.com/microsoft/agent-framework/pull/3079)) by @moonbox3 +- **agent-framework-azure-ai**: Fix AzureAIClient tool call bug for AG-UI use ([#3148](https://github.com/microsoft/agent-framework/pull/3148)) by @moonbox3 - **agent-framework-core**: Fix MCPStreamableHTTPTool to use new streamable_http_client API ([#3088](https://github.com/microsoft/agent-framework/pull/3088)) by @Copilot - **agent-framework-core**: Multiple bug fixes ([#3150](https://github.com/microsoft/agent-framework/pull/3150)) by @eavanvalkenburg @@ -489,7 +511,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 For more information, see the [announcement blog post](https://devblogs.microsoft.com/foundry/introducing-microsoft-agent-framework-the-open-source-engine-for-agentic-ai-apps/). -[Unreleased]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b260107...HEAD +[Unreleased]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b260116...HEAD +[1.0.0b260116]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b260114...python-1.0.0b260116 +[1.0.0b260114]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b260107...python-1.0.0b260114 [1.0.0b260107]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b260106...python-1.0.0b260107 [1.0.0b260106]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251223...python-1.0.0b260106 [1.0.0b251223]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251218...python-1.0.0b251223 diff --git a/python/packages/a2a/agent_framework_a2a/_agent.py b/python/packages/a2a/agent_framework_a2a/_agent.py index ba534436d6..00e045fba6 100644 --- a/python/packages/a2a/agent_framework_a2a/_agent.py +++ b/python/packages/a2a/agent_framework_a2a/_agent.py @@ -31,11 +31,9 @@ AgentThread, BaseAgent, ChatMessage, - Contents, - DataContent, + Content, Role, - 
TextContent, - UriContent, + normalize_messages, prepend_agent_framework_to_user_agent, ) from agent_framework.observability import use_agent_instrumentation @@ -236,7 +234,7 @@ async def run_stream( Yields: An agent response item. """ - messages = self._normalize_messages(messages) + messages = normalize_messages(messages) a2a_message = self._prepare_message_for_a2a(messages[-1]) response_stream = self.client.send_message(a2a_message) @@ -332,7 +330,7 @@ def _prepare_message_for_a2a(self, message: ChatMessage) -> A2AMessage: A2APart( root=FilePart( file=FileWithBytes( - bytes=_get_uri_data(content.uri), + bytes=_get_uri_data(content.uri), # type: ignore[arg-type] mime_type=content.media_type, ), metadata=content.additional_properties, @@ -361,19 +359,19 @@ def _prepare_message_for_a2a(self, message: ChatMessage) -> A2AMessage: metadata=cast(dict[str, Any], message.additional_properties), ) - def _parse_contents_from_a2a(self, parts: Sequence[A2APart]) -> list[Contents]: - """Parse A2A Parts into Agent Framework Contents. + def _parse_contents_from_a2a(self, parts: Sequence[A2APart]) -> list[Content]: + """Parse A2A Parts into Agent Framework Content. Transforms A2A protocol Parts into framework-native Content objects, handling text, file (URI/bytes), and data parts with metadata preservation. 
""" - contents: list[Contents] = [] + contents: list[Content] = [] for part in parts: inner_part = part.root match inner_part.kind: case "text": contents.append( - TextContent( + Content.from_text( text=inner_part.text, additional_properties=inner_part.metadata, raw_representation=inner_part, @@ -382,7 +380,7 @@ def _parse_contents_from_a2a(self, parts: Sequence[A2APart]) -> list[Contents]: case "file": if isinstance(inner_part.file, FileWithUri): contents.append( - UriContent( + Content.from_uri( uri=inner_part.file.uri, media_type=inner_part.file.mime_type or "", additional_properties=inner_part.metadata, @@ -391,7 +389,7 @@ def _parse_contents_from_a2a(self, parts: Sequence[A2APart]) -> list[Contents]: ) elif isinstance(inner_part.file, FileWithBytes): contents.append( - DataContent( + Content.from_data( data=base64.b64decode(inner_part.file.bytes), media_type=inner_part.file.mime_type or "", additional_properties=inner_part.metadata, @@ -400,7 +398,7 @@ def _parse_contents_from_a2a(self, parts: Sequence[A2APart]) -> list[Contents]: ) case "data": contents.append( - TextContent( + Content.from_text( text=json.dumps(inner_part.data), additional_properties=inner_part.metadata, raw_representation=inner_part, diff --git a/python/packages/a2a/pyproject.toml b/python/packages/a2a/pyproject.toml index 1139d16e3b..0c57ab7e85 100644 --- a/python/packages/a2a/pyproject.toml +++ b/python/packages/a2a/pyproject.toml @@ -4,7 +4,7 @@ description = "A2A integration for Microsoft Agent Framework." 
authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] readme = "README.md" requires-python = ">=3.10" -version = "1.0.0b260114" +version = "1.0.0b260116" license-files = ["LICENSE"] urls.homepage = "https://aka.ms/agent-framework" urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" diff --git a/python/packages/a2a/tests/test_a2a_agent.py b/python/packages/a2a/tests/test_a2a_agent.py index 5d77345b20..eca97b2ac6 100644 --- a/python/packages/a2a/tests/test_a2a_agent.py +++ b/python/packages/a2a/tests/test_a2a_agent.py @@ -24,12 +24,8 @@ AgentResponse, AgentResponseUpdate, ChatMessage, - DataContent, - ErrorContent, - HostedFileContent, + Content, Role, - TextContent, - UriContent, ) from agent_framework.a2a import A2AAgent from pytest import fixture, raises @@ -289,8 +285,8 @@ def test_parse_contents_from_a2a_conversion(a2a_agent: A2AAgent) -> None: # Verify conversion assert len(contents) == 2 - assert isinstance(contents[0], TextContent) - assert isinstance(contents[1], TextContent) + assert contents[0].type == "text" + assert contents[1].type == "text" assert contents[0].text == "First part" assert contents[1].text == "Second part" @@ -299,7 +295,7 @@ def test_prepare_message_for_a2a_with_error_content(a2a_agent: A2AAgent) -> None """Test _prepare_message_for_a2a with ErrorContent.""" # Create ChatMessage with ErrorContent - error_content = ErrorContent(message="Test error message") + error_content = Content.from_error(message="Test error message") message = ChatMessage(role=Role.USER, contents=[error_content]) # Convert to A2A message @@ -314,7 +310,7 @@ def test_prepare_message_for_a2a_with_uri_content(a2a_agent: A2AAgent) -> None: """Test _prepare_message_for_a2a with UriContent.""" # Create ChatMessage with UriContent - uri_content = UriContent(uri="http://example.com/file.pdf", media_type="application/pdf") + uri_content = Content.from_uri(uri="http://example.com/file.pdf", media_type="application/pdf") message = 
ChatMessage(role=Role.USER, contents=[uri_content]) # Convert to A2A message @@ -330,7 +326,7 @@ def test_prepare_message_for_a2a_with_data_content(a2a_agent: A2AAgent) -> None: """Test _prepare_message_for_a2a with DataContent.""" # Create ChatMessage with DataContent (base64 data URI) - data_content = DataContent(uri="data:text/plain;base64,SGVsbG8gV29ybGQ=", media_type="text/plain") + data_content = Content.from_uri(uri="data:text/plain;base64,SGVsbG8gV29ybGQ=", media_type="text/plain") message = ChatMessage(role=Role.USER, contents=[data_content]) # Convert to A2A message @@ -368,7 +364,7 @@ async def test_run_stream_with_message_response(a2a_agent: A2AAgent, mock_a2a_cl assert len(updates[0].contents) == 1 content = updates[0].contents[0] - assert isinstance(content, TextContent) + assert content.type == "text" assert content.text == "Streaming response from agent!" assert updates[0].response_id == "msg-stream-123" @@ -414,10 +410,10 @@ def test_prepare_message_for_a2a_with_multiple_contents() -> None: message = ChatMessage( role=Role.USER, contents=[ - TextContent(text="Here's the analysis:"), - DataContent(data=b"binary data", media_type="application/octet-stream"), - UriContent(uri="https://example.com/image.png", media_type="image/png"), - TextContent(text='{"structured": "data"}'), + Content.from_text(text="Here's the analysis:"), + Content.from_data(data=b"binary data", media_type="application/octet-stream"), + Content.from_uri(uri="https://example.com/image.png", media_type="image/png"), + Content.from_text(text='{"structured": "data"}'), ], ) @@ -445,7 +441,7 @@ def test_parse_contents_from_a2a_with_data_part() -> None: assert len(contents) == 1 - assert isinstance(contents[0], TextContent) + assert contents[0].type == "text" assert contents[0].text == '{"key": "value", "number": 42}' assert contents[0].additional_properties == {"source": "test"} @@ -470,7 +466,7 @@ def test_prepare_message_for_a2a_with_hosted_file() -> None: # Create message with 
hosted file content message = ChatMessage( role=Role.USER, - contents=[HostedFileContent(file_id="hosted://storage/document.pdf")], + contents=[Content.from_hosted_file(file_id="hosted://storage/document.pdf")], ) result = agent._prepare_message_for_a2a(message) # noqa: SLF001 @@ -507,7 +503,7 @@ def test_parse_contents_from_a2a_with_hosted_file_uri() -> None: assert len(contents) == 1 - assert isinstance(contents[0], UriContent) + assert contents[0].type == "uri" assert contents[0].uri == "hosted://storage/document.pdf" assert contents[0].media_type == "" # Converted None to empty string diff --git a/python/packages/ag-ui/agent_framework_ag_ui/__init__.py b/python/packages/ag-ui/agent_framework_ag_ui/__init__.py index c6dc575d36..f2c2ba7fe1 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui/__init__.py +++ b/python/packages/ag-ui/agent_framework_ag_ui/__init__.py @@ -6,13 +6,6 @@ from ._agent import AgentFrameworkAgent from ._client import AGUIChatClient -from ._confirmation_strategies import ( - ConfirmationStrategy, - DefaultConfirmationStrategy, - DocumentWriterConfirmationStrategy, - RecipeConfirmationStrategy, - TaskPlannerConfirmationStrategy, -) from ._endpoint import add_agent_framework_fastapi_endpoint from ._event_converters import AGUIEventConverter from ._http_service import AGUIHttpService @@ -35,13 +28,8 @@ "AGUIHttpService", "AGUIRequest", "AgentState", - "ConfirmationStrategy", - "DefaultConfirmationStrategy", "PredictStateConfig", "RunMetadata", - "TaskPlannerConfirmationStrategy", - "RecipeConfirmationStrategy", - "DocumentWriterConfirmationStrategy", "DEFAULT_TAGS", "__version__", ] diff --git a/python/packages/ag-ui/agent_framework_ag_ui/_agent.py b/python/packages/ag-ui/agent_framework_ag_ui/_agent.py index 806f5ab1bb..38ca0e9767 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui/_agent.py +++ b/python/packages/ag-ui/agent_framework_ag_ui/_agent.py @@ -1,6 +1,6 @@ # Copyright (c) Microsoft. All rights reserved. 
-"""AgentFrameworkAgent wrapper for AG-UI protocol - Clean Architecture.""" +"""AgentFrameworkAgent wrapper for AG-UI protocol.""" from collections.abc import AsyncGenerator from typing import Any, cast @@ -8,13 +8,7 @@ from ag_ui.core import BaseEvent from agent_framework import AgentProtocol -from ._confirmation_strategies import ConfirmationStrategy, DefaultConfirmationStrategy -from ._orchestrators import ( - DefaultOrchestrator, - ExecutionContext, - HumanInTheLoopOrchestrator, - Orchestrator, -) +from ._run import run_agent_stream class AgentConfig: @@ -33,7 +27,7 @@ def __init__( state_schema: Optional state schema for state management; accepts dict or Pydantic model/class predict_state_config: Configuration for predictive state updates use_service_thread: Whether the agent thread is service-managed - require_confirmation: Whether predictive updates require confirmation + require_confirmation: Whether predictive updates require user confirmation before applying """ self.state_schema = self._normalize_state_schema(state_schema) self.predict_state_config = predict_state_config or {} @@ -58,12 +52,12 @@ def _normalize_state_schema(state_schema: Any | None) -> dict[str, Any]: base_model_type = None if base_model_type is not None and isinstance(state_schema, base_model_type): - schema_dict = state_schema.__class__.model_json_schema() + schema_dict = state_schema.__class__.model_json_schema() # type: ignore[union-attr] return schema_dict.get("properties", {}) or {} if base_model_type is not None and isinstance(state_schema, type) and issubclass(state_schema, base_model_type): - schema_dict = state_schema.model_json_schema() - return schema_dict.get("properties", {}) or {} + schema_dict = state_schema.model_json_schema() # type: ignore[union-attr] + return schema_dict.get("properties", {}) or {} # type: ignore return {} @@ -72,12 +66,7 @@ class AgentFrameworkAgent: """Wraps Agent Framework agents for AG-UI protocol compatibility. 
Translates between Agent Framework's AgentProtocol and AG-UI's event-based - protocol. Uses orchestrators to handle different execution flows (standard - execution, human-in-the-loop, etc.). Orchestrators are checked in order; - the first matching orchestrator handles the request. - - Supports predictive state updates for agentic generative UI, with optional - confirmation requirements configurable per use case. + protocol. Follows a simple linear flow: RunStarted -> content events -> RunFinished. """ def __init__( @@ -88,9 +77,7 @@ def __init__( state_schema: Any | None = None, predict_state_config: dict[str, dict[str, str]] | None = None, require_confirmation: bool = True, - orchestrators: list[Orchestrator] | None = None, use_service_thread: bool = False, - confirmation_strategy: ConfirmationStrategy | None = None, ): """Initialize the AG-UI compatible agent wrapper. @@ -99,15 +86,9 @@ def __init__( name: Optional name for the agent description: Optional description state_schema: Optional state schema for state management; accepts dict or Pydantic model/class - predict_state_config: Configuration for predictive state updates. - Format: {"state_key": {"tool": "tool_name", "tool_argument": "arg_name"}} - require_confirmation: Whether predictive updates require confirmation. - Set to False for agentic generative UI that updates automatically. - orchestrators: Custom orchestrators (auto-configured if None). - Orchestrators are checked in order; first match handles the request. - use_service_thread: Whether the agent thread is service-managed. - confirmation_strategy: Strategy for generating confirmation messages. - Defaults to DefaultConfirmationStrategy if None. 
+ predict_state_config: Configuration for predictive state updates + require_confirmation: Whether predictive updates require user confirmation before applying + use_service_thread: Whether the agent thread is service-managed """ self.agent = agent self.name = name or getattr(agent, "name", "agent") @@ -120,74 +101,17 @@ def __init__( require_confirmation=require_confirmation, ) - # Configure orchestrators - if orchestrators is None: - self.orchestrators = self._default_orchestrators() - else: - self.orchestrators = orchestrators - - # Configure confirmation strategy - if confirmation_strategy is None: - self.confirmation_strategy: ConfirmationStrategy = DefaultConfirmationStrategy() - else: - self.confirmation_strategy = confirmation_strategy - - def _default_orchestrators(self) -> list[Orchestrator]: - """Create default orchestrator chain. - - Returns: - List of orchestrators in priority order. First matching orchestrator - handles the request, so order matters. - """ - return [ - HumanInTheLoopOrchestrator(), # Handle tool approval responses - # Add more specialized orchestrators here as needed - DefaultOrchestrator(), # Fallback: standard agent execution - ] - async def run_agent( self, input_data: dict[str, Any], ) -> AsyncGenerator[BaseEvent, None]: """Run the agent and yield AG-UI events. - This is the ONLY public method - much simpler than the original 376-line - implementation. All orchestration logic has been extracted into dedicated - Orchestrator classes. - - The method creates an ExecutionContext with all needed data, then finds - the first orchestrator that can handle the request and delegates to it. - Args: input_data: The AG-UI run input containing messages, state, etc. 
Yields: AG-UI events - - Raises: - RuntimeError: If no orchestrator matches (should never happen if - DefaultOrchestrator is last in the chain) """ - # Create execution context with all needed data - context = ExecutionContext( - input_data=input_data, - agent=self.agent, - config=self.config, - confirmation_strategy=self.confirmation_strategy, - ) - - # Find matching orchestrator and execute - for orchestrator in self.orchestrators: - if orchestrator.can_handle(context): - async for event in orchestrator.run(context): - yield event - return - - # Should never reach here if DefaultOrchestrator is last - raise RuntimeError("No orchestrator matched - check configuration") - - -__all__ = [ - "AgentFrameworkAgent", - "AgentConfig", -] + async for event in run_agent_stream(input_data, self.agent, self.config): + yield event diff --git a/python/packages/ag-ui/agent_framework_ag_ui/_client.py b/python/packages/ag-ui/agent_framework_ag_ui/_client.py index e31036803c..7a03949b66 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui/_client.py +++ b/python/packages/ag-ui/agent_framework_ag_ui/_client.py @@ -17,12 +17,10 @@ ChatMessage, ChatResponse, ChatResponseUpdate, - DataContent, - FunctionCallContent, + Content, + use_chat_middleware, + use_function_invocation, ) -from agent_framework._middleware import use_chat_middleware -from agent_framework._tools import use_function_invocation -from agent_framework._types import BaseContent, Contents from agent_framework.observability import use_instrumentation from ._event_converters import AGUIEventConverter @@ -53,26 +51,11 @@ logger: logging.Logger = logging.getLogger(__name__) -class ServerFunctionCallContent(BaseContent): - """Wrapper for server function calls to prevent client re-execution. - - All function calls from the remote server are server-side executions. - This wrapper prevents @use_function_invocation from trying to execute them again. 
- """ - - function_call_content: FunctionCallContent - - def __init__(self, function_call_content: FunctionCallContent) -> None: - """Initialize with the function call content.""" - super().__init__(type="server_function_call") - self.function_call_content = function_call_content - - -def _unwrap_server_function_call_contents(contents: MutableSequence[Contents | dict[str, Any]]) -> None: - """Replace ServerFunctionCallContent instances with their underlying call content.""" +def _unwrap_server_function_call_contents(contents: MutableSequence[Content | dict[str, Any]]) -> None: + """Replace server_function_call instances with their underlying call content.""" for idx, content in enumerate(contents): - if isinstance(content, ServerFunctionCallContent): - contents[idx] = content.function_call_content # type: ignore[assignment] + if content.type == "server_function_call": # type: ignore[union-attr] + contents[idx] = content.function_call # type: ignore[assignment, union-attr] TBaseChatClient = TypeVar("TBaseChatClient", bound=type[BaseChatClient[Any]]) @@ -91,9 +74,9 @@ def _apply_server_function_call_unwrap(chat_client: TBaseChatClient) -> TBaseCha original_get_streaming_response = chat_client.get_streaming_response @wraps(original_get_streaming_response) - async def streaming_wrapper(self, *args: Any, **kwargs: Any) -> AsyncIterable[ChatResponseUpdate]: + async def streaming_wrapper(self: Any, *args: Any, **kwargs: Any) -> AsyncIterable[ChatResponseUpdate]: async for update in original_get_streaming_response(self, *args, **kwargs): - _unwrap_server_function_call_contents(cast(MutableSequence[Contents | dict[str, Any]], update.contents)) + _unwrap_server_function_call_contents(cast(MutableSequence[Content | dict[str, Any]], update.contents)) yield update chat_client.get_streaming_response = streaming_wrapper # type: ignore[assignment] @@ -101,13 +84,11 @@ async def streaming_wrapper(self, *args: Any, **kwargs: Any) -> AsyncIterable[Ch original_get_response = 
chat_client.get_response @wraps(original_get_response) - async def response_wrapper(self, *args: Any, **kwargs: Any) -> ChatResponse: + async def response_wrapper(self: Any, *args: Any, **kwargs: Any) -> ChatResponse: response = await original_get_response(self, *args, **kwargs) if response.messages: for message in response.messages: - _unwrap_server_function_call_contents( - cast(MutableSequence[Contents | dict[str, Any]], message.contents) - ) + _unwrap_server_function_call_contents(cast(MutableSequence[Content | dict[str, Any]], message.contents)) return response chat_client.get_response = response_wrapper # type: ignore[assignment] @@ -289,13 +270,13 @@ def _extract_state_from_messages( last_message = messages[-1] for content in last_message.contents: - if isinstance(content, DataContent) and content.media_type == "application/json": + if isinstance(content, Content) and content.type == "data" and content.media_type == "application/json": try: uri = content.uri - if uri.startswith("data:application/json;base64,"): + if uri.startswith("data:application/json;base64,"): # type: ignore[union-attr] import base64 - encoded_data = uri.split(",", 1)[1] + encoded_data = uri.split(",", 1)[1] # type: ignore[union-attr] decoded_bytes = base64.b64decode(encoded_data) state = json.loads(decoded_bytes.decode("utf-8")) @@ -433,19 +414,19 @@ async def _inner_get_streaming_response( ) # Distinguish client vs server tools for i, content in enumerate(update.contents): - if isinstance(content, FunctionCallContent): + if content.type == "function_call": # type: ignore[attr-defined] logger.debug( - f"[AGUIChatClient] Function call: {content.name}, in client_tool_set: {content.name in client_tool_set}" + f"[AGUIChatClient] Function call: {content.name}, in client_tool_set: {content.name in client_tool_set}" # type: ignore[attr-defined] ) - if content.name in client_tool_set: + if content.name in client_tool_set: # type: ignore[attr-defined] # Client tool - let @use_function_invocation 
execute it - if not content.additional_properties: - content.additional_properties = {} - content.additional_properties["agui_thread_id"] = thread_id + if not content.additional_properties: # type: ignore[attr-defined] + content.additional_properties = {} # type: ignore[attr-defined] + content.additional_properties["agui_thread_id"] = thread_id # type: ignore[attr-defined] else: # Server tool - wrap so @use_function_invocation ignores it - logger.debug(f"[AGUIChatClient] Wrapping server tool: {content.name}") - self._register_server_tool_placeholder(content.name) - update.contents[i] = ServerFunctionCallContent(content) # type: ignore + logger.debug(f"[AGUIChatClient] Wrapping server tool: {content.name}") # type: ignore[union-attr] + self._register_server_tool_placeholder(content.name) # type: ignore[arg-type] + update.contents[i] = Content(type="server_function_call", function_call=content) # type: ignore yield update diff --git a/python/packages/ag-ui/agent_framework_ag_ui/_confirmation_strategies.py b/python/packages/ag-ui/agent_framework_ag_ui/_confirmation_strategies.py deleted file mode 100644 index 35e648c100..0000000000 --- a/python/packages/ag-ui/agent_framework_ag_ui/_confirmation_strategies.py +++ /dev/null @@ -1,217 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. - -"""Confirmation strategies for human-in-the-loop approval flows. - -Each agent can provide a custom confirmation strategy to generate domain-specific -messages when users approve or reject changes/actions. -""" - -from abc import ABC, abstractmethod -from typing import Any - - -class ConfirmationStrategy(ABC): - """Strategy for generating confirmation messages during human-in-the-loop flows. - - Subclasses must define the message properties. The methods use those properties - by default, but can be overridden for complete customization. - """ - - @property - @abstractmethod - def approval_header(self) -> str: - """Header for approval accepted message. Must be overridden.""" - ... 
- - @property - @abstractmethod - def approval_footer(self) -> str: - """Footer for approval accepted message. Must be overridden.""" - ... - - @property - @abstractmethod - def rejection_message(self) -> str: - """Message when user rejects. Must be overridden.""" - ... - - @property - @abstractmethod - def state_confirmed_message(self) -> str: - """Message when state is confirmed. Must be overridden.""" - ... - - @property - @abstractmethod - def state_rejected_message(self) -> str: - """Message when state is rejected. Must be overridden.""" - ... - - def on_approval_accepted(self, steps: list[dict[str, Any]]) -> str: - """Generate message when user approves function execution. - - Default implementation uses header/footer properties. - Override for complete customization. - - Args: - steps: List of approved steps with 'description', 'status', etc. - - Returns: - Message to display to user - """ - enabled_steps = [s for s in steps if s.get("status") == "enabled"] - message_parts = [self.approval_header.format(count=len(enabled_steps))] - for i, step in enumerate(enabled_steps, 1): - message_parts.append(f"{i}. {step['description']}\n") - message_parts.append(self.approval_footer) - return "".join(message_parts) - - def on_approval_rejected(self, steps: list[dict[str, Any]]) -> str: - """Generate message when user rejects function execution. - - Args: - steps: List of rejected steps - - Returns: - Message to display to user - """ - return self.rejection_message - - def on_state_confirmed(self) -> str: - """Generate message when user confirms predictive state changes. - - Returns: - Message to display to user - """ - return self.state_confirmed_message - - def on_state_rejected(self) -> str: - """Generate message when user rejects predictive state changes. 
- - Returns: - Message to display to user - """ - return self.state_rejected_message - - -class DefaultConfirmationStrategy(ConfirmationStrategy): - """Generic confirmation messages suitable for most agents.""" - - @property - def approval_header(self) -> str: - return "Executing {count} approved steps:\n\n" - - @property - def approval_footer(self) -> str: - return "\nAll steps completed successfully!" - - @property - def rejection_message(self) -> str: - return "No problem! What would you like me to change about the plan?" - - @property - def state_confirmed_message(self) -> str: - return "Changes confirmed and applied successfully!" - - @property - def state_rejected_message(self) -> str: - return "No problem! What would you like me to change?" - - -class TaskPlannerConfirmationStrategy(ConfirmationStrategy): - """Domain-specific confirmation messages for task planning agents.""" - - @property - def approval_header(self) -> str: - return "Executing your requested tasks:\n\n" - - @property - def approval_footer(self) -> str: - return "\nAll tasks completed successfully!" - - @property - def rejection_message(self) -> str: - return "No problem! Let me revise the plan. What would you like me to change?" - - @property - def state_confirmed_message(self) -> str: - return "Tasks confirmed and ready to execute!" - - @property - def state_rejected_message(self) -> str: - return "No problem! How should I adjust the task list?" - - -class RecipeConfirmationStrategy(ConfirmationStrategy): - """Domain-specific confirmation messages for recipe agents.""" - - @property - def approval_header(self) -> str: - return "Updating your recipe:\n\n" - - @property - def approval_footer(self) -> str: - return "\nRecipe updated successfully!" - - @property - def rejection_message(self) -> str: - return "No problem! What ingredients or steps should I change?" - - @property - def state_confirmed_message(self) -> str: - return "Recipe changes applied successfully!" 
- - @property - def state_rejected_message(self) -> str: - return "No problem! What would you like me to adjust in the recipe?" - - -class DocumentWriterConfirmationStrategy(ConfirmationStrategy): - """Domain-specific confirmation messages for document writing agents.""" - - @property - def approval_header(self) -> str: - return "Applying your edits:\n\n" - - @property - def approval_footer(self) -> str: - return "\nDocument updated successfully!" - - @property - def rejection_message(self) -> str: - return "No problem! Which changes should I keep or modify?" - - @property - def state_confirmed_message(self) -> str: - return "Document edits applied!" - - @property - def state_rejected_message(self) -> str: - return "No problem! What should I change about the document?" - - -def apply_confirmation_strategy( - strategy: ConfirmationStrategy | None, - accepted: bool, - steps: list[dict[str, Any]], -) -> str: - """Apply a confirmation strategy to generate a message. - - This helper consolidates the pattern used in multiple orchestrators. 
- - Args: - strategy: Strategy to use, or None for default - accepted: Whether the user approved - steps: List of steps (may be empty for state confirmations) - - Returns: - Generated message string - """ - if strategy is None: - strategy = DefaultConfirmationStrategy() - - if not steps: - # State confirmation (no steps) - return strategy.on_state_confirmed() if accepted else strategy.on_state_rejected() - # Step-based approval - return strategy.on_approval_accepted(steps) if accepted else strategy.on_approval_rejected(steps) diff --git a/python/packages/ag-ui/agent_framework_ag_ui/_endpoint.py b/python/packages/ag-ui/agent_framework_ag_ui/_endpoint.py index 7948d4f935..dc39be77e7 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui/_endpoint.py +++ b/python/packages/ag-ui/agent_framework_ag_ui/_endpoint.py @@ -4,7 +4,7 @@ import copy import logging -from collections.abc import Sequence +from collections.abc import AsyncGenerator, Sequence from typing import Any from ag_ui.encoder import EventEncoder @@ -56,8 +56,8 @@ def add_agent_framework_fastapi_endpoint( else: wrapped_agent = agent - @app.post(path, tags=tags or ["AG-UI"], dependencies=dependencies) # type: ignore[arg-type] - async def agent_endpoint(request_body: AGUIRequest): # type: ignore[misc] + @app.post(path, tags=tags or ["AG-UI"], dependencies=dependencies, response_model=None) # type: ignore[arg-type] + async def agent_endpoint(request_body: AGUIRequest) -> StreamingResponse | dict[str, str]: """Handle AG-UI agent requests. 
Note: Function is accessed via FastAPI's decorator registration, @@ -77,17 +77,19 @@ async def agent_endpoint(request_body: AGUIRequest): # type: ignore[misc] ) logger.info(f"Received request at {path}: {input_data.get('run_id', 'no-run-id')}") - async def event_generator(): + async def event_generator() -> AsyncGenerator[str, None]: encoder = EventEncoder() event_count = 0 async for event in wrapped_agent.run_agent(input_data): event_count += 1 - logger.debug(f"[{path}] Event {event_count}: {type(event).__name__}") - - # Log event payload for debugging - if hasattr(event, "model_dump"): - event_data = event.model_dump(exclude_none=True) - logger.debug(f"[{path}] Event payload: {event_data}") + event_type_name = getattr(event, "type", type(event).__name__) + # Log important events at INFO level + if "TOOL_CALL" in str(event_type_name) or "RUN" in str(event_type_name): + if hasattr(event, "model_dump"): + event_data = event.model_dump(exclude_none=True) + logger.info(f"[{path}] Event {event_count}: {event_type_name} - {event_data}") + else: + logger.info(f"[{path}] Event {event_count}: {event_type_name}") encoded = encoder.encode(event) logger.debug( diff --git a/python/packages/ag-ui/agent_framework_ag_ui/_event_converters.py b/python/packages/ag-ui/agent_framework_ag_ui/_event_converters.py index 0f485739c9..bd2d989f2a 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui/_event_converters.py +++ b/python/packages/ag-ui/agent_framework_ag_ui/_event_converters.py @@ -6,12 +6,9 @@ from agent_framework import ( ChatResponseUpdate, - ErrorContent, + Content, FinishReason, - FunctionCallContent, - FunctionResultContent, Role, - TextContent, ) @@ -117,7 +114,7 @@ def _handle_text_message_content(self, event: dict[str, Any]) -> ChatResponseUpd return ChatResponseUpdate( role=Role.ASSISTANT, message_id=self.current_message_id, - contents=[TextContent(text=delta)], + contents=[Content.from_text(text=delta)], ) def _handle_text_message_end(self, event: dict[str, Any]) -> 
ChatResponseUpdate | None: @@ -133,7 +130,7 @@ def _handle_tool_call_start(self, event: dict[str, Any]) -> ChatResponseUpdate: return ChatResponseUpdate( role=Role.ASSISTANT, contents=[ - FunctionCallContent( + Content.from_function_call( call_id=self.current_tool_call_id or "", name=self.current_tool_name or "", arguments="", @@ -149,7 +146,7 @@ def _handle_tool_call_args(self, event: dict[str, Any]) -> ChatResponseUpdate: return ChatResponseUpdate( role=Role.ASSISTANT, contents=[ - FunctionCallContent( + Content.from_function_call( call_id=self.current_tool_call_id or "", name=self.current_tool_name or "", arguments=delta, @@ -170,7 +167,7 @@ def _handle_tool_call_result(self, event: dict[str, Any]) -> ChatResponseUpdate: return ChatResponseUpdate( role=Role.TOOL, contents=[ - FunctionResultContent( + Content.from_function_result( call_id=tool_call_id, result=result, ) @@ -197,7 +194,7 @@ def _handle_run_error(self, event: dict[str, Any]) -> ChatResponseUpdate: role=Role.ASSISTANT, finish_reason=FinishReason.CONTENT_FILTER, contents=[ - ErrorContent( + Content.from_error( message=error_message, error_code="RUN_ERROR", ) diff --git a/python/packages/ag-ui/agent_framework_ag_ui/_events.py b/python/packages/ag-ui/agent_framework_ag_ui/_events.py deleted file mode 100644 index ddf3ebba01..0000000000 --- a/python/packages/ag-ui/agent_framework_ag_ui/_events.py +++ /dev/null @@ -1,586 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. 
- -"""Event bridge for converting Agent Framework events to AG-UI protocol.""" - -import json -import logging -import re -from copy import deepcopy -from typing import Any - -from ag_ui.core import ( - BaseEvent, - CustomEvent, - RunFinishedEvent, - RunStartedEvent, - StateDeltaEvent, - StateSnapshotEvent, - TextMessageContentEvent, - TextMessageEndEvent, - TextMessageStartEvent, - ToolCallArgsEvent, - ToolCallEndEvent, - ToolCallResultEvent, - ToolCallStartEvent, -) -from agent_framework import ( - AgentResponseUpdate, - FunctionApprovalRequestContent, - FunctionCallContent, - FunctionResultContent, - TextContent, - prepare_function_call_results, -) - -from ._utils import extract_state_from_tool_args, generate_event_id, safe_json_parse - -logger = logging.getLogger(__name__) - - -class AgentFrameworkEventBridge: - """Converts Agent Framework responses to AG-UI events.""" - - def __init__( - self, - run_id: str, - thread_id: str, - predict_state_config: dict[str, dict[str, str]] | None = None, - current_state: dict[str, Any] | None = None, - skip_text_content: bool = False, - require_confirmation: bool = True, - approval_tool_name: str | None = None, - ) -> None: - """ - Initialize the event bridge. - - Args: - run_id: The run identifier. - thread_id: The thread identifier. - predict_state_config: Configuration for predictive state updates. - Format: {"state_key": {"tool": "tool_name", "tool_argument": "arg_name"}} - current_state: Reference to the current state dict for tracking updates. - skip_text_content: If True, skip emitting TextMessageContentEvents (for structured outputs). - require_confirmation: Whether predictive state updates require user confirmation. 
- """ - self.run_id = run_id - self.thread_id = thread_id - self.current_message_id: str | None = None - self.current_tool_call_id: str | None = None - self.current_tool_call_name: str | None = None # Track the tool name across streaming chunks - self.predict_state_config = predict_state_config or {} - self.current_state = current_state or {} - self.pending_state_updates: dict[str, Any] = {} # Track updates from tool calls - self.skip_text_content = skip_text_content - self.require_confirmation = require_confirmation - self.approval_tool_name = approval_tool_name - - # For predictive state updates: accumulate streaming arguments - self.streaming_tool_args: str = "" # Accumulated JSON string - self.last_emitted_state: dict[str, Any] = {} # Track last emitted state to avoid duplicates - self.state_delta_count: int = 0 # Counter for sampling log output - self.should_stop_after_confirm: bool = False # Flag to stop run after confirm_changes - self.suppressed_summary: str = "" # Store LLM summary to show after confirmation - - async def from_agent_run_update(self, update: AgentResponseUpdate) -> list[BaseEvent]: - """ - Convert an AgentResponseUpdate to AG-UI events. - - Args: - update: The agent run update to convert. - - Returns: - List of AG-UI events. 
- """ - events: list[BaseEvent] = [] - - logger.info(f"Processing AgentRunUpdate with {len(update.contents)} content items") - for idx, content in enumerate(update.contents): - logger.info(f" Content {idx}: type={type(content).__name__}") - if isinstance(content, TextContent): - events.extend(self._handle_text_content(content)) - elif isinstance(content, FunctionCallContent): - events.extend(self._handle_function_call_content(content)) - elif isinstance(content, FunctionResultContent): - events.extend(self._handle_function_result_content(content)) - elif isinstance(content, FunctionApprovalRequestContent): - events.extend(self._handle_function_approval_request_content(content)) - - return events - - def _handle_text_content(self, content: TextContent) -> list[BaseEvent]: - events: list[BaseEvent] = [] - logger.info(f" TextContent found: length={len(content.text)}") - logger.info( - " Flags: skip_text_content=%s, should_stop_after_confirm=%s", - self.skip_text_content, - self.should_stop_after_confirm, - ) - - if self.skip_text_content: - logger.info(" SKIPPING TextContent: skip_text_content is True") - return events - - if self.should_stop_after_confirm: - logger.info(" SKIPPING TextContent: waiting for confirm_changes response") - self.suppressed_summary += content.text - logger.info(f" Suppressed summary length={len(self.suppressed_summary)}") - return events - - # Skip empty text chunks to avoid emitting - # TextMessageContentEvent with an empty `delta` which fails - # Pydantic validation (AG-UI requires non-empty strings). 
- if not content.text: - logger.info(" SKIPPING TextContent: empty chunk") - return events - - if not self.current_message_id: - self.current_message_id = generate_event_id() - start_event = TextMessageStartEvent( - message_id=self.current_message_id, - role="assistant", - ) - logger.info(f" EMITTING TextMessageStartEvent with message_id={self.current_message_id}") - events.append(start_event) - - event = TextMessageContentEvent( - message_id=self.current_message_id, - delta=content.text, - ) - logger.info(f" EMITTING TextMessageContentEvent with text_len={len(content.text)}") - events.append(event) - return events - - def _handle_function_call_content(self, content: FunctionCallContent) -> list[BaseEvent]: - events: list[BaseEvent] = [] - if content.name: - logger.debug(f"Tool call: {content.name} (call_id: {content.call_id})") - - if not content.name and not content.call_id and not self.current_tool_call_name: - args_length = len(str(content.arguments)) if content.arguments else 0 - logger.warning(f"FunctionCallContent missing name and call_id. 
args_length={args_length}") - - tool_call_id = self._coalesce_tool_call_id(content) - # Only emit ToolCallStartEvent once per tool call (when it's a new tool call) - if content.name and tool_call_id != self.current_tool_call_id: - self.streaming_tool_args = "" - self.state_delta_count = 0 - self.current_tool_call_id = tool_call_id - self.current_tool_call_name = content.name - - tool_start_event = ToolCallStartEvent( - tool_call_id=tool_call_id, - tool_call_name=content.name, - parent_message_id=self.current_message_id, - ) - logger.info(f"Emitting ToolCallStartEvent with name='{content.name}', id='{tool_call_id}'") - events.append(tool_start_event) - elif tool_call_id: - self.current_tool_call_id = tool_call_id - - if content.arguments: - delta_str = content.arguments if isinstance(content.arguments, str) else json.dumps(content.arguments) - logger.info(f"Emitting ToolCallArgsEvent with delta_length={len(delta_str)}, id='{tool_call_id}'") - args_event = ToolCallArgsEvent( - tool_call_id=tool_call_id, - delta=delta_str, - ) - events.append(args_event) - - events.extend(self._emit_predictive_state_deltas(delta_str)) - - return events - - def _coalesce_tool_call_id(self, content: FunctionCallContent) -> str: - if content.call_id: - return content.call_id - if self.current_tool_call_id: - return self.current_tool_call_id - return generate_event_id() - - def _emit_predictive_state_deltas(self, argument_chunk: str) -> list[BaseEvent]: - events: list[BaseEvent] = [] - if not self.current_tool_call_name or not self.predict_state_config: - return events - - self.streaming_tool_args += argument_chunk - logger.debug( - "Predictive state: accumulated %s chars for tool '%s'", - len(self.streaming_tool_args), - self.current_tool_call_name, - ) - - parsed_args = safe_json_parse(self.streaming_tool_args) - if parsed_args is None: - for state_key, config in self.predict_state_config.items(): - if config["tool"] != self.current_tool_call_name: - continue - tool_arg_name = 
config["tool_argument"] - pattern = rf'"{re.escape(tool_arg_name)}":\s*"([^"]*)' - match = re.search(pattern, self.streaming_tool_args) - - if match: - partial_value = match.group(1).replace("\\n", "\n").replace('\\"', '"').replace("\\\\", "\\") - - if state_key not in self.last_emitted_state or self.last_emitted_state[state_key] != partial_value: - state_delta_event = StateDeltaEvent( - delta=[ - { - "op": "replace", - "path": f"/{state_key}", - "value": partial_value, - } - ], - ) - - self.state_delta_count += 1 - if self.state_delta_count % 10 == 1: - logger.info( - "StateDeltaEvent #%s for '%s': op=replace, path=/%s, value_length=%s", - self.state_delta_count, - state_key, - state_key, - len(str(partial_value)), - ) - elif self.state_delta_count % 100 == 0: - logger.info(f"StateDeltaEvent #{self.state_delta_count} emitted") - - events.append(state_delta_event) - self.last_emitted_state[state_key] = partial_value - self.pending_state_updates[state_key] = partial_value - - if parsed_args: - for state_key, config in self.predict_state_config.items(): - if config["tool"] != self.current_tool_call_name: - continue - tool_arg_name = config["tool_argument"] - - state_value = extract_state_from_tool_args(parsed_args, tool_arg_name) - if state_value is None: - continue - - if state_key not in self.last_emitted_state or self.last_emitted_state[state_key] != state_value: - state_delta_event = StateDeltaEvent( - delta=[ - { - "op": "replace", - "path": f"/{state_key}", - "value": state_value, - } - ], - ) - - self.state_delta_count += 1 - if self.state_delta_count % 10 == 1: - logger.info( - "StateDeltaEvent #%s for '%s': op=replace, path=/%s, value_length=%s", - self.state_delta_count, - state_key, - state_key, - len(str(state_value)), - ) - elif self.state_delta_count % 100 == 0: - logger.info(f"StateDeltaEvent #{self.state_delta_count} emitted") - - events.append(state_delta_event) - self.last_emitted_state[state_key] = state_value - 
self.pending_state_updates[state_key] = state_value - return events - - def _handle_function_result_content(self, content: FunctionResultContent) -> list[BaseEvent]: - events: list[BaseEvent] = [] - if content.call_id: - end_event = ToolCallEndEvent( - tool_call_id=content.call_id, - ) - logger.info(f"Emitting ToolCallEndEvent for completed tool call '{content.call_id}'") - events.append(end_event) - - if self.state_delta_count > 0: - logger.info( - "Tool call '%s' complete: emitted %s StateDeltaEvents total", - content.call_id, - self.state_delta_count, - ) - - self.streaming_tool_args = "" - self.state_delta_count = 0 - - result_message_id = generate_event_id() - result_content = prepare_function_call_results(content.result) - - result_event = ToolCallResultEvent( - message_id=result_message_id, - tool_call_id=content.call_id, - content=result_content, - role="tool", - ) - events.append(result_event) - events.extend(self._emit_state_snapshot_and_confirmation()) - - return events - - def _emit_state_snapshot_and_confirmation(self) -> list[BaseEvent]: - events: list[BaseEvent] = [] - if self.pending_state_updates: - for key, value in self.pending_state_updates.items(): - self.current_state[key] = value - - logger.info(f"Emitting StateSnapshotEvent with keys: {list(self.current_state.keys())}") - if "recipe" in self.current_state: - recipe = self.current_state["recipe"] - logger.info( - "Recipe fields: title=%s, skill_level=%s, ingredients_count=%s, instructions_count=%s", - recipe.get("title"), - recipe.get("skill_level"), - len(recipe.get("ingredients", [])), - len(recipe.get("instructions", [])), - ) - - state_snapshot_event = StateSnapshotEvent( - snapshot=self.current_state, - ) - events.append(state_snapshot_event) - - tool_was_predictive = False - logger.debug( - "Checking predictive state: current_tool='%s', predict_config=%s", - self.current_tool_call_name, - list(self.predict_state_config.keys()) if self.predict_state_config else "None", - ) - for 
state_key, config in self.predict_state_config.items(): - if self.current_tool_call_name and config["tool"] == self.current_tool_call_name: - logger.info( - "Tool '%s' matches predictive config for state key '%s'", - self.current_tool_call_name, - state_key, - ) - tool_was_predictive = True - break - - if tool_was_predictive and self.require_confirmation: - events.extend(self._emit_confirm_changes_tool_call()) - elif tool_was_predictive: - logger.info("Skipping confirm_changes - require_confirmation is False") - - self.pending_state_updates.clear() - self.last_emitted_state = deepcopy(self.current_state) - self.current_tool_call_name = None - return events - - def _emit_confirm_changes_tool_call(self, function_call: FunctionCallContent | None = None) -> list[BaseEvent]: - """Emit a confirm_changes tool call for Dojo UI compatibility. - - Args: - function_call: Optional function call that needs confirmation. - If provided, includes function info in the confirm_changes args - so Dojo UI can display what's being confirmed. 
- """ - events: list[BaseEvent] = [] - confirm_call_id = generate_event_id() - logger.info("Emitting confirm_changes tool call for predictive update") - - confirm_start = ToolCallStartEvent( - tool_call_id=confirm_call_id, - tool_call_name="confirm_changes", - parent_message_id=self.current_message_id, - ) - events.append(confirm_start) - - # Include function info if this is for a function approval - # This helps Dojo UI display meaningful confirmation info - if function_call: - args_dict = { - "function_name": function_call.name, - "function_call_id": function_call.call_id, - "function_arguments": function_call.parse_arguments() or {}, - "steps": [ - { - "description": f"Execute {function_call.name}", - "status": "enabled", - } - ], - } - args_json = json.dumps(args_dict) - else: - args_json = "{}" - - confirm_args = ToolCallArgsEvent( - tool_call_id=confirm_call_id, - delta=args_json, - ) - events.append(confirm_args) - - confirm_end = ToolCallEndEvent( - tool_call_id=confirm_call_id, - ) - events.append(confirm_end) - - self.should_stop_after_confirm = True - logger.info("Set flag to stop run after confirm_changes") - return events - - def _emit_function_approval_tool_call(self, function_call: FunctionCallContent) -> list[BaseEvent]: - """Emit a tool call that can drive UI approval for function requests.""" - tool_call_name = "confirm_changes" - if self.approval_tool_name and self.approval_tool_name != function_call.name: - tool_call_name = self.approval_tool_name - - tool_call_id = generate_event_id() - tool_start = ToolCallStartEvent( - tool_call_id=tool_call_id, - tool_call_name=tool_call_name, - parent_message_id=self.current_message_id, - ) - events: list[BaseEvent] = [tool_start] - - args_dict = { - "function_name": function_call.name, - "function_call_id": function_call.call_id, - "function_arguments": function_call.parse_arguments() or {}, - "steps": [ - { - "description": f"Execute {function_call.name}", - "status": "enabled", - } - ], - } - args_json = 
json.dumps(args_dict) - - events.append( - ToolCallArgsEvent( - tool_call_id=tool_call_id, - delta=args_json, - ) - ) - events.append( - ToolCallEndEvent( - tool_call_id=tool_call_id, - ) - ) - - self.should_stop_after_confirm = True - logger.info("Set flag to stop run after confirm_changes") - return events - - def _handle_function_approval_request_content(self, content: FunctionApprovalRequestContent) -> list[BaseEvent]: - events: list[BaseEvent] = [] - logger.info("=== FUNCTION APPROVAL REQUEST ===") - logger.info(f" Function: {content.function_call.name}") - logger.info(f" Call ID: {content.function_call.call_id}") - - parsed_args = content.function_call.parse_arguments() - parsed_arg_keys = list(parsed_args.keys()) if parsed_args else "None" - logger.info(f" Parsed args keys: {parsed_arg_keys}") - - if parsed_args and self.predict_state_config: - logger.info( - " Checking predict_state_config keys: %s", - list(self.predict_state_config.keys()) if self.predict_state_config else "None", - ) - for state_key, config in self.predict_state_config.items(): - if config["tool"] != content.function_call.name: - continue - tool_arg_name = config["tool_argument"] - logger.info( - " MATCHED tool '%s' for state key '%s', arg='%s'", - content.function_call.name, - state_key, - tool_arg_name, - ) - - state_value = extract_state_from_tool_args(parsed_args, tool_arg_name) - if state_value is None: - logger.warning(f" Tool argument '{tool_arg_name}' not found in parsed args") - continue - - self.current_state[state_key] = state_value - logger.info("Emitting StateSnapshotEvent for key '%s', value type: %s", state_key, type(state_value)) # type: ignore - state_snapshot = StateSnapshotEvent( - snapshot=self.current_state, - ) - events.append(state_snapshot) - - if content.function_call.call_id: - end_event = ToolCallEndEvent( - tool_call_id=content.function_call.call_id, - ) - logger.info(f"Emitting ToolCallEndEvent for approval-required tool '{content.function_call.call_id}'") - 
events.append(end_event) - - # Emit the function_approval_request custom event for UI implementations that support it - approval_event = CustomEvent( - name="function_approval_request", - value={ - "id": content.id, - "function_call": { - "call_id": content.function_call.call_id, - "name": content.function_call.name, - "arguments": content.function_call.parse_arguments(), - }, - }, - ) - logger.info(f"Emitting function_approval_request custom event for '{content.function_call.name}'") - events.append(approval_event) - - # Emit a UI-friendly approval tool call for function approvals. - if self.require_confirmation: - events.extend(self._emit_function_approval_tool_call(content.function_call)) - - # Signal orchestrator to stop the run and wait for user approval response - self.should_stop_after_confirm = True - logger.info("Set flag to stop run - waiting for function approval response") - return events - - def create_run_started_event(self) -> RunStartedEvent: - """Create a run started event.""" - return RunStartedEvent( - run_id=self.run_id, - thread_id=self.thread_id, - ) - - def create_run_finished_event(self, result: Any = None) -> RunFinishedEvent: - """Create a run finished event.""" - return RunFinishedEvent( - run_id=self.run_id, - thread_id=self.thread_id, - result=result, - ) - - def create_message_start_event(self, message_id: str, role: str = "assistant") -> TextMessageStartEvent: - """Create a message start event.""" - return TextMessageStartEvent( - message_id=message_id, - role=role, # type: ignore - ) - - def create_message_end_event(self, message_id: str) -> TextMessageEndEvent: - """Create a message end event.""" - return TextMessageEndEvent( - message_id=message_id, - ) - - def create_state_snapshot_event(self, state: dict[str, Any]) -> StateSnapshotEvent: - """Create a state snapshot event. - - Args: - state: The complete state snapshot. - - Returns: - StateSnapshotEvent. 
- """ - return StateSnapshotEvent( - snapshot=state, - ) - - def create_state_delta_event(self, delta: list[dict[str, Any]]) -> StateDeltaEvent: - """Create a state delta event using JSON Patch format (RFC 6902). - - Args: - delta: List of JSON Patch operations. - - Returns: - StateDeltaEvent. - """ - return StateDeltaEvent( - delta=delta, - ) diff --git a/python/packages/ag-ui/agent_framework_ag_ui/_message_adapters.py b/python/packages/ag-ui/agent_framework_ag_ui/_message_adapters.py index 1ff858e9f5..f8f1623a30 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui/_message_adapters.py +++ b/python/packages/ag-ui/agent_framework_ag_ui/_message_adapters.py @@ -8,11 +8,8 @@ from agent_framework import ( ChatMessage, - FunctionApprovalResponseContent, - FunctionCallContent, - FunctionResultContent, + Content, Role, - TextContent, prepare_function_call_results, ) @@ -40,11 +37,11 @@ def _sanitize_tool_history(messages: list[ChatMessage]) -> list[ChatMessage]: tool_ids = { str(content.call_id) for content in msg.contents or [] - if isinstance(content, FunctionCallContent) and content.call_id + if content.type == "function_call" and content.call_id } confirm_changes_call = None for content in msg.contents or []: - if isinstance(content, FunctionCallContent) and content.name == "confirm_changes": + if content.type == "function_call" and content.name == "confirm_changes": confirm_changes_call = content break @@ -59,7 +56,7 @@ def _sanitize_tool_history(messages: list[ChatMessage]) -> list[ChatMessage]: approval_call_ids: set[str] = set() approval_accepted: bool | None = None for content in msg.contents or []: - if type(content) is FunctionApprovalResponseContent: + if content.type == "function_approval_response": if content.function_call and content.function_call.call_id: approval_call_ids.add(str(content.function_call.call_id)) if approval_accepted is None: @@ -79,7 +76,7 @@ def _sanitize_tool_history(messages: list[ChatMessage]) -> list[ChatMessage]: 
synthetic_result = ChatMessage( role="tool", contents=[ - FunctionResultContent( + Content.from_function_result( call_id=pending_confirm_changes_id, result="Confirmed" if approval_accepted else "Rejected", ) @@ -93,12 +90,12 @@ def _sanitize_tool_history(messages: list[ChatMessage]) -> list[ChatMessage]: if pending_confirm_changes_id: user_text = "" for content in msg.contents or []: - if isinstance(content, TextContent): - user_text = content.text + if content.type == "text": + user_text = content.text # type: ignore[assignment] break try: - parsed = json.loads(user_text) + parsed = json.loads(user_text) # type: ignore[arg-type] if "accepted" in parsed: logger.info( f"Injecting synthetic tool result for confirm_changes call_id={pending_confirm_changes_id}" @@ -106,7 +103,7 @@ def _sanitize_tool_history(messages: list[ChatMessage]) -> list[ChatMessage]: synthetic_result = ChatMessage( role="tool", contents=[ - FunctionResultContent( + Content.from_function_result( call_id=pending_confirm_changes_id, result="Confirmed" if parsed.get("accepted") else "Rejected", ) @@ -130,7 +127,7 @@ def _sanitize_tool_history(messages: list[ChatMessage]) -> list[ChatMessage]: synthetic_result = ChatMessage( role="tool", contents=[ - FunctionResultContent( + Content.from_function_result( call_id=pending_call_id, result="Tool execution skipped - user provided follow-up message", ) @@ -149,7 +146,7 @@ def _sanitize_tool_history(messages: list[ChatMessage]) -> list[ChatMessage]: continue keep = False for content in msg.contents or []: - if isinstance(content, FunctionResultContent): + if content.type == "function_result" and content.call_id: call_id = str(content.call_id) if call_id in pending_tool_call_ids: keep = True @@ -175,7 +172,7 @@ def _deduplicate_messages(messages: list[ChatMessage]) -> list[ChatMessage]: for idx, msg in enumerate(messages): role_value = get_role_value(msg) - if role_value == "tool" and msg.contents and isinstance(msg.contents[0], FunctionResultContent): + if 
role_value == "tool" and msg.contents and msg.contents[0].type == "function_result": call_id = str(msg.contents[0].call_id) key: Any = (role_value, call_id) @@ -184,7 +181,7 @@ def _deduplicate_messages(messages: list[ChatMessage]) -> list[ChatMessage]: existing_msg = unique_messages[existing_idx] existing_result = None - if existing_msg.contents and isinstance(existing_msg.contents[0], FunctionResultContent): + if existing_msg.contents and existing_msg.contents[0].type == "function_result": existing_result = existing_msg.contents[0].result new_result = msg.contents[0].result @@ -198,11 +195,9 @@ def _deduplicate_messages(messages: list[ChatMessage]) -> list[ChatMessage]: seen_keys[key] = len(unique_messages) unique_messages.append(msg) - elif ( - role_value == "assistant" and msg.contents and any(isinstance(c, FunctionCallContent) for c in msg.contents) - ): + elif role_value == "assistant" and msg.contents and any(c.type == "function_call" for c in msg.contents): tool_call_ids = tuple( - sorted(str(c.call_id) for c in msg.contents if isinstance(c, FunctionCallContent) and c.call_id) + sorted(str(c.call_id) for c in msg.contents if c.type == "function_call" and c.call_id) ) key = (role_value, tool_call_ids) @@ -257,33 +252,29 @@ def _update_tool_call_arguments( tool_calls = raw_msg.get("tool_calls") or raw_msg.get("toolCalls") if not isinstance(tool_calls, list): continue - tool_calls_list = cast(list[Any], tool_calls) - for tool_call in tool_calls_list: + for tool_call in tool_calls: if not isinstance(tool_call, dict): continue - tool_call_dict = cast(dict[str, Any], tool_call) - if str(tool_call_dict.get("id", "")) != tool_call_id: + if str(tool_call.get("id", "")) != tool_call_id: continue - function_payload = tool_call_dict.get("function") + function_payload = tool_call.get("function") if not isinstance(function_payload, dict): return - function_payload_dict = cast(dict[str, Any], function_payload) - existing_args = function_payload_dict.get("arguments") + 
existing_args = function_payload.get("arguments") if isinstance(existing_args, str): - function_payload_dict["arguments"] = json.dumps(modified_args) + function_payload["arguments"] = json.dumps(modified_args) else: - function_payload_dict["arguments"] = modified_args + function_payload["arguments"] = modified_args return - def _find_matching_func_call(call_id: str) -> FunctionCallContent | None: + def _find_matching_func_call(call_id: str) -> Content | None: for prev_msg in result: role_val = prev_msg.role.value if hasattr(prev_msg.role, "value") else str(prev_msg.role) if role_val != "assistant": continue for content in prev_msg.contents or []: - if isinstance(content, FunctionCallContent): - if content.call_id == call_id and content.name != "confirm_changes": - return content + if content.type == "function_call" and content.call_id == call_id and content.name != "confirm_changes": + return content return None def _parse_arguments(arguments: Any) -> dict[str, Any] | None: @@ -301,9 +292,9 @@ def _resolve_approval_call_id(tool_call_id: str, parsed_payload: dict[str, Any] continue direct_call = None confirm_call = None - sibling_calls: list[FunctionCallContent] = [] + sibling_calls: list[Content] = [] for content in prev_msg.contents or []: - if not isinstance(content, FunctionCallContent): + if content.type != "function_call": continue if content.call_id == tool_call_id: direct_call = content @@ -407,7 +398,7 @@ def _filter_modified_args( if not ( (m.role.value if hasattr(m.role, "value") else str(m.role)) == "tool" and any( - isinstance(c, FunctionResultContent) and c.call_id == approval_call_id + c.type == "function_result" and c.call_id == approval_call_id for c in (m.contents or []) ) ) @@ -439,8 +430,7 @@ def _filter_modified_args( if desc: approved_by_description[str(desc)] = step_item_dict merged_steps: list[Any] = [] - original_steps_list = cast(list[Any], original_steps) - for orig_step in original_steps_list: + for orig_step in original_steps: if not 
isinstance(orig_step, dict): merged_steps.append(orig_step) continue @@ -465,9 +455,9 @@ def _filter_modified_args( matching_func_call.arguments = updated_args _update_tool_call_arguments(messages, str(approval_call_id), merged_args) # Create a new FunctionCallContent with the modified arguments - func_call_for_approval = FunctionCallContent( - call_id=matching_func_call.call_id, - name=matching_func_call.name, + func_call_for_approval = Content.from_function_call( + call_id=matching_func_call.call_id, # type: ignore[arg-type] + name=matching_func_call.name, # type: ignore[arg-type] arguments=json.dumps(filtered_args), ) logger.info(f"Using modified arguments from approval: {filtered_args}") @@ -476,7 +466,7 @@ def _filter_modified_args( func_call_for_approval = matching_func_call # Create FunctionApprovalResponseContent for the agent framework - approval_response = FunctionApprovalResponseContent( + approval_response = Content.from_function_approval_response( approved=accepted, id=str(approval_call_id), function_call=func_call_for_approval, @@ -491,7 +481,7 @@ def _filter_modified_args( # Keep the old behavior for backwards compatibility chat_msg = ChatMessage( role=Role.USER, - contents=[TextContent(text=approval_payload_text)], + contents=[Content.from_text(text=approval_payload_text)], additional_properties={"is_tool_result": True, "tool_call_id": str(tool_call_id or "")}, ) if "id" in msg: @@ -504,14 +494,14 @@ def _filter_modified_args( if isinstance(result_content, str): func_result = result_content elif isinstance(result_content, dict): - func_result = cast(dict[str, Any], result_content) + func_result = result_content elif isinstance(result_content, list): - func_result = cast(list[Any], result_content) + func_result = result_content else: func_result = str(result_content) chat_msg = ChatMessage( role=Role.TOOL, - contents=[FunctionResultContent(call_id=str(tool_call_id), result=func_result)], + 
contents=[Content.from_function_result(call_id=str(tool_call_id), result=func_result)], ) if "id" in msg: chat_msg.message_id = msg["id"] @@ -527,21 +517,21 @@ def _filter_modified_args( chat_msg = ChatMessage( role=Role.TOOL, - contents=[FunctionResultContent(call_id=str(tool_call_id), result=result_content)], + contents=[Content.from_function_result(call_id=str(tool_call_id), result=result_content)], ) if "id" in msg: chat_msg.message_id = msg["id"] result.append(chat_msg) continue - # If assistant message includes tool calls, convert to FunctionCallContent(s) + # If assistant message includes tool calls, convert to Content.from_function_call(s) tool_calls = msg.get("tool_calls") or msg.get("toolCalls") if tool_calls: contents: list[Any] = [] # Include any assistant text content if present content_text = msg.get("content") if isinstance(content_text, str) and content_text: - contents.append(TextContent(text=content_text)) + contents.append(Content.from_text(text=content_text)) # Convert each tool call entry for tc in tool_calls: if not isinstance(tc, dict): @@ -558,7 +548,7 @@ def _filter_modified_args( arguments = func_dict.get("arguments") contents.append( - FunctionCallContent( + Content.from_function_call( call_id=call_id, name=name, arguments=arguments, @@ -580,14 +570,14 @@ def _filter_modified_args( approval_contents: list[Any] = [] for approval in msg["function_approvals"]: # Create FunctionCallContent with the modified arguments - func_call = FunctionCallContent( + func_call = Content.from_function_call( call_id=approval.get("call_id", ""), name=approval.get("name", ""), arguments=approval.get("arguments", {}), ) # Create the approval response - approval_response = FunctionApprovalResponseContent( + approval_response = Content.from_function_approval_response( approved=approval.get("approved", True), id=approval.get("id", ""), function_call=func_call, @@ -599,9 +589,9 @@ def _filter_modified_args( # Regular text message content = msg.get("content", "") if 
isinstance(content, str): - chat_msg = ChatMessage(role=role, contents=[TextContent(text=content)]) + chat_msg = ChatMessage(role=role, contents=[Content.from_text(text=content)]) else: - chat_msg = ChatMessage(role=role, contents=[TextContent(text=str(content))]) + chat_msg = ChatMessage(role=role, contents=[Content.from_text(text=str(content))]) if "id" in msg: chat_msg.message_id = msg["id"] @@ -652,9 +642,9 @@ def agent_framework_messages_to_agui(messages: list[ChatMessage] | list[dict[str tool_result_call_id: str | None = None for content in msg.contents: - if isinstance(content, TextContent): - content_text += content.text - elif isinstance(content, FunctionCallContent): + if content.type == "text": + content_text += content.text # type: ignore[operator] + elif content.type == "function_call": tool_calls.append( { "id": content.call_id, @@ -665,7 +655,7 @@ def agent_framework_messages_to_agui(messages: list[ChatMessage] | list[dict[str }, } ) - elif isinstance(content, FunctionResultContent): + elif content.type == "function_result": # Tool result content - extract call_id and result tool_result_call_id = content.call_id # Serialize result to string using core utility @@ -702,8 +692,13 @@ def extract_text_from_contents(contents: list[Any]) -> str: """ text_parts: list[str] = [] for content in contents: - if isinstance(content, TextContent): - text_parts.append(content.text) + if type_ := getattr(content, "type", None): + if type_ == "text_reasoning": + continue + if text := getattr(content, "text", None): + text_parts.append(text) + continue + # TODO (moonbox3): should this handle both text and text_reasoning? 
elif hasattr(content, "text"): text_parts.append(content.text) return "".join(text_parts) @@ -735,40 +730,35 @@ def agui_messages_to_snapshot_format(messages: list[dict[str, Any]]) -> list[dic if isinstance(content, list): # Convert content array format to simple string text_parts: list[str] = [] - content_list = cast(list[Any], content) - for item in content_list: + for item in content: if isinstance(item, dict): - item_dict = cast(dict[str, Any], item) # Convert 'input_text' to 'text' type - if item_dict.get("type") == "input_text": - text_parts.append(str(item_dict.get("text", ""))) - elif item_dict.get("type") == "text": - text_parts.append(str(item_dict.get("text", ""))) + if item.get("type") == "input_text": + text_parts.append(str(item.get("text", ""))) + elif item.get("type") == "text": + text_parts.append(str(item.get("text", ""))) else: # Other types - just extract text field if present - text_parts.append(str(item_dict.get("text", ""))) + text_parts.append(str(item.get("text", ""))) normalized_msg["content"] = "".join(text_parts) elif content is None: normalized_msg["content"] = "" tool_calls = normalized_msg.get("tool_calls") or normalized_msg.get("toolCalls") if isinstance(tool_calls, list): - tool_calls_list = cast(list[Any], tool_calls) - for tool_call in tool_calls_list: + for tool_call in tool_calls: if not isinstance(tool_call, dict): continue - tool_call_dict = cast(dict[str, Any], tool_call) - function_payload = tool_call_dict.get("function") + function_payload = tool_call.get("function") if not isinstance(function_payload, dict): continue - function_payload_dict = cast(dict[str, Any], function_payload) - if "arguments" not in function_payload_dict: + if "arguments" not in function_payload: continue - arguments = function_payload_dict.get("arguments") + arguments = function_payload.get("arguments") if arguments is None: - function_payload_dict["arguments"] = "" + function_payload["arguments"] = "" elif not isinstance(arguments, str): - 
function_payload_dict["arguments"] = json.dumps(arguments) + function_payload["arguments"] = json.dumps(arguments) # Normalize tool_call_id to toolCallId for tool messages normalized_msg["role"] = normalize_agui_role(normalized_msg.get("role")) @@ -782,11 +772,3 @@ def agui_messages_to_snapshot_format(messages: list[dict[str, Any]]) -> list[dic result.append(normalized_msg) return result - - -__all__ = [ - "agui_messages_to_agent_framework", - "agent_framework_messages_to_agui", - "agui_messages_to_snapshot_format", - "extract_text_from_contents", -] diff --git a/python/packages/ag-ui/agent_framework_ag_ui/_orchestration/_helpers.py b/python/packages/ag-ui/agent_framework_ag_ui/_orchestration/_helpers.py index ebf6ef6f57..f12430a086 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui/_orchestration/_helpers.py +++ b/python/packages/ag-ui/agent_framework_ag_ui/_orchestration/_helpers.py @@ -1,25 +1,21 @@ # Copyright (c) Microsoft. All rights reserved. -"""Helper functions for orchestration logic.""" +"""Helper functions for orchestration logic. + +Most orchestration helpers have been moved inline to _run.py. +This module retains utilities that may be useful for testing or extensions. 
+""" import json import logging -from typing import TYPE_CHECKING, Any +from typing import Any -from ag_ui.core import StateSnapshotEvent from agent_framework import ( ChatMessage, - FunctionApprovalResponseContent, - FunctionCallContent, - FunctionResultContent, - TextContent, + Content, ) -from .._utils import get_role_value, safe_json_parse - -if TYPE_CHECKING: - from .._events import AgentFrameworkEventBridge - from ._state_manager import StateManager +from .._utils import get_role_value logger = logging.getLogger(__name__) @@ -37,9 +33,9 @@ def pending_tool_call_ids(messages: list[ChatMessage]) -> set[str]: resolved_ids: set[str] = set() for msg in messages: for content in msg.contents: - if isinstance(content, FunctionCallContent) and content.call_id: + if content.type == "function_call" and content.call_id: pending_ids.add(str(content.call_id)) - elif isinstance(content, FunctionResultContent) and content.call_id: + elif content.type == "function_result" and content.call_id: resolved_ids.add(str(content.call_id)) return pending_ids - resolved_ids @@ -56,7 +52,7 @@ def is_state_context_message(message: ChatMessage) -> bool: if get_role_value(message) != "system": return False for content in message.contents: - if isinstance(content, TextContent) and content.text.startswith("Current state of the application:"): + if content.type == "text" and content.text.startswith("Current state of the application:"): # type: ignore[union-attr] return True return False @@ -114,53 +110,6 @@ def tool_name_for_call_id( return str(name) if name else None -def tool_calls_match_state( - provider_messages: list[ChatMessage], - state_manager: "StateManager", -) -> bool: - """Check if tool calls in messages match current state. 
- - Args: - provider_messages: Messages to check - state_manager: State manager with config and current state - - Returns: - True if tool calls match state configuration - """ - if not state_manager.predict_state_config or not state_manager.current_state: - return False - - for state_key, config in state_manager.predict_state_config.items(): - tool_name = config["tool"] - tool_arg_name = config["tool_argument"] - tool_args: dict[str, Any] | None = None - - for msg in reversed(provider_messages): - if get_role_value(msg) != "assistant": - continue - for content in msg.contents: - if isinstance(content, FunctionCallContent) and content.name == tool_name: - tool_args = safe_json_parse(content.arguments) - break - if tool_args is not None: - break - - if not tool_args: - return False - - if tool_arg_name == "*": - state_value = tool_args - elif tool_arg_name in tool_args: - state_value = tool_args[tool_arg_name] - else: - return False - - if state_manager.current_state.get(state_key) != state_value: - return False - - return True - - def schema_has_steps(schema: Any) -> bool: """Check if a schema has a steps array property. @@ -205,45 +154,10 @@ def select_approval_tool_name(client_tools: list[Any] | None) -> str | None: return None -def select_messages_to_run( - provider_messages: list[ChatMessage], - state_manager: "StateManager", -) -> list[ChatMessage]: - """Select and prepare messages for agent execution. - - Injects state context message when appropriate. 
- - Args: - provider_messages: Original messages from client - state_manager: State manager instance - - Returns: - Messages ready for agent execution - """ - if not provider_messages: - return [] - - is_new_user_turn = get_role_value(provider_messages[-1]) == "user" - conversation_has_tool_calls = tool_calls_match_state(provider_messages, state_manager) - state_context_msg = state_manager.state_context_message( - is_new_user_turn=is_new_user_turn, conversation_has_tool_calls=conversation_has_tool_calls - ) - if not state_context_msg: - return list(provider_messages) - - messages_to_run = [msg for msg in provider_messages if not is_state_context_message(msg)] - if pending_tool_call_ids(messages_to_run): - return messages_to_run - - insert_index = len(messages_to_run) - 1 if is_new_user_turn else len(messages_to_run) - if insert_index < 0: - insert_index = 0 - messages_to_run.insert(insert_index, state_context_msg) - return messages_to_run - - def build_safe_metadata(thread_metadata: dict[str, Any] | None) -> dict[str, Any]: - """Build metadata dict with truncated string values. + """Build metadata dict with truncated string values for Azure compatibility. + + Azure has a 512 character limit per metadata value. Args: thread_metadata: Raw metadata dict @@ -262,64 +176,7 @@ def build_safe_metadata(thread_metadata: dict[str, Any] | None) -> dict[str, Any return safe_metadata -def collect_approved_state_snapshots( - provider_messages: list[ChatMessage], - predict_state_config: dict[str, dict[str, str]] | None, - current_state: dict[str, Any], - event_bridge: "AgentFrameworkEventBridge", -) -> list[StateSnapshotEvent]: - """Collect state snapshots from approved function calls. 
- - Args: - provider_messages: Messages containing approvals - predict_state_config: Predictive state configuration - current_state: Current state dict (will be mutated) - event_bridge: Event bridge for creating events - - Returns: - List of state snapshot events - """ - if not predict_state_config: - return [] - - events: list[StateSnapshotEvent] = [] - for msg in provider_messages: - if get_role_value(msg) != "user": - continue - for content in msg.contents: - if type(content) is FunctionApprovalResponseContent: - if not content.function_call or not content.approved: - continue - parsed_args = content.function_call.parse_arguments() - state_args = None - if content.additional_properties: - state_args = content.additional_properties.get("ag_ui_state_args") - if not isinstance(state_args, dict): - state_args = parsed_args - if not state_args: - continue - for state_key, config in predict_state_config.items(): - if config["tool"] != content.function_call.name: - continue - tool_arg_name = config["tool_argument"] - if tool_arg_name == "*": - state_value = state_args - elif isinstance(state_args, dict) and tool_arg_name in state_args: - state_value = state_args[tool_arg_name] - else: - continue - current_state[state_key] = state_value - event_bridge.current_state[state_key] = state_value - logger.info( - f"Emitting StateSnapshotEvent for approved state key '{state_key}' " - f"with {len(state_value) if isinstance(state_value, list) else 'N/A'} items" - ) - events.append(StateSnapshotEvent(snapshot=current_state)) - break - return events - - -def latest_approval_response(messages: list[ChatMessage]) -> FunctionApprovalResponseContent | None: +def latest_approval_response(messages: list[ChatMessage]) -> Content | None: """Get the latest approval response from messages. 
Args: @@ -332,12 +189,12 @@ def latest_approval_response(messages: list[ChatMessage]) -> FunctionApprovalRes return None last_message = messages[-1] for content in last_message.contents: - if type(content) is FunctionApprovalResponseContent: + if content.type == "function_approval_response": return content return None -def approval_steps(approval: FunctionApprovalResponseContent) -> list[Any]: +def approval_steps(approval: Content) -> list[Any]: """Extract steps from an approval response. Args: @@ -346,9 +203,7 @@ def approval_steps(approval: FunctionApprovalResponseContent) -> list[Any]: Returns: List of steps, or empty list if none """ - state_args: Any | None = None - if approval.additional_properties: - state_args = approval.additional_properties.get("ag_ui_state_args") + state_args = approval.additional_properties.get("ag_ui_state_args", None) if isinstance(state_args, dict): steps = state_args.get("steps") if isinstance(steps, list): @@ -365,7 +220,7 @@ def approval_steps(approval: FunctionApprovalResponseContent) -> list[Any]: def is_step_based_approval( - approval: FunctionApprovalResponseContent, + approval: Content, predict_state_config: dict[str, dict[str, str]] | None, ) -> bool: """Check if an approval is step-based. diff --git a/python/packages/ag-ui/agent_framework_ag_ui/_orchestration/_state_manager.py b/python/packages/ag-ui/agent_framework_ag_ui/_orchestration/_state_manager.py deleted file mode 100644 index 7d8a23d84c..0000000000 --- a/python/packages/ag-ui/agent_framework_ag_ui/_orchestration/_state_manager.py +++ /dev/null @@ -1,106 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. 
- -"""State orchestration utilities.""" - -import json -from typing import Any - -from ag_ui.core import CustomEvent, EventType -from agent_framework import ChatMessage, TextContent - - -class StateManager: - """Coordinates state defaults, snapshots, and structured updates.""" - - def __init__( - self, - state_schema: dict[str, Any] | None, - predict_state_config: dict[str, dict[str, str]] | None, - require_confirmation: bool, - ) -> None: - self.state_schema = state_schema or {} - self.predict_state_config = predict_state_config or {} - self.require_confirmation = require_confirmation - self.current_state: dict[str, Any] = {} - self._state_from_input: bool = False - - def initialize(self, initial_state: dict[str, Any] | None) -> dict[str, Any]: - """Initialize state with schema defaults.""" - self._state_from_input = initial_state is not None - self.current_state = (initial_state or {}).copy() - self._apply_schema_defaults() - return self.current_state - - def predict_state_event(self) -> CustomEvent | None: - """Create predict-state custom event when configured.""" - if not self.predict_state_config: - return None - - predict_state_value = [ - { - "state_key": state_key, - "tool": config["tool"], - "tool_argument": config["tool_argument"], - } - for state_key, config in self.predict_state_config.items() - ] - - return CustomEvent( - type=EventType.CUSTOM, - name="PredictState", - value=predict_state_value, - ) - - def initial_snapshot_event(self, event_bridge: Any) -> Any: - """Emit initial snapshot when schema and state present.""" - if not self.state_schema: - return None - self._apply_schema_defaults() - return event_bridge.create_state_snapshot_event(self.current_state) - - def state_context_message(self, is_new_user_turn: bool, conversation_has_tool_calls: bool) -> ChatMessage | None: - """Inject state context only when starting a new user turn.""" - if not self.current_state or not self.state_schema: - return None - if not is_new_user_turn: - return None - 
if conversation_has_tool_calls and not self._state_from_input: - return None - - state_json = json.dumps(self.current_state, indent=2) - return ChatMessage( - role="system", - contents=[ - TextContent( - text=( - "Current state of the application:\n" - f"{state_json}\n\n" - "When modifying state, you MUST include ALL existing data plus your changes.\n" - "For example, if adding one new item to a list, include ALL existing items PLUS the one new item.\n" - "Never replace existing data - always preserve and append or merge." - ) - ) - ], - ) - - def extract_state_updates(self, response_dict: dict[str, Any]) -> dict[str, Any]: - """Extract state updates from structured response payloads.""" - if self.state_schema: - return {key: response_dict[key] for key in self.state_schema.keys() if key in response_dict} - return {k: v for k, v in response_dict.items() if k != "message"} - - def apply_state_updates(self, updates: dict[str, Any]) -> None: - """Merge state updates into current state.""" - if not updates: - return - self.current_state.update(updates) - - def _apply_schema_defaults(self) -> None: - """Fill missing state fields based on schema hints.""" - for key, schema in self.state_schema.items(): - if key in self.current_state: - continue - if isinstance(schema, dict) and schema.get("type") == "array": # type: ignore - self.current_state[key] = [] - else: - self.current_state[key] = {} diff --git a/python/packages/ag-ui/agent_framework_ag_ui/_orchestration/_tooling.py b/python/packages/ag-ui/agent_framework_ag_ui/_orchestration/_tooling.py index 0f86516448..5df6cd1d14 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui/_orchestration/_tooling.py +++ b/python/packages/ag-ui/agent_framework_ag_ui/_orchestration/_tooling.py @@ -84,9 +84,26 @@ def register_additional_client_tools(agent: "AgentProtocol", client_tools: list[ logger.debug(f"[TOOLS] Registered {len(client_tools)} client tools as additional_tools (declaration-only)") +def _has_approval_tools(tools: 
list[Any]) -> bool: + """Check if any tools require approval.""" + return any(getattr(tool, "approval_mode", None) == "always_require" for tool in tools) + + def merge_tools(server_tools: list[Any], client_tools: list[Any] | None) -> list[Any] | None: - """Combine server and client tools without overriding server metadata.""" + """Combine server and client tools without overriding server metadata. + + IMPORTANT: When server tools have approval_mode="always_require", we MUST return + them so they get passed to the streaming response handler. Otherwise, the approval + check in _try_execute_function_calls won't find the tool and won't trigger approval. + """ if not client_tools: + # Even without client tools, we must pass server tools if any require approval + if server_tools and _has_approval_tools(server_tools): + logger.info( + f"[TOOLS] No client tools but server has approval tools - " + f"passing {len(server_tools)} server tools for approval mode" + ) + return server_tools logger.info("[TOOLS] No client tools - not passing tools= parameter (using agent's configured tools)") return None @@ -94,6 +111,13 @@ def merge_tools(server_tools: list[Any], client_tools: list[Any] | None) -> list unique_client_tools = [tool for tool in client_tools if getattr(tool, "name", None) not in server_tool_names] if not unique_client_tools: + # Same check: must pass server tools if any require approval + if server_tools and _has_approval_tools(server_tools): + logger.info( + f"[TOOLS] Client tools duplicate server but server has approval tools - " + f"passing {len(server_tools)} server tools for approval mode" + ) + return server_tools logger.info("[TOOLS] All client tools duplicate server tools - not passing tools= parameter") return None diff --git a/python/packages/ag-ui/agent_framework_ag_ui/_orchestrators.py b/python/packages/ag-ui/agent_framework_ag_ui/_orchestrators.py deleted file mode 100644 index b5566f0aec..0000000000 --- 
a/python/packages/ag-ui/agent_framework_ag_ui/_orchestrators.py +++ /dev/null @@ -1,807 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. - -"""Orchestrators for multi-turn agent flows.""" - -import json -import logging -import uuid -from abc import ABC, abstractmethod -from collections.abc import AsyncGenerator, Sequence -from typing import TYPE_CHECKING, Any - -from ag_ui.core import ( - BaseEvent, - MessagesSnapshotEvent, - RunErrorEvent, - TextMessageContentEvent, - TextMessageEndEvent, - TextMessageStartEvent, - ToolCallArgsEvent, - ToolCallEndEvent, - ToolCallResultEvent, - ToolCallStartEvent, -) -from agent_framework import ( - AgentProtocol, - AgentThread, - ChatAgent, - FunctionCallContent, - FunctionResultContent, - TextContent, -) -from agent_framework._middleware import extract_and_merge_function_middleware -from agent_framework._tools import ( - FunctionInvocationConfiguration, - _collect_approval_responses, # type: ignore - _replace_approval_contents_with_results, # type: ignore - _try_execute_function_calls, # type: ignore -) - -from ._orchestration._helpers import ( - approval_steps, - build_safe_metadata, - collect_approved_state_snapshots, - ensure_tool_call_entry, - is_step_based_approval, - latest_approval_response, - select_approval_tool_name, - select_messages_to_run, - tool_name_for_call_id, -) -from ._orchestration._tooling import ( - collect_server_tools, - merge_tools, - register_additional_client_tools, -) -from ._utils import ( - convert_agui_tools_to_agent_framework, - generate_event_id, - get_conversation_id_from_update, - get_role_value, -) - -if TYPE_CHECKING: - from ._agent import AgentConfig - from ._confirmation_strategies import ConfirmationStrategy - from ._events import AgentFrameworkEventBridge - from ._orchestration._state_manager import StateManager - - -logger = logging.getLogger(__name__) - - -class ExecutionContext: - """Shared context for orchestrators.""" - - def __init__( - self, - input_data: dict[str, Any], - 
agent: AgentProtocol, - config: "AgentConfig", # noqa: F821 - confirmation_strategy: "ConfirmationStrategy | None" = None, # noqa: F821 - ): - """Initialize execution context. - - Args: - input_data: AG-UI run input containing messages, state, etc. - agent: The Agent Framework agent to execute - config: Agent configuration - confirmation_strategy: Strategy for generating confirmation messages - """ - self.input_data = input_data - self.agent = agent - self.config = config - self.confirmation_strategy = confirmation_strategy - - # Lazy-loaded properties - self._messages = None - self._snapshot_messages = None - self._last_message = None - self._run_id: str | None = None - self._thread_id: str | None = None - self._supplied_run_id: str | None = None - self._supplied_thread_id: str | None = None - - @property - def messages(self): - """Get converted Agent Framework messages (lazy loaded).""" - if self._messages is None: - from ._message_adapters import normalize_agui_input_messages - - raw = self.input_data.get("messages", []) - if not isinstance(raw, list): - raw = [] - self._messages, self._snapshot_messages = normalize_agui_input_messages(raw) - return self._messages - - @property - def snapshot_messages(self) -> list[dict[str, Any]]: - """Get normalized AG-UI snapshot messages (lazy loaded).""" - if self._snapshot_messages is None: - if self._messages is None: - _ = self.messages - else: - from ._message_adapters import agent_framework_messages_to_agui, agui_messages_to_snapshot_format - - raw_snapshot = agent_framework_messages_to_agui(self._messages) - self._snapshot_messages = agui_messages_to_snapshot_format(raw_snapshot) - return self._snapshot_messages or [] - - @property - def last_message(self): - """Get the last message in the conversation (lazy loaded).""" - if self._last_message is None and self.messages: - self._last_message = self.messages[-1] - return self._last_message - - @property - def supplied_run_id(self) -> str | None: - """Get the supplied 
run ID, if any.""" - if self._supplied_run_id is None: - self._supplied_run_id = self.input_data.get("run_id") or self.input_data.get("runId") - return self._supplied_run_id - - @property - def run_id(self) -> str: - """Get supplied run ID or generate a new run ID.""" - if self._run_id: - return self._run_id - - if self.supplied_run_id: - self._run_id = self.supplied_run_id - - if self._run_id is None: - self._run_id = str(uuid.uuid4()) - - return self._run_id - - @property - def supplied_thread_id(self) -> str | None: - """Get the supplied thread ID, if any.""" - if self._supplied_thread_id is None: - self._supplied_thread_id = self.input_data.get("thread_id") or self.input_data.get("threadId") - return self._supplied_thread_id - - @property - def thread_id(self) -> str: - """Get supplied thread ID or generate a new thread ID.""" - if self._thread_id: - return self._thread_id - - if self.supplied_thread_id: - self._thread_id = self.supplied_thread_id - - if self._thread_id is None: - self._thread_id = str(uuid.uuid4()) - - return self._thread_id - - def update_run_id(self, new_run_id: str) -> None: - """Update the run ID in the context. - - Args: - new_run_id: The new run ID to set - """ - self._supplied_run_id = new_run_id - self._run_id = new_run_id - - def update_thread_id(self, new_thread_id: str) -> None: - """Update the thread ID in the context. - - Args: - new_thread_id: The new thread ID to set - """ - self._supplied_thread_id = new_thread_id - self._thread_id = new_thread_id - - -class Orchestrator(ABC): - """Base orchestrator for agent execution flows.""" - - @abstractmethod - def can_handle(self, context: ExecutionContext) -> bool: - """Determine if this orchestrator handles the current request. - - Args: - context: Execution context with input data and agent - - Returns: - True if this orchestrator should handle the request - """ - ... 
- - @abstractmethod - async def run( - self, - context: ExecutionContext, - ) -> AsyncGenerator[BaseEvent, None]: - """Execute the orchestration and yield events. - - Args: - context: Execution context - - Yields: - AG-UI events - """ - # This is never executed - just satisfies mypy's requirement for async generators - if False: # pragma: no cover - yield - raise NotImplementedError - - -class HumanInTheLoopOrchestrator(Orchestrator): - """Handles tool approval responses from user.""" - - def can_handle(self, context: ExecutionContext) -> bool: - """Check if last message is a tool approval response. - - Args: - context: Execution context - - Returns: - True if last message is a tool result - """ - msg = context.last_message - if not msg: - return False - - return bool(msg.additional_properties.get("is_tool_result", False)) - - async def run( - self, - context: ExecutionContext, - ) -> AsyncGenerator[BaseEvent, None]: - """Process approval response and generate confirmation events. - - This implementation is extracted from the legacy _agent.py lines 144-244. 
- - Args: - context: Execution context - - Yields: - AG-UI events (TextMessage, RunFinished) - """ - from ._confirmation_strategies import DefaultConfirmationStrategy - from ._events import AgentFrameworkEventBridge - - logger.info("=== TOOL RESULT DETECTED (HumanInTheLoopOrchestrator) ===") - - # Create event bridge for run events - event_bridge = AgentFrameworkEventBridge( - run_id=context.run_id, - thread_id=context.thread_id, - ) - - # CRITICAL: Every AG-UI run must start with RunStartedEvent - yield event_bridge.create_run_started_event() - - # Get confirmation strategy (use default if none provided) - strategy = context.confirmation_strategy - if strategy is None: - strategy = DefaultConfirmationStrategy() - - # Parse the tool result content - tool_content_text = "" - last_message = context.last_message - if last_message: - for content in last_message.contents: - if isinstance(content, TextContent): - tool_content_text = content.text - break - - try: - tool_result = json.loads(tool_content_text) - accepted = tool_result.get("accepted", False) - steps = tool_result.get("steps", []) - - logger.info(f" Accepted: {accepted}") - logger.info(f" Steps count: {len(steps)}") - - # Emit a text message confirming execution - message_id = generate_event_id() - - yield TextMessageStartEvent(message_id=message_id, role="assistant") - - # Check if this is confirm_changes (no steps) or function approval (has steps) - if not steps: - # This is confirm_changes for predictive state updates - if accepted: - confirmation_message = strategy.on_state_confirmed() - else: - confirmation_message = strategy.on_state_rejected() - elif accepted: - # User approved - execute the enabled steps (function approval flow) - confirmation_message = strategy.on_approval_accepted(steps) - else: - # User rejected - confirmation_message = strategy.on_approval_rejected(steps) - - yield TextMessageContentEvent( - message_id=message_id, - delta=confirmation_message, - ) - - yield 
TextMessageEndEvent(message_id=message_id) - - # Emit run finished - yield event_bridge.create_run_finished_event() - - except json.JSONDecodeError: - logger.error(f"Failed to parse tool result: {tool_content_text}") - yield RunErrorEvent(message=f"Invalid tool result format: {tool_content_text[:100]}") - yield event_bridge.create_run_finished_event() - - -class DefaultOrchestrator(Orchestrator): - """Standard agent execution (no special handling).""" - - def can_handle(self, context: ExecutionContext) -> bool: - """Always returns True as this is the fallback orchestrator. - - Args: - context: Execution context - - Returns: - Always True - """ - return True - - def _create_initial_events( - self, event_bridge: "AgentFrameworkEventBridge", state_manager: "StateManager" - ) -> Sequence[BaseEvent]: - """Generate initial events for the run. - - Args: - event_bridge: Event bridge for creating events - Returns: - Initial AG-UI events - """ - events: list[BaseEvent] = [event_bridge.create_run_started_event()] - - predict_event = state_manager.predict_state_event() - if predict_event: - events.append(predict_event) - - snapshot_event = state_manager.initial_snapshot_event(event_bridge) - if snapshot_event: - events.append(snapshot_event) - - return events - - async def run( - self, - context: ExecutionContext, - ) -> AsyncGenerator[BaseEvent, None]: - """Standard agent run with event translation. - - This implements the default agent execution flow using the event bridge - to translate Agent Framework events to AG-UI events. 
- - Args: - context: Execution context - - Yields: - AG-UI events - """ - from ._events import AgentFrameworkEventBridge - from ._orchestration._state_manager import StateManager - - logger.info(f"Starting default agent run for thread_id={context.thread_id}, run_id={context.run_id}") - - response_format = None - if isinstance(context.agent, ChatAgent): - response_format = context.agent.default_options.get("response_format") - skip_text_content = response_format is not None - - client_tools = convert_agui_tools_to_agent_framework(context.input_data.get("tools")) - approval_tool_name = select_approval_tool_name(client_tools) - - state_manager = StateManager( - state_schema=context.config.state_schema, - predict_state_config=context.config.predict_state_config, - require_confirmation=context.config.require_confirmation, - ) - current_state = state_manager.initialize(context.input_data.get("state")) - - event_bridge = AgentFrameworkEventBridge( - run_id=context.run_id, - thread_id=context.thread_id, - predict_state_config=context.config.predict_state_config, - current_state=current_state, - skip_text_content=skip_text_content, - require_confirmation=context.config.require_confirmation, - approval_tool_name=approval_tool_name, - ) - - if context.config.use_service_thread: - thread = AgentThread(service_thread_id=context.supplied_thread_id) - else: - thread = AgentThread() - - thread.metadata = { # type: ignore[attr-defined] - "ag_ui_thread_id": context.thread_id, - "ag_ui_run_id": context.run_id, - } - if current_state: - thread.metadata["current_state"] = current_state # type: ignore[attr-defined] - - provider_messages = context.messages or [] - snapshot_messages = context.snapshot_messages - if not provider_messages: - for event in self._create_initial_events(event_bridge, state_manager): - yield event - logger.warning("No messages provided in AG-UI input") - yield event_bridge.create_run_finished_event() - return - - logger.info(f"Received {len(provider_messages)} 
provider messages from client") - for i, msg in enumerate(provider_messages): - role = get_role_value(msg) - msg_id = getattr(msg, "message_id", None) - logger.info(f" Message {i}: role={role}, id={msg_id}") - if hasattr(msg, "contents") and msg.contents: - for j, content in enumerate(msg.contents): - content_type = type(content).__name__ - if isinstance(content, TextContent): - logger.debug(" Content %s: %s - text_length=%s", j, content_type, len(content.text)) - elif isinstance(content, FunctionCallContent): - arg_length = len(str(content.arguments)) if content.arguments else 0 - logger.debug( - " Content %s: %s - %s args_length=%s", j, content_type, content.name, arg_length - ) - elif isinstance(content, FunctionResultContent): - result_preview = type(content.result).__name__ if content.result is not None else "None" - logger.debug( - " Content %s: %s - call_id=%s, result_type=%s", - j, - content_type, - content.call_id, - result_preview, - ) - else: - logger.debug(f" Content {j}: {content_type}") - - pending_tool_calls: list[dict[str, Any]] = [] - tool_calls_by_id: dict[str, dict[str, Any]] = {} - tool_results: list[dict[str, Any]] = [] - tool_calls_ended: set[str] = set() - messages_snapshot_emitted = False - accumulated_text_content = "" - active_message_id: str | None = None - - # Check for FunctionApprovalResponseContent and emit updated state snapshot - # This ensures the UI shows the approved state (e.g., 2 steps) not the original (3 steps) - for snapshot_evt in collect_approved_state_snapshots( - provider_messages, - context.config.predict_state_config, - current_state, - event_bridge, - ): - yield snapshot_evt - - messages_to_run = select_messages_to_run(provider_messages, state_manager) - - logger.info(f"[TOOLS] Client sent {len(client_tools) if client_tools else 0} tools") - if client_tools: - for tool in client_tools: - tool_name = getattr(tool, "name", "unknown") - declaration_only = getattr(tool, "declaration_only", None) - logger.info(f"[TOOLS] - 
Client tool: {tool_name}, declaration_only={declaration_only}") - - server_tools = collect_server_tools(context.agent) - register_additional_client_tools(context.agent, client_tools) - tools_param = merge_tools(server_tools, client_tools) - - collect_updates = response_format is not None - all_updates: list[Any] | None = [] if collect_updates else None - update_count = 0 - # Prepare metadata for chat client (Azure requires string values) - safe_metadata = build_safe_metadata(getattr(thread, "metadata", None)) - - run_kwargs: dict[str, Any] = { - "thread": thread, - "tools": tools_param, - "options": {"metadata": safe_metadata}, - } - if safe_metadata: - run_kwargs["options"]["store"] = True - - async def _resolve_approval_responses( - messages: list[Any], - tools_for_execution: list[Any], - ) -> None: - fcc_todo = _collect_approval_responses(messages) - if not fcc_todo: - return - - approved_responses = [resp for resp in fcc_todo.values() if resp.approved] - approved_function_results: list[Any] = [] - if approved_responses and tools_for_execution: - chat_client = getattr(context.agent, "chat_client", None) - config = ( - getattr(chat_client, "function_invocation_configuration", None) or FunctionInvocationConfiguration() - ) - middleware_pipeline = extract_and_merge_function_middleware(chat_client, run_kwargs) - try: - results, _ = await _try_execute_function_calls( - custom_args=run_kwargs, - attempt_idx=0, - function_calls=approved_responses, - tools=tools_for_execution, - middleware_pipeline=middleware_pipeline, - config=config, - ) - approved_function_results = list(results) - except Exception: - logger.error("Failed to execute approved tool calls; injecting error results.") - approved_function_results = [] - - normalized_results: list[FunctionResultContent] = [] - for idx, approval in enumerate(approved_responses): - if idx < len(approved_function_results) and isinstance( - approved_function_results[idx], FunctionResultContent - ): - 
normalized_results.append(approved_function_results[idx]) - continue - call_id = approval.function_call.call_id or approval.id - normalized_results.append( - FunctionResultContent(call_id=call_id, result="Error: Tool call invocation failed.") - ) - - _replace_approval_contents_with_results(messages, fcc_todo, normalized_results) # type: ignore - - def _should_emit_tool_snapshot(tool_name: str | None) -> bool: - if not pending_tool_calls or not tool_results: - return False - if tool_name and context.config.predict_state_config and not context.config.require_confirmation: - for config in context.config.predict_state_config.values(): - if config["tool"] == tool_name: - logger.info( - f"Skipping intermediate MessagesSnapshotEvent for predictive tool '{tool_name}' " - " - delaying until summary" - ) - return False - return True - - def _build_messages_snapshot(tool_message_id: str | None = None) -> MessagesSnapshotEvent: - has_text_content = bool(accumulated_text_content) - all_messages = snapshot_messages.copy() - - if pending_tool_calls: - if tool_message_id and not has_text_content: - tool_call_message_id = tool_message_id - else: - tool_call_message_id = ( - active_message_id if not has_text_content and active_message_id else generate_event_id() - ) - tool_call_message = { - "id": tool_call_message_id, - "role": "assistant", - "tool_calls": pending_tool_calls.copy(), - } - all_messages.append(tool_call_message) - - all_messages.extend(tool_results) - - if has_text_content and active_message_id: - assistant_text_message = { - "id": active_message_id, - "role": "assistant", - "content": accumulated_text_content, - } - all_messages.append(assistant_text_message) - - return MessagesSnapshotEvent( - messages=all_messages, # type: ignore[arg-type] - ) - - # Use tools_param if available (includes client tools), otherwise fall back to server_tools - # This ensures both server tools AND client tools can be executed after approval - tools_for_approval = tools_param if 
tools_param is not None else server_tools - latest_approval = latest_approval_response(messages_to_run) - await _resolve_approval_responses(messages_to_run, tools_for_approval) - - if latest_approval and is_step_based_approval(latest_approval, context.config.predict_state_config): - from ._confirmation_strategies import DefaultConfirmationStrategy - - strategy = context.confirmation_strategy - if strategy is None: - strategy = DefaultConfirmationStrategy() - - steps = approval_steps(latest_approval) - if steps: - if latest_approval.approved: - confirmation_message = strategy.on_approval_accepted(steps) - else: - confirmation_message = strategy.on_approval_rejected(steps) - else: - if latest_approval.approved: - confirmation_message = strategy.on_state_confirmed() - else: - confirmation_message = strategy.on_state_rejected() - - message_id = generate_event_id() - for event in self._create_initial_events(event_bridge, state_manager): - yield event - yield TextMessageStartEvent(message_id=message_id, role="assistant") - yield TextMessageContentEvent(message_id=message_id, delta=confirmation_message) - yield TextMessageEndEvent(message_id=message_id) - yield event_bridge.create_run_finished_event() - return - - should_recreate_event_bridge = False - async for update in context.agent.run_stream(messages_to_run, **run_kwargs): - conv_id = get_conversation_id_from_update(update) - if conv_id and conv_id != context.thread_id: - context.update_thread_id(conv_id) - should_recreate_event_bridge = True - - if update.response_id and update.response_id != context.run_id: - context.update_run_id(update.response_id) - should_recreate_event_bridge = True - - if should_recreate_event_bridge: - event_bridge = AgentFrameworkEventBridge( - run_id=context.run_id, - thread_id=context.thread_id, - predict_state_config=context.config.predict_state_config, - current_state=current_state, - skip_text_content=skip_text_content, - require_confirmation=context.config.require_confirmation, - 
approval_tool_name=approval_tool_name, - ) - should_recreate_event_bridge = False - - if update_count == 0: - for event in self._create_initial_events(event_bridge, state_manager): - yield event - - update_count += 1 - logger.info(f"[STREAM] Received update #{update_count} from agent") - if all_updates is not None: - all_updates.append(update) - if event_bridge.current_message_id is None and update.contents: - has_tool_call = any(isinstance(content, FunctionCallContent) for content in update.contents) - has_text = any(isinstance(content, TextContent) for content in update.contents) - if has_tool_call and not has_text: - tool_message_id = generate_event_id() - event_bridge.current_message_id = tool_message_id - active_message_id = tool_message_id - accumulated_text_content = "" - logger.info( - "[STREAM] Emitting TextMessageStartEvent for tool-only response message_id=%s", - tool_message_id, - ) - yield TextMessageStartEvent(message_id=tool_message_id, role="assistant") - events = await event_bridge.from_agent_run_update(update) - logger.info(f"[STREAM] Update #{update_count} produced {len(events)} events") - for event in events: - if isinstance(event, TextMessageStartEvent): - active_message_id = event.message_id - accumulated_text_content = "" - elif isinstance(event, TextMessageContentEvent): - accumulated_text_content += event.delta - elif isinstance(event, ToolCallStartEvent): - tool_call_entry = ensure_tool_call_entry(event.tool_call_id, tool_calls_by_id, pending_tool_calls) - tool_call_entry["function"]["name"] = event.tool_call_name - elif isinstance(event, ToolCallArgsEvent): - tool_call_entry = ensure_tool_call_entry(event.tool_call_id, tool_calls_by_id, pending_tool_calls) - tool_call_entry["function"]["arguments"] += event.delta - elif isinstance(event, ToolCallEndEvent): - tool_calls_ended.add(event.tool_call_id) - elif isinstance(event, ToolCallResultEvent): - tool_results.append( - { - "id": event.message_id, - "role": "tool", - "toolCallId": 
event.tool_call_id, - "content": event.content, - } - ) - logger.info(f"[STREAM] Yielding event: {type(event).__name__}") - yield event - if isinstance(event, ToolCallResultEvent): - tool_name = tool_name_for_call_id(tool_calls_by_id, event.tool_call_id) - if _should_emit_tool_snapshot(tool_name): - messages_snapshot_emitted = True - messages_snapshot = _build_messages_snapshot() - logger.info(f"[STREAM] Yielding event: {type(messages_snapshot).__name__}") - yield messages_snapshot - elif isinstance(event, ToolCallEndEvent): - tool_name = tool_name_for_call_id(tool_calls_by_id, event.tool_call_id) - if tool_name == "confirm_changes": - messages_snapshot_emitted = True - messages_snapshot = _build_messages_snapshot() - logger.info(f"[STREAM] Yielding event: {type(messages_snapshot).__name__}") - yield messages_snapshot - - logger.info(f"[STREAM] Agent stream completed. Total updates: {update_count}") - - if event_bridge.should_stop_after_confirm: - logger.info("Stopping run - waiting for user approval/confirmation response") - if event_bridge.current_message_id: - logger.info(f"[CONFIRM] Emitting TextMessageEndEvent for message_id={event_bridge.current_message_id}") - yield event_bridge.create_message_end_event(event_bridge.current_message_id) - event_bridge.current_message_id = None - yield event_bridge.create_run_finished_event() - return - - if pending_tool_calls: - pending_without_end = [tc for tc in pending_tool_calls if tc.get("id") not in tool_calls_ended] - if pending_without_end: - logger.info( - "Found %s pending tool calls without end event - emitting ToolCallEndEvent", - len(pending_without_end), - ) - for tool_call in pending_without_end: - tool_call_id = tool_call.get("id") - if tool_call_id: - end_event = ToolCallEndEvent(tool_call_id=tool_call_id) - logger.info(f"Emitting ToolCallEndEvent for declaration-only tool call '{tool_call_id}'") - yield end_event - - if response_format and all_updates: - from agent_framework import AgentResponse - from 
pydantic import BaseModel - - logger.info(f"Processing structured output, update count: {len(all_updates)}") - final_response = AgentResponse.from_agent_run_response_updates( - all_updates, output_format_type=response_format - ) - - if final_response.value and isinstance(final_response.value, BaseModel): - response_dict = final_response.value.model_dump(mode="json", exclude_none=True) - logger.info(f"Received structured output keys: {list(response_dict.keys())}") - - state_updates = state_manager.extract_state_updates(response_dict) - if state_updates: - state_manager.apply_state_updates(state_updates) - state_snapshot = event_bridge.create_state_snapshot_event(current_state) - yield state_snapshot - logger.info(f"Emitted StateSnapshotEvent with updates: {list(state_updates.keys())}") - - if "message" in response_dict and response_dict["message"]: - message_id = generate_event_id() - yield TextMessageStartEvent(message_id=message_id, role="assistant") - yield TextMessageContentEvent(message_id=message_id, delta=response_dict["message"]) - yield TextMessageEndEvent(message_id=message_id) - logger.info(f"Emitted conversational message with length={len(response_dict['message'])}") - - if all_updates is not None and len(all_updates) == 0: - logger.info("No updates received from agent - emitting initial events") - for event in self._create_initial_events(event_bridge, state_manager): - yield event - - logger.info(f"[FINALIZE] Checking for unclosed message. 
current_message_id={event_bridge.current_message_id}") - if event_bridge.current_message_id: - logger.info(f"[FINALIZE] Emitting TextMessageEndEvent for message_id={event_bridge.current_message_id}") - yield event_bridge.create_message_end_event(event_bridge.current_message_id) - - messages_snapshot = _build_messages_snapshot(tool_message_id=event_bridge.current_message_id) - messages_snapshot_emitted = True - logger.info( - f"[FINALIZE] Emitting MessagesSnapshotEvent with {len(messages_snapshot.messages)} messages " - f"(text content length: {len(accumulated_text_content)})" - ) - yield messages_snapshot - else: - logger.info("[FINALIZE] No current_message_id - skipping TextMessageEndEvent") - if not messages_snapshot_emitted and (pending_tool_calls or tool_results): - messages_snapshot = _build_messages_snapshot() - messages_snapshot_emitted = True - logger.info( - f"[FINALIZE] Emitting MessagesSnapshotEvent with {len(messages_snapshot.messages)} messages" - ) - yield messages_snapshot - - logger.info("[FINALIZE] Emitting RUN_FINISHED event") - yield event_bridge.create_run_finished_event() - logger.info(f"Completed agent run for thread_id={context.thread_id}, run_id={context.run_id}") - - -__all__ = [ - "Orchestrator", - "ExecutionContext", - "HumanInTheLoopOrchestrator", - "DefaultOrchestrator", -] diff --git a/python/packages/ag-ui/agent_framework_ag_ui/_run.py b/python/packages/ag-ui/agent_framework_ag_ui/_run.py new file mode 100644 index 0000000000..d1229620a7 --- /dev/null +++ b/python/packages/ag-ui/agent_framework_ag_ui/_run.py @@ -0,0 +1,963 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +"""Simplified AG-UI orchestration - single linear flow.""" + +import json +import logging +import uuid +from dataclasses import dataclass, field +from typing import TYPE_CHECKING, Any + +from ag_ui.core import ( + BaseEvent, + CustomEvent, + MessagesSnapshotEvent, + RunFinishedEvent, + RunStartedEvent, + StateSnapshotEvent, + TextMessageContentEvent, + TextMessageEndEvent, + TextMessageStartEvent, + ToolCallArgsEvent, + ToolCallEndEvent, + ToolCallResultEvent, + ToolCallStartEvent, +) +from agent_framework import ( + AgentProtocol, + AgentThread, + ChatMessage, + Content, + prepare_function_call_results, +) +from agent_framework._middleware import extract_and_merge_function_middleware +from agent_framework._tools import ( + FunctionInvocationConfiguration, + _collect_approval_responses, # type: ignore + _replace_approval_contents_with_results, # type: ignore + _try_execute_function_calls, # type: ignore +) + +from ._message_adapters import normalize_agui_input_messages +from ._orchestration._predictive_state import PredictiveStateHandler +from ._orchestration._tooling import collect_server_tools, merge_tools, register_additional_client_tools +from ._utils import ( + convert_agui_tools_to_agent_framework, + generate_event_id, + get_conversation_id_from_update, + make_json_safe, +) + +if TYPE_CHECKING: + from collections.abc import AsyncGenerator + + from ._agent import AgentConfig + +logger = logging.getLogger(__name__) + +# Keys that are internal to AG-UI orchestration and should not be passed to chat clients +AG_UI_INTERNAL_METADATA_KEYS = {"ag_ui_thread_id", "ag_ui_run_id", "current_state"} + + +def _build_safe_metadata(thread_metadata: dict[str, Any] | None) -> dict[str, Any]: + """Build metadata dict with truncated string values for Azure compatibility. + + Azure has a 512 character limit per metadata value. 
+ + Args: + thread_metadata: Raw metadata dict + + Returns: + Metadata with string values truncated to 512 chars + """ + if not thread_metadata: + return {} + safe_metadata: dict[str, Any] = {} + for key, value in thread_metadata.items(): + value_str = value if isinstance(value, str) else json.dumps(value) + if len(value_str) > 512: + value_str = value_str[:512] + safe_metadata[key] = value_str + return safe_metadata + + +def _has_only_tool_calls(contents: list[Any]) -> bool: + """Check if contents have only tool calls (no text). + + Args: + contents: List of content items + + Returns: + True if there are tool calls but no text content + """ + has_tool_call = any(getattr(c, "type", None) == "function_call" for c in contents) + has_text = any(getattr(c, "type", None) == "text" and getattr(c, "text", None) for c in contents) + return has_tool_call and not has_text + + +def _should_suppress_intermediate_snapshot( + tool_name: str | None, + predict_state_config: dict[str, dict[str, str]] | None, + require_confirmation: bool, +) -> bool: + """Check if intermediate MessagesSnapshotEvent should be suppressed for this tool. + + For predictive tools without confirmation, we delay the snapshot until the end. 
+ + Args: + tool_name: Name of the tool that just completed + predict_state_config: Predictive state configuration + require_confirmation: Whether confirmation is required + + Returns: + True if snapshot should be suppressed + """ + if not tool_name or not predict_state_config: + return False + # Only suppress when confirmation is disabled + if require_confirmation: + return False + # Check if this tool is a predictive tool + for config in predict_state_config.values(): + if config["tool"] == tool_name: + logger.info(f"Suppressing intermediate MessagesSnapshotEvent for predictive tool '{tool_name}'") + return True + return False + + +def _extract_approved_state_updates( + messages: list[Any], + predictive_handler: PredictiveStateHandler | None, +) -> dict[str, Any]: + """Extract state updates from function_approval_response content. + + This emits StateSnapshotEvent for approved state-changing tools before running agent. + + Args: + messages: List of messages to scan + predictive_handler: Predictive state handler + + Returns: + Dict of state updates to apply + """ + if not predictive_handler: + return {} + + updates: dict[str, Any] = {} + for msg in messages: + for content in msg.contents: + if getattr(content, "type", None) != "function_approval_response": + continue + if not getattr(content, "approved", False) or not getattr(content, "function_call", None): + continue + parsed_args = content.function_call.parse_arguments() + result = predictive_handler.extract_state_value(content.function_call.name, parsed_args) + if result: + state_key, state_value = result + updates[state_key] = state_value + logger.info(f"Found approved state update for key '{state_key}'") + return updates + + +@dataclass +class FlowState: + """Minimal explicit state for a single AG-UI run.""" + + message_id: str | None = None # Current text message being streamed + tool_call_id: str | None = None # Current tool call being streamed + tool_call_name: str | None = None # Name of current tool 
call + waiting_for_approval: bool = False # Stop after approval request + current_state: dict[str, Any] = field(default_factory=dict) # Shared state + accumulated_text: str = "" # For MessagesSnapshotEvent + pending_tool_calls: list[dict[str, Any]] = field(default_factory=list) # For MessagesSnapshotEvent + tool_calls_by_id: dict[str, dict[str, Any]] = field(default_factory=dict) + tool_results: list[dict[str, Any]] = field(default_factory=list) + tool_calls_ended: set[str] = field(default_factory=set) # Track which tool calls have been ended + + def get_tool_name(self, call_id: str | None) -> str | None: + """Get tool name by call ID.""" + if not call_id or call_id not in self.tool_calls_by_id: + return None + name = self.tool_calls_by_id[call_id]["function"].get("name") + return str(name) if name else None + + def get_pending_without_end(self) -> list[dict[str, Any]]: + """Get tool calls that started but never received an end event (declaration-only).""" + return [tc for tc in self.pending_tool_calls if tc.get("id") not in self.tool_calls_ended] + + +def _create_state_context_message( + current_state: dict[str, Any], + state_schema: dict[str, Any], +) -> ChatMessage | None: + """Create a system message with current state context. + + This injects the current state into the conversation so the model + knows what state exists and can make informed updates. 
+ + Args: + current_state: The current state to inject + state_schema: The state schema (used to determine if injection is needed) + + Returns: + ChatMessage with state context, or None if not needed + """ + if not current_state or not state_schema: + return None + + state_json = json.dumps(current_state, indent=2) + return ChatMessage( + role="system", + contents=[ + Content.from_text( + text=( + "Current state of the application:\n" + f"{state_json}\n\n" + "When modifying state, you MUST include ALL existing data plus your changes.\n" + "For example, if adding one new item to a list, include ALL existing items PLUS the new item.\n" + "Never replace existing data - always preserve and append or merge." + ) + ) + ], + ) + + +def _inject_state_context( + messages: list[ChatMessage], + current_state: dict[str, Any], + state_schema: dict[str, Any], +) -> list[ChatMessage]: + """Inject state context message into messages if appropriate. + + The state context is injected before the last user message to give + the model visibility into the current application state. 
+ + Args: + messages: The messages to potentially inject into + current_state: The current state + state_schema: The state schema + + Returns: + Messages with state context injected if appropriate + """ + state_msg = _create_state_context_message(current_state, state_schema) + if not state_msg: + return messages + + # Check if the last message is from a user (new user turn) + if not messages: + return messages + + from ._utils import get_role_value + + last_role = get_role_value(messages[-1]) + if last_role != "user": + return messages + + # Always inject state context if state is provided + # This ensures UI state changes are visible to the model + + # Insert state context before the last user message + result = list(messages[:-1]) + result.append(state_msg) + result.append(messages[-1]) + return result + + +def _emit_text(content: Content, flow: FlowState, skip_text: bool = False) -> list[BaseEvent]: + """Emit TextMessage events for TextContent.""" + if not content.text: + return [] + + # Skip if we're in structured output mode or waiting for approval + if skip_text or flow.waiting_for_approval: + return [] + + events: list[BaseEvent] = [] + if not flow.message_id: + flow.message_id = generate_event_id() + events.append(TextMessageStartEvent(message_id=flow.message_id, role="assistant")) + + events.append(TextMessageContentEvent(message_id=flow.message_id, delta=content.text)) + flow.accumulated_text += content.text + return events + + +def _emit_tool_call( + content: Content, + flow: FlowState, + predictive_handler: PredictiveStateHandler | None = None, +) -> list[BaseEvent]: + """Emit ToolCall events for FunctionCallContent.""" + events: list[BaseEvent] = [] + + tool_call_id = content.call_id or flow.tool_call_id or generate_event_id() + + # Emit start event when we have a new tool call + if content.name and tool_call_id != flow.tool_call_id: + flow.tool_call_id = tool_call_id + flow.tool_call_name = content.name + if predictive_handler: + 
predictive_handler.reset_streaming() + + events.append( + ToolCallStartEvent( + tool_call_id=tool_call_id, + tool_call_name=content.name, + parent_message_id=flow.message_id, + ) + ) + + # Track for MessagesSnapshotEvent + tool_entry = { + "id": tool_call_id, + "type": "function", + "function": {"name": content.name, "arguments": ""}, + } + flow.pending_tool_calls.append(tool_entry) + flow.tool_calls_by_id[tool_call_id] = tool_entry + + elif tool_call_id: + flow.tool_call_id = tool_call_id + + # Emit args if present + if content.arguments: + delta = ( + content.arguments if isinstance(content.arguments, str) else json.dumps(make_json_safe(content.arguments)) + ) + events.append(ToolCallArgsEvent(tool_call_id=tool_call_id, delta=delta)) + + # Track args for MessagesSnapshotEvent + if tool_call_id in flow.tool_calls_by_id: + flow.tool_calls_by_id[tool_call_id]["function"]["arguments"] += delta + + # Emit predictive state deltas + if predictive_handler and flow.tool_call_name: + delta_events = predictive_handler.emit_streaming_deltas(flow.tool_call_name, delta) + events.extend(delta_events) + + return events + + +def _emit_tool_result( + content: Content, + flow: FlowState, + predictive_handler: PredictiveStateHandler | None = None, +) -> list[BaseEvent]: + """Emit ToolCallResult events for FunctionResultContent.""" + events: list[BaseEvent] = [] + + # Cannot emit tool result without a call_id to associate it with + if not content.call_id: + return events + + events.append(ToolCallEndEvent(tool_call_id=content.call_id)) + flow.tool_calls_ended.add(content.call_id) # Track ended tool calls + + result_content = prepare_function_call_results(content.result) + message_id = generate_event_id() + events.append( + ToolCallResultEvent( + message_id=message_id, + tool_call_id=content.call_id, + content=result_content, + role="tool", + ) + ) + + # Track for MessagesSnapshotEvent + flow.tool_results.append( + { + "id": message_id, + "role": "tool", + "toolCallId": 
content.call_id, + "content": result_content, + } + ) + + # Apply predictive state updates and emit snapshot + if predictive_handler: + predictive_handler.apply_pending_updates() + if flow.current_state: + events.append(StateSnapshotEvent(snapshot=flow.current_state)) + + # Reset tool tracking and message context + # After tool result, any subsequent text should start a new message + flow.tool_call_id = None + flow.tool_call_name = None + flow.message_id = None # Reset so next text content starts a new message + + return events + + +def _emit_approval_request( + content: Content, + flow: FlowState, + predictive_handler: PredictiveStateHandler | None = None, + require_confirmation: bool = True, +) -> list[BaseEvent]: + """Emit events for function approval request.""" + events: list[BaseEvent] = [] + + # function_call is required for approval requests - skip if missing + func_call = content.function_call + if not func_call: + logger.warning("Approval request content missing function_call, skipping") + return events + + func_name = func_call.name or "" + func_call_id = func_call.call_id + + # Extract state from function arguments if predictive + if predictive_handler and func_name: + parsed_args = func_call.parse_arguments() + result = predictive_handler.extract_state_value(func_name, parsed_args) + if result: + state_key, state_value = result + flow.current_state[state_key] = state_value + events.append(StateSnapshotEvent(snapshot=flow.current_state)) + + # End the original tool call + if func_call_id: + events.append(ToolCallEndEvent(tool_call_id=func_call_id)) + flow.tool_calls_ended.add(func_call_id) # Track ended tool calls + + # Emit custom event for UI + events.append( + CustomEvent( + name="function_approval_request", + value={ + "id": content.id, + "function_call": { + "call_id": func_call_id, + "name": func_name, + "arguments": make_json_safe(func_call.parse_arguments()), + }, + }, + ) + ) + + # Emit confirm_changes tool call for UI compatibility + # The 
complete sequence (Start -> Args -> End) signals the UI to show the confirmation dialog + if require_confirmation: + confirm_id = generate_event_id() + events.append( + ToolCallStartEvent( + tool_call_id=confirm_id, + tool_call_name="confirm_changes", + parent_message_id=flow.message_id, + ) + ) + args = { + "function_name": func_name, + "function_call_id": func_call_id, + "function_arguments": make_json_safe(func_call.parse_arguments()) or {}, + "steps": [{"description": f"Execute {func_name}", "status": "enabled"}], + } + events.append(ToolCallArgsEvent(tool_call_id=confirm_id, delta=json.dumps(args))) + events.append(ToolCallEndEvent(tool_call_id=confirm_id)) + + flow.waiting_for_approval = True + return events + + +def _emit_content( + content: Any, + flow: FlowState, + predictive_handler: PredictiveStateHandler | None = None, + skip_text: bool = False, + require_confirmation: bool = True, +) -> list[BaseEvent]: + """Emit appropriate events for any content type.""" + content_type = getattr(content, "type", None) + if content_type == "text": + return _emit_text(content, flow, skip_text) + elif content_type == "function_call": + return _emit_tool_call(content, flow, predictive_handler) + elif content_type == "function_result": + return _emit_tool_result(content, flow, predictive_handler) + elif content_type == "function_approval_request": + return _emit_approval_request(content, flow, predictive_handler, require_confirmation) + return [] + + +def _is_confirm_changes_response(messages: list[Any]) -> bool: + """Check if the last message is a confirm_changes tool result (state confirmation flow). + + This returns True for confirm_changes flows where we emit a confirmation message + and stop. The key indicator is the presence of a 'steps' key in the tool result + (even if empty), combined with 'accepted' boolean. 
+ """ + if not messages: + return False + last = messages[-1] + if not last.additional_properties.get("is_tool_result", False): + return False + + # Parse the content to check if it has the confirm_changes structure + for content in last.contents: + if getattr(content, "type", None) == "text": + try: + result = json.loads(content.text) + # confirm_changes results have 'accepted' and 'steps' keys + if "accepted" in result and "steps" in result: + return True + except json.JSONDecodeError: + # Content is not valid JSON; continue checking other content items + logger.debug("Failed to parse confirm_changes tool result as JSON; treating as non-confirmation.") + return False + + +def _handle_step_based_approval(messages: list[Any]) -> list[BaseEvent]: + """Handle step-based approval response and emit confirmation message.""" + events: list[BaseEvent] = [] + last = messages[-1] + + # Parse the approval content + approval_text = "" + for content in last.contents: + if getattr(content, "type", None) == "text": + approval_text = content.text + break + + try: + result = json.loads(approval_text) + accepted = result.get("accepted", False) + steps = result.get("steps", []) + + if accepted: + # Generate acceptance message with step descriptions + enabled_steps = [s for s in steps if s.get("status") == "enabled"] + if enabled_steps: + message_parts = [f"Executing {len(enabled_steps)} approved steps:\n\n"] + for i, step in enumerate(enabled_steps, 1): + message_parts.append(f"{i}. {step.get('description', 'Step')}\n") + message_parts.append("\nAll steps completed successfully!") + message = "".join(message_parts) + else: + message = "Changes confirmed and applied successfully!" + else: + # Rejection message + message = "No problem! What would you like me to change about the plan?" + except json.JSONDecodeError: + message = "Acknowledged." 
+ + message_id = generate_event_id() + events.append(TextMessageStartEvent(message_id=message_id, role="assistant")) + events.append(TextMessageContentEvent(message_id=message_id, delta=message)) + events.append(TextMessageEndEvent(message_id=message_id)) + + return events + + +async def _resolve_approval_responses( + messages: list[Any], + tools: list[Any], + agent: AgentProtocol, + run_kwargs: dict[str, Any], +) -> None: + """Execute approved function calls and replace approval content with results. + + This modifies the messages list in place, replacing FunctionApprovalResponseContent + with FunctionResultContent containing the actual tool execution result. + + Args: + messages: List of messages (will be modified in place) + tools: List of available tools + agent: The agent instance (to get chat_client and config) + run_kwargs: Kwargs for tool execution + """ + fcc_todo = _collect_approval_responses(messages) + if not fcc_todo: + return + + approved_responses = [resp for resp in fcc_todo.values() if resp.approved] + rejected_responses = [resp for resp in fcc_todo.values() if not resp.approved] + approved_function_results: list[Any] = [] + + # Execute approved tool calls + if approved_responses and tools: + chat_client = getattr(agent, "chat_client", None) + config = getattr(chat_client, "function_invocation_configuration", None) or FunctionInvocationConfiguration() + middleware_pipeline = extract_and_merge_function_middleware(chat_client, run_kwargs) + # Filter out AG-UI-specific kwargs that should not be passed to tool execution + tool_kwargs = {k: v for k, v in run_kwargs.items() if k != "options"} + try: + results, _ = await _try_execute_function_calls( + custom_args=tool_kwargs, + attempt_idx=0, + function_calls=approved_responses, + tools=tools, + middleware_pipeline=middleware_pipeline, + config=config, + ) + approved_function_results = list(results) + except Exception as e: + logger.exception("Failed to execute approved tool calls; injecting error 
results: %s", e) + approved_function_results = [] + + # Build normalized results for approved responses + normalized_results: list[Content] = [] + for idx, approval in enumerate(approved_responses): + if ( + idx < len(approved_function_results) + and getattr(approved_function_results[idx], "type", None) == "function_result" + ): + normalized_results.append(approved_function_results[idx]) + continue + # Get call_id from function_call if present, otherwise use approval.id + func_call = approval.function_call + call_id = (func_call.call_id if func_call else None) or approval.id or "" + normalized_results.append( + Content.from_function_result(call_id=call_id, result="Error: Tool call invocation failed.") + ) + + # Build rejection results + for rejection in rejected_responses: + func_call = rejection.function_call + call_id = (func_call.call_id if func_call else None) or rejection.id or "" + normalized_results.append( + Content.from_function_result(call_id=call_id, result="Error: Tool call invocation was rejected by user.") + ) + + _replace_approval_contents_with_results(messages, fcc_todo, normalized_results) # type: ignore + + +def _build_messages_snapshot( + flow: FlowState, + snapshot_messages: list[dict[str, Any]], +) -> MessagesSnapshotEvent: + """Build MessagesSnapshotEvent from current flow state.""" + all_messages = list(snapshot_messages) + + # Add assistant message with tool calls + if flow.pending_tool_calls: + tool_call_message = { + "id": flow.message_id or generate_event_id(), + "role": "assistant", + "tool_calls": flow.pending_tool_calls.copy(), + } + if flow.accumulated_text: + tool_call_message["content"] = flow.accumulated_text + all_messages.append(tool_call_message) + + # Add tool results + all_messages.extend(flow.tool_results) + + # Add text-only assistant message if no tool calls + if flow.accumulated_text and not flow.pending_tool_calls: + all_messages.append( + { + "id": flow.message_id or generate_event_id(), + "role": "assistant", + 
"content": flow.accumulated_text, + } + ) + + return MessagesSnapshotEvent(messages=all_messages) # type: ignore[arg-type] + + +async def run_agent_stream( + input_data: dict[str, Any], + agent: AgentProtocol, + config: "AgentConfig", +) -> "AsyncGenerator[BaseEvent, None]": + """Run agent and yield AG-UI events. + + This is the single entry point for all AG-UI agent runs. It follows a simple + linear flow: RunStarted -> content events -> RunFinished. + + Args: + input_data: AG-UI request data with messages, state, tools, etc. + agent: The Agent Framework agent to run + config: Agent configuration + + Yields: + AG-UI events + """ + # Parse IDs + thread_id = input_data.get("thread_id") or input_data.get("threadId") or str(uuid.uuid4()) + run_id = input_data.get("run_id") or input_data.get("runId") or str(uuid.uuid4()) + + # Initialize flow state with schema defaults + flow = FlowState() + if input_data.get("state"): + flow.current_state = dict(input_data["state"]) + + # Apply schema defaults for missing state keys + if config.state_schema: + for key, schema in config.state_schema.items(): + if key in flow.current_state: + continue + if isinstance(schema, dict) and schema.get("type") == "array": + flow.current_state[key] = [] + else: + flow.current_state[key] = {} + + # Initialize predictive state handler if configured + predictive_handler: PredictiveStateHandler | None = None + if config.predict_state_config: + predictive_handler = PredictiveStateHandler( + predict_state_config=config.predict_state_config, + current_state=flow.current_state, + ) + + # Normalize messages + raw_messages = input_data.get("messages", []) + messages, snapshot_messages = normalize_agui_input_messages(raw_messages) + + # Check for structured output mode (skip text content) + skip_text = False + response_format = None + from agent_framework import ChatAgent + + if isinstance(agent, ChatAgent): + response_format = agent.default_options.get("response_format") + skip_text = response_format is 
not None + + # Handle empty messages (emit RunStarted immediately since no agent response) + if not messages: + logger.warning("No messages provided in AG-UI input") + yield RunStartedEvent(run_id=run_id, thread_id=thread_id) + yield RunFinishedEvent(run_id=run_id, thread_id=thread_id) + return + + # Prepare tools + client_tools = convert_agui_tools_to_agent_framework(input_data.get("tools")) + server_tools = collect_server_tools(agent) + register_additional_client_tools(agent, client_tools) + tools = merge_tools(server_tools, client_tools) + + # Create thread (with service thread support) + if config.use_service_thread: + supplied_thread_id = input_data.get("thread_id") or input_data.get("threadId") + thread = AgentThread(service_thread_id=supplied_thread_id) + else: + thread = AgentThread() + + # Inject metadata for AG-UI orchestration (Feature #2: Azure-safe truncation) + base_metadata: dict[str, Any] = { + "ag_ui_thread_id": thread_id, + "ag_ui_run_id": run_id, + } + if flow.current_state: + base_metadata["current_state"] = flow.current_state + thread.metadata = _build_safe_metadata(base_metadata) # type: ignore[attr-defined] + + # Build run kwargs (Feature #6: Azure store flag when metadata present) + run_kwargs: dict[str, Any] = {"thread": thread} + if tools: + run_kwargs["tools"] = tools + # Filter out AG-UI internal metadata keys before passing to chat client + # These are used internally for orchestration and should not be sent to the LLM provider + client_metadata = { + k: v for k, v in (getattr(thread, "metadata", None) or {}).items() if k not in AG_UI_INTERNAL_METADATA_KEYS + } + safe_metadata = _build_safe_metadata(client_metadata) if client_metadata else {} + if safe_metadata: + run_kwargs["options"] = {"metadata": safe_metadata, "store": True} + + # Resolve approval responses (execute approved tools, replace approvals with results) + # This must happen before running the agent so it sees the tool results + tools_for_execution = tools if tools is not 
None else server_tools + await _resolve_approval_responses(messages, tools_for_execution, agent, run_kwargs) + + # Feature #3: Emit StateSnapshotEvent for approved state-changing tools before agent runs + approved_state_updates = _extract_approved_state_updates(messages, predictive_handler) + approved_state_snapshot_emitted = False + if approved_state_updates: + flow.current_state.update(approved_state_updates) + approved_state_snapshot_emitted = True + + # Handle confirm_changes response (state confirmation flow - emit confirmation and stop) + if _is_confirm_changes_response(messages): + yield RunStartedEvent(run_id=run_id, thread_id=thread_id) + # Emit approved state snapshot before confirmation message + if approved_state_snapshot_emitted: + yield StateSnapshotEvent(snapshot=flow.current_state) + for event in _handle_step_based_approval(messages): + yield event + yield RunFinishedEvent(run_id=run_id, thread_id=thread_id) + return + + # Inject state context message so the model knows current application state + # This is critical for shared state scenarios where the UI state needs to be visible + if config.state_schema and flow.current_state: + messages = _inject_state_context(messages, flow.current_state, config.state_schema) + + # Stream from agent - emit RunStarted after first update to get service IDs + run_started_emitted = False + all_updates: list[Any] = [] # Collect for structured output processing + async for update in agent.run_stream(messages, **run_kwargs): + # Collect updates for structured output processing + if response_format is not None: + all_updates.append(update) + + # Update IDs from service response on first update and emit RunStarted + if not run_started_emitted: + conv_id = get_conversation_id_from_update(update) + if conv_id: + thread_id = conv_id + if update.response_id: + run_id = update.response_id + # NOW emit RunStarted with proper IDs + yield RunStartedEvent(run_id=run_id, thread_id=thread_id) + # Emit PredictState custom event if 
configured + if config.predict_state_config: + predict_state_value = [ + { + "state_key": state_key, + "tool": cfg["tool"], + "tool_argument": cfg["tool_argument"], + } + for state_key, cfg in config.predict_state_config.items() + ] + yield CustomEvent(name="PredictState", value=predict_state_value) + # Emit initial state snapshot only if we have both state_schema and state + if config.state_schema and flow.current_state: + yield StateSnapshotEvent(snapshot=flow.current_state) + run_started_emitted = True + + # Feature #4: Detect tool-only messages (no text content) + # Emit TextMessageStartEvent to create message context for tool calls + if not flow.message_id and _has_only_tool_calls(update.contents): + flow.message_id = generate_event_id() + logger.info(f"Tool-only response detected, creating message_id={flow.message_id}") + yield TextMessageStartEvent(message_id=flow.message_id, role="assistant") + + # Emit events for each content item + for content in update.contents: + for event in _emit_content( + content, + flow, + predictive_handler, + skip_text, + config.require_confirmation, + ): + yield event + + # Stop if waiting for approval + if flow.waiting_for_approval: + break + + # If no updates at all, still emit RunStarted + if not run_started_emitted: + yield RunStartedEvent(run_id=run_id, thread_id=thread_id) + if config.predict_state_config: + predict_state_value = [ + { + "state_key": state_key, + "tool": cfg["tool"], + "tool_argument": cfg["tool_argument"], + } + for state_key, cfg in config.predict_state_config.items() + ] + yield CustomEvent(name="PredictState", value=predict_state_value) + if config.state_schema and flow.current_state: + yield StateSnapshotEvent(snapshot=flow.current_state) + + # Process structured output if response_format is set + if response_format is not None and all_updates: + from agent_framework import AgentResponse + from pydantic import BaseModel + + logger.info(f"Processing structured output, update count: {len(all_updates)}") 
+ final_response = AgentResponse.from_agent_run_response_updates(all_updates, output_format_type=response_format) + + if final_response.value and isinstance(final_response.value, BaseModel): + response_dict = final_response.value.model_dump(mode="json", exclude_none=True) + logger.info(f"Received structured output keys: {list(response_dict.keys())}") + + # Extract state updates - if no state_schema, all non-message fields are state + state_keys = ( + set(config.state_schema.keys()) if config.state_schema else set(response_dict.keys()) - {"message"} + ) + state_updates = {k: v for k, v in response_dict.items() if k in state_keys} + + if state_updates: + flow.current_state.update(state_updates) + yield StateSnapshotEvent(snapshot=flow.current_state) + logger.info(f"Emitted StateSnapshotEvent with updates: {list(state_updates.keys())}") + + # Emit message field as text if present + if "message" in response_dict and response_dict["message"]: + message_id = generate_event_id() + yield TextMessageStartEvent(message_id=message_id, role="assistant") + yield TextMessageContentEvent(message_id=message_id, delta=response_dict["message"]) + yield TextMessageEndEvent(message_id=message_id) + logger.info(f"Emitted conversational message with length={len(response_dict['message'])}") + + # Feature #1: Emit ToolCallEndEvent for declaration-only tools (tools without results) + pending_without_end = flow.get_pending_without_end() + if pending_without_end: + logger.info(f"Found {len(pending_without_end)} pending tool calls without end event") + for tool_call in pending_without_end: + tool_call_id = tool_call.get("id") + tool_name = tool_call.get("function", {}).get("name") + if tool_call_id: + logger.info(f"Emitting ToolCallEndEvent for declaration-only tool '{tool_call_id}'") + yield ToolCallEndEvent(tool_call_id=tool_call_id) + + # For predictive tools with require_confirmation, emit confirm_changes + if config.require_confirmation and config.predict_state_config and tool_name: + 
is_predictive_tool = any(cfg["tool"] == tool_name for cfg in config.predict_state_config.values()) + if is_predictive_tool: + logger.info(f"Emitting confirm_changes for predictive tool '{tool_name}'") + # Extract state value from tool arguments for StateSnapshot + if predictive_handler: + try: + args_str = tool_call.get("function", {}).get("arguments", "{}") + args = json.loads(args_str) if isinstance(args_str, str) else args_str + result = predictive_handler.extract_state_value(tool_name, args) + if result: + state_key, state_value = result + flow.current_state[state_key] = state_value + yield StateSnapshotEvent(snapshot=flow.current_state) + except json.JSONDecodeError: + # Ignore malformed JSON in tool arguments for predictive state; + # predictive updates are best-effort and should not break the flow. + logger.warning( + "Failed to decode JSON arguments for predictive tool '%s' (tool_call_id=%s).", + tool_name, + tool_call_id, + ) + + # Emit confirm_changes tool call + confirm_id = generate_event_id() + yield ToolCallStartEvent( + tool_call_id=confirm_id, + tool_call_name="confirm_changes", + parent_message_id=flow.message_id, + ) + confirm_args = { + "function_name": tool_name, + "function_call_id": tool_call_id, + "function_arguments": json.loads(tool_call.get("function", {}).get("arguments", "{}")), + "steps": [{"description": f"Execute {tool_name}", "status": "enabled"}], + } + yield ToolCallArgsEvent(tool_call_id=confirm_id, delta=json.dumps(confirm_args)) + yield ToolCallEndEvent(tool_call_id=confirm_id) + flow.waiting_for_approval = True + + # Close any open message + if flow.message_id: + yield TextMessageEndEvent(message_id=flow.message_id) + + # Emit MessagesSnapshotEvent if we have tool calls or results + # Feature #5: Suppress intermediate snapshots for predictive tools without confirmation + should_emit_snapshot = flow.pending_tool_calls or flow.tool_results or flow.accumulated_text + if should_emit_snapshot: + # Check if we should suppress for 
predictive tool + last_tool_name = None + if flow.tool_results: + last_result = flow.tool_results[-1] + last_call_id = last_result.get("toolCallId") + last_tool_name = flow.get_tool_name(last_call_id) + if not _should_suppress_intermediate_snapshot( + last_tool_name, config.predict_state_config, config.require_confirmation + ): + yield _build_messages_snapshot(flow, snapshot_messages) + + # Always emit RunFinished - confirm_changes tool call is complete (Start -> Args -> End) + # The UI will show confirmation dialog and send a new request when user responds + yield RunFinishedEvent(run_id=run_id, thread_id=thread_id) diff --git a/python/packages/ag-ui/agent_framework_ag_ui/_types.py b/python/packages/ag-ui/agent_framework_ag_ui/_types.py index 226abae692..a80cd155d2 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui/_types.py +++ b/python/packages/ag-ui/agent_framework_ag_ui/_types.py @@ -6,21 +6,13 @@ from typing import Any, TypedDict from agent_framework import ChatOptions +from pydantic import BaseModel, Field if sys.version_info >= (3, 13): from typing import TypeVar else: from typing_extensions import TypeVar -__all__ = [ - "AGUIChatOptions", - "AgentState", - "PredictStateConfig", - "RunMetadata", -] - -from pydantic import BaseModel, Field - class PredictStateConfig(TypedDict): """Configuration for predictive state updates.""" @@ -63,6 +55,22 @@ class AGUIRequest(BaseModel): None, description="Optional shared state for agentic generative UI", ) + tools: list[dict[str, Any]] | None = Field( + None, + description="Client-side tools to advertise to the LLM", + ) + context: list[dict[str, Any]] | None = Field( + None, + description="List of context objects provided to the agent", + ) + forwarded_props: dict[str, Any] | None = Field( + None, + description="Additional properties forwarded to the agent", + ) + parent_run_id: str | None = Field( + None, + description="ID of the run that spawned this run", + ) # region AG-UI Chat Options TypedDict diff --git 
a/python/packages/ag-ui/agent_framework_ag_ui/_utils.py b/python/packages/ag-ui/agent_framework_ag_ui/_utils.py index 9f42e24770..967653fff8 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui/_utils.py +++ b/python/packages/ag-ui/agent_framework_ag_ui/_utils.py @@ -141,11 +141,14 @@ def make_json_safe(obj: Any) -> Any: # noqa: ANN401 if isinstance(obj, (datetime, date)): return obj.isoformat() if is_dataclass(obj): - return asdict(obj) # type: ignore[arg-type] + # asdict may return nested non-dataclass objects, so recursively make them safe + return make_json_safe(asdict(obj)) # type: ignore[arg-type] if hasattr(obj, "model_dump"): - return obj.model_dump() # type: ignore[no-any-return] + return make_json_safe(obj.model_dump()) # type: ignore[no-any-return] + if hasattr(obj, "to_dict"): + return make_json_safe(obj.to_dict()) # type: ignore[no-any-return] if hasattr(obj, "dict"): - return obj.dict() # type: ignore[no-any-return] + return make_json_safe(obj.dict()) # type: ignore[no-any-return] if hasattr(obj, "__dict__"): return {key: make_json_safe(value) for key, value in vars(obj).items()} # type: ignore[misc] if isinstance(obj, (list, tuple)): diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/README.md b/python/packages/ag-ui/agent_framework_ag_ui_examples/README.md index e9d6d4ed17..f22969f883 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/README.md +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/README.md @@ -289,40 +289,6 @@ wrapped_agent = AgentFrameworkAgent( ) ``` -### Custom Confirmation Strategies - -Provide domain-specific confirmation messages: - -```python -from typing import Any -from agent_framework import ChatAgent -from agent_framework.azure import AzureOpenAIChatClient -from agent_framework.ag_ui import AgentFrameworkAgent, ConfirmationStrategy - -class CustomConfirmationStrategy(ConfirmationStrategy): - def on_approval_accepted(self, steps: list[dict[str, Any]]) -> str: - return "Your custom 
approval message!" - - def on_approval_rejected(self, steps: list[dict[str, Any]]) -> str: - return "Your custom rejection message!" - - def on_state_confirmed(self) -> str: - return "State changes confirmed!" - - def on_state_rejected(self) -> str: - return "State changes rejected!" - -agent = ChatAgent( - name="custom_agent", - chat_client=AzureOpenAIChatClient(model_id="gpt-4o"), -) - -wrapped_agent = AgentFrameworkAgent( - agent=agent, - confirmation_strategy=CustomConfirmationStrategy(), -) -``` - ### Human in the Loop Human-in-the-loop is automatically handled when tools are marked for approval: diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/document_writer_agent.py b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/document_writer_agent.py index bddc51846b..34ade05032 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/document_writer_agent.py +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/document_writer_agent.py @@ -3,11 +3,11 @@ """Example agent demonstrating predictive state updates with document writing.""" from agent_framework import ChatAgent, ChatClientProtocol, ai_function -from agent_framework.ag_ui import AgentFrameworkAgent, DocumentWriterConfirmationStrategy +from agent_framework.ag_ui import AgentFrameworkAgent -@ai_function -def write_document_local(document: str) -> str: +@ai_function(approval_mode="always_require") +def write_document(document: str) -> str: """Write a document. Use markdown formatting to format the document. It's good to format the document extensively so it's easy to read. @@ -28,7 +28,7 @@ def write_document_local(document: str) -> str: _DOCUMENT_WRITER_INSTRUCTIONS = ( "You are a helpful assistant for writing documents. " - "To write the document, you MUST use the write_document_local tool. " + "To write the document, you MUST use the write_document tool. " "You MUST write the full document, even when changing only a few words. 
" "When you wrote the document, DO NOT repeat it as a message. " "Just briefly summarize the changes you made. 2 sentences max. " @@ -51,7 +51,7 @@ def document_writer_agent(chat_client: ChatClientProtocol) -> AgentFrameworkAgen name="document_writer", instructions=_DOCUMENT_WRITER_INSTRUCTIONS, chat_client=chat_client, - tools=[write_document_local], + tools=[write_document], ) return AgentFrameworkAgent( @@ -62,7 +62,6 @@ def document_writer_agent(chat_client: ChatClientProtocol) -> AgentFrameworkAgen "document": {"type": "string", "description": "The current document content"}, }, predict_state_config={ - "document": {"tool": "write_document_local", "tool_argument": "document"}, + "document": {"tool": "write_document", "tool_argument": "document"}, }, - confirmation_strategy=DocumentWriterConfirmationStrategy(), ) diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/recipe_agent.py b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/recipe_agent.py index 05c42efb30..2d38e612aa 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/recipe_agent.py +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/recipe_agent.py @@ -6,7 +6,7 @@ from typing import Any from agent_framework import ChatAgent, ChatClientProtocol, ai_function -from agent_framework.ag_ui import AgentFrameworkAgent, RecipeConfirmationStrategy +from agent_framework.ag_ui import AgentFrameworkAgent from pydantic import BaseModel, Field @@ -128,6 +128,5 @@ def recipe_agent(chat_client: ChatClientProtocol[Any]) -> AgentFrameworkAgent: predict_state_config={ "recipe": {"tool": "update_recipe", "tool_argument": "recipe"}, }, - confirmation_strategy=RecipeConfirmationStrategy(), require_confirmation=False, ) diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/task_planner_agent.py b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/task_planner_agent.py index c79c36f511..442c9e6182 100644 --- 
a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/task_planner_agent.py +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/task_planner_agent.py @@ -5,7 +5,7 @@ from typing import Any from agent_framework import ChatAgent, ChatClientProtocol, ai_function -from agent_framework.ag_ui import AgentFrameworkAgent, TaskPlannerConfirmationStrategy +from agent_framework.ag_ui import AgentFrameworkAgent @ai_function(approval_mode="always_require") @@ -81,5 +81,4 @@ def task_planner_agent(chat_client: ChatClientProtocol[Any]) -> AgentFrameworkAg agent=agent, name="TaskPlanner", description="Plans and executes tasks with user approval", - confirmation_strategy=TaskPlannerConfirmationStrategy(), ) diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/task_steps_agent.py b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/task_steps_agent.py index 572df2720b..9a4acf4319 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/task_steps_agent.py +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/task_steps_agent.py @@ -18,7 +18,7 @@ TextMessageStartEvent, ToolCallStartEvent, ) -from agent_framework import ChatAgent, ChatClientProtocol, ai_function +from agent_framework import ChatAgent, ChatClientProtocol, ChatMessage, Content, ai_function from agent_framework.ag_ui import AgentFrameworkAgent from pydantic import BaseModel, Field @@ -221,7 +221,6 @@ async def run_agent(self, input_data: dict[str, Any]) -> AsyncGenerator[Any, Non chat_client = chat_agent.chat_client # type: ignore # Build messages for summary call - from agent_framework._types import ChatMessage, TextContent original_messages = input_data.get("messages", []) @@ -234,7 +233,7 @@ async def run_agent(self, input_data: dict[str, Any]) -> AsyncGenerator[Any, Non messages.append( ChatMessage( role=msg.get("role", "user"), - contents=[TextContent(text=content_str)], + contents=[Content.from_text(text=content_str)], ) ) elif 
isinstance(msg, ChatMessage): @@ -245,7 +244,7 @@ async def run_agent(self, input_data: dict[str, Any]) -> AsyncGenerator[Any, Non ChatMessage( role="user", contents=[ - TextContent( + Content.from_text( text="The steps have been successfully executed. Provide a brief one-sentence summary." ) ], diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/weather_agent.py b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/weather_agent.py index 32324d72eb..5ebdc10d73 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/weather_agent.py +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/weather_agent.py @@ -71,7 +71,8 @@ def weather_agent(chat_client: ChatClientProtocol[Any]) -> ChatAgent[Any]: instructions=( "You are a helpful weather assistant. " "Use the get_weather and get_forecast functions to help users with weather information. " - "Always provide friendly and informative responses." + "Always provide friendly and informative responses. " + "First return the weather result, and then return details about the forecast." 
), chat_client=chat_client, tools=[get_weather, get_forecast], diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/server/main.py b/python/packages/ag-ui/agent_framework_ag_ui_examples/server/main.py index e71abe7507..7369c84679 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/server/main.py +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/server/main.py @@ -4,10 +4,12 @@ import logging import os -from typing import TYPE_CHECKING import uvicorn +from agent_framework import ChatOptions +from agent_framework._clients import BaseChatClient from agent_framework.ag_ui import add_agent_framework_fastapi_endpoint +from agent_framework.anthropic import AnthropicClient from agent_framework.azure import AzureOpenAIChatClient from fastapi import FastAPI from fastapi.middleware.cors import CORSMiddleware @@ -20,10 +22,6 @@ from ..agents.ui_generator_agent import ui_generator_agent from ..agents.weather_agent import weather_agent -if TYPE_CHECKING: - from agent_framework import ChatOptions - from agent_framework._clients import BaseChatClient - # Configure logging to file and console (disabled by default - set ENABLE_DEBUG_LOGGING=1 to enable) if os.getenv("ENABLE_DEBUG_LOGGING"): log_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "..", "ag_ui_server.log") @@ -65,7 +63,10 @@ # Create a shared chat client for all agents # You can use different chat clients for different agents if needed -chat_client: BaseChatClient[ChatOptions] = AzureOpenAIChatClient() +# Set CHAT_CLIENT=anthropic to use Anthropic, defaults to Azure OpenAI +chat_client: BaseChatClient[ChatOptions] = ( + AnthropicClient() if os.getenv("CHAT_CLIENT", "").lower() == "anthropic" else AzureOpenAIChatClient() +) # Agentic Chat - basic chat agent add_agent_framework_fastapi_endpoint( diff --git a/python/packages/ag-ui/getting_started/client.py b/python/packages/ag-ui/getting_started/client.py index 61bdf0bfb3..7b56103050 100644 --- 
a/python/packages/ag-ui/getting_started/client.py +++ b/python/packages/ag-ui/getting_started/client.py @@ -50,11 +50,9 @@ async def main(): print("\nAssistant: ", end="", flush=True) # Display text content as it streams - from agent_framework import TextContent - for content in update.contents: - if isinstance(content, TextContent) and content.text: - print(f"\033[96m{content.text}\033[0m", end="", flush=True) + if hasattr(content, "text") and content.text: # type: ignore[attr-defined] + print(f"\033[96m{content.text}\033[0m", end="", flush=True) # type: ignore[attr-defined] # Display finish reason if present if update.finish_reason: diff --git a/python/packages/ag-ui/getting_started/client_advanced.py b/python/packages/ag-ui/getting_started/client_advanced.py index 08698a80a0..3c7ae6a334 100644 --- a/python/packages/ag-ui/getting_started/client_advanced.py +++ b/python/packages/ag-ui/getting_started/client_advanced.py @@ -73,11 +73,9 @@ async def streaming_example(client: AGUIChatClient, thread_id: str | None = None if not thread_id and update.additional_properties: thread_id = update.additional_properties.get("thread_id") - from agent_framework import TextContent - for content in update.contents: - if isinstance(content, TextContent) and content.text: - print(content.text, end="", flush=True) + if content.type == "text" and content.text: # type: ignore[attr-defined] + print(content.text, end="", flush=True) # type: ignore[attr-defined] print("\n") return thread_id @@ -138,13 +136,11 @@ async def tool_example(client: AGUIChatClient, thread_id: str | None = None): print(f"Assistant: {response.text}") # Show tool calls if any - from agent_framework import FunctionCallContent - tool_called = False for message in response.messages: for content in message.contents: - if isinstance(content, FunctionCallContent): - print(f"\n[Tool Called: {content.name}]") + if content.type == "function_call": # type: ignore[attr-defined] + print(f"\n[Tool Called: {content.name}]") # 
type: ignore[attr-defined] tool_called = True if not tool_called: @@ -176,7 +172,7 @@ async def conversation_example(client: AGUIChatClient): # Second turn - using same thread print("\nUser: What's my name?\n") - response2 = await client.get_response("What's my name?", metadata={"thread_id": thread_id}) + response2 = await client.get_response("What's my name?", options={"metadata": {"thread_id": thread_id}}) print(f"Assistant: {response2.text}") # Check if context was maintained @@ -186,7 +182,7 @@ async def conversation_example(client: AGUIChatClient): # Third turn print("\nUser: Can you also tell me what 10 * 5 is?\n") response3 = await client.get_response( - "Can you also tell me what 10 * 5 is?", metadata={"thread_id": thread_id}, tools=[calculate] + "Can you also tell me what 10 * 5 is?", options={"metadata": {"thread_id": thread_id}}, tools=[calculate] ) print(f"Assistant: {response3.text}") diff --git a/python/packages/ag-ui/getting_started/client_with_agent.py b/python/packages/ag-ui/getting_started/client_with_agent.py index 91b099820b..63a89b4344 100644 --- a/python/packages/ag-ui/getting_started/client_with_agent.py +++ b/python/packages/ag-ui/getting_started/client_with_agent.py @@ -22,7 +22,7 @@ import logging import os -from agent_framework import ChatAgent, FunctionCallContent, FunctionResultContent, TextContent, ai_function +from agent_framework import ChatAgent, ai_function from agent_framework.ag_ui import AGUIChatClient # Enable debug logging @@ -141,8 +141,9 @@ def _preview_for_message(m) -> str: # Build from contents when no direct text parts: list[str] = [] for c in getattr(m, "contents", []) or []: - if isinstance(c, FunctionCallContent): - args = c.arguments + content_type = getattr(c, "type", None) + if content_type == "function_call": + args = getattr(c, "arguments", None) if isinstance(args, dict): try: import json as _json @@ -152,12 +153,15 @@ def _preview_for_message(m) -> str: args_str = str(args) else: args_str = str(args or "{}") - 
parts.append(f"tool_call {c.name} {args_str}") - elif isinstance(c, FunctionResultContent): - parts.append(f"tool_result[{c.call_id}]: {str(c.result)[:40]}") - elif isinstance(c, TextContent): - if c.text: - parts.append(c.text) + parts.append(f"tool_call {getattr(c, 'name', '?')} {args_str}") + elif content_type == "function_result": + call_id = getattr(c, "call_id", "?") + result = getattr(c, "result", None) + parts.append(f"tool_result[{call_id}]: {str(result)[:40]}") + elif content_type == "text": + text = getattr(c, "text", None) + if text: + parts.append(text) else: typename = getattr(c, "type", c.__class__.__name__) parts.append(f"<{typename}>") diff --git a/python/packages/ag-ui/pyproject.toml b/python/packages/ag-ui/pyproject.toml index 21fe4f234b..3b0e09f6dc 100644 --- a/python/packages/ag-ui/pyproject.toml +++ b/python/packages/ag-ui/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "agent-framework-ag-ui" -version = "1.0.0b260114" +version = "1.0.0b260116" description = "AG-UI protocol integration for Agent Framework" readme = "README.md" license-files = ["LICENSE"] diff --git a/python/packages/ag-ui/tests/test_ag_ui_client.py b/python/packages/ag-ui/tests/test_ag_ui_client.py index bc1cc6d711..b05810972e 100644 --- a/python/packages/ag-ui/tests/test_ag_ui_client.py +++ b/python/packages/ag-ui/tests/test_ag_ui_client.py @@ -11,14 +11,13 @@ ChatOptions, ChatResponse, ChatResponseUpdate, - FunctionCallContent, + Content, Role, - TextContent, ai_function, ) from pytest import MonkeyPatch -from agent_framework_ag_ui._client import AGUIChatClient, ServerFunctionCallContent +from agent_framework_ag_ui._client import AGUIChatClient from agent_framework_ag_ui._http_service import AGUIHttpService @@ -96,13 +95,11 @@ async def test_extract_state_from_messages_with_state(self) -> None: state_json = json.dumps(state_data) state_b64 = base64.b64encode(state_json.encode("utf-8")).decode("utf-8") - from agent_framework import DataContent - messages = [ 
ChatMessage(role="user", text="Hello"), ChatMessage( role="user", - contents=[DataContent(uri=f"data:application/json;base64,{state_b64}")], + contents=[Content.from_uri(uri=f"data:application/json;base64,{state_b64}")], ), ] @@ -121,12 +118,10 @@ async def test_extract_state_invalid_json(self) -> None: invalid_json = "not valid json" state_b64 = base64.b64encode(invalid_json.encode("utf-8")).decode("utf-8") - from agent_framework import DataContent - messages = [ ChatMessage( role="user", - contents=[DataContent(uri=f"data:application/json;base64,{state_b64}")], + contents=[Content.from_uri(uri=f"data:application/json;base64,{state_b64}")], ), ] @@ -200,8 +195,8 @@ async def mock_post_run(*args: object, **kwargs: Any) -> AsyncGenerator[dict[str first_content = updates[1].contents[0] second_content = updates[2].contents[0] - assert isinstance(first_content, TextContent) - assert isinstance(second_content, TextContent) + assert first_content.type == "text" + assert second_content.type == "text" assert first_content.text == "Hello" assert second_content.text == " world" @@ -294,13 +289,12 @@ async def mock_post_run(*args: object, **kwargs: Any) -> AsyncGenerator[dict[str updates.append(update) function_calls = [ - content for update in updates for content in update.contents if isinstance(content, FunctionCallContent) + content for update in updates for content in update.contents if content.type == "function_call" ] assert function_calls assert function_calls[0].name == "get_time_zone" - assert not any( - isinstance(content, ServerFunctionCallContent) for update in updates for content in update.contents - ) + + assert not any(content.type == "server_function_call" for update in updates for content in update.contents) async def test_server_tool_calls_not_executed_locally(self, monkeypatch: MonkeyPatch) -> None: """Server tools should not trigger local function invocation even when client tools exist.""" @@ -343,13 +337,11 @@ async def test_state_transmission(self, 
monkeypatch: MonkeyPatch) -> None: state_json = json.dumps(state_data) state_b64 = base64.b64encode(state_json.encode("utf-8")).decode("utf-8") - from agent_framework import DataContent - messages = [ ChatMessage(role="user", text="Hello"), ChatMessage( role="user", - contents=[DataContent(uri=f"data:application/json;base64,{state_b64}")], + contents=[Content.from_uri(uri=f"data:application/json;base64,{state_b64}")], ), ] diff --git a/python/packages/ag-ui/tests/test_agent_wrapper_comprehensive.py b/python/packages/ag-ui/tests/test_agent_wrapper_comprehensive.py index f919c00a56..8acd56a094 100644 --- a/python/packages/ag-ui/tests/test_agent_wrapper_comprehensive.py +++ b/python/packages/ag-ui/tests/test_agent_wrapper_comprehensive.py @@ -9,7 +9,7 @@ from typing import Any import pytest -from agent_framework import ChatAgent, ChatMessage, ChatOptions, ChatResponseUpdate, TextContent +from agent_framework import ChatAgent, ChatMessage, ChatOptions, ChatResponseUpdate, Content from pydantic import BaseModel sys.path.insert(0, str(Path(__file__).parent)) @@ -23,7 +23,7 @@ async def test_agent_initialization_basic(): async def stream_fn( messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: - yield ChatResponseUpdate(contents=[TextContent(text="Hello")]) + yield ChatResponseUpdate(contents=[Content.from_text(text="Hello")]) agent = ChatAgent[ChatOptions]( chat_client=StreamingChatClientStub(stream_fn), @@ -45,7 +45,7 @@ async def test_agent_initialization_with_state_schema(): async def stream_fn( messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: - yield ChatResponseUpdate(contents=[TextContent(text="Hello")]) + yield ChatResponseUpdate(contents=[Content.from_text(text="Hello")]) agent = ChatAgent(name="test_agent", instructions="Test", chat_client=StreamingChatClientStub(stream_fn)) state_schema: dict[str, dict[str, Any]] = {"document": 
{"type": "string"}} @@ -61,7 +61,7 @@ async def test_agent_initialization_with_predict_state_config(): async def stream_fn( messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: - yield ChatResponseUpdate(contents=[TextContent(text="Hello")]) + yield ChatResponseUpdate(contents=[Content.from_text(text="Hello")]) agent = ChatAgent(name="test_agent", instructions="Test", chat_client=StreamingChatClientStub(stream_fn)) predict_config = {"document": {"tool": "write_doc", "tool_argument": "content"}} @@ -77,7 +77,7 @@ async def test_agent_initialization_with_pydantic_state_schema(): async def stream_fn( messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: - yield ChatResponseUpdate(contents=[TextContent(text="Hello")]) + yield ChatResponseUpdate(contents=[Content.from_text(text="Hello")]) class MyState(BaseModel): document: str @@ -100,7 +100,7 @@ async def test_run_started_event_emission(): async def stream_fn( messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: - yield ChatResponseUpdate(contents=[TextContent(text="Hello")]) + yield ChatResponseUpdate(contents=[Content.from_text(text="Hello")]) agent = ChatAgent(name="test_agent", instructions="Test", chat_client=StreamingChatClientStub(stream_fn)) wrapper = AgentFrameworkAgent(agent=agent) @@ -124,7 +124,7 @@ async def test_predict_state_custom_event_emission(): async def stream_fn( messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: - yield ChatResponseUpdate(contents=[TextContent(text="Hello")]) + yield ChatResponseUpdate(contents=[Content.from_text(text="Hello")]) agent = ChatAgent(name="test_agent", instructions="Test", chat_client=StreamingChatClientStub(stream_fn)) predict_config = { @@ -156,7 +156,7 @@ async def 
test_initial_state_snapshot_with_schema(): async def stream_fn( messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: - yield ChatResponseUpdate(contents=[TextContent(text="Hello")]) + yield ChatResponseUpdate(contents=[Content.from_text(text="Hello")]) agent = ChatAgent(name="test_agent", instructions="Test", chat_client=StreamingChatClientStub(stream_fn)) state_schema = {"document": {"type": "string"}} @@ -186,7 +186,7 @@ async def test_state_initialization_object_type(): async def stream_fn( messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: - yield ChatResponseUpdate(contents=[TextContent(text="Hello")]) + yield ChatResponseUpdate(contents=[Content.from_text(text="Hello")]) agent = ChatAgent(name="test_agent", instructions="Test", chat_client=StreamingChatClientStub(stream_fn)) state_schema: dict[str, dict[str, Any]] = {"recipe": {"type": "object", "properties": {}}} @@ -213,7 +213,7 @@ async def test_state_initialization_array_type(): async def stream_fn( messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: - yield ChatResponseUpdate(contents=[TextContent(text="Hello")]) + yield ChatResponseUpdate(contents=[Content.from_text(text="Hello")]) agent = ChatAgent(name="test_agent", instructions="Test", chat_client=StreamingChatClientStub(stream_fn)) state_schema: dict[str, dict[str, Any]] = {"steps": {"type": "array", "items": {}}} @@ -240,7 +240,7 @@ async def test_run_finished_event_emission(): async def stream_fn( messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: - yield ChatResponseUpdate(contents=[TextContent(text="Hello")]) + yield ChatResponseUpdate(contents=[Content.from_text(text="Hello")]) agent = ChatAgent(name="test_agent", instructions="Test", 
chat_client=StreamingChatClientStub(stream_fn)) wrapper = AgentFrameworkAgent(agent=agent) @@ -262,7 +262,7 @@ async def test_tool_result_confirm_changes_accepted(): async def stream_fn( messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: - yield ChatResponseUpdate(contents=[TextContent(text="Document updated")]) + yield ChatResponseUpdate(contents=[Content.from_text(text="Document updated")]) agent = ChatAgent(name="test_agent", instructions="Test", chat_client=StreamingChatClientStub(stream_fn)) wrapper = AgentFrameworkAgent( @@ -309,7 +309,7 @@ async def test_tool_result_confirm_changes_rejected(): async def stream_fn( messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: - yield ChatResponseUpdate(contents=[TextContent(text="OK")]) + yield ChatResponseUpdate(contents=[Content.from_text(text="OK")]) agent = ChatAgent(name="test_agent", instructions="Test", chat_client=StreamingChatClientStub(stream_fn)) wrapper = AgentFrameworkAgent(agent=agent) @@ -343,7 +343,7 @@ async def test_tool_result_function_approval_accepted(): async def stream_fn( messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: - yield ChatResponseUpdate(contents=[TextContent(text="OK")]) + yield ChatResponseUpdate(contents=[Content.from_text(text="OK")]) agent = ChatAgent(name="test_agent", instructions="Test", chat_client=StreamingChatClientStub(stream_fn)) wrapper = AgentFrameworkAgent(agent=agent) @@ -389,7 +389,7 @@ async def test_tool_result_function_approval_rejected(): async def stream_fn( messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: - yield ChatResponseUpdate(contents=[TextContent(text="OK")]) + yield ChatResponseUpdate(contents=[Content.from_text(text="OK")]) agent = ChatAgent(name="test_agent", instructions="Test", 
chat_client=StreamingChatClientStub(stream_fn)) wrapper = AgentFrameworkAgent(agent=agent) @@ -420,18 +420,26 @@ async def stream_fn( async def test_thread_metadata_tracking(): - """Test that thread metadata includes ag_ui_thread_id and ag_ui_run_id.""" + """Test that thread metadata includes ag_ui_thread_id and ag_ui_run_id. + + AG-UI internal metadata is stored in thread.metadata for orchestration, + but filtered out before passing to the chat client's options.metadata. + """ from agent_framework.ag_ui import AgentFrameworkAgent - thread_metadata: dict[str, Any] = {} + captured_thread: dict[str, Any] = {} + captured_options: dict[str, Any] = {} async def stream_fn( messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: - metadata = options.get("metadata") - if metadata: - thread_metadata.update(metadata) - yield ChatResponseUpdate(contents=[TextContent(text="Hello")]) + # Capture the thread object from kwargs + thread = kwargs.get("thread") + if thread and hasattr(thread, "metadata"): + captured_thread["metadata"] = thread.metadata + # Capture options to verify internal keys are NOT passed to chat client + captured_options.update(options) + yield ChatResponseUpdate(contents=[Content.from_text(text="Hello")]) agent = ChatAgent(name="test_agent", instructions="Test", chat_client=StreamingChatClientStub(stream_fn)) wrapper = AgentFrameworkAgent(agent=agent) @@ -446,23 +454,38 @@ async def stream_fn( async for event in wrapper.run_agent(input_data): events.append(event) + # AG-UI internal metadata should be stored in thread.metadata + thread_metadata = captured_thread.get("metadata", {}) assert thread_metadata.get("ag_ui_thread_id") == "test_thread_123" assert thread_metadata.get("ag_ui_run_id") == "test_run_456" + # Internal metadata should NOT be passed to chat client options + options_metadata = captured_options.get("metadata", {}) + assert "ag_ui_thread_id" not in options_metadata + assert 
"ag_ui_run_id" not in options_metadata + async def test_state_context_injection(): - """Test that current state is injected into thread metadata.""" + """Test that current state is injected into thread metadata. + + AG-UI internal metadata (including current_state) is stored in thread.metadata + for orchestration, but filtered out before passing to the chat client's options.metadata. + """ from agent_framework_ag_ui import AgentFrameworkAgent - thread_metadata: dict[str, Any] = {} + captured_thread: dict[str, Any] = {} + captured_options: dict[str, Any] = {} async def stream_fn( messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: - metadata = options.get("metadata") - if metadata: - thread_metadata.update(metadata) - yield ChatResponseUpdate(contents=[TextContent(text="Hello")]) + # Capture the thread object from kwargs + thread = kwargs.get("thread") + if thread and hasattr(thread, "metadata"): + captured_thread["metadata"] = thread.metadata + # Capture options to verify internal keys are NOT passed to chat client + captured_options.update(options) + yield ChatResponseUpdate(contents=[Content.from_text(text="Hello")]) agent = ChatAgent(name="test_agent", instructions="Test", chat_client=StreamingChatClientStub(stream_fn)) wrapper = AgentFrameworkAgent( @@ -479,11 +502,17 @@ async def stream_fn( async for event in wrapper.run_agent(input_data): events.append(event) + # Current state should be stored in thread.metadata + thread_metadata = captured_thread.get("metadata", {}) current_state = thread_metadata.get("current_state") if isinstance(current_state, str): current_state = json.loads(current_state) assert current_state == {"document": "Test content"} + # Internal metadata should NOT be passed to chat client options + options_metadata = captured_options.get("metadata", {}) + assert "current_state" not in options_metadata + async def test_no_messages_provided(): """Test handling when no messages are 
provided.""" @@ -492,7 +521,7 @@ async def test_no_messages_provided(): async def stream_fn( messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: - yield ChatResponseUpdate(contents=[TextContent(text="Hello")]) + yield ChatResponseUpdate(contents=[Content.from_text(text="Hello")]) agent = ChatAgent(name="test_agent", instructions="Test", chat_client=StreamingChatClientStub(stream_fn)) wrapper = AgentFrameworkAgent(agent=agent) @@ -516,7 +545,7 @@ async def test_message_end_event_emission(): async def stream_fn( messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: - yield ChatResponseUpdate(contents=[TextContent(text="Hello world")]) + yield ChatResponseUpdate(contents=[Content.from_text(text="Hello world")]) agent = ChatAgent(name="test_agent", instructions="Test", chat_client=StreamingChatClientStub(stream_fn)) wrapper = AgentFrameworkAgent(agent=agent) @@ -595,48 +624,6 @@ async def stream_fn( assert len(tool_events) == 0 -async def test_suppressed_summary_with_document_state(): - """Test suppressed summary uses document state for confirmation message.""" - from agent_framework.ag_ui import AgentFrameworkAgent, DocumentWriterConfirmationStrategy - - async def stream_fn( - messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any - ) -> AsyncIterator[ChatResponseUpdate]: - yield ChatResponseUpdate(contents=[TextContent(text="Response")]) - - agent = ChatAgent(name="test_agent", instructions="Test", chat_client=StreamingChatClientStub(stream_fn)) - wrapper = AgentFrameworkAgent( - agent=agent, - state_schema={"document": {"type": "string"}}, - predict_state_config={"document": {"tool": "write_doc", "tool_argument": "content"}}, - confirmation_strategy=DocumentWriterConfirmationStrategy(), - ) - - # Simulate confirmation with document state - tool_result: dict[str, Any] = {"accepted": True, "steps": []} - 
input_data: dict[str, Any] = { - "messages": [ - { - "role": "tool", - "content": json.dumps(tool_result), - "toolCallId": "confirm_123", - } - ], - "state": {"document": "This is the beginning of a document. It contains important information."}, - } - - events: list[Any] = [] - async for event in wrapper.run_agent(input_data): - events.append(event) - - # Should generate fallback summary from document state - text_events = [e for e in events if e.type == "TEXT_MESSAGE_CONTENT"] - assert len(text_events) > 0 - # Should contain some reference to the document - full_text = "".join(e.delta for e in text_events) - assert "written" in full_text.lower() or "document" in full_text.lower() - - async def test_agent_with_use_service_thread_is_false(): """Test that when use_service_thread is False, the AgentThread used to run the agent is NOT set to the service thread ID.""" from agent_framework.ag_ui import AgentFrameworkAgent @@ -650,7 +637,7 @@ async def stream_fn( thread = kwargs.get("thread") request_service_thread_id = thread.service_thread_id if thread else None yield ChatResponseUpdate( - contents=[TextContent(text="Response")], response_id="resp_67890", conversation_id="conv_12345" + contents=[Content.from_text(text="Response")], response_id="resp_67890", conversation_id="conv_12345" ) agent = ChatAgent(chat_client=StreamingChatClientStub(stream_fn)) @@ -677,7 +664,7 @@ async def stream_fn( thread = kwargs.get("thread") request_service_thread_id = thread.service_thread_id if thread else None yield ChatResponseUpdate( - contents=[TextContent(text="Response")], response_id="resp_67890", conversation_id="conv_12345" + contents=[Content.from_text(text="Response")], response_id="resp_67890", conversation_id="conv_12345" ) agent = ChatAgent(chat_client=StreamingChatClientStub(stream_fn)) @@ -693,7 +680,7 @@ async def stream_fn( async def test_function_approval_mode_executes_tool(): """Test that function approval with approval_mode='always_require' sends the correct 
messages.""" - from agent_framework import FunctionResultContent, ai_function + from agent_framework import ai_function from agent_framework.ag_ui import AgentFrameworkAgent messages_received: list[Any] = [] @@ -712,7 +699,7 @@ async def stream_fn( # Capture the messages received by the chat client messages_received.clear() messages_received.extend(messages) - yield ChatResponseUpdate(contents=[TextContent(text="Processing completed")]) + yield ChatResponseUpdate(contents=[Content.from_text(text="Processing completed")]) agent = ChatAgent( chat_client=StreamingChatClientStub(stream_fn), @@ -770,7 +757,7 @@ async def stream_fn( tool_result_found = False for msg in messages_received: for content in msg.contents: - if isinstance(content, FunctionResultContent): + if content.type == "function_result": tool_result_found = True assert content.call_id == "call_get_datetime_123" assert content.result == "2025/12/01 12:00:00" @@ -784,7 +771,7 @@ async def stream_fn( async def test_function_approval_mode_rejection(): """Test that function approval rejection creates a rejection response.""" - from agent_framework import FunctionResultContent, ai_function + from agent_framework import ai_function from agent_framework.ag_ui import AgentFrameworkAgent messages_received: list[Any] = [] @@ -803,7 +790,7 @@ async def stream_fn( # Capture the messages received by the chat client messages_received.clear() messages_received.extend(messages) - yield ChatResponseUpdate(contents=[TextContent(text="Operation cancelled")]) + yield ChatResponseUpdate(contents=[Content.from_text(text="Operation cancelled")]) agent = ChatAgent( name="test_agent", @@ -855,7 +842,7 @@ async def stream_fn( rejection_found = False for msg in messages_received: for content in msg.contents: - if isinstance(content, FunctionResultContent): + if content.type == "function_result": rejection_found = True assert content.call_id == "call_delete_123" assert content.result == "Error: Tool call invocation was rejected by 
user." diff --git a/python/packages/ag-ui/tests/test_backend_tool_rendering.py b/python/packages/ag-ui/tests/test_backend_tool_rendering.py deleted file mode 100644 index 446da23ff2..0000000000 --- a/python/packages/ag-ui/tests/test_backend_tool_rendering.py +++ /dev/null @@ -1,129 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. - -"""Tests for backend tool rendering.""" - -from typing import cast - -from ag_ui.core import ( - TextMessageContentEvent, - TextMessageStartEvent, - ToolCallArgsEvent, - ToolCallEndEvent, - ToolCallResultEvent, - ToolCallStartEvent, -) -from agent_framework import AgentResponseUpdate, FunctionCallContent, FunctionResultContent, TextContent - -from agent_framework_ag_ui._events import AgentFrameworkEventBridge - - -async def test_tool_call_flow(): - """Test complete tool call flow: call -> args -> end -> result.""" - bridge = AgentFrameworkEventBridge(run_id="test-run", thread_id="test-thread") - - # Step 1: Tool call starts - tool_call = FunctionCallContent( - call_id="weather-123", - name="get_weather", - arguments={"location": "Seattle"}, - ) - - update1 = AgentResponseUpdate(contents=[tool_call]) - events1 = await bridge.from_agent_run_update(update1) - - # Should have: ToolCallStartEvent, ToolCallArgsEvent - assert len(events1) == 2 - assert isinstance(events1[0], ToolCallStartEvent) - assert isinstance(events1[1], ToolCallArgsEvent) - - start_event = events1[0] - assert start_event.tool_call_id == "weather-123" - assert start_event.tool_call_name == "get_weather" - - args_event = events1[1] - assert "Seattle" in args_event.delta - - # Step 2: Tool result comes back - tool_result = FunctionResultContent( - call_id="weather-123", - result="Weather in Seattle: Rainy, 52°F", - ) - - update2 = AgentResponseUpdate(contents=[tool_result]) - events2 = await bridge.from_agent_run_update(update2) - - # Should have: ToolCallEndEvent, ToolCallResultEvent - assert len(events2) == 2 - assert isinstance(events2[0], ToolCallEndEvent) - 
assert isinstance(events2[1], ToolCallResultEvent) - - end_event = events2[0] - assert end_event.tool_call_id == "weather-123" - - result_event = events2[1] - assert result_event.tool_call_id == "weather-123" - assert "Seattle" in result_event.content - assert "Rainy" in result_event.content - - -async def test_text_with_tool_call(): - """Test agent response with both text and tool calls.""" - bridge = AgentFrameworkEventBridge(run_id="test-run", thread_id="test-thread") - - # Agent says something then calls a tool - text_content = TextContent(text="Let me check the weather for you.") - tool_call = FunctionCallContent( - call_id="weather-456", - name="get_forecast", - arguments={"location": "San Francisco", "days": 3}, - ) - - update = AgentResponseUpdate(contents=[text_content, tool_call]) - events = await bridge.from_agent_run_update(update) - - # Should have: TextMessageStart, TextMessageContent, ToolCallStart, ToolCallArgs - assert len(events) == 4 - - assert isinstance(events[0], TextMessageStartEvent) - assert isinstance(events[1], TextMessageContentEvent) - assert isinstance(events[2], ToolCallStartEvent) - assert isinstance(events[3], ToolCallArgsEvent) - - text_event = events[1] - assert "check the weather" in text_event.delta - - tool_start = events[2] - assert tool_start.tool_call_name == "get_forecast" - - -async def test_multiple_tool_results(): - """Test handling multiple tool results in sequence.""" - bridge = AgentFrameworkEventBridge(run_id="test-run", thread_id="test-thread") - - # Multiple tool results - results = [ - FunctionResultContent(call_id="tool-1", result="Result 1"), - FunctionResultContent(call_id="tool-2", result="Result 2"), - FunctionResultContent(call_id="tool-3", result="Result 3"), - ] - - update = AgentResponseUpdate(contents=results) - events = await bridge.from_agent_run_update(update) - - # Should have 3 pairs of ToolCallEndEvent + ToolCallResultEvent = 6 events - assert len(events) == 6 - - # Verify the pattern: End, Result, 
End, Result, End, Result - for i in range(3): - end_idx = i * 2 - result_idx = i * 2 + 1 - - assert isinstance(events[end_idx], ToolCallEndEvent) - assert isinstance(events[result_idx], ToolCallResultEvent) - - end_event = cast(ToolCallEndEvent, events[end_idx]) - result_event = cast(ToolCallResultEvent, events[result_idx]) - - assert end_event.tool_call_id == f"tool-{i + 1}" - assert result_event.tool_call_id == f"tool-{i + 1}" - assert f"Result {i + 1}" in result_event.content diff --git a/python/packages/ag-ui/tests/test_confirmation_strategies_comprehensive.py b/python/packages/ag-ui/tests/test_confirmation_strategies_comprehensive.py deleted file mode 100644 index ab355d8995..0000000000 --- a/python/packages/ag-ui/tests/test_confirmation_strategies_comprehensive.py +++ /dev/null @@ -1,275 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. - -"""Comprehensive tests for all confirmation strategies.""" - -import pytest - -from agent_framework_ag_ui._confirmation_strategies import ( - ConfirmationStrategy, - DefaultConfirmationStrategy, - DocumentWriterConfirmationStrategy, - RecipeConfirmationStrategy, - TaskPlannerConfirmationStrategy, -) - - -@pytest.fixture -def sample_steps() -> list[dict[str, str]]: - """Sample steps for testing approval messages.""" - return [ - {"description": "Step 1: Do something", "status": "enabled"}, - {"description": "Step 2: Do another thing", "status": "enabled"}, - {"description": "Step 3: Disabled step", "status": "disabled"}, - ] - - -@pytest.fixture -def all_enabled_steps() -> list[dict[str, str]]: - """All steps enabled.""" - return [ - {"description": "Task A", "status": "enabled"}, - {"description": "Task B", "status": "enabled"}, - {"description": "Task C", "status": "enabled"}, - ] - - -@pytest.fixture -def empty_steps() -> list[dict[str, str]]: - """Empty steps list.""" - return [] - - -class TestDefaultConfirmationStrategy: - """Tests for DefaultConfirmationStrategy.""" - - def 
test_on_approval_accepted_with_enabled_steps(self, sample_steps: list[dict[str, str]]) -> None: - strategy = DefaultConfirmationStrategy() - message = strategy.on_approval_accepted(sample_steps) - - assert "Executing 2 approved steps" in message - assert "Step 1: Do something" in message - assert "Step 2: Do another thing" in message - assert "Step 3" not in message # Disabled step shouldn't appear - assert "All steps completed successfully!" in message - - def test_on_approval_accepted_with_all_enabled(self, all_enabled_steps: list[dict[str, str]]) -> None: - strategy = DefaultConfirmationStrategy() - message = strategy.on_approval_accepted(all_enabled_steps) - - assert "Executing 3 approved steps" in message - assert "Task A" in message - assert "Task B" in message - assert "Task C" in message - - def test_on_approval_accepted_with_empty_steps(self, empty_steps: list[dict[str, str]]) -> None: - strategy = DefaultConfirmationStrategy() - message = strategy.on_approval_accepted(empty_steps) - - assert "Executing 0 approved steps" in message - assert "All steps completed successfully!" in message - - def test_on_approval_rejected(self, sample_steps: list[dict[str, str]]) -> None: - strategy = DefaultConfirmationStrategy() - message = strategy.on_approval_rejected(sample_steps) - - assert "No problem!" in message - assert "What would you like me to change" in message - - def test_on_state_confirmed(self) -> None: - strategy = DefaultConfirmationStrategy() - message = strategy.on_state_confirmed() - - assert "Changes confirmed" in message - assert "successfully" in message - - def test_on_state_rejected(self) -> None: - strategy = DefaultConfirmationStrategy() - message = strategy.on_state_rejected() - - assert "No problem!" 
in message - assert "What would you like me to change" in message - - -class TestTaskPlannerConfirmationStrategy: - """Tests for TaskPlannerConfirmationStrategy.""" - - def test_on_approval_accepted_with_enabled_steps(self, sample_steps: list[dict[str, str]]) -> None: - strategy = TaskPlannerConfirmationStrategy() - message = strategy.on_approval_accepted(sample_steps) - - assert "Executing your requested tasks" in message - assert "1. Step 1: Do something" in message - assert "2. Step 2: Do another thing" in message - assert "Step 3" not in message - assert "All tasks completed successfully!" in message - - def test_on_approval_accepted_with_all_enabled(self, all_enabled_steps: list[dict[str, str]]) -> None: - strategy = TaskPlannerConfirmationStrategy() - message = strategy.on_approval_accepted(all_enabled_steps) - - assert "Executing your requested tasks" in message - assert "1. Task A" in message - assert "2. Task B" in message - assert "3. Task C" in message - - def test_on_approval_accepted_with_empty_steps(self, empty_steps: list[dict[str, str]]) -> None: - strategy = TaskPlannerConfirmationStrategy() - message = strategy.on_approval_accepted(empty_steps) - - assert "Executing your requested tasks" in message - assert "All tasks completed successfully!" in message - - def test_on_approval_rejected(self, sample_steps: list[dict[str, str]]) -> None: - strategy = TaskPlannerConfirmationStrategy() - message = strategy.on_approval_rejected(sample_steps) - - assert "No problem!" in message - assert "revise the plan" in message - - def test_on_state_confirmed(self) -> None: - strategy = TaskPlannerConfirmationStrategy() - message = strategy.on_state_confirmed() - - assert "Tasks confirmed" in message - assert "ready to execute" in message - - def test_on_state_rejected(self) -> None: - strategy = TaskPlannerConfirmationStrategy() - message = strategy.on_state_rejected() - - assert "No problem!" 
in message - assert "adjust the task list" in message - - -class TestRecipeConfirmationStrategy: - """Tests for RecipeConfirmationStrategy.""" - - def test_on_approval_accepted_with_enabled_steps(self, sample_steps: list[dict[str, str]]) -> None: - strategy = RecipeConfirmationStrategy() - message = strategy.on_approval_accepted(sample_steps) - - assert "Updating your recipe" in message - assert "1. Step 1: Do something" in message - assert "2. Step 2: Do another thing" in message - assert "Step 3" not in message - assert "Recipe updated successfully!" in message - - def test_on_approval_accepted_with_all_enabled(self, all_enabled_steps: list[dict[str, str]]) -> None: - strategy = RecipeConfirmationStrategy() - message = strategy.on_approval_accepted(all_enabled_steps) - - assert "Updating your recipe" in message - assert "1. Task A" in message - assert "2. Task B" in message - assert "3. Task C" in message - - def test_on_approval_accepted_with_empty_steps(self, empty_steps: list[dict[str, str]]) -> None: - strategy = RecipeConfirmationStrategy() - message = strategy.on_approval_accepted(empty_steps) - - assert "Updating your recipe" in message - assert "Recipe updated successfully!" in message - - def test_on_approval_rejected(self, sample_steps: list[dict[str, str]]) -> None: - strategy = RecipeConfirmationStrategy() - message = strategy.on_approval_rejected(sample_steps) - - assert "No problem!" in message - assert "ingredients or steps" in message - - def test_on_state_confirmed(self) -> None: - strategy = RecipeConfirmationStrategy() - message = strategy.on_state_confirmed() - - assert "Recipe changes applied" in message - assert "successfully" in message - - def test_on_state_rejected(self) -> None: - strategy = RecipeConfirmationStrategy() - message = strategy.on_state_rejected() - - assert "No problem!" 
in message - assert "adjust in the recipe" in message - - -class TestDocumentWriterConfirmationStrategy: - """Tests for DocumentWriterConfirmationStrategy.""" - - def test_on_approval_accepted_with_enabled_steps(self, sample_steps: list[dict[str, str]]) -> None: - strategy = DocumentWriterConfirmationStrategy() - message = strategy.on_approval_accepted(sample_steps) - - assert "Applying your edits" in message - assert "1. Step 1: Do something" in message - assert "2. Step 2: Do another thing" in message - assert "Step 3" not in message - assert "Document updated successfully!" in message - - def test_on_approval_accepted_with_all_enabled(self, all_enabled_steps: list[dict[str, str]]) -> None: - strategy = DocumentWriterConfirmationStrategy() - message = strategy.on_approval_accepted(all_enabled_steps) - - assert "Applying your edits" in message - assert "1. Task A" in message - assert "2. Task B" in message - assert "3. Task C" in message - - def test_on_approval_accepted_with_empty_steps(self, empty_steps: list[dict[str, str]]) -> None: - strategy = DocumentWriterConfirmationStrategy() - message = strategy.on_approval_accepted(empty_steps) - - assert "Applying your edits" in message - assert "Document updated successfully!" in message - - def test_on_approval_rejected(self, sample_steps: list[dict[str, str]]) -> None: - strategy = DocumentWriterConfirmationStrategy() - message = strategy.on_approval_rejected(sample_steps) - - assert "No problem!" in message - assert "keep or modify" in message - - def test_on_state_confirmed(self) -> None: - strategy = DocumentWriterConfirmationStrategy() - message = strategy.on_state_confirmed() - - assert "Document edits applied!" in message - - def test_on_state_rejected(self) -> None: - strategy = DocumentWriterConfirmationStrategy() - message = strategy.on_state_rejected() - - assert "No problem!" 
in message - assert "change about the document" in message - - -class TestConfirmationStrategyInterface: - """Tests for ConfirmationStrategy abstract base class.""" - - def test_cannot_instantiate_abstract_class(self): - """Verify ConfirmationStrategy is abstract and cannot be instantiated.""" - with pytest.raises(TypeError): - ConfirmationStrategy() # type: ignore - - def test_all_strategies_implement_interface(self): - """Verify all concrete strategies implement the full interface.""" - strategies = [ - DefaultConfirmationStrategy(), - TaskPlannerConfirmationStrategy(), - RecipeConfirmationStrategy(), - DocumentWriterConfirmationStrategy(), - ] - - sample_steps = [{"description": "Test", "status": "enabled"}] - - for strategy in strategies: - # All should have these methods - assert callable(strategy.on_approval_accepted) - assert callable(strategy.on_approval_rejected) - assert callable(strategy.on_state_confirmed) - assert callable(strategy.on_state_rejected) - - # All should return strings - assert isinstance(strategy.on_approval_accepted(sample_steps), str) - assert isinstance(strategy.on_approval_rejected(sample_steps), str) - assert isinstance(strategy.on_state_confirmed(), str) - assert isinstance(strategy.on_state_rejected(), str) diff --git a/python/packages/ag-ui/tests/test_document_writer_flow.py b/python/packages/ag-ui/tests/test_document_writer_flow.py deleted file mode 100644 index 2e5cec9f95..0000000000 --- a/python/packages/ag-ui/tests/test_document_writer_flow.py +++ /dev/null @@ -1,234 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. 
- -"""Tests for document writer predictive state flow with confirm_changes.""" - -from ag_ui.core import EventType, StateDeltaEvent, ToolCallArgsEvent, ToolCallEndEvent, ToolCallStartEvent -from agent_framework import AgentResponseUpdate, FunctionCallContent, FunctionResultContent, TextContent - -from agent_framework_ag_ui._events import AgentFrameworkEventBridge - - -async def test_streaming_document_with_state_deltas(): - """Test that streaming tool arguments emit progressive StateDeltaEvents.""" - predict_config = { - "document": {"tool": "write_document_local", "tool_argument": "document"}, - } - - bridge = AgentFrameworkEventBridge( - run_id="test_run", - thread_id="test_thread", - predict_state_config=predict_config, - ) - - # Simulate streaming tool call - first chunk with name - tool_call_start = FunctionCallContent( - call_id="call_123", - name="write_document_local", - arguments='{"document":"Once', - ) - update1 = AgentResponseUpdate(contents=[tool_call_start]) - events1 = await bridge.from_agent_run_update(update1) - - # Should have ToolCallStartEvent and ToolCallArgsEvent - assert any(e.type == EventType.TOOL_CALL_START for e in events1) - assert any(e.type == EventType.TOOL_CALL_ARGS for e in events1) - - # Second chunk - incomplete JSON, should try partial extraction - tool_call_chunk2 = FunctionCallContent(call_id="call_123", name="write_document_local", arguments=" upon a time") - update2 = AgentResponseUpdate(contents=[tool_call_chunk2]) - events2 = await bridge.from_agent_run_update(update2) - - # Should emit StateDeltaEvent with partial document - state_deltas = [e for e in events2 if isinstance(e, StateDeltaEvent)] - assert len(state_deltas) >= 1 - - # Check JSON Patch format - delta = state_deltas[0] - assert isinstance(delta.delta, list) - assert len(delta.delta) > 0 - assert delta.delta[0]["op"] == "replace" - assert delta.delta[0]["path"] == "/document" - assert "Once upon a time" in delta.delta[0]["value"] - - -async def 
test_confirm_changes_emission(): - """Test that confirm_changes tool call is emitted after predictive tool completion.""" - predict_config = { - "document": {"tool": "write_document_local", "tool_argument": "document"}, - } - - current_state: dict[str, str] = {} - - bridge = AgentFrameworkEventBridge( - run_id="test_run", - thread_id="test_thread", - predict_state_config=predict_config, - current_state=current_state, - ) - - # Set current tool name (simulating earlier tool call start) - bridge.current_tool_call_name = "write_document_local" - bridge.pending_state_updates = {"document": "A short story"} - - # Tool result - tool_result = FunctionResultContent( - call_id="call_123", - result="Document written.", - ) - - update = AgentResponseUpdate(contents=[tool_result]) - events = await bridge.from_agent_run_update(update) - - # Should have: ToolCallEndEvent, ToolCallResultEvent, StateSnapshotEvent, confirm_changes sequence - assert any(e.type == EventType.TOOL_CALL_END for e in events) - assert any(e.type == EventType.TOOL_CALL_RESULT for e in events) - assert any(e.type == EventType.STATE_SNAPSHOT for e in events) - - # Check for confirm_changes tool call - confirm_starts = [e for e in events if isinstance(e, ToolCallStartEvent) and e.tool_call_name == "confirm_changes"] - assert len(confirm_starts) == 1 - - confirm_args = [e for e in events if isinstance(e, ToolCallArgsEvent) and e.delta == "{}"] - assert len(confirm_args) >= 1 - - confirm_ends = [e for e in events if isinstance(e, ToolCallEndEvent)] - # At least 2: one for write_document_local, one for confirm_changes - assert len(confirm_ends) >= 2 - - # Check that stop flag is set - assert bridge.should_stop_after_confirm is True - - -async def test_text_suppression_before_confirm(): - """Test that text messages are suppressed when confirm_changes is pending.""" - predict_config = { - "document": {"tool": "write_document_local", "tool_argument": "document"}, - } - - bridge = AgentFrameworkEventBridge( - 
run_id="test_run", - thread_id="test_thread", - predict_state_config=predict_config, - ) - - # Set flag indicating we're waiting for confirmation - bridge.should_stop_after_confirm = True - - # Text content that should be suppressed - text = TextContent(text="I have written a story about pirates.") - update = AgentResponseUpdate(contents=[text]) - - events = await bridge.from_agent_run_update(update) - - # Should NOT emit TextMessageContentEvent - text_events = [e for e in events if e.type == EventType.TEXT_MESSAGE_CONTENT] - assert len(text_events) == 0 - - # But should save the text - assert bridge.suppressed_summary == "I have written a story about pirates." - - -async def test_no_confirm_for_non_predictive_tools(): - """Test that confirm_changes is NOT emitted for regular tool calls.""" - predict_config = { - "document": {"tool": "write_document_local", "tool_argument": "document"}, - } - - current_state: dict[str, str] = {} - - bridge = AgentFrameworkEventBridge( - run_id="test_run", - thread_id="test_thread", - predict_state_config=predict_config, - current_state=current_state, - ) - - # Different tool (not in predict_state_config) - bridge.current_tool_call_name = "get_weather" - - tool_result = FunctionResultContent( - call_id="call_456", - result="Sunny, 72°F", - ) - - update = AgentResponseUpdate(contents=[tool_result]) - events = await bridge.from_agent_run_update(update) - - # Should NOT have confirm_changes - confirm_starts = [e for e in events if isinstance(e, ToolCallStartEvent) and e.tool_call_name == "confirm_changes"] - assert len(confirm_starts) == 0 - - # Stop flag should NOT be set - assert bridge.should_stop_after_confirm is False - - -async def test_state_delta_deduplication(): - """Test that duplicate state values don't emit multiple StateDeltaEvents.""" - predict_config = { - "document": {"tool": "write_document_local", "tool_argument": "document"}, - } - - bridge = AgentFrameworkEventBridge( - run_id="test_run", - thread_id="test_thread", 
- predict_state_config=predict_config, - ) - - # First tool call with document - tool_call1 = FunctionCallContent( - call_id="call_1", - name="write_document_local", - arguments='{"document":"Same text"}', - ) - update1 = AgentResponseUpdate(contents=[tool_call1]) - events1 = await bridge.from_agent_run_update(update1) - - # Count state deltas - state_deltas_1 = [e for e in events1 if isinstance(e, StateDeltaEvent)] - assert len(state_deltas_1) >= 1 - - # Second tool call with SAME document (shouldn't emit new delta) - bridge.current_tool_call_name = "write_document_local" - tool_call2 = FunctionCallContent( - call_id="call_2", - name="write_document_local", - arguments='{"document":"Same text"}', # Identical content - ) - update2 = AgentResponseUpdate(contents=[tool_call2]) - events2 = await bridge.from_agent_run_update(update2) - - # Should NOT emit state delta (same value) - state_deltas_2 = [e for e in events2 if e.type == EventType.STATE_DELTA] - assert len(state_deltas_2) == 0 - - -async def test_predict_state_config_multiple_fields(): - """Test predictive state with multiple state fields.""" - predict_config = { - "title": {"tool": "create_post", "tool_argument": "title"}, - "content": {"tool": "create_post", "tool_argument": "body"}, - } - - bridge = AgentFrameworkEventBridge( - run_id="test_run", - thread_id="test_thread", - predict_state_config=predict_config, - ) - - # Tool call with both fields - tool_call = FunctionCallContent( - call_id="call_999", - name="create_post", - arguments='{"title":"My Post","body":"Post content"}', - ) - update = AgentResponseUpdate(contents=[tool_call]) - events = await bridge.from_agent_run_update(update) - - # Should emit StateDeltaEvent for both fields - state_deltas = [e for e in events if isinstance(e, StateDeltaEvent)] - assert len(state_deltas) >= 2 - - # Check both fields are present - paths = [delta.delta[0]["path"] for delta in state_deltas] - assert "/title" in paths - assert "/content" in paths diff --git 
a/python/packages/ag-ui/tests/test_endpoint.py b/python/packages/ag-ui/tests/test_endpoint.py index 59cb884c5c..e09bb32fce 100644 --- a/python/packages/ag-ui/tests/test_endpoint.py +++ b/python/packages/ag-ui/tests/test_endpoint.py @@ -6,7 +6,7 @@ import sys from pathlib import Path -from agent_framework import ChatAgent, ChatResponseUpdate, TextContent +from agent_framework import ChatAgent, ChatResponseUpdate, Content from fastapi import FastAPI, Header, HTTPException from fastapi.params import Depends from fastapi.testclient import TestClient @@ -20,7 +20,7 @@ def build_chat_client(response_text: str = "Test response") -> StreamingChatClientStub: """Create a typed chat client stub for endpoint tests.""" - updates = [ChatResponseUpdate(contents=[TextContent(text=response_text)])] + updates = [ChatResponseUpdate(contents=[Content.from_text(text=response_text)])] return StreamingChatClientStub(stream_from_updates(updates)) diff --git a/python/packages/ag-ui/tests/test_events_comprehensive.py b/python/packages/ag-ui/tests/test_events_comprehensive.py deleted file mode 100644 index 295ba00372..0000000000 --- a/python/packages/ag-ui/tests/test_events_comprehensive.py +++ /dev/null @@ -1,822 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. 
- -"""Comprehensive tests for AgentFrameworkEventBridge (_events.py).""" - -import json - -from agent_framework import ( - AgentResponseUpdate, - FunctionApprovalRequestContent, - FunctionCallContent, - FunctionResultContent, - TextContent, -) - - -async def test_basic_text_message_conversion(): - """Test basic TextContent to AG-UI events.""" - from agent_framework_ag_ui._events import AgentFrameworkEventBridge - - bridge = AgentFrameworkEventBridge(run_id="test_run", thread_id="test_thread") - - update = AgentResponseUpdate(contents=[TextContent(text="Hello")]) - events = await bridge.from_agent_run_update(update) - - assert len(events) == 2 - assert events[0].type == "TEXT_MESSAGE_START" - assert events[0].role == "assistant" - assert events[1].type == "TEXT_MESSAGE_CONTENT" - assert events[1].delta == "Hello" - - -async def test_text_message_streaming(): - """Test streaming TextContent with multiple chunks.""" - from agent_framework_ag_ui._events import AgentFrameworkEventBridge - - bridge = AgentFrameworkEventBridge(run_id="test_run", thread_id="test_thread") - - update1 = AgentResponseUpdate(contents=[TextContent(text="Hello ")]) - update2 = AgentResponseUpdate(contents=[TextContent(text="world")]) - - events1 = await bridge.from_agent_run_update(update1) - events2 = await bridge.from_agent_run_update(update2) - - # First update: START + CONTENT - assert len(events1) == 2 - assert events1[0].type == "TEXT_MESSAGE_START" - assert events1[1].delta == "Hello " - - # Second update: just CONTENT (same message) - assert len(events2) == 1 - assert events2[0].type == "TEXT_MESSAGE_CONTENT" - assert events2[0].delta == "world" - - # Both content events should have same message_id - assert events1[1].message_id == events2[0].message_id - - -async def test_skip_text_content_for_structured_outputs(): - """Test that text content is skipped when skip_text_content=True.""" - from agent_framework_ag_ui._events import AgentFrameworkEventBridge - - bridge = 
AgentFrameworkEventBridge(run_id="test_run", thread_id="test_thread", skip_text_content=True) - - update = AgentResponseUpdate(contents=[TextContent(text='{"result": "data"}')]) - events = await bridge.from_agent_run_update(update) - - # No events should be emitted - assert len(events) == 0 - - -async def test_skip_text_content_for_empty_text(): - """Test streaming TextContent with empty chunks.""" - from agent_framework_ag_ui._events import AgentFrameworkEventBridge - - bridge = AgentFrameworkEventBridge(run_id="test_run", thread_id="test_thread") - - update1 = AgentResponseUpdate(contents=[TextContent(text="Hello ")]) - update2 = AgentResponseUpdate(contents=[TextContent(text="")]) # Empty chunk - update3 = AgentResponseUpdate(contents=[TextContent(text="world")]) - - events1 = await bridge.from_agent_run_update(update1) - events2 = await bridge.from_agent_run_update(update2) - events3 = await bridge.from_agent_run_update(update3) - - # First update: START + CONTENT - assert len(events1) == 2 - assert events1[0].type == "TEXT_MESSAGE_START" - assert events1[1].delta == "Hello " - - # Second update: should skip empty chunk, no events - assert len(events2) == 0 - - # Third update: just CONTENT (same message) - assert len(events3) == 1 - assert events3[0].type == "TEXT_MESSAGE_CONTENT" - assert events3[0].delta == "world" - - # Both content events should have same message_id - assert events1[1].message_id == events3[0].message_id - - -async def test_tool_call_with_name(): - """Test FunctionCallContent with name emits ToolCallStartEvent.""" - from agent_framework_ag_ui._events import AgentFrameworkEventBridge - - bridge = AgentFrameworkEventBridge(run_id="test_run", thread_id="test_thread") - - update = AgentResponseUpdate(contents=[FunctionCallContent(name="search_web", call_id="call_123")]) - events = await bridge.from_agent_run_update(update) - - assert len(events) == 1 - assert events[0].type == "TOOL_CALL_START" - assert events[0].tool_call_name == "search_web" 
- assert events[0].tool_call_id == "call_123" - - -async def test_tool_call_streaming_args(): - """Test streaming tool call arguments.""" - from agent_framework_ag_ui._events import AgentFrameworkEventBridge - - bridge = AgentFrameworkEventBridge(run_id="test_run", thread_id="test_thread") - - # First chunk: name only - update1 = AgentResponseUpdate(contents=[FunctionCallContent(name="search_web", call_id="call_123")]) - events1 = await bridge.from_agent_run_update(update1) - - # Second chunk: arguments chunk 1 (name can be empty string for continuation) - update2 = AgentResponseUpdate(contents=[FunctionCallContent(name="", call_id="call_123", arguments='{"query": "')]) - events2 = await bridge.from_agent_run_update(update2) - - # Third chunk: arguments chunk 2 - update3 = AgentResponseUpdate(contents=[FunctionCallContent(name="", call_id="call_123", arguments='AI"}')]) - events3 = await bridge.from_agent_run_update(update3) - - # First update: ToolCallStartEvent - assert len(events1) == 1 - assert events1[0].type == "TOOL_CALL_START" - - # Second update: ToolCallArgsEvent - assert len(events2) == 1 - assert events2[0].type == "TOOL_CALL_ARGS" - assert events2[0].delta == '{"query": "' - - # Third update: ToolCallArgsEvent - assert len(events3) == 1 - assert events3[0].type == "TOOL_CALL_ARGS" - assert events3[0].delta == 'AI"}' - - # All should have same tool_call_id - assert events1[0].tool_call_id == events2[0].tool_call_id == events3[0].tool_call_id - - -async def test_streaming_tool_call_no_duplicate_start_events(): - """Test that streaming tool calls emit exactly one ToolCallStartEvent. - - This is a regression test for the Anthropic streaming fix where input_json_delta - events were incorrectly passing the tool name, causing duplicate ToolCallStartEvents. 
- - The correct behavior is: - - Initial FunctionCallContent with name -> emits ToolCallStartEvent - - Subsequent FunctionCallContent with name="" -> emits only ToolCallArgsEvent - - See: https://github.com/microsoft/agent-framework/pull/3051 - """ - from agent_framework_ag_ui._events import AgentFrameworkEventBridge - - bridge = AgentFrameworkEventBridge(run_id="test_run", thread_id="test_thread") - - # Simulate streaming tool call: first chunk has name, subsequent chunks have name="" - update1 = AgentResponseUpdate(contents=[FunctionCallContent(name="get_weather", call_id="call_789")]) - update2 = AgentResponseUpdate(contents=[FunctionCallContent(name="", call_id="call_789", arguments='{"loc":')]) - update3 = AgentResponseUpdate(contents=[FunctionCallContent(name="", call_id="call_789", arguments='"SF"}')]) - - events1 = await bridge.from_agent_run_update(update1) - events2 = await bridge.from_agent_run_update(update2) - events3 = await bridge.from_agent_run_update(update3) - - # Count all ToolCallStartEvents - should be exactly 1 - all_events = events1 + events2 + events3 - tool_call_start_count = sum(1 for e in all_events if e.type == "TOOL_CALL_START") - assert tool_call_start_count == 1, f"Expected 1 ToolCallStartEvent, got {tool_call_start_count}" - - # Verify event types - assert events1[0].type == "TOOL_CALL_START" - assert events2[0].type == "TOOL_CALL_ARGS" - assert events3[0].type == "TOOL_CALL_ARGS" - - -async def test_tool_result_with_dict(): - """Test FunctionResultContent with dict result.""" - from agent_framework_ag_ui._events import AgentFrameworkEventBridge - - bridge = AgentFrameworkEventBridge(run_id="test_run", thread_id="test_thread") - - result_data = {"status": "success", "count": 42} - update = AgentResponseUpdate(contents=[FunctionResultContent(call_id="call_123", result=result_data)]) - events = await bridge.from_agent_run_update(update) - - # Should emit ToolCallEndEvent + ToolCallResultEvent - assert len(events) == 2 - assert 
events[0].type == "TOOL_CALL_END" - assert events[0].tool_call_id == "call_123" - - assert events[1].type == "TOOL_CALL_RESULT" - assert events[1].tool_call_id == "call_123" - assert events[1].role == "tool" - # Result should be JSON-serialized - assert json.loads(events[1].content) == result_data - - -async def test_tool_result_with_string(): - """Test FunctionResultContent with string result.""" - from agent_framework_ag_ui._events import AgentFrameworkEventBridge - - bridge = AgentFrameworkEventBridge(run_id="test_run", thread_id="test_thread") - - update = AgentResponseUpdate(contents=[FunctionResultContent(call_id="call_123", result="Search complete")]) - events = await bridge.from_agent_run_update(update) - - assert len(events) == 2 - assert events[0].type == "TOOL_CALL_END" - assert events[1].type == "TOOL_CALL_RESULT" - assert events[1].content == "Search complete" - - -async def test_tool_result_with_none(): - """Test FunctionResultContent with None result.""" - from agent_framework_ag_ui._events import AgentFrameworkEventBridge - - bridge = AgentFrameworkEventBridge(run_id="test_run", thread_id="test_thread") - - update = AgentResponseUpdate(contents=[FunctionResultContent(call_id="call_123", result=None)]) - events = await bridge.from_agent_run_update(update) - - assert len(events) == 2 - assert events[0].type == "TOOL_CALL_END" - assert events[1].type == "TOOL_CALL_RESULT" - # prepare_function_call_results serializes None as JSON "null" - assert events[1].content == "null" - - -async def test_multiple_tool_results_in_sequence(): - """Test multiple tool results processed sequentially.""" - from agent_framework_ag_ui._events import AgentFrameworkEventBridge - - bridge = AgentFrameworkEventBridge(run_id="test_run", thread_id="test_thread") - - update = AgentResponseUpdate( - contents=[ - FunctionResultContent(call_id="call_1", result="Result 1"), - FunctionResultContent(call_id="call_2", result="Result 2"), - ] - ) - events = await 
bridge.from_agent_run_update(update) - - # Each result emits: ToolCallEndEvent + ToolCallResultEvent = 4 events total - assert len(events) == 4 - assert events[0].tool_call_id == "call_1" - assert events[1].tool_call_id == "call_1" - assert events[2].tool_call_id == "call_2" - assert events[3].tool_call_id == "call_2" - - -async def test_function_approval_request_basic(): - """Test FunctionApprovalRequestContent conversion.""" - from agent_framework_ag_ui._events import AgentFrameworkEventBridge - - # Set require_confirmation=False to test just the function_approval_request event - bridge = AgentFrameworkEventBridge( - run_id="test_run", - thread_id="test_thread", - require_confirmation=False, - ) - - func_call = FunctionCallContent( - call_id="call_123", - name="send_email", - arguments={"to": "user@example.com", "subject": "Test"}, - ) - approval = FunctionApprovalRequestContent( - id="approval_001", - function_call=func_call, - ) - - update = AgentResponseUpdate(contents=[approval]) - events = await bridge.from_agent_run_update(update) - - # Should emit: ToolCallEndEvent + CustomEvent - assert len(events) == 2 - - # First: ToolCallEndEvent to close the tool call - assert events[0].type == "TOOL_CALL_END" - assert events[0].tool_call_id == "call_123" - - # Second: CustomEvent with approval details - assert events[1].type == "CUSTOM" - assert events[1].name == "function_approval_request" - assert events[1].value["id"] == "approval_001" - assert events[1].value["function_call"]["name"] == "send_email" - - -async def test_empty_predict_state_config(): - """Test behavior with no predictive state configuration.""" - from agent_framework_ag_ui._events import AgentFrameworkEventBridge - - bridge = AgentFrameworkEventBridge( - run_id="test_run", - thread_id="test_thread", - predict_state_config={}, # Empty config - ) - - # Tool call with arguments - update = AgentResponseUpdate( - contents=[ - FunctionCallContent(name="write_doc", call_id="call_1", arguments='{"content": 
"test"}'), - FunctionResultContent(call_id="call_1", result="Done"), - ] - ) - events = await bridge.from_agent_run_update(update) - - # Should NOT emit StateDeltaEvent or confirm_changes - event_types = [e.type for e in events] - assert "STATE_DELTA" not in event_types - assert "STATE_SNAPSHOT" not in event_types - - # Should have: ToolCallStart, ToolCallArgs, ToolCallEnd, ToolCallResult - assert event_types == [ - "TOOL_CALL_START", - "TOOL_CALL_ARGS", - "TOOL_CALL_END", - "TOOL_CALL_RESULT", - ] - - -async def test_tool_not_in_predict_state_config(): - """Test tool that doesn't match any predict_state_config entry.""" - from agent_framework_ag_ui._events import AgentFrameworkEventBridge - - bridge = AgentFrameworkEventBridge( - run_id="test_run", - thread_id="test_thread", - predict_state_config={ - "document": {"tool": "write_document", "tool_argument": "content"}, - }, - ) - - # Different tool name - update = AgentResponseUpdate( - contents=[ - FunctionCallContent(name="search_web", call_id="call_1", arguments='{"query": "AI"}'), - FunctionResultContent(call_id="call_1", result="Results"), - ] - ) - events = await bridge.from_agent_run_update(update) - - # Should NOT emit StateDeltaEvent or confirm_changes - event_types = [e.type for e in events] - assert "STATE_DELTA" not in event_types - assert "STATE_SNAPSHOT" not in event_types - - -async def test_state_management_tracking(): - """Test current_state and pending_state_updates tracking.""" - from agent_framework_ag_ui._events import AgentFrameworkEventBridge - - initial_state = {"document": ""} - bridge = AgentFrameworkEventBridge( - run_id="test_run", - thread_id="test_thread", - predict_state_config={ - "document": {"tool": "write_doc", "tool_argument": "content"}, - }, - current_state=initial_state, - ) - - # Streaming tool call - update1 = AgentResponseUpdate( - contents=[ - FunctionCallContent(name="write_doc", call_id="call_1"), - FunctionCallContent(name="", call_id="call_1", arguments='{"content": 
"Hello"}'), - ] - ) - await bridge.from_agent_run_update(update1) - - # Check pending_state_updates was populated - assert "document" in bridge.pending_state_updates - assert bridge.pending_state_updates["document"] == "Hello" - - # Tool result should update current_state - update2 = AgentResponseUpdate(contents=[FunctionResultContent(call_id="call_1", result="Done")]) - await bridge.from_agent_run_update(update2) - - # current_state should be updated - assert bridge.current_state["document"] == "Hello" - - # pending_state_updates should be cleared - assert len(bridge.pending_state_updates) == 0 - - -async def test_wildcard_tool_argument(): - """Test tool_argument='*' uses all arguments as state value.""" - from agent_framework_ag_ui._events import AgentFrameworkEventBridge - - bridge = AgentFrameworkEventBridge( - run_id="test_run", - thread_id="test_thread", - predict_state_config={ - "recipe": {"tool": "create_recipe", "tool_argument": "*"}, - }, - current_state={}, - ) - - # Complete tool call with dict arguments - update = AgentResponseUpdate( - contents=[ - FunctionCallContent( - name="create_recipe", - call_id="call_1", - arguments={"title": "Pasta", "ingredients": ["pasta", "sauce"]}, - ), - FunctionResultContent(call_id="call_1", result="Created"), - ] - ) - events = await bridge.from_agent_run_update(update) - - # Find StateDeltaEvent - delta_events = [e for e in events if e.type == "STATE_DELTA"] - assert len(delta_events) > 0 - - # Value should be the entire arguments dict - delta = delta_events[0].delta[0] - assert delta["path"] == "/recipe" - assert delta["value"] == {"title": "Pasta", "ingredients": ["pasta", "sauce"]} - - -async def test_run_lifecycle_events(): - """Test RunStartedEvent and RunFinishedEvent creation.""" - from agent_framework_ag_ui._events import AgentFrameworkEventBridge - - bridge = AgentFrameworkEventBridge(run_id="test_run", thread_id="test_thread") - - started = bridge.create_run_started_event() - assert started.type == 
"RUN_STARTED" - assert started.run_id == "test_run" - assert started.thread_id == "test_thread" - - finished = bridge.create_run_finished_event(result={"status": "complete"}) - assert finished.type == "RUN_FINISHED" - assert finished.run_id == "test_run" - assert finished.thread_id == "test_thread" - assert finished.result == {"status": "complete"} - - -async def test_message_lifecycle_events(): - """Test TextMessageStartEvent and TextMessageEndEvent creation.""" - from agent_framework_ag_ui._events import AgentFrameworkEventBridge - - bridge = AgentFrameworkEventBridge(run_id="test_run", thread_id="test_thread") - - start = bridge.create_message_start_event("msg_123", role="assistant") - assert start.type == "TEXT_MESSAGE_START" - assert start.message_id == "msg_123" - assert start.role == "assistant" - - end = bridge.create_message_end_event("msg_123") - assert end.type == "TEXT_MESSAGE_END" - assert end.message_id == "msg_123" - - -async def test_state_event_creation(): - """Test StateSnapshotEvent and StateDeltaEvent creation helpers.""" - from agent_framework_ag_ui._events import AgentFrameworkEventBridge - - bridge = AgentFrameworkEventBridge(run_id="test_run", thread_id="test_thread") - - # StateSnapshotEvent - snapshot = bridge.create_state_snapshot_event({"document": "content"}) - assert snapshot.type == "STATE_SNAPSHOT" - assert snapshot.snapshot == {"document": "content"} - - # StateDeltaEvent with JSON Patch - delta = bridge.create_state_delta_event([{"op": "replace", "path": "/document", "value": "new content"}]) - assert delta.type == "STATE_DELTA" - assert len(delta.delta) == 1 - assert delta.delta[0]["op"] == "replace" - assert delta.delta[0]["path"] == "/document" - assert delta.delta[0]["value"] == "new content" - - -async def test_state_snapshot_after_tool_result(): - """Test StateSnapshotEvent emission after tool result with pending updates.""" - from agent_framework_ag_ui._events import AgentFrameworkEventBridge - - bridge = 
AgentFrameworkEventBridge( - run_id="test_run", - thread_id="test_thread", - predict_state_config={ - "document": {"tool": "write_doc", "tool_argument": "content"}, - }, - current_state={"document": ""}, - ) - - # Tool call with streaming args - update1 = AgentResponseUpdate( - contents=[ - FunctionCallContent(name="write_doc", call_id="call_1"), - FunctionCallContent(name="", call_id="call_1", arguments='{"content": "Test"}'), - ] - ) - await bridge.from_agent_run_update(update1) - - # Tool result should trigger StateSnapshotEvent - update2 = AgentResponseUpdate(contents=[FunctionResultContent(call_id="call_1", result="Done")]) - events = await bridge.from_agent_run_update(update2) - - # Should have: ToolCallEnd, ToolCallResult, StateSnapshot, ToolCallStart (confirm_changes), ToolCallArgs, ToolCallEnd - snapshot_events = [e for e in events if e.type == "STATE_SNAPSHOT"] - assert len(snapshot_events) == 1 - assert snapshot_events[0].snapshot["document"] == "Test" - - -async def test_message_id_persistence_across_chunks(): - """Test that message_id persists across multiple text chunks.""" - from agent_framework_ag_ui._events import AgentFrameworkEventBridge - - bridge = AgentFrameworkEventBridge(run_id="test_run", thread_id="test_thread") - - # First chunk - update1 = AgentResponseUpdate(contents=[TextContent(text="Hello ")]) - events1 = await bridge.from_agent_run_update(update1) - message_id = events1[0].message_id - - # Second chunk - update2 = AgentResponseUpdate(contents=[TextContent(text="world")]) - events2 = await bridge.from_agent_run_update(update2) - - # Should use same message_id - assert events2[0].message_id == message_id - assert bridge.current_message_id == message_id - - -async def test_tool_call_id_tracking(): - """Test tool_call_id tracking across streaming chunks.""" - from agent_framework_ag_ui._events import AgentFrameworkEventBridge - - bridge = AgentFrameworkEventBridge(run_id="test_run", thread_id="test_thread") - - # First chunk with name - 
update1 = AgentResponseUpdate(contents=[FunctionCallContent(name="search", call_id="call_1")]) - await bridge.from_agent_run_update(update1) - - assert bridge.current_tool_call_id == "call_1" - assert bridge.current_tool_call_name == "search" - - # Second chunk with args but no name - update2 = AgentResponseUpdate(contents=[FunctionCallContent(name="", call_id="call_1", arguments='{"q":"AI"}')]) - events2 = await bridge.from_agent_run_update(update2) - - # Should still track same tool call - assert bridge.current_tool_call_id == "call_1" - assert events2[0].tool_call_id == "call_1" - - -async def test_tool_name_reset_after_result(): - """Test current_tool_call_name is reset after tool result.""" - from agent_framework_ag_ui._events import AgentFrameworkEventBridge - - bridge = AgentFrameworkEventBridge( - run_id="test_run", - thread_id="test_thread", - predict_state_config={ - "document": {"tool": "write_doc", "tool_argument": "content"}, - }, - ) - - # Tool call - update1 = AgentResponseUpdate( - contents=[ - FunctionCallContent(name="write_doc", call_id="call_1"), - FunctionCallContent(name="", call_id="call_1", arguments='{"content": "Test"}'), - ] - ) - await bridge.from_agent_run_update(update1) - - assert bridge.current_tool_call_name == "write_doc" - - # Tool result with predictive state (should trigger confirm_changes and reset) - update2 = AgentResponseUpdate(contents=[FunctionResultContent(call_id="call_1", result="Done")]) - await bridge.from_agent_run_update(update2) - - # Tool name should be reset - assert bridge.current_tool_call_name is None - - -async def test_function_approval_with_wildcard_argument(): - """Test function approval with wildcard * argument.""" - from agent_framework_ag_ui._events import AgentFrameworkEventBridge - - bridge = AgentFrameworkEventBridge( - run_id="test_run", - thread_id="test_thread", - predict_state_config={ - "payload": {"tool": "submit", "tool_argument": "*"}, - }, - ) - - approval_content = 
FunctionApprovalRequestContent( - id="approval_1", - function_call=FunctionCallContent( - name="submit", call_id="call_1", arguments='{"key1": "value1", "key2": "value2"}' - ), - ) - - update = AgentResponseUpdate(contents=[approval_content]) - events = await bridge.from_agent_run_update(update) - - # Should emit StateSnapshotEvent with entire parsed args as value - snapshot_events = [e for e in events if e.type == "STATE_SNAPSHOT"] - assert len(snapshot_events) == 1 - assert snapshot_events[0].snapshot["payload"] == {"key1": "value1", "key2": "value2"} - - -async def test_function_approval_missing_argument(): - """Test function approval when specified argument is not in parsed args.""" - from agent_framework_ag_ui._events import AgentFrameworkEventBridge - - bridge = AgentFrameworkEventBridge( - run_id="test_run", - thread_id="test_thread", - predict_state_config={ - "data": {"tool": "process", "tool_argument": "missing_field"}, - }, - ) - - approval_content = FunctionApprovalRequestContent( - id="approval_1", - function_call=FunctionCallContent(name="process", call_id="call_1", arguments='{"other_field": "value"}'), - ) - - update = AgentResponseUpdate(contents=[approval_content]) - events = await bridge.from_agent_run_update(update) - - # Should not emit StateSnapshotEvent since argument not found - snapshot_events = [e for e in events if e.type == "STATE_SNAPSHOT"] - assert len(snapshot_events) == 0 - - -async def test_empty_predict_state_config_no_deltas(): - """Test with empty predict_state_config (no predictive updates).""" - from agent_framework_ag_ui._events import AgentFrameworkEventBridge - - bridge = AgentFrameworkEventBridge(run_id="test_run", thread_id="test_thread", predict_state_config={}) - - # Tool call with arguments - update = AgentResponseUpdate( - contents=[ - FunctionCallContent(name="search", call_id="call_1"), - FunctionCallContent(name="", call_id="call_1", arguments='{"query": "test"}'), - ] - ) - events = await 
bridge.from_agent_run_update(update) - - # Should not emit any StateDeltaEvents - delta_events = [e for e in events if e.type == "STATE_DELTA"] - assert len(delta_events) == 0 - - -async def test_tool_with_no_matching_config(): - """Test tool call for tool not in predict_state_config.""" - from agent_framework_ag_ui._events import AgentFrameworkEventBridge - - bridge = AgentFrameworkEventBridge( - run_id="test_run", - thread_id="test_thread", - predict_state_config={"document": {"tool": "write_doc", "tool_argument": "content"}}, - ) - - # Tool call for different tool - update = AgentResponseUpdate( - contents=[ - FunctionCallContent(name="search_web", call_id="call_1"), - FunctionCallContent(name="", call_id="call_1", arguments='{"query": "test"}'), - ] - ) - events = await bridge.from_agent_run_update(update) - - # Should not emit StateDeltaEvents - delta_events = [e for e in events if e.type == "STATE_DELTA"] - assert len(delta_events) == 0 - - -async def test_tool_call_without_name_or_id(): - """Test handling FunctionCallContent with no name and no call_id.""" - from agent_framework_ag_ui._events import AgentFrameworkEventBridge - - bridge = AgentFrameworkEventBridge(run_id="test_run", thread_id="test_thread") - - # This should not crash but log an error - update = AgentResponseUpdate(contents=[FunctionCallContent(name="", call_id="", arguments='{"arg": "val"}')]) - events = await bridge.from_agent_run_update(update) - - # Should emit ToolCallArgsEvent with generated ID - assert len(events) >= 1 - - -async def test_state_delta_count_logging(): - """Test that state delta count increments and logs at intervals.""" - from agent_framework_ag_ui._events import AgentFrameworkEventBridge - - bridge = AgentFrameworkEventBridge( - run_id="test_run", - thread_id="test_thread", - predict_state_config={"doc": {"tool": "write", "tool_argument": "text"}}, - ) - - # Emit multiple state deltas with different content each time - for i in range(15): - update = 
AgentResponseUpdate( - contents=[ - FunctionCallContent(name="", call_id="call_1", arguments=f'{{"text": "Content variation {i}"}}'), - ] - ) - # Set the tool name to match config - bridge.current_tool_call_name = "write" - await bridge.from_agent_run_update(update) - - # State delta count should have incremented (one per unique state update) - assert bridge.state_delta_count >= 1 - - -# Tests for list type tool results (MCP tool serialization) - - -async def test_tool_result_with_empty_list(): - """Test FunctionResultContent with empty list result.""" - from agent_framework_ag_ui._events import AgentFrameworkEventBridge - - bridge = AgentFrameworkEventBridge(run_id="test_run", thread_id="test_thread") - - update = AgentResponseUpdate(contents=[FunctionResultContent(call_id="call_123", result=[])]) - events = await bridge.from_agent_run_update(update) - - assert len(events) == 2 - assert events[0].type == "TOOL_CALL_END" - assert events[1].type == "TOOL_CALL_RESULT" - # Empty list serializes as JSON empty array - assert events[1].content == "[]" - - -async def test_tool_result_with_single_text_content(): - """Test FunctionResultContent with single TextContent-like item (MCP tool result).""" - from dataclasses import dataclass - - from agent_framework_ag_ui._events import AgentFrameworkEventBridge - - @dataclass - class MockTextContent: - text: str - - bridge = AgentFrameworkEventBridge(run_id="test_run", thread_id="test_thread") - - update = AgentResponseUpdate( - contents=[FunctionResultContent(call_id="call_123", result=[MockTextContent("Hello from MCP tool!")])] - ) - events = await bridge.from_agent_run_update(update) - - assert len(events) == 2 - assert events[0].type == "TOOL_CALL_END" - assert events[1].type == "TOOL_CALL_RESULT" - # TextContent text is extracted and serialized as JSON array - assert events[1].content == '["Hello from MCP tool!"]' - - -async def test_tool_result_with_multiple_text_contents(): - """Test FunctionResultContent with multiple 
TextContent-like items (MCP tool result).""" - from dataclasses import dataclass - - from agent_framework_ag_ui._events import AgentFrameworkEventBridge - - @dataclass - class MockTextContent: - text: str - - bridge = AgentFrameworkEventBridge(run_id="test_run", thread_id="test_thread") - - update = AgentResponseUpdate( - contents=[ - FunctionResultContent( - call_id="call_123", - result=[MockTextContent("First result"), MockTextContent("Second result")], - ) - ] - ) - events = await bridge.from_agent_run_update(update) - - assert len(events) == 2 - assert events[0].type == "TOOL_CALL_END" - assert events[1].type == "TOOL_CALL_RESULT" - # Multiple TextContent items should return JSON array - assert events[1].content == '["First result", "Second result"]' - - -async def test_tool_result_with_model_dump_objects(): - """Test FunctionResultContent with Pydantic BaseModel objects.""" - from pydantic import BaseModel - - from agent_framework_ag_ui._events import AgentFrameworkEventBridge - - class MockModel(BaseModel): - value: int - - bridge = AgentFrameworkEventBridge(run_id="test_run", thread_id="test_thread") - - update = AgentResponseUpdate( - contents=[FunctionResultContent(call_id="call_123", result=[MockModel(value=1), MockModel(value=2)])] - ) - events = await bridge.from_agent_run_update(update) - - assert len(events) == 2 - assert events[1].type == "TOOL_CALL_RESULT" - # Should be properly serialized JSON array without double escaping - assert events[1].content == '[{"value": 1}, {"value": 2}]' diff --git a/python/packages/ag-ui/tests/test_helpers.py b/python/packages/ag-ui/tests/test_helpers.py new file mode 100644 index 0000000000..b4a7e9f047 --- /dev/null +++ b/python/packages/ag-ui/tests/test_helpers.py @@ -0,0 +1,502 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +"""Tests for orchestration helper functions.""" + +from agent_framework import ChatMessage, Content + +from agent_framework_ag_ui._orchestration._helpers import ( + approval_steps, + build_safe_metadata, + ensure_tool_call_entry, + is_state_context_message, + is_step_based_approval, + latest_approval_response, + pending_tool_call_ids, + schema_has_steps, + select_approval_tool_name, + tool_name_for_call_id, +) + + +class TestPendingToolCallIds: + """Tests for pending_tool_call_ids function.""" + + def test_empty_messages(self): + """Returns empty set for empty messages list.""" + result = pending_tool_call_ids([]) + assert result == set() + + def test_no_tool_calls(self): + """Returns empty set when no tool calls in messages.""" + messages = [ + ChatMessage(role="user", contents=[Content.from_text("Hello")]), + ChatMessage(role="assistant", contents=[Content.from_text("Hi there")]), + ] + result = pending_tool_call_ids(messages) + assert result == set() + + def test_pending_tool_call(self): + """Returns pending tool call ID when no result exists.""" + messages = [ + ChatMessage( + role="assistant", + contents=[Content.from_function_call(call_id="call_123", name="get_weather", arguments="{}")], + ), + ] + result = pending_tool_call_ids(messages) + assert result == {"call_123"} + + def test_resolved_tool_call(self): + """Returns empty set when tool call has result.""" + messages = [ + ChatMessage( + role="assistant", + contents=[Content.from_function_call(call_id="call_123", name="get_weather", arguments="{}")], + ), + ChatMessage( + role="tool", + contents=[Content.from_function_result(call_id="call_123", result="sunny")], + ), + ] + result = pending_tool_call_ids(messages) + assert result == set() + + def test_multiple_tool_calls_some_resolved(self): + """Returns only unresolved tool call IDs.""" + messages = [ + ChatMessage( + role="assistant", + contents=[ + Content.from_function_call(call_id="call_1", name="tool_a", arguments="{}"), + 
Content.from_function_call(call_id="call_2", name="tool_b", arguments="{}"), + Content.from_function_call(call_id="call_3", name="tool_c", arguments="{}"), + ], + ), + ChatMessage( + role="tool", + contents=[Content.from_function_result(call_id="call_1", result="result_a")], + ), + ChatMessage( + role="tool", + contents=[Content.from_function_result(call_id="call_3", result="result_c")], + ), + ] + result = pending_tool_call_ids(messages) + assert result == {"call_2"} + + +class TestIsStateContextMessage: + """Tests for is_state_context_message function.""" + + def test_state_context_message(self): + """Returns True for state context message.""" + message = ChatMessage( + role="system", + contents=[Content.from_text("Current state of the application: {}")], + ) + assert is_state_context_message(message) is True + + def test_non_system_message(self): + """Returns False for non-system message.""" + message = ChatMessage( + role="user", + contents=[Content.from_text("Current state of the application: {}")], + ) + assert is_state_context_message(message) is False + + def test_system_message_without_state_prefix(self): + """Returns False for system message without state prefix.""" + message = ChatMessage( + role="system", + contents=[Content.from_text("You are a helpful assistant.")], + ) + assert is_state_context_message(message) is False + + def test_empty_contents(self): + """Returns False for message with empty contents.""" + message = ChatMessage(role="system", contents=[]) + assert is_state_context_message(message) is False + + +class TestEnsureToolCallEntry: + """Tests for ensure_tool_call_entry function.""" + + def test_creates_new_entry(self): + """Creates new entry when ID not found.""" + tool_calls_by_id: dict = {} + pending_tool_calls: list = [] + + entry = ensure_tool_call_entry("call_123", tool_calls_by_id, pending_tool_calls) + + assert entry["id"] == "call_123" + assert entry["type"] == "function" + assert entry["function"]["name"] == "" + assert 
entry["function"]["arguments"] == "" + assert "call_123" in tool_calls_by_id + assert len(pending_tool_calls) == 1 + + def test_returns_existing_entry(self): + """Returns existing entry when ID found.""" + existing_entry = { + "id": "call_123", + "type": "function", + "function": {"name": "get_weather", "arguments": '{"city": "NYC"}'}, + } + tool_calls_by_id = {"call_123": existing_entry} + pending_tool_calls: list = [] + + entry = ensure_tool_call_entry("call_123", tool_calls_by_id, pending_tool_calls) + + assert entry is existing_entry + assert entry["function"]["name"] == "get_weather" + assert len(pending_tool_calls) == 0 # Not added again + + +class TestToolNameForCallId: + """Tests for tool_name_for_call_id function.""" + + def test_returns_tool_name(self): + """Returns tool name for valid entry.""" + tool_calls_by_id = { + "call_123": { + "id": "call_123", + "function": {"name": "get_weather", "arguments": "{}"}, + } + } + result = tool_name_for_call_id(tool_calls_by_id, "call_123") + assert result == "get_weather" + + def test_returns_none_for_missing_id(self): + """Returns None when ID not found.""" + tool_calls_by_id: dict = {} + result = tool_name_for_call_id(tool_calls_by_id, "call_123") + assert result is None + + def test_returns_none_for_missing_function(self): + """Returns None when function key missing.""" + tool_calls_by_id = {"call_123": {"id": "call_123"}} + result = tool_name_for_call_id(tool_calls_by_id, "call_123") + assert result is None + + def test_returns_none_for_non_dict_function(self): + """Returns None when function is not a dict.""" + tool_calls_by_id = {"call_123": {"id": "call_123", "function": "not_a_dict"}} + result = tool_name_for_call_id(tool_calls_by_id, "call_123") + assert result is None + + def test_returns_none_for_empty_name(self): + """Returns None when name is empty.""" + tool_calls_by_id = {"call_123": {"id": "call_123", "function": {"name": "", "arguments": "{}"}}} + result = tool_name_for_call_id(tool_calls_by_id, 
"call_123") + assert result is None + + +class TestSchemaHasSteps: + """Tests for schema_has_steps function.""" + + def test_schema_with_steps_array(self): + """Returns True when schema has steps array property.""" + schema = {"properties": {"steps": {"type": "array"}}} + assert schema_has_steps(schema) is True + + def test_schema_without_steps(self): + """Returns False when schema doesn't have steps.""" + schema = {"properties": {"name": {"type": "string"}}} + assert schema_has_steps(schema) is False + + def test_schema_with_non_array_steps(self): + """Returns False when steps is not array type.""" + schema = {"properties": {"steps": {"type": "string"}}} + assert schema_has_steps(schema) is False + + def test_non_dict_schema(self): + """Returns False for non-dict schema.""" + assert schema_has_steps(None) is False + assert schema_has_steps("not a dict") is False + assert schema_has_steps([]) is False + + def test_missing_properties(self): + """Returns False when properties key is missing.""" + schema = {"type": "object"} + assert schema_has_steps(schema) is False + + def test_non_dict_properties(self): + """Returns False when properties is not a dict.""" + schema = {"properties": "not a dict"} + assert schema_has_steps(schema) is False + + def test_non_dict_steps(self): + """Returns False when steps is not a dict.""" + schema = {"properties": {"steps": "not a dict"}} + assert schema_has_steps(schema) is False + + +class TestSelectApprovalToolName: + """Tests for select_approval_tool_name function.""" + + def test_none_client_tools(self): + """Returns None when client_tools is None.""" + result = select_approval_tool_name(None) + assert result is None + + def test_empty_client_tools(self): + """Returns None when client_tools is empty.""" + result = select_approval_tool_name([]) + assert result is None + + def test_finds_approval_tool(self): + """Returns tool name when tool has steps schema.""" + + class MockTool: + name = "generate_task_steps" + + def 
parameters(self): + return {"properties": {"steps": {"type": "array"}}} + + result = select_approval_tool_name([MockTool()]) + assert result == "generate_task_steps" + + def test_skips_tool_without_name(self): + """Skips tools without name attribute.""" + + class MockToolNoName: + def parameters(self): + return {"properties": {"steps": {"type": "array"}}} + + result = select_approval_tool_name([MockToolNoName()]) + assert result is None + + def test_skips_tool_without_parameters_method(self): + """Skips tools without callable parameters method.""" + + class MockToolNoParams: + name = "some_tool" + parameters = "not callable" + + result = select_approval_tool_name([MockToolNoParams()]) + assert result is None + + def test_skips_tool_without_steps_schema(self): + """Skips tools that don't have steps in schema.""" + + class MockToolNoSteps: + name = "other_tool" + + def parameters(self): + return {"properties": {"data": {"type": "string"}}} + + result = select_approval_tool_name([MockToolNoSteps()]) + assert result is None + + +class TestBuildSafeMetadata: + """Tests for build_safe_metadata function.""" + + def test_none_metadata(self): + """Returns empty dict for None metadata.""" + result = build_safe_metadata(None) + assert result == {} + + def test_empty_metadata(self): + """Returns empty dict for empty metadata.""" + result = build_safe_metadata({}) + assert result == {} + + def test_string_values_under_limit(self): + """Preserves string values under 512 chars.""" + metadata = {"key1": "short value", "key2": "another value"} + result = build_safe_metadata(metadata) + assert result == metadata + + def test_truncates_long_string_values(self): + """Truncates string values over 512 chars.""" + long_value = "x" * 1000 + metadata = {"key": long_value} + result = build_safe_metadata(metadata) + assert len(result["key"]) == 512 + assert result["key"] == "x" * 512 + + def test_non_string_values_serialized(self): + """Serializes non-string values to JSON.""" + metadata = 
{"count": 42, "items": ["a", "b"]} + result = build_safe_metadata(metadata) + assert result["count"] == "42" + assert result["items"] == '["a", "b"]' + + def test_truncates_serialized_values(self): + """Truncates serialized JSON values over 512 chars.""" + long_list = list(range(200)) # Will serialize to >512 chars + metadata = {"data": long_list} + result = build_safe_metadata(metadata) + assert len(result["data"]) == 512 + + +class TestLatestApprovalResponse: + """Tests for latest_approval_response function.""" + + def test_empty_messages(self): + """Returns None for empty messages.""" + result = latest_approval_response([]) + assert result is None + + def test_no_approval_response(self): + """Returns None when no approval response in last message.""" + messages = [ + ChatMessage(role="assistant", contents=[Content.from_text("Hello")]), + ] + result = latest_approval_response(messages) + assert result is None + + def test_finds_approval_response(self): + """Returns approval response from last message.""" + # Create a function call content first + fc = Content.from_function_call(call_id="call_123", name="test_tool", arguments="{}") + approval_content = Content.from_function_approval_response( + approved=True, + id="approval_123", + function_call=fc, + ) + messages = [ + ChatMessage(role="user", contents=[approval_content]), + ] + result = latest_approval_response(messages) + assert result is approval_content + + +class TestApprovalSteps: + """Tests for approval_steps function.""" + + def test_steps_from_ag_ui_state_args(self): + """Extracts steps from ag_ui_state_args.""" + fc = Content.from_function_call(call_id="call_123", name="test_tool", arguments="{}") + approval = Content.from_function_approval_response( + approved=True, + id="approval_123", + function_call=fc, + additional_properties={"ag_ui_state_args": {"steps": [{"id": 1}, {"id": 2}]}}, + ) + result = approval_steps(approval) + assert result == [{"id": 1}, {"id": 2}] + + def 
test_steps_from_function_call(self): + """Extracts steps from function call arguments.""" + fc = Content.from_function_call( + call_id="call_123", + name="test", + arguments='{"steps": [{"step": 1}]}', + ) + approval = Content.from_function_approval_response( + approved=True, + id="approval_123", + function_call=fc, + ) + result = approval_steps(approval) + assert result == [{"step": 1}] + + def test_empty_steps_when_no_state_args(self): + """Returns empty list when no ag_ui_state_args.""" + fc = Content.from_function_call(call_id="call_123", name="test_tool", arguments="{}") + approval = Content.from_function_approval_response( + approved=True, + id="approval_123", + function_call=fc, + ) + result = approval_steps(approval) + assert result == [] + + def test_empty_steps_when_state_args_not_dict(self): + """Returns empty list when ag_ui_state_args is not a dict.""" + fc = Content.from_function_call(call_id="call_123", name="test_tool", arguments="{}") + approval = Content.from_function_approval_response( + approved=True, + id="approval_123", + function_call=fc, + additional_properties={"ag_ui_state_args": "not a dict"}, + ) + result = approval_steps(approval) + assert result == [] + + def test_empty_steps_when_steps_not_list(self): + """Returns empty list when steps is not a list.""" + fc = Content.from_function_call(call_id="call_123", name="test_tool", arguments="{}") + approval = Content.from_function_approval_response( + approved=True, + id="approval_123", + function_call=fc, + additional_properties={"ag_ui_state_args": {"steps": "not a list"}}, + ) + result = approval_steps(approval) + assert result == [] + + +class TestIsStepBasedApproval: + """Tests for is_step_based_approval function.""" + + def test_returns_true_when_has_steps(self): + """Returns True when approval has steps.""" + fc = Content.from_function_call(call_id="call_123", name="test_tool", arguments="{}") + approval = Content.from_function_approval_response( + approved=True, + id="approval_123", 
+ function_call=fc, + additional_properties={"ag_ui_state_args": {"steps": [{"id": 1}]}}, + ) + result = is_step_based_approval(approval, None) + assert result is True + + def test_returns_false_no_steps_no_function_call(self): + """Returns False when no steps and no function call.""" + # Create content directly to have no function_call + approval = Content( + type="function_approval_response", + function_call=None, + ) + result = is_step_based_approval(approval, None) + assert result is False + + def test_returns_false_no_predict_config(self): + """Returns False when no predict_state_config.""" + fc = Content.from_function_call(call_id="call_123", name="some_tool", arguments="{}") + approval = Content.from_function_approval_response( + approved=True, + id="approval_123", + function_call=fc, + ) + result = is_step_based_approval(approval, None) + assert result is False + + def test_returns_true_when_tool_matches_config(self): + """Returns True when tool matches predict_state_config with steps.""" + fc = Content.from_function_call(call_id="call_123", name="generate_steps", arguments="{}") + approval = Content.from_function_approval_response( + approved=True, + id="approval_123", + function_call=fc, + ) + config = {"steps": {"tool": "generate_steps", "tool_argument": "steps"}} + result = is_step_based_approval(approval, config) + assert result is True + + def test_returns_false_when_tool_not_in_config(self): + """Returns False when tool not in predict_state_config.""" + fc = Content.from_function_call(call_id="call_123", name="other_tool", arguments="{}") + approval = Content.from_function_approval_response( + approved=True, + id="approval_123", + function_call=fc, + ) + config = {"steps": {"tool": "generate_steps", "tool_argument": "steps"}} + result = is_step_based_approval(approval, config) + assert result is False + + def test_returns_false_when_tool_arg_not_steps(self): + """Returns False when tool_argument is not 'steps'.""" + fc = 
Content.from_function_call(call_id="call_123", name="generate_steps", arguments="{}") + approval = Content.from_function_approval_response( + approved=True, + id="approval_123", + function_call=fc, + ) + config = {"document": {"tool": "generate_steps", "tool_argument": "content"}} + result = is_step_based_approval(approval, config) + assert result is False diff --git a/python/packages/ag-ui/tests/test_human_in_the_loop.py b/python/packages/ag-ui/tests/test_human_in_the_loop.py deleted file mode 100644 index 00e64472b6..0000000000 --- a/python/packages/ag-ui/tests/test_human_in_the_loop.py +++ /dev/null @@ -1,180 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. - -"""Tests for human in the loop (function approval requests).""" - -from agent_framework import AgentResponseUpdate, FunctionApprovalRequestContent, FunctionCallContent - -from agent_framework_ag_ui._events import AgentFrameworkEventBridge - - -async def test_function_approval_request_emission(): - """Test that CustomEvent is emitted for FunctionApprovalRequestContent.""" - # Set require_confirmation=False to test just the function_approval_request event - bridge = AgentFrameworkEventBridge( - run_id="test_run", - thread_id="test_thread", - require_confirmation=False, - ) - - # Create approval request - func_call = FunctionCallContent( - call_id="call_123", - name="send_email", - arguments={"to": "user@example.com", "subject": "Test"}, - ) - approval_request = FunctionApprovalRequestContent( - id="approval_001", - function_call=func_call, - ) - - update = AgentResponseUpdate(contents=[approval_request]) - events = await bridge.from_agent_run_update(update) - - # Should emit ToolCallEndEvent + CustomEvent for approval request - assert len(events) == 2 - - # First event: ToolCallEndEvent to close the tool call - assert events[0].type == "TOOL_CALL_END" - assert events[0].tool_call_id == "call_123" - - # Second event: CustomEvent with approval details - event = events[1] - assert event.type == "CUSTOM" 
- assert event.name == "function_approval_request" - assert event.value["id"] == "approval_001" - assert event.value["function_call"]["call_id"] == "call_123" - assert event.value["function_call"]["name"] == "send_email" - assert event.value["function_call"]["arguments"]["to"] == "user@example.com" - assert event.value["function_call"]["arguments"]["subject"] == "Test" - - -async def test_function_approval_request_with_confirm_changes(): - """Test that confirm_changes is also emitted when require_confirmation=True.""" - bridge = AgentFrameworkEventBridge( - run_id="test_run", - thread_id="test_thread", - require_confirmation=True, - ) - - func_call = FunctionCallContent( - call_id="call_456", - name="delete_file", - arguments={"path": "/tmp/test.txt"}, - ) - approval_request = FunctionApprovalRequestContent( - id="approval_002", - function_call=func_call, - ) - - update = AgentResponseUpdate(contents=[approval_request]) - events = await bridge.from_agent_run_update(update) - - # Should emit: ToolCallEndEvent, CustomEvent, and confirm_changes (Start, Args, End) = 5 events - assert len(events) == 5 - - # Check ToolCallEndEvent - assert events[0].type == "TOOL_CALL_END" - assert events[0].tool_call_id == "call_456" - - # Check function_approval_request CustomEvent - assert events[1].type == "CUSTOM" - assert events[1].name == "function_approval_request" - - # Check confirm_changes tool call events - assert events[2].type == "TOOL_CALL_START" - assert events[2].tool_call_name == "confirm_changes" - assert events[3].type == "TOOL_CALL_ARGS" - # Verify confirm_changes includes function info for Dojo UI - import json - - args = json.loads(events[3].delta) - assert args["function_name"] == "delete_file" - assert args["function_call_id"] == "call_456" - assert args["function_arguments"] == {"path": "/tmp/test.txt"} - assert args["steps"] == [ - { - "description": "Execute delete_file", - "status": "enabled", - } - ] - assert events[4].type == "TOOL_CALL_END" - - -async def 
test_multiple_approval_requests(): - """Test handling multiple approval requests in one update.""" - # Set require_confirmation=False to simplify the test - bridge = AgentFrameworkEventBridge( - run_id="test_run", - thread_id="test_thread", - require_confirmation=False, - ) - - func_call_1 = FunctionCallContent( - call_id="call_1", - name="create_event", - arguments={"title": "Meeting"}, - ) - approval_1 = FunctionApprovalRequestContent( - id="approval_1", - function_call=func_call_1, - ) - - func_call_2 = FunctionCallContent( - call_id="call_2", - name="book_room", - arguments={"room": "Conference A"}, - ) - approval_2 = FunctionApprovalRequestContent( - id="approval_2", - function_call=func_call_2, - ) - - update = AgentResponseUpdate(contents=[approval_1, approval_2]) - events = await bridge.from_agent_run_update(update) - - # Should emit ToolCallEndEvent + CustomEvent for each approval (4 events total) - assert len(events) == 4 - - # Events should alternate: End, Custom, End, Custom - assert events[0].type == "TOOL_CALL_END" - assert events[0].tool_call_id == "call_1" - - assert events[1].type == "CUSTOM" - assert events[1].name == "function_approval_request" - assert events[1].value["id"] == "approval_1" - - assert events[2].type == "TOOL_CALL_END" - assert events[2].tool_call_id == "call_2" - - assert events[3].type == "CUSTOM" - assert events[3].name == "function_approval_request" - assert events[3].value["id"] == "approval_2" - - -async def test_function_approval_request_sets_stop_flag(): - """Test that function approval request sets should_stop_after_confirm flag. - - This ensures the orchestrator stops the run after emitting the approval request, - allowing the UI to send back an approval response. 
- """ - bridge = AgentFrameworkEventBridge( - run_id="test_run", - thread_id="test_thread", - ) - - assert bridge.should_stop_after_confirm is False - - func_call = FunctionCallContent( - call_id="call_stop_test", - name="get_datetime", - arguments={}, - ) - approval_request = FunctionApprovalRequestContent( - id="approval_stop_test", - function_call=func_call, - ) - - update = AgentResponseUpdate(contents=[approval_request]) - await bridge.from_agent_run_update(update) - - assert bridge.should_stop_after_confirm is True diff --git a/python/packages/ag-ui/tests/test_message_adapters.py b/python/packages/ag-ui/tests/test_message_adapters.py index 9173314a28..4f6c3f1d42 100644 --- a/python/packages/ag-ui/tests/test_message_adapters.py +++ b/python/packages/ag-ui/tests/test_message_adapters.py @@ -5,7 +5,7 @@ import json import pytest -from agent_framework import ChatMessage, FunctionCallContent, FunctionResultContent, Role, TextContent +from agent_framework import ChatMessage, Content, Role from agent_framework_ag_ui._message_adapters import ( agent_framework_messages_to_agui, @@ -24,7 +24,7 @@ def sample_agui_message(): @pytest.fixture def sample_agent_framework_message(): """Create a sample Agent Framework message.""" - return ChatMessage(role=Role.USER, contents=[TextContent(text="Hello")], message_id="msg-123") + return ChatMessage(role=Role.USER, contents=[Content.from_text(text="Hello")], message_id="msg-123") def test_agui_to_agent_framework_basic(sample_agui_message): @@ -89,7 +89,7 @@ def test_agui_tool_result_to_agent_framework(): assert message.role == Role.USER assert len(message.contents) == 1 - assert isinstance(message.contents[0], TextContent) + assert message.contents[0].type == "text" assert message.contents[0].text == '{"accepted": true, "steps": []}' assert message.additional_properties is not None @@ -141,7 +141,7 @@ def test_agui_tool_approval_updates_tool_call_arguments(): assert len(messages) == 2 assistant_msg = messages[0] - func_call = 
next(content for content in assistant_msg.contents if isinstance(content, FunctionCallContent)) + func_call = next(content for content in assistant_msg.contents if content.type == "function_call") assert func_call.arguments == { "steps": [ {"description": "Boil water", "status": "enabled"}, @@ -157,11 +157,9 @@ def test_agui_tool_approval_updates_tool_call_arguments(): ] } - from agent_framework import FunctionApprovalResponseContent - approval_msg = messages[1] approval_content = next( - content for content in approval_msg.contents if isinstance(content, FunctionApprovalResponseContent) + content for content in approval_msg.contents if content.type == "function_approval_response" ) assert approval_content.function_call.parse_arguments() == { "steps": [ @@ -211,12 +209,9 @@ def test_agui_tool_approval_from_confirm_changes_maps_to_function_call(): ] messages = agui_messages_to_agent_framework(messages_input) - - from agent_framework import FunctionApprovalResponseContent - approval_msg = messages[1] approval_content = next( - content for content in approval_msg.contents if isinstance(content, FunctionApprovalResponseContent) + content for content in approval_msg.contents if content.type == "function_approval_response" ) assert approval_content.function_call.call_id == "call_tool" @@ -259,12 +254,9 @@ def test_agui_tool_approval_from_confirm_changes_falls_back_to_sibling_call(): ] messages = agui_messages_to_agent_framework(messages_input) - - from agent_framework import FunctionApprovalResponseContent - approval_msg = messages[1] approval_content = next( - content for content in approval_msg.contents if isinstance(content, FunctionApprovalResponseContent) + content for content in approval_msg.contents if content.type == "function_approval_response" ) assert approval_content.function_call.call_id == "call_tool" @@ -315,12 +307,9 @@ def test_agui_tool_approval_from_generate_task_steps_maps_to_function_call(): ] messages = 
agui_messages_to_agent_framework(messages_input) - - from agent_framework import FunctionApprovalResponseContent - approval_msg = messages[1] approval_content = next( - content for content in approval_msg.contents if isinstance(content, FunctionApprovalResponseContent) + content for content in approval_msg.contents if content.type == "function_approval_response" ) assert approval_content.function_call.call_id == "call_tool" @@ -380,15 +369,14 @@ def test_agui_function_approvals(): assert msg.role == Role.USER assert len(msg.contents) == 2 - from agent_framework import FunctionApprovalResponseContent - - assert isinstance(msg.contents[0], FunctionApprovalResponseContent) + assert msg.contents[0].type == "function_approval_response" assert msg.contents[0].approved is True assert msg.contents[0].id == "approval-1" assert msg.contents[0].function_call.name == "search" assert msg.contents[0].function_call.call_id == "call-1" - assert isinstance(msg.contents[1], FunctionApprovalResponseContent) + assert msg.contents[1].type == "function_approval_response" + assert msg.contents[1].id == "approval-2" assert msg.contents[1].approved is False @@ -406,7 +394,7 @@ def test_agui_non_string_content(): assert len(messages) == 1 assert len(messages[0].contents) == 1 - assert isinstance(messages[0].contents[0], TextContent) + assert messages[0].contents[0].type == "text" assert "nested" in messages[0].contents[0].text @@ -440,9 +428,9 @@ def test_agui_with_tool_calls_to_agent_framework(): assert msg.role == Role.ASSISTANT assert msg.message_id == "msg-789" # First content is text, second is the function call - assert isinstance(msg.contents[0], TextContent) + assert msg.contents[0].type == "text" assert msg.contents[0].text == "Calling tool" - assert isinstance(msg.contents[1], FunctionCallContent) + assert msg.contents[1].type == "function_call" assert msg.contents[1].call_id == "call-123" assert msg.contents[1].name == "get_weather" assert msg.contents[1].arguments == 
{"location": "Seattle"} @@ -453,8 +441,8 @@ def test_agent_framework_to_agui_with_tool_calls(): msg = ChatMessage( role=Role.ASSISTANT, contents=[ - TextContent(text="Calling tool"), - FunctionCallContent(call_id="call-123", name="search", arguments={"query": "test"}), + Content.from_text(text="Calling tool"), + Content.from_function_call(call_id="call-123", name="search", arguments={"query": "test"}), ], message_id="msg-456", ) @@ -477,7 +465,7 @@ def test_agent_framework_to_agui_multiple_text_contents(): """Test concatenating multiple text contents.""" msg = ChatMessage( role=Role.ASSISTANT, - contents=[TextContent(text="Part 1 "), TextContent(text="Part 2")], + contents=[Content.from_text(text="Part 1 "), Content.from_text(text="Part 2")], ) messages = agent_framework_messages_to_agui([msg]) @@ -488,7 +476,7 @@ def test_agent_framework_to_agui_multiple_text_contents(): def test_agent_framework_to_agui_no_message_id(): """Test message without message_id - should auto-generate ID.""" - msg = ChatMessage(role=Role.USER, contents=[TextContent(text="Hello")]) + msg = ChatMessage(role=Role.USER, contents=[Content.from_text(text="Hello")]) messages = agent_framework_messages_to_agui([msg]) @@ -500,7 +488,7 @@ def test_agent_framework_to_agui_no_message_id(): def test_agent_framework_to_agui_system_role(): """Test system role conversion.""" - msg = ChatMessage(role=Role.SYSTEM, contents=[TextContent(text="System")]) + msg = ChatMessage(role=Role.SYSTEM, contents=[Content.from_text(text="System")]) messages = agent_framework_messages_to_agui([msg]) @@ -510,7 +498,7 @@ def test_agent_framework_to_agui_system_role(): def test_extract_text_from_contents(): """Test extracting text from contents list.""" - contents = [TextContent(text="Hello "), TextContent(text="World")] + contents = [Content.from_text(text="Hello "), Content.from_text(text="World")] result = extract_text_from_contents(contents) @@ -533,7 +521,7 @@ def __init__(self, text: str): def 
test_extract_text_from_custom_contents(): """Test extracting text from custom content objects.""" - contents = [CustomTextContent(text="Custom "), TextContent(text="Mixed")] + contents = [CustomTextContent(text="Custom "), Content.from_text(text="Mixed")] result = extract_text_from_contents(contents) @@ -547,7 +535,7 @@ def test_agent_framework_to_agui_function_result_dict(): """Test converting FunctionResultContent with dict result to AG-UI.""" msg = ChatMessage( role=Role.TOOL, - contents=[FunctionResultContent(call_id="call-123", result={"key": "value", "count": 42})], + contents=[Content.from_function_result(call_id="call-123", result={"key": "value", "count": 42})], message_id="msg-789", ) @@ -564,7 +552,7 @@ def test_agent_framework_to_agui_function_result_none(): """Test converting FunctionResultContent with None result to AG-UI.""" msg = ChatMessage( role=Role.TOOL, - contents=[FunctionResultContent(call_id="call-123", result=None)], + contents=[Content.from_function_result(call_id="call-123", result=None)], message_id="msg-789", ) @@ -580,7 +568,7 @@ def test_agent_framework_to_agui_function_result_string(): """Test converting FunctionResultContent with string result to AG-UI.""" msg = ChatMessage( role=Role.TOOL, - contents=[FunctionResultContent(call_id="call-123", result="plain text result")], + contents=[Content.from_function_result(call_id="call-123", result="plain text result")], message_id="msg-789", ) @@ -595,7 +583,7 @@ def test_agent_framework_to_agui_function_result_empty_list(): """Test converting FunctionResultContent with empty list result to AG-UI.""" msg = ChatMessage( role=Role.TOOL, - contents=[FunctionResultContent(call_id="call-123", result=[])], + contents=[Content.from_function_result(call_id="call-123", result=[])], message_id="msg-789", ) @@ -617,7 +605,7 @@ class MockTextContent: msg = ChatMessage( role=Role.TOOL, - contents=[FunctionResultContent(call_id="call-123", result=[MockTextContent("Hello from MCP!")])], + 
contents=[Content.from_function_result(call_id="call-123", result=[MockTextContent("Hello from MCP!")])], message_id="msg-789", ) @@ -640,7 +628,7 @@ class MockTextContent: msg = ChatMessage( role=Role.TOOL, contents=[ - FunctionResultContent( + Content.from_function_result( call_id="call-123", result=[MockTextContent("First result"), MockTextContent("Second result")], ) @@ -654,3 +642,109 @@ class MockTextContent: agui_msg = messages[0] # Multiple items should return JSON array assert agui_msg["content"] == '["First result", "Second result"]' + + +# Additional tests for better coverage + + +def test_extract_text_from_contents_empty(): + """Test extracting text from empty contents.""" + result = extract_text_from_contents([]) + assert result == "" + + +def test_extract_text_from_contents_multiple(): + """Test extracting text from multiple text contents.""" + contents = [ + Content.from_text("Hello "), + Content.from_text("World"), + ] + result = extract_text_from_contents(contents) + assert result == "Hello World" + + +def test_extract_text_from_contents_non_text(): + """Test extracting text ignores non-text contents.""" + contents = [ + Content.from_text("Hello"), + Content.from_function_call(call_id="call_1", name="tool", arguments="{}"), + ] + result = extract_text_from_contents(contents) + assert result == "Hello" + + +def test_agui_to_agent_framework_with_tool_calls(): + """Test converting AG-UI message with tool_calls.""" + messages = [ + { + "role": "assistant", + "content": "", + "tool_calls": [ + { + "id": "call_123", + "type": "function", + "function": {"name": "get_weather", "arguments": '{"city": "NYC"}'}, + } + ], + } + ] + + result = agui_messages_to_agent_framework(messages) + + assert len(result) == 1 + assert len(result[0].contents) == 1 + assert result[0].contents[0].type == "function_call" + assert result[0].contents[0].name == "get_weather" + + +def test_agui_to_agent_framework_tool_result(): + """Test converting AG-UI tool result message.""" + 
messages = [ + { + "role": "assistant", + "content": "", + "tool_calls": [ + { + "id": "call_123", + "type": "function", + "function": {"name": "get_weather", "arguments": "{}"}, + } + ], + }, + { + "role": "tool", + "content": "Sunny", + "toolCallId": "call_123", + }, + ] + + result = agui_messages_to_agent_framework(messages) + + assert len(result) == 2 + # Second message should be tool result + tool_msg = result[1] + assert tool_msg.role == Role.TOOL + assert tool_msg.contents[0].type == "function_result" + assert tool_msg.contents[0].result == "Sunny" + + +def test_agui_messages_to_snapshot_format_empty(): + """Test converting empty messages to snapshot format.""" + result = agui_messages_to_snapshot_format([]) + assert result == [] + + +def test_agui_messages_to_snapshot_format_basic(): + """Test converting messages to snapshot format.""" + messages = [ + {"role": "user", "content": "Hello", "id": "msg_1"}, + {"role": "assistant", "content": "Hi there", "id": "msg_2"}, + ] + + result = agui_messages_to_snapshot_format(messages) + + assert len(result) == 2 + assert result[0]["role"] == "user" + assert result[0]["content"] == "Hello" + assert result[1]["role"] == "assistant" + assert result[1]["content"] == "Hi there" diff --git a/python/packages/ag-ui/tests/test_message_hygiene.py b/python/packages/ag-ui/tests/test_message_hygiene.py index 380ff438bd..ecc01de3cb 100644 --- a/python/packages/ag-ui/tests/test_message_hygiene.py +++ b/python/packages/ag-ui/tests/test_message_hygiene.py @@ -1,6 +1,6 @@ # Copyright (c) Microsoft. All rights reserved. 
-from agent_framework import ChatMessage, FunctionCallContent, FunctionResultContent, TextContent +from agent_framework import ChatMessage, Content from agent_framework_ag_ui._message_adapters import _deduplicate_messages, _sanitize_tool_history @@ -10,7 +10,7 @@ def test_sanitize_tool_history_injects_confirm_changes_result() -> None: ChatMessage( role="assistant", contents=[ - FunctionCallContent( + Content.from_function_call( name="confirm_changes", call_id="call_confirm_123", arguments='{"changes": "test"}', @@ -19,7 +19,7 @@ def test_sanitize_tool_history_injects_confirm_changes_result() -> None: ), ChatMessage( role="user", - contents=[TextContent(text='{"accepted": true}')], + contents=[Content.from_text(text='{"accepted": true}')], ), ] @@ -37,11 +37,11 @@ def test_deduplicate_messages_prefers_non_empty_tool_results() -> None: messages = [ ChatMessage( role="tool", - contents=[FunctionResultContent(call_id="call1", result="")], + contents=[Content.from_function_result(call_id="call1", result="")], ), ChatMessage( role="tool", - contents=[FunctionResultContent(call_id="call1", result="result data")], + contents=[Content.from_function_result(call_id="call1", result="result data")], ), ] diff --git a/python/packages/ag-ui/tests/test_orchestrators.py b/python/packages/ag-ui/tests/test_orchestrators.py deleted file mode 100644 index 279ddedc82..0000000000 --- a/python/packages/ag-ui/tests/test_orchestrators.py +++ /dev/null @@ -1,307 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. 
- -"""Tests for AG-UI orchestrators.""" - -from collections.abc import AsyncGenerator -from typing import Any -from unittest.mock import MagicMock - -from ag_ui.core import BaseEvent, RunFinishedEvent -from agent_framework import ( - AgentResponseUpdate, - AgentThread, - BaseChatClient, - ChatAgent, - ChatResponseUpdate, - FunctionInvocationConfiguration, - TextContent, - ai_function, -) - -from agent_framework_ag_ui._agent import AgentConfig -from agent_framework_ag_ui._orchestrators import DefaultOrchestrator, ExecutionContext - - -@ai_function -def server_tool() -> str: - """Server-executable tool.""" - return "server" - - -def _create_mock_chat_agent( - tools: list[Any] | None = None, - response_format: Any = None, - capture_tools: list[Any] | None = None, - capture_messages: list[Any] | None = None, -) -> ChatAgent: - """Create a ChatAgent with mocked chat client for testing. - - Args: - tools: Tools to configure on the agent. - response_format: Response format to configure. - capture_tools: If provided, tools passed to run_stream will be appended here. - capture_messages: If provided, messages passed to run_stream will be appended here. 
- """ - mock_chat_client = MagicMock(spec=BaseChatClient) - mock_chat_client.function_invocation_configuration = FunctionInvocationConfiguration() - - agent = ChatAgent( - chat_client=mock_chat_client, - tools=tools or [server_tool], - response_format=response_format, - ) - - # Create a mock run_stream that captures parameters and yields a simple response - async def mock_run_stream( - messages: list[Any], - *, - # thread: AgentThread, - # tools: list[Any] | None = None, - # **kwargs: Any, - # ) -> AsyncGenerator[AgentRunResponseUpdate, None]: - # self.seen_tools = tools - # yield AgentRunResponseUpdate( - # contents=[TextContent(text="ok")], - # role="assistant", - # response_id=thread.metadata.get("ag_ui_run_id"), # type: ignore[attr-defined] (metadata always created in orchestrator) - # raw_representation=ChatResponseUpdate( - # contents=[TextContent(text="ok")], - # conversation_id=thread.metadata.get("ag_ui_thread_id"), # type: ignore[attr-defined] (metadata always created in orchestrator) - # response_id=thread.metadata.get("ag_ui_run_id"), # type: ignore[attr-defined] (metadata always created in orchestrator) - # ), - # ) - thread: AgentThread, - tools: list[Any] | None = None, - **kwargs: Any, - ) -> AsyncGenerator[AgentResponseUpdate, None]: - if capture_tools is not None and tools is not None: - capture_tools.extend(tools) - if capture_messages is not None: - capture_messages.extend(messages) - yield AgentResponseUpdate( - contents=[TextContent(text="ok")], - role="assistant", - response_id=thread.metadata.get("ag_ui_run_id"), # type: ignore[attr-defined] (metadata always created in orchestrator) - raw_representation=ChatResponseUpdate( - contents=[TextContent(text="ok")], - conversation_id=thread.metadata.get("ag_ui_thread_id"), # type: ignore[attr-defined] (metadata always created in orchestrator) - response_id=thread.metadata.get("ag_ui_run_id"), # type: ignore[attr-defined] (metadata always created in orchestrator) - ), - ) - - # Patch the run_stream 
method - agent.run_stream = mock_run_stream # type: ignore[method-assign] - - return agent - - -async def test_default_orchestrator_merges_client_tools() -> None: - """Client tool declarations are merged with server tools before running agent.""" - captured_tools: list[Any] = [] - agent = _create_mock_chat_agent(tools=[server_tool], capture_tools=captured_tools) - orchestrator = DefaultOrchestrator() - - input_data = { - "messages": [ - { - "role": "user", - "content": [{"type": "input_text", "text": "Hello"}], - } - ], - "tools": [ - { - "name": "get_weather", - "description": "Client weather lookup.", - "parameters": { - "type": "object", - "properties": {"location": {"type": "string"}}, - "required": ["location"], - }, - } - ], - } - - context = ExecutionContext( - input_data=input_data, - agent=agent, - config=AgentConfig(), - ) - - events = [] - async for event in orchestrator.run(context): - events.append(event) - - assert len(captured_tools) > 0 - tool_names = [getattr(tool, "name", "?") for tool in captured_tools] - assert "server_tool" in tool_names - assert "get_weather" in tool_names - assert agent.chat_client.function_invocation_configuration.additional_tools - - -async def test_default_orchestrator_with_camel_case_ids() -> None: - """Client tool is able to extract camelCase IDs.""" - agent = _create_mock_chat_agent() - orchestrator = DefaultOrchestrator() - - input_data = { - "runId": "test-camelcase-runid", - "threadId": "test-camelcase-threadid", - "messages": [ - { - "role": "user", - "content": [{"type": "input_text", "text": "Hello"}], - } - ], - "tools": [], - } - - context = ExecutionContext( - input_data=input_data, - agent=agent, - config=AgentConfig(), - ) - - events = [] - async for event in orchestrator.run(context): - events.append(event) - - # assert the last event has the expected run_id and thread_id - assert isinstance(events[-1], RunFinishedEvent) - last_event = events[-1] - assert last_event.run_id == "test-camelcase-runid" - assert 
last_event.thread_id == "test-camelcase-threadid" - - -async def test_default_orchestrator_with_snake_case_ids() -> None: - """Client tool is able to extract snake_case IDs.""" - agent = _create_mock_chat_agent() - orchestrator = DefaultOrchestrator() - - input_data = { - "run_id": "test-snakecase-runid", - "thread_id": "test-snakecase-threadid", - "messages": [ - { - "role": "user", - "content": [{"type": "input_text", "text": "Hello"}], - } - ], - "tools": [], - } - - context = ExecutionContext( - input_data=input_data, - agent=agent, - config=AgentConfig(), - ) - - events: list[BaseEvent] = [] - async for event in orchestrator.run(context): - events.append(event) - - # assert the last event has the expected run_id and thread_id - assert isinstance(events[-1], RunFinishedEvent) - last_event = events[-1] - assert last_event.run_id == "test-snakecase-runid" - assert last_event.thread_id == "test-snakecase-threadid" - - -async def test_state_context_injected_when_tool_call_state_mismatch() -> None: - """State context should be injected when current state differs from tool call args.""" - captured_messages: list[Any] = [] - agent = _create_mock_chat_agent(tools=[], capture_messages=captured_messages) - orchestrator = DefaultOrchestrator() - - tool_recipe = {"title": "Salad", "special_preferences": []} - current_recipe = {"title": "Salad", "special_preferences": ["Vegetarian"]} - - input_data = { - "state": {"recipe": current_recipe}, - "messages": [ - {"role": "system", "content": "Instructions"}, - { - "role": "assistant", - "tool_calls": [ - { - "id": "call_1", - "type": "function", - "function": {"name": "update_recipe", "arguments": {"recipe": tool_recipe}}, - } - ], - }, - {"role": "user", "content": "What are the dietary preferences?"}, - ], - } - - context = ExecutionContext( - input_data=input_data, - agent=agent, - config=AgentConfig( - state_schema={"recipe": {"type": "object"}}, - predict_state_config={"recipe": {"tool": "update_recipe", "tool_argument": 
"recipe"}}, - require_confirmation=False, - ), - ) - - async for _event in orchestrator.run(context): - pass - - assert len(captured_messages) > 0 - state_messages = [] - for msg in captured_messages: - role_value = msg.role.value if hasattr(msg.role, "value") else str(msg.role) - if role_value != "system": - continue - for content in msg.contents or []: - if isinstance(content, TextContent) and content.text.startswith("Current state of the application:"): - state_messages.append(content.text) - assert state_messages - assert "Vegetarian" in state_messages[0] - - -async def test_state_context_not_injected_when_tool_call_matches_state() -> None: - """State context should be skipped when tool call args match current state.""" - captured_messages: list[Any] = [] - agent = _create_mock_chat_agent(tools=[], capture_messages=captured_messages) - orchestrator = DefaultOrchestrator() - - input_data = { - "messages": [ - {"role": "system", "content": "Instructions"}, - { - "role": "assistant", - "tool_calls": [ - { - "id": "call_1", - "type": "function", - "function": {"name": "update_recipe", "arguments": {"recipe": {}}}, - } - ], - }, - {"role": "user", "content": "What are the dietary preferences?"}, - ], - } - - context = ExecutionContext( - input_data=input_data, - agent=agent, - config=AgentConfig( - state_schema={"recipe": {"type": "object"}}, - predict_state_config={"recipe": {"tool": "update_recipe", "tool_argument": "recipe"}}, - require_confirmation=False, - ), - ) - - async for _event in orchestrator.run(context): - pass - - assert len(captured_messages) > 0 - state_messages = [] - for msg in captured_messages: - role_value = msg.role.value if hasattr(msg.role, "value") else str(msg.role) - if role_value != "system": - continue - for content in msg.contents or []: - if isinstance(content, TextContent) and content.text.startswith("Current state of the application:"): - state_messages.append(content.text) - assert not state_messages diff --git 
a/python/packages/ag-ui/tests/test_orchestrators_coverage.py b/python/packages/ag-ui/tests/test_orchestrators_coverage.py deleted file mode 100644 index 6c311d593a..0000000000 --- a/python/packages/ag-ui/tests/test_orchestrators_coverage.py +++ /dev/null @@ -1,878 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. - -"""Comprehensive tests for orchestrator coverage.""" - -import sys -from collections.abc import AsyncGenerator -from pathlib import Path -from types import SimpleNamespace -from typing import Any - -from agent_framework import ( - AgentResponseUpdate, - ChatMessage, - TextContent, - ai_function, -) -from pydantic import BaseModel - -from agent_framework_ag_ui._agent import AgentConfig -from agent_framework_ag_ui._orchestrators import DefaultOrchestrator, HumanInTheLoopOrchestrator - -sys.path.insert(0, str(Path(__file__).parent)) -from utils_test_ag_ui import StubAgent, TestExecutionContext - - -@ai_function(approval_mode="always_require") -def approval_tool(param: str) -> str: - """Tool requiring approval.""" - return f"executed: {param}" - - -DEFAULT_OPTIONS: dict[str, Any] = {"tools": [approval_tool], "response_format": None} - - -async def test_human_in_the_loop_json_decode_error() -> None: - """Test HumanInTheLoopOrchestrator handles invalid JSON in tool result.""" - orchestrator = HumanInTheLoopOrchestrator() - - input_data: dict[str, Any] = { - "messages": [ - { - "role": "tool", - "content": [{"type": "text", "text": "not valid json {"}], - } - ], - } - - messages = [ - ChatMessage( - role="tool", - contents=[TextContent(text="not valid json {")], - additional_properties={"is_tool_result": True}, - ) - ] - - agent = StubAgent( - default_options={"tools": [approval_tool], "response_format": None}, - updates=[AgentResponseUpdate(contents=[TextContent(text="response")], role="assistant")], - ) - context = TestExecutionContext( - input_data=input_data, - agent=agent, - config=AgentConfig(), - ) - context.set_messages(messages, 
normalize=False) - - assert orchestrator.can_handle(context) - - events: list[Any] = [] - async for event in orchestrator.run(context): - events.append(event) - - # Should emit RunErrorEvent for invalid JSON - error_events: list[Any] = [e for e in events if e.type == "RUN_ERROR"] - assert len(error_events) == 1 - assert "Invalid tool result format" in error_events[0].message - - -async def test_sanitize_tool_history_confirm_changes() -> None: - """Test sanitize_tool_history logic for confirm_changes synthetic result.""" - from agent_framework import ChatMessage, FunctionCallContent, TextContent - - # Create messages that will trigger confirm_changes synthetic result injection - messages = [ - ChatMessage( - role="assistant", - contents=[ - FunctionCallContent( - name="confirm_changes", - call_id="call_confirm_123", - arguments='{"changes": "test"}', - ) - ], - ), - ChatMessage( - role="user", - contents=[TextContent(text='{"accepted": true}')], - ), - ] - - # The sanitize_tool_history function is internal to DefaultOrchestrator.run - # We'll test it indirectly by checking the orchestrator processes it correctly - orchestrator = DefaultOrchestrator() - - # Use pre-constructed ChatMessage objects to bypass message adapter - input_data: dict[str, Any] = {"messages": []} - - agent = StubAgent( - default_options=DEFAULT_OPTIONS, - ) - context = TestExecutionContext( - input_data=input_data, - agent=agent, - config=AgentConfig(), - ) - # Override the messages property to use our pre-constructed messages - context.set_messages(messages) - - events: list[Any] = [] - async for event in orchestrator.run(context): - events.append(event) - - # Agent should receive synthetic tool result - assert len(agent.messages_received) > 0 - tool_messages = [ - msg - for msg in agent.messages_received - if (msg.role.value if hasattr(msg.role, "value") else str(msg.role)) == "tool" - ] - assert len(tool_messages) == 1 - assert str(tool_messages[0].contents[0].call_id) == "call_confirm_123" 
- assert tool_messages[0].contents[0].result == "Confirmed" - - -async def test_sanitize_tool_history_orphaned_tool_result() -> None: - """Test sanitize_tool_history removes orphaned tool results.""" - from agent_framework import ChatMessage, FunctionResultContent, TextContent - - # Tool result without preceding assistant tool call - messages = [ - ChatMessage( - role="tool", - contents=[FunctionResultContent(call_id="orphan_123", result="orphaned data")], - ), - ChatMessage( - role="user", - contents=[TextContent(text="Hello")], - ), - ] - - orchestrator = DefaultOrchestrator() - input_data: dict[str, Any] = {"messages": []} - agent = StubAgent( - default_options=DEFAULT_OPTIONS, - ) - context = TestExecutionContext( - input_data=input_data, - agent=agent, - config=AgentConfig(), - ) - context.set_messages(messages) - - events: list[Any] = [] - async for event in orchestrator.run(context): - events.append(event) - - # Orphaned tool result should be filtered out - tool_messages = [ - msg - for msg in agent.messages_received - if (msg.role.value if hasattr(msg.role, "value") else str(msg.role)) == "tool" - ] - assert len(tool_messages) == 0 - - -async def test_orphaned_tool_result_sanitization() -> None: - """Test that orphaned tool results are filtered out.""" - orchestrator = DefaultOrchestrator() - - input_data: dict[str, Any] = { - "messages": [ - { - "role": "tool", - "content": [{"type": "tool_result", "tool_call_id": "orphan_123", "content": "result"}], - }, - { - "role": "user", - "content": [{"type": "text", "text": "Hello"}], - }, - ], - } - - agent = StubAgent( - default_options=DEFAULT_OPTIONS, - ) - context = TestExecutionContext( - input_data=input_data, - agent=agent, - config=AgentConfig(), - ) - - events: list[Any] = [] - async for event in orchestrator.run(context): - events.append(event) - - # Orphaned tool result should be filtered, only user message remains - tool_messages = [ - msg - for msg in agent.messages_received - if (msg.role.value if 
hasattr(msg.role, "value") else str(msg.role)) == "tool" - ] - assert len(tool_messages) == 0 - - -async def test_deduplicate_messages_empty_tool_results() -> None: - """Test deduplicate_messages prefers non-empty tool results.""" - from agent_framework import ChatMessage, FunctionCallContent, FunctionResultContent - - messages = [ - ChatMessage( - role="assistant", - contents=[FunctionCallContent(name="test_tool", call_id="call_789", arguments="{}")], - ), - ChatMessage( - role="tool", - contents=[FunctionResultContent(call_id="call_789", result="")], - ), - ChatMessage( - role="tool", - contents=[FunctionResultContent(call_id="call_789", result="real data")], - ), - ] - - orchestrator = DefaultOrchestrator() - input_data: dict[str, Any] = {"messages": []} - agent = StubAgent( - default_options=DEFAULT_OPTIONS, - ) - context = TestExecutionContext( - input_data=input_data, - agent=agent, - config=AgentConfig(), - ) - context.set_messages(messages) - - events: list[Any] = [] - async for event in orchestrator.run(context): - events.append(event) - - # Should have only one tool result with actual data - tool_messages = [ - msg - for msg in agent.messages_received - if (msg.role.value if hasattr(msg.role, "value") else str(msg.role)) == "tool" - ] - assert len(tool_messages) == 1 - assert tool_messages[0].contents[0].result == "real data" - - -async def test_deduplicate_messages_duplicate_assistant_tool_calls() -> None: - """Test deduplicate_messages removes duplicate assistant tool call messages.""" - from agent_framework import ChatMessage, FunctionCallContent, FunctionResultContent - - messages = [ - ChatMessage( - role="assistant", - contents=[FunctionCallContent(name="test_tool", call_id="call_abc", arguments="{}")], - ), - ChatMessage( - role="assistant", - contents=[FunctionCallContent(name="test_tool", call_id="call_abc", arguments="{}")], - ), - ChatMessage( - role="tool", - contents=[FunctionResultContent(call_id="call_abc", result="result")], - ), - ] - - 
orchestrator = DefaultOrchestrator() - input_data: dict[str, Any] = {"messages": []} - agent = StubAgent( - default_options=DEFAULT_OPTIONS, - ) - context = TestExecutionContext( - input_data=input_data, - agent=agent, - config=AgentConfig(), - ) - context.set_messages(messages) - - events: list[Any] = [] - async for event in orchestrator.run(context): - events.append(event) - - # Should have only one assistant message - assistant_messages = [ - msg - for msg in agent.messages_received - if (msg.role.value if hasattr(msg.role, "value") else str(msg.role)) == "assistant" - ] - assert len(assistant_messages) == 1 - - -async def test_deduplicate_messages_duplicate_system_messages() -> None: - """Test that deduplication logic is invoked for system messages.""" - from agent_framework import ChatMessage, TextContent - - messages = [ - ChatMessage( - role="system", - contents=[TextContent(text="You are a helpful assistant.")], - ), - ChatMessage( - role="system", - contents=[TextContent(text="You are a helpful assistant.")], - ), - ChatMessage( - role="user", - contents=[TextContent(text="Hello")], - ), - ] - - orchestrator = DefaultOrchestrator() - input_data: dict[str, Any] = {"messages": []} - agent = StubAgent( - default_options=DEFAULT_OPTIONS, - ) - context = TestExecutionContext( - input_data=input_data, - agent=agent, - config=AgentConfig(), - ) - context.set_messages(messages) - - events: list[Any] = [] - async for event in orchestrator.run(context): - events.append(event) - - # Deduplication uses hash() which may not deduplicate identical content - # This test verifies deduplication logic runs without errors - system_messages = [ - msg - for msg in agent.messages_received - if (msg.role.value if hasattr(msg.role, "value") else str(msg.role)) == "system" - ] - # At least one system message should be present - assert len(system_messages) >= 1 - - -async def test_state_context_injection() -> None: - """Test state context message injection for first request.""" - 
orchestrator = DefaultOrchestrator() - - input_data: dict[str, Any] = { - "messages": [ - { - "role": "user", - "content": [{"type": "text", "text": "Hello"}], - } - ], - "state": {"items": ["apple", "banana"]}, - } - - agent = StubAgent( - default_options=DEFAULT_OPTIONS, - ) - context = TestExecutionContext( - input_data=input_data, - agent=agent, - config=AgentConfig(state_schema={"items": {"type": "array"}}), - ) - - events: list[Any] = [] - async for event in orchestrator.run(context): - events.append(event) - - # Should inject system message with current state - system_messages = [ - msg - for msg in agent.messages_received - if (msg.role.value if hasattr(msg.role, "value") else str(msg.role)) == "system" - ] - assert len(system_messages) == 1 - assert "apple" in system_messages[0].contents[0].text - assert "banana" in system_messages[0].contents[0].text - - -async def test_state_context_injection_with_tool_calls_and_input_state() -> None: - """Test state context is injected when state is provided, even with tool calls.""" - from agent_framework import ChatMessage, FunctionCallContent, FunctionResultContent, TextContent - - messages = [ - ChatMessage( - role="assistant", - contents=[FunctionCallContent(name="get_weather", call_id="call_xyz", arguments="{}")], - ), - ChatMessage( - role="tool", - contents=[FunctionResultContent(call_id="call_xyz", result="sunny")], - ), - ChatMessage( - role="user", - contents=[TextContent(text="Thanks")], - ), - ] - - orchestrator = DefaultOrchestrator() - input_data: dict[str, Any] = {"messages": [], "state": {"weather": "sunny"}} - agent = StubAgent( - default_options=DEFAULT_OPTIONS, - ) - context = TestExecutionContext( - input_data=input_data, - agent=agent, - config=AgentConfig(state_schema={"weather": {"type": "string"}}), - ) - context.set_messages(messages) - - events: list[Any] = [] - async for event in orchestrator.run(context): - events.append(event) - - # Should inject state context system message because input 
state is provided - system_messages = [ - msg - for msg in agent.messages_received - if (msg.role.value if hasattr(msg.role, "value") else str(msg.role)) == "system" - ] - assert len(system_messages) == 1 - - -async def test_structured_output_processing() -> None: - """Test structured output extraction and state update.""" - - class RecipeState(BaseModel): - ingredients: list[str] - message: str - - orchestrator = DefaultOrchestrator() - - input_data: dict[str, Any] = { - "messages": [ - { - "role": "user", - "content": [{"type": "text", "text": "Add tomato"}], - } - ], - } - - # Agent with structured output - agent = StubAgent( - default_options=DEFAULT_OPTIONS, - updates=[ - AgentResponseUpdate( - contents=[TextContent(text='{"ingredients": ["tomato"], "message": "Added tomato"}')], - role="assistant", - ) - ], - ) - agent.default_options["response_format"] = RecipeState - - context = TestExecutionContext( - input_data=input_data, - agent=agent, - config=AgentConfig(state_schema={"ingredients": {"type": "array"}}), - ) - - events: list[Any] = [] - async for event in orchestrator.run(context): - events.append(event) - - # Should emit StateSnapshotEvent with ingredients - state_events: list[Any] = [e for e in events if e.type == "STATE_SNAPSHOT"] - assert len(state_events) >= 1 - - # Should emit TextMessage with message field - text_content_events: list[Any] = [e for e in events if e.type == "TEXT_MESSAGE_CONTENT"] - assert len(text_content_events) >= 1 - assert any("Added tomato" in e.delta for e in text_content_events) - - -async def test_duplicate_client_tools_filtered() -> None: - """Test that client tools duplicating server tools are filtered out.""" - - @ai_function - def get_weather(location: str) -> str: - """Get weather for location.""" - return f"Weather in {location}" - - orchestrator = DefaultOrchestrator() - - input_data: dict[str, Any] = { - "messages": [ - { - "role": "user", - "content": [{"type": "text", "text": "Hello"}], - } - ], - "tools": [ - { 
- "name": "get_weather", - "description": "Client weather tool.", - "parameters": { - "type": "object", - "properties": {"location": {"type": "string"}}, - "required": ["location"], - }, - } - ], - } - - agent = StubAgent( - default_options=DEFAULT_OPTIONS, - ) - agent.default_options["tools"] = [get_weather] - - context = TestExecutionContext( - input_data=input_data, - agent=agent, - config=AgentConfig(), - ) - - events: list[Any] = [] - async for event in orchestrator.run(context): - events.append(event) - - # tools parameter should not be passed since client tool duplicates server tool - assert agent.tools_received is None - - -async def test_unique_client_tools_merged() -> None: - """Test that unique client tools are merged with server tools.""" - - @ai_function - def server_tool() -> str: - """Server tool.""" - return "server" - - orchestrator = DefaultOrchestrator() - - input_data: dict[str, Any] = { - "messages": [ - { - "role": "user", - "content": [{"type": "text", "text": "Hello"}], - } - ], - "tools": [ - { - "name": "client_tool", - "description": "Unique client tool.", - "parameters": { - "type": "object", - "properties": {"param": {"type": "string"}}, - "required": ["param"], - }, - } - ], - } - - agent = StubAgent( - default_options=DEFAULT_OPTIONS, - ) - agent.default_options["tools"] = [server_tool] - - context = TestExecutionContext( - input_data=input_data, - agent=agent, - config=AgentConfig(), - ) - - events: list[Any] = [] - async for event in orchestrator.run(context): - events.append(event) - - # tools parameter should be passed with both server and client tools - assert agent.tools_received is not None - tool_names = [getattr(tool, "name", None) for tool in agent.tools_received] - assert "server_tool" in tool_names - assert "client_tool" in tool_names - - -async def test_empty_messages_handling() -> None: - """Test orchestrator handles empty message list gracefully.""" - orchestrator = DefaultOrchestrator() - - input_data: dict[str, Any] = 
{"messages": []} - - agent = StubAgent( - default_options=DEFAULT_OPTIONS, - ) - context = TestExecutionContext( - input_data=input_data, - agent=agent, - config=AgentConfig(), - ) - - events: list[Any] = [] - async for event in orchestrator.run(context): - events.append(event) - - # Should emit run lifecycle events but not call agent - assert len(agent.messages_received) == 0 - run_started = [e for e in events if e.type == "RUN_STARTED"] - run_finished = [e for e in events if e.type == "RUN_FINISHED"] - assert len(run_started) == 1 - assert len(run_finished) == 1 - - -async def test_all_messages_filtered_handling() -> None: - """Test orchestrator handles case where all messages are filtered out.""" - orchestrator = DefaultOrchestrator() - - input_data: dict[str, Any] = { - "messages": [ - { - "role": "tool", - "content": [{"type": "tool_result", "tool_call_id": "orphan", "content": "data"}], - } - ] - } - - agent = StubAgent( - default_options=DEFAULT_OPTIONS, - ) - context = TestExecutionContext( - input_data=input_data, - agent=agent, - config=AgentConfig(), - ) - - events: list[Any] = [] - async for event in orchestrator.run(context): - events.append(event) - - # Should finish without calling agent - assert len(agent.messages_received) == 0 - run_finished = [e for e in events if e.type == "RUN_FINISHED"] - assert len(run_finished) == 1 - - -async def test_confirm_changes_with_invalid_json_fallback() -> None: - """Test confirm_changes with invalid JSON falls back to normal processing.""" - from agent_framework import ChatMessage, FunctionCallContent, TextContent - - messages = [ - ChatMessage( - role="assistant", - contents=[ - FunctionCallContent( - name="confirm_changes", - call_id="call_confirm_invalid", - arguments='{"changes": "test"}', - ) - ], - ), - ChatMessage( - role="user", - contents=[TextContent(text="invalid json {")], - ), - ] - - orchestrator = DefaultOrchestrator() - input_data: dict[str, Any] = {"messages": []} - agent = StubAgent( - 
default_options=DEFAULT_OPTIONS, - ) - context = TestExecutionContext( - input_data=input_data, - agent=agent, - config=AgentConfig(), - ) - context.set_messages(messages) - - events: list[Any] = [] - async for event in orchestrator.run(context): - events.append(event) - - # Invalid JSON should fall back - user message should be included - user_messages = [ - msg - for msg in agent.messages_received - if (msg.role.value if hasattr(msg.role, "value") else str(msg.role)) == "user" - ] - assert len(user_messages) == 1 - - -async def test_confirm_changes_closes_active_message_before_finish() -> None: - """Confirm-changes flow closes any active text message before run finishes.""" - from ag_ui.core import TextMessageEndEvent, TextMessageStartEvent - from agent_framework import FunctionCallContent, FunctionResultContent - - updates = [ - AgentResponseUpdate( - contents=[ - FunctionCallContent( - name="write_document_local", - call_id="call_1", - arguments='{"document": "Draft"}', - ) - ] - ), - AgentResponseUpdate(contents=[FunctionResultContent(call_id="call_1", result="Done")]), - ] - - orchestrator = DefaultOrchestrator() - input_data: dict[str, Any] = {"messages": [{"role": "user", "content": "Start"}]} - agent = StubAgent( - default_options=DEFAULT_OPTIONS, - updates=updates, - ) - context = TestExecutionContext( - input_data=input_data, - agent=agent, - config=AgentConfig( - predict_state_config={"document": {"tool": "write_document_local", "tool_argument": "document"}}, - require_confirmation=True, - ), - ) - - events: list[Any] = [] - async for event in orchestrator.run(context): - events.append(event) - - start_events = [e for e in events if isinstance(e, TextMessageStartEvent)] - end_events = [e for e in events if isinstance(e, TextMessageEndEvent)] - assert len(start_events) == 1 - assert len(end_events) == 1 - assert end_events[0].message_id == start_events[0].message_id - - end_index = events.index(end_events[0]) - finished_index = events.index([e for e in 
events if e.type == "RUN_FINISHED"][0]) - assert end_index < finished_index - - -async def test_tool_result_kept_when_call_id_matches() -> None: - """Test tool result is kept when call_id matches pending tool calls.""" - from agent_framework import ChatMessage, FunctionCallContent, FunctionResultContent - - messages = [ - ChatMessage( - role="assistant", - contents=[FunctionCallContent(name="get_data", call_id="call_match", arguments="{}")], - ), - ChatMessage( - role="tool", - contents=[FunctionResultContent(call_id="call_match", result="data")], - ), - ] - - orchestrator = DefaultOrchestrator() - input_data: dict[str, Any] = {"messages": []} - agent = StubAgent( - default_options=DEFAULT_OPTIONS, - ) - context = TestExecutionContext( - input_data=input_data, - agent=agent, - config=AgentConfig(), - ) - context.set_messages(messages) - - events: list[Any] = [] - async for event in orchestrator.run(context): - events.append(event) - - # Tool result should be kept - tool_messages = [ - msg - for msg in agent.messages_received - if (msg.role.value if hasattr(msg.role, "value") else str(msg.role)) == "tool" - ] - assert len(tool_messages) == 1 - assert tool_messages[0].contents[0].result == "data" - - -async def test_agent_protocol_fallback_paths() -> None: - """Test fallback paths for non-ChatAgent implementations.""" - - class CustomAgent: - """Custom agent without ChatAgent type.""" - - def __init__(self) -> None: - self.default_options: dict[str, Any] = {"tools": [], "response_format": None} - self.chat_client = SimpleNamespace(function_invocation_configuration=SimpleNamespace()) - self.messages_received: list[Any] = [] - - async def run_stream( - self, - messages: list[Any], - *, - thread: Any = None, - tools: list[Any] | None = None, - **kwargs: Any, - ) -> AsyncGenerator[AgentResponseUpdate, None]: - self.messages_received = messages - yield AgentResponseUpdate(contents=[TextContent(text="response")], role="assistant") - - from agent_framework import 
ChatMessage, TextContent - - messages = [ChatMessage(role="user", contents=[TextContent(text="Hello")])] - - orchestrator = DefaultOrchestrator() - input_data: dict[str, Any] = {"messages": []} - agent = CustomAgent() - context = TestExecutionContext( - input_data=input_data, - agent=agent, # type: ignore - config=AgentConfig(), - ) - context.set_messages(messages) - - events: list[Any] = [] - async for event in orchestrator.run(context): - events.append(event) - - # Should work with custom agent implementation - assert len(agent.messages_received) > 0 - - -async def test_initial_state_snapshot_with_array_schema() -> None: - """Test state initialization with array type schema.""" - from agent_framework import ChatMessage, TextContent - - messages = [ChatMessage(role="user", contents=[TextContent(text="Hello")])] - - orchestrator = DefaultOrchestrator() - input_data: dict[str, Any] = {"messages": [], "state": {}} - agent = StubAgent( - default_options=DEFAULT_OPTIONS, - ) - context = TestExecutionContext( - input_data=input_data, - agent=agent, - config=AgentConfig(state_schema={"items": {"type": "array"}}), - ) - context.set_messages(messages) - - events: list[Any] = [] - async for event in orchestrator.run(context): - events.append(event) - - # Should emit state snapshot with empty array for items - state_events: list[Any] = [e for e in events if e.type == "STATE_SNAPSHOT"] - assert len(state_events) >= 1 - - -async def test_response_format_skip_text_content() -> None: - """Test that response_format causes skip_text_content to be set.""" - - class OutputModel(BaseModel): - result: str - - from agent_framework import ChatMessage, TextContent - - messages = [ChatMessage(role="user", contents=[TextContent(text="Hello")])] - - orchestrator = DefaultOrchestrator() - input_data: dict[str, Any] = {"messages": []} - - agent = StubAgent( - default_options=DEFAULT_OPTIONS, - ) - agent.default_options["response_format"] = OutputModel - - context = TestExecutionContext( - 
input_data=input_data, - agent=agent, - config=AgentConfig(), - ) - context.set_messages(messages) - - events: list[Any] = [] - async for event in orchestrator.run(context): - events.append(event) - - # Test passes if no errors occur - verifies response_format code path - assert len(events) > 0 diff --git a/python/packages/ag-ui/tests/test_predictive_state.py b/python/packages/ag-ui/tests/test_predictive_state.py new file mode 100644 index 0000000000..31ad46fc3a --- /dev/null +++ b/python/packages/ag-ui/tests/test_predictive_state.py @@ -0,0 +1,320 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Tests for predictive state handling.""" + +from ag_ui.core import StateDeltaEvent + +from agent_framework_ag_ui._orchestration._predictive_state import PredictiveStateHandler + + +class TestPredictiveStateHandlerInit: + """Tests for PredictiveStateHandler initialization.""" + + def test_default_init(self): + """Initializes with default values.""" + handler = PredictiveStateHandler() + assert handler.predict_state_config == {} + assert handler.current_state == {} + assert handler.streaming_tool_args == "" + assert handler.last_emitted_state == {} + assert handler.state_delta_count == 0 + assert handler.pending_state_updates == {} + + def test_init_with_config(self): + """Initializes with provided config.""" + config = {"document": {"tool": "write_doc", "tool_argument": "content"}} + state = {"document": "initial"} + handler = PredictiveStateHandler(predict_state_config=config, current_state=state) + assert handler.predict_state_config == config + assert handler.current_state == state + + +class TestResetStreaming: + """Tests for reset_streaming method.""" + + def test_resets_streaming_state(self): + """Resets streaming-related state.""" + handler = PredictiveStateHandler() + handler.streaming_tool_args = "some accumulated args" + handler.state_delta_count = 5 + + handler.reset_streaming() + + assert handler.streaming_tool_args == "" + assert 
handler.state_delta_count == 0 + + +class TestExtractStateValue: + """Tests for extract_state_value method.""" + + def test_no_config(self): + """Returns None when no config.""" + handler = PredictiveStateHandler() + result = handler.extract_state_value("some_tool", {"arg": "value"}) + assert result is None + + def test_no_args(self): + """Returns None when args is None.""" + handler = PredictiveStateHandler(predict_state_config={"key": {"tool": "tool", "tool_argument": "arg"}}) + result = handler.extract_state_value("tool", None) + assert result is None + + def test_empty_args(self): + """Returns None when args is empty string.""" + handler = PredictiveStateHandler(predict_state_config={"key": {"tool": "tool", "tool_argument": "arg"}}) + result = handler.extract_state_value("tool", "") + assert result is None + + def test_tool_not_in_config(self): + """Returns None when tool not in config.""" + handler = PredictiveStateHandler(predict_state_config={"key": {"tool": "other_tool", "tool_argument": "arg"}}) + result = handler.extract_state_value("some_tool", {"arg": "value"}) + assert result is None + + def test_extracts_specific_argument(self): + """Extracts value from specific tool argument.""" + handler = PredictiveStateHandler( + predict_state_config={"document": {"tool": "write_doc", "tool_argument": "content"}} + ) + result = handler.extract_state_value("write_doc", {"content": "Hello world"}) + assert result == ("document", "Hello world") + + def test_extracts_with_wildcard(self): + """Extracts entire args with * wildcard.""" + handler = PredictiveStateHandler(predict_state_config={"data": {"tool": "update_data", "tool_argument": "*"}}) + args = {"key1": "value1", "key2": "value2"} + result = handler.extract_state_value("update_data", args) + assert result == ("data", args) + + def test_extracts_from_json_string(self): + """Extracts value from JSON string args.""" + handler = PredictiveStateHandler( + predict_state_config={"document": {"tool": "write_doc", 
"tool_argument": "content"}} + ) + result = handler.extract_state_value("write_doc", '{"content": "Hello world"}') + assert result == ("document", "Hello world") + + def test_argument_not_in_args(self): + """Returns None when tool_argument not in args.""" + handler = PredictiveStateHandler( + predict_state_config={"document": {"tool": "write_doc", "tool_argument": "content"}} + ) + result = handler.extract_state_value("write_doc", {"other": "value"}) + assert result is None + + +class TestIsPredictiveTool: + """Tests for is_predictive_tool method.""" + + def test_none_tool_name(self): + """Returns False for None tool name.""" + handler = PredictiveStateHandler(predict_state_config={"key": {"tool": "some_tool", "tool_argument": "arg"}}) + assert handler.is_predictive_tool(None) is False + + def test_no_config(self): + """Returns False when no config.""" + handler = PredictiveStateHandler() + assert handler.is_predictive_tool("some_tool") is False + + def test_tool_in_config(self): + """Returns True when tool is in config.""" + handler = PredictiveStateHandler(predict_state_config={"key": {"tool": "some_tool", "tool_argument": "arg"}}) + assert handler.is_predictive_tool("some_tool") is True + + def test_tool_not_in_config(self): + """Returns False when tool not in config.""" + handler = PredictiveStateHandler(predict_state_config={"key": {"tool": "other_tool", "tool_argument": "arg"}}) + assert handler.is_predictive_tool("some_tool") is False + + +class TestEmitStreamingDeltas: + """Tests for emit_streaming_deltas method.""" + + def test_no_tool_name(self): + """Returns empty list for None tool name.""" + handler = PredictiveStateHandler(predict_state_config={"key": {"tool": "tool", "tool_argument": "arg"}}) + result = handler.emit_streaming_deltas(None, '{"arg": "value"}') + assert result == [] + + def test_no_config(self): + """Returns empty list when no config.""" + handler = PredictiveStateHandler() + result = handler.emit_streaming_deltas("some_tool", '{"arg": 
"value"}') + assert result == [] + + def test_accumulates_args(self): + """Accumulates argument chunks.""" + handler = PredictiveStateHandler(predict_state_config={"doc": {"tool": "write", "tool_argument": "text"}}) + handler.emit_streaming_deltas("write", '{"text') + handler.emit_streaming_deltas("write", '": "hello') + assert handler.streaming_tool_args == '{"text": "hello' + + def test_emits_delta_on_complete_json(self): + """Emits delta when JSON is complete.""" + handler = PredictiveStateHandler(predict_state_config={"doc": {"tool": "write", "tool_argument": "text"}}) + events = handler.emit_streaming_deltas("write", '{"text": "hello"}') + assert len(events) == 1 + assert isinstance(events[0], StateDeltaEvent) + assert events[0].delta[0]["path"] == "/doc" + assert events[0].delta[0]["value"] == "hello" + assert events[0].delta[0]["op"] == "replace" + + def test_emits_delta_on_partial_json(self): + """Emits delta from partial JSON using regex.""" + handler = PredictiveStateHandler(predict_state_config={"doc": {"tool": "write", "tool_argument": "text"}}) + # First chunk - partial + events = handler.emit_streaming_deltas("write", '{"text": "hel') + assert len(events) == 1 + assert events[0].delta[0]["value"] == "hel" + + def test_does_not_emit_duplicate_deltas(self): + """Does not emit delta when value unchanged.""" + handler = PredictiveStateHandler(predict_state_config={"doc": {"tool": "write", "tool_argument": "text"}}) + # First emission + events1 = handler.emit_streaming_deltas("write", '{"text": "hello"}') + assert len(events1) == 1 + + # Reset and emit same value again + handler.streaming_tool_args = "" + events2 = handler.emit_streaming_deltas("write", '{"text": "hello"}') + assert len(events2) == 0 # No duplicate + + def test_emits_delta_on_value_change(self): + """Emits delta when value changes.""" + handler = PredictiveStateHandler(predict_state_config={"doc": {"tool": "write", "tool_argument": "text"}}) + # First value + events1 = 
handler.emit_streaming_deltas("write", '{"text": "hello"}') + assert len(events1) == 1 + + # Reset and new value + handler.streaming_tool_args = "" + events2 = handler.emit_streaming_deltas("write", '{"text": "world"}') + assert len(events2) == 1 + assert events2[0].delta[0]["value"] == "world" + + def test_tracks_pending_updates(self): + """Tracks pending state updates.""" + handler = PredictiveStateHandler(predict_state_config={"doc": {"tool": "write", "tool_argument": "text"}}) + handler.emit_streaming_deltas("write", '{"text": "hello"}') + assert handler.pending_state_updates == {"doc": "hello"} + + +class TestEmitPartialDeltas: + """Tests for _emit_partial_deltas method.""" + + def test_unescapes_newlines(self): + """Unescapes \\n in partial values.""" + handler = PredictiveStateHandler(predict_state_config={"doc": {"tool": "write", "tool_argument": "text"}}) + handler.streaming_tool_args = '{"text": "line1\\nline2' + events = handler._emit_partial_deltas("write") + assert len(events) == 1 + assert events[0].delta[0]["value"] == "line1\nline2" + + def test_handles_escaped_quotes_partially(self): + """Handles escaped quotes - regex stops at quote character.""" + handler = PredictiveStateHandler(predict_state_config={"doc": {"tool": "write", "tool_argument": "text"}}) + # The regex pattern [^"]* stops at ANY quote, including escaped ones. + # This is expected behavior for partial streaming - the full JSON + # will be parsed correctly when complete. 
+ handler.streaming_tool_args = '{"text": "say \\"hi' + events = handler._emit_partial_deltas("write") + assert len(events) == 1 + # Captures "say \" then the backslash gets converted to empty string + # by the replace("\\\\", "\\") first, then replace('\\"', '"') + # but since there's no closing quote, we get "say \" + # After .replace("\\\\", "\\") -> "say \" + # After .replace('\\"', '"') -> "say " (but actually still "say \" due to order) + # The actual result: backslash is preserved since it's not a valid escape sequence + assert events[0].delta[0]["value"] == "say \\" + + def test_unescapes_backslashes(self): + """Unescapes \\\\ in partial values.""" + handler = PredictiveStateHandler(predict_state_config={"doc": {"tool": "write", "tool_argument": "text"}}) + handler.streaming_tool_args = '{"text": "path\\\\to\\\\file' + events = handler._emit_partial_deltas("write") + assert len(events) == 1 + assert events[0].delta[0]["value"] == "path\\to\\file" + + +class TestEmitCompleteDeltas: + """Tests for _emit_complete_deltas method.""" + + def test_emits_for_matching_tool(self): + """Emits delta for tool matching config.""" + handler = PredictiveStateHandler(predict_state_config={"doc": {"tool": "write", "tool_argument": "text"}}) + events = handler._emit_complete_deltas("write", {"text": "content"}) + assert len(events) == 1 + assert events[0].delta[0]["value"] == "content" + + def test_skips_non_matching_tool(self): + """Skips tools not matching config.""" + handler = PredictiveStateHandler(predict_state_config={"doc": {"tool": "write", "tool_argument": "text"}}) + events = handler._emit_complete_deltas("other_tool", {"text": "content"}) + assert len(events) == 0 + + def test_handles_wildcard_argument(self): + """Handles * wildcard for entire args.""" + handler = PredictiveStateHandler(predict_state_config={"data": {"tool": "update", "tool_argument": "*"}}) + args = {"key1": "val1", "key2": "val2"} + events = handler._emit_complete_deltas("update", args) + assert 
len(events) == 1 + assert events[0].delta[0]["value"] == args + + def test_skips_missing_argument(self): + """Skips when tool_argument not in args.""" + handler = PredictiveStateHandler(predict_state_config={"doc": {"tool": "write", "tool_argument": "text"}}) + events = handler._emit_complete_deltas("write", {"other": "value"}) + assert len(events) == 0 + + +class TestCreateDeltaEvent: + """Tests for _create_delta_event method.""" + + def test_creates_event(self): + """Creates StateDeltaEvent with correct structure.""" + handler = PredictiveStateHandler() + event = handler._create_delta_event("key", "value") + + assert isinstance(event, StateDeltaEvent) + assert event.delta[0]["op"] == "replace" + assert event.delta[0]["path"] == "/key" + assert event.delta[0]["value"] == "value" + + def test_increments_count(self): + """Increments state_delta_count.""" + handler = PredictiveStateHandler() + handler._create_delta_event("key", "value") + assert handler.state_delta_count == 1 + handler._create_delta_event("key", "value2") + assert handler.state_delta_count == 2 + + +class TestApplyPendingUpdates: + """Tests for apply_pending_updates method.""" + + def test_applies_pending_to_current(self): + """Applies pending updates to current state.""" + handler = PredictiveStateHandler(current_state={"existing": "value"}) + handler.pending_state_updates = {"doc": "new content", "count": 5} + + handler.apply_pending_updates() + + assert handler.current_state == {"existing": "value", "doc": "new content", "count": 5} + + def test_clears_pending_updates(self): + """Clears pending updates after applying.""" + handler = PredictiveStateHandler() + handler.pending_state_updates = {"doc": "content"} + + handler.apply_pending_updates() + + assert handler.pending_state_updates == {} + + def test_overwrites_existing_keys(self): + """Overwrites existing keys in current state.""" + handler = PredictiveStateHandler(current_state={"doc": "old"}) + handler.pending_state_updates = {"doc": "new"} 
+ + handler.apply_pending_updates() + + assert handler.current_state["doc"] == "new" diff --git a/python/packages/ag-ui/tests/test_run.py b/python/packages/ag-ui/tests/test_run.py new file mode 100644 index 0000000000..a415000692 --- /dev/null +++ b/python/packages/ag-ui/tests/test_run.py @@ -0,0 +1,373 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Tests for _run.py helper functions and FlowState.""" + +from agent_framework import ChatMessage, Content + +from agent_framework_ag_ui._run import ( + FlowState, + _build_safe_metadata, + _create_state_context_message, + _has_only_tool_calls, + _inject_state_context, + _should_suppress_intermediate_snapshot, +) + + +class TestBuildSafeMetadata: + """Tests for _build_safe_metadata function.""" + + def test_none_metadata(self): + """Returns empty dict for None.""" + result = _build_safe_metadata(None) + assert result == {} + + def test_empty_metadata(self): + """Returns empty dict for empty dict.""" + result = _build_safe_metadata({}) + assert result == {} + + def test_short_string_values(self): + """Preserves short string values.""" + metadata = {"key1": "short", "key2": "value"} + result = _build_safe_metadata(metadata) + assert result == metadata + + def test_truncates_long_strings(self): + """Truncates strings over 512 chars.""" + long_value = "x" * 1000 + metadata = {"key": long_value} + result = _build_safe_metadata(metadata) + assert len(result["key"]) == 512 + + def test_serializes_non_strings(self): + """Serializes non-string values to JSON.""" + metadata = {"count": 42, "items": [1, 2, 3]} + result = _build_safe_metadata(metadata) + assert result["count"] == "42" + assert result["items"] == "[1, 2, 3]" + + def test_truncates_serialized_values(self): + """Truncates serialized values over 512 chars.""" + long_list = list(range(200)) + metadata = {"data": long_list} + result = _build_safe_metadata(metadata) + assert len(result["data"]) == 512 + + +class TestHasOnlyToolCalls: + """Tests for 
_has_only_tool_calls function.""" + + def test_only_tool_calls(self): + """Returns True when only function_call content.""" + contents = [ + Content.from_function_call(call_id="call_1", name="tool1", arguments="{}"), + ] + assert _has_only_tool_calls(contents) is True + + def test_tool_call_with_text(self): + """Returns False when both tool call and text.""" + contents = [ + Content.from_text("Some text"), + Content.from_function_call(call_id="call_1", name="tool1", arguments="{}"), + ] + assert _has_only_tool_calls(contents) is False + + def test_only_text(self): + """Returns False when only text.""" + contents = [Content.from_text("Just text")] + assert _has_only_tool_calls(contents) is False + + def test_empty_contents(self): + """Returns False for empty contents.""" + assert _has_only_tool_calls([]) is False + + def test_tool_call_with_empty_text(self): + """Returns True when text content has empty text.""" + contents = [ + Content.from_text(""), + Content.from_function_call(call_id="call_1", name="tool1", arguments="{}"), + ] + assert _has_only_tool_calls(contents) is True + + +class TestShouldSuppressIntermediateSnapshot: + """Tests for _should_suppress_intermediate_snapshot function.""" + + def test_no_tool_name(self): + """Returns False when no tool name.""" + result = _should_suppress_intermediate_snapshot( + None, {"key": {"tool": "write_doc", "tool_argument": "content"}}, False + ) + assert result is False + + def test_no_config(self): + """Returns False when no config.""" + result = _should_suppress_intermediate_snapshot("write_doc", None, False) + assert result is False + + def test_confirmation_required(self): + """Returns False when confirmation is required.""" + config = {"key": {"tool": "write_doc", "tool_argument": "content"}} + result = _should_suppress_intermediate_snapshot("write_doc", config, True) + assert result is False + + def test_tool_not_in_config(self): + """Returns False when tool not in config.""" + config = {"key": {"tool": 
"other_tool", "tool_argument": "content"}} + result = _should_suppress_intermediate_snapshot("write_doc", config, False) + assert result is False + + def test_suppresses_predictive_tool(self): + """Returns True for predictive tool without confirmation.""" + config = {"document": {"tool": "write_doc", "tool_argument": "content"}} + result = _should_suppress_intermediate_snapshot("write_doc", config, False) + assert result is True + + +class TestFlowState: + """Tests for FlowState dataclass.""" + + def test_default_values(self): + """Tests default initialization.""" + flow = FlowState() + assert flow.message_id is None + assert flow.tool_call_id is None + assert flow.tool_call_name is None + assert flow.waiting_for_approval is False + assert flow.current_state == {} + assert flow.accumulated_text == "" + assert flow.pending_tool_calls == [] + assert flow.tool_calls_by_id == {} + assert flow.tool_results == [] + assert flow.tool_calls_ended == set() + + def test_get_tool_name(self): + """Tests get_tool_name method.""" + flow = FlowState() + flow.tool_calls_by_id = {"call_123": {"function": {"name": "get_weather", "arguments": "{}"}}} + + assert flow.get_tool_name("call_123") == "get_weather" + assert flow.get_tool_name("nonexistent") is None + assert flow.get_tool_name(None) is None + + def test_get_tool_name_empty_name(self): + """Tests get_tool_name with empty name.""" + flow = FlowState() + flow.tool_calls_by_id = {"call_123": {"function": {"name": "", "arguments": "{}"}}} + + assert flow.get_tool_name("call_123") is None + + def test_get_pending_without_end(self): + """Tests get_pending_without_end method.""" + flow = FlowState() + flow.pending_tool_calls = [ + {"id": "call_1", "function": {"name": "tool1"}}, + {"id": "call_2", "function": {"name": "tool2"}}, + {"id": "call_3", "function": {"name": "tool3"}}, + ] + flow.tool_calls_ended = {"call_1", "call_3"} + + result = flow.get_pending_without_end() + assert len(result) == 1 + assert result[0]["id"] == "call_2" 
+ + +class TestCreateStateContextMessage: + """Tests for _create_state_context_message function.""" + + def test_no_state(self): + """Returns None when no state.""" + result = _create_state_context_message({}, {"properties": {}}) + assert result is None + + def test_no_schema(self): + """Returns None when no schema.""" + result = _create_state_context_message({"key": "value"}, {}) + assert result is None + + def test_creates_message(self): + """Creates state context message.""" + from agent_framework import Role + + state = {"document": "Hello world"} + schema = {"properties": {"document": {"type": "string"}}} + + result = _create_state_context_message(state, schema) + + assert result is not None + assert result.role == Role.SYSTEM + assert len(result.contents) == 1 + assert "Hello world" in result.contents[0].text + assert "Current state" in result.contents[0].text + + +class TestInjectStateContext: + """Tests for _inject_state_context function.""" + + def test_no_state_message(self): + """Returns original messages when no state context needed.""" + messages = [ChatMessage(role="user", contents=[Content.from_text("Hello")])] + result = _inject_state_context(messages, {}, {}) + assert result == messages + + def test_empty_messages(self): + """Returns empty list for empty messages.""" + result = _inject_state_context([], {"key": "value"}, {"properties": {}}) + assert result == [] + + def test_last_message_not_user(self): + """Returns original messages when last message is not from user.""" + messages = [ + ChatMessage(role="user", contents=[Content.from_text("Hello")]), + ChatMessage(role="assistant", contents=[Content.from_text("Hi")]), + ] + state = {"key": "value"} + schema = {"properties": {"key": {"type": "string"}}} + + result = _inject_state_context(messages, state, schema) + assert result == messages + + def test_injects_before_last_user_message(self): + """Injects state context before last user message.""" + from agent_framework import Role + + messages = [ 
+ ChatMessage(role="system", contents=[Content.from_text("You are helpful")]), + ChatMessage(role="user", contents=[Content.from_text("Hello")]), + ] + state = {"document": "content"} + schema = {"properties": {"document": {"type": "string"}}} + + result = _inject_state_context(messages, state, schema) + + assert len(result) == 3 + # System message first + assert result[0].role == Role.SYSTEM + assert "helpful" in result[0].contents[0].text + # State context second + assert result[1].role == Role.SYSTEM + assert "Current state" in result[1].contents[0].text + # User message last + assert result[2].role == Role.USER + assert "Hello" in result[2].contents[0].text + + +# Additional tests for _run.py functions + + +def test_emit_text_basic(): + """Test _emit_text emits correct events.""" + from agent_framework_ag_ui._run import _emit_text + + flow = FlowState() + content = Content.from_text("Hello world") + + events = _emit_text(content, flow) + + assert len(events) == 2 # TextMessageStartEvent + TextMessageContentEvent + assert flow.message_id is not None + assert flow.accumulated_text == "Hello world" + + +def test_emit_text_skip_empty(): + """Test _emit_text skips empty text.""" + from agent_framework_ag_ui._run import _emit_text + + flow = FlowState() + content = Content.from_text("") + + events = _emit_text(content, flow) + + assert len(events) == 0 + + +def test_emit_text_continues_existing_message(): + """Test _emit_text continues existing message.""" + from agent_framework_ag_ui._run import _emit_text + + flow = FlowState() + flow.message_id = "existing-id" + content = Content.from_text("more text") + + events = _emit_text(content, flow) + + assert len(events) == 1 # Only TextMessageContentEvent, no new start + assert flow.message_id == "existing-id" + + +def test_emit_text_skips_when_waiting_for_approval(): + """Test _emit_text skips when waiting for approval.""" + from agent_framework_ag_ui._run import _emit_text + + flow = FlowState() + 
flow.waiting_for_approval = True + content = Content.from_text("should skip") + + events = _emit_text(content, flow) + + assert len(events) == 0 + + +def test_emit_text_skips_when_skip_text_flag(): + """Test _emit_text skips with skip_text flag.""" + from agent_framework_ag_ui._run import _emit_text + + flow = FlowState() + content = Content.from_text("should skip") + + events = _emit_text(content, flow, skip_text=True) + + assert len(events) == 0 + + +def test_emit_tool_call_basic(): + """Test _emit_tool_call emits correct events.""" + from agent_framework_ag_ui._run import _emit_tool_call + + flow = FlowState() + content = Content.from_function_call( + call_id="call_123", + name="get_weather", + arguments='{"city": "NYC"}', + ) + + events = _emit_tool_call(content, flow) + + assert len(events) >= 1 # At least ToolCallStartEvent + assert flow.tool_call_id == "call_123" + assert flow.tool_call_name == "get_weather" + + +def test_emit_tool_call_generates_id(): + """Test _emit_tool_call generates ID when not provided.""" + from agent_framework_ag_ui._run import _emit_tool_call + + flow = FlowState() + # Create content without call_id + content = Content(type="function_call", name="test_tool", arguments="{}") + + events = _emit_tool_call(content, flow) + + assert len(events) >= 1 + assert flow.tool_call_id is not None # ID should be generated + + +def test_extract_approved_state_updates_no_handler(): + """Test _extract_approved_state_updates returns empty with no handler.""" + from agent_framework_ag_ui._run import _extract_approved_state_updates + + messages = [ChatMessage(role="user", contents=[Content.from_text("Hello")])] + result = _extract_approved_state_updates(messages, None) + assert result == {} + + +def test_extract_approved_state_updates_no_approval(): + """Test _extract_approved_state_updates returns empty when no approval content.""" + from agent_framework_ag_ui._orchestration._predictive_state import PredictiveStateHandler + from 
agent_framework_ag_ui._run import _extract_approved_state_updates + + handler = PredictiveStateHandler(predict_state_config={"doc": {"tool": "write", "tool_argument": "content"}}) + messages = [ChatMessage(role="user", contents=[Content.from_text("Hello")])] + result = _extract_approved_state_updates(messages, handler) + assert result == {} diff --git a/python/packages/ag-ui/tests/test_service_thread_id.py b/python/packages/ag-ui/tests/test_service_thread_id.py index 8c00f7b67c..eab60abf7a 100644 --- a/python/packages/ag-ui/tests/test_service_thread_id.py +++ b/python/packages/ag-ui/tests/test_service_thread_id.py @@ -7,7 +7,7 @@ from typing import Any from ag_ui.core import RunFinishedEvent, RunStartedEvent -from agent_framework import TextContent +from agent_framework import Content from agent_framework._types import AgentResponseUpdate, ChatResponseUpdate sys.path.insert(0, str(Path(__file__).parent)) @@ -20,10 +20,10 @@ async def test_service_thread_id_when_there_are_updates(): updates: list[AgentResponseUpdate] = [ AgentResponseUpdate( - contents=[TextContent(text="Hello, user!")], + contents=[Content.from_text(text="Hello, user!")], response_id="resp_67890", raw_representation=ChatResponseUpdate( - contents=[TextContent(text="Hello, user!")], + contents=[Content.from_text(text="Hello, user!")], conversation_id="conv_12345", response_id="resp_67890", ), diff --git a/python/packages/ag-ui/tests/test_shared_state.py b/python/packages/ag-ui/tests/test_shared_state.py deleted file mode 100644 index 469f5f5ad8..0000000000 --- a/python/packages/ag-ui/tests/test_shared_state.py +++ /dev/null @@ -1,108 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. 
- -"""Tests for shared state management.""" - -import sys -from pathlib import Path -from typing import Any - -import pytest -from ag_ui.core import StateSnapshotEvent -from agent_framework import ChatAgent, ChatResponseUpdate, TextContent - -from agent_framework_ag_ui._agent import AgentFrameworkAgent -from agent_framework_ag_ui._events import AgentFrameworkEventBridge - -sys.path.insert(0, str(Path(__file__).parent)) -from utils_test_ag_ui import StreamingChatClientStub, stream_from_updates - - -@pytest.fixture -def mock_agent() -> ChatAgent: - """Create a mock agent for testing.""" - updates = [ChatResponseUpdate(contents=[TextContent(text="Hello!")])] - chat_client = StreamingChatClientStub(stream_from_updates(updates)) - return ChatAgent(name="test_agent", instructions="Test agent", chat_client=chat_client) - - -def test_state_snapshot_event(): - """Test creating state snapshot events.""" - bridge = AgentFrameworkEventBridge(run_id="test-run", thread_id="test-thread") - - state = { - "recipe": { - "name": "Chocolate Chip Cookies", - "ingredients": ["flour", "sugar", "chocolate chips"], - "instructions": ["Mix ingredients", "Bake at 350°F"], - "servings": 24, - } - } - - event = bridge.create_state_snapshot_event(state) - - assert isinstance(event, StateSnapshotEvent) - assert event.snapshot == state - assert event.snapshot["recipe"]["name"] == "Chocolate Chip Cookies" - assert len(event.snapshot["recipe"]["ingredients"]) == 3 - - -def test_state_delta_event(): - """Test creating state delta events using JSON Patch format.""" - bridge = AgentFrameworkEventBridge(run_id="test-run", thread_id="test-thread") - - # JSON Patch operations (RFC 6902) - delta = [ - {"op": "add", "path": "/recipe/ingredients/-", "value": "vanilla extract"}, - {"op": "replace", "path": "/recipe/servings", "value": 30}, - ] - - event = bridge.create_state_delta_event(delta) - - assert event.delta == delta - assert len(event.delta) == 2 - assert event.delta[0]["op"] == "add" - assert 
event.delta[1]["op"] == "replace" - - -async def test_agent_with_initial_state(mock_agent: ChatAgent) -> None: - """Test agent emits state snapshot when initial state provided.""" - state_schema: dict[str, Any] = {"recipe": {"type": "object", "properties": {"name": {"type": "string"}}}} - - agent = AgentFrameworkAgent( - agent=mock_agent, - state_schema=state_schema, - ) - - initial_state = {"recipe": {"name": "Test Recipe"}} - - input_data: dict[str, Any] = { - "messages": [{"role": "user", "content": "Hello"}], - "state": initial_state, - } - - events: list[Any] = [] - async for event in agent.run_agent(input_data): - events.append(event) - - # Should have RunStartedEvent, StateSnapshotEvent, RunFinishedEvent at minimum - snapshot_events = [e for e in events if isinstance(e, StateSnapshotEvent)] - assert len(snapshot_events) == 1 - assert snapshot_events[0].snapshot == initial_state - - -async def test_agent_without_state_schema(mock_agent: ChatAgent) -> None: - """Test agent doesn't emit state events without state schema.""" - agent = AgentFrameworkAgent(agent=mock_agent) - - input_data: dict[str, Any] = { - "messages": [{"role": "user", "content": "Hello"}], - "state": {"some": "state"}, - } - - events: list[Any] = [] - async for event in agent.run_agent(input_data): - events.append(event) - - # Should NOT have any StateSnapshotEvent - snapshot_events = [e for e in events if isinstance(e, StateSnapshotEvent)] - assert len(snapshot_events) == 0 diff --git a/python/packages/ag-ui/tests/test_state_manager.py b/python/packages/ag-ui/tests/test_state_manager.py deleted file mode 100644 index bc0a7b6a19..0000000000 --- a/python/packages/ag-ui/tests/test_state_manager.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. 
- -from ag_ui.core import CustomEvent, EventType -from agent_framework import ChatMessage, TextContent - -from agent_framework_ag_ui._events import AgentFrameworkEventBridge -from agent_framework_ag_ui._orchestration._state_manager import StateManager - - -def test_state_manager_initializes_defaults_and_snapshot() -> None: - state_manager = StateManager( - state_schema={"items": {"type": "array"}, "metadata": {"type": "object"}}, - predict_state_config=None, - require_confirmation=True, - ) - current_state = state_manager.initialize({"metadata": {"a": 1}}) - bridge = AgentFrameworkEventBridge(run_id="run", thread_id="thread", current_state=current_state) - - snapshot_event = state_manager.initial_snapshot_event(bridge) - assert snapshot_event is not None - assert snapshot_event.snapshot["items"] == [] - assert snapshot_event.snapshot["metadata"] == {"a": 1} - - -def test_state_manager_predict_state_event_shape() -> None: - state_manager = StateManager( - state_schema=None, - predict_state_config={"doc": {"tool": "write_document_local", "tool_argument": "document"}}, - require_confirmation=True, - ) - predict_event = state_manager.predict_state_event() - assert isinstance(predict_event, CustomEvent) - assert predict_event.type == EventType.CUSTOM - assert predict_event.name == "PredictState" - assert predict_event.value[0]["state_key"] == "doc" - - -def test_state_context_only_when_new_user_turn() -> None: - state_manager = StateManager( - state_schema={"items": {"type": "array"}}, - predict_state_config=None, - require_confirmation=True, - ) - state_manager.initialize({"items": [1]}) - - assert state_manager.state_context_message(is_new_user_turn=False, conversation_has_tool_calls=False) is None - - message = state_manager.state_context_message(is_new_user_turn=True, conversation_has_tool_calls=False) - assert isinstance(message, ChatMessage) - assert isinstance(message.contents[0], TextContent) - assert "Current state of the application" in 
message.contents[0].text diff --git a/python/packages/ag-ui/tests/test_structured_output.py b/python/packages/ag-ui/tests/test_structured_output.py index b9a04353be..7c623f62d6 100644 --- a/python/packages/ag-ui/tests/test_structured_output.py +++ b/python/packages/ag-ui/tests/test_structured_output.py @@ -8,7 +8,7 @@ from pathlib import Path from typing import Any -from agent_framework import ChatAgent, ChatMessage, ChatOptions, ChatResponseUpdate, TextContent +from agent_framework import ChatAgent, ChatMessage, ChatOptions, ChatResponseUpdate, Content from pydantic import BaseModel sys.path.insert(0, str(Path(__file__).parent)) @@ -43,7 +43,7 @@ async def stream_fn( messages: MutableSequence[ChatMessage], options: ChatOptions, **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: yield ChatResponseUpdate( - contents=[TextContent(text='{"recipe": {"name": "Pasta"}, "message": "Here is your recipe"}')] + contents=[Content.from_text(text='{"recipe": {"name": "Pasta"}, "message": "Here is your recipe"}')] ) agent = ChatAgent(name="test", instructions="Test", chat_client=StreamingChatClientStub(stream_fn)) @@ -86,7 +86,7 @@ async def stream_fn( {"id": "2", "description": "Step 2", "status": "pending"}, ] } - yield ChatResponseUpdate(contents=[TextContent(text=json.dumps(steps_data))]) + yield ChatResponseUpdate(contents=[Content.from_text(text=json.dumps(steps_data))]) agent = ChatAgent(name="test", instructions="Test", chat_client=StreamingChatClientStub(stream_fn)) agent.default_options = ChatOptions(response_format=StepsOutput) @@ -118,7 +118,7 @@ async def test_structured_output_with_no_schema_match(): from agent_framework.ag_ui import AgentFrameworkAgent updates = [ - ChatResponseUpdate(contents=[TextContent(text='{"data": {"key": "value"}}')]), + ChatResponseUpdate(contents=[Content.from_text(text='{"data": {"key": "value"}}')]), ] agent = ChatAgent( @@ -156,7 +156,7 @@ class DataOutput(BaseModel): async def stream_fn( messages: MutableSequence[ChatMessage], 
options: ChatOptions, **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: - yield ChatResponseUpdate(contents=[TextContent(text='{"data": {"key": "value"}, "info": "processed"}')]) + yield ChatResponseUpdate(contents=[Content.from_text(text='{"data": {"key": "value"}, "info": "processed"}')]) agent = ChatAgent(name="test", instructions="Test", chat_client=StreamingChatClientStub(stream_fn)) agent.default_options = ChatOptions(response_format=DataOutput) @@ -185,7 +185,7 @@ async def test_no_structured_output_when_no_response_format(): """Test that structured output path is skipped when no response_format.""" from agent_framework.ag_ui import AgentFrameworkAgent - updates = [ChatResponseUpdate(contents=[TextContent(text="Regular text")])] + updates = [ChatResponseUpdate(contents=[Content.from_text(text="Regular text")])] agent = ChatAgent( name="test", @@ -216,7 +216,7 @@ async def stream_fn( messages: MutableSequence[ChatMessage], options: ChatOptions, **kwargs: Any ) -> AsyncIterator[ChatResponseUpdate]: output_data = {"recipe": {"name": "Salad"}, "message": "Fresh salad recipe ready"} - yield ChatResponseUpdate(contents=[TextContent(text=json.dumps(output_data))]) + yield ChatResponseUpdate(contents=[Content.from_text(text=json.dumps(output_data))]) agent = ChatAgent(name="test", instructions="Test", chat_client=StreamingChatClientStub(stream_fn)) agent.default_options = ChatOptions(response_format=RecipeOutput) diff --git a/python/packages/ag-ui/tests/test_tooling.py b/python/packages/ag-ui/tests/test_tooling.py index 23d82dda90..b8c9700cd4 100644 --- a/python/packages/ag-ui/tests/test_tooling.py +++ b/python/packages/ag-ui/tests/test_tooling.py @@ -129,3 +129,95 @@ def test_collect_server_tools_with_mcp_tools_via_public_property() -> None: assert "regular_tool" in names assert "mcp_function" in names assert len(tools) == 2 + + +# Additional tests for tooling coverage + + +def test_collect_server_tools_no_default_options() -> None: + """collect_server_tools 
returns empty list when agent has no default_options.""" + + class MockAgent: + pass + + agent = MockAgent() + tools = collect_server_tools(agent) + assert tools == [] + + +def test_register_additional_client_tools_no_tools() -> None: + """register_additional_client_tools does nothing with None tools.""" + mock_chat_client = MagicMock() + agent = ChatAgent(chat_client=mock_chat_client) + + # Should not raise + register_additional_client_tools(agent, None) + + +def test_register_additional_client_tools_no_chat_client() -> None: + """register_additional_client_tools does nothing when agent has no chat_client.""" + from agent_framework_ag_ui._orchestration._tooling import register_additional_client_tools + + class MockAgent: + pass + + agent = MockAgent() + tools = [DummyTool("x")] + + # Should not raise + register_additional_client_tools(agent, tools) + + +def test_merge_tools_no_client_tools() -> None: + """merge_tools returns None when no client tools.""" + server = [DummyTool("a")] + result = merge_tools(server, None) + assert result is None + + +def test_merge_tools_all_duplicates() -> None: + """merge_tools returns None when all client tools duplicate server tools.""" + server = [DummyTool("a"), DummyTool("b")] + client = [DummyTool("a"), DummyTool("b")] + result = merge_tools(server, client) + assert result is None + + +def test_merge_tools_empty_server() -> None: + """merge_tools works with empty server tools.""" + server: list = [] + client = [DummyTool("a"), DummyTool("b")] + result = merge_tools(server, client) + assert result is not None + assert len(result) == 2 + + +def test_merge_tools_with_approval_tools_no_client() -> None: + """merge_tools returns server tools when they have approval mode even without client tools.""" + + class ApprovalTool: + def __init__(self, name: str): + self.name = name + self.approval_mode = "always_require" + + server = [ApprovalTool("write_doc")] + result = merge_tools(server, None) + assert result is not None + assert 
len(result) == 1 + assert result[0].name == "write_doc" + + +def test_merge_tools_with_approval_tools_all_duplicates() -> None: + """merge_tools returns server tools with approval mode even when client duplicates.""" + + class ApprovalTool: + def __init__(self, name: str): + self.name = name + self.approval_mode = "always_require" + + server = [ApprovalTool("write_doc")] + client = [DummyTool("write_doc")] # Same name as server + result = merge_tools(server, client) + assert result is not None + assert len(result) == 1 + assert result[0].approval_mode == "always_require" diff --git a/python/packages/ag-ui/tests/test_types.py b/python/packages/ag-ui/tests/test_types.py index 3c61278d9e..6b0b00a687 100644 --- a/python/packages/ag-ui/tests/test_types.py +++ b/python/packages/ag-ui/tests/test_types.py @@ -2,7 +2,7 @@ """Tests for type definitions in _types.py.""" -from agent_framework_ag_ui._types import AgentState, PredictStateConfig, RunMetadata +from agent_framework_ag_ui._types import AgentState, AGUIRequest, PredictStateConfig, RunMetadata class TestPredictStateConfig: @@ -143,3 +143,83 @@ def test_agent_state_complex_messages(self) -> None: assert len(state["messages"]) == 2 assert "metadata" in state["messages"][0] assert "tool_calls" in state["messages"][1] + + +class TestAGUIRequest: + """Test AGUIRequest Pydantic model.""" + + def test_agui_request_minimal(self) -> None: + """Test creating AGUIRequest with only required fields.""" + request = AGUIRequest(messages=[{"role": "user", "content": "Hello"}]) + + assert len(request.messages) == 1 + assert request.messages[0]["content"] == "Hello" + assert request.run_id is None + assert request.thread_id is None + assert request.state is None + assert request.tools is None + assert request.context is None + assert request.forwarded_props is None + assert request.parent_run_id is None + + def test_agui_request_all_fields(self) -> None: + """Test creating AGUIRequest with all fields populated.""" + request = 
AGUIRequest( + messages=[{"role": "user", "content": "Hello"}], + run_id="run-123", + thread_id="thread-456", + state={"counter": 0}, + tools=[{"name": "search", "description": "Search tool"}], + context=[{"type": "document", "content": "Some context"}], + forwarded_props={"custom_key": "custom_value"}, + parent_run_id="parent-run-789", + ) + + assert request.run_id == "run-123" + assert request.thread_id == "thread-456" + assert request.state == {"counter": 0} + assert request.tools == [{"name": "search", "description": "Search tool"}] + assert request.context == [{"type": "document", "content": "Some context"}] + assert request.forwarded_props == {"custom_key": "custom_value"} + assert request.parent_run_id == "parent-run-789" + + def test_agui_request_model_dump_excludes_none(self) -> None: + """Test that model_dump(exclude_none=True) excludes None fields.""" + request = AGUIRequest( + messages=[{"role": "user", "content": "test"}], + tools=[{"name": "my_tool"}], + context=[{"id": "ctx1"}], + ) + + dumped = request.model_dump(exclude_none=True) + + assert "messages" in dumped + assert "tools" in dumped + assert "context" in dumped + assert "run_id" not in dumped + assert "thread_id" not in dumped + assert "state" not in dumped + assert "forwarded_props" not in dumped + assert "parent_run_id" not in dumped + + def test_agui_request_model_dump_includes_all_set_fields(self) -> None: + """Test that model_dump preserves all explicitly set fields. + + This is critical for the fix - ensuring tools, context, forwarded_props, + and parent_run_id are not stripped during request validation. 
+ """ + request = AGUIRequest( + messages=[{"role": "user", "content": "test"}], + tools=[{"name": "client_tool", "parameters": {"type": "object"}}], + context=[{"type": "snippet", "content": "code here"}], + forwarded_props={"auth_token": "secret", "user_id": "user-1"}, + parent_run_id="parent-456", + ) + + dumped = request.model_dump(exclude_none=True) + + # Verify all fields are preserved (the main bug fix) + assert dumped["tools"] == [{"name": "client_tool", "parameters": {"type": "object"}}] + assert dumped["context"] == [{"type": "snippet", "content": "code here"}] + assert dumped["forwarded_props"] == {"auth_token": "secret", "user_id": "user-1"} + assert dumped["parent_run_id"] == "parent-456" diff --git a/python/packages/ag-ui/tests/test_utils.py b/python/packages/ag-ui/tests/test_utils.py index b077468b81..5d956d1ec6 100644 --- a/python/packages/ag-ui/tests/test_utils.py +++ b/python/packages/ag-ui/tests/test_utils.py @@ -122,6 +122,20 @@ def test_make_json_safe_model_dump(): assert result == {"type": "model", "data": "dump"} +class ToDictObject: + """Object with to_dict method (like SerializationMixin).""" + + def to_dict(self): + return {"type": "serialization_mixin", "method": "to_dict"} + + +def test_make_json_safe_to_dict(): + """Test object with to_dict method (SerializationMixin pattern).""" + obj = ToDictObject() + result = make_json_safe(obj) + assert result == {"type": "serialization_mixin", "method": "to_dict"} + + class DictObject: """Object with dict method.""" @@ -203,6 +217,41 @@ def test_make_json_safe_fallback(): assert isinstance(result, dict) +def test_make_json_safe_dataclass_with_nested_to_dict_object(): + """Test dataclass containing a to_dict object (like HandoffAgentUserRequest with AgentResponse). + + This test verifies the fix for the AG-UI JSON serialization error when + HandoffAgentUserRequest (a dataclass) contains an AgentResponse (SerializationMixin). 
+ """ + + class NestedToDictObject: + """Simulates SerializationMixin objects like AgentResponse.""" + + def __init__(self, contents: list[str]): + self.contents = contents + + def to_dict(self): + return {"type": "response", "contents": self.contents} + + @dataclass + class ContainerDataclass: + """Simulates HandoffAgentUserRequest dataclass.""" + + response: NestedToDictObject + + obj = ContainerDataclass(response=NestedToDictObject(contents=["hello", "world"])) + result = make_json_safe(obj) + + # Verify the nested to_dict object was properly serialized + assert result == {"response": {"type": "response", "contents": ["hello", "world"]}} + + # Verify the result is actually JSON serializable + import json + + json_str = json.dumps(result) + assert json_str is not None + + def test_convert_tools_to_agui_format_with_ai_function(): """Test converting AIFunction to AG-UI format.""" from agent_framework import ai_function @@ -307,3 +356,173 @@ def tool2(y: str) -> str: assert len(result) == 2 assert result[0]["name"] == "tool1" assert result[1]["name"] == "tool2" + + +# Additional tests for utils coverage + + +def test_safe_json_parse_with_dict(): + """Test safe_json_parse with dict input.""" + from agent_framework_ag_ui._utils import safe_json_parse + + input_dict = {"key": "value"} + result = safe_json_parse(input_dict) + assert result == input_dict + + +def test_safe_json_parse_with_json_string(): + """Test safe_json_parse with JSON string.""" + from agent_framework_ag_ui._utils import safe_json_parse + + result = safe_json_parse('{"key": "value"}') + assert result == {"key": "value"} + + +def test_safe_json_parse_with_invalid_json(): + """Test safe_json_parse with invalid JSON.""" + from agent_framework_ag_ui._utils import safe_json_parse + + result = safe_json_parse("not json") + assert result is None + + +def test_safe_json_parse_with_non_dict_json(): + """Test safe_json_parse with JSON that parses to non-dict.""" + from agent_framework_ag_ui._utils import 
safe_json_parse + + result = safe_json_parse("[1, 2, 3]") + assert result is None + + +def test_safe_json_parse_with_none(): + """Test safe_json_parse with None input.""" + from agent_framework_ag_ui._utils import safe_json_parse + + result = safe_json_parse(None) + assert result is None + + +def test_get_role_value_with_enum(): + """Test get_role_value with enum role.""" + from agent_framework import ChatMessage, Content, Role + + from agent_framework_ag_ui._utils import get_role_value + + message = ChatMessage(role=Role.USER, contents=[Content.from_text("test")]) + result = get_role_value(message) + assert result == "user" + + +def test_get_role_value_with_string(): + """Test get_role_value with string role.""" + from agent_framework_ag_ui._utils import get_role_value + + class MockMessage: + role = "assistant" + + result = get_role_value(MockMessage()) + assert result == "assistant" + + +def test_get_role_value_with_none(): + """Test get_role_value with no role.""" + from agent_framework_ag_ui._utils import get_role_value + + class MockMessage: + pass + + result = get_role_value(MockMessage()) + assert result == "" + + +def test_normalize_agui_role_developer(): + """Test normalize_agui_role maps developer to system.""" + from agent_framework_ag_ui._utils import normalize_agui_role + + assert normalize_agui_role("developer") == "system" + + +def test_normalize_agui_role_valid(): + """Test normalize_agui_role with valid roles.""" + from agent_framework_ag_ui._utils import normalize_agui_role + + assert normalize_agui_role("user") == "user" + assert normalize_agui_role("assistant") == "assistant" + assert normalize_agui_role("system") == "system" + assert normalize_agui_role("tool") == "tool" + + +def test_normalize_agui_role_invalid(): + """Test normalize_agui_role with invalid role defaults to user.""" + from agent_framework_ag_ui._utils import normalize_agui_role + + assert normalize_agui_role("invalid") == "user" + assert normalize_agui_role(123) == "user" + + 
+def test_extract_state_from_tool_args(): + """Test extract_state_from_tool_args.""" + from agent_framework_ag_ui._utils import extract_state_from_tool_args + + # Specific key + assert extract_state_from_tool_args({"key": "value"}, "key") == "value" + + # Wildcard + args = {"a": 1, "b": 2} + assert extract_state_from_tool_args(args, "*") == args + + # Missing key + assert extract_state_from_tool_args({"other": "value"}, "key") is None + + # None args + assert extract_state_from_tool_args(None, "key") is None + + +def test_convert_agui_tools_to_agent_framework(): + """Test convert_agui_tools_to_agent_framework.""" + from agent_framework_ag_ui._utils import convert_agui_tools_to_agent_framework + + agui_tools = [ + { + "name": "test_tool", + "description": "A test tool", + "parameters": {"type": "object", "properties": {"arg": {"type": "string"}}}, + } + ] + + result = convert_agui_tools_to_agent_framework(agui_tools) + + assert result is not None + assert len(result) == 1 + assert result[0].name == "test_tool" + assert result[0].description == "A test tool" + assert result[0].declaration_only is True + + +def test_convert_agui_tools_to_agent_framework_none(): + """Test convert_agui_tools_to_agent_framework with None.""" + from agent_framework_ag_ui._utils import convert_agui_tools_to_agent_framework + + result = convert_agui_tools_to_agent_framework(None) + assert result is None + + +def test_convert_agui_tools_to_agent_framework_empty(): + """Test convert_agui_tools_to_agent_framework with empty list.""" + from agent_framework_ag_ui._utils import convert_agui_tools_to_agent_framework + + result = convert_agui_tools_to_agent_framework([]) + assert result is None + + +def test_make_json_safe_unconvertible(): + """Test make_json_safe with object that has no standard conversion.""" + + class NoConversion: + __slots__ = () # No __dict__ + + from agent_framework_ag_ui._utils import make_json_safe + + result = make_json_safe(NoConversion()) + # Falls back to str() + 
assert isinstance(result, str) diff --git a/python/packages/ag-ui/tests/utils_test_ag_ui.py b/python/packages/ag-ui/tests/utils_test_ag_ui.py index c3fa590cd1..5c2415583c 100644 --- a/python/packages/ag-ui/tests/utils_test_ag_ui.py +++ b/python/packages/ag-ui/tests/utils_test_ag_ui.py @@ -16,13 +16,10 @@ ChatMessage, ChatResponse, ChatResponseUpdate, - TextContent, + Content, ) from agent_framework._clients import TOptions_co -from agent_framework_ag_ui._message_adapters import _deduplicate_messages, _sanitize_tool_history -from agent_framework_ag_ui._orchestrators import ExecutionContext - if sys.version_info >= (3, 12): from typing import override # type: ignore # pragma: no cover else: @@ -91,7 +88,7 @@ def __init__( self.id = agent_id self.name = agent_name self.description = "stub agent" - self.updates = updates or [AgentResponseUpdate(contents=[TextContent(text="response")], role="assistant")] + self.updates = updates or [AgentResponseUpdate(contents=[Content.from_text(text="response")], role="assistant")] self.default_options: dict[str, Any] = ( default_options if isinstance(default_options, dict) else {"tools": None, "response_format": None} ) @@ -125,14 +122,3 @@ async def _stream() -> AsyncIterator[AgentResponseUpdate]: def get_new_thread(self, **kwargs: Any) -> AgentThread: return AgentThread() - - -class TestExecutionContext(ExecutionContext): - """ExecutionContext helper that allows setting messages for tests.""" - - def set_messages(self, messages: list[ChatMessage], *, normalize: bool = True) -> None: - if normalize: - self._messages = _deduplicate_messages(_sanitize_tool_history(messages)) - else: - self._messages = messages - self._snapshot_messages = None diff --git a/python/packages/anthropic/agent_framework_anthropic/_chat_client.py b/python/packages/anthropic/agent_framework_anthropic/_chat_client.py index c9223e614b..40066b4779 100644 --- a/python/packages/anthropic/agent_framework_anthropic/_chat_client.py +++ 
b/python/packages/anthropic/agent_framework_anthropic/_chat_client.py @@ -7,31 +7,19 @@ from agent_framework import ( AGENT_FRAMEWORK_USER_AGENT, AIFunction, - Annotations, + Annotation, BaseChatClient, ChatMessage, ChatOptions, ChatResponse, ChatResponseUpdate, - CitationAnnotation, - CodeInterpreterToolCallContent, - CodeInterpreterToolResultContent, - Contents, - ErrorContent, + Content, FinishReason, - FunctionCallContent, - FunctionResultContent, HostedCodeInterpreterTool, - HostedFileContent, HostedMCPTool, HostedWebSearchTool, - MCPServerToolCallContent, - MCPServerToolResultContent, Role, - TextContent, - TextReasoningContent, TextSpanRegion, - UsageContent, UsageDetails, get_logger, prepare_function_call_results, @@ -57,7 +45,7 @@ from anthropic.types.beta.beta_code_execution_tool_result_error import ( BetaCodeExecutionToolResultError, ) -from pydantic import SecretStr, ValidationError +from pydantic import BaseModel, SecretStr, ValidationError if sys.version_info >= (3, 13): from typing import TypeVar @@ -79,6 +67,7 @@ ANTHROPIC_DEFAULT_MAX_TOKENS: Final[int] = 1024 BETA_FLAGS: Final[list[str]] = ["mcp-client-2025-04-04", "code-execution-2025-08-25"] +STRUCTURED_OUTPUTS_BETA_FLAG: Final[str] = "structured-outputs-2025-11-13" # region Anthropic Chat Options TypedDict @@ -157,6 +146,7 @@ class AnthropicChatOptions(ChatOptions, total=False): frequency_penalty: None # type: ignore[misc] presence_penalty: None # type: ignore[misc] store: None # type: ignore[misc] + conversation_id: None # type: ignore[misc] TAnthropicOptions = TypeVar( @@ -353,7 +343,7 @@ async def _inner_get_response( # execute message = await self.anthropic_client.beta.messages.create(**run_options, stream=False) # process - return self._process_message(message) + return self._process_message(message, options) @override async def _inner_get_streaming_response( @@ -396,8 +386,10 @@ def _prepare_options( messages = prepend_instructions_to_messages(list(messages), instructions, role="system") - 
# Start with a copy of options - run_options: dict[str, Any] = {k: v for k, v in options.items() if v is not None and k not in {"instructions"}} + # Start with a copy of options, excluding keys we handle separately + run_options: dict[str, Any] = { + k: v for k, v in options.items() if v is not None and k not in {"instructions", "response_format"} + } # Translation between options keys and Anthropic Messages API for old_key, new_key in OPTION_TRANSLATIONS.items(): @@ -438,6 +430,13 @@ def _prepare_options( if tools_config := self._prepare_tools_for_anthropic(options): run_options.update(tools_config) + # response_format - use native output_format for structured outputs + response_format = options.get("response_format") + if response_format is not None: + run_options["output_format"] = self._prepare_response_format(response_format) + # Add the structured outputs beta flag + run_options["betas"].add(STRUCTURED_OUTPUTS_BETA_FLAG) + run_options.update(kwargs) return run_options @@ -456,6 +455,41 @@ def _prepare_betas(self, options: dict[str, Any]) -> set[str]: *options.get("additional_beta_flags", []), } + def _prepare_response_format(self, response_format: type[BaseModel] | dict[str, Any]) -> dict[str, Any]: + """Prepare the output_format parameter for structured output. + + Args: + response_format: Either a Pydantic model class or a dict with the schema specification. + If a dict, it can be in OpenAI-style format with "json_schema" key, + or direct format with "schema" key, or the raw schema dict itself. + + Returns: + A dictionary representing the output_format for Anthropic's structured outputs. 
+ """ + if isinstance(response_format, dict): + if "json_schema" in response_format: + schema = response_format["json_schema"].get("schema", {}) + elif "schema" in response_format: + schema = response_format["schema"] + else: + schema = response_format + + if isinstance(schema, dict): + schema["additionalProperties"] = False + + return { + "type": "json_schema", + "schema": schema, + } + + schema = response_format.model_json_schema() + schema["additionalProperties"] = False + + return { + "type": "json_schema", + "schema": schema, + } + def _prepare_messages_for_anthropic(self, messages: MutableSequence[ChatMessage]) -> list[dict[str, Any]]: """Prepare a list of ChatMessages for the Anthropic client. @@ -480,13 +514,15 @@ def _prepare_message_for_anthropic(self, message: ChatMessage) -> dict[str, Any] for content in message.contents: match content.type: case "text": - a_content.append({"type": "text", "text": content.text}) + # Skip empty text content blocks - Anthropic API rejects them + if content.text: + a_content.append({"type": "text", "text": content.text}) case "data": if content.has_top_level_media_type("image"): a_content.append({ "type": "image", "source": { - "data": content.get_data_bytes_as_str(), + "data": content.get_data_bytes_as_str(), # type: ignore[attr-defined] "media_type": content.media_type, "type": "base64", }, @@ -618,11 +654,12 @@ def _prepare_tools_for_anthropic(self, options: dict[str, Any]) -> dict[str, Any # region Response Processing Methods - def _process_message(self, message: BetaMessage) -> ChatResponse: + def _process_message(self, message: BetaMessage, options: dict[str, Any]) -> ChatResponse: """Process the response from the Anthropic client. Args: message: The message returned by the Anthropic client. + options: The options dict used for the request. Returns: A ChatResponse object containing the processed response. 
@@ -639,6 +676,7 @@ def _process_message(self, message: BetaMessage) -> ChatResponse: usage_details=self._parse_usage_from_anthropic(message.usage), model_id=message.model, finish_reason=FINISH_REASON_MAP.get(message.stop_reason) if message.stop_reason else None, + response_format=options.get("response_format"), raw_representation=message, ) @@ -653,9 +691,9 @@ def _process_stream_event(self, event: BetaRawMessageStreamEvent) -> ChatRespons """ match event.type: case "message_start": - usage_details: list[UsageContent] = [] + usage_details: list[Content] = [] if event.message.usage and (details := self._parse_usage_from_anthropic(event.message.usage)): - usage_details.append(UsageContent(details=details)) + usage_details.append(Content.from_usage(usage_details=details)) return ChatResponseUpdate( response_id=event.message.id, @@ -672,7 +710,7 @@ def _process_stream_event(self, event: BetaRawMessageStreamEvent) -> ChatRespons case "message_delta": usage = self._parse_usage_from_anthropic(event.usage) return ChatResponseUpdate( - contents=[UsageContent(details=usage, raw_representation=event.usage)] if usage else [], + contents=[Content.from_usage(usage_details=usage, raw_representation=event.usage)] if usage else [], finish_reason=FINISH_REASON_MAP.get(event.delta.stop_reason) if event.delta.stop_reason else None, raw_representation=event, ) @@ -702,24 +740,24 @@ def _parse_usage_from_anthropic(self, usage: BetaUsage | BetaMessageDeltaUsage | return None usage_details = UsageDetails(output_token_count=usage.output_tokens) if usage.input_tokens is not None: - usage_details.input_token_count = usage.input_tokens + usage_details["input_token_count"] = usage.input_tokens if usage.cache_creation_input_tokens is not None: - usage_details.additional_counts["anthropic.cache_creation_input_tokens"] = usage.cache_creation_input_tokens + usage_details["anthropic.cache_creation_input_tokens"] = usage.cache_creation_input_tokens # type: ignore[typeddict-unknown-key] if 
usage.cache_read_input_tokens is not None: - usage_details.additional_counts["anthropic.cache_read_input_tokens"] = usage.cache_read_input_tokens + usage_details["anthropic.cache_read_input_tokens"] = usage.cache_read_input_tokens # type: ignore[typeddict-unknown-key] return usage_details def _parse_contents_from_anthropic( self, content: Sequence[BetaContentBlock | BetaRawContentBlockDelta | BetaTextBlock], - ) -> list[Contents]: + ) -> list[Content]: """Parse contents from the Anthropic message.""" - contents: list[Contents] = [] + contents: list[Content] = [] for content_block in content: match content_block.type: case "text" | "text_delta": contents.append( - TextContent( + Content.from_text( text=content_block.text, raw_representation=content_block, annotations=self._parse_citations_from_anthropic(content_block), @@ -729,7 +767,7 @@ def _parse_contents_from_anthropic( self._last_call_id_name = (content_block.id, content_block.name) if content_block.type == "mcp_tool_use": contents.append( - MCPServerToolCallContent( + Content.from_mcp_server_tool_call( call_id=content_block.id, tool_name=content_block.name, server_name=None, @@ -739,10 +777,10 @@ def _parse_contents_from_anthropic( ) elif "code_execution" in (content_block.name or ""): contents.append( - CodeInterpreterToolCallContent( + Content.from_code_interpreter_tool_call( call_id=content_block.id, inputs=[ - TextContent( + Content.from_text( text=str(content_block.input), raw_representation=content_block, ) @@ -752,7 +790,7 @@ def _parse_contents_from_anthropic( ) else: contents.append( - FunctionCallContent( + Content.from_function_call( call_id=content_block.id, name=content_block.name, arguments=content_block.input, @@ -760,14 +798,14 @@ def _parse_contents_from_anthropic( ) ) case "mcp_tool_result": - call_id, name = self._last_call_id_name or (None, None) - parsed_output: list[Contents] | None = None + call_id, _ = self._last_call_id_name or (None, None) + parsed_output: list[Content] | None = None 
if content_block.content: if isinstance(content_block.content, list): parsed_output = self._parse_contents_from_anthropic(content_block.content) elif isinstance(content_block.content, (str, bytes)): parsed_output = [ - TextContent( + Content.from_text( text=str(content_block.content), raw_representation=content_block, ) @@ -775,28 +813,27 @@ def _parse_contents_from_anthropic( else: parsed_output = self._parse_contents_from_anthropic([content_block.content]) contents.append( - MCPServerToolResultContent( + Content.from_mcp_server_tool_result( call_id=content_block.tool_use_id, output=parsed_output, raw_representation=content_block, ) ) case "web_search_tool_result" | "web_fetch_tool_result": - call_id, name = self._last_call_id_name or (None, None) + call_id, _ = self._last_call_id_name or (None, None) contents.append( - FunctionResultContent( + Content.from_function_result( call_id=content_block.tool_use_id, - name=name if name and call_id == content_block.tool_use_id else "web_tool", result=content_block.content, raw_representation=content_block, ) ) case "code_execution_tool_result": - code_outputs: list[Contents] = [] + code_outputs: list[Content] = [] if content_block.content: if isinstance(content_block.content, BetaCodeExecutionToolResultError): code_outputs.append( - ErrorContent( + Content.from_error( message=content_block.content.error_code, raw_representation=content_block.content, ) @@ -804,41 +841,41 @@ def _parse_contents_from_anthropic( else: if content_block.content.stdout: code_outputs.append( - TextContent( + Content.from_text( text=content_block.content.stdout, raw_representation=content_block.content, ) ) if content_block.content.stderr: code_outputs.append( - ErrorContent( + Content.from_error( message=content_block.content.stderr, raw_representation=content_block.content, ) ) for code_file_content in content_block.content.content: code_outputs.append( - HostedFileContent( + Content.from_hosted_file( file_id=code_file_content.file_id, 
raw_representation=code_file_content, ) ) contents.append( - CodeInterpreterToolResultContent( + Content.from_code_interpreter_tool_result( call_id=content_block.tool_use_id, raw_representation=content_block, outputs=code_outputs, ) ) case "bash_code_execution_tool_result": - bash_outputs: list[Contents] = [] + bash_outputs: list[Content] = [] if content_block.content: if isinstance( content_block.content, BetaBashCodeExecutionToolResultError, ): bash_outputs.append( - ErrorContent( + Content.from_error( message=content_block.content.error_code, raw_representation=content_block.content, ) @@ -846,39 +883,38 @@ def _parse_contents_from_anthropic( else: if content_block.content.stdout: bash_outputs.append( - TextContent( + Content.from_text( text=content_block.content.stdout, raw_representation=content_block.content, ) ) if content_block.content.stderr: bash_outputs.append( - ErrorContent( + Content.from_error( message=content_block.content.stderr, raw_representation=content_block.content, ) ) for bash_file_content in content_block.content.content: contents.append( - HostedFileContent( + Content.from_hosted_file( file_id=bash_file_content.file_id, raw_representation=bash_file_content, ) ) contents.append( - FunctionResultContent( + Content.from_function_result( call_id=content_block.tool_use_id, - name=content_block.type, result=bash_outputs, raw_representation=content_block, ) ) case "text_editor_code_execution_tool_result": - text_editor_outputs: list[Contents] = [] + text_editor_outputs: list[Content] = [] match content_block.content.type: case "text_editor_code_execution_tool_result_error": text_editor_outputs.append( - ErrorContent( + Content.from_error( message=content_block.content.error_code and getattr(content_block.content, "error_message", ""), raw_representation=content_block.content, @@ -887,10 +923,12 @@ def _parse_contents_from_anthropic( case "text_editor_code_execution_view_result": annotations = ( [ - CitationAnnotation( + Annotation( + 
type="citation", raw_representation=content_block.content, annotated_regions=[ TextSpanRegion( + type="text_span", start_index=content_block.content.start_line, end_index=content_block.content.start_line + (content_block.content.num_lines or 0), @@ -903,7 +941,7 @@ def _parse_contents_from_anthropic( else None ) text_editor_outputs.append( - TextContent( + Content.from_text( text=content_block.content.content, annotations=annotations, raw_representation=content_block.content, @@ -911,10 +949,12 @@ def _parse_contents_from_anthropic( ) case "text_editor_code_execution_str_replace_result": old_annotation = ( - CitationAnnotation( + Annotation( + type="citation", raw_representation=content_block.content, annotated_regions=[ TextSpanRegion( + type="text_span", start_index=content_block.content.old_start or 0, end_index=( (content_block.content.old_start or 0) @@ -928,13 +968,15 @@ def _parse_contents_from_anthropic( else None ) new_annotation = ( - CitationAnnotation( + Annotation( + type="citation", raw_representation=content_block.content, - snippet="\n".join(content_block.content.lines) + snippet="\n".join(content_block.content.lines) # type: ignore[typeddict-item] if content_block.content.lines else None, annotated_regions=[ TextSpanRegion( + type="text_span", start_index=content_block.content.new_start or 0, end_index=( (content_block.content.new_start or 0) @@ -950,7 +992,7 @@ def _parse_contents_from_anthropic( annotations = [ann for ann in [old_annotation, new_annotation] if ann is not None] text_editor_outputs.append( - TextContent( + Content.from_text( text=( "\n".join(content_block.content.lines) if content_block.content.lines else "" ), @@ -960,15 +1002,14 @@ def _parse_contents_from_anthropic( ) case "text_editor_code_execution_create_result": text_editor_outputs.append( - TextContent( + Content.from_text( text=f"File update: {content_block.content.is_file_update}", raw_representation=content_block.content, ) ) contents.append( - FunctionResultContent( + 
Content.from_function_result( call_id=content_block.tool_use_id, - name=content_block.type, result=text_editor_outputs, raw_representation=content_block, ) @@ -979,9 +1020,9 @@ def _parse_contents_from_anthropic( # since it triggers on `if content.name:`. The initial tool_use event already # provides the name, so deltas should only carry incremental arguments. # This matches OpenAI's behavior where streaming chunks have name="". - call_id, _ = self._last_call_id_name if self._last_call_id_name else ("", "") + call_id, _name = self._last_call_id_name if self._last_call_id_name else ("", "") contents.append( - FunctionCallContent( + Content.from_function_call( call_id=call_id, name="", arguments=content_block.partial_json, @@ -990,7 +1031,7 @@ def _parse_contents_from_anthropic( ) case "thinking" | "thinking_delta": contents.append( - TextReasoningContent( + Content.from_text_reasoning( text=content_block.thinking, raw_representation=content_block, ) @@ -1001,65 +1042,65 @@ def _parse_contents_from_anthropic( def _parse_citations_from_anthropic( self, content_block: BetaContentBlock | BetaRawContentBlockDelta | BetaTextBlock - ) -> list[Annotations] | None: - content_citations = getattr(content_block, "citations", None) - if not content_citations: + ) -> list[Annotation] | None: + content_blocks = getattr(content_block, "citations", None) + if not content_blocks: return None - annotations: list[Annotations] = [] - for citation in content_citations: - cit = CitationAnnotation(raw_representation=citation) + annotations: list[Annotation] = [] + for citation in content_blocks: + cit = Annotation(type="citation", raw_representation=citation) match citation.type: case "char_location": - cit.title = citation.title - cit.snippet = citation.cited_text + cit["title"] = citation.title + cit["snippet"] = citation.cited_text if citation.file_id: - cit.file_id = citation.file_id - if not cit.annotated_regions: - cit.annotated_regions = [] - cit.annotated_regions.append( + 
cit["file_id"] = citation.file_id + cit.setdefault("annotated_regions", []) + cit["annotated_regions"].append( # type: ignore[attr-defined] TextSpanRegion( + type="text_span", start_index=citation.start_char_index, end_index=citation.end_char_index, ) ) case "page_location": - cit.title = citation.document_title - cit.snippet = citation.cited_text + cit["title"] = citation.document_title + cit["snippet"] = citation.cited_text if citation.file_id: - cit.file_id = citation.file_id - if not cit.annotated_regions: - cit.annotated_regions = [] - cit.annotated_regions.append( + cit["file_id"] = citation.file_id + cit.setdefault("annotated_regions", []) + cit["annotated_regions"].append( # type: ignore[attr-defined] TextSpanRegion( + type="text_span", start_index=citation.start_page_number, end_index=citation.end_page_number, ) ) case "content_block_location": - cit.title = citation.document_title - cit.snippet = citation.cited_text + cit["title"] = citation.document_title + cit["snippet"] = citation.cited_text if citation.file_id: - cit.file_id = citation.file_id - if not cit.annotated_regions: - cit.annotated_regions = [] - cit.annotated_regions.append( + cit["file_id"] = citation.file_id + cit.setdefault("annotated_regions", []) + cit["annotated_regions"].append( # type: ignore[attr-defined] TextSpanRegion( + type="text_span", start_index=citation.start_block_index, end_index=citation.end_block_index, ) ) case "web_search_result_location": - cit.title = citation.title - cit.snippet = citation.cited_text - cit.url = citation.url + cit["title"] = citation.title + cit["snippet"] = citation.cited_text + cit["url"] = citation.url case "search_result_location": - cit.title = citation.title - cit.snippet = citation.cited_text - cit.url = citation.source - if not cit.annotated_regions: - cit.annotated_regions = [] - cit.annotated_regions.append( + cit["title"] = citation.title + cit["snippet"] = citation.cited_text + cit["url"] = citation.source + 
cit.setdefault("annotated_regions", []) + cit["annotated_regions"].append( # type: ignore[attr-defined] TextSpanRegion( + type="text_span", start_index=citation.start_block_index, end_index=citation.end_block_index, ) diff --git a/python/packages/anthropic/pyproject.toml b/python/packages/anthropic/pyproject.toml index 53eb699cd4..c97a3e2c26 100644 --- a/python/packages/anthropic/pyproject.toml +++ b/python/packages/anthropic/pyproject.toml @@ -4,7 +4,7 @@ description = "Anthropic integration for Microsoft Agent Framework." authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] readme = "README.md" requires-python = ">=3.10" -version = "1.0.0b260114" +version = "1.0.0b260116" license-files = ["LICENSE"] urls.homepage = "https://aka.ms/agent-framework" urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" diff --git a/python/packages/anthropic/tests/test_anthropic_client.py b/python/packages/anthropic/tests/test_anthropic_client.py index 828d9916c2..826dcf1870 100644 --- a/python/packages/anthropic/tests/test_anthropic_client.py +++ b/python/packages/anthropic/tests/test_anthropic_client.py @@ -10,16 +10,12 @@ ChatMessage, ChatOptions, ChatResponseUpdate, - DataContent, + Content, FinishReason, - FunctionCallContent, - FunctionResultContent, HostedCodeInterpreterTool, HostedMCPTool, HostedWebSearchTool, Role, - TextContent, - TextReasoningContent, ai_function, ) from agent_framework.exceptions import ServiceInitializationError @@ -170,7 +166,7 @@ def test_prepare_message_for_anthropic_function_call(mock_anthropic_client: Magi message = ChatMessage( role=Role.ASSISTANT, contents=[ - FunctionCallContent( + Content.from_function_call( call_id="call_123", name="get_weather", arguments={"location": "San Francisco"}, @@ -194,9 +190,8 @@ def test_prepare_message_for_anthropic_function_result(mock_anthropic_client: Ma message = ChatMessage( role=Role.TOOL, contents=[ - FunctionResultContent( + Content.from_function_result( 
call_id="call_123", - name="get_weather", result="Sunny, 72°F", ) ], @@ -219,7 +214,7 @@ def test_prepare_message_for_anthropic_text_reasoning(mock_anthropic_client: Mag chat_client = create_test_anthropic_client(mock_anthropic_client) message = ChatMessage( role=Role.ASSISTANT, - contents=[TextReasoningContent(text="Let me think about this...")], + contents=[Content.from_text_reasoning(text="Let me think about this...")], ) result = chat_client._prepare_message_for_anthropic(message) @@ -500,19 +495,19 @@ def test_process_message_basic(mock_anthropic_client: MagicMock) -> None: mock_message.usage = BetaUsage(input_tokens=10, output_tokens=5) mock_message.stop_reason = "end_turn" - response = chat_client._process_message(mock_message) + response = chat_client._process_message(mock_message, {}) assert response.response_id == "msg_123" assert response.model_id == "claude-3-5-sonnet-20241022" assert len(response.messages) == 1 assert response.messages[0].role == Role.ASSISTANT assert len(response.messages[0].contents) == 1 - assert isinstance(response.messages[0].contents[0], TextContent) + assert response.messages[0].contents[0].type == "text" assert response.messages[0].contents[0].text == "Hello there!" 
assert response.finish_reason == FinishReason.STOP assert response.usage_details is not None - assert response.usage_details.input_token_count == 10 - assert response.usage_details.output_token_count == 5 + assert response.usage_details["input_token_count"] == 10 + assert response.usage_details["output_token_count"] == 5 def test_process_message_with_tool_use(mock_anthropic_client: MagicMock) -> None: @@ -533,10 +528,10 @@ def test_process_message_with_tool_use(mock_anthropic_client: MagicMock) -> None mock_message.usage = BetaUsage(input_tokens=10, output_tokens=5) mock_message.stop_reason = "tool_use" - response = chat_client._process_message(mock_message) + response = chat_client._process_message(mock_message, {}) assert len(response.messages[0].contents) == 1 - assert isinstance(response.messages[0].contents[0], FunctionCallContent) + assert response.messages[0].contents[0].type == "function_call" assert response.messages[0].contents[0].call_id == "call_123" assert response.messages[0].contents[0].name == "get_weather" assert response.finish_reason == FinishReason.TOOL_CALLS @@ -550,8 +545,8 @@ def test_parse_usage_from_anthropic_basic(mock_anthropic_client: MagicMock) -> N result = chat_client._parse_usage_from_anthropic(usage) assert result is not None - assert result.input_token_count == 10 - assert result.output_token_count == 5 + assert result["input_token_count"] == 10 + assert result["output_token_count"] == 5 def test_parse_usage_from_anthropic_none(mock_anthropic_client: MagicMock) -> None: @@ -571,7 +566,7 @@ def test_parse_contents_from_anthropic_text(mock_anthropic_client: MagicMock) -> result = chat_client._parse_contents_from_anthropic(content) assert len(result) == 1 - assert isinstance(result[0], TextContent) + assert result[0].type == "text" assert result[0].text == "Hello!" 
@@ -590,7 +585,7 @@ def test_parse_contents_from_anthropic_tool_use(mock_anthropic_client: MagicMock result = chat_client._parse_contents_from_anthropic(content) assert len(result) == 1 - assert isinstance(result[0], FunctionCallContent) + assert result[0].type == "function_call" assert result[0].call_id == "call_123" assert result[0].name == "get_weather" @@ -613,7 +608,7 @@ def test_parse_contents_from_anthropic_input_json_delta_no_duplicate_name(mock_a result = chat_client._parse_contents_from_anthropic([tool_use_content]) assert len(result) == 1 - assert isinstance(result[0], FunctionCallContent) + assert result[0].type == "function_call" assert result[0].call_id == "call_123" assert result[0].name == "get_weather" # Initial event has name @@ -624,7 +619,7 @@ def test_parse_contents_from_anthropic_input_json_delta_no_duplicate_name(mock_a result = chat_client._parse_contents_from_anthropic([delta_content_1]) assert len(result) == 1 - assert isinstance(result[0], FunctionCallContent) + assert result[0].type == "function_call" assert result[0].call_id == "call_123" assert result[0].name == "" # Delta events should have empty name assert result[0].arguments == '{"location":' @@ -636,7 +631,7 @@ def test_parse_contents_from_anthropic_input_json_delta_no_duplicate_name(mock_a result = chat_client._parse_contents_from_anthropic([delta_content_2]) assert len(result) == 1 - assert isinstance(result[0], FunctionCallContent) + assert result[0].type == "function_call" assert result[0].call_id == "call_123" assert result[0].name == "" # Still empty name for subsequent deltas assert result[0].arguments == '"San Francisco"}' @@ -771,9 +766,7 @@ async def test_anthropic_client_integration_function_calling() -> None: assert response is not None # Should contain function call - has_function_call = any( - isinstance(content, FunctionCallContent) for msg in response.messages for content in msg.contents - ) + has_function_call = any(content.type == "function_call" for msg in 
response.messages for content in msg.contents) assert has_function_call @@ -872,8 +865,8 @@ async def test_anthropic_client_integration_images() -> None: ChatMessage( role=Role.USER, contents=[ - TextContent(text="Describe this image"), - DataContent(media_type="image/jpeg", data=image_bytes), + Content.from_text(text="Describe this image"), + Content.from_data(media_type="image/jpeg", data=image_bytes), ], ), ] diff --git a/python/packages/azure-ai-search/pyproject.toml b/python/packages/azure-ai-search/pyproject.toml index e43f010023..611244cde4 100644 --- a/python/packages/azure-ai-search/pyproject.toml +++ b/python/packages/azure-ai-search/pyproject.toml @@ -4,7 +4,7 @@ description = "Azure AI Search integration for Microsoft Agent Framework." authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] readme = "README.md" requires-python = ">=3.10" -version = "1.0.0b260114" +version = "1.0.0b260116" license-files = ["LICENSE"] urls.homepage = "https://aka.ms/agent-framework" urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" diff --git a/python/packages/azure-ai/agent_framework_azure_ai/__init__.py b/python/packages/azure-ai/agent_framework_azure_ai/__init__.py index c0cd4d249c..e90f3e6337 100644 --- a/python/packages/azure-ai/agent_framework_azure_ai/__init__.py +++ b/python/packages/azure-ai/agent_framework_azure_ai/__init__.py @@ -4,7 +4,7 @@ from ._agent_provider import AzureAIAgentsProvider from ._chat_client import AzureAIAgentClient, AzureAIAgentOptions -from ._client import AzureAIClient +from ._client import AzureAIClient, AzureAIProjectAgentOptions from ._project_provider import AzureAIProjectAgentProvider from ._shared import AzureAISettings @@ -18,6 +18,7 @@ "AzureAIAgentOptions", "AzureAIAgentsProvider", "AzureAIClient", + "AzureAIProjectAgentOptions", "AzureAIProjectAgentProvider", "AzureAISettings", "__version__", diff --git a/python/packages/azure-ai/agent_framework_azure_ai/_chat_client.py 
b/python/packages/azure-ai/agent_framework_azure_ai/_chat_client.py index 931f57500e..4bb646da19 100644 --- a/python/packages/azure-ai/agent_framework_azure_ai/_chat_client.py +++ b/python/packages/azure-ai/agent_framework_azure_ai/_chat_client.py @@ -2,33 +2,33 @@ import ast import json +import os import re import sys -from collections.abc import AsyncIterable, Mapping, MutableMapping, MutableSequence, Sequence +from collections.abc import AsyncIterable, Callable, Mapping, MutableMapping, MutableSequence, Sequence from typing import Any, ClassVar, Generic, TypedDict from agent_framework import ( AGENT_FRAMEWORK_USER_AGENT, + AIFunction, + Annotation, BaseChatClient, + ChatAgent, ChatMessage, + ChatMessageStoreProtocol, ChatOptions, ChatResponse, ChatResponseUpdate, - CitationAnnotation, - Contents, - DataContent, - FunctionApprovalRequestContent, - FunctionApprovalResponseContent, - FunctionCallContent, - FunctionResultContent, - HostedFileContent, + Content, + ContextProvider, + HostedCodeInterpreterTool, + HostedFileSearchTool, HostedMCPTool, + HostedWebSearchTool, + Middleware, Role, - TextContent, TextSpanRegion, ToolProtocol, - UriContent, - UsageContent, UsageDetails, get_logger, prepare_function_call_results, @@ -46,9 +46,14 @@ AgentStreamEvent, AsyncAgentEventHandler, AsyncAgentRunStream, + BingCustomSearchTool, + BingGroundingTool, + CodeInterpreterToolDefinition, + FileSearchTool, FunctionName, FunctionToolDefinition, ListSortOrder, + McpTool, MessageDeltaChunk, MessageDeltaTextContent, MessageDeltaTextFileCitationAnnotation, @@ -418,7 +423,7 @@ async def _create_agent_stream( self, agent_id: str, run_options: dict[str, Any], - required_action_results: list[FunctionResultContent | FunctionApprovalResponseContent] | None, + required_action_results: list[Content] | None, ) -> tuple[AsyncAgentRunStream[AsyncAgentEventHandler[Any]] | AsyncAgentEventHandler[Any], str]: """Create the agent stream for processing. 
@@ -502,9 +507,9 @@ async def _prepare_thread( def _extract_url_citations( self, message_delta_chunk: MessageDeltaChunk, azure_search_tool_calls: list[dict[str, Any]] - ) -> list[CitationAnnotation]: + ) -> list[Annotation]: """Extract URL citations from MessageDeltaChunk.""" - url_citations: list[CitationAnnotation] = [] + url_citations: list[Annotation] = [] # Process each content item in the delta to find citations for content in message_delta_chunk.delta.content: @@ -516,6 +521,7 @@ def _extract_url_citations( if annotation.start_index and annotation.end_index: annotated_regions = [ TextSpanRegion( + type="text_span", start_index=annotation.start_index, end_index=annotation.end_index, ) @@ -526,11 +532,12 @@ def _extract_url_citations( annotation.url_citation.url, azure_search_tool_calls ) - # Create CitationAnnotation with real URL - citation = CitationAnnotation( - title=getattr(annotation.url_citation, "title", None), + # Create Annotation with real URL + citation = Annotation( + type="citation", + title=annotation.url_citation.title, # type: ignore[typeddict-item] url=real_url, - snippet=None, + snippet=None, # type: ignore[typeddict-item] annotated_regions=annotated_regions, raw_representation=annotation, ) @@ -538,7 +545,7 @@ def _extract_url_citations( return url_citations - def _extract_file_path_contents(self, message_delta_chunk: MessageDeltaChunk) -> list[HostedFileContent]: + def _extract_file_path_contents(self, message_delta_chunk: MessageDeltaChunk) -> list[Content]: """Extract file references from MessageDeltaChunk annotations. 
Code interpreter generates files that are referenced via file path or file citation @@ -555,7 +562,7 @@ def _extract_file_path_contents(self, message_delta_chunk: MessageDeltaChunk) -> Returns: List of HostedFileContent objects for any files referenced in annotations """ - file_contents: list[HostedFileContent] = [] + file_contents: list[Content] = [] for content in message_delta_chunk.delta.content: if isinstance(content, MessageDeltaTextContent) and content.text and content.text.annotations: @@ -566,14 +573,14 @@ def _extract_file_path_contents(self, message_delta_chunk: MessageDeltaChunk) -> if file_path is not None: file_id = getattr(file_path, "file_id", None) if file_id: - file_contents.append(HostedFileContent(file_id=file_id)) + file_contents.append(Content.from_hosted_file(file_id=file_id)) elif isinstance(annotation, MessageDeltaTextFileCitationAnnotation): # Extract file_id from the file_citation annotation file_citation = getattr(annotation, "file_citation", None) if file_citation is not None: file_id = getattr(file_citation, "file_id", None) if file_id: - file_contents.append(HostedFileContent(file_id=file_id)) + file_contents.append(Content.from_hosted_file(file_id=file_id)) return file_contents @@ -640,9 +647,9 @@ async def _process_stream( file_contents = self._extract_file_path_contents(event_data) # Create contents with citations if any exist - citation_content: list[Contents] = [] + citation_content: list[Content] = [] if event_data.text or url_citations: - text_content_obj = TextContent(text=event_data.text or "") + text_content_obj = Content.from_text(text=event_data.text or "") if url_citations: text_content_obj.annotations = url_citations citation_content.append(text_content_obj) @@ -718,7 +725,7 @@ async def _process_stream( self._capture_azure_search_tool_calls(event_data, azure_search_tool_calls) if event_data.usage: - usage_content = UsageContent( + usage_content = Content.from_usage( UsageDetails( 
input_token_count=event_data.usage.prompt_tokens, output_token_count=event_data.usage.completion_tokens, @@ -753,19 +760,21 @@ async def _process_stream( tool_call.code_interpreter, RunStepDeltaCodeInterpreterDetailItemObject, ): - code_contents: list[Contents] = [] + code_contents: list[Content] = [] if tool_call.code_interpreter.input is not None: logger.debug(f"Code Interpreter Input: {tool_call.code_interpreter.input}") if tool_call.code_interpreter.outputs is not None: for output in tool_call.code_interpreter.outputs: if isinstance(output, RunStepDeltaCodeInterpreterLogOutput) and output.logs: - code_contents.append(TextContent(text=output.logs)) + code_contents.append(Content.from_text(text=output.logs)) if ( isinstance(output, RunStepDeltaCodeInterpreterImageOutput) and output.image is not None and output.image.file_id is not None ): - code_contents.append(HostedFileContent(file_id=output.image.file_id)) + code_contents.append( + Content.from_hosted_file(file_id=output.image.file_id) + ) yield ChatResponseUpdate( role=Role.ASSISTANT, contents=code_contents, @@ -818,12 +827,12 @@ def _capture_azure_search_tool_calls( except Exception as ex: logger.debug(f"Failed to capture Azure AI Search tool call: {ex}") - def _parse_function_calls_from_azure_ai(self, event_data: ThreadRun, response_id: str | None) -> list[Contents]: + def _parse_function_calls_from_azure_ai(self, event_data: ThreadRun, response_id: str | None) -> list[Content]: """Parse function call contents from an Azure AI tool action event.""" if isinstance(event_data, ThreadRun) and event_data.required_action is not None: if isinstance(event_data.required_action, SubmitToolOutputsAction): return [ - FunctionCallContent( + Content.from_function_call( call_id=f'["{response_id}", "{tool.id}"]', name=tool.function.name, arguments=tool.function.arguments, @@ -833,9 +842,9 @@ def _parse_function_calls_from_azure_ai(self, event_data: ThreadRun, response_id ] if isinstance(event_data.required_action, 
SubmitToolApprovalAction): return [ - FunctionApprovalRequestContent( + Content.from_function_approval_request( id=f'["{response_id}", "{tool.id}"]', - function_call=FunctionCallContent( + function_call=Content.from_function_call( call_id=f'["{response_id}", "{tool.id}"]', name=tool.name, arguments=tool.arguments, @@ -871,7 +880,7 @@ async def _prepare_options( messages: MutableSequence[ChatMessage], options: Mapping[str, Any], **kwargs: Any, - ) -> tuple[dict[str, Any], list[FunctionResultContent | FunctionApprovalResponseContent] | None]: + ) -> tuple[dict[str, Any], list[Content] | None]: agent_definition = await self._load_agent_definition_if_needed() # Build run_options from options dict, excluding specific keys @@ -1048,7 +1057,7 @@ def _prepare_messages( ) -> tuple[ list[ThreadMessageOptions] | None, list[str], - list[FunctionResultContent | FunctionApprovalResponseContent] | None, + list[Content] | None, ]: """Prepare messages for Azure AI Agents API. @@ -1060,28 +1069,34 @@ def _prepare_messages( Tuple of (additional_messages, instructions, required_action_results) """ instructions: list[str] = [] - required_action_results: list[FunctionResultContent | FunctionApprovalResponseContent] | None = None + required_action_results: list[Content] | None = None additional_messages: list[ThreadMessageOptions] | None = None for chat_message in messages: if chat_message.role.value in ["system", "developer"]: - for text_content in [content for content in chat_message.contents if isinstance(content, TextContent)]: - instructions.append(text_content.text) + for text_content in [content for content in chat_message.contents if content.type == "text"]: + instructions.append(text_content.text) # type: ignore[arg-type] continue message_contents: list[MessageInputContentBlock] = [] for content in chat_message.contents: - if isinstance(content, TextContent): - message_contents.append(MessageInputTextBlock(text=content.text)) - elif isinstance(content, (DataContent, UriContent)) 
and content.has_top_level_media_type("image"): - message_contents.append(MessageInputImageUrlBlock(image_url=MessageImageUrlParam(url=content.uri))) - elif isinstance(content, (FunctionResultContent, FunctionApprovalResponseContent)): - if required_action_results is None: - required_action_results = [] - required_action_results.append(content) - elif isinstance(content.raw_representation, MessageInputContentBlock): - message_contents.append(content.raw_representation) + match content.type: + case "text": + message_contents.append(MessageInputTextBlock(text=content.text)) # type: ignore[arg-type] + case "data" | "uri": + if content.has_top_level_media_type("image"): + message_contents.append( + MessageInputImageUrlBlock(image_url=MessageImageUrlParam(url=content.uri)) # type: ignore[arg-type] + ) + # Only images are supported. Other media types are ignored. + case "function_result" | "function_approval_response": + if required_action_results is None: + required_action_results = [] + required_action_results.append(content) + case _: + if isinstance(content.raw_representation, MessageInputContentBlock): + message_contents.append(content.raw_representation) if message_contents: if additional_messages is None: @@ -1095,9 +1110,85 @@ def _prepare_messages( return additional_messages, instructions, required_action_results + async def _prepare_tools_for_azure_ai( + self, tools: Sequence["ToolProtocol | MutableMapping[str, Any]"], run_options: dict[str, Any] | None = None + ) -> list[ToolDefinition | dict[str, Any]]: + """Prepare tool definitions for the Azure AI Agents API.""" + tool_definitions: list[ToolDefinition | dict[str, Any]] = [] + for tool in tools: + match tool: + case AIFunction(): + tool_definitions.append(tool.to_json_schema_spec()) # type: ignore[reportUnknownArgumentType] + case HostedWebSearchTool(): + additional_props = tool.additional_properties or {} + config_args: dict[str, Any] = {} + if count := additional_props.get("count"): + config_args["count"] = 
count + if freshness := additional_props.get("freshness"): + config_args["freshness"] = freshness + if market := additional_props.get("market"): + config_args["market"] = market + if set_lang := additional_props.get("set_lang"): + config_args["set_lang"] = set_lang + # Bing Grounding + connection_id = additional_props.get("connection_id") or os.getenv("BING_CONNECTION_ID") + # Custom Bing Search + custom_connection_id = additional_props.get("custom_connection_id") or os.getenv( + "BING_CUSTOM_CONNECTION_ID" + ) + custom_instance_name = additional_props.get("custom_instance_name") or os.getenv( + "BING_CUSTOM_INSTANCE_NAME" + ) + bing_search: BingGroundingTool | BingCustomSearchTool | None = None + if (connection_id) and not custom_connection_id and not custom_instance_name: + if connection_id: + conn_id = connection_id + else: + raise ServiceInitializationError("Parameter connection_id is not provided.") + bing_search = BingGroundingTool(connection_id=conn_id, **config_args) + if custom_connection_id and custom_instance_name: + bing_search = BingCustomSearchTool( + connection_id=custom_connection_id, + instance_name=custom_instance_name, + **config_args, + ) + if not bing_search: + raise ServiceInitializationError( + "Bing search tool requires either 'connection_id' for Bing Grounding " + "or both 'custom_connection_id' and 'custom_instance_name' for Custom Bing Search. 
" + "These can be provided via additional_properties or environment variables: " + "'BING_CONNECTION_ID', 'BING_CUSTOM_CONNECTION_ID', " + "'BING_CUSTOM_INSTANCE_NAME'" + ) + tool_definitions.extend(bing_search.definitions) + case HostedCodeInterpreterTool(): + tool_definitions.append(CodeInterpreterToolDefinition()) + case HostedMCPTool(): + mcp_tool = McpTool( + server_label=tool.name.replace(" ", "_"), + server_url=str(tool.url), + allowed_tools=list(tool.allowed_tools) if tool.allowed_tools else [], + ) + tool_definitions.extend(mcp_tool.definitions) + case HostedFileSearchTool(): + vector_stores = [inp for inp in tool.inputs or [] if inp.type == "hosted_vector_store"] + if vector_stores: + file_search = FileSearchTool(vector_store_ids=[vs.vector_store_id for vs in vector_stores]) # type: ignore[misc] + tool_definitions.extend(file_search.definitions) + # Set tool_resources for file search to work properly with Azure AI + if run_options is not None and "tool_resources" not in run_options: + run_options["tool_resources"] = file_search.resources + case ToolDefinition(): + tool_definitions.append(tool) + case dict(): + tool_definitions.append(tool) + case _: + raise ServiceInitializationError(f"Unsupported tool type: {type(tool)}") + return tool_definitions + def _prepare_tool_outputs_for_azure_ai( self, - required_action_results: list[FunctionResultContent | FunctionApprovalResponseContent] | None, + required_action_results: list[Content] | None, ) -> tuple[str | None, list[ToolOutput] | None, list[ToolApproval] | None]: """Prepare function results and approvals for submission to the Azure AI API.""" run_id: str | None = None @@ -1111,9 +1202,7 @@ def _prepare_tool_outputs_for_azure_ai( # We need to extract the run ID and ensure that the Output/Approval we send back to Azure # is only the call ID. 
run_and_call_ids: list[str] = ( - json.loads(content.call_id) - if isinstance(content, FunctionResultContent) - else json.loads(content.id) + json.loads(content.call_id) if content.type == "function_result" else json.loads(content.id) # type: ignore[arg-type] ) if ( @@ -1128,16 +1217,16 @@ def _prepare_tool_outputs_for_azure_ai( run_id = run_and_call_ids[0] call_id = run_and_call_ids[1] - if isinstance(content, FunctionResultContent): + if content.type == "function_result": if tool_outputs is None: tool_outputs = [] tool_outputs.append( ToolOutput(tool_call_id=call_id, output=prepare_function_call_results(content.result)) ) - elif isinstance(content, FunctionApprovalResponseContent): + elif content.type == "function_approval_response": if tool_approvals is None: tool_approvals = [] - tool_approvals.append(ToolApproval(tool_call_id=call_id, approve=content.approved)) + tool_approvals.append(ToolApproval(tool_call_id=call_id, approve=content.approved)) # type: ignore[arg-type] return run_id, tool_outputs, tool_approvals @@ -1162,3 +1251,59 @@ def service_url(self) -> str: The service URL for the chat client, or None if not set. """ return self.agents_client._config.endpoint # type: ignore + + @override + def as_agent( + self, + *, + id: str | None = None, + name: str | None = None, + description: str | None = None, + instructions: str | None = None, + tools: ToolProtocol + | Callable[..., Any] + | MutableMapping[str, Any] + | Sequence[ToolProtocol | Callable[..., Any] | MutableMapping[str, Any]] + | None = None, + default_options: TAzureAIAgentOptions | None = None, + chat_message_store_factory: Callable[[], ChatMessageStoreProtocol] | None = None, + context_provider: ContextProvider | None = None, + middleware: Sequence[Middleware] | None = None, + **kwargs: Any, + ) -> ChatAgent[TAzureAIAgentOptions]: + """Convert this chat client to a ChatAgent. + + This method creates a ChatAgent instance with this client pre-configured. 
+ It does NOT create an agent on the Azure AI service - the actual agent + will be created on the server during the first invocation (run). + + For creating and managing persistent agents on the server, use + :class:`~agent_framework_azure_ai.AzureAIAgentsProvider` instead. + + Keyword Args: + id: The unique identifier for the agent. Will be created automatically if not provided. + name: The name of the agent. + description: A brief description of the agent's purpose. + instructions: Optional instructions for the agent. + tools: The tools to use for the request. + default_options: A TypedDict containing chat options. + chat_message_store_factory: Factory function to create an instance of ChatMessageStoreProtocol. + context_provider: Context providers to include during agent invocation. + middleware: List of middleware to intercept agent and function invocations. + kwargs: Any additional keyword arguments. + + Returns: + A ChatAgent instance configured with this chat client. + """ + return super().as_agent( + id=id, + name=name, + description=description, + instructions=instructions, + tools=tools, + default_options=default_options, + chat_message_store_factory=chat_message_store_factory, + context_provider=context_provider, + middleware=middleware, + **kwargs, + ) diff --git a/python/packages/azure-ai/agent_framework_azure_ai/_client.py b/python/packages/azure-ai/agent_framework_azure_ai/_client.py index c735cce049..08623c3aa4 100644 --- a/python/packages/azure-ai/agent_framework_azure_ai/_client.py +++ b/python/packages/azure-ai/agent_framework_azure_ai/_client.py @@ -1,35 +1,33 @@ # Copyright (c) Microsoft. All rights reserved. 
import sys -from collections.abc import Mapping, MutableSequence -from typing import TYPE_CHECKING, Any, ClassVar, Generic, TypedDict, TypeVar, cast +from collections.abc import Callable, Mapping, MutableMapping, MutableSequence, Sequence +from typing import Any, ClassVar, Generic, TypedDict, TypeVar, cast from agent_framework import ( AGENT_FRAMEWORK_USER_AGENT, + ChatAgent, ChatMessage, + ChatMessageStoreProtocol, + ContextProvider, HostedMCPTool, - TextContent, + Middleware, + ToolProtocol, get_logger, use_chat_middleware, use_function_invocation, ) from agent_framework.exceptions import ServiceInitializationError from agent_framework.observability import use_instrumentation +from agent_framework.openai import OpenAIResponsesOptions from agent_framework.openai._responses_client import OpenAIBaseResponsesClient from azure.ai.projects.aio import AIProjectClient -from azure.ai.projects.models import ( - MCPTool, - PromptAgentDefinition, - PromptAgentDefinitionText, -) +from azure.ai.projects.models import MCPTool, PromptAgentDefinition, PromptAgentDefinitionText, RaiConfig, Reasoning from azure.core.credentials_async import AsyncTokenCredential from azure.core.exceptions import ResourceNotFoundError from pydantic import ValidationError -from ._shared import AzureAISettings, create_text_format_config - -if TYPE_CHECKING: - from agent_framework.openai import OpenAIResponsesOptions +from ._shared import AzureAISettings, _extract_project_connection_id, create_text_format_config if sys.version_info >= (3, 13): from typing import TypeVar # type: ignore # pragma: no cover @@ -47,10 +45,21 @@ logger = get_logger("agent_framework.azure") + +class AzureAIProjectAgentOptions(OpenAIResponsesOptions, total=False): + """Azure AI Project Agent options.""" + + rai_config: RaiConfig + """Configuration for Responsible AI (RAI) content filtering and safety features.""" + + reasoning: Reasoning # type: ignore[misc] + """Configuration for enabling reasoning capabilities (requires 
azure.ai.projects.models.Reasoning).""" + + TAzureAIClientOptions = TypeVar( "TAzureAIClientOptions", bound=TypedDict, # type: ignore[valid-type] - default="OpenAIResponsesOptions", + default="AzureAIProjectAgentOptions", covariant=True, ) @@ -332,6 +341,10 @@ async def _get_agent_reference_or_create( args["temperature"] = run_options["temperature"] if "top_p" in run_options: args["top_p"] = run_options["top_p"] + if "reasoning" in run_options: + args["reasoning"] = run_options["reasoning"] + if "rai_config" in run_options: + args["rai_config"] = run_options["rai_config"] # response_format is accessed from chat_options or additional_properties # since the base class excludes it from run_options @@ -392,10 +405,12 @@ async def _prepare_options( "model", "tools", "response_format", + "rai_config", "temperature", "top_p", "text", "text_format", + "reasoning", ] for property in exclude: @@ -464,8 +479,8 @@ def _prepare_messages_for_azure_ai( # System/developer messages are turned into instructions, since there is no such message roles in Azure AI. 
for message in messages: if message.role.value in ["system", "developer"]: - for text_content in [content for content in message.contents if isinstance(content, TextContent)]: - instructions_list.append(text_content.text) + for text_content in [content for content in message.contents if content.type == "text"]: + instructions_list.append(text_content.text) # type: ignore[arg-type] else: result.append(message) @@ -497,6 +512,17 @@ def _prepare_mcp_tool(tool: HostedMCPTool) -> MCPTool: # type: ignore[override] """Get MCP tool from HostedMCPTool.""" mcp = MCPTool(server_label=tool.name.replace(" ", "_"), server_url=str(tool.url)) + if tool.description: + mcp["server_description"] = tool.description + + # Check for project_connection_id in additional_properties (for Azure AI Foundry connections) + project_connection_id = _extract_project_connection_id(tool.additional_properties) + if project_connection_id: + mcp["project_connection_id"] = project_connection_id + elif tool.headers: + # Only use headers if no project_connection_id is available + mcp["headers"] = tool.headers + if tool.allowed_tools: mcp["allowed_tools"] = list(tool.allowed_tools) @@ -511,3 +537,59 @@ def _prepare_mcp_tool(tool: HostedMCPTool) -> MCPTool: # type: ignore[override] mcp["require_approval"] = {"never": {"tool_names": list(never_require_approvals)}} return mcp + + @override + def as_agent( + self, + *, + id: str | None = None, + name: str | None = None, + description: str | None = None, + instructions: str | None = None, + tools: ToolProtocol + | Callable[..., Any] + | MutableMapping[str, Any] + | Sequence[ToolProtocol | Callable[..., Any] | MutableMapping[str, Any]] + | None = None, + default_options: TAzureAIClientOptions | None = None, + chat_message_store_factory: Callable[[], ChatMessageStoreProtocol] | None = None, + context_provider: ContextProvider | None = None, + middleware: Sequence[Middleware] | None = None, + **kwargs: Any, + ) -> ChatAgent[TAzureAIClientOptions]: + """Convert 
this chat client to a ChatAgent. + + This method creates a ChatAgent instance with this client pre-configured. + It does NOT create an agent on the Azure AI service - the actual agent + will be created on the server during the first invocation (run). + + For creating and managing persistent agents on the server, use + :class:`~agent_framework_azure_ai.AzureAIProjectAgentProvider` instead. + + Keyword Args: + id: The unique identifier for the agent. Will be created automatically if not provided. + name: The name of the agent. + description: A brief description of the agent's purpose. + instructions: Optional instructions for the agent. + tools: The tools to use for the request. + default_options: A TypedDict containing chat options. + chat_message_store_factory: Factory function to create an instance of ChatMessageStoreProtocol. + context_provider: Context providers to include during agent invocation. + middleware: List of middleware to intercept agent and function invocations. + kwargs: Any additional keyword arguments. + + Returns: + A ChatAgent instance configured with this chat client. 
+ """ + return super().as_agent( + id=id, + name=name, + description=description, + instructions=instructions, + tools=tools, + default_options=default_options, + chat_message_store_factory=chat_message_store_factory, + context_provider=context_provider, + middleware=middleware, + **kwargs, + ) diff --git a/python/packages/azure-ai/agent_framework_azure_ai/_project_provider.py b/python/packages/azure-ai/agent_framework_azure_ai/_project_provider.py index edad03f5b4..0cbb37b854 100644 --- a/python/packages/azure-ai/agent_framework_azure_ai/_project_provider.py +++ b/python/packages/azure-ai/agent_framework_azure_ai/_project_provider.py @@ -2,7 +2,7 @@ import sys from collections.abc import Callable, MutableMapping, Sequence -from typing import TYPE_CHECKING, Any, Generic, TypedDict +from typing import Any, Generic, TypedDict from agent_framework import ( AGENT_FRAMEWORK_USER_AGENT, @@ -14,6 +14,7 @@ get_logger, normalize_tools, ) +from agent_framework._mcp import MCPTool from agent_framework.exceptions import ServiceInitializationError from azure.ai.projects.aio import AIProjectClient from azure.ai.projects.models import ( @@ -26,12 +27,9 @@ from azure.core.credentials_async import AsyncTokenCredential from pydantic import ValidationError -from ._client import AzureAIClient +from ._client import AzureAIClient, AzureAIProjectAgentOptions from ._shared import AzureAISettings, create_text_format_config, from_azure_ai_tools, to_azure_ai_tools -if TYPE_CHECKING: - from agent_framework.openai import OpenAIResponsesOptions - if sys.version_info >= (3, 13): from typing import Self, TypeVar # pragma: no cover else: @@ -46,7 +44,7 @@ TOptions_co = TypeVar( "TOptions_co", bound=TypedDict, # type: ignore[valid-type] - default="OpenAIResponsesOptions", + default="AzureAIProjectAgentOptions", covariant=True, ) @@ -193,9 +191,11 @@ async def create_agent( "or set 'AZURE_AI_MODEL_DEPLOYMENT_NAME' environment variable." 
) - # Extract response_format from default_options if present + # Extract options from default_options if present opts = dict(default_options) if default_options else {} response_format = opts.get("response_format") + rai_config = opts.get("rai_config") + reasoning = opts.get("reasoning") args: dict[str, Any] = {"model": resolved_model} @@ -205,11 +205,37 @@ async def create_agent( args["text"] = PromptAgentDefinitionText( format=create_text_format_config(response_format) # type: ignore[arg-type] ) + if rai_config: + args["rai_config"] = rai_config + if reasoning: + args["reasoning"] = reasoning - # Normalize tools once and reuse for both Azure AI API and ChatAgent + # Normalize tools and separate MCP tools from other tools normalized_tools = normalize_tools(tools) + mcp_tools: list[MCPTool] = [] + non_mcp_tools: list[ToolProtocol | MutableMapping[str, Any]] = [] + if normalized_tools: - args["tools"] = to_azure_ai_tools(normalized_tools) + for tool in normalized_tools: + if isinstance(tool, MCPTool): + mcp_tools.append(tool) + else: + non_mcp_tools.append(tool) + + # Connect MCP tools and discover their functions BEFORE creating the agent + # This is required because Azure AI Responses API doesn't accept tools at request time + mcp_discovered_functions: list[AIFunction[Any, Any]] = [] + for mcp_tool in mcp_tools: + if not mcp_tool.is_connected: + await mcp_tool.connect() + mcp_discovered_functions.extend(mcp_tool.functions) + + # Combine non-MCP tools with discovered MCP functions for Azure AI + all_tools_for_azure: list[ToolProtocol | MutableMapping[str, Any]] = list(non_mcp_tools) + all_tools_for_azure.extend(mcp_discovered_functions) + + if all_tools_for_azure: + args["tools"] = to_azure_ai_tools(all_tools_for_azure) created_agent = await self._project_client.agents.create_version( agent_name=name, @@ -404,10 +430,12 @@ def _merge_tools( continue merged.append(hosted_tool) - # Add user-provided function tools (these have the actual implementations) + # Add 
user-provided function tools and MCP tools if provided_tools: for provided_tool in provided_tools: - if isinstance(provided_tool, AIFunction): + # AIFunction - has implementation for function calling + # MCPTool - ChatAgent handles MCP connection and tool discovery at runtime + if isinstance(provided_tool, (AIFunction, MCPTool)): merged.append(provided_tool) # type: ignore[reportUnknownArgumentType] return merged diff --git a/python/packages/azure-ai/agent_framework_azure_ai/_shared.py b/python/packages/azure-ai/agent_framework_azure_ai/_shared.py index b99a3b5f66..aa5d114ba5 100644 --- a/python/packages/azure-ai/agent_framework_azure_ai/_shared.py +++ b/python/packages/azure-ai/agent_framework_azure_ai/_shared.py @@ -6,12 +6,11 @@ from agent_framework import ( AIFunction, - Contents, + Content, HostedCodeInterpreterTool, - HostedFileContent, HostedFileSearchTool, + HostedImageGenerationTool, HostedMCPTool, - HostedVectorStoreContent, HostedWebSearchTool, ToolProtocol, get_logger, @@ -31,6 +30,8 @@ CodeInterpreterTool, CodeInterpreterToolAuto, FunctionTool, + ImageGenTool, + ImageGenToolInputImageMask, MCPTool, ResponseTextFormatConfigurationJsonObject, ResponseTextFormatConfigurationJsonSchema, @@ -87,6 +88,37 @@ class AzureAISettings(AFBaseSettings): model_deployment_name: str | None = None +def _extract_project_connection_id(additional_properties: dict[str, Any] | None) -> str | None: + """Extract project_connection_id from HostedMCPTool additional_properties. + + Checks for both direct 'project_connection_id' key (programmatic usage) + and 'connection.name' structure (declarative/YAML usage). + + Args: + additional_properties: The additional_properties dict from a HostedMCPTool. + + Returns: + The project_connection_id if found, None otherwise. 
+ """ + if not additional_properties: + return None + + # Check for direct project_connection_id (programmatic usage) + project_connection_id = additional_properties.get("project_connection_id") + if isinstance(project_connection_id, str): + return project_connection_id + + # Check for connection.name structure (declarative/YAML usage) + if "connection" in additional_properties: + conn = additional_properties["connection"] + if isinstance(conn, dict): + name = conn.get("name") + if isinstance(name, str): + return name + + return None + + def to_azure_ai_agent_tools( tools: Sequence[ToolProtocol | MutableMapping[str, Any]] | None, run_options: dict[str, Any] | None = None, @@ -158,9 +190,9 @@ def to_azure_ai_agent_tools( ) tool_definitions.extend(mcp_tool.definitions) case HostedFileSearchTool(): - vector_stores = [inp for inp in tool.inputs or [] if isinstance(inp, HostedVectorStoreContent)] + vector_stores = [inp for inp in tool.inputs or [] if inp.type == "hosted_vector_store"] if vector_stores: - file_search = AgentsFileSearchTool(vector_store_ids=[vs.vector_store_id for vs in vector_stores]) + file_search = AgentsFileSearchTool(vector_store_ids=[vs.vector_store_id for vs in vector_stores]) # type: ignore[misc] tool_definitions.extend(file_search.definitions) # Set tool_resources for file search to work properly with Azure AI if run_options is not None and "tool_resources" not in run_options: @@ -216,7 +248,7 @@ def _convert_dict_tool(tool: dict[str, Any]) -> ToolProtocol | dict[str, Any] | if tool_type == "file_search": file_search_config = tool.get("file_search", {}) vector_store_ids = file_search_config.get("vector_store_ids", []) - inputs = [HostedVectorStoreContent(vector_store_id=vs_id) for vs_id in vector_store_ids] + inputs = [Content.from_hosted_vector_store(vector_store_id=vs_id) for vs_id in vector_store_ids] return HostedFileSearchTool(inputs=inputs if inputs else None) # type: ignore if tool_type == "bing_grounding": @@ -256,7 +288,7 @@ def 
_convert_sdk_tool(tool: ToolDefinition) -> ToolProtocol | dict[str, Any] | N if tool_type == "file_search": file_search_config = getattr(tool, "file_search", None) vector_store_ids = getattr(file_search_config, "vector_store_ids", []) if file_search_config else [] - inputs = [HostedVectorStoreContent(vector_store_id=vs_id) for vs_id in vector_store_ids] + inputs = [Content.from_hosted_vector_store(vector_store_id=vs_id) for vs_id in vector_store_ids] return HostedFileSearchTool(inputs=inputs if inputs else None) # type: ignore if tool_type == "bing_grounding": @@ -322,6 +354,11 @@ def from_azure_ai_tools(tools: Sequence[Tool | dict[str, Any]] | None) -> list[T if "never" in require_approval: approval_mode["never_require_approval"] = set(require_approval["never"].get("tool_names", [])) # type: ignore + # Preserve project_connection_id in additional_properties + additional_props: dict[str, Any] | None = None + if project_connection_id := mcp_tool.get("project_connection_id"): + additional_props = {"connection": {"name": project_connection_id}} + agent_tools.append( HostedMCPTool( name=mcp_tool.get("server_label", "").replace("_", " "), @@ -330,23 +367,24 @@ def from_azure_ai_tools(tools: Sequence[Tool | dict[str, Any]] | None) -> list[T headers=mcp_tool.get("headers"), allowed_tools=mcp_tool.get("allowed_tools"), approval_mode=approval_mode, # type: ignore + additional_properties=additional_props, ) ) elif tool_type == "code_interpreter": ci_tool = cast(CodeInterpreterTool, tool_dict) container = ci_tool.get("container", {}) - ci_inputs: list[Contents] = [] + ci_inputs: list[Content] = [] if "file_ids" in container: for file_id in container["file_ids"]: - ci_inputs.append(HostedFileContent(file_id=file_id)) + ci_inputs.append(Content.from_hosted_file(file_id=file_id)) agent_tools.append(HostedCodeInterpreterTool(inputs=ci_inputs if ci_inputs else None)) # type: ignore elif tool_type == "file_search": fs_tool = cast(ProjectsFileSearchTool, tool_dict) - fs_inputs: 
list[Contents] = [] + fs_inputs: list[Content] = [] if "vector_store_ids" in fs_tool: for vs_id in fs_tool["vector_store_ids"]: - fs_inputs.append(HostedVectorStoreContent(vector_store_id=vs_id)) + fs_inputs.append(Content.from_hosted_vector_store(vector_store_id=vs_id)) agent_tools.append( HostedFileSearchTool( @@ -396,8 +434,8 @@ def to_azure_ai_tools( file_ids: list[str] = [] if tool.inputs: for tool_input in tool.inputs: - if isinstance(tool_input, HostedFileContent): - file_ids.append(tool_input.file_id) + if tool_input.type == "hosted_file": + file_ids.append(tool_input.file_id) # type: ignore[misc, arg-type] container = CodeInterpreterToolAuto(file_ids=file_ids if file_ids else None) ci_tool: CodeInterpreterTool = CodeInterpreterTool(container=container) azure_tools.append(ci_tool) @@ -416,11 +454,14 @@ def to_azure_ai_tools( if not tool.inputs: raise ValueError("HostedFileSearchTool requires inputs to be specified.") vector_store_ids: list[str] = [ - inp.vector_store_id for inp in tool.inputs if isinstance(inp, HostedVectorStoreContent) + inp.vector_store_id # type: ignore[misc] + for inp in tool.inputs + if inp.type == "hosted_vector_store" ] if not vector_store_ids: raise ValueError( - "HostedFileSearchTool requires inputs to be of type `HostedVectorStoreContent`." + "HostedFileSearchTool requires inputs to be of type `Content` with " + "type 'hosted_vector_store'." 
) fs_tool: ProjectsFileSearchTool = ProjectsFileSearchTool(vector_store_ids=vector_store_ids) if tool.max_results: @@ -442,6 +483,31 @@ def to_azure_ai_tools( timezone=location.get("timezone"), ) azure_tools.append(ws_tool) + case HostedImageGenerationTool(): + opts = tool.options or {} + addl = tool.additional_properties or {} + # Azure ImageGenTool requires the constant model "gpt-image-1" + ig_tool: ImageGenTool = ImageGenTool( + model=opts.get("model_id", "gpt-image-1"), # type: ignore + size=cast( + Literal["1024x1024", "1024x1536", "1536x1024", "auto"] | None, opts.get("image_size") + ), + output_format=cast(Literal["png", "webp", "jpeg"] | None, opts.get("media_type")), + input_image_mask=( + ImageGenToolInputImageMask( + image_url=addl.get("input_image_mask", {}).get("image_url"), + file_id=addl.get("input_image_mask", {}).get("file_id"), + ) + if isinstance(addl.get("input_image_mask"), dict) + else None + ), + quality=cast(Literal["low", "medium", "high", "auto"] | None, addl.get("quality")), + background=cast(Literal["transparent", "opaque", "auto"] | None, addl.get("background")), + output_compression=cast(int | None, addl.get("output_compression")), + moderation=cast(Literal["auto", "low"] | None, addl.get("moderation")), + partial_images=opts.get("streaming_count"), + ) + azure_tools.append(ig_tool) case _: logger.debug("Unsupported tool passed (type: %s)", type(tool)) else: @@ -466,7 +532,13 @@ def _prepare_mcp_tool_for_azure_ai(tool: HostedMCPTool) -> MCPTool: if tool.description: mcp["server_description"] = tool.description - if tool.headers: + # Check for project_connection_id in additional_properties (for Azure AI Foundry connections) + project_connection_id = _extract_project_connection_id(tool.additional_properties) + if project_connection_id: + mcp["project_connection_id"] = project_connection_id + elif tool.headers: + # Only use headers if no project_connection_id is available + # Note: Azure AI Agent Service may reject headers with sensitive 
info mcp["headers"] = tool.headers if tool.allowed_tools: diff --git a/python/packages/azure-ai/pyproject.toml b/python/packages/azure-ai/pyproject.toml index 65c2b2c0c9..5151e10f7b 100644 --- a/python/packages/azure-ai/pyproject.toml +++ b/python/packages/azure-ai/pyproject.toml @@ -4,7 +4,7 @@ description = "Azure AI Foundry integration for Microsoft Agent Framework." authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] readme = "README.md" requires-python = ">=3.10" -version = "1.0.0b260114" +version = "1.0.0b260116" license-files = ["LICENSE"] urls.homepage = "https://aka.ms/agent-framework" urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" @@ -24,7 +24,7 @@ classifiers = [ ] dependencies = [ "agent-framework-core", - "azure-ai-projects >= 2.0.0b2", + "azure-ai-projects >= 2.0.0b3", "azure-ai-agents == 1.2.0b5", "aiohttp", ] diff --git a/python/packages/azure-ai/tests/test_agent_provider.py b/python/packages/azure-ai/tests/test_agent_provider.py index 3df8d318ec..edfd749f4c 100644 --- a/python/packages/azure-ai/tests/test_agent_provider.py +++ b/python/packages/azure-ai/tests/test_agent_provider.py @@ -7,10 +7,10 @@ import pytest from agent_framework import ( ChatAgent, + Content, HostedCodeInterpreterTool, HostedFileSearchTool, HostedMCPTool, - HostedVectorStoreContent, HostedWebSearchTool, ai_function, ) @@ -509,7 +509,7 @@ def test_to_azure_ai_agent_tools_code_interpreter() -> None: def test_to_azure_ai_agent_tools_file_search() -> None: """Test converting HostedFileSearchTool with vector stores.""" - tool = HostedFileSearchTool(inputs=[HostedVectorStoreContent(vector_store_id="vs-123")]) + tool = HostedFileSearchTool(inputs=[Content.from_hosted_vector_store(vector_store_id="vs-123")]) run_options: dict[str, Any] = {} result = to_azure_ai_agent_tools([tool], run_options) diff --git a/python/packages/azure-ai/tests/test_azure_ai_agent_client.py b/python/packages/azure-ai/tests/test_azure_ai_agent_client.py index 
21bedbf710..7b20caea7d 100644 --- a/python/packages/azure-ai/tests/test_azure_ai_agent_client.py +++ b/python/packages/azure-ai/tests/test_azure_ai_agent_client.py @@ -17,19 +17,12 @@ ChatOptions, ChatResponse, ChatResponseUpdate, - CitationAnnotation, - FunctionApprovalRequestContent, - FunctionApprovalResponseContent, - FunctionCallContent, - FunctionResultContent, + Content, HostedCodeInterpreterTool, - HostedFileContent, HostedFileSearchTool, HostedMCPTool, - HostedVectorStoreContent, + HostedWebSearchTool, Role, - TextContent, - UriContent, ) from agent_framework._serialization import SerializationMixin from agent_framework.exceptions import ServiceInitializationError @@ -368,7 +361,7 @@ async def test_azure_ai_chat_client_prepare_options_with_image_content(mock_agen # Mock get_agent mock_agents_client.get_agent = AsyncMock(return_value=None) - image_content = UriContent(uri="https://example.com/image.jpg", media_type="image/jpeg") + image_content = Content.from_uri(uri="https://example.com/image.jpg", media_type="image/jpeg") messages = [ChatMessage(role=Role.USER, contents=[image_content])] run_options, _ = await chat_client._prepare_options(messages, {}) # type: ignore @@ -551,7 +544,7 @@ def test_azure_ai_chat_client_parse_function_calls_from_azure_ai_basic(mock_agen result = chat_client._parse_function_calls_from_azure_ai(mock_event_data, "response_123") # type: ignore assert len(result) == 1 - assert isinstance(result[0], FunctionCallContent) + assert result[0].type == "function_call" assert result[0].name == "get_weather" assert result[0].call_id == '["response_123", "call_123"]' @@ -728,6 +721,121 @@ async def test_azure_ai_chat_client_prepare_options_mcp_with_headers(mock_agents assert mcp_resource["headers"] == headers +async def test_azure_ai_chat_client_prepare_tools_for_azure_ai_web_search_bing_grounding( + mock_agents_client: MagicMock, +) -> None: + """Test _prepare_tools_for_azure_ai with HostedWebSearchTool using Bing Grounding.""" + + 
chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") + + web_search_tool = HostedWebSearchTool( + additional_properties={ + "connection_id": "test-connection-id", + "count": 5, + "freshness": "Day", + "market": "en-US", + "set_lang": "en", + } + ) + + # Mock BingGroundingTool + with patch("agent_framework_azure_ai._chat_client.BingGroundingTool") as mock_bing_grounding: + mock_bing_tool = MagicMock() + mock_bing_tool.definitions = [{"type": "bing_grounding"}] + mock_bing_grounding.return_value = mock_bing_tool + + result = await chat_client._prepare_tools_for_azure_ai([web_search_tool]) # type: ignore + + assert len(result) == 1 + assert result[0] == {"type": "bing_grounding"} + call_args = mock_bing_grounding.call_args[1] + assert call_args["count"] == 5 + assert call_args["freshness"] == "Day" + assert call_args["market"] == "en-US" + assert call_args["set_lang"] == "en" + assert "connection_id" in call_args + + +async def test_azure_ai_chat_client_prepare_tools_for_azure_ai_web_search_bing_grounding_with_connection_id( + mock_agents_client: MagicMock, +) -> None: + """Test _prepare_tools_... 
with HostedWebSearchTool using Bing Grounding with connection_id (no HTTP call).""" + + chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") + + web_search_tool = HostedWebSearchTool( + additional_properties={ + "connection_id": "direct-connection-id", + "count": 3, + } + ) + + # Mock BingGroundingTool + with patch("agent_framework_azure_ai._chat_client.BingGroundingTool") as mock_bing_grounding: + mock_bing_tool = MagicMock() + mock_bing_tool.definitions = [{"type": "bing_grounding"}] + mock_bing_grounding.return_value = mock_bing_tool + + result = await chat_client._prepare_tools_for_azure_ai([web_search_tool]) # type: ignore + + assert len(result) == 1 + assert result[0] == {"type": "bing_grounding"} + mock_bing_grounding.assert_called_once_with(connection_id="direct-connection-id", count=3) + + +async def test_azure_ai_chat_client_prepare_tools_for_azure_ai_web_search_custom_bing( + mock_agents_client: MagicMock, +) -> None: + """Test _prepare_tools_for_azure_ai with HostedWebSearchTool using Custom Bing Search.""" + + chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") + + web_search_tool = HostedWebSearchTool( + additional_properties={ + "custom_connection_id": "custom-connection-id", + "custom_instance_name": "custom-instance", + "count": 10, + } + ) + + # Mock BingCustomSearchTool + with patch("agent_framework_azure_ai._chat_client.BingCustomSearchTool") as mock_custom_bing: + mock_custom_tool = MagicMock() + mock_custom_tool.definitions = [{"type": "bing_custom_search"}] + mock_custom_bing.return_value = mock_custom_tool + + result = await chat_client._prepare_tools_for_azure_ai([web_search_tool]) # type: ignore + + assert len(result) == 1 + assert result[0] == {"type": "bing_custom_search"} + + +async def test_azure_ai_chat_client_prepare_tools_for_azure_ai_file_search_with_vector_stores( + mock_agents_client: MagicMock, +) -> None: + """Test _prepare_tools_for_azure_ai with 
HostedFileSearchTool using vector stores.""" + + chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") + + vector_store_input = Content.from_hosted_vector_store(vector_store_id="vs-123") + file_search_tool = HostedFileSearchTool(inputs=[vector_store_input]) + + # Mock FileSearchTool + with patch("agent_framework_azure_ai._chat_client.FileSearchTool") as mock_file_search: + mock_file_tool = MagicMock() + mock_file_tool.definitions = [{"type": "file_search"}] + mock_file_tool.resources = {"vector_store_ids": ["vs-123"]} + mock_file_search.return_value = mock_file_tool + + run_options = {} + result = await chat_client._prepare_tools_for_azure_ai([file_search_tool], run_options) # type: ignore + + assert len(result) == 1 + assert result[0] == {"type": "file_search"} + assert run_options["tool_resources"] == {"vector_store_ids": ["vs-123"]} + mock_file_search.assert_called_once_with(vector_store_ids=["vs-123"]) + + async def test_azure_ai_chat_client_create_agent_stream_submit_tool_approvals( mock_agents_client: MagicMock, ) -> None: @@ -741,9 +849,9 @@ async def test_azure_ai_chat_client_create_agent_stream_submit_tool_approvals( chat_client._get_active_thread_run = AsyncMock(return_value=mock_thread_run) # type: ignore # Mock required action results with approval response that matches run ID - approval_response = FunctionApprovalResponseContent( + approval_response = Content.from_function_approval_response( id='["test-run-id", "test-call-id"]', - function_call=FunctionCallContent( + function_call=Content.from_function_call( call_id='["test-run-id", "test-call-id"]', name="test_function", arguments="{}" ), approved=True, @@ -839,7 +947,7 @@ async def test_azure_ai_chat_client_prepare_tool_outputs_for_azure_ai_function_r chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") # Test with simple result - function_result = FunctionResultContent(call_id='["run_123", "call_456"]', result="Simple result") 
+ function_result = Content.from_function_result(call_id='["run_123", "call_456"]', result="Simple result") run_id, tool_outputs, tool_approvals = chat_client._prepare_tool_outputs_for_azure_ai([function_result]) # type: ignore @@ -857,7 +965,7 @@ async def test_azure_ai_chat_client_convert_required_action_invalid_call_id(mock chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") # Invalid call_id format - should raise JSONDecodeError - function_result = FunctionResultContent(call_id="invalid_json", result="result") + function_result = Content.from_function_result(call_id="invalid_json", result="result") with pytest.raises(json.JSONDecodeError): chat_client._prepare_tool_outputs_for_azure_ai([function_result]) # type: ignore @@ -870,7 +978,7 @@ async def test_azure_ai_chat_client_convert_required_action_invalid_structure( chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") # Valid JSON but invalid structure (missing second element) - function_result = FunctionResultContent(call_id='["run_123"]', result="result") + function_result = Content.from_function_result(call_id='["run_123"]', result="result") run_id, tool_outputs, tool_approvals = chat_client._prepare_tool_outputs_for_azure_ai([function_result]) # type: ignore @@ -894,7 +1002,7 @@ def __init__(self, name: str, value: int): # Test with BaseModel result mock_result = MockResult(name="test", value=42) - function_result = FunctionResultContent(call_id='["run_123", "call_456"]', result=mock_result) + function_result = Content.from_function_result(call_id='["run_123", "call_456"]', result=mock_result) run_id, tool_outputs, tool_approvals = chat_client._prepare_tool_outputs_for_azure_ai([function_result]) # type: ignore @@ -922,7 +1030,7 @@ def __init__(self, data: str): # Test with multiple results - mix of BaseModel and regular objects mock_basemodel = MockResult(data="model_data") results_list = [mock_basemodel, {"key": "value"}, 
"string_result"] - function_result = FunctionResultContent(call_id='["run_123", "call_456"]', result=results_list) + function_result = Content.from_function_result(call_id='["run_123", "call_456"]', result=results_list) run_id, tool_outputs, tool_approvals = chat_client._prepare_tool_outputs_for_azure_ai([function_result]) # type: ignore @@ -948,9 +1056,11 @@ async def test_azure_ai_chat_client_convert_required_action_approval_response( chat_client = create_test_azure_ai_chat_client(mock_agents_client, agent_id="test-agent") # Test with approval response - need to provide required fields - approval_response = FunctionApprovalResponseContent( + approval_response = Content.from_function_approval_response( id='["run_123", "call_456"]', - function_call=FunctionCallContent(call_id='["run_123", "call_456"]', name="test_function", arguments="{}"), + function_call=Content.from_function_call( + call_id='["run_123", "call_456"]', name="test_function", arguments="{}" + ), approved=True, ) @@ -985,7 +1095,7 @@ async def test_azure_ai_chat_client_parse_function_calls_from_azure_ai_approval_ result = chat_client._parse_function_calls_from_azure_ai(mock_event_data, "response_123") # type: ignore assert len(result) == 1 - assert isinstance(result[0], FunctionApprovalRequestContent) + assert result[0].type == "function_approval_request" assert result[0].id == '["response_123", "approval_call_123"]' assert result[0].function_call.name == "approve_action" assert result[0].function_call.call_id == '["response_123", "approval_call_123"]' @@ -1064,7 +1174,7 @@ async def test_azure_ai_chat_client_create_agent_stream_submit_tool_outputs( chat_client._get_active_thread_run = AsyncMock(return_value=mock_thread_run) # type: ignore # Mock required action results with matching run ID - function_result = FunctionResultContent(call_id='["test-run-id", "test-call-id"]', result="test result") + function_result = Content.from_function_result(call_id='["test-run-id", "test-call-id"]', result="test 
result") # Mock submit_tool_outputs_stream mock_handler = MagicMock() @@ -1115,14 +1225,13 @@ def test_azure_ai_chat_client_extract_url_citations_with_citations(mock_agents_c # Verify results assert len(citations) == 1 citation = citations[0] - assert isinstance(citation, CitationAnnotation) - assert citation.url == "https://example.com/test" - assert citation.title == "Test Title" - assert citation.snippet is None - assert citation.annotated_regions is not None - assert len(citation.annotated_regions) == 1 - assert citation.annotated_regions[0].start_index == 10 - assert citation.annotated_regions[0].end_index == 20 + assert citation["url"] == "https://example.com/test" + assert citation["title"] == "Test Title" + assert citation["snippet"] is None + assert citation["annotated_regions"] is not None + assert len(citation["annotated_regions"]) == 1 + assert citation["annotated_regions"][0]["start_index"] == 10 + assert citation["annotated_regions"][0]["end_index"] == 20 def test_azure_ai_chat_client_extract_file_path_contents_with_file_path_annotation( @@ -1158,7 +1267,7 @@ def test_azure_ai_chat_client_extract_file_path_contents_with_file_path_annotati # Verify results assert len(file_contents) == 1 - assert isinstance(file_contents[0], HostedFileContent) + assert file_contents[0].type == "hosted_file" assert file_contents[0].file_id == "assistant-test-file-123" @@ -1195,7 +1304,7 @@ def test_azure_ai_chat_client_extract_file_path_contents_with_file_citation_anno # Verify results assert len(file_contents) == 1 - assert isinstance(file_contents[0], HostedFileContent) + assert file_contents[0].type == "hosted_file" assert file_contents[0].file_id == "cfile_test-citation-456" @@ -1305,7 +1414,7 @@ async def test_azure_ai_chat_client_streaming() -> None: assert chunk is not None assert isinstance(chunk, ChatResponseUpdate) for content in chunk.contents: - if isinstance(content, TextContent) and content.text: + if content.type == "text" and content.text: full_message += 
content.text assert any(word in full_message.lower() for word in ["sunny", "25"]) @@ -1331,7 +1440,7 @@ async def test_azure_ai_chat_client_streaming_tools() -> None: assert chunk is not None assert isinstance(chunk, ChatResponseUpdate) for content in chunk.contents: - if isinstance(content, TextContent) and content.text: + if content.type == "text" and content.text: full_message += content.text assert any(word in full_message.lower() for word in ["sunny", "25"]) @@ -1476,7 +1585,9 @@ async def test_azure_ai_chat_client_agent_file_search(): ) # 2. Create file search tool with uploaded resources - file_search_tool = HostedFileSearchTool(inputs=[HostedVectorStoreContent(vector_store_id=vector_store.id)]) + file_search_tool = HostedFileSearchTool( + inputs=[Content.from_hosted_vector_store(vector_store_id=vector_store.id)] + ) async with ChatAgent( chat_client=client, @@ -1795,7 +1906,7 @@ def test_azure_ai_chat_client_extract_url_citations_with_azure_search_enhanced_u # Verify real URL was used assert len(citations) == 1 citation = citations[0] - assert citation.url == "https://real-example.com/doc2" # doc_1 maps to index 1 + assert citation["url"] == "https://real-example.com/doc2" # doc_1 maps to index 1 def test_azure_ai_chat_client_init_with_auto_created_agents_client( diff --git a/python/packages/azure-ai/tests/test_azure_ai_client.py b/python/packages/azure-ai/tests/test_azure_ai_client.py index dad8f049fe..aba45b3f1b 100644 --- a/python/packages/azure-ai/tests/test_azure_ai_client.py +++ b/python/packages/azure-ai/tests/test_azure_ai_client.py @@ -16,14 +16,12 @@ ChatMessage, ChatOptions, ChatResponse, + Content, HostedCodeInterpreterTool, - HostedFileContent, HostedFileSearchTool, HostedMCPTool, - HostedVectorStoreContent, HostedWebSearchTool, Role, - TextContent, ) from agent_framework.exceptions import ServiceInitializationError from azure.ai.projects.aio import AIProjectClient @@ -298,9 +296,9 @@ async def 
test_prepare_messages_for_azure_ai_with_system_messages( client = create_test_azure_ai_client(mock_project_client) messages = [ - ChatMessage(role=Role.SYSTEM, contents=[TextContent(text="You are a helpful assistant.")]), - ChatMessage(role=Role.USER, contents=[TextContent(text="Hello")]), - ChatMessage(role=Role.ASSISTANT, contents=[TextContent(text="System response")]), + ChatMessage(role=Role.SYSTEM, contents=[Content.from_text(text="You are a helpful assistant.")]), + ChatMessage(role=Role.USER, contents=[Content.from_text(text="Hello")]), + ChatMessage(role=Role.ASSISTANT, contents=[Content.from_text(text="System response")]), ] result_messages, instructions = client._prepare_messages_for_azure_ai(messages) # type: ignore @@ -318,8 +316,8 @@ async def test_prepare_messages_for_azure_ai_no_system_messages( client = create_test_azure_ai_client(mock_project_client) messages = [ - ChatMessage(role=Role.USER, contents=[TextContent(text="Hello")]), - ChatMessage(role=Role.ASSISTANT, contents=[TextContent(text="Hi there!")]), + ChatMessage(role=Role.USER, contents=[Content.from_text(text="Hello")]), + ChatMessage(role=Role.ASSISTANT, contents=[Content.from_text(text="Hi there!")]), ] result_messages, instructions = client._prepare_messages_for_azure_ai(messages) # type: ignore @@ -419,7 +417,7 @@ async def test_prepare_options_basic(mock_project_client: MagicMock) -> None: """Test prepare_options basic functionality.""" client = create_test_azure_ai_client(mock_project_client, agent_name="test-agent", agent_version="1.0") - messages = [ChatMessage(role=Role.USER, contents=[TextContent(text="Hello")])] + messages = [ChatMessage(role=Role.USER, contents=[Content.from_text(text="Hello")])] with ( patch.object(client.__class__.__bases__[0], "_prepare_options", return_value={"model": "test-model"}), @@ -453,7 +451,7 @@ async def test_prepare_options_with_application_endpoint( agent_version="1", ) - messages = [ChatMessage(role=Role.USER, 
contents=[TextContent(text="Hello")])] + messages = [ChatMessage(role=Role.USER, contents=[Content.from_text(text="Hello")])] with ( patch.object(client.__class__.__bases__[0], "_prepare_options", return_value={"model": "test-model"}), @@ -492,7 +490,7 @@ async def test_prepare_options_with_application_project_client( agent_version="1", ) - messages = [ChatMessage(role=Role.USER, contents=[TextContent(text="Hello")])] + messages = [ChatMessage(role=Role.USER, contents=[Content.from_text(text="Hello")])] with ( patch.object(client.__class__.__bases__[0], "_prepare_options", return_value={"model": "test-model"}), @@ -848,7 +846,7 @@ async def test_prepare_options_excludes_response_format( """Test that prepare_options excludes response_format, text, and text_format from final run options.""" client = create_test_azure_ai_client(mock_project_client, agent_name="test-agent", agent_version="1.0") - messages = [ChatMessage(role=Role.USER, contents=[TextContent(text="Hello")])] + messages = [ChatMessage(role=Role.USER, contents=[Content.from_text(text="Hello")])] chat_options: ChatOptions = {} with ( @@ -992,7 +990,7 @@ def test_from_azure_ai_tools() -> None: tool_input = parsed_tools[0].inputs[0] - assert tool_input and isinstance(tool_input, HostedFileContent) and tool_input.file_id == "file-1" + assert tool_input and tool_input.type == "hosted_file" and tool_input.file_id == "file-1" # Test File Search tool fs_tool = FileSearchTool(vector_store_ids=["vs-1"], max_num_results=5) @@ -1004,7 +1002,7 @@ def test_from_azure_ai_tools() -> None: tool_input = parsed_tools[0].inputs[0] - assert tool_input and isinstance(tool_input, HostedVectorStoreContent) and tool_input.vector_store_id == "vs-1" + assert tool_input and tool_input.type == "hosted_vector_store" and tool_input.vector_store_id == "vs-1" assert parsed_tools[0].max_results == 5 # Test Web Search tool diff --git a/python/packages/azure-ai/tests/test_provider.py b/python/packages/azure-ai/tests/test_provider.py index 
e3dfa0995a..2a9808db9c 100644 --- a/python/packages/azure-ai/tests/test_provider.py +++ b/python/packages/azure-ai/tests/test_provider.py @@ -4,7 +4,8 @@ from unittest.mock import AsyncMock, MagicMock, patch import pytest -from agent_framework import ChatAgent +from agent_framework import AIFunction, ChatAgent +from agent_framework._mcp import MCPTool from agent_framework.exceptions import ServiceInitializationError from azure.ai.projects.aio import AIProjectClient from azure.ai.projects.models import ( @@ -207,6 +208,93 @@ async def test_provider_create_agent_missing_model(mock_project_client: MagicMoc await provider.create_agent(name="test-agent") +async def test_provider_create_agent_with_rai_config( + mock_project_client: MagicMock, + azure_ai_unit_test_env: dict[str, str], +) -> None: + """Test AzureAIProjectAgentProvider.create_agent passes rai_config from default_options.""" + with patch("agent_framework_azure_ai._project_provider.AzureAISettings") as mock_settings: + mock_settings.return_value.project_endpoint = azure_ai_unit_test_env["AZURE_AI_PROJECT_ENDPOINT"] + mock_settings.return_value.model_deployment_name = azure_ai_unit_test_env["AZURE_AI_MODEL_DEPLOYMENT_NAME"] + + provider = AzureAIProjectAgentProvider(project_client=mock_project_client) + + # Mock agent creation response + mock_agent_version = MagicMock(spec=AgentVersionDetails) + mock_agent_version.id = "agent-id" + mock_agent_version.name = "test-agent" + mock_agent_version.version = "1.0" + mock_agent_version.description = None + mock_agent_version.definition = MagicMock(spec=PromptAgentDefinition) + mock_agent_version.definition.model = "gpt-4" + mock_agent_version.definition.instructions = None + mock_agent_version.definition.temperature = None + mock_agent_version.definition.top_p = None + mock_agent_version.definition.tools = [] + + mock_project_client.agents.create_version = AsyncMock(return_value=mock_agent_version) + + # Create a mock RaiConfig-like object + mock_rai_config = 
MagicMock() + mock_rai_config.rai_policy_name = "policy-name" + + # Call create_agent with rai_config in default_options + await provider.create_agent( + name="test-agent", + model="gpt-4", + default_options={"rai_config": mock_rai_config}, + ) + + # Verify rai_config was passed to PromptAgentDefinition + call_args = mock_project_client.agents.create_version.call_args + definition = call_args[1]["definition"] + assert definition.rai_config is mock_rai_config + + +async def test_provider_create_agent_with_reasoning( + mock_project_client: MagicMock, + azure_ai_unit_test_env: dict[str, str], +) -> None: + """Test AzureAIProjectAgentProvider.create_agent passes reasoning from default_options.""" + with patch("agent_framework_azure_ai._project_provider.AzureAISettings") as mock_settings: + mock_settings.return_value.project_endpoint = azure_ai_unit_test_env["AZURE_AI_PROJECT_ENDPOINT"] + mock_settings.return_value.model_deployment_name = azure_ai_unit_test_env["AZURE_AI_MODEL_DEPLOYMENT_NAME"] + + provider = AzureAIProjectAgentProvider(project_client=mock_project_client) + + # Mock agent creation response + mock_agent_version = MagicMock(spec=AgentVersionDetails) + mock_agent_version.id = "agent-id" + mock_agent_version.name = "test-agent" + mock_agent_version.version = "1.0" + mock_agent_version.description = None + mock_agent_version.definition = MagicMock(spec=PromptAgentDefinition) + mock_agent_version.definition.model = "gpt-5.2" + mock_agent_version.definition.instructions = None + mock_agent_version.definition.temperature = None + mock_agent_version.definition.top_p = None + mock_agent_version.definition.tools = [] + + mock_project_client.agents.create_version = AsyncMock(return_value=mock_agent_version) + + # Create a mock Reasoning-like object + mock_reasoning = MagicMock() + mock_reasoning.effort = "medium" + mock_reasoning.summary = "concise" + + # Call create_agent with reasoning in default_options + await provider.create_agent( + name="test-agent", + 
model="gpt-5.2", + default_options={"reasoning": mock_reasoning}, + ) + + # Verify reasoning was passed to PromptAgentDefinition + call_args = mock_project_client.agents.create_version.call_args + definition = call_args[1]["definition"] + assert definition.reasoning is mock_reasoning + + async def test_provider_get_agent_with_name(mock_project_client: MagicMock) -> None: """Test AzureAIProjectAgentProvider.get_agent with name parameter.""" provider = AzureAIProjectAgentProvider(project_client=mock_project_client) @@ -398,6 +486,170 @@ class TestSchema(BaseModel): assert "schema" in result +class MockMCPTool(MCPTool): # pyright: ignore[reportGeneralTypeIssues] + """A mock MCPTool subclass for testing that passes isinstance checks. + + Note: This intentionally does NOT call super().__init__() because MCPTool's + constructor requires MCP server connection parameters that aren't needed for + unit testing. We only need isinstance(obj, MCPTool) to return True. + """ + + def __init__(self, functions: list[AIFunction] | None = None) -> None: + self.name = "MockMCPTool" + self.description = "A mock MCP tool for testing" + self.is_connected = False + self._mock_functions = functions or [] + self._connect_called = False + + @property + def functions(self) -> list[AIFunction]: + return self._mock_functions + + async def connect(self, *, reset: bool = False) -> None: + self._connect_called = True + self.is_connected = True + + +@pytest.fixture +def mock_mcp_tool() -> MockMCPTool: + """Fixture that provides a mock MCPTool.""" + mock_functions = [ + create_mock_ai_function("mcp_function_1", "First MCP function"), + create_mock_ai_function("mcp_function_2", "Second MCP function"), + ] + return MockMCPTool(functions=mock_functions) + + +def create_mock_ai_function(name: str, description: str = "A mock function") -> AIFunction: + """Create a real AIFunction for testing.""" + + def mock_func(arg: str) -> str: + return f"Result from {name}: {arg}" + + return AIFunction(func=mock_func, 
name=name, description=description) + + +async def test_provider_create_agent_with_mcp_tool( + mock_project_client: MagicMock, + azure_ai_unit_test_env: dict[str, str], + mock_mcp_tool: "MockMCPTool", +) -> None: + """Test that create_agent connects MCP tools and passes discovered functions to Azure AI.""" + + # Patch normalize_tools to return tools as-is in a list (avoids callable check) + def mock_normalize_tools(tools): + if tools is None: + return [] + if isinstance(tools, list): + return tools + return [tools] + + with ( + patch("agent_framework_azure_ai._project_provider.AzureAISettings") as mock_settings, + patch("agent_framework_azure_ai._project_provider.to_azure_ai_tools") as mock_to_azure_tools, + patch("agent_framework_azure_ai._project_provider.normalize_tools", side_effect=mock_normalize_tools), + ): + mock_settings.return_value.project_endpoint = azure_ai_unit_test_env["AZURE_AI_PROJECT_ENDPOINT"] + mock_settings.return_value.model_deployment_name = azure_ai_unit_test_env["AZURE_AI_MODEL_DEPLOYMENT_NAME"] + mock_to_azure_tools.return_value = [{"type": "function", "name": "mcp_function_1"}] + + provider = AzureAIProjectAgentProvider(project_client=mock_project_client) + + # Mock agent creation response + mock_agent_version = MagicMock(spec=AgentVersionDetails) + mock_agent_version.id = "agent-id" + mock_agent_version.name = "test-agent" + mock_agent_version.version = "1.0" + mock_agent_version.description = "Test Agent" + mock_agent_version.definition = MagicMock(spec=PromptAgentDefinition) + mock_agent_version.definition.model = "gpt-4" + mock_agent_version.definition.instructions = "Test instructions" + mock_agent_version.definition.tools = [] + + mock_project_client.agents.create_version = AsyncMock(return_value=mock_agent_version) + + # Call create_agent with MCP tool + await provider.create_agent( + name="test-agent", + model="gpt-4", + instructions="Test instructions", + tools=mock_mcp_tool, + ) + + # Verify MCP tool was connected + assert 
mock_mcp_tool._connect_called is True + assert mock_mcp_tool.is_connected is True + + # Verify to_azure_ai_tools was called with the discovered MCP functions + mock_to_azure_tools.assert_called_once() + tools_passed = mock_to_azure_tools.call_args[0][0] + assert len(tools_passed) == 2 + assert tools_passed[0].name == "mcp_function_1" + assert tools_passed[1].name == "mcp_function_2" + + +async def test_provider_create_agent_with_mcp_and_regular_tools( + mock_project_client: MagicMock, + azure_ai_unit_test_env: dict[str, str], + mock_mcp_tool: "MockMCPTool", +) -> None: + """Test that create_agent handles both MCP tools and regular AIFunctions.""" + # Create a regular AIFunction + regular_function = create_mock_ai_function("regular_function", "A regular function") + + # Patch normalize_tools to return tools as-is in a list (avoids callable check) + def mock_normalize_tools(tools): + if tools is None: + return [] + if isinstance(tools, list): + return tools + return [tools] + + with ( + patch("agent_framework_azure_ai._project_provider.AzureAISettings") as mock_settings, + patch("agent_framework_azure_ai._project_provider.to_azure_ai_tools") as mock_to_azure_tools, + patch("agent_framework_azure_ai._project_provider.normalize_tools", side_effect=mock_normalize_tools), + ): + mock_settings.return_value.project_endpoint = azure_ai_unit_test_env["AZURE_AI_PROJECT_ENDPOINT"] + mock_settings.return_value.model_deployment_name = azure_ai_unit_test_env["AZURE_AI_MODEL_DEPLOYMENT_NAME"] + mock_to_azure_tools.return_value = [] + + provider = AzureAIProjectAgentProvider(project_client=mock_project_client) + + # Mock agent creation response + mock_agent_version = MagicMock(spec=AgentVersionDetails) + mock_agent_version.id = "agent-id" + mock_agent_version.name = "test-agent" + mock_agent_version.version = "1.0" + mock_agent_version.description = None + mock_agent_version.definition = MagicMock(spec=PromptAgentDefinition) + mock_agent_version.definition.model = "gpt-4" + 
mock_agent_version.definition.instructions = None + mock_agent_version.definition.tools = [] + + mock_project_client.agents.create_version = AsyncMock(return_value=mock_agent_version) + + # Pass both MCP tool and regular function + await provider.create_agent( + name="test-agent", + model="gpt-4", + tools=[mock_mcp_tool, regular_function], + ) + + # Verify to_azure_ai_tools was called with: + # - The regular AIFunction (1) + # - The 2 discovered MCP functions + mock_to_azure_tools.assert_called_once() + tools_passed = mock_to_azure_tools.call_args[0][0] + assert len(tools_passed) == 3 # 1 regular + 2 MCP functions + + # Verify the regular function is in the list + tool_names = [t.name for t in tools_passed] + assert "regular_function" in tool_names + assert "mcp_function_1" in tool_names + assert "mcp_function_2" in tool_names + + @pytest.mark.flaky @skip_if_azure_ai_integration_tests_disabled async def test_provider_create_and_get_agent_integration() -> None: diff --git a/python/packages/azurefunctions/agent_framework_azurefunctions/_app.py b/python/packages/azurefunctions/agent_framework_azurefunctions/_app.py index 99b0c1f7d6..7a49214b33 100644 --- a/python/packages/azurefunctions/agent_framework_azurefunctions/_app.py +++ b/python/packages/azurefunctions/agent_framework_azurefunctions/_app.py @@ -109,13 +109,13 @@ class AgentFunctionApp(DFAppBase): from agent_framework.azure import AgentFunctionApp, AzureOpenAIChatClient # Create agents with unique names - weather_agent = AzureOpenAIChatClient(...).create_agent( + weather_agent = AzureOpenAIChatClient(...).as_agent( name="WeatherAgent", instructions="You are a helpful weather agent.", tools=[get_weather], ) - math_agent = AzureOpenAIChatClient(...).create_agent( + math_agent = AzureOpenAIChatClient(...).as_agent( name="MathAgent", instructions="You are a helpful math assistant.", tools=[calculate], @@ -616,7 +616,7 @@ async def _handle_mcp_tool_invocation( # Create or parse session ID if thread_id and 
isinstance(thread_id, str) and thread_id.strip(): try: - session_id = AgentSessionId.parse(thread_id) + session_id = AgentSessionId.parse(thread_id, agent_name=agent_name) except ValueError as e: logger.warning( "Failed to parse AgentSessionId from thread_id '%s': %s. Falling back to new session ID.", diff --git a/python/packages/azurefunctions/pyproject.toml b/python/packages/azurefunctions/pyproject.toml index c6a8ecbfe6..926ae104fc 100644 --- a/python/packages/azurefunctions/pyproject.toml +++ b/python/packages/azurefunctions/pyproject.toml @@ -4,7 +4,7 @@ description = "Azure Functions integration for Microsoft Agent Framework." authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] readme = "README.md" requires-python = ">=3.10" -version = "1.0.0b260114" +version = "1.0.0b260116" license-files = ["LICENSE"] urls.homepage = "https://aka.ms/agent-framework" urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" diff --git a/python/packages/azurefunctions/tests/test_app.py b/python/packages/azurefunctions/tests/test_app.py index 4fb1617a73..bf0943256b 100644 --- a/python/packages/azurefunctions/tests/test_app.py +++ b/python/packages/azurefunctions/tests/test_app.py @@ -12,7 +12,7 @@ import azure.durable_functions as df import azure.functions as func import pytest -from agent_framework import AgentResponse, ChatMessage, ErrorContent +from agent_framework import AgentResponse, ChatMessage from agent_framework_durabletask import ( MIMETYPE_APPLICATION_JSON, MIMETYPE_TEXT_PLAIN, @@ -636,7 +636,7 @@ async def test_entity_handles_agent_error(self) -> None: assert isinstance(result, AgentResponse) assert len(result.messages) == 1 content = result.messages[0].contents[0] - assert isinstance(content, ErrorContent) + assert content.type == "error" assert "Agent error" in (content.message or "") assert content.error_code == "Exception" @@ -650,6 +650,7 @@ def test_entity_function_handles_exception(self) -> None: mock_context = Mock() 
mock_context.operation_name = "run" + mock_context.operation_name = "run" mock_context.get_input.side_effect = Exception("Input error") mock_context.get_state.return_value = None @@ -1070,6 +1071,70 @@ async def test_handle_mcp_tool_invocation_runtime_error(self) -> None: with pytest.raises(RuntimeError, match="Agent execution failed"): await app._handle_mcp_tool_invocation("TestAgent", context, client) + async def test_handle_mcp_tool_invocation_ignores_agent_name_in_thread_id(self) -> None: + """Test that MCP tool invocation uses the agent_name parameter, not the name from thread_id.""" + mock_agent = Mock() + mock_agent.name = "PlantAdvisor" + + app = AgentFunctionApp(agents=[mock_agent]) + client = AsyncMock() + + # Mock the entity response + mock_state = Mock() + mock_state.entity_state = { + "schemaVersion": "1.0.0", + "data": {"conversationHistory": []}, + } + client.read_entity_state.return_value = mock_state + + # Thread ID contains a different agent name (@StockAdvisor@poc123) + # but we're invoking PlantAdvisor - it should use PlantAdvisor's entity + context = json.dumps({"arguments": {"query": "test query", "threadId": "@StockAdvisor@test123"}}) + + with patch.object(app, "_get_response_from_entity") as get_response_mock: + get_response_mock.return_value = {"status": "success", "response": "Test response"} + + await app._handle_mcp_tool_invocation("PlantAdvisor", context, client) + + # Verify signal_entity was called with PlantAdvisor's entity, not StockAdvisor's + client.signal_entity.assert_called_once() + call_args = client.signal_entity.call_args + entity_id = call_args[0][0] + + # Entity name should be dafx-PlantAdvisor, not dafx-StockAdvisor + assert entity_id.name == "dafx-PlantAdvisor" + assert entity_id.key == "test123" + + async def test_handle_mcp_tool_invocation_uses_plain_thread_id_as_key(self) -> None: + """Test that a plain thread_id (not in @name@key format) is used as-is for the key.""" + mock_agent = Mock() + mock_agent.name = 
"TestAgent" + + app = AgentFunctionApp(agents=[mock_agent]) + client = AsyncMock() + + mock_state = Mock() + mock_state.entity_state = { + "schemaVersion": "1.0.0", + "data": {"conversationHistory": []}, + } + client.read_entity_state.return_value = mock_state + + # Plain thread_id without @name@key format + context = json.dumps({"arguments": {"query": "test query", "threadId": "simple-thread-123"}}) + + with patch.object(app, "_get_response_from_entity") as get_response_mock: + get_response_mock.return_value = {"status": "success", "response": "Test response"} + + await app._handle_mcp_tool_invocation("TestAgent", context, client) + + client.signal_entity.assert_called_once() + call_args = client.signal_entity.call_args + entity_id = call_args[0][0] + + assert entity_id.name == "dafx-TestAgent" + assert entity_id.key == "simple-thread-123" + def test_health_check_includes_mcp_tool_enabled(self) -> None: """Test that health check endpoint includes mcp_tool_enabled field.""" mock_agent = Mock() diff --git a/python/packages/bedrock/agent_framework_bedrock/_chat_client.py b/python/packages/bedrock/agent_framework_bedrock/_chat_client.py index e9e1eeff96..a6325a6603 100644 --- a/python/packages/bedrock/agent_framework_bedrock/_chat_client.py +++ b/python/packages/bedrock/agent_framework_bedrock/_chat_client.py @@ -16,14 +16,10 @@ ChatOptions, ChatResponse, ChatResponseUpdate, - Contents, + Content, FinishReason, - FunctionCallContent, - FunctionResultContent, Role, - TextContent, ToolProtocol, - UsageContent, UsageDetails, get_logger, prepare_function_call_results, @@ -328,7 +324,7 @@ async def _inner_get_streaming_response( response = await self._inner_get_response(messages=messages, options=options, **kwargs) contents = list(response.messages[0].contents if response.messages else []) if response.usage_details: - contents.append(UsageContent(details=response.usage_details)) + contents.append(Content.from_usage(usage_details=response.usage_details)) # type: 
ignore[arg-type] yield ChatResponseUpdate( response_id=response.response_id, contents=contents, @@ -472,37 +468,41 @@ def _convert_message_to_content_blocks(self, message: ChatMessage) -> list[dict[ blocks.append(block) return blocks - def _convert_content_to_bedrock_block(self, content: Contents) -> dict[str, Any] | None: - if isinstance(content, TextContent): - return {"text": content.text} - if isinstance(content, FunctionCallContent): - arguments = content.parse_arguments() or {} - return { - "toolUse": { - "toolUseId": content.call_id or self._generate_tool_call_id(), - "name": content.name, - "input": arguments, + def _convert_content_to_bedrock_block(self, content: Content) -> dict[str, Any] | None: + match content.type: + case "text": + return {"text": content.text} + case "function_call": + arguments = content.parse_arguments() or {} + return { + "toolUse": { + "toolUseId": content.call_id or self._generate_tool_call_id(), + "name": content.name, + "input": arguments, + } } - } - if isinstance(content, FunctionResultContent): - tool_result_block = { - "toolResult": { - "toolUseId": content.call_id, - "content": self._convert_tool_result_to_blocks(content.result), - "status": "error" if content.exception else "success", + case "function_result": + tool_result_block = { + "toolResult": { + "toolUseId": content.call_id, + "content": self._convert_tool_result_to_blocks(content.result), + "status": "error" if content.exception else "success", + } } - } - if content.exception: - tool_result = tool_result_block["toolResult"] - existing_content = tool_result.get("content") - content_list: list[dict[str, Any]] - if isinstance(existing_content, list): - content_list = existing_content - else: - content_list = [] - tool_result["content"] = content_list - content_list.append({"text": str(content.exception)}) - return tool_result_block + if content.exception: + tool_result = tool_result_block["toolResult"] + existing_content = tool_result.get("content") + content_list: 
list[dict[str, Any]] + if isinstance(existing_content, list): + content_list = existing_content + else: + content_list = [] + tool_result["content"] = content_list + content_list.append({"text": str(content.exception)}) + return tool_result_block + case _: + # Bedrock does not support other content types at this time + pass return None def _convert_tool_result_to_blocks(self, result: Any) -> list[dict[str, Any]]: @@ -531,7 +531,7 @@ def _normalize_tool_result_value(self, value: Any) -> dict[str, Any]: return {"text": value} if isinstance(value, (int, float, bool)) or value is None: return {"json": value} - if isinstance(value, TextContent) and getattr(value, "text", None): + if isinstance(value, Content) and value.type == "text": return {"text": value.text} if hasattr(value, "to_dict"): try: @@ -586,23 +586,23 @@ def _process_converse_response(self, response: dict[str, Any]) -> ChatResponse: def _parse_usage(self, usage: dict[str, Any] | None) -> UsageDetails | None: if not usage: return None - details = UsageDetails() + details: UsageDetails = {} if (input_tokens := usage.get("inputTokens")) is not None: - details.input_token_count = input_tokens + details["input_token_count"] = input_tokens if (output_tokens := usage.get("outputTokens")) is not None: - details.output_token_count = output_tokens + details["output_token_count"] = output_tokens if (total_tokens := usage.get("totalTokens")) is not None: - details.additional_counts["bedrock.total_tokens"] = total_tokens + details["total_token_count"] = total_tokens return details def _parse_message_contents(self, content_blocks: Sequence[MutableMapping[str, Any]]) -> list[Any]: contents: list[Any] = [] for block in content_blocks: if text_value := block.get("text"): - contents.append(TextContent(text=text_value, raw_representation=block)) + contents.append(Content.from_text(text=text_value, raw_representation=block)) continue if (json_value := block.get("json")) is not None: - 
contents.append(TextContent(text=json.dumps(json_value), raw_representation=block)) + contents.append(Content.from_text(text=json.dumps(json_value), raw_representation=block)) continue tool_use = block.get("toolUse") if isinstance(tool_use, MutableMapping): @@ -610,7 +610,7 @@ def _parse_message_contents(self, content_blocks: Sequence[MutableMapping[str, A if not tool_name: raise ServiceInvalidResponseError("Bedrock response missing required tool name in toolUse block.") contents.append( - FunctionCallContent( + Content.from_function_call( call_id=tool_use.get("toolUseId") or self._generate_tool_call_id(), name=tool_name, arguments=tool_use.get("input"), @@ -626,10 +626,10 @@ def _parse_message_contents(self, content_blocks: Sequence[MutableMapping[str, A exception = RuntimeError(f"Bedrock tool result status: {status}") result_value = self._convert_bedrock_tool_result_to_value(tool_result.get("content")) contents.append( - FunctionResultContent( + Content.from_function_result( call_id=tool_result.get("toolUseId") or self._generate_tool_call_id(), result=result_value, - exception=exception, + exception=str(exception) if exception else None, # type: ignore[arg-type] raw_representation=block, ) ) diff --git a/python/packages/bedrock/pyproject.toml b/python/packages/bedrock/pyproject.toml index d94035dd53..f8280a82a9 100644 --- a/python/packages/bedrock/pyproject.toml +++ b/python/packages/bedrock/pyproject.toml @@ -4,7 +4,7 @@ description = "Amazon Bedrock integration for Microsoft Agent Framework." 
authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] readme = "README.md" requires-python = ">=3.10" -version = "1.0.0b260114" +version = "1.0.0b260116" license-files = ["LICENSE"] urls.homepage = "https://aka.ms/agent-framework" urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" diff --git a/python/packages/bedrock/tests/test_bedrock_client.py b/python/packages/bedrock/tests/test_bedrock_client.py index 5842426483..704eb2138a 100644 --- a/python/packages/bedrock/tests/test_bedrock_client.py +++ b/python/packages/bedrock/tests/test_bedrock_client.py @@ -6,7 +6,7 @@ from typing import Any import pytest -from agent_framework import ChatMessage, Role, TextContent +from agent_framework import ChatMessage, Content, Role from agent_framework.exceptions import ServiceInitializationError from agent_framework_bedrock import BedrockChatClient @@ -42,8 +42,8 @@ def test_get_response_invokes_bedrock_runtime() -> None: ) messages = [ - ChatMessage(role=Role.SYSTEM, contents=[TextContent(text="You are concise.")]), - ChatMessage(role=Role.USER, contents=[TextContent(text="hello")]), + ChatMessage(role=Role.SYSTEM, contents=[Content.from_text(text="You are concise.")]), + ChatMessage(role=Role.USER, contents=[Content.from_text(text="hello")]), ] response = asyncio.run(client.get_response(messages=messages, options={"max_tokens": 32})) @@ -53,7 +53,7 @@ def test_get_response_invokes_bedrock_runtime() -> None: assert payload["modelId"] == "amazon.titan-text" assert payload["messages"][0]["content"][0]["text"] == "hello" assert response.messages[0].contents[0].text == "Bedrock says hi" - assert response.usage_details and response.usage_details.input_token_count == 10 + assert response.usage_details and response.usage_details["input_token_count"] == 10 def test_build_request_requires_non_system_messages() -> None: @@ -63,7 +63,7 @@ def test_build_request_requires_non_system_messages() -> None: client=_StubBedrockRuntime(), ) - messages = 
[ChatMessage(role=Role.SYSTEM, contents=[TextContent(text="Only system text")])] + messages = [ChatMessage(role=Role.SYSTEM, contents=[Content.from_text(text="Only system text")])] with pytest.raises(ServiceInitializationError): client._prepare_options(messages, {}) diff --git a/python/packages/bedrock/tests/test_bedrock_settings.py b/python/packages/bedrock/tests/test_bedrock_settings.py index 1924c750c6..07898303de 100644 --- a/python/packages/bedrock/tests/test_bedrock_settings.py +++ b/python/packages/bedrock/tests/test_bedrock_settings.py @@ -9,10 +9,8 @@ AIFunction, ChatMessage, ChatOptions, - FunctionCallContent, - FunctionResultContent, + Content, Role, - TextContent, ) from pydantic import BaseModel @@ -49,7 +47,7 @@ def test_build_request_includes_tool_config() -> None: "tools": [tool], "tool_choice": {"mode": "required", "required_function_name": "get_weather"}, } - messages = [ChatMessage(role=Role.USER, contents=[TextContent(text="hi")])] + messages = [ChatMessage(role=Role.USER, contents=[Content.from_text(text="hi")])] request = client._prepare_options(messages, options) @@ -61,14 +59,16 @@ def test_build_request_serializes_tool_history() -> None: client = _build_client() options: ChatOptions = {} messages = [ - ChatMessage(role=Role.USER, contents=[TextContent(text="how's weather?")]), + ChatMessage(role=Role.USER, contents=[Content.from_text(text="how's weather?")]), ChatMessage( role=Role.ASSISTANT, - contents=[FunctionCallContent(call_id="call-1", name="get_weather", arguments='{"location": "SEA"}')], + contents=[ + Content.from_function_call(call_id="call-1", name="get_weather", arguments='{"location": "SEA"}') + ], ), ChatMessage( role=Role.TOOL, - contents=[FunctionResultContent(call_id="call-1", result={"answer": "72F"})], + contents=[Content.from_function_result(call_id="call-1", result={"answer": "72F"})], ), ] @@ -101,9 +101,9 @@ def test_process_response_parses_tool_use_and_result() -> None: chat_response = 
client._process_converse_response(response) contents = chat_response.messages[0].contents - assert isinstance(contents[0], FunctionCallContent) + assert contents[0].type == "function_call" assert contents[0].name == "get_weather" - assert isinstance(contents[1], TextContent) + assert contents[1].type == "text" assert chat_response.finish_reason == client._map_finish_reason("tool_use") @@ -131,5 +131,5 @@ def test_process_response_parses_tool_result() -> None: chat_response = client._process_converse_response(response) contents = chat_response.messages[0].contents - assert isinstance(contents[0], FunctionResultContent) + assert contents[0].type == "function_result" assert contents[0].result == {"answer": 42} diff --git a/python/packages/chatkit/agent_framework_chatkit/_converter.py b/python/packages/chatkit/agent_framework_chatkit/_converter.py index 252ac8a753..b83fd40812 100644 --- a/python/packages/chatkit/agent_framework_chatkit/_converter.py +++ b/python/packages/chatkit/agent_framework_chatkit/_converter.py @@ -8,12 +8,8 @@ from agent_framework import ( ChatMessage, - DataContent, - FunctionCallContent, - FunctionResultContent, + Content, Role, - TextContent, - UriContent, ) from chatkit.types import ( AssistantMessageItem, @@ -91,8 +87,8 @@ async def user_message_to_input( if isinstance(content_part, UserMessageTextContent): text_content += content_part.text - # Convert attachments to DataContent or UriContent - data_contents: list[DataContent | UriContent] = [] + # Convert attachments to Content + data_contents: list[Content] = [] if item.attachments: for attachment in item.attachments: content = await self.attachment_to_message_content(attachment) @@ -108,9 +104,9 @@ async def user_message_to_input( user_message = ChatMessage(role=Role.USER, text=text_content.strip()) else: # Build contents list with both text and attachments - contents: list[TextContent | DataContent | UriContent] = [] + contents: list[Content] = [] if text_content.strip(): - 
contents.append(TextContent(text=text_content.strip())) + contents.append(Content.from_text(text=text_content.strip())) contents.extend(data_contents) user_message = ChatMessage(role=Role.USER, contents=contents) @@ -126,7 +122,7 @@ async def user_message_to_input( return messages - async def attachment_to_message_content(self, attachment: Attachment) -> DataContent | UriContent | None: + async def attachment_to_message_content(self, attachment: Attachment) -> Content | None: """Convert a ChatKit attachment to Agent Framework content. This method is called internally by `user_message_to_input()` to handle attachments. @@ -169,14 +165,14 @@ async def fetch_data(attachment_id: str) -> bytes: if self.attachment_data_fetcher is not None: try: data = await self.attachment_data_fetcher(attachment.id) - return DataContent(data=data, media_type=attachment.mime_type) + return Content.from_data(data=data, media_type=attachment.mime_type) except Exception as e: # If fetch fails, fall through to URL-based approach logger.debug(f"Failed to fetch attachment data for {attachment.id}: {e}") # For ImageAttachment, try to use preview_url if isinstance(attachment, ImageAttachment) and attachment.preview_url: - return UriContent(uri=str(attachment.preview_url), media_type=attachment.mime_type) + return Content.from_uri(uri=str(attachment.preview_url), media_type=attachment.mime_type) # For FileAttachment without data fetcher, skip the attachment # Subclasses can override this method to provide custom handling @@ -220,7 +216,7 @@ def hidden_context_to_input( """ return ChatMessage(role=Role.SYSTEM, text=f"{item.content}") - def tag_to_message_content(self, tag: UserMessageTagContent) -> TextContent: + def tag_to_message_content(self, tag: UserMessageTagContent) -> Content: """Convert a ChatKit tag (@-mention) to Agent Framework content. This method is called internally by `user_message_to_input()` to handle tags. 
@@ -248,10 +244,10 @@ def tag_to_message_content(self, tag: UserMessageTagContent) -> TextContent: type="input_tag", id="tag_1", text="john", data={"name": "John Doe"}, interactive=False ) content = converter.tag_to_message_content(tag) - # Returns: TextContent(text="Name:John Doe") + # Returns: Content.from_text(text="Name:John Doe") """ name = getattr(tag.data, "name", tag.text if hasattr(tag, "text") else "unknown") - return TextContent(text=f"Name:{name}") + return Content.from_text(text=f"Name:{name}") def task_to_input(self, item: TaskItem) -> ChatMessage | list[ChatMessage] | None: """Convert a ChatKit TaskItem to Agent Framework ChatMessage(s). @@ -448,7 +444,7 @@ async def client_tool_call_to_input(self, item: ClientToolCallItem) -> ChatMessa function_call_msg = ChatMessage( role=Role.ASSISTANT, contents=[ - FunctionCallContent( + Content.from_function_call( call_id=item.call_id, name=item.name, arguments=json.dumps(item.arguments), @@ -460,7 +456,7 @@ async def client_tool_call_to_input(self, item: ClientToolCallItem) -> ChatMessa function_result_msg = ChatMessage( role=Role.TOOL, contents=[ - FunctionResultContent( + Content.from_function_result( call_id=item.call_id, result=json.dumps(item.output) if item.output is not None else "", ) diff --git a/python/packages/chatkit/agent_framework_chatkit/_streaming.py b/python/packages/chatkit/agent_framework_chatkit/_streaming.py index b0273c5944..df44fa005d 100644 --- a/python/packages/chatkit/agent_framework_chatkit/_streaming.py +++ b/python/packages/chatkit/agent_framework_chatkit/_streaming.py @@ -6,7 +6,7 @@ from collections.abc import AsyncIterable, AsyncIterator, Callable from datetime import datetime -from agent_framework import AgentResponseUpdate, TextContent +from agent_framework import AgentResponseUpdate from chatkit.types import ( AssistantMessageContent, AssistantMessageContentPartTextDelta, @@ -77,7 +77,7 @@ def _default_id_generator(item_type: str) -> str: if update.contents: for content in 
update.contents: # Handle text content - only TextContent has a text attribute - if isinstance(content, TextContent) and content.text is not None: + if content.type == "text" and content.text is not None: # Yield incremental text delta for streaming display yield ThreadItemUpdated( type="thread.item.updated", diff --git a/python/packages/chatkit/pyproject.toml b/python/packages/chatkit/pyproject.toml index 8621411503..bbc80e0d44 100644 --- a/python/packages/chatkit/pyproject.toml +++ b/python/packages/chatkit/pyproject.toml @@ -4,7 +4,7 @@ description = "OpenAI ChatKit integration for Microsoft Agent Framework." authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] readme = "README.md" requires-python = ">=3.10" -version = "1.0.0b260114" +version = "1.0.0b260116" license-files = ["LICENSE"] urls.homepage = "https://aka.ms/agent-framework" urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" diff --git a/python/packages/chatkit/tests/test_converter.py b/python/packages/chatkit/tests/test_converter.py index 457017f647..b75139bf58 100644 --- a/python/packages/chatkit/tests/test_converter.py +++ b/python/packages/chatkit/tests/test_converter.py @@ -5,7 +5,7 @@ from unittest.mock import Mock import pytest -from agent_framework import ChatMessage, Role, TextContent +from agent_framework import ChatMessage, Role from chatkit.types import UserMessageTextContent from agent_framework_chatkit import ThreadItemConverter, simple_to_agent_input @@ -133,7 +133,7 @@ def test_tag_to_message_content(self, converter): ) result = converter.tag_to_message_content(tag) - assert isinstance(result, TextContent) + assert result.type == "text" # Since data is a dict, getattr won't work, so it will fall back to text assert result.text == "Name:john" @@ -150,7 +150,7 @@ def test_tag_to_message_content_no_name(self, converter): ) result = converter.tag_to_message_content(tag) - assert isinstance(result, TextContent) + assert result.type == "text" assert 
result.text == "Name:jane" async def test_attachment_to_message_content_file_without_fetcher(self, converter): @@ -169,7 +169,6 @@ async def test_attachment_to_message_content_file_without_fetcher(self, converte async def test_attachment_to_message_content_image_with_preview_url(self, converter): """Test that ImageAttachment with preview_url creates UriContent.""" - from agent_framework import UriContent from chatkit.types import ImageAttachment attachment = ImageAttachment( @@ -181,13 +180,12 @@ async def test_attachment_to_message_content_image_with_preview_url(self, conver ) result = await converter.attachment_to_message_content(attachment) - assert isinstance(result, UriContent) + assert result.type == "uri" assert result.uri == "https://example.com/photo.jpg" assert result.media_type == "image/jpeg" async def test_attachment_to_message_content_with_data_fetcher(self): """Test attachment conversion with data fetcher.""" - from agent_framework import DataContent from chatkit.types import FileAttachment # Mock data fetcher @@ -204,14 +202,13 @@ async def fetch_data(attachment_id: str) -> bytes: ) result = await converter.attachment_to_message_content(attachment) - assert isinstance(result, DataContent) + assert result.type == "data" assert result.media_type == "application/pdf" async def test_to_agent_input_with_image_attachment(self): """Test converting user message with text and image attachment.""" from datetime import datetime - from agent_framework import UriContent from chatkit.types import ImageAttachment, UserMessageItem attachment = ImageAttachment( @@ -241,11 +238,11 @@ async def test_to_agent_input_with_image_attachment(self): assert len(message.contents) == 2 # First content should be text - assert isinstance(message.contents[0], TextContent) + assert message.contents[0].type == "text" assert message.contents[0].text == "Check out this photo!" 
# Second content should be UriContent for the image - assert isinstance(message.contents[1], UriContent) + assert message.contents[1].type == "uri" assert message.contents[1].uri == "https://example.com/photo.jpg" assert message.contents[1].media_type == "image/jpeg" @@ -253,7 +250,6 @@ async def test_to_agent_input_with_file_attachment_and_fetcher(self): """Test converting user message with file attachment using data fetcher.""" from datetime import datetime - from agent_framework import DataContent from chatkit.types import FileAttachment, UserMessageItem attachment = FileAttachment( @@ -285,10 +281,10 @@ async def fetch_data(attachment_id: str) -> bytes: assert len(message.contents) == 2 # First content should be text - assert isinstance(message.contents[0], TextContent) + assert message.contents[0].type == "text" # Second content should be DataContent for the file - assert isinstance(message.contents[1], DataContent) + assert message.contents[1].type == "data" assert message.contents[1].media_type == "application/pdf" def test_task_to_input(self, converter): diff --git a/python/packages/chatkit/tests/test_streaming.py b/python/packages/chatkit/tests/test_streaming.py index ead7c5f33e..ff552d79e8 100644 --- a/python/packages/chatkit/tests/test_streaming.py +++ b/python/packages/chatkit/tests/test_streaming.py @@ -4,7 +4,7 @@ from unittest.mock import Mock -from agent_framework import AgentResponseUpdate, Role, TextContent +from agent_framework import AgentResponseUpdate, Content, Role from chatkit.types import ( ThreadItemAddedEvent, ThreadItemDoneEvent, @@ -34,7 +34,7 @@ async def test_stream_single_text_update(self): """Test streaming single text update.""" async def single_update_stream(): - yield AgentResponseUpdate(role=Role.ASSISTANT, contents=[TextContent(text="Hello world")]) + yield AgentResponseUpdate(role=Role.ASSISTANT, contents=[Content.from_text(text="Hello world")]) events = [] async for event in stream_agent_response(single_update_stream(), 
thread_id="test_thread"): @@ -59,8 +59,8 @@ async def test_stream_multiple_text_updates(self): """Test streaming multiple text updates.""" async def multiple_updates_stream(): - yield AgentResponseUpdate(role=Role.ASSISTANT, contents=[TextContent(text="Hello ")]) - yield AgentResponseUpdate(role=Role.ASSISTANT, contents=[TextContent(text="world!")]) + yield AgentResponseUpdate(role=Role.ASSISTANT, contents=[Content.from_text(text="Hello ")]) + yield AgentResponseUpdate(role=Role.ASSISTANT, contents=[Content.from_text(text="world!")]) events = [] async for event in stream_agent_response(multiple_updates_stream(), thread_id="test_thread"): @@ -91,7 +91,7 @@ def custom_id_generator(item_type: str) -> str: return f"custom_{item_type}_123" async def single_update_stream(): - yield AgentResponseUpdate(role=Role.ASSISTANT, contents=[TextContent(text="Test")]) + yield AgentResponseUpdate(role=Role.ASSISTANT, contents=[Content.from_text(text="Test")]) events = [] async for event in stream_agent_response( @@ -125,9 +125,10 @@ async def empty_content_stream(): async def test_stream_non_text_content(self): """Test streaming updates with non-text content.""" # Mock a content object without text attribute - non_text_content = Mock() + non_text_content = Mock(spec=Content) + non_text_content.type = "image" # Don't set text attribute - del non_text_content.text + non_text_content.text = None async def non_text_stream(): yield AgentResponseUpdate(role=Role.ASSISTANT, contents=[non_text_content]) diff --git a/python/packages/copilotstudio/agent_framework_copilotstudio/_agent.py b/python/packages/copilotstudio/agent_framework_copilotstudio/_agent.py index 606e1e83b6..98d5a2b475 100644 --- a/python/packages/copilotstudio/agent_framework_copilotstudio/_agent.py +++ b/python/packages/copilotstudio/agent_framework_copilotstudio/_agent.py @@ -10,9 +10,10 @@ AgentThread, BaseAgent, ChatMessage, + Content, ContextProvider, Role, - TextContent, + normalize_messages, ) from 
agent_framework._pydantic import AFBaseSettings from agent_framework.exceptions import ServiceException, ServiceInitializationError @@ -237,7 +238,7 @@ async def run( thread = self.get_new_thread() thread.service_thread_id = await self._start_new_conversation() - input_messages = self._normalize_messages(messages) + input_messages = normalize_messages(messages) question = "\n".join([message.text for message in input_messages]) @@ -278,7 +279,7 @@ async def run_stream( thread = self.get_new_thread() thread.service_thread_id = await self._start_new_conversation() - input_messages = self._normalize_messages(messages) + input_messages = normalize_messages(messages) question = "\n".join([message.text for message in input_messages]) @@ -331,7 +332,7 @@ async def _process_activities(self, activities: AsyncIterable[Any], streaming: b ): yield ChatMessage( role=Role.ASSISTANT, - contents=[TextContent(activity.text)], + contents=[Content.from_text(activity.text)], author_name=activity.from_property.name if activity.from_property else None, message_id=activity.id, raw_representation=activity, diff --git a/python/packages/copilotstudio/pyproject.toml b/python/packages/copilotstudio/pyproject.toml index 83e1202016..a30e08afc4 100644 --- a/python/packages/copilotstudio/pyproject.toml +++ b/python/packages/copilotstudio/pyproject.toml @@ -4,7 +4,7 @@ description = "Copilot Studio integration for Microsoft Agent Framework." 
authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] readme = "README.md" requires-python = ">=3.10" -version = "1.0.0b260114" +version = "1.0.0b260116" license-files = ["LICENSE"] urls.homepage = "https://aka.ms/agent-framework" urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" diff --git a/python/packages/copilotstudio/tests/test_copilot_agent.py b/python/packages/copilotstudio/tests/test_copilot_agent.py index 4777557d32..c4e2ff3e08 100644 --- a/python/packages/copilotstudio/tests/test_copilot_agent.py +++ b/python/packages/copilotstudio/tests/test_copilot_agent.py @@ -4,14 +4,7 @@ from unittest.mock import MagicMock, patch import pytest -from agent_framework import ( - AgentResponse, - AgentResponseUpdate, - AgentThread, - ChatMessage, - Role, - TextContent, -) +from agent_framework import AgentResponse, AgentResponseUpdate, AgentThread, ChatMessage, Content, Role from agent_framework.exceptions import ServiceException, ServiceInitializationError from microsoft_agents.copilotstudio.client import CopilotClient @@ -136,7 +129,7 @@ async def test_run_with_string_message(self, mock_copilot_client: MagicMock, moc assert isinstance(response, AgentResponse) assert len(response.messages) == 1 content = response.messages[0].contents[0] - assert isinstance(content, TextContent) + assert content.type == "text" assert content.text == "Test response" assert response.messages[0].role == Role.ASSISTANT @@ -150,13 +143,13 @@ async def test_run_with_chat_message(self, mock_copilot_client: MagicMock, mock_ mock_copilot_client.start_conversation.return_value = create_async_generator([conversation_activity]) mock_copilot_client.ask_question.return_value = create_async_generator([mock_activity]) - chat_message = ChatMessage(role=Role.USER, contents=[TextContent("test message")]) + chat_message = ChatMessage(role=Role.USER, contents=[Content.from_text("test message")]) response = await agent.run(chat_message) assert isinstance(response, 
AgentResponse) assert len(response.messages) == 1 content = response.messages[0].contents[0] - assert isinstance(content, TextContent) + assert content.type == "text" assert content.text == "Test response" assert response.messages[0].role == Role.ASSISTANT @@ -206,7 +199,7 @@ async def test_run_stream_with_string_message(self, mock_copilot_client: MagicMo async for response in agent.run_stream("test message"): assert isinstance(response, AgentResponseUpdate) content = response.contents[0] - assert isinstance(content, TextContent) + assert content.type == "text" assert content.text == "Streaming response" response_count += 1 @@ -233,7 +226,7 @@ async def test_run_stream_with_thread(self, mock_copilot_client: MagicMock) -> N async for response in agent.run_stream("test message", thread=thread): assert isinstance(response, AgentResponseUpdate) content = response.contents[0] - assert isinstance(content, TextContent) + assert content.type == "text" assert content.text == "Streaming response" response_count += 1 diff --git a/python/packages/core/agent_framework/_agents.py b/python/packages/core/agent_framework/_agents.py index 628ac7fb17..2092ebcb32 100644 --- a/python/packages/core/agent_framework/_agents.py +++ b/python/packages/core/agent_framework/_agents.py @@ -38,7 +38,7 @@ ChatMessage, ChatResponse, ChatResponseUpdate, - Role, + normalize_messages, ) from .exceptions import AgentExecutionException, AgentInitializationError from .observability import use_agent_instrumentation @@ -89,8 +89,10 @@ def _merge_options(base: dict[str, Any], override: dict[str, Any]) -> dict[str, if value is None: continue if key == "tools" and result.get("tools"): - # Combine tool lists - result["tools"] = list(result["tools"]) + list(value) + # Combine tool lists, avoiding duplicates by name + existing_names = {getattr(t, "name", None) for t in result["tools"]} + unique_new = [t for t in value if getattr(t, "name", None) not in existing_names] + result["tools"] = list(result["tools"]) + 
unique_new elif key == "logit_bias" and result.get("logit_bias"): # Merge logit_bias dicts result["logit_bias"] = {**result["logit_bias"], **value} @@ -470,8 +472,8 @@ async def agent_wrapper(**kwargs: Any) -> str: # Extract the input from kwargs using the specified arg_name input_text = kwargs.get(arg_name, "") - # Forward all kwargs except the arg_name to support runtime context propagation - forwarded_kwargs = {k: v for k, v in kwargs.items() if k != arg_name} + # Forward runtime context kwargs, excluding arg_name and conversation_id. + forwarded_kwargs = {k: v for k, v in kwargs.items() if k not in (arg_name, "conversation_id")} if stream_callback is None: # Use non-streaming mode @@ -498,21 +500,6 @@ async def agent_wrapper(**kwargs: Any) -> str: agent_tool._forward_runtime_kwargs = True # type: ignore return agent_tool - def _normalize_messages( - self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, - ) -> list[ChatMessage]: - if messages is None: - return [] - - if isinstance(messages, str): - return [ChatMessage(role=Role.USER, text=messages)] - - if isinstance(messages, ChatMessage): - return [messages] - - return [ChatMessage(role=Role.USER, text=msg) if isinstance(msg, str) else msg for msg in messages] - # region ChatAgent @@ -797,7 +784,7 @@ async def run( # Get tools from options or named parameter (named param takes precedence) tools_ = tools if tools is not None else opts.pop("tools", None) - input_messages = self._normalize_messages(messages) + input_messages = normalize_messages(messages) thread, run_chat_options, thread_messages = await self._prepare_thread_and_messages( thread=thread, input_messages=input_messages, **kwargs ) @@ -925,7 +912,7 @@ async def run_stream( # Get tools from options or named parameter (named param takes precedence) tools_ = tools if tools is not None else opts.pop("tools", None) - input_messages = self._normalize_messages(messages) + input_messages = normalize_messages(messages) thread, 
run_chat_options, thread_messages = await self._prepare_thread_and_messages( thread=thread, input_messages=input_messages, **kwargs ) @@ -1155,9 +1142,9 @@ async def _call_tool( # type: ignore # Convert result to MCP content if isinstance(result, str): - return [types.TextContent(type="text", text=result)] + return [types.TextContent(type="text", text=result)] # type: ignore[attr-defined] - return [types.TextContent(type="text", text=str(result))] + return [types.TextContent(type="text", text=str(result))] # type: ignore[attr-defined] @server.set_logging_level() # type: ignore async def _set_logging_level(level: types.LoggingLevel) -> None: # type: ignore diff --git a/python/packages/core/agent_framework/_clients.py b/python/packages/core/agent_framework/_clients.py index f48e8af86a..12e975df6c 100644 --- a/python/packages/core/agent_framework/_clients.py +++ b/python/packages/core/agent_framework/_clients.py @@ -377,7 +377,7 @@ def service_url(self) -> str: """ return "Unknown" - def create_agent( + def as_agent( self, *, id: str | None = None, @@ -428,7 +428,7 @@ def create_agent( client = OpenAIChatClient(model_id="gpt-4") # Create an agent using the convenience method - agent = client.create_agent( + agent = client.as_agent( name="assistant", instructions="You are a helpful assistant.", default_options={"temperature": 0.7, "max_tokens": 500}, diff --git a/python/packages/core/agent_framework/_mcp.py b/python/packages/core/agent_framework/_mcp.py index 3a6d5b818c..333af611c1 100644 --- a/python/packages/core/agent_framework/_mcp.py +++ b/python/packages/core/agent_framework/_mcp.py @@ -1,5 +1,7 @@ # Copyright (c) Microsoft. All rights reserved. 
+import asyncio +import base64 import logging import re import sys @@ -29,13 +31,8 @@ ) from ._types import ( ChatMessage, - Contents, - DataContent, - FunctionCallContent, - FunctionResultContent, + Content, Role, - TextContent, - UriContent, ) from .exceptions import ToolException, ToolExecutionException @@ -82,7 +79,7 @@ def _parse_message_from_mcp( def _parse_contents_from_mcp_tool_result( mcp_type: types.CallToolResult, -) -> list[Contents]: +) -> list[Content]: """Parse an MCP CallToolResult into Agent Framework content types. This function extracts the complete _meta field from CallToolResult objects @@ -147,25 +144,27 @@ def _parse_content_from_mcp( | types.ToolUseContent | types.ToolResultContent ], -) -> list[Contents]: +) -> list[Content]: """Parse an MCP type into an Agent Framework type.""" mcp_types = mcp_type if isinstance(mcp_type, Sequence) else [mcp_type] - return_types: list[Contents] = [] + return_types: list[Content] = [] for mcp_type in mcp_types: match mcp_type: case types.TextContent(): - return_types.append(TextContent(text=mcp_type.text, raw_representation=mcp_type)) + return_types.append(Content.from_text(text=mcp_type.text, raw_representation=mcp_type)) case types.ImageContent() | types.AudioContent(): + # MCP protocol uses base64-encoded strings, convert to bytes + data_bytes = base64.b64decode(mcp_type.data) if isinstance(mcp_type.data, str) else mcp_type.data return_types.append( - DataContent( - data=mcp_type.data, + Content.from_data( + data=data_bytes, media_type=mcp_type.mimeType, raw_representation=mcp_type, ) ) case types.ResourceLink(): return_types.append( - UriContent( + Content.from_uri( uri=str(mcp_type.uri), media_type=mcp_type.mimeType or "application/json", raw_representation=mcp_type, @@ -173,7 +172,7 @@ def _parse_content_from_mcp( ) case types.ToolUseContent(): return_types.append( - FunctionCallContent( + Content.from_function_call( call_id=mcp_type.id, name=mcp_type.name, arguments=mcp_type.input, @@ -182,12 +181,12 
@@ def _parse_content_from_mcp( ) case types.ToolResultContent(): return_types.append( - FunctionResultContent( + Content.from_function_result( call_id=mcp_type.toolUseId, result=_parse_content_from_mcp(mcp_type.content) if mcp_type.content else mcp_type.structuredContent, - exception=Exception() if mcp_type.isError else None, + exception=str(Exception()) if mcp_type.isError else None, # type: ignore[arg-type] raw_representation=mcp_type, ) ) @@ -195,7 +194,7 @@ def _parse_content_from_mcp( match mcp_type.resource: case types.TextResourceContents(): return_types.append( - TextContent( + Content.from_text( text=mcp_type.resource.text, raw_representation=mcp_type, additional_properties=( @@ -205,7 +204,7 @@ def _parse_content_from_mcp( ) case types.BlobResourceContents(): return_types.append( - DataContent( + Content.from_uri( uri=mcp_type.resource.blob, media_type=mcp_type.resource.mimeType, raw_representation=mcp_type, @@ -218,45 +217,41 @@ def _parse_content_from_mcp( def _prepare_content_for_mcp( - content: Contents, + content: Content, ) -> types.TextContent | types.ImageContent | types.AudioContent | types.EmbeddedResource | types.ResourceLink | None: """Prepare an Agent Framework content type for MCP.""" - match content: - case TextContent(): - return types.TextContent(type="text", text=content.text) - case DataContent(): - if content.media_type and content.media_type.startswith("image/"): - return types.ImageContent(type="image", data=content.uri, mimeType=content.media_type) - if content.media_type and content.media_type.startswith("audio/"): - return types.AudioContent(type="audio", data=content.uri, mimeType=content.media_type) - if content.media_type and content.media_type.startswith("application/"): - return types.EmbeddedResource( - type="resource", - resource=types.BlobResourceContents( - blob=content.uri, - mimeType=content.media_type, - # uri's are not limited in MCP but they have to be set. 
- # the uri of data content, contains the data uri, which - # is not the uri meant here, UriContent would match this. - uri=( - content.additional_properties.get("uri", "af://binary") - if content.additional_properties - else "af://binary" - ), # type: ignore[reportArgumentType] - ), - ) - return None - case UriContent(): - return types.ResourceLink( - type="resource_link", - uri=content.uri, # type: ignore[reportArgumentType] - mimeType=content.media_type, - name=( - content.additional_properties.get("name", "Unknown") if content.additional_properties else "Unknown" + if content.type == "text": + return types.TextContent(type="text", text=content.text) # type: ignore[attr-defined] + if content.type == "data": + if content.media_type and content.media_type.startswith("image/"): # type: ignore[attr-defined] + return types.ImageContent(type="image", data=content.uri, mimeType=content.media_type) # type: ignore[attr-defined] + if content.media_type and content.media_type.startswith("audio/"): # type: ignore[attr-defined] + return types.AudioContent(type="audio", data=content.uri, mimeType=content.media_type) # type: ignore[attr-defined] + if content.media_type and content.media_type.startswith("application/"): # type: ignore[attr-defined] + return types.EmbeddedResource( + type="resource", + resource=types.BlobResourceContents( + blob=content.uri, # type: ignore[attr-defined] + mimeType=content.media_type, # type: ignore[attr-defined] + # uri's are not limited in MCP but they have to be set. + # the uri of data content, contains the data uri, which + # is not the uri meant here, UriContent would match this. 
+ uri=( + content.additional_properties.get("uri", "af://binary") + if content.additional_properties + else "af://binary" + ), # type: ignore[reportArgumentType] ), ) - case _: - return None + return None + if content.type == "uri": + return types.ResourceLink( + type="resource_link", + uri=content.uri, # type: ignore[reportArgumentType,attr-defined] + mimeType=content.media_type, # type: ignore[attr-defined] + name=(content.additional_properties.get("name", "Unknown") if content.additional_properties else "Unknown"), + ) + return None def _prepare_message_for_mcp( @@ -376,6 +371,38 @@ def functions(self) -> list[AIFunction[Any, Any]]: return self._functions return [func for func in self._functions if func.name in self.allowed_tools] + async def _safe_close_exit_stack(self) -> None: + """Safely close the exit stack, handling cross-task boundary errors. + + anyio's cancel scopes are bound to the task they were created in. + If aclose() is called from a different task (e.g., during streaming reconnection), + anyio will raise a RuntimeError or CancelledError. In this case, we log a warning + and allow garbage collection to clean up the resources. + + Known error variants: + - "Attempted to exit cancel scope in a different task than it was entered in" + - "Attempted to exit a cancel scope that isn't the current task's current cancel scope" + - CancelledError from anyio cancel scope cleanup + """ + try: + await self._exit_stack.aclose() + except RuntimeError as e: + error_msg = str(e).lower() + # Check for anyio cancel scope errors (multiple variants exist) + if "cancel scope" in error_msg: + logger.warning( + "Could not cleanly close MCP exit stack due to cancel scope error. " + "Old resources will be garbage collected. Error: %s", + e, + ) + else: + raise + except asyncio.CancelledError: + # CancelledError can occur during cleanup when cancel scopes are involved + logger.warning( + "Could not cleanly close MCP exit stack due to cancellation. 
Old resources will be garbage collected." + ) + async def connect(self, *, reset: bool = False) -> None: """Connect to the MCP server. @@ -389,7 +416,7 @@ async def connect(self, *, reset: bool = False) -> None: ToolException: If connection or session initialization fails. """ if reset: - await self._exit_stack.aclose() + await self._safe_close_exit_stack() self.session = None self.is_connected = False self._exit_stack = AsyncExitStack() @@ -397,7 +424,7 @@ async def connect(self, *, reset: bool = False) -> None: try: transport = await self._exit_stack.enter_async_context(self.get_mcp_client()) except Exception as ex: - await self._exit_stack.aclose() + await self._safe_close_exit_stack() command = getattr(self, "command", None) if command: error_msg = f"Failed to start MCP server '{command}': {ex}" @@ -418,7 +445,7 @@ async def connect(self, *, reset: bool = False) -> None: ) ) except Exception as ex: - await self._exit_stack.aclose() + await self._safe_close_exit_stack() raise ToolException( message="Failed to create MCP session. Please check your configuration.", inner_exception=ex, @@ -426,7 +453,7 @@ async def connect(self, *, reset: bool = False) -> None: try: await session.initialize() except Exception as ex: - await self._exit_stack.aclose() + await self._safe_close_exit_stack() # Provide context about initialization failure command = getattr(self, "command", None) if command: @@ -650,7 +677,7 @@ async def load_tools(self) -> None: input_model = _get_input_model_from_mcp_tool(tool) approval_mode = self._determine_approval_mode(local_name) # Create AIFunctions out of each tool - func: AIFunction[BaseModel, list[Contents] | Any | types.CallToolResult] = AIFunction( + func: AIFunction[BaseModel, list[Content] | Any | types.CallToolResult] = AIFunction( func=partial(self.call_tool, tool.name), name=local_name, description=tool.description or "", @@ -670,7 +697,7 @@ async def close(self) -> None: Closes the connection and cleans up resources. 
""" - await self._exit_stack.aclose() + await self._safe_close_exit_stack() self.session = None self.is_connected = False @@ -704,7 +731,7 @@ async def _ensure_connected(self) -> None: inner_exception=ex, ) from ex - async def call_tool(self, tool_name: str, **kwargs: Any) -> list[Contents] | Any | types.CallToolResult: + async def call_tool(self, tool_name: str, **kwargs: Any) -> list[Content] | Any | types.CallToolResult: """Call a tool with the given arguments. Args: @@ -727,8 +754,12 @@ async def call_tool(self, tool_name: str, **kwargs: Any) -> list[Contents] | Any # Filter out framework kwargs that cannot be serialized by the MCP SDK. # These are internal objects passed through the function invocation pipeline # that should not be forwarded to external MCP servers. + # conversation_id is an internal tracking ID used by services like Azure AI. + # options contains metadata/store used by AG-UI for Azure AI client requirements. filtered_kwargs = { - k: v for k, v in kwargs.items() if k not in {"chat_options", "tools", "tool_choice", "thread"} + k: v + for k, v in kwargs.items() + if k not in {"chat_options", "tools", "tool_choice", "thread", "conversation_id", "options"} } # Try the operation, reconnecting once if the connection is closed @@ -842,7 +873,7 @@ async def __aenter__(self) -> Self: except ToolException: raise except Exception as ex: - await self._exit_stack.aclose() + await self._safe_close_exit_stack() raise ToolExecutionException("Failed to enter context manager.", inner_exception=ex) from ex async def __aexit__( diff --git a/python/packages/core/agent_framework/_middleware.py b/python/packages/core/agent_framework/_middleware.py index 0e26565c5a..aeafd91ac6 100644 --- a/python/packages/core/agent_framework/_middleware.py +++ b/python/packages/core/agent_framework/_middleware.py @@ -8,7 +8,7 @@ from typing import TYPE_CHECKING, Any, ClassVar, Generic, TypeAlias, TypedDict, TypeVar from ._serialization import SerializationMixin -from ._types import 
AgentResponse, AgentResponseUpdate, ChatMessage, prepare_messages +from ._types import AgentResponse, AgentResponseUpdate, ChatMessage, normalize_messages, prepare_messages from .exceptions import MiddlewareException if TYPE_CHECKING: @@ -1225,7 +1225,7 @@ async def middleware_enabled_run( if chat_middlewares: kwargs["middleware"] = chat_middlewares - normalized_messages = self._normalize_messages(messages) + normalized_messages = normalize_messages(messages) # Execute with middleware if available if agent_pipeline.has_middlewares: @@ -1273,7 +1273,7 @@ def middleware_enabled_run_stream( if chat_middlewares: kwargs["middleware"] = chat_middlewares - normalized_messages = self._normalize_messages(messages) + normalized_messages = normalize_messages(messages) # Execute with middleware if available if agent_pipeline.has_middlewares: diff --git a/python/packages/core/agent_framework/_tools.py b/python/packages/core/agent_framework/_tools.py index a0d0a13dc2..60d8783b08 100644 --- a/python/packages/core/agent_framework/_tools.py +++ b/python/packages/core/agent_framework/_tools.py @@ -54,9 +54,7 @@ ChatMessage, ChatResponse, ChatResponseUpdate, - Contents, - FunctionApprovalResponseContent, - FunctionCallContent, + Content, ) from typing import overload @@ -104,15 +102,15 @@ def record(self, *args: Any, **kwargs: Any) -> None: # pragma: no cover - trivi def _parse_inputs( - inputs: "Contents | dict[str, Any] | str | list[Contents | dict[str, Any] | str] | None", -) -> list["Contents"]: - """Parse the inputs for a tool, ensuring they are of type Contents. + inputs: "Content | dict[str, Any] | str | list[Content | dict[str, Any] | str] | None", +) -> list["Content"]: + """Parse the inputs for a tool, ensuring they are of type Content. Args: - inputs: The inputs to parse. Can be a single item or list of Contents, dicts, or strings. + inputs: The inputs to parse. Can be a single item or list of Content, dicts, or strings. Returns: - A list of Contents objects. 
+ A list of Content objects. Raises: ValueError: If an unsupported input type is encountered. @@ -122,43 +120,39 @@ def _parse_inputs( return [] from ._types import ( - BaseContent, - DataContent, - HostedFileContent, - HostedVectorStoreContent, - UriContent, + Content, ) - parsed_inputs: list["Contents"] = [] + parsed_inputs: list["Content"] = [] if not isinstance(inputs, list): inputs = [inputs] for input_item in inputs: if isinstance(input_item, str): # If it's a string, we assume it's a URI or similar identifier. # Convert it to a UriContent or similar type as needed. - parsed_inputs.append(UriContent(uri=input_item, media_type="text/plain")) + parsed_inputs.append(Content.from_uri(uri=input_item, media_type="text/plain")) elif isinstance(input_item, dict): # If it's a dict, we assume it contains properties for a specific content type. # we check if the required keys are present to determine the type. # for instance, if it has "uri" and "media_type", we treat it as UriContent. - # if is only has uri, then we treat it as DataContent. + # if it only has uri and media_type without a specific type indicator, we treat it as DataContent. # etc. 
if "uri" in input_item: - parsed_inputs.append( - UriContent(**input_item) if "media_type" in input_item else DataContent(**input_item) - ) + # Use Content.from_uri for proper URI content, DataContent for backwards compatibility + parsed_inputs.append(Content.from_uri(**input_item)) elif "file_id" in input_item: - parsed_inputs.append(HostedFileContent(**input_item)) + parsed_inputs.append(Content.from_hosted_file(**input_item)) elif "vector_store_id" in input_item: - parsed_inputs.append(HostedVectorStoreContent(**input_item)) + parsed_inputs.append(Content.from_hosted_vector_store(**input_item)) elif "data" in input_item: - parsed_inputs.append(DataContent(**input_item)) + # DataContent helper handles both uri and data parameters + parsed_inputs.append(Content.from_data(**input_item)) else: raise ValueError(f"Unsupported input type: {input_item}") - elif isinstance(input_item, BaseContent): + elif isinstance(input_item, Content): parsed_inputs.append(input_item) else: - raise TypeError(f"Unsupported input type: {type(input_item).__name__}. Expected Contents or dict.") + raise TypeError(f"Unsupported input type: {type(input_item).__name__}. Expected Content or dict.") return parsed_inputs @@ -254,7 +248,7 @@ class HostedCodeInterpreterTool(BaseTool): def __init__( self, *, - inputs: "Contents | dict[str, Any] | str | list[Contents | dict[str, Any] | str] | None" = None, + inputs: "Content | dict[str, Any] | str | list[Content | dict[str, Any] | str] | None" = None, description: str | None = None, additional_properties: dict[str, Any] | None = None, **kwargs: Any, @@ -266,8 +260,8 @@ def __init__( This should mostly be HostedFileContent or HostedVectorStoreContent. Can also be DataContent, depending on the service used. 
When supplying a list, it can contain: - - Contents instances - - dicts with properties for Contents (e.g., {"uri": "http://example.com", "media_type": "text/html"}) + - Content instances + - dicts with properties for Content (e.g., {"uri": "http://example.com", "media_type": "text/html"}) - strings (which will be converted to UriContent with media_type "text/plain"). If None, defaults to an empty list. description: A description of the tool. @@ -503,7 +497,7 @@ class HostedFileSearchTool(BaseTool): def __init__( self, *, - inputs: "Contents | dict[str, Any] | str | list[Contents | dict[str, Any] | str] | None" = None, + inputs: "Content | dict[str, Any] | str | list[Content | dict[str, Any] | str] | None" = None, max_results: int | None = None, description: str | None = None, additional_properties: dict[str, Any] | None = None, @@ -515,8 +509,8 @@ def __init__( inputs: A list of contents that the tool can accept as input. Defaults to None. This should be one or more HostedVectorStoreContents. When supplying a list, it can contain: - - Contents instances - - dicts with properties for Contents (e.g., {"uri": "http://example.com", "media_type": "text/html"}) + - Content instances + - dicts with properties for Content (e.g., {"uri": "http://example.com", "media_type": "text/html"}) - strings (which will be converted to UriContent with media_type "text/plain"). If None, defaults to an empty list. max_results: The maximum number of results to return from the file search. @@ -1480,7 +1474,7 @@ class FunctionExecutionResult: __slots__ = ("content", "terminate") - def __init__(self, content: "Contents", terminate: bool = False) -> None: + def __init__(self, content: "Content", terminate: bool = False) -> None: """Initialize FunctionExecutionResult. 
Args: @@ -1492,7 +1486,7 @@ def __init__(self, content: "Contents", terminate: bool = False) -> None: async def _auto_invoke_function( - function_call_content: "FunctionCallContent | FunctionApprovalResponseContent", + function_call_content: "Content", custom_args: dict[str, Any] | None = None, *, config: FunctionInvocationConfiguration, @@ -1500,7 +1494,7 @@ async def _auto_invoke_function( sequence_index: int | None = None, request_index: int | None = None, middleware_pipeline: Any = None, # Optional MiddlewarePipeline -) -> "FunctionExecutionResult | Contents": +) -> "FunctionExecutionResult | Content": """Invoke a function call requested by the agent, applying middleware that is defined. Args: @@ -1516,49 +1510,51 @@ async def _auto_invoke_function( Returns: A FunctionExecutionResult wrapping the content and terminate signal, - or a Contents object for approval/hosted tool scenarios. + or a Content object for approval/hosted tool scenarios. Raises: KeyError: If the requested function is not found in the tool map. """ + from ._types import Content + # Note: The scenarios for approval_mode="always_require", declaration_only, and # terminate_on_unknown_calls are all handled in _try_execute_function_calls before # this function is called. This function only handles the actual execution of approved, # non-declaration-only functions. 
- from ._types import FunctionCallContent, FunctionResultContent tool: AIFunction[BaseModel, Any] | None = None if function_call_content.type == "function_call": - tool = tool_map.get(function_call_content.name) + tool = tool_map.get(function_call_content.name) # type: ignore[arg-type] # Tool should exist because _try_execute_function_calls validates this if tool is None: exc = KeyError(f'Function "{function_call_content.name}" not found.') return FunctionExecutionResult( - content=FunctionResultContent( - call_id=function_call_content.call_id, + content=Content.from_function_result( + call_id=function_call_content.call_id, # type: ignore[arg-type] result=f'Error: Requested function "{function_call_content.name}" not found.', - exception=exc, + exception=str(exc), # type: ignore[arg-type] ) ) else: # Note: Unapproved tools (approved=False) are handled in _replace_approval_contents_with_results # and never reach this function, so we only handle approved=True cases here. - inner_call = function_call_content.function_call - if not isinstance(inner_call, FunctionCallContent): + inner_call = function_call_content.function_call # type: ignore[attr-defined] + if inner_call.type != "function_call": # type: ignore[union-attr] return function_call_content - tool = tool_map.get(inner_call.name) + tool = tool_map.get(inner_call.name) # type: ignore[attr-defined, union-attr, arg-type] if tool is None: # we assume it is a hosted tool return function_call_content - function_call_content = inner_call + function_call_content = inner_call # type: ignore[assignment] parsed_args: dict[str, Any] = dict(function_call_content.parse_arguments() or {}) # Filter out internal framework kwargs before passing to tools. + # conversation_id is an internal tracking ID that should not be forwarded to tools. 
runtime_kwargs: dict[str, Any] = { key: value for key, value in (custom_args or {}).items() - if key not in {"_function_middleware_pipeline", "middleware"} + if key not in {"_function_middleware_pipeline", "middleware", "conversation_id"} } try: args = tool.input_model.model_validate(parsed_args) @@ -1567,7 +1563,11 @@ async def _auto_invoke_function( if config.include_detailed_errors: message = f"{message} Exception: {exc}" return FunctionExecutionResult( - content=FunctionResultContent(call_id=function_call_content.call_id, result=message, exception=exc) + content=Content.from_function_result( + call_id=function_call_content.call_id, # type: ignore[arg-type] + result=message, + exception=str(exc), # type: ignore[arg-type] + ) ) if not middleware_pipeline or ( @@ -1581,8 +1581,8 @@ async def _auto_invoke_function( **runtime_kwargs if getattr(tool, "_forward_runtime_kwargs", False) else {}, ) return FunctionExecutionResult( - content=FunctionResultContent( - call_id=function_call_content.call_id, + content=Content.from_function_result( + call_id=function_call_content.call_id, # type: ignore[arg-type] result=function_result, ) ) @@ -1591,7 +1591,11 @@ async def _auto_invoke_function( if config.include_detailed_errors: message = f"{message} Exception: {exc}" return FunctionExecutionResult( - content=FunctionResultContent(call_id=function_call_content.call_id, result=message, exception=exc) + content=Content.from_function_result( + call_id=function_call_content.call_id, # type: ignore[arg-type] + result=message, + exception=str(exc), + ) ) # Execute through middleware pipeline if available from ._middleware import FunctionInvocationContext @@ -1617,8 +1621,8 @@ async def final_function_handler(context_obj: Any) -> Any: final_handler=final_function_handler, ) return FunctionExecutionResult( - content=FunctionResultContent( - call_id=function_call_content.call_id, + content=Content.from_function_result( + call_id=function_call_content.call_id, # type: ignore[arg-type] 
result=function_result, ), terminate=middleware_context.terminate, @@ -1628,7 +1632,11 @@ async def final_function_handler(context_obj: Any) -> Any: if config.include_detailed_errors: message = f"{message} Exception: {exc}" return FunctionExecutionResult( - content=FunctionResultContent(call_id=function_call_content.call_id, result=message, exception=exc) + content=Content.from_function_result( + call_id=function_call_content.call_id, # type: ignore[arg-type] + result=message, + exception=str(exc), # type: ignore[arg-type] + ) ) @@ -1653,14 +1661,14 @@ def _get_tool_map( async def _try_execute_function_calls( custom_args: dict[str, Any], attempt_idx: int, - function_calls: Sequence["FunctionCallContent"] | Sequence["FunctionApprovalResponseContent"], + function_calls: Sequence["Content"], tools: "ToolProtocol \ | Callable[..., Any] \ | MutableMapping[str, Any] \ | Sequence[ToolProtocol | Callable[..., Any] | MutableMapping[str, Any]]", config: FunctionInvocationConfiguration, middleware_pipeline: Any = None, # Optional MiddlewarePipeline to avoid circular imports -) -> tuple[Sequence["Contents"], bool]: +) -> tuple[Sequence["Content"], bool]: """Execute multiple function calls concurrently. Args: @@ -1673,15 +1681,20 @@ async def _try_execute_function_calls( Returns: A tuple of: - - A list of Contents containing the results of each function call, + - A list of Content containing the results of each function call, or the approval requests if any function requires approval, or the original function calls if any are declaration only. - A boolean indicating whether to terminate the function calling loop. 
""" - from ._types import FunctionApprovalRequestContent, FunctionCallContent + from ._types import Content tool_map = _get_tool_map(tools) approval_tools = [tool_name for tool_name, tool in tool_map.items() if tool.approval_mode == "always_require"] + logger.debug( + "_try_execute_function_calls: tool_map keys=%s, approval_tools=%s", + list(tool_map.keys()), + approval_tools, + ) declaration_only = [tool_name for tool_name, tool in tool_map.items() if tool.declaration_only] additional_tool_names = [tool.name for tool in config.additional_tools] if config.additional_tools else [] # check if any are calling functions that need approval @@ -1689,27 +1702,36 @@ async def _try_execute_function_calls( approval_needed = False declaration_only_flag = False for fcc in function_calls: - if isinstance(fcc, FunctionCallContent) and fcc.name in approval_tools: + fcc_name = getattr(fcc, "name", None) + logger.debug( + "Checking function call: type=%s, name=%s, in approval_tools=%s", + fcc.type, + fcc_name, + fcc_name in approval_tools, + ) + if fcc.type == "function_call" and fcc.name in approval_tools: # type: ignore[attr-defined] + logger.debug("Approval needed for function: %s", fcc.name) approval_needed = True break - if isinstance(fcc, FunctionCallContent) and (fcc.name in declaration_only or fcc.name in additional_tool_names): + if fcc.type == "function_call" and (fcc.name in declaration_only or fcc.name in additional_tool_names): # type: ignore[attr-defined] declaration_only_flag = True break - if config.terminate_on_unknown_calls and isinstance(fcc, FunctionCallContent) and fcc.name not in tool_map: - raise KeyError(f'Error: Requested function "{fcc.name}" not found.') + if config.terminate_on_unknown_calls and fcc.type == "function_call" and fcc.name not in tool_map: # type: ignore[attr-defined] + raise KeyError(f'Error: Requested function "{fcc.name}" not found.') # type: ignore[attr-defined] if approval_needed: - # approval can only be needed for Function Call 
Contents, not Approval Responses. + # approval can only be needed for Function Call Content, not Approval Responses. + logger.debug("Returning function_approval_request contents") return ( [ - FunctionApprovalRequestContent(id=fcc.call_id, function_call=fcc) + Content.from_function_approval_request(id=fcc.call_id, function_call=fcc) # type: ignore[attr-defined, arg-type] for fcc in function_calls - if isinstance(fcc, FunctionCallContent) + if fcc.type == "function_call" ], False, ) if declaration_only_flag: # return the declaration only tools to the user, since we cannot execute them. - return ([fcc for fcc in function_calls if isinstance(fcc, FunctionCallContent)], False) + return ([fcc for fcc in function_calls if fcc.type == "function_call"], False) # Run all function calls concurrently execution_results = await asyncio.gather(*[ @@ -1726,7 +1748,7 @@ async def _try_execute_function_calls( ]) # Unpack FunctionExecutionResult wrappers and check for terminate signal - contents: list[Contents] = [] + contents: list[Content] = [] should_terminate = False for result in execution_results: if isinstance(result, FunctionExecutionResult): @@ -1734,7 +1756,7 @@ async def _try_execute_function_calls( if result.terminate: should_terminate = True else: - # Direct Contents (e.g., from hosted tools) + # Direct Content (e.g., from hosted tools) contents.append(result) return (contents, should_terminate) @@ -1772,30 +1794,27 @@ def _extract_tools(options: dict[str, Any] | None) -> Any: def _collect_approval_responses( messages: "list[ChatMessage]", -) -> dict[str, "FunctionApprovalResponseContent"]: +) -> dict[str, "Content"]: """Collect approval responses (both approved and rejected) from messages.""" - from ._types import ChatMessage, FunctionApprovalResponseContent + from ._types import ChatMessage, Content - fcc_todo: dict[str, FunctionApprovalResponseContent] = {} + fcc_todo: dict[str, Content] = {} for msg in messages: for content in msg.contents if isinstance(msg, 
ChatMessage) else []: # Collect BOTH approved and rejected responses - if isinstance(content, FunctionApprovalResponseContent): - fcc_todo[content.id] = content + if content.type == "function_approval_response": + fcc_todo[content.id] = content # type: ignore[attr-defined, index] return fcc_todo def _replace_approval_contents_with_results( messages: "list[ChatMessage]", - fcc_todo: dict[str, "FunctionApprovalResponseContent"], - approved_function_results: "list[Contents]", + fcc_todo: dict[str, "Content"], + approved_function_results: "list[Content]", ) -> None: """Replace approval request/response contents with function call/result contents in-place.""" from ._types import ( - FunctionApprovalRequestContent, - FunctionApprovalResponseContent, - FunctionCallContent, - FunctionResultContent, + Content, Role, ) @@ -1803,23 +1822,25 @@ def _replace_approval_contents_with_results( for msg in messages: # First pass - collect existing function call IDs to avoid duplicates existing_call_ids = { - content.call_id for content in msg.contents if isinstance(content, FunctionCallContent) and content.call_id + content.call_id # type: ignore[union-attr, operator] + for content in msg.contents + if content.type == "function_call" and content.call_id # type: ignore[attr-defined] } # Track approval requests that should be removed (duplicates) contents_to_remove = [] for content_idx, content in enumerate(msg.contents): - if isinstance(content, FunctionApprovalRequestContent): + if content.type == "function_approval_request": # Don't add the function call if it already exists (would create duplicate) - if content.function_call.call_id in existing_call_ids: + if content.function_call.call_id in existing_call_ids: # type: ignore[attr-defined, union-attr, operator] # Just mark for removal - the function call already exists contents_to_remove.append(content_idx) else: # Put back the function call content only if it doesn't exist - msg.contents[content_idx] = content.function_call - elif 
isinstance(content, FunctionApprovalResponseContent): - if content.approved and content.id in fcc_todo: + msg.contents[content_idx] = content.function_call # type: ignore[attr-defined, assignment] + elif content.type == "function_approval_response": + if content.approved and content.id in fcc_todo: # type: ignore[attr-defined] # Replace with the corresponding result if result_idx < len(approved_function_results): msg.contents[content_idx] = approved_function_results[result_idx] @@ -1828,8 +1849,8 @@ def _replace_approval_contents_with_results( else: # Create a "not approved" result for rejected calls # Use function_call.call_id (the function's ID), not content.id (approval's ID) - msg.contents[content_idx] = FunctionResultContent( - call_id=content.function_call.call_id, + msg.contents[content_idx] = Content.from_function_result( + call_id=content.function_call.call_id, # type: ignore[union-attr, arg-type] result="Error: Tool call invocation was rejected by user.", ) msg.role = Role.TOOL @@ -1867,9 +1888,6 @@ async def function_invocation_wrapper( from ._middleware import extract_and_merge_function_middleware from ._types import ( ChatMessage, - FunctionApprovalRequestContent, - FunctionCallContent, - FunctionResultContent, prepare_messages, ) @@ -1893,7 +1911,7 @@ async def function_invocation_wrapper( tools = _extract_tools(options) # Only execute APPROVED function calls, not rejected ones approved_responses = [resp for resp in fcc_todo.values() if resp.approved] - approved_function_results: list[Contents] = [] + approved_function_results: list[Content] = [] if approved_responses: results, _ = await _try_execute_function_calls( custom_args=kwargs, @@ -1907,7 +1925,7 @@ async def function_invocation_wrapper( if any( fcr.exception is not None for fcr in approved_function_results - if isinstance(fcr, FunctionResultContent) + if fcr.type == "function_result" ): errors_in_a_row += 1 # no need to reset the counter here, since this is the start of a new attempt. 
@@ -1926,13 +1944,11 @@ async def function_invocation_wrapper( filtered_kwargs = {k: v for k, v in kwargs.items() if k not in ("thread", "tools", "tool_choice")} response = await func(self, messages=prepped_messages, options=options, **filtered_kwargs) # if there are function calls, we will handle them first - function_results = { - it.call_id for it in response.messages[0].contents if isinstance(it, FunctionResultContent) - } + function_results = {it.call_id for it in response.messages[0].contents if it.type == "function_result"} function_calls = [ it for it in response.messages[0].contents - if isinstance(it, FunctionCallContent) and it.call_id not in function_results + if it.type == "function_call" and it.call_id not in function_results ] if response.conversation_id is not None: @@ -1953,7 +1969,7 @@ async def function_invocation_wrapper( config=config, ) # Check if we have approval requests or function calls (not results) in the results - if any(isinstance(fccr, FunctionApprovalRequestContent) for fccr in function_call_results): + if any(fccr.type == "function_approval_request" for fccr in function_call_results): # Add approval requests to the existing assistant message (with tool_calls) # instead of creating a separate tool message from ._types import Role @@ -1965,7 +1981,7 @@ async def function_invocation_wrapper( result_message = ChatMessage(role="assistant", contents=function_call_results) response.messages.append(result_message) return response - if any(isinstance(fccr, FunctionCallContent) for fccr in function_call_results): + if any(fccr.type == "function_call" for fccr in function_call_results): # the function calls are already in the response, so we just continue return response @@ -1980,11 +1996,7 @@ async def function_invocation_wrapper( response.messages.insert(0, msg) return response - if any( - fcr.exception is not None - for fcr in function_call_results - if isinstance(fcr, FunctionResultContent) - ): + if any(fcr.exception is not None for fcr 
in function_call_results if fcr.type == "function_result"): errors_in_a_row += 1 if errors_in_a_row >= config.max_consecutive_errors_per_request: logger.warning( @@ -2071,8 +2083,6 @@ async def streaming_function_invocation_wrapper( ChatMessage, ChatResponse, ChatResponseUpdate, - FunctionCallContent, - FunctionResultContent, prepare_messages, ) @@ -2094,7 +2104,7 @@ async def streaming_function_invocation_wrapper( tools = _extract_tools(options) # Only execute APPROVED function calls, not rejected ones approved_responses = [resp for resp in fcc_todo.values() if resp.approved] - approved_function_results: list[Contents] = [] + approved_function_results: list[Content] = [] if approved_responses: results, _ = await _try_execute_function_calls( custom_args=kwargs, @@ -2108,7 +2118,7 @@ async def streaming_function_invocation_wrapper( if any( fcr.exception is not None for fcr in approved_function_results - if isinstance(fcr, FunctionResultContent) + if fcr.type == "function_result" ): errors_in_a_row += 1 # no need to reset the counter here, since this is the start of a new attempt. 
@@ -2124,10 +2134,9 @@ async def streaming_function_invocation_wrapper( # efficient check for FunctionCallContent in the updates # if there is at least one, this stops and continuous # if there are no FCC's then it returns - from ._types import FunctionApprovalRequestContent if not any( - isinstance(item, (FunctionCallContent, FunctionApprovalRequestContent)) + item.type in ("function_call", "function_approval_request") for upd in all_updates for item in upd.contents ): @@ -2139,13 +2148,11 @@ async def streaming_function_invocation_wrapper( response: "ChatResponse" = ChatResponse.from_chat_response_updates(all_updates) # get the function calls (excluding ones that already have results) - function_results = { - it.call_id for it in response.messages[0].contents if isinstance(it, FunctionResultContent) - } + function_results = {it.call_id for it in response.messages[0].contents if it.type == "function_result"} function_calls = [ it for it in response.messages[0].contents - if isinstance(it, FunctionCallContent) and it.call_id not in function_results + if it.type == "function_call" and it.call_id not in function_results ] # When conversation id is present, it means that messages are hosted on the server. @@ -2156,6 +2163,17 @@ async def streaming_function_invocation_wrapper( # we load the tools here, since middleware might have changed them compared to before calling func. 
tools = _extract_tools(options) + fc_count = len(function_calls) if function_calls else 0 + logger.debug( + "Streaming: tools extracted=%s, function_calls=%d", + tools is not None, + fc_count, + ) + if tools: + for t in tools if isinstance(tools, list) else [tools]: + t_name = getattr(t, "name", "unknown") + t_approval = getattr(t, "approval_mode", None) + logger.debug(" Tool %s: approval_mode=%s", t_name, t_approval) if function_calls and tools: # Use the stored middleware pipeline instead of extracting from kwargs # because kwargs may have been modified by the underlying function @@ -2169,7 +2187,7 @@ async def streaming_function_invocation_wrapper( ) # Check if we have approval requests or function calls (not results) in the results - if any(isinstance(fccr, FunctionApprovalRequestContent) for fccr in function_call_results): + if any(fccr.type == "function_approval_request" for fccr in function_call_results): # Add approval requests to the existing assistant message (with tool_calls) # instead of creating a separate tool message from ._types import Role @@ -2184,7 +2202,7 @@ async def streaming_function_invocation_wrapper( yield ChatResponseUpdate(contents=function_call_results, role="assistant") response.messages.append(result_message) return - if any(isinstance(fccr, FunctionCallContent) for fccr in function_call_results): + if any(fccr.type == "function_call" for fccr in function_call_results): # the function calls were already yielded. 
return @@ -2195,11 +2213,7 @@ async def streaming_function_invocation_wrapper( yield ChatResponseUpdate(contents=function_call_results, role="tool") return - if any( - fcr.exception is not None - for fcr in function_call_results - if isinstance(fcr, FunctionResultContent) - ): + if any(fcr.exception is not None for fcr in function_call_results if fcr.type == "function_result"): errors_in_a_row += 1 if errors_in_a_row >= config.max_consecutive_errors_per_request: logger.warning( diff --git a/python/packages/core/agent_framework/_types.py b/python/packages/core/agent_framework/_types.py index 4ee640304e..d8c34c769e 100644 --- a/python/packages/core/agent_framework/_types.py +++ b/python/packages/core/agent_framework/_types.py @@ -3,7 +3,6 @@ import base64 import json import re -import sys from collections.abc import ( AsyncIterable, Callable, @@ -13,7 +12,7 @@ Sequence, ) from copy import deepcopy -from typing import Any, ClassVar, Literal, TypedDict, TypeVar, cast, overload +from typing import Any, ClassVar, Final, Literal, TypedDict, TypeVar, overload from pydantic import BaseModel, ValidationError @@ -22,50 +21,24 @@ from ._tools import ToolProtocol, ai_function from .exceptions import AdditionItemMismatch, ContentError -if sys.version_info >= (3, 11): - from typing import Self # pragma: no cover -else: - from typing_extensions import Self # pragma: no cover - - __all__ = [ "AgentResponse", "AgentResponseUpdate", - "AnnotatedRegions", - "Annotations", - "BaseAnnotation", - "BaseContent", + "Annotation", "ChatMessage", - "ChatOptions", # Backward compatibility alias "ChatOptions", "ChatResponse", "ChatResponseUpdate", - "CitationAnnotation", - "CodeInterpreterToolCallContent", - "CodeInterpreterToolResultContent", - "Contents", - "DataContent", - "ErrorContent", + "Content", "FinishReason", - "FunctionApprovalRequestContent", - "FunctionApprovalResponseContent", - "FunctionCallContent", - "FunctionResultContent", - "HostedFileContent", - "HostedVectorStoreContent", 
- "ImageGenerationToolCallContent", - "ImageGenerationToolResultContent", - "MCPServerToolCallContent", - "MCPServerToolResultContent", "Role", - "TextContent", - "TextReasoningContent", "TextSpanRegion", "ToolMode", - "UriContent", - "UsageContent", "UsageDetails", + "add_usage_details", + "detect_media_type_from_base64", "merge_chat_options", + "normalize_messages", "normalize_tools", "prepare_function_call_results", "prepend_instructions_to_messages", @@ -102,84 +75,233 @@ def __new__(mcs, name: str, bases: tuple[type, ...], namespace: dict[str, Any]) return cls -def _parse_content(content_data: MutableMapping[str, Any]) -> "Contents": - """Parse a single content data dictionary into the appropriate Content object. +def _parse_content_list(contents_data: Sequence[Any]) -> list["Content"]: + """Parse a list of content data dictionaries into appropriate Content objects. Args: - content_data: Content data (dict) + contents_data: List of content data (dicts or already constructed objects) Returns: - Content object + List of Content objects with unknown types logged and ignored + """ + contents: list["Content"] = [] + for content_data in contents_data: + if isinstance(content_data, Content): + contents.append(content_data) + continue + try: + contents.append(Content.from_dict(content_data)) + except ContentError as exc: + logger.warning(f"Skipping unknown content type or invalid content: {exc}") + + return contents + + +# region Internal Helper functions for unified Content + + +def detect_media_type_from_base64( + *, + data_bytes: bytes | None = None, + data_str: str | None = None, + data_uri: str | None = None, +) -> str | None: + """Detect media type from base64-encoded data by examining magic bytes. + + This function examines the binary signature (magic bytes) at the start of the data + to identify common media types. It's reliable for binary formats like images, audio, + video, and documents, but cannot detect text-based formats like JSON or plain text. 
+ + Args: + data_bytes: Raw binary data. + data_str: Base64-encoded data (without data URI prefix). + data_uri: Full data URI string (e.g., "data:image/png;base64,iVBORw0KGgo..."). + This will look at the actual data to determine the media_type and not at the URI prefix. + Will also not compare those two values. Raises: - ContentError if parsing fails + ValueError: If not exactly 1 of data_bytes, data_str, or data_uri is provided, or if base64 decoding fails. + + Returns: + The detected media type (e.g., 'image/png', 'audio/wav', 'application/pdf') + or None if the format is not recognized. + + Examples: + .. code-block:: python + + from agent_framework import detect_media_type_from_base64 + + # Detect from base64 string + base64_data = "iVBORw0KGgo..." + media_type = detect_media_type_from_base64(base64_data) + # Returns: "image/png" + + # Works with data URIs too + data_uri = "data:image/png;base64,iVBORw0KGgo..." + media_type = detect_media_type_from_base64(data_uri) + # Returns: "image/png" """ - content_type: str | None = content_data.get("type", None) - match content_type: - case "text": - return TextContent.from_dict(content_data) - case "data": - return DataContent.from_dict(content_data) - case "uri": - return UriContent.from_dict(content_data) - case "error": - return ErrorContent.from_dict(content_data) - case "function_call": - return FunctionCallContent.from_dict(content_data) - case "function_result": - return FunctionResultContent.from_dict(content_data) - case "usage": - return UsageContent.from_dict(content_data) - case "hosted_file": - return HostedFileContent.from_dict(content_data) - case "hosted_vector_store": - return HostedVectorStoreContent.from_dict(content_data) - case "code_interpreter_tool_call": - return CodeInterpreterToolCallContent.from_dict(content_data) - case "code_interpreter_tool_result": - return CodeInterpreterToolResultContent.from_dict(content_data) - case "image_generation_tool_call": - return 
ImageGenerationToolCallContent.from_dict(content_data) - case "image_generation_tool_result": - return ImageGenerationToolResultContent.from_dict(content_data) - case "mcp_server_tool_call": - return MCPServerToolCallContent.from_dict(content_data) - case "mcp_server_tool_result": - return MCPServerToolResultContent.from_dict(content_data) - case "function_approval_request": - return FunctionApprovalRequestContent.from_dict(content_data) - case "function_approval_response": - return FunctionApprovalResponseContent.from_dict(content_data) - case "text_reasoning": - return TextReasoningContent.from_dict(content_data) - case None: - raise ContentError("Content type is missing") - case _: - raise ContentError(f"Unknown content type '{content_type}'") - - -def _parse_content_list(contents_data: Sequence[Any]) -> list["Contents"]: - """Parse a list of content data dictionaries into appropriate Content objects. + data: bytes | None = None + if data_bytes is not None: + data = data_bytes + if data_uri is not None: + if data is not None: + raise ValueError("Provide exactly one of data_bytes, data_str, or data_uri.") + # Remove data URI prefix if present + data_str = data_uri.split(";base64,", 1)[1] + if data_str is not None: + if data is not None: + raise ValueError("Provide exactly one of data_bytes, data_str, or data_uri.") + try: + data = base64.b64decode(data_str) + except Exception as exc: + raise ValueError("Invalid base64 data provided.") from exc + if data is None: + raise ValueError("Provide exactly one of data_bytes, data_str, or data_uri.") + + # Check magic bytes for common formats + # Images + if data.startswith(b"\x89PNG\r\n\x1a\n"): + return "image/png" + if data.startswith(b"\xff\xd8\xff"): + return "image/jpeg" + if data.startswith(b"GIF87a") or data.startswith(b"GIF89a"): + return "image/gif" + if data.startswith(b"RIFF") and len(data) > 11 and data[8:12] == b"WEBP": + return "image/webp" + if data.startswith(b"BM"): + return "image/bmp" + if 
data.startswith(b" 11 and data[8:12] == b"WAVE": + return "audio/wav" + if data.startswith(b"ID3") or data.startswith(b"\xff\xfb") or data.startswith(b"\xff\xf3"): + return "audio/mpeg" + if data.startswith(b"OggS"): + return "audio/ogg" + if data.startswith(b"fLaC"): + return "audio/flac" + + return None + + +def _get_data_bytes_as_str(content: "Content") -> str | None: + """Extract base64 data string from data URI. Args: - contents_data: List of content data (dicts or already constructed objects) + content: The Content instance to extract data from. Returns: - List of Content objects with unknown types logged and ignored + The base64-encoded data as a string, or None if not a data content type. + + Raises: + ContentError: If the URI is not a valid data URI. """ - contents: list["Contents"] = [] - for content_data in contents_data: - if isinstance(content_data, dict): - try: - content = _parse_content(content_data) - contents.append(content) - except ContentError as exc: - logger.warning(f"Skipping unknown content type or invalid content: {exc}") - else: - # If it's already a content object, keep it as is - contents.append(content_data) + if content.type not in ("data", "uri"): + return None + + uri = getattr(content, "uri", None) + if not uri: + return None + + if not uri.startswith("data:"): + return None + + if ";base64," not in uri: + raise ContentError("Data URI must use base64 encoding") + + _, data = uri.split(";base64,", 1) + return data # type: ignore[return-value, no-any-return] + + +def _get_data_bytes(content: "Content") -> bytes | None: + """Extract and decode binary data from data URI. + + Args: + content: The Content instance to extract data from. + + Returns: + The decoded binary data, or None if not a data content type. + + Raises: + ContentError: If the URI is not a valid data URI or decoding fails. 
+ """ + data_str = _get_data_bytes_as_str(content) + if data_str is None: + return None + + try: + return base64.b64decode(data_str) + except Exception as e: + raise ContentError(f"Failed to decode base64 data: {e}") from e + + +KNOWN_URI_SCHEMAS: Final[set[str]] = {"http", "https", "ftp", "ftps", "file", "s3", "gs", "azure", "blob"} - return contents + +def _validate_uri(uri: str, media_type: str | None) -> dict[str, Any]: + """Validate URI format and return validation result. + + Args: + uri: The URI to validate. + media_type: Optional media type associated with the URI. + + Returns: + If valid, returns a dict, with "type" key indicating "data" or "uri", along with the uri and media_type. + """ + if not uri: + raise ContentError("URI cannot be empty") + + # Check for data URI + if uri.startswith("data:"): + if "," not in uri: + raise ContentError("Data URI must contain a comma separating metadata and data") + prefix, _ = uri.split(",", 1) + if ";" in prefix: + parts = prefix.split(";") + if len(parts) < 2: + raise ContentError("Invalid data URI format") + # Check encoding + encoding = parts[-1] + if encoding not in ("base64", ""): + raise ContentError(f"Unsupported data URI encoding: {encoding}") + if media_type is None: + # attempt to extract: + media_type = parts[0][5:] # Remove 'data:' + return {"type": "data", "uri": uri, "media_type": media_type} + + # Check for common URI schemes + if ":" in uri: + scheme = uri.split(":", 1)[0].lower() + if not media_type: + logger.warning("Using URI without media type is not recommended.") + if scheme not in KNOWN_URI_SCHEMAS: + logger.info(f"Unknown URI scheme: {scheme}, allowed schemes are {KNOWN_URI_SCHEMAS}.") + return {"type": "uri", "uri": uri, "media_type": media_type} + + # No scheme found + raise ContentError("URI must contain a scheme (e.g., http://, data:, file://)") + + +def _serialize_value(value: Any, exclude_none: bool) -> Any: + """Recursively serialize a value for to_dict.""" + if value is None: + return 
None + if isinstance(value, Content): + return value.to_dict(exclude_none=exclude_none) + if isinstance(value, Sequence) and not isinstance(value, (str, bytes, bytearray)): + return [_serialize_value(item, exclude_none) for item in value] + if isinstance(value, Mapping): + return {k: _serialize_value(v, exclude_none) for k, v in value.items()} + if hasattr(value, "to_dict"): + return value.to_dict() # type: ignore[call-arg] + return value # endregion @@ -222,1918 +344,1040 @@ def _parse_content_list(contents_data: Sequence[Any]) -> list["Contents"]: "text/xml", ] +# region Unified Content Types + +ContentType = Literal[ + "text", + "text_reasoning", + "data", + "uri", + "error", + "function_call", + "function_result", + "usage", + "hosted_file", + "hosted_vector_store", + "code_interpreter_tool_call", + "code_interpreter_tool_result", + "image_generation_tool_call", + "image_generation_tool_result", + "mcp_server_tool_call", + "mcp_server_tool_result", + "function_approval_request", + "function_approval_response", +] -class UsageDetails(SerializationMixin): - """Provides usage details about a request/response. - - Attributes: - input_token_count: The number of tokens in the input. - output_token_count: The number of tokens in the output. - total_token_count: The total number of tokens used to produce the response. - additional_counts: A dictionary of additional token counts, can be set by passing kwargs. - - Examples: - .. 
code-block:: python - - from agent_framework import UsageDetails - - # Create usage details - usage = UsageDetails( - input_token_count=100, - output_token_count=50, - total_token_count=150, - ) - print(usage.total_token_count) # 150 - - # With additional counts - usage = UsageDetails( - input_token_count=100, - output_token_count=50, - total_token_count=150, - reasoning_tokens=25, - ) - print(usage.additional_counts["reasoning_tokens"]) # 25 - # Combine usage details - usage1 = UsageDetails(input_token_count=100, output_token_count=50) - usage2 = UsageDetails(input_token_count=200, output_token_count=100) - combined = usage1 + usage2 - print(combined.input_token_count) # 300 - """ +class TextSpanRegion(TypedDict, total=False): + """TypedDict representation of a text span region annotation.""" - DEFAULT_EXCLUDE: ClassVar[set[str]] = {"_extra_counts"} + type: Literal["text_span"] + start_index: int + end_index: int - def __init__( - self, - input_token_count: int | None = None, - output_token_count: int | None = None, - total_token_count: int | None = None, - **kwargs: int, - ) -> None: - """Initializes the UsageDetails instance. - Args: - input_token_count: The number of tokens in the input. - output_token_count: The number of tokens in the output. - total_token_count: The total number of tokens used to produce the response. +class Annotation(TypedDict, total=False): + """TypedDict representation of an annotation.""" - Keyword Args: - **kwargs: Additional token counts, can be set by passing keyword arguments. - They can be retrieved through the `additional_counts` property. 
- """ - self.input_token_count = input_token_count - self.output_token_count = output_token_count - self.total_token_count = total_token_count + type: Literal["citation"] + title: str + url: str + file_id: str + tool_name: str + snippet: str + annotated_regions: Sequence[TextSpanRegion] + additional_properties: dict[str, Any] + raw_representation: Any - # Validate that all kwargs are integers (preserving Pydantic behavior) - self._extra_counts: dict[str, int] = {} - for key, value in kwargs.items(): - if not isinstance(value, int): - raise ValueError(f"Additional counts must be integers, got {type(value).__name__}") - self._extra_counts[key] = value - def to_dict(self, *, exclude_none: bool = True, exclude: set[str] | None = None) -> dict[str, Any]: - """Convert the UsageDetails instance to a dictionary. +TContent = TypeVar("TContent", bound="Content") - Keyword Args: - exclude_none: Whether to exclude None values from the output. - exclude: Set of field names to exclude from the output. +# endregion - Returns: - Dictionary representation of the UsageDetails instance. - """ - # Get the base dict from parent class - result = super().to_dict(exclude_none=exclude_none, exclude=exclude) - # Add additional counts (extra fields) - if exclude is None: - exclude = set() +class UsageDetails(TypedDict, total=False): + """A dictionary representing usage details. - for key, value in self._extra_counts.items(): - if key in exclude: - continue - if exclude_none and value is None: - continue - result[key] = value + This is a non-closed dictionary, so any specific provider fields can be added as needed. + Whenever they can be mapped to standard fields, they will be. 
+ """ - return result + input_token_count: int | None + output_token_count: int | None + total_token_count: int | None - def __str__(self) -> str: - """Returns a string representation of the usage details.""" - return self.to_json() - @property - def additional_counts(self) -> dict[str, int]: - """Represents well-known additional counts for usage. This is not an exhaustive list. +def add_usage_details(usage1: UsageDetails | None, usage2: UsageDetails | None) -> UsageDetails: + """Add two UsageDetails dictionaries by summing all numeric values. - Remarks: - To make it possible to avoid collisions between similarly-named, but unrelated, additional counts - between different AI services, any keys not explicitly defined here should be prefixed with the - name of the AI service, e.g., "openai." or "azure.". The separator "." was chosen because it cannot - be a legal character in a JSON key. + Args: + usage1: First usage details dictionary. + usage2: Second usage details dictionary. - Over time additional counts may be added to the base class. 
- """ - return self._extra_counts - - def __setitem__(self, key: str, value: int) -> None: - """Sets an additional count for the usage details.""" - if not isinstance(value, int): - raise ValueError("Additional counts must be integers.") - self._extra_counts[key] = value - - def __add__(self, other: "UsageDetails | None") -> "UsageDetails": - """Combines two `UsageDetails` instances.""" - if not other: - return self - if not isinstance(other, UsageDetails): - raise ValueError("Can only add two usage details objects together.") - - additional_counts = self.additional_counts.copy() - if other.additional_counts: - for key, value in other.additional_counts.items(): - additional_counts[key] = additional_counts.get(key, 0) + (value or 0) - - return UsageDetails( - input_token_count=(self.input_token_count or 0) + (other.input_token_count or 0), - output_token_count=(self.output_token_count or 0) + (other.output_token_count or 0), - total_token_count=(self.total_token_count or 0) + (other.total_token_count or 0), - **additional_counts, - ) + Returns: + A new UsageDetails dictionary with summed values. - def __iadd__(self, other: "UsageDetails | None") -> Self: - if not other: - return self - if not isinstance(other, UsageDetails): - raise ValueError("Can only add usage details objects together.") + Examples: + .. 
code-block:: python - self.input_token_count = (self.input_token_count or 0) + (other.input_token_count or 0) - self.output_token_count = (self.output_token_count or 0) + (other.output_token_count or 0) - self.total_token_count = (self.total_token_count or 0) + (other.total_token_count or 0) + from agent_framework import UsageDetails, add_usage_details - for key, value in other.additional_counts.items(): - self.additional_counts[key] = self.additional_counts.get(key, 0) + (value or 0) + usage1 = UsageDetails(input_token_count=5, output_token_count=10) + usage2 = UsageDetails(input_token_count=3, output_token_count=6) + combined = add_usage_details(usage1, usage2) + # Result: {'input_token_count': 8, 'output_token_count': 16} + """ + if usage1 is None: + return usage2 or UsageDetails() + if usage2 is None: + return usage1 - return self + result = UsageDetails() - def __eq__(self, other: object) -> bool: - """Check if two UsageDetails instances are equal.""" - if not isinstance(other, UsageDetails): - return False + # Combine all keys from both dictionaries + all_keys = set(usage1.keys()) | set(usage2.keys()) - return ( - self.input_token_count == other.input_token_count - and self.output_token_count == other.output_token_count - and self.total_token_count == other.total_token_count - and self.additional_counts == other.additional_counts - ) + for key in all_keys: + val1 = usage1.get(key) + val2 = usage2.get(key) + # Sum if both present, otherwise use the non-None value + if val1 is not None and val2 is not None: + result[key] = val1 + val2 # type: ignore[literal-required, operator] + elif val1 is not None: + result[key] = val1 # type: ignore[literal-required] + elif val2 is not None: + result[key] = val2 # type: ignore[literal-required] -# region BaseAnnotation + return result -class TextSpanRegion(SerializationMixin): - """Represents a region of text that has been annotated. +# region Content Class - Examples: - .. 
code-block:: python - from agent_framework import TextSpanRegion +class Content: + """Unified content container covering all content variants. - # Create a text span region - region = TextSpanRegion(start_index=0, end_index=10) - print(region.type) # "text_span" + This class provides a single unified type that handles all content variants. + Use the class methods like `Content.from_text()`, `Content.from_data()`, + `Content.from_uri()`, etc. to create instances. """ def __init__( self, + type: ContentType, *, - start_index: int | None = None, - end_index: int | None = None, - **kwargs: Any, + # Text content fields + text: str | None = None, + protected_data: str | None = None, + # Data/URI content fields + uri: str | None = None, + media_type: str | None = None, + # Error content fields + message: str | None = None, + error_code: str | None = None, + error_details: str | None = None, + # Usage content fields + usage_details: dict[str, Any] | UsageDetails | None = None, + # Function call/result fields + call_id: str | None = None, + name: str | None = None, + arguments: str | Mapping[str, Any] | None = None, + exception: str | None = None, + result: Any = None, + # Hosted file/vector store fields + file_id: str | None = None, + vector_store_id: str | None = None, + # Code interpreter tool fields + inputs: list["Content"] | None = None, + outputs: list["Content"] | Any | None = None, + # Image generation tool fields + image_id: str | None = None, + # MCP server tool fields + tool_name: str | None = None, + server_name: str | None = None, + output: Any = None, + # Function approval fields + id: str | None = None, + function_call: "Content | None" = None, + user_input_request: bool | None = None, + approved: bool | None = None, + # Common fields + annotations: Sequence[Annotation] | None = None, + additional_properties: MutableMapping[str, Any] | None = None, + raw_representation: Any | None = None, ) -> None: - """Initialize TextSpanRegion. 
+ """Create a content instance. - Keyword Args: - start_index: The start index of the text span. - end_index: The end index of the text span. - **kwargs: Additional keyword arguments. + Prefer using the classmethod constructors like `Content.from_text()` instead of calling __init__ directly. """ - self.type: Literal["text_span"] = "text_span" - self.start_index = start_index - self.end_index = end_index - - # Handle any additional kwargs - for key, value in kwargs.items(): - if not hasattr(self, key): - setattr(self, key, value) - - -AnnotatedRegions = TextSpanRegion + self.type = type + self.annotations = annotations + self.additional_properties: dict[str, Any] = additional_properties or {} # type: ignore[assignment] + self.raw_representation = raw_representation + # Set all content-specific attributes + self.text = text + self.protected_data = protected_data + self.uri = uri + self.media_type = media_type + self.message = message + self.error_code = error_code + self.error_details = error_details + self.usage_details = usage_details + self.call_id = call_id + self.name = name + self.arguments = arguments + self.exception = exception + self.result = result + self.file_id = file_id + self.vector_store_id = vector_store_id + self.inputs = inputs + self.outputs = outputs + self.image_id = image_id + self.tool_name = tool_name + self.server_name = server_name + self.output = output + self.id = id + self.function_call = function_call + self.user_input_request = user_input_request + self.approved = approved -class BaseAnnotation(SerializationMixin): - """Base class for all AI Annotation types.""" + @classmethod + def from_text( + cls: type[TContent], + text: str, + *, + annotations: Sequence[Annotation] | None = None, + additional_properties: MutableMapping[str, Any] | None = None, + raw_representation: Any = None, + ) -> TContent: + """Create text content.""" + return cls( + "text", + text=text, + annotations=annotations, + additional_properties=additional_properties, 
+ raw_representation=raw_representation, + ) - DEFAULT_EXCLUDE: ClassVar[set[str]] = {"raw_representation", "additional_properties"} + @classmethod + def from_text_reasoning( + cls: type[TContent], + *, + text: str | None = None, + protected_data: str | None = None, + annotations: Sequence[Annotation] | None = None, + additional_properties: MutableMapping[str, Any] | None = None, + raw_representation: Any = None, + ) -> TContent: + """Create text reasoning content.""" + return cls( + "text_reasoning", + text=text, + protected_data=protected_data, + annotations=annotations, + additional_properties=additional_properties, + raw_representation=raw_representation, + ) - def __init__( - self, + @classmethod + def from_data( + cls: type[TContent], + data: bytes, + media_type: str, *, - annotated_regions: list[AnnotatedRegions] | list[MutableMapping[str, Any]] | None = None, - additional_properties: dict[str, Any] | None = None, - raw_representation: Any | None = None, - **kwargs: Any, - ) -> None: - """Initialize BaseAnnotation. + annotations: Sequence[Annotation] | None = None, + additional_properties: MutableMapping[str, Any] | None = None, + raw_representation: Any = None, + ) -> TContent: + r"""Create data content from raw binary data. - Keyword Args: - annotated_regions: A list of regions that have been annotated. Can be region objects or dicts. - additional_properties: Optional additional properties associated with the content. - raw_representation: Optional raw representation of the content from an underlying implementation. - **kwargs: Additional keyword arguments (merged into additional_properties). 
- """ - # Handle annotated_regions conversion from dict format (for SerializationMixin support) - self.annotated_regions: list[AnnotatedRegions] | None = None - if annotated_regions is not None: - converted_regions: list[AnnotatedRegions] = [] - for region_data in annotated_regions: - if isinstance(region_data, MutableMapping): - if region_data.get("type", "") == "text_span": - converted_regions.append(TextSpanRegion.from_dict(region_data)) - else: - logger.warning(f"Unknown region type: {region_data.get('type', '')} in {region_data}") - else: - # Already a region object, keep as is - converted_regions.append(region_data) - self.annotated_regions = converted_regions + Use this to create content from binary data (images, audio, documents, etc.). + The data will be automatically base64-encoded into a data URI. - # Merge kwargs into additional_properties - self.additional_properties = additional_properties or {} - self.additional_properties.update(kwargs) + Args: + data: Raw binary data as bytes. This should be the actual binary data, + not a base64-encoded string. If you have a base64 string, + decode it first: base64.b64decode(base64_string) + media_type: The MIME type of the data (e.g., "image/png", "application/pdf"). + If you don't know the media type and have base64 data, you can detect it in some cases: - self.raw_representation = raw_representation + .. code-block:: python - def to_dict(self, *, exclude: set[str] | None = None, exclude_none: bool = True) -> dict[str, Any]: - """Convert the instance to a dictionary. + from agent_framework import detect_media_type_from_base64, Content - Extracts additional_properties fields to the root level. 
+ media_type = detect_media_type_from_base64(base64_string) + if media_type is None: + raise ValueError("Could not detect media type") + data_bytes = base64.b64decode(base64_string) + content = Content.from_data(data=data_bytes, media_type=media_type) Keyword Args: - exclude: Set of field names to exclude from serialization. - exclude_none: Whether to exclude None values from the output. Defaults to True. + annotations: Optional annotations associated with the content. + additional_properties: Optional additional properties. + raw_representation: Optional raw representation from an underlying implementation. Returns: - Dictionary representation of the instance. - """ - # Get the base dict from SerializationMixin - result = super().to_dict(exclude=exclude, exclude_none=exclude_none) + A Content instance with type="data". - # Extract additional_properties to root level - if self.additional_properties: - result.update(self.additional_properties) - - return result - - -class CitationAnnotation(BaseAnnotation): - """Represents a citation annotation. - - Attributes: - type: The type of content, which is always "citation" for this class. - title: The title of the cited content. - url: The URL of the cited content. - file_id: The file identifier of the cited content, if applicable. - tool_name: The name of the tool that generated the citation, if applicable. - snippet: A snippet of the cited content, if applicable. - annotated_regions: A list of regions that have been annotated with this citation. - additional_properties: Optional additional properties associated with the content. - raw_representation: Optional raw representation of the content from an underlying implementation. - - Examples: - .. code-block:: python + Raises: + TypeError: If data is not bytes. - from agent_framework import CitationAnnotation, TextSpanRegion + Examples: + .. 
code-block:: python - # Create a citation annotation - citation = CitationAnnotation( - title="Agent Framework Documentation", - url="https://example.com/docs", - snippet="This is a relevant excerpt...", - annotated_regions=[TextSpanRegion(start_index=0, end_index=25)], - ) - print(citation.title) # "Agent Framework Documentation" - """ + from agent_framework import Content, detect_media_type_from_base64 + import base64 - def __init__( - self, - *, - title: str | None = None, - url: str | None = None, - file_id: str | None = None, - tool_name: str | None = None, - snippet: str | None = None, - annotated_regions: list[AnnotatedRegions] | None = None, - additional_properties: dict[str, Any] | None = None, - raw_representation: Any | None = None, - **kwargs: Any, - ) -> None: - """Initialize CitationAnnotation. + # Create from raw binary data with known media type + image_bytes = b"\x89PNG\r\n\x1a\n..." + content = Content.from_data(data=image_bytes, media_type="image/png") - Keyword Args: - title: The title of the cited content. - url: The URL of the cited content. - file_id: The file identifier of the cited content, if applicable. - tool_name: The name of the tool that generated the citation, if applicable. - snippet: A snippet of the cited content, if applicable. - annotated_regions: A list of regions that have been annotated with this citation. - additional_properties: Optional additional properties associated with the content. - raw_representation: Optional raw representation of the content from an underlying implementation. - **kwargs: Additional keyword arguments. + # If you have a base64 string and need to detect media type + base64_string = "iVBORw0KGgo..." 
+ media_type = detect_media_type_from_base64(base64_string) + if media_type is None: + raise ValueError("Unknown media type") + image_bytes = base64.b64decode(base64_string) + content = Content.from_data(data=image_bytes, media_type=media_type) """ - super().__init__( - annotated_regions=annotated_regions, + try: + encoded_data = base64.b64encode(data).decode("utf-8") + except TypeError as e: + raise TypeError( + "Could not encode data to base64. Ensure 'data' is of type bytes.Or another b64encode compatible type." + ) from e + return cls( + "data", + uri=f"data:{media_type};base64,{encoded_data}", + media_type=media_type, + annotations=annotations, additional_properties=additional_properties, raw_representation=raw_representation, - **kwargs, ) - self.title = title - self.url = url - self.file_id = file_id - self.tool_name = tool_name - self.snippet = snippet - self.type: Literal["citation"] = "citation" - - -Annotations = CitationAnnotation - - -# region BaseContent - -TContents = TypeVar("TContents", bound="BaseContent") + @classmethod + def from_uri( + cls: type[TContent], + uri: str, + *, + media_type: str | None = None, + annotations: Sequence[Annotation] | None = None, + additional_properties: MutableMapping[str, Any] | None = None, + raw_representation: Any = None, + ) -> TContent: + """Create content from a URI, can be both data URI or external URI. -class BaseContent(SerializationMixin): - """Represents content used by AI services. - - Attributes: - annotations: Optional annotations associated with the content. - additional_properties: Optional additional properties associated with the content. - raw_representation: Optional raw representation of the content from an underlying implementation. - - """ - - DEFAULT_EXCLUDE: ClassVar[set[str]] = {"raw_representation", "additional_properties"} + Use this when you already have a properly formed data URI + (e.g., "data:image/png;base64,iVBORw0KGgo..."). 
+ Or when you receive a link to a online resource (e.g., "https://example.com/image.png"). - def __init__( - self, - *, - annotations: Sequence[Annotations | MutableMapping[str, Any]] | None = None, - additional_properties: dict[str, Any] | None = None, - raw_representation: Any | None = None, - **kwargs: Any, - ) -> None: - """Initialize BaseContent. + Args: + uri: A URI string, + that either includes the media type and base64-encoded data, + or a valid URL to an external resource. Keyword Args: - annotations: Optional annotations associated with the content. Can be annotation objects or dicts. - additional_properties: Optional additional properties associated with the content. - raw_representation: Optional raw representation of the content from an underlying implementation. - **kwargs: Additional keyword arguments (merged into additional_properties). - """ - self.annotations: list[Annotations] | None = None - # Handle annotations conversion from dict format (for SerializationMixin support) - if annotations is not None: - converted_annotations: list[Annotations] = [] - for annotation_data in annotations: - if isinstance(annotation_data, Annotations): - # If it's already an annotation object, keep it as is - converted_annotations.append(annotation_data) - elif isinstance(annotation_data, MutableMapping) and annotation_data.get("type", "") == "citation": - converted_annotations.append(CitationAnnotation.from_dict(annotation_data)) - else: - logger.debug( - f"Unknown annotation found: {annotation_data.get('type', 'no_type')}" - f" with data: {annotation_data}" - ) - self.annotations = converted_annotations + media_type: The MIME type of the data (e.g., "image/png", "application/pdf"). + This is optional but recommended for external URIs. + annotations: Optional annotations associated with the content. + additional_properties: Optional additional properties. + raw_representation: Optional raw representation from an underlying implementation. 
- # Merge kwargs into additional_properties - self.additional_properties = additional_properties or {} - self.additional_properties.update(kwargs) + Raises: + ContentError: If the URI is not valid. - self.raw_representation = raw_representation + Examples: + .. code-block:: python - def to_dict(self, *, exclude: set[str] | None = None, exclude_none: bool = True) -> dict[str, Any]: - """Convert the instance to a dictionary. + from agent_framework import Content - Extracts additional_properties fields to the root level. + # Create from a data URI + content = Content.from_uri(uri="data:image/png;base64,iVBORw0KGgo...", media_type="image/png") + assert content.type == "data" - Keyword Args: - exclude: Set of field names to exclude from serialization. - exclude_none: Whether to exclude None values from the output. Defaults to True. + # Create from an external URI + content = Content.from_uri(uri="https://example.com/image.png", media_type="image/png") + assert content.type == "uri" + + # When receiving a raw already encode data string, you can do this: + raw_base64_string = "iVBORw0KGgo..." + content = Content.from_uri( + uri=f"data:{(detect_media_type_from_base64(data_str=raw_base64_string) or 'image/png')};base64,{ + raw_base64_string + }" + ) Returns: - Dictionary representation of the instance. + A Content instance with type="data" for data URIs or type="uri" for external URIs. """ - # Get the base dict from SerializationMixin - result = super().to_dict(exclude=exclude, exclude_none=exclude_none) - - # Extract additional_properties to root level - if self.additional_properties: - result.update(self.additional_properties) - - return result - - -class TextContent(BaseContent): - """Represents text content in a chat. - - Attributes: - text: The text content represented by this instance. - type: The type of content, which is always "text" for this class. - annotations: Optional annotations associated with the content. 
- additional_properties: Optional additional properties associated with the content. - raw_representation: Optional raw representation of the content. + return cls( + **_validate_uri(uri, media_type), + annotations=annotations, + additional_properties=additional_properties, + raw_representation=raw_representation, + ) - Examples: - .. code-block:: python - - from agent_framework import TextContent - - # Create basic text content - text = TextContent(text="Hello, world!") - print(text.text) # "Hello, world!" - - # Concatenate text content - text1 = TextContent(text="Hello, ") - text2 = TextContent(text="world!") - combined = text1 + text2 - print(combined.text) # "Hello, world!" - """ - - def __init__( - self, - text: str, - *, - additional_properties: dict[str, Any] | None = None, - raw_representation: Any | None = None, - annotations: Sequence[Annotations | MutableMapping[str, Any]] | None = None, - **kwargs: Any, - ): - """Initializes a TextContent instance. - - Args: - text: The text content represented by this instance. - - Keyword Args: - additional_properties: Optional additional properties associated with the content. - raw_representation: Optional raw representation of the content. - annotations: Optional annotations associated with the content. - **kwargs: Any additional keyword arguments. - """ - super().__init__( - annotations=annotations, - additional_properties=additional_properties, - raw_representation=raw_representation, - **kwargs, - ) - self.text = text - self.type: Literal["text"] = "text" - - def __add__(self, other: "TextContent") -> "TextContent": - """Concatenate two TextContent instances. - - The following things happen: - The text is concatenated. - The annotations are combined. - The additional properties are merged, with the values of shared keys of the first instance taking precedence. - The raw_representations are combined into a list of them, if they both have one. 
- """ - if not isinstance(other, TextContent): - raise TypeError("Incompatible type") - - # Merge raw representations - if self.raw_representation is None: - raw_representation = other.raw_representation - elif other.raw_representation is None: - raw_representation = self.raw_representation - else: - raw_representation = ( - self.raw_representation if isinstance(self.raw_representation, list) else [self.raw_representation] - ) + (other.raw_representation if isinstance(other.raw_representation, list) else [other.raw_representation]) - - # Merge annotations - if self.annotations is None: - annotations = other.annotations - elif other.annotations is None: - annotations = self.annotations - else: - annotations = self.annotations + other.annotations - - # Create new instance using from_dict for proper deserialization - result_dict = { - "text": self.text + other.text, - "type": "text", - "annotations": [ann.to_dict(exclude_none=False) for ann in annotations] if annotations else None, - "additional_properties": { - **(other.additional_properties or {}), - **(self.additional_properties or {}), - }, - "raw_representation": raw_representation, - } - return TextContent.from_dict(result_dict) - - def __iadd__(self, other: "TextContent") -> Self: - """In-place concatenation of two TextContent instances. - - The following things happen: - The text is concatenated. - The annotations are combined. - The additional properties are merged, with the values of shared keys of the first instance taking precedence. - The raw_representations are combined into a list of them, if they both have one. 
- """ - if not isinstance(other, TextContent): - raise TypeError("Incompatible type") - - # Concatenate text - self.text += other.text - - # Merge additional properties (self takes precedence) - if self.additional_properties is None: - self.additional_properties = {} - if other.additional_properties: - # Update from other first, then restore self's values to maintain precedence - self_props = self.additional_properties.copy() - self.additional_properties.update(other.additional_properties) - self.additional_properties.update(self_props) - - # Merge raw representations - if self.raw_representation is None: - self.raw_representation = other.raw_representation - elif other.raw_representation is not None: - self.raw_representation = ( - self.raw_representation if isinstance(self.raw_representation, list) else [self.raw_representation] - ) + (other.raw_representation if isinstance(other.raw_representation, list) else [other.raw_representation]) - - # Merge annotations - if other.annotations: - if self.annotations is None: - self.annotations = [] - self.annotations.extend(other.annotations) - - return self - - -class TextReasoningContent(BaseContent): - """Represents text reasoning content in a chat. - - Remarks: - This class and `TextContent` are superficially similar, but distinct. - - Attributes: - text: The text content represented by this instance. - type: The type of content, which is always "text_reasoning" for this class. - annotations: Optional annotations associated with the content. - additional_properties: Optional additional properties associated with the content. - raw_representation: Optional raw representation of the content. - - Examples: - .. code-block:: python - - from agent_framework import TextReasoningContent - - # Create reasoning content - reasoning = TextReasoningContent(text="Let me think step by step...") - print(reasoning.text) # "Let me think step by step..." 
- - # Concatenate reasoning content - reasoning1 = TextReasoningContent(text="First, ") - reasoning2 = TextReasoningContent(text="second, ") - combined = reasoning1 + reasoning2 - print(combined.text) # "First, second, " - """ - - def __init__( - self, - text: str | None, - *, - protected_data: str | None = None, - additional_properties: dict[str, Any] | None = None, - raw_representation: Any | None = None, - annotations: Sequence[Annotations | MutableMapping[str, Any]] | None = None, - **kwargs: Any, - ): - """Initializes a TextReasoningContent instance. - - Args: - text: The text content represented by this instance. - - Keyword Args: - protected_data: This property is used to store data from a provider that should be roundtripped back to the - provider but that is not intended for human consumption. It is often encrypted or otherwise redacted - information that is only intended to be sent back to the provider and not displayed to the user. It's - possible for a TextReasoningContent to contain only `protected_data` and have an empty `text` property. - This data also may be associated with the corresponding `text`, acting as a validation signature for it. - - Note that whereas `text` can be provider agnostic, `protected_data` is provider-specific, and is likely - to only be understood by the provider that created it. The data is often represented as a more complex - object, so it should be serialized to a string before storing so that the whole object is easily - serializable without loss. - additional_properties: Optional additional properties associated with the content. - raw_representation: Optional raw representation of the content. - annotations: Optional annotations associated with the content. - **kwargs: Any additional keyword arguments. 
- """ - super().__init__( - annotations=annotations, - additional_properties=additional_properties, - raw_representation=raw_representation, - **kwargs, - ) - self.text = text - self.protected_data = protected_data - self.type: Literal["text_reasoning"] = "text_reasoning" - - def __add__(self, other: "TextReasoningContent") -> "TextReasoningContent": - """Concatenate two TextReasoningContent instances. - - The following things happen: - The text is concatenated. - The annotations are combined. - The additional properties are merged, with the values of shared keys of the first instance taking precedence. - The raw_representations are combined into a list of them, if they both have one. - """ - if not isinstance(other, TextReasoningContent): - raise TypeError("Incompatible type") - - # Merge raw representations - if self.raw_representation is None: - raw_representation = other.raw_representation - elif other.raw_representation is None: - raw_representation = self.raw_representation - else: - raw_representation = ( - self.raw_representation if isinstance(self.raw_representation, list) else [self.raw_representation] - ) + (other.raw_representation if isinstance(other.raw_representation, list) else [other.raw_representation]) - - # Merge annotations - if self.annotations is None: - annotations = other.annotations - elif other.annotations is None: - annotations = self.annotations - else: - annotations = self.annotations + other.annotations - - # Replace protected data. 
- # Discussion: https://github.com/microsoft/agent-framework/pull/2950#discussion_r2634345613 - protected_data = other.protected_data or self.protected_data - - # Create new instance using from_dict for proper deserialization - result_dict = { - "text": (self.text or "") + (other.text or "") if self.text is not None or other.text is not None else None, - "type": "text_reasoning", - "annotations": [ann.to_dict(exclude_none=False) for ann in annotations] if annotations else None, - "additional_properties": {**(self.additional_properties or {}), **(other.additional_properties or {})}, - "raw_representation": raw_representation, - "protected_data": protected_data, - } - return TextReasoningContent.from_dict(result_dict) - - def __iadd__(self, other: "TextReasoningContent") -> Self: - """In-place concatenation of two TextReasoningContent instances. - - The following things happen: - The text is concatenated. - The annotations are combined. - The additional properties are merged, with the values of shared keys of the first instance taking precedence. - The raw_representations are combined into a list of them, if they both have one. 
- """ - if not isinstance(other, TextReasoningContent): - raise TypeError("Incompatible type") - - # Concatenate text - if self.text is not None or other.text is not None: - self.text = (self.text or "") + (other.text or "") - # if both are None, should keep as None - - # Merge additional properties (self takes precedence) - if self.additional_properties is None: - self.additional_properties = {} - if other.additional_properties: - # Update from other first, then restore self's values to maintain precedence - self_props = self.additional_properties.copy() - self.additional_properties.update(other.additional_properties) - self.additional_properties.update(self_props) - - # Merge raw representations - if self.raw_representation is None: - self.raw_representation = other.raw_representation - elif other.raw_representation is not None: - self.raw_representation = ( - self.raw_representation if isinstance(self.raw_representation, list) else [self.raw_representation] - ) + (other.raw_representation if isinstance(other.raw_representation, list) else [other.raw_representation]) - - # Replace protected data. - # Discussion: https://github.com/microsoft/agent-framework/pull/2950#discussion_r2634345613 - if other.protected_data is not None: - self.protected_data = other.protected_data - - # Merge annotations - if other.annotations: - if self.annotations is None: - self.annotations = [] - self.annotations.extend(other.annotations) - - return self - - -TDataContent = TypeVar("TDataContent", bound="DataContent") - - -class DataContent(BaseContent): - """Represents binary data content with an associated media type (also known as a MIME type). - - Important: - This is for binary data that is represented as a data URI, not for online resources. - Use ``UriContent`` for online resources. - - Attributes: - uri: The URI of the data represented by this instance, typically in the form of a data URI. - Should be in the form: "data:{media_type};base64,{base64_data}". 
- media_type: The media type of the data. - type: The type of content, which is always "data" for this class. - annotations: Optional annotations associated with the content. - additional_properties: Optional additional properties associated with the content. - raw_representation: Optional raw representation of the content. - - Examples: - .. code-block:: python - - from agent_framework import DataContent - - # Create from binary data - image_data = b"raw image bytes" - data_content = DataContent(data=image_data, media_type="image/png") - - # Create from base64-encoded string - base64_string = "iVBORw0KGgoAAAANS..." - data_content = DataContent(data=base64_string, media_type="image/png") - - # Create from data URI - data_uri = "data:image/png;base64,iVBORw0KGgoAAAANS..." - data_content = DataContent(uri=data_uri) - - # Check media type - if data_content.has_top_level_media_type("image"): - print("This is an image") - """ - - @overload - def __init__( - self, - *, - uri: str, - annotations: Sequence[Annotations | MutableMapping[str, Any]] | None = None, - additional_properties: dict[str, Any] | None = None, - raw_representation: Any | None = None, - **kwargs: Any, - ) -> None: - """Initializes a DataContent instance with a URI. - - Important: - This is for binary data that is represented as a data URI, not for online resources. - Use ``UriContent`` for online resources. - - Keyword Args: - uri: The URI of the data represented by this instance. - Should be in the form: "data:{media_type};base64,{base64_data}". - annotations: Optional annotations associated with the content. - additional_properties: Optional additional properties associated with the content. - raw_representation: Optional raw representation of the content. - **kwargs: Any additional keyword arguments. 
- """ - - @overload - def __init__( - self, - *, - data: bytes, - media_type: str, - annotations: Sequence[Annotations | MutableMapping[str, Any]] | None = None, - additional_properties: dict[str, Any] | None = None, - raw_representation: Any | None = None, - **kwargs: Any, - ) -> None: - """Initializes a DataContent instance with binary data. - - Important: - This is for binary data that is represented as a data URI, not for online resources. - Use ``UriContent`` for online resources. - - Keyword Args: - data: The binary data represented by this instance. - The data is transformed into a base64-encoded data URI. - media_type: The media type of the data. - annotations: Optional annotations associated with the content. - additional_properties: Optional additional properties associated with the content. - raw_representation: Optional raw representation of the content. - **kwargs: Any additional keyword arguments. - """ - - @overload - def __init__( - self, - *, - data: str, - media_type: str, - annotations: Sequence[Annotations | MutableMapping[str, Any]] | None = None, - additional_properties: dict[str, Any] | None = None, - raw_representation: Any | None = None, - **kwargs: Any, - ) -> None: - """Initializes a DataContent instance with base64-encoded string data. - - Important: - This is for binary data that is represented as a data URI, not for online resources. - Use ``UriContent`` for online resources. - - Keyword Args: - data: The base64-encoded string data represented by this instance. - The data is used directly to construct a data URI. - media_type: The media type of the data. - annotations: Optional annotations associated with the content. - additional_properties: Optional additional properties associated with the content. - raw_representation: Optional raw representation of the content. - **kwargs: Any additional keyword arguments. 
- """ - - def __init__( - self, - *, - uri: str | None = None, - data: bytes | str | None = None, - media_type: str | None = None, - annotations: Sequence[Annotations | MutableMapping[str, Any]] | None = None, - additional_properties: dict[str, Any] | None = None, - raw_representation: Any | None = None, - **kwargs: Any, - ) -> None: - """Initializes a DataContent instance. - - Important: - This is for binary data that is represented as a data URI, not for online resources. - Use ``UriContent`` for online resources. - - Keyword Args: - uri: The URI of the data represented by this instance. - Should be in the form: "data:{media_type};base64,{base64_data}". - data: The binary data or base64-encoded string represented by this instance. - If bytes, the data is transformed into a base64-encoded data URI. - If str, it is assumed to be already base64-encoded and used directly. - media_type: The media type of the data. - annotations: Optional annotations associated with the content. - additional_properties: Optional additional properties associated with the content. - raw_representation: Optional raw representation of the content. - **kwargs: Any additional keyword arguments. 
- """ - if uri is None: - if data is None or media_type is None: - raise ValueError("Either 'data' and 'media_type' or 'uri' must be provided.") - - base64_data: str = base64.b64encode(data).decode("utf-8") if isinstance(data, bytes) else data - uri = f"data:{media_type};base64,{base64_data}" - - # Validate URI format and extract media type if not provided - validated_uri = self._validate_uri(uri) - if media_type is None: - match = URI_PATTERN.match(validated_uri) - if match: - media_type = match.group("media_type") - - super().__init__( - annotations=annotations, - additional_properties=additional_properties, - raw_representation=raw_representation, - **kwargs, - ) - self.uri = validated_uri - self.media_type = media_type - self.type: Literal["data"] = "data" - - @classmethod - def _validate_uri(cls, uri: str) -> str: - """Validates the URI format and extracts the media type. - - Minimal data URI parser based on RFC 2397: https://datatracker.ietf.org/doc/html/rfc2397. - """ - match = URI_PATTERN.match(uri) - if not match: - raise ValueError(f"Invalid data URI format: {uri}") - media_type = match.group("media_type") - if media_type not in KNOWN_MEDIA_TYPES: - raise ValueError(f"Unknown media type: {media_type}") - return uri - - def has_top_level_media_type(self, top_level_media_type: Literal["application", "audio", "image", "text"]) -> bool: - return _has_top_level_media_type(self.media_type, top_level_media_type) - - @staticmethod - def detect_image_format_from_base64(image_base64: str) -> str: - """Detect image format from base64 data by examining the binary header. 
- - Args: - image_base64: Base64 encoded image data - - Returns: - Image format as string (png, jpeg, webp, gif) with png as fallback - """ - try: - # Constants for image format detection - # ~75 bytes of binary data should be enough to detect most image formats - FORMAT_DETECTION_BASE64_CHARS = 100 - - # Decode a small portion to detect format - decoded_data = base64.b64decode(image_base64[:FORMAT_DETECTION_BASE64_CHARS]) - if decoded_data.startswith(b"\x89PNG"): - return "png" - if decoded_data.startswith(b"\xff\xd8\xff"): - return "jpeg" - if decoded_data.startswith(b"RIFF") and b"WEBP" in decoded_data[:12]: - return "webp" - if decoded_data.startswith(b"GIF87a") or decoded_data.startswith(b"GIF89a"): - return "gif" - return "png" # Default fallback - except Exception: - return "png" # Fallback if decoding fails - - @staticmethod - def create_data_uri_from_base64(image_base64: str) -> tuple[str, str]: - """Create a data URI and media type from base64 image data. - - Args: - image_base64: Base64 encoded image data - - Returns: - Tuple of (data_uri, media_type) - """ - format_type = DataContent.detect_image_format_from_base64(image_base64) - uri = f"data:image/{format_type};base64,{image_base64}" - media_type = f"image/{format_type}" - return uri, media_type - - def get_data_bytes_as_str(self) -> str: - """Extracts and returns the base64-encoded data from the data URI. - - Returns: - The binary data as str. - """ - match = URI_PATTERN.match(self.uri) - if not match: - raise ValueError(f"Invalid data URI format: {self.uri}") - return match.group("base64_data") - - def get_data_bytes(self) -> bytes: - """Extracts and returns the binary data from the data URI. - - Returns: - The binary data as bytes. - """ - base64_data = self.get_data_bytes_as_str() - return base64.b64decode(base64_data) - - -class UriContent(BaseContent): - """Represents a URI content. - - Important: - This is used for content that is identified by a URI, such as an image or a file. 
- For (binary) data URIs, use ``DataContent`` instead. - - Attributes: - uri: The URI of the content, e.g., 'https://example.com/image.png'. - media_type: The media type of the content, e.g., 'image/png', 'application/json', etc. - type: The type of content, which is always "uri" for this class. - annotations: Optional annotations associated with the content. - additional_properties: Optional additional properties associated with the content. - raw_representation: Optional raw representation of the content. - - Examples: - .. code-block:: python - - from agent_framework import UriContent - - # Create URI content for an image - image_uri = UriContent( - uri="https://example.com/image.png", - media_type="image/png", - ) - - # Create URI content for a document - doc_uri = UriContent( - uri="https://example.com/document.pdf", - media_type="application/pdf", - ) - - # Check if it's an image - if image_uri.has_top_level_media_type("image"): - print("This is an image URI") - """ - - def __init__( - self, - uri: str, - media_type: str, - *, - annotations: Sequence[Annotations | MutableMapping[str, Any]] | None = None, - additional_properties: dict[str, Any] | None = None, - raw_representation: Any | None = None, - **kwargs: Any, - ) -> None: - """Initializes a UriContent instance. - - Remarks: - This is used for content that is identified by a URI, such as an image or a file. - For (binary) data URIs, use `DataContent` instead. - - Args: - uri: The URI of the content. - media_type: The media type of the content. - - Keyword Args: - annotations: Optional annotations associated with the content. - additional_properties: Optional additional properties associated with the content. - raw_representation: Optional raw representation of the content. - **kwargs: Any additional keyword arguments. 
- """ - super().__init__( - annotations=annotations, - additional_properties=additional_properties, - raw_representation=raw_representation, - **kwargs, - ) - self.uri = uri - self.media_type = media_type - self.type: Literal["uri"] = "uri" - - def has_top_level_media_type(self, top_level_media_type: Literal["application", "audio", "image", "text"]) -> bool: - """Returns a boolean indicating if the media type has the specified top-level media type. - - Args: - top_level_media_type: The top-level media type to check for, allowed values: - "image", "text", "application", "audio". - - """ - return _has_top_level_media_type(self.media_type, top_level_media_type) - - -def _has_top_level_media_type( - media_type: str | None, top_level_media_type: Literal["application", "audio", "image", "text"] -) -> bool: - if media_type is None: - return False - - slash_index = media_type.find("/") - span = media_type[:slash_index] if slash_index >= 0 else media_type - span = span.strip() - return span.lower() == top_level_media_type.lower() - - -class ErrorContent(BaseContent): - """Represents an error. - - Remarks: - Typically used for non-fatal errors, where something went wrong as part of the operation, - but the operation was still able to continue. - - Attributes: - error_code: The error code associated with the error. - details: Additional details about the error. - message: The error message. - type: The type of content, which is always "error" for this class. - annotations: Optional annotations associated with the content. - additional_properties: Optional additional properties associated with the content. - raw_representation: Optional raw representation of the content. - - Examples: - .. 
code-block:: python - - from agent_framework import ErrorContent - - # Create an error content - error = ErrorContent( - message="Failed to process request", - error_code="PROCESSING_ERROR", - details="The input format was invalid", - ) - print(str(error)) # "Error PROCESSING_ERROR: Failed to process request" - - # Error without code - simple_error = ErrorContent(message="Something went wrong") - print(str(simple_error)) # "Something went wrong" - """ - - def __init__( - self, - *, - message: str | None = None, - error_code: str | None = None, - details: str | None = None, - annotations: Sequence[Annotations | MutableMapping[str, Any]] | None = None, - additional_properties: dict[str, Any] | None = None, - raw_representation: Any | None = None, - **kwargs: Any, - ) -> None: - """Initializes an ErrorContent instance. - - Keyword Args: - message: The error message. - error_code: The error code associated with the error. - details: Additional details about the error. - annotations: Optional annotations associated with the content. - additional_properties: Optional additional properties associated with the content. - raw_representation: Optional raw representation of the content. - **kwargs: Any additional keyword arguments. - """ - super().__init__( - annotations=annotations, - additional_properties=additional_properties, - raw_representation=raw_representation, - **kwargs, - ) - self.message = message - self.error_code = error_code - self.details = details - self.type: Literal["error"] = "error" - - def __str__(self) -> str: - """Returns a string representation of the error.""" - return f"Error {self.error_code}: {self.message}" if self.error_code else self.message or "Unknown error" - - -class FunctionCallContent(BaseContent): - """Represents a function call request. - - Attributes: - call_id: The function call identifier. - name: The name of the function requested. - arguments: The arguments requested to be provided to the function. 
- exception: Any exception that occurred while mapping the original function call data to this representation. - type: The type of content, which is always "function_call" for this class. - annotations: Optional annotations associated with the content. - additional_properties: Optional additional properties associated with the content. - raw_representation: Optional raw representation of the content. - - Examples: - .. code-block:: python - - from agent_framework import FunctionCallContent - - # Create a function call - func_call = FunctionCallContent( - call_id="call_123", - name="get_weather", - arguments={"location": "Seattle", "unit": "celsius"}, - ) - - # Parse arguments - args = func_call.parse_arguments() - print(args["location"]) # "Seattle" - - # Create with string arguments (gradual completion) - func_call_partial_1 = FunctionCallContent( - call_id="call_124", - name="search", - arguments='{"query": ', - ) - func_call_partial_2 = FunctionCallContent( - call_id="call_124", - name="search", - arguments='"latest news"}', - ) - full_call = func_call_partial_1 + func_call_partial_2 - args = full_call.parse_arguments() - print(args["query"]) # "latest news" - """ - - def __init__( - self, + @classmethod + def from_error( + cls: type[TContent], *, - call_id: str, - name: str, - arguments: str | dict[str, Any | None] | None = None, - exception: Exception | None = None, - annotations: Sequence[Annotations | MutableMapping[str, Any]] | None = None, - additional_properties: dict[str, Any] | None = None, - raw_representation: Any | None = None, - **kwargs: Any, - ) -> None: - """Initializes a FunctionCallContent instance. - - Keyword Args: - call_id: The function call identifier. - name: The name of the function requested. - arguments: The arguments requested to be provided to the function, - can be a string to allow gradual completion of the args. - exception: Any exception that occurred while mapping the original function call data to this representation. 
- annotations: Optional annotations associated with the content. - additional_properties: Optional additional properties associated with the content. - raw_representation: Optional raw representation of the content. - **kwargs: Any additional keyword arguments. - """ - super().__init__( + message: str | None = None, + error_code: str | None = None, + error_details: str | None = None, + annotations: Sequence[Annotation] | None = None, + additional_properties: MutableMapping[str, Any] | None = None, + raw_representation: Any = None, + ) -> TContent: + """Create error content.""" + return cls( + "error", + message=message, + error_code=error_code, + error_details=error_details, annotations=annotations, additional_properties=additional_properties, raw_representation=raw_representation, - **kwargs, ) - self.call_id = call_id - self.name = name - self.arguments = arguments - self.exception = exception - self.type: Literal["function_call"] = "function_call" - - def parse_arguments(self) -> dict[str, Any | None] | None: - """Parse the arguments into a dictionary. - - If they cannot be parsed as json or if the resulting json is not a dict, - they are returned as a dictionary with a single key "raw". 
- """ - if isinstance(self.arguments, str): - # If arguments are a string, try to parse it as JSON - try: - loaded = json.loads(self.arguments) - if isinstance(loaded, dict): - return loaded # type:ignore - return {"raw": loaded} - except (json.JSONDecodeError, TypeError): - return {"raw": self.arguments} - return self.arguments - def __add__(self, other: "FunctionCallContent") -> "FunctionCallContent": - if not isinstance(other, FunctionCallContent): - raise TypeError("Incompatible type") - if other.call_id and self.call_id != other.call_id: - raise AdditionItemMismatch("", log_level=None) - if not self.arguments: - arguments = other.arguments - elif not other.arguments: - arguments = self.arguments - elif isinstance(self.arguments, str) and isinstance(other.arguments, str): - arguments = self.arguments + other.arguments - elif isinstance(self.arguments, dict) and isinstance(other.arguments, dict): - arguments = {**self.arguments, **other.arguments} - else: - raise TypeError("Incompatible argument types") - return FunctionCallContent( - call_id=self.call_id, - name=self.name, + @classmethod + def from_function_call( + cls: type[TContent], + call_id: str, + name: str, + *, + arguments: str | Mapping[str, Any] | None = None, + exception: str | None = None, + annotations: Sequence[Annotation] | None = None, + additional_properties: MutableMapping[str, Any] | None = None, + raw_representation: Any = None, + ) -> TContent: + """Create function call content.""" + return cls( + "function_call", + call_id=call_id, + name=name, arguments=arguments, - exception=self.exception or other.exception, - additional_properties={**(self.additional_properties or {}), **(other.additional_properties or {})}, - raw_representation=self.raw_representation or other.raw_representation, + exception=exception, + annotations=annotations, + additional_properties=additional_properties, + raw_representation=raw_representation, ) - -class FunctionResultContent(BaseContent): - """Represents the 
result of a function call. - - Attributes: - call_id: The identifier of the function call for which this is the result. - result: The result of the function call, or a generic error message if the function call failed. - exception: An exception that occurred if the function call failed. - type: The type of content, which is always "function_result" for this class. - annotations: Optional annotations associated with the content. - additional_properties: Optional additional properties associated with the content. - raw_representation: Optional raw representation of the content. - - Examples: - .. code-block:: python - - from agent_framework import FunctionResultContent - - # Create a successful function result - result = FunctionResultContent( - call_id="call_123", - result={"temperature": 22, "condition": "sunny"}, - ) - - # Create a failed function result - failed_result = FunctionResultContent( - call_id="call_124", - result="Function execution failed", - exception=ValueError("Invalid location"), - ) - """ - - def __init__( - self, - *, + @classmethod + def from_function_result( + cls: type[TContent], call_id: str, - result: Any | None = None, - exception: Exception | None = None, - annotations: Sequence[Annotations | MutableMapping[str, Any]] | None = None, - additional_properties: dict[str, Any] | None = None, - raw_representation: Any | None = None, - **kwargs: Any, - ) -> None: - """Initializes a FunctionResultContent instance. - - Keyword Args: - call_id: The identifier of the function call for which this is the result. - result: The result of the function call, or a generic error message if the function call failed. - exception: An exception that occurred if the function call failed. - annotations: Optional annotations associated with the content. - additional_properties: Optional additional properties associated with the content. - raw_representation: Optional raw representation of the content. - **kwargs: Any additional keyword arguments. 
- """ - super().__init__( + *, + result: Any = None, + exception: str | None = None, + annotations: Sequence[Annotation] | None = None, + additional_properties: MutableMapping[str, Any] | None = None, + raw_representation: Any = None, + ) -> TContent: + """Create function result content.""" + return cls( + "function_result", + call_id=call_id, + result=result, + exception=exception, annotations=annotations, additional_properties=additional_properties, raw_representation=raw_representation, - **kwargs, ) - self.call_id = call_id - self.result = result - self.exception = exception - self.type: Literal["function_result"] = "function_result" - - -class UsageContent(BaseContent): - """Represents usage information associated with a chat request and response. - - Attributes: - details: The usage information, including input and output token counts, and any additional counts. - type: The type of content, which is always "usage" for this class. - annotations: Optional annotations associated with the content. - additional_properties: Optional additional properties associated with the content. - raw_representation: Optional raw representation of the content. - Examples: - .. 
code-block:: python - - from agent_framework import UsageContent, UsageDetails - - # Create usage content - usage = UsageContent( - details=UsageDetails( - input_token_count=100, - output_token_count=50, - total_token_count=150, - ), - ) - print(usage.details.total_token_count) # 150 - """ - - def __init__( - self, - details: UsageDetails | MutableMapping[str, Any], + @classmethod + def from_usage( + cls: type[TContent], + usage_details: UsageDetails, *, - annotations: Sequence[Annotations | MutableMapping[str, Any]] | None = None, - additional_properties: dict[str, Any] | None = None, - raw_representation: Any | None = None, - **kwargs: Any, - ) -> None: - """Initializes a UsageContent instance.""" - super().__init__( + annotations: Sequence[Annotation] | None = None, + additional_properties: MutableMapping[str, Any] | None = None, + raw_representation: Any = None, + ) -> TContent: + """Create usage content.""" + return cls( + "usage", + usage_details=usage_details, annotations=annotations, additional_properties=additional_properties, raw_representation=raw_representation, - **kwargs, ) - # Convert dict to UsageDetails if needed - if isinstance(details, MutableMapping): - details = UsageDetails.from_dict(details) - self.details = details - self.type: Literal["usage"] = "usage" - -class HostedFileContent(BaseContent): - """Represents a hosted file content. - - Attributes: - file_id: The identifier of the hosted file. - type: The type of content, which is always "hosted_file" for this class. - additional_properties: Optional additional properties associated with the content. - raw_representation: Optional raw representation of the content. - - Examples: - .. 
code-block:: python - - from agent_framework import HostedFileContent - - # Create hosted file content - file_content = HostedFileContent(file_id="file-abc123") - print(file_content.file_id) # "file-abc123" - """ - - def __init__( - self, + @classmethod + def from_hosted_file( + cls: type[TContent], file_id: str, *, media_type: str | None = None, name: str | None = None, - additional_properties: dict[str, Any] | None = None, - raw_representation: Any | None = None, - **kwargs: Any, - ) -> None: - """Initializes a HostedFileContent instance. - - Args: - file_id: The identifier of the hosted file. - media_type: Optional media type of the hosted file. - name: Optional display name of the hosted file. - - Keyword Args: - additional_properties: Optional additional properties associated with the content. - raw_representation: Optional raw representation of the content. - **kwargs: Any additional keyword arguments. - """ - super().__init__( + annotations: Sequence[Annotation] | None = None, + additional_properties: MutableMapping[str, Any] | None = None, + raw_representation: Any = None, + ) -> TContent: + """Create hosted file content.""" + return cls( + "hosted_file", + file_id=file_id, + media_type=media_type, + name=name, + annotations=annotations, additional_properties=additional_properties, raw_representation=raw_representation, - **kwargs, ) - self.file_id = file_id - self.media_type = media_type - self.name = name - self.type: Literal["hosted_file"] = "hosted_file" - - def has_top_level_media_type(self, top_level_media_type: Literal["application", "audio", "image", "text"]) -> bool: - """Returns a boolean indicating if the media type has the specified top-level media type.""" - return _has_top_level_media_type(self.media_type, top_level_media_type) - - -class HostedVectorStoreContent(BaseContent): - """Represents a hosted vector store content. - - Attributes: - vector_store_id: The identifier of the hosted vector store. 
- type: The type of content, which is always "hosted_vector_store" for this class. - additional_properties: Optional additional properties associated with the content. - raw_representation: Optional raw representation of the content. - - Examples: - .. code-block:: python - from agent_framework import HostedVectorStoreContent - - # Create hosted vector store content - vs_content = HostedVectorStoreContent(vector_store_id="vs-xyz789") - print(vs_content.vector_store_id) # "vs-xyz789" - """ - - def __init__( - self, + @classmethod + def from_hosted_vector_store( + cls: type[TContent], vector_store_id: str, *, - additional_properties: dict[str, Any] | None = None, - raw_representation: Any | None = None, - **kwargs: Any, - ) -> None: - """Initializes a HostedVectorStoreContent instance. - - Args: - vector_store_id: The identifier of the hosted vector store. - - Keyword Args: - additional_properties: Optional additional properties associated with the content. - raw_representation: Optional raw representation of the content. - **kwargs: Any additional keyword arguments. 
- """ - super().__init__( + annotations: Sequence[Annotation] | None = None, + additional_properties: MutableMapping[str, Any] | None = None, + raw_representation: Any = None, + ) -> TContent: + """Create hosted vector store content.""" + return cls( + "hosted_vector_store", + vector_store_id=vector_store_id, + annotations=annotations, additional_properties=additional_properties, raw_representation=raw_representation, - **kwargs, ) - self.vector_store_id = vector_store_id - self.type: Literal["hosted_vector_store"] = "hosted_vector_store" - -class CodeInterpreterToolCallContent(BaseContent): - """Represents a code interpreter tool call invocation by a hosted service.""" - - def __init__( - self, + @classmethod + def from_code_interpreter_tool_call( + cls: type[TContent], *, call_id: str | None = None, - inputs: Sequence["Contents | MutableMapping[str, Any]"] | None = None, - annotations: Sequence[Annotations | MutableMapping[str, Any]] | None = None, - additional_properties: dict[str, Any] | None = None, - raw_representation: Any | None = None, - **kwargs: Any, - ) -> None: - super().__init__( + inputs: Sequence["Content"] | None = None, + annotations: Sequence[Annotation] | None = None, + additional_properties: MutableMapping[str, Any] | None = None, + raw_representation: Any = None, + ) -> TContent: + """Create code interpreter tool call content.""" + return cls( + "code_interpreter_tool_call", + call_id=call_id, + inputs=list(inputs) if inputs is not None else None, annotations=annotations, additional_properties=additional_properties, raw_representation=raw_representation, - **kwargs, ) - self.call_id = call_id - self.inputs: list["Contents"] | None = None - if inputs: - normalized_inputs: Sequence["Contents | MutableMapping[str, Any]"] = ( - inputs - if isinstance(inputs, Sequence) and not isinstance(inputs, (str, bytes, MutableMapping)) - else [inputs] - ) - self.inputs = _parse_content_list(list(normalized_inputs)) - self.type: 
Literal["code_interpreter_tool_call"] = "code_interpreter_tool_call" - -class CodeInterpreterToolResultContent(BaseContent): - """Represents the result of a code interpreter tool invocation by a hosted service.""" - - def __init__( - self, + @classmethod + def from_code_interpreter_tool_result( + cls: type[TContent], *, call_id: str | None = None, - outputs: Sequence["Contents | MutableMapping[str, Any]"] | None = None, - annotations: Sequence[Annotations | MutableMapping[str, Any]] | None = None, - additional_properties: dict[str, Any] | None = None, - raw_representation: Any | None = None, - **kwargs: Any, - ) -> None: - super().__init__( + outputs: Sequence["Content"] | None = None, + annotations: Sequence[Annotation] | None = None, + additional_properties: MutableMapping[str, Any] | None = None, + raw_representation: Any = None, + ) -> TContent: + """Create code interpreter tool result content.""" + return cls( + "code_interpreter_tool_result", + call_id=call_id, + outputs=list(outputs) if outputs is not None else None, annotations=annotations, additional_properties=additional_properties, raw_representation=raw_representation, - **kwargs, ) - self.call_id = call_id - self.outputs: list["Contents"] | None = None - if outputs: - normalized_outputs: Sequence["Contents | MutableMapping[str, Any]"] = ( - outputs - if isinstance(outputs, Sequence) and not isinstance(outputs, (str, bytes, MutableMapping)) - else [outputs] - ) - self.outputs = _parse_content_list(list(normalized_outputs)) - self.type: Literal["code_interpreter_tool_result"] = "code_interpreter_tool_result" - -class ImageGenerationToolCallContent(BaseContent): - """Represents the invocation of an image generation tool call by a hosted service.""" + @classmethod + def from_image_generation_tool_call( + cls: type[TContent], + *, + image_id: str | None = None, + annotations: Sequence[Annotation] | None = None, + additional_properties: MutableMapping[str, Any] | None = None, + raw_representation: Any = 
None, + ) -> TContent: + """Create image generation tool call content.""" + return cls( + "image_generation_tool_call", + image_id=image_id, + annotations=annotations, + additional_properties=additional_properties, + raw_representation=raw_representation, + ) - def __init__( - self, + @classmethod + def from_image_generation_tool_result( + cls: type[TContent], *, image_id: str | None = None, - annotations: Sequence[Annotations | MutableMapping[str, Any]] | None = None, - additional_properties: dict[str, Any] | None = None, - raw_representation: Any | None = None, - **kwargs: Any, - ) -> None: - """Initializes an ImageGenerationToolCallContent instance. + outputs: Any = None, + annotations: Sequence[Annotation] | None = None, + additional_properties: MutableMapping[str, Any] | None = None, + raw_representation: Any = None, + ) -> TContent: + """Create image generation tool result content.""" + return cls( + "image_generation_tool_result", + image_id=image_id, + outputs=outputs, + annotations=annotations, + additional_properties=additional_properties, + raw_representation=raw_representation, + ) - Keyword Args: - image_id: The identifier of the image to be generated. - annotations: Optional annotations associated with the content. - additional_properties: Optional additional properties associated with the content. - raw_representation: Optional raw representation of the content. - **kwargs: Any additional keyword arguments. 
+ @classmethod + def from_mcp_server_tool_call( + cls: type[TContent], + call_id: str, + tool_name: str, + *, + server_name: str | None = None, + arguments: str | Mapping[str, Any] | None = None, + annotations: Sequence[Annotation] | None = None, + additional_properties: MutableMapping[str, Any] | None = None, + raw_representation: Any = None, + ) -> TContent: + """Create MCP server tool call content.""" + return cls( + "mcp_server_tool_call", + call_id=call_id, + tool_name=tool_name, + server_name=server_name, + arguments=arguments, + annotations=annotations, + additional_properties=additional_properties, + raw_representation=raw_representation, + ) - """ - super().__init__( + @classmethod + def from_mcp_server_tool_result( + cls: type[TContent], + call_id: str, + *, + output: Any = None, + annotations: Sequence[Annotation] | None = None, + additional_properties: MutableMapping[str, Any] | None = None, + raw_representation: Any = None, + ) -> TContent: + """Create MCP server tool result content.""" + return cls( + "mcp_server_tool_result", + call_id=call_id, + output=output, annotations=annotations, additional_properties=additional_properties, raw_representation=raw_representation, - **kwargs, ) - self.image_id = image_id - self.type: Literal["image_generation_tool_call"] = "image_generation_tool_call" + @classmethod + def from_function_approval_request( + cls: type[TContent], + id: str, + function_call: "Content", + *, + annotations: Sequence[Annotation] | None = None, + additional_properties: MutableMapping[str, Any] | None = None, + raw_representation: Any = None, + ) -> TContent: + """Create function approval request content.""" + return cls( + "function_approval_request", + id=id, + function_call=function_call, + user_input_request=True, + annotations=annotations, + additional_properties=additional_properties, + raw_representation=raw_representation, + ) -class ImageGenerationToolResultContent(BaseContent): - """Represents the result of an image generation 
tool call invocation by a hosted service.""" + @classmethod + def from_function_approval_response( + cls: type[TContent], + approved: bool, + id: str, + function_call: "Content", + *, + annotations: Sequence[Annotation] | None = None, + additional_properties: MutableMapping[str, Any] | None = None, + raw_representation: Any = None, + ) -> TContent: + """Create function approval response content.""" + return cls( + "function_approval_response", + approved=approved, + id=id, + function_call=function_call, + annotations=annotations, + additional_properties=additional_properties, + raw_representation=raw_representation, + ) - def __init__( + def to_function_approval_response( self, - *, - image_id: str | None = None, - outputs: DataContent | UriContent | None = None, - annotations: Sequence[Annotations | MutableMapping[str, Any]] | None = None, - additional_properties: dict[str, Any] | None = None, - raw_representation: Any | None = None, - **kwargs: Any, - ) -> None: - """Initializes an ImageGenerationToolResultContent instance. + approved: bool, + ) -> "Content": + """Convert a function approval request content to a function approval response content.""" + if self.type != "function_approval_request": + raise ContentError( + "Can only convert 'function_approval_request' content to 'function_approval_response' content." 
+ ) + return Content.from_function_approval_response( + approved=approved, + id=self.id, # type: ignore[attr-defined, arg-type] + function_call=self.function_call, # type: ignore[attr-defined, arg-type] + annotations=self.annotations, + additional_properties=self.additional_properties, + raw_representation=self.raw_representation, + ) + + def to_dict(self, *, exclude_none: bool = True, exclude: set[str] | None = None) -> dict[str, Any]: + """Serialize the content to a dictionary.""" + fields_to_capture = ( + "text", + "protected_data", + "uri", + "media_type", + "message", + "error_code", + "error_details", + "usage_details", + "call_id", + "name", + "arguments", + "exception", + "result", + "file_id", + "vector_store_id", + "inputs", + "outputs", + "image_id", + "tool_name", + "server_name", + "output", + "function_call", + "user_input_request", + "approved", + "id", + "additional_properties", + ) + + exclude = exclude or set() + result: dict[str, Any] = {"type": self.type} + + for field in fields_to_capture: + value = getattr(self, field, None) + if field in exclude: + continue + if exclude_none and value is None: + continue + result[field] = _serialize_value(value, exclude_none) + + if "annotations" not in exclude and self.annotations is not None: + result["annotations"] = [dict(annotation) for annotation in self.annotations] + + return result - Keyword Args: - image_id: The identifier of the generated image. - outputs: The outputs of the image generation tool call. - annotations: Optional annotations associated with the content. - additional_properties: Optional additional properties associated with the content. - raw_representation: Optional raw representation of the content. - **kwargs: Any additional keyword arguments. 
+ def __eq__(self, other: object) -> bool: + """Check if two Content instances are equal by comparing their dict representations.""" + if not isinstance(other, Content): + return False + return self.to_dict(exclude_none=False) == other.to_dict(exclude_none=False) - """ - super().__init__( + def __str__(self) -> str: + """Return a string representation of the Content.""" + if self.type == "error": + if self.error_code: + return f"Error {self.error_code}: {self.message or ''}" + return self.message or "Unknown error" + if self.type == "text": + return self.text or "" + return f"Content(type={self.type})" + + @classmethod + def from_dict(cls: type[TContent], data: Mapping[str, Any]) -> TContent: + """Create a Content instance from a mapping.""" + if not (content_type := data.get("type")): + raise ValueError("Content mapping requires 'type'") + remaining = dict(data) + remaining.pop("type", None) + annotations = remaining.pop("annotations", None) + additional_properties = remaining.pop("additional_properties", None) + raw_representation = remaining.pop("raw_representation", None) + + # Special handling for DataContent with data and media_type + if content_type == "data" and "data" in remaining and "media_type" in remaining: + # Use from_data() to properly create the DataContent with URI + return cls.from_data(remaining["data"], remaining["media_type"]) + + # Handle nested Content objects (e.g., function_call in function_approval_request) + if "function_call" in remaining and isinstance(remaining["function_call"], dict): + remaining["function_call"] = cls.from_dict(remaining["function_call"]) + + # Handle list of Content objects (e.g., inputs in code_interpreter_tool_call) + if "inputs" in remaining and isinstance(remaining["inputs"], list): + remaining["inputs"] = [ + cls.from_dict(item) if isinstance(item, dict) else item for item in remaining["inputs"] + ] + + if "outputs" in remaining and isinstance(remaining["outputs"], list): + remaining["outputs"] = [ + 
cls.from_dict(item) if isinstance(item, dict) else item for item in remaining["outputs"] + ] + + return cls( + type=content_type, annotations=annotations, additional_properties=additional_properties, raw_representation=raw_representation, - **kwargs, + **remaining, ) - self.image_id = image_id - self.outputs: DataContent | UriContent | None = outputs - self.type: Literal["image_generation_tool_result"] = "image_generation_tool_result" - - -class MCPServerToolCallContent(BaseContent): - """Represents a tool call request to a MCP server.""" - def __init__( - self, - call_id: str, - tool_name: str, - server_name: str | None = None, - *, - arguments: str | Mapping[str, Any] | None = None, - annotations: Sequence[Annotations | MutableMapping[str, Any]] | None = None, - additional_properties: dict[str, Any] | None = None, - raw_representation: Any | None = None, - **kwargs: Any, - ) -> None: - """Initializes a MCPServerToolCallContent instance. + def __add__(self, other: "Content") -> "Content": + """Concatenate or merge two Content instances.""" + if not isinstance(other, Content): + raise TypeError(f"Incompatible type: Cannot add Content with {type(other).__name__}") + + if self.type != other.type: + raise TypeError(f"Cannot add Content of type '{self.type}' with type '{other.type}'") + + if self.type == "text": + return self._add_text_content(other) + if self.type == "text_reasoning": + return self._add_text_reasoning_content(other) + if self.type == "function_call": + return self._add_function_call_content(other) + if self.type == "usage": + return self._add_usage_content(other) + raise ContentError(f"Addition not supported for content type: {self.type}") + + def _add_text_content(self, other: "Content") -> "Content": + """Add two TextContent instances.""" + # Merge raw representations + if self.raw_representation is None: + raw_representation = other.raw_representation + elif other.raw_representation is None: + raw_representation = self.raw_representation + else: + 
raw_representation = ( + self.raw_representation if isinstance(self.raw_representation, list) else [self.raw_representation] + ) + (other.raw_representation if isinstance(other.raw_representation, list) else [other.raw_representation]) - Args: - call_id: The tool call identifier. - tool_name: The name of the tool requested. - server_name: The name of the MCP server where the tool is hosted. + # Merge annotations + if self.annotations is None: + annotations = other.annotations + elif other.annotations is None: + annotations = self.annotations + else: + annotations = self.annotations + other.annotations # type: ignore[operator] - Keyword Args: - arguments: The arguments requested to be provided to the tool, - can be a string to allow gradual completion of the args. - annotations: Optional annotations associated with the content. - additional_properties: Optional additional properties associated with the content. - raw_representation: Optional raw representation of the content. - **kwargs: Any additional keyword arguments. 
- """ - if not call_id: - raise ValueError("call_id must be a non-empty string.") - if not tool_name: - raise ValueError("tool_name must be a non-empty string.") - super().__init__( + return Content( + "text", + text=self.text + other.text, # type: ignore[attr-defined, operator] annotations=annotations, - additional_properties=additional_properties, + additional_properties={ + **(other.additional_properties or {}), + **(self.additional_properties or {}), + }, raw_representation=raw_representation, - **kwargs, ) - self.call_id = call_id - self.tool_name = tool_name - self.name = tool_name - self.server_name = server_name - self.arguments = arguments - self.type: Literal["mcp_server_tool_call"] = "mcp_server_tool_call" - - def parse_arguments(self) -> dict[str, Any] | None: - """Returns the parsed arguments for the MCP server tool call, if any.""" - if isinstance(self.arguments, str): - # If arguments are a string, try to parse it as JSON - try: - loaded = json.loads(self.arguments) - if isinstance(loaded, dict): - return loaded # type:ignore - return {"raw": loaded} - except (json.JSONDecodeError, TypeError): - return {"raw": self.arguments} - return cast(dict[str, Any] | None, self.arguments) + def _add_text_reasoning_content(self, other: "Content") -> "Content": + """Add two TextReasoningContent instances.""" + # Merge raw representations + if self.raw_representation is None: + raw_representation = other.raw_representation + elif other.raw_representation is None: + raw_representation = self.raw_representation + else: + raw_representation = ( + self.raw_representation if isinstance(self.raw_representation, list) else [self.raw_representation] + ) + (other.raw_representation if isinstance(other.raw_representation, list) else [other.raw_representation]) -class MCPServerToolResultContent(BaseContent): - """Represents the result of a MCP server tool call.""" + # Merge annotations + if self.annotations is None: + annotations = other.annotations + elif other.annotations 
is None: + annotations = self.annotations + else: + annotations = self.annotations + other.annotations # type: ignore[operator] - def __init__( - self, - call_id: str, - *, - output: Any | None = None, - annotations: Sequence[Annotations | MutableMapping[str, Any]] | None = None, - additional_properties: dict[str, Any] | None = None, - raw_representation: Any | None = None, - **kwargs: Any, - ) -> None: - """Initializes a MCPServerToolResultContent instance. + # Concatenate text, handling None values + self_text = self.text or "" # type: ignore[attr-defined] + other_text = other.text or "" # type: ignore[attr-defined] + combined_text = self_text + other_text if (self_text or other_text) else None - Args: - call_id: The identifier of the tool call for which this is the result. + # Handle protected_data replacement + protected_data = other.protected_data if other.protected_data is not None else self.protected_data # type: ignore[attr-defined] - Keyword Args: - output: The output of the MCP server tool call. - annotations: Optional annotations associated with the content. - additional_properties: Optional additional properties associated with the content. - raw_representation: Optional raw representation of the content. - **kwargs: Any additional keyword arguments. 
- """ - if not call_id: - raise ValueError("call_id must be a non-empty string.") - super().__init__( + return Content( + "text_reasoning", + text=combined_text, + protected_data=protected_data, annotations=annotations, - additional_properties=additional_properties, + additional_properties={ + **(other.additional_properties or {}), + **(self.additional_properties or {}), + }, raw_representation=raw_representation, - **kwargs, ) - self.call_id = call_id - self.output: Any | None = output - self.type: Literal["mcp_server_tool_result"] = "mcp_server_tool_result" + def _add_function_call_content(self, other: "Content") -> "Content": + """Add two FunctionCallContent instances.""" + other_call_id = getattr(other, "call_id", None) + self_call_id = getattr(self, "call_id", None) + if other_call_id and self_call_id != other_call_id: + raise ContentError("Cannot add function calls with different call_ids") + + self_arguments = getattr(self, "arguments", None) + other_arguments = getattr(other, "arguments", None) + + if not self_arguments: + arguments: str | Mapping[str, Any] | None = other_arguments + elif not other_arguments: + arguments = self_arguments + elif isinstance(self_arguments, str) and isinstance(other_arguments, str): + arguments = self_arguments + other_arguments + elif isinstance(self_arguments, dict) and isinstance(other_arguments, dict): + arguments = {**self_arguments, **other_arguments} + else: + raise TypeError("Incompatible argument types") + + # Merge raw representations + if self.raw_representation is None: + raw_representation: Any = other.raw_representation + elif other.raw_representation is None: + raw_representation = self.raw_representation + else: + raw_representation = ( + self.raw_representation if isinstance(self.raw_representation, list) else [self.raw_representation] + ) + (other.raw_representation if isinstance(other.raw_representation, list) else [other.raw_representation]) -class BaseUserInputRequest(BaseContent): - """Base class for all 
user requests.""" + return Content( + "function_call", + call_id=self_call_id, + name=getattr(self, "name", getattr(other, "name", None)), + arguments=arguments, + exception=getattr(self, "exception", None) or getattr(other, "exception", None), + additional_properties={ + **(self.additional_properties or {}), + **(other.additional_properties or {}), + }, + raw_representation=raw_representation, + ) - def __init__( - self, - *, - id: str, - annotations: Sequence[Annotations | MutableMapping[str, Any]] | None = None, - additional_properties: dict[str, Any] | None = None, - raw_representation: Any | None = None, - **kwargs: Any, - ) -> None: - """Initialize BaseUserInputRequest. + def _add_usage_content(self, other: "Content") -> "Content": + """Add two UsageContent instances by combining their usage details.""" + self_details = getattr(self, "usage_details", {}) + other_details = getattr(other, "usage_details", {}) + + # Combine token counts + combined_details: dict[str, Any] = {} + for key in set(list(self_details.keys()) + list(other_details.keys())): + self_val = self_details.get(key) + other_val = other_details.get(key) + if isinstance(self_val, int) and isinstance(other_val, int): + combined_details[key] = self_val + other_val + elif self_val is not None: + combined_details[key] = self_val + elif other_val is not None: + combined_details[key] = other_val - Keyword Args: - id: The unique identifier for the request. - annotations: Optional annotations associated with the content. - additional_properties: Optional additional properties associated with the content. - raw_representation: Optional raw representation of the content. - **kwargs: Any additional keyword arguments. 
- """ - if not id or len(id) < 1: - raise ValueError("id must be at least 1 character long") - super().__init__( - annotations=annotations, - additional_properties=additional_properties, + # Merge raw representations + if self.raw_representation is None: + raw_representation = other.raw_representation + elif other.raw_representation is None: + raw_representation = self.raw_representation + else: + raw_representation = ( + self.raw_representation if isinstance(self.raw_representation, list) else [self.raw_representation] + ) + (other.raw_representation if isinstance(other.raw_representation, list) else [other.raw_representation]) + + return Content( + "usage", + usage_details=combined_details, + additional_properties={ + **(self.additional_properties or {}), + **(other.additional_properties or {}), + }, raw_representation=raw_representation, - **kwargs, ) - self.id = id - self.type: Literal["user_input_request"] = "user_input_request" + def has_top_level_media_type(self, top_level_media_type: Literal["application", "audio", "image", "text"]) -> bool: + """Check if content has a specific top-level media type. -class FunctionApprovalResponseContent(BaseContent): - """Represents a response for user approval of a function call. + Works with data, uri, and hosted_file content types. - Examples: - .. code-block:: python + Args: + top_level_media_type: The top-level media type to check for. - from agent_framework import FunctionApprovalResponseContent, FunctionCallContent + Returns: + True if the content's media type matches the specified top-level type. - # Create a function approval response - func_call = FunctionCallContent( - call_id="call_123", - name="send_email", - arguments={"to": "user@example.com"}, - ) - response = FunctionApprovalResponseContent( - approved=False, - id="approval_001", - function_call=func_call, - ) - print(response.approved) # False - """ + Raises: + ContentError: If the content type doesn't support media types. 
- def __init__( - self, - approved: bool, - *, - id: str, - function_call: FunctionCallContent | MCPServerToolCallContent | MutableMapping[str, Any], - annotations: Sequence[Annotations | MutableMapping[str, Any]] | None = None, - additional_properties: dict[str, Any] | None = None, - raw_representation: Any | None = None, - **kwargs: Any, - ) -> None: - """Initializes a FunctionApprovalResponseContent instance. + Examples: + .. code-block:: python - Args: - approved: Whether the function call was approved. + from agent_framework import Content - Keyword Args: - id: The unique identifier for the request. - function_call: The function call content to be approved. Can be a FunctionCallContent object or dict. - annotations: Optional list of annotations for the request. - additional_properties: Optional additional properties for the request. - raw_representation: Optional raw representation of the request. - **kwargs: Additional keyword arguments. + image = Content.from_uri(uri="data:image/png;base64,abc123", media_type="image/png") + print(image.has_top_level_media_type("image")) # True + print(image.has_top_level_media_type("audio")) # False """ - super().__init__( - annotations=annotations, - additional_properties=additional_properties, - raw_representation=raw_representation, - **kwargs, - ) - self.id = id - self.approved = approved - # Convert dict to FunctionCallContent if needed (for SerializationMixin support) - self.function_call: FunctionCallContent | MCPServerToolCallContent - if isinstance(function_call, MutableMapping): - if function_call.get("type") == "mcp_server_tool_call": - self.function_call = MCPServerToolCallContent.from_dict(function_call) - else: - self.function_call = FunctionCallContent.from_dict(function_call) - else: - self.function_call = function_call - # Override the type for this specific subclass - self.type: Literal["function_approval_response"] = "function_approval_response" + if self.media_type is None: + raise ContentError("no 
media_type found") + slash_index = self.media_type.find("/") + span = self.media_type[:slash_index] if slash_index >= 0 else self.media_type + span = span.strip() + return span.lower() == top_level_media_type.lower() -class FunctionApprovalRequestContent(BaseContent): - """Represents a request for user approval of a function call. + def parse_arguments(self) -> dict[str, Any | None] | None: + """Parse arguments from function_call or mcp_server_tool_call content. - Examples: - .. code-block:: python + If arguments cannot be parsed as JSON or the result is not a dict, + they are returned as a dictionary with a single key "raw". - from agent_framework import FunctionApprovalRequestContent, FunctionCallContent + Returns: + Parsed arguments as a dictionary, or None if no arguments. - # Create a function approval request - func_call = FunctionCallContent( - call_id="call_123", - name="send_email", - arguments={"to": "user@example.com", "subject": "Hello"}, - ) - approval_request = FunctionApprovalRequestContent( - id="approval_001", - function_call=func_call, - ) + Raises: + ContentError: If the content type doesn't support arguments. - # Create response - approval_response = approval_request.create_response(approved=True) - print(approval_response.approved) # True - """ + Examples: + .. code-block:: python - def __init__( - self, - *, - id: str, - function_call: FunctionCallContent | MutableMapping[str, Any], - annotations: Sequence[Annotations | MutableMapping[str, Any]] | None = None, - additional_properties: dict[str, Any] | None = None, - raw_representation: Any | None = None, - **kwargs: Any, - ) -> None: - """Initializes a FunctionApprovalRequestContent instance. + from agent_framework import Content - Keyword Args: - id: The unique identifier for the request. - function_call: The function call content to be approved. Can be a FunctionCallContent object or dict. - annotations: Optional list of annotations for the request. 
- additional_properties: Optional additional properties for the request. - raw_representation: Optional raw representation of the request. - **kwargs: Additional keyword arguments. + func_call = Content.from_function_call( + call_id="call_123", + name="send_email", + arguments='{"to": "user@example.com"}', + ) + args = func_call.parse_arguments() + print(args) # {"to": "user@example.com"} """ - super().__init__( - annotations=annotations, - additional_properties=additional_properties, - raw_representation=raw_representation, - **kwargs, - ) - self.id = id - self.function_call: FunctionCallContent - # Convert dict to FunctionCallContent if needed (for SerializationMixin support) - if isinstance(function_call, MutableMapping): - self.function_call = FunctionCallContent.from_dict(function_call) - else: - self.function_call = function_call - # Override the type for this specific subclass - self.type: Literal["function_approval_request"] = "function_approval_request" - - def create_response(self, approved: bool) -> "FunctionApprovalResponseContent": - """Create a response for the function approval request.""" - return FunctionApprovalResponseContent( - approved, - id=self.id, - function_call=self.function_call, - additional_properties=self.additional_properties, - ) + if self.arguments is None: + return None + if not self.arguments: + return {} -UserInputRequestContents = FunctionApprovalRequestContent - -Contents = ( - TextContent - | DataContent - | TextReasoningContent - | UriContent - | FunctionCallContent - | FunctionResultContent - | ErrorContent - | UsageContent - | HostedFileContent - | HostedVectorStoreContent - | CodeInterpreterToolCallContent - | CodeInterpreterToolResultContent - | ImageGenerationToolCallContent - | ImageGenerationToolResultContent - | MCPServerToolCallContent - | MCPServerToolResultContent - | FunctionApprovalRequestContent - | FunctionApprovalResponseContent -) + if isinstance(self.arguments, str): + # If arguments are a string, try to 
parse it as JSON + try: + loaded = json.loads(self.arguments) + if isinstance(loaded, dict): + return loaded # type: ignore[return-value] + return {"raw": loaded} + except (json.JSONDecodeError, TypeError): + return {"raw": self.arguments} + return self.arguments # type: ignore[return-value] + + +# endregion -def _prepare_function_call_results_as_dumpable(content: Contents | Any | list[Contents | Any]) -> Any: +def _prepare_function_call_results_as_dumpable(content: "Content | Any | list[Content | Any]") -> Any: if isinstance(content, list): # Particularly deal with lists of Content return [_prepare_function_call_results_as_dumpable(item) for item in content] @@ -2149,9 +1393,9 @@ def _prepare_function_call_results_as_dumpable(content: Contents | Any | list[Co return content -def prepare_function_call_results(content: Contents | Any | list[Contents | Any]) -> str: +def prepare_function_call_results(content: "Content | Any | list[Content | Any]") -> str: """Prepare the values of the function call results.""" - if isinstance(content, Contents): + if isinstance(content, Content): # For BaseContent objects, use to_dict and serialize to JSON # Use default=str to handle datetime and other non-JSON-serializable objects return json.dumps(content.to_dict(exclude={"raw_representation", "additional_properties"}), default=str) @@ -2331,7 +1575,7 @@ class ChatMessage(SerializationMixin): # Create a message with contents assistant_msg = ChatMessage( role="assistant", - contents=[TextContent(text="The weather is sunny!")], + contents=[Content.from_text(text="The weather is sunny!")], ) print(assistant_msg.text) # "The weather is sunny!" 
@@ -2384,7 +1628,7 @@ def __init__( self, role: Role | Literal["system", "user", "assistant", "tool"], *, - contents: Sequence[Contents | Mapping[str, Any]], + contents: "Sequence[Content | Mapping[str, Any]]", author_name: str | None = None, message_id: str | None = None, additional_properties: MutableMapping[str, Any] | None = None, @@ -2411,7 +1655,7 @@ def __init__( role: Role | Literal["system", "user", "assistant", "tool"] | dict[str, Any], *, text: str | None = None, - contents: Sequence[Contents | Mapping[str, Any]] | None = None, + contents: "Sequence[Content | Mapping[str, Any]] | None" = None, author_name: str | None = None, message_id: str | None = None, additional_properties: MutableMapping[str, Any] | None = None, @@ -2443,7 +1687,7 @@ def __init__( parsed_contents = [] if contents is None else _parse_content_list(contents) if text is not None: - parsed_contents.append(TextContent(text=text)) + parsed_contents.append(Content.from_text(text=text)) self.role = role self.contents = parsed_contents @@ -2458,9 +1702,9 @@ def text(self) -> str: """Returns the text content of the message. Remarks: - This property concatenates the text of all TextContent objects in Contents. + This property concatenates the text of all TextContent objects in Content. 
""" - return " ".join(content.text for content in self.contents if isinstance(content, TextContent)) + return " ".join(content.text for content in self.contents if content.type == "text") # type: ignore[misc] def prepare_messages( @@ -2495,6 +1739,22 @@ def prepare_messages( return return_messages +def normalize_messages( + messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, +) -> list[ChatMessage]: + """Normalize message inputs to a list of ChatMessage objects.""" + if messages is None: + return [] + + if isinstance(messages, str): + return [ChatMessage(role=Role.USER, text=messages)] + + if isinstance(messages, ChatMessage): + return [messages] + + return [ChatMessage(role=Role.USER, text=msg) if isinstance(msg, str) else msg for msg in messages] + + def prepend_instructions_to_messages( messages: list[ChatMessage], instructions: str | Sequence[str] | None, @@ -2575,7 +1835,7 @@ def _process_update( # Slow path: only check for dict if type is None if content_type is None and isinstance(content, (dict, MutableMapping)): try: - content = _parse_content(content) + content = Content.from_dict(content) content_type = content.type except ContentError as exc: logger.warning(f"Skipping unknown content type or invalid content: {exc}") @@ -2585,13 +1845,13 @@ def _process_update( case "function_call" if message.contents and message.contents[-1].type == "function_call": try: message.contents[-1] += content # type: ignore[operator] - except AdditionItemMismatch: + except (AdditionItemMismatch, ContentError): message.contents.append(content) case "usage": if response.usage_details is None: response.usage_details = UsageDetails() # mypy doesn't narrow type based on match/case, but we know this is UsageContent - response.usage_details += content.details # type: ignore[union-attr, arg-type] + response.usage_details = add_usage_details(response.usage_details, content.usage_details) # type: ignore[arg-type] case _: message.contents.append(content) # 
Incorporate the update's properties into the response. @@ -2617,16 +1877,14 @@ def _process_update( response.model_id = update.model_id -def _coalesce_text_content( - contents: list["Contents"], type_: type["TextContent"] | type["TextReasoningContent"] -) -> None: +def _coalesce_text_content(contents: list["Content"], type_str: Literal["text", "text_reasoning"]) -> None: """Take any subsequence Text or TextReasoningContent items and coalesce them into a single item.""" if not contents: return - coalesced_contents: list["Contents"] = [] + coalesced_contents: list["Content"] = [] first_new_content: Any | None = None for content in contents: - if isinstance(content, type_): + if content.type == type_str: if first_new_content is None: first_new_content = deepcopy(content) else: @@ -2649,8 +1907,8 @@ def _coalesce_text_content( def _finalize_response(response: "ChatResponse | AgentResponse") -> None: """Finalizes the response by performing any necessary post-processing.""" for msg in response.messages: - _coalesce_text_content(msg.contents, TextContent) - _coalesce_text_content(msg.contents, TextReasoningContent) + _coalesce_text_content(msg.contents, "text") + _coalesce_text_content(msg.contents, "text_reasoning") class ChatResponse(SerializationMixin): @@ -2744,7 +2002,7 @@ def __init__( def __init__( self, *, - text: TextContent | str, + text: Content | str, response_id: str | None = None, conversation_id: str | None = None, model_id: str | None = None, @@ -2779,7 +2037,7 @@ def __init__( self, *, messages: ChatMessage | MutableSequence[ChatMessage] | list[dict[str, Any]] | None = None, - text: TextContent | str | None = None, + text: Content | str | None = None, response_id: str | None = None, conversation_id: str | None = None, model_id: str | None = None, @@ -2826,16 +2084,15 @@ def __init__( if text is not None: if isinstance(text, str): - text = TextContent(text=text) + text = Content.from_text(text=text) messages.append(ChatMessage(role=Role.ASSISTANT, 
contents=[text])) # Handle finish_reason conversion if isinstance(finish_reason, dict): finish_reason = FinishReason.from_dict(finish_reason) - # Handle usage_details conversion - if isinstance(usage_details, dict): - usage_details = UsageDetails.from_dict(usage_details) + # Handle usage_details - UsageDetails is now a TypedDict, so dict is already the right type + # No conversion needed self.messages = list(messages) self.response_id = response_id @@ -2844,14 +2101,13 @@ def __init__( self.created_at = created_at self.finish_reason = finish_reason self.usage_details = usage_details - self.value = value + self._value: Any | None = value + self._response_format: type[BaseModel] | None = response_format + self._value_parsed: bool = value is not None self.additional_properties = additional_properties or {} self.additional_properties.update(kwargs or {}) self.raw_representation: Any | list[Any] | None = raw_representation - if response_format: - self.try_parse_value(output_format_type=response_format) - @classmethod def from_chat_response_updates( cls: type[TChatResponse], @@ -2916,12 +2172,13 @@ async def from_chat_response_generator( Keyword Args: output_format_type: Optional Pydantic model type to parse the response text into structured data. 
""" - msg = cls(messages=[]) + response_format = output_format_type if isinstance(output_format_type, type) else None + msg = cls(messages=[], response_format=response_format) async for update in updates: _process_update(msg, update) _finalize_response(msg) - if output_format_type and isinstance(output_format_type, type) and issubclass(output_format_type, BaseModel): - msg.try_parse_value(output_format_type) + if response_format and issubclass(response_format, BaseModel): + msg.try_parse_value(response_format) return msg @property @@ -2929,16 +2186,64 @@ def text(self) -> str: """Returns the concatenated text of all messages in the response.""" return ("\n".join(message.text for message in self.messages if isinstance(message, ChatMessage))).strip() + @property + def value(self) -> Any | None: + """Get the parsed structured output value. + + If a response_format was provided and parsing hasn't been attempted yet, + this will attempt to parse the text into the specified type. + + Raises: + ValidationError: If the response text doesn't match the expected schema. 
+ """ + if self._value_parsed: + return self._value + if ( + self._response_format is not None + and isinstance(self._response_format, type) + and issubclass(self._response_format, BaseModel) + ): + self._value = self._response_format.model_validate_json(self.text) + self._value_parsed = True + return self._value + def __str__(self) -> str: return self.text - def try_parse_value(self, output_format_type: type[BaseModel]) -> None: - """If there is a value, does nothing, otherwise tries to parse the text into the value.""" - if self.value is None and isinstance(output_format_type, type) and issubclass(output_format_type, BaseModel): - try: - self.value = output_format_type.model_validate_json(self.text) # type: ignore[reportUnknownMemberType] - except ValidationError as ex: - logger.debug("Failed to parse value from chat response text: %s", ex) + def try_parse_value(self, output_format_type: type[_T] | None = None) -> _T | None: + """Try to parse the text into a typed value. + + This is the safe alternative to accessing the value property directly. + Returns the parsed value on success, or None on failure. + + Args: + output_format_type: The Pydantic model type to parse into. + If None, uses the response_format from initialization. + + Returns: + The parsed value as the specified type, or None if parsing fails. + """ + format_type = output_format_type or self._response_format + if format_type is None or not (isinstance(format_type, type) and issubclass(format_type, BaseModel)): + return None + + # Cache the result unless a different schema than the configured response_format is requested. + # This prevents calls with a different schema from polluting the cached value. 
+ use_cache = ( + self._response_format is None or output_format_type is None or output_format_type is self._response_format + ) + + if use_cache and self._value_parsed and self._value is not None: + return self._value # type: ignore[return-value, no-any-return] + try: + parsed_value = format_type.model_validate_json(self.text) # type: ignore[reportUnknownMemberType] + if use_cache: + self._value = parsed_value + self._value_parsed = True + return parsed_value # type: ignore[return-value] + except ValidationError as ex: + logger.warning("Failed to parse value from chat response text: %s", ex) + return None # region ChatResponseUpdate @@ -2967,7 +2272,7 @@ class ChatResponseUpdate(SerializationMixin): # Create a response update update = ChatResponseUpdate( - contents=[TextContent(text="Hello")], + contents=[Content.from_text(text="Hello")], role="assistant", message_id="msg_123", ) @@ -2996,8 +2301,8 @@ class ChatResponseUpdate(SerializationMixin): def __init__( self, *, - contents: Sequence[Contents | dict[str, Any]] | None = None, - text: TextContent | str | None = None, + contents: Sequence[Content | dict[str, Any]] | None = None, + text: Content | str | None = None, role: Role | Literal["system", "user", "assistant", "tool"] | dict[str, Any] | None = None, author_name: str | None = None, response_id: str | None = None, @@ -3034,7 +2339,7 @@ def __init__( if text is not None: if isinstance(text, str): - text = TextContent(text=text) + text = Content.from_text(text=text) contents.append(text) # Handle role conversion @@ -3062,7 +2367,7 @@ def __init__( @property def text(self) -> str: """Returns the concatenated text of all contents in the update.""" - return "".join(content.text for content in self.contents if isinstance(content, TextContent)) + return "".join(content.text for content in self.contents if content.type == "text") # type: ignore[misc] def __str__(self) -> str: return self.text @@ -3124,6 +2429,7 @@ def __init__( created_at: CreatedAtT | None = None, 
usage_details: UsageDetails | MutableMapping[str, Any] | None = None, value: Any | None = None, + response_format: type[BaseModel] | None = None, raw_representation: Any | None = None, additional_properties: dict[str, Any] | None = None, **kwargs: Any, @@ -3136,6 +2442,7 @@ def __init__( created_at: A timestamp for the chat response. usage_details: The usage details for the chat response. value: The structured output of the agent run response, if applicable. + response_format: Optional response format for the agent response. additional_properties: Any additional properties associated with the chat response. raw_representation: The raw representation of the chat response from an underlying implementation. **kwargs: Additional properties to set on the response. @@ -3156,14 +2463,15 @@ def __init__( processed_messages.append(ChatMessage.from_dict(messages)) # Convert usage_details from dict if needed (for SerializationMixin support) - if isinstance(usage_details, MutableMapping): - usage_details = UsageDetails.from_dict(usage_details) + # UsageDetails is now a TypedDict, so dict is already the right type self.messages = processed_messages self.response_id = response_id self.created_at = created_at self.usage_details = usage_details - self.value = value + self._value: Any | None = value + self._response_format: type[BaseModel] | None = response_format + self._value_parsed: bool = value is not None self.additional_properties = additional_properties or {} self.additional_properties.update(kwargs or {}) self.raw_representation = raw_representation @@ -3174,13 +2482,34 @@ def text(self) -> str: return "".join(msg.text for msg in self.messages) if self.messages else "" @property - def user_input_requests(self) -> list[UserInputRequestContents]: + def value(self) -> Any | None: + """Get the parsed structured output value. + + If a response_format was provided and parsing hasn't been attempted yet, + this will attempt to parse the text into the specified type. 
+ + Raises: + ValidationError: If the response text doesn't match the expected schema. + """ + if self._value_parsed: + return self._value + if ( + self._response_format is not None + and isinstance(self._response_format, type) + and issubclass(self._response_format, BaseModel) + ): + self._value = self._response_format.model_validate_json(self.text) + self._value_parsed = True + return self._value + + @property + def user_input_requests(self) -> list[Content]: """Get all BaseUserInputRequest messages from the response.""" return [ content for msg in self.messages for content in msg.contents - if isinstance(content, UserInputRequestContents) + if isinstance(content, Content) and content.user_input_request ] @classmethod @@ -3198,7 +2527,7 @@ def from_agent_run_response_updates( Keyword Args: output_format_type: Optional Pydantic model type to parse the response text into structured data. """ - msg = cls(messages=[]) + msg = cls(messages=[], response_format=output_format_type) for update in updates: _process_update(msg, update) _finalize_response(msg) @@ -3221,7 +2550,7 @@ async def from_agent_response_generator( Keyword Args: output_format_type: Optional Pydantic model type to parse the response text into structured data """ - msg = cls(messages=[]) + msg = cls(messages=[], response_format=output_format_type) async for update in updates: _process_update(msg, update) _finalize_response(msg) @@ -3232,13 +2561,40 @@ async def from_agent_response_generator( def __str__(self) -> str: return self.text - def try_parse_value(self, output_format_type: type[BaseModel]) -> None: - """If there is a value, does nothing, otherwise tries to parse the text into the value.""" - if self.value is None: - try: - self.value = output_format_type.model_validate_json(self.text) # type: ignore[reportUnknownMemberType] - except ValidationError as ex: - logger.debug("Failed to parse value from agent run response text: %s", ex) + def try_parse_value(self, output_format_type: type[_T] | None = 
None) -> _T | None: + """Try to parse the text into a typed value. + + This is the safe alternative when you need to parse the response text into a typed value. + Returns the parsed value on success, or None on failure. + + Args: + output_format_type: The Pydantic model type to parse into. + If None, uses the response_format from initialization. + + Returns: + The parsed value as the specified type, or None if parsing fails. + """ + format_type = output_format_type or self._response_format + if format_type is None or not (isinstance(format_type, type) and issubclass(format_type, BaseModel)): + return None + + # Cache the result unless a different schema than the configured response_format is requested. + # This prevents calls with a different schema from polluting the cached value. + use_cache = ( + self._response_format is None or output_format_type is None or output_format_type is self._response_format + ) + + if use_cache and self._value_parsed and self._value is not None: + return self._value # type: ignore[return-value, no-any-return] + try: + parsed_value = format_type.model_validate_json(self.text) # type: ignore[reportUnknownMemberType] + if use_cache: + self._value = parsed_value + self._value_parsed = True + return parsed_value # type: ignore[return-value] + except ValidationError as ex: + logger.warning("Failed to parse value from agent run response text: %s", ex) + return None # region AgentResponseUpdate @@ -3250,11 +2606,11 @@ class AgentResponseUpdate(SerializationMixin): Examples: .. 
code-block:: python - from agent_framework import AgentResponseUpdate, TextContent + from agent_framework import AgentResponseUpdate, Content # Create an agent run update update = AgentResponseUpdate( - contents=[TextContent(text="Processing...")], + contents=[Content.from_text(text="Processing...")], role="assistant", response_id="run_123", ) @@ -3282,8 +2638,8 @@ class AgentResponseUpdate(SerializationMixin): def __init__( self, *, - contents: Sequence[Contents | MutableMapping[str, Any]] | None = None, - text: TextContent | str | None = None, + contents: Sequence[Content | MutableMapping[str, Any]] | None = None, + text: Content | str | None = None, role: Role | MutableMapping[str, Any] | str | None = None, author_name: str | None = None, response_id: str | None = None, @@ -3308,11 +2664,11 @@ def __init__( kwargs: will be combined with additional_properties if provided. """ - parsed_contents: list[Contents] = [] if contents is None else _parse_content_list(contents) + parsed_contents: list[Content] = [] if contents is None else _parse_content_list(contents) if text is not None: if isinstance(text, str): - text = TextContent(text=text) + text = Content.from_text(text=text) parsed_contents.append(text) # Convert role from dict if needed (for SerializationMixin support) @@ -3333,16 +2689,12 @@ def __init__( @property def text(self) -> str: """Get the concatenated text of all TextContent objects in contents.""" - return ( - "".join(content.text for content in self.contents if isinstance(content, TextContent)) - if self.contents - else "" - ) + return "".join(content.text for content in self.contents if content.type == "text") if self.contents else "" # type: ignore[misc] @property - def user_input_requests(self) -> list[UserInputRequestContents]: + def user_input_requests(self) -> list[Content]: """Get all BaseUserInputRequest messages from the response.""" - return [content for content in self.contents if isinstance(content, UserInputRequestContents)] + return 
[content for content in self.contents if isinstance(content, Content) and content.user_input_request] def __str__(self) -> str: return self.text diff --git a/python/packages/core/agent_framework/_workflows/_agent.py b/python/packages/core/agent_framework/_workflows/_agent.py index cd768ffc4d..345e120c1f 100644 --- a/python/packages/core/agent_framework/_workflows/_agent.py +++ b/python/packages/core/agent_framework/_workflows/_agent.py @@ -13,18 +13,13 @@ AgentResponseUpdate, AgentThread, BaseAgent, - BaseContent, ChatMessage, - Contents, - FunctionApprovalRequestContent, - FunctionApprovalResponseContent, - FunctionCallContent, - FunctionResultContent, + Content, Role, - TextContent, UsageDetails, ) +from .._types import add_usage_details from ..exceptions import AgentExecutionException from ._agent_executor import AgentExecutor from ._checkpoint import CheckpointStorage @@ -357,12 +352,12 @@ def _convert_workflow_event_to_agent_update( args = self.RequestInfoFunctionArgs(request_id=request_id, data=event.data).to_dict() - function_call = FunctionCallContent( + function_call = Content.from_function_call( call_id=request_id, name=self.REQUEST_INFO_FUNCTION_NAME, arguments=args, ) - approval_request = FunctionApprovalRequestContent( + approval_request = Content.from_function_approval_request( id=request_id, function_call=function_call, additional_properties={"request_id": request_id}, @@ -385,9 +380,9 @@ def _extract_function_responses(self, input_messages: list[ChatMessage]) -> dict function_responses: dict[str, Any] = {} for message in input_messages: for content in message.contents: - if isinstance(content, FunctionApprovalResponseContent): + if content.type == "function_approval_response": # Parse the function arguments to recover request payload - arguments_payload = content.function_call.arguments + arguments_payload = content.function_call.arguments # type: ignore[attr-defined, union-attr] if isinstance(arguments_payload, str): try: parsed_args = 
self.RequestInfoFunctionArgs.from_json(arguments_payload) @@ -402,8 +397,8 @@ def _extract_function_responses(self, input_messages: list[ChatMessage]) -> dict "FunctionApprovalResponseContent arguments must be a mapping or JSON string." ) - request_id = parsed_args.request_id or content.id - if not content.approved: + request_id = parsed_args.request_id or content.id # type: ignore[attr-defined] + if not content.approved: # type: ignore[attr-defined] raise AgentExecutionException(f"Request '{request_id}' was not approved by the caller.") if request_id in self.pending_requests: @@ -412,10 +407,10 @@ def _extract_function_responses(self, input_messages: list[ChatMessage]) -> dict raise AgentExecutionException( "Only responses for pending requests are allowed when there are outstanding approvals." ) - elif isinstance(content, FunctionResultContent): - request_id = content.call_id + elif content.type == "function_result": + request_id = content.call_id # type: ignore[attr-defined] if request_id in self.pending_requests: - response_data = content.result if hasattr(content, "result") else str(content) + response_data = content.result if hasattr(content, "result") else str(content) # type: ignore[attr-defined] function_responses[request_id] = response_data elif bool(self.pending_requests): raise AgentExecutionException( @@ -426,17 +421,17 @@ def _extract_function_responses(self, input_messages: list[ChatMessage]) -> dict raise AgentExecutionException("Unexpected content type while awaiting request info responses.") return function_responses - def _extract_contents(self, data: Any) -> list[Contents]: - """Recursively extract Contents from workflow output data.""" + def _extract_contents(self, data: Any) -> list[Content]: + """Recursively extract Content from workflow output data.""" if isinstance(data, ChatMessage): return list(data.contents) if isinstance(data, list): return [c for item in data for c in self._extract_contents(item)] - if isinstance(data, BaseContent): - 
return [cast(Contents, data)] + if isinstance(data, Content): + return [data] # type: ignore[redundant-cast] if isinstance(data, str): - return [TextContent(text=data)] - return [TextContent(text=str(data))] + return [Content.from_text(text=data)] + return [Content.from_text(text=str(data))] class _ResponseState(TypedDict): """State for grouping response updates by message_id.""" @@ -468,7 +463,7 @@ def merge_updates(updates: list[AgentResponseUpdate], response_id: str) -> Agent for u in updates: if u.response_id: for content in u.contents: - if isinstance(content, FunctionCallContent) and content.call_id: + if content.type == "function_call" and content.call_id: call_id_to_response_id[content.call_id] = u.response_id # Second pass: group updates, associating FunctionResultContent with their calls @@ -480,7 +475,7 @@ def merge_updates(updates: list[AgentResponseUpdate], response_id: str) -> Agent # If no response_id, check if this is a FunctionResultContent that matches a call if not effective_response_id: for content in u.contents: - if isinstance(content, FunctionResultContent) and content.call_id: + if content.type == "function_result" and content.call_id: effective_response_id = call_id_to_response_id.get(content.call_id) if effective_response_id: break @@ -508,13 +503,6 @@ def _parse_dt(value: str | None) -> tuple[int, datetime | str | None]: except Exception: return (0, v) - def _sum_usage(a: UsageDetails | None, b: UsageDetails | None) -> UsageDetails | None: - if a is None: - return b - if b is None: - return a - return a + b - def _merge_responses(current: AgentResponse | None, incoming: AgentResponse) -> AgentResponse: if current is None: return incoming @@ -534,7 +522,7 @@ def _add_raw(value: object) -> None: messages=(current.messages or []) + (incoming.messages or []), response_id=current.response_id or incoming.response_id, created_at=incoming.created_at or current.created_at, - usage_details=_sum_usage(current.usage_details, incoming.usage_details), 
+ usage_details=add_usage_details(current.usage_details, incoming.usage_details), # type: ignore[arg-type] raw_representation=raw_list if raw_list else None, additional_properties=incoming.additional_properties or current.additional_properties, ) @@ -569,7 +557,7 @@ def _add_raw(value: object) -> None: if aggregated: final_messages.extend(aggregated.messages) if aggregated.usage_details: - merged_usage = _sum_usage(merged_usage, aggregated.usage_details) + merged_usage = add_usage_details(merged_usage, aggregated.usage_details) # type: ignore[arg-type] if aggregated.created_at and ( not latest_created_at or _parse_dt(aggregated.created_at) > _parse_dt(latest_created_at) ): @@ -593,7 +581,7 @@ def _add_raw(value: object) -> None: flattened = AgentResponse.from_agent_run_response_updates(global_dangling) final_messages.extend(flattened.messages) if flattened.usage_details: - merged_usage = _sum_usage(merged_usage, flattened.usage_details) + merged_usage = add_usage_details(merged_usage, flattened.usage_details) # type: ignore[arg-type] if flattened.created_at and ( not latest_created_at or _parse_dt(flattened.created_at) > _parse_dt(latest_created_at) ): diff --git a/python/packages/core/agent_framework/_workflows/_agent_executor.py b/python/packages/core/agent_framework/_workflows/_agent_executor.py index bcd47caca2..9beaf06a65 100644 --- a/python/packages/core/agent_framework/_workflows/_agent_executor.py +++ b/python/packages/core/agent_framework/_workflows/_agent_executor.py @@ -5,7 +5,7 @@ from dataclasses import dataclass from typing import Any, cast -from agent_framework import FunctionApprovalRequestContent, FunctionApprovalResponseContent +from agent_framework import Content from .._agents import AgentProtocol, ChatAgent from .._threads import AgentThread @@ -95,8 +95,8 @@ def __init__( super().__init__(exec_id) self._agent = agent self._agent_thread = agent_thread or self._agent.get_new_thread() - self._pending_agent_requests: dict[str, 
FunctionApprovalRequestContent] = {} - self._pending_responses_to_agent: list[FunctionApprovalResponseContent] = [] + self._pending_agent_requests: dict[str, Content] = {} + self._pending_responses_to_agent: list[Content] = [] self._output_response = output_response # AgentExecutor maintains an internal cache of messages in between runs @@ -179,8 +179,8 @@ async def from_messages( @response_handler async def handle_user_input_response( self, - original_request: FunctionApprovalRequestContent, - response: FunctionApprovalResponseContent, + original_request: Content, + response: Content, ctx: WorkflowContext[AgentExecutorResponse, AgentResponse], ) -> None: """Handle user input responses for function approvals during agent execution. @@ -193,7 +193,7 @@ async def handle_user_input_response( ctx: The workflow context for emitting events and outputs. """ self._pending_responses_to_agent.append(response) - self._pending_agent_requests.pop(original_request.id, None) + self._pending_agent_requests.pop(original_request.id, None) # type: ignore[arg-type] if not self._pending_agent_requests: # All pending requests have been resolved; resume agent execution @@ -344,8 +344,8 @@ async def _run_agent(self, ctx: WorkflowContext) -> AgentResponse | None: # Handle any user input requests if response.user_input_requests: for user_input_request in response.user_input_requests: - self._pending_agent_requests[user_input_request.id] = user_input_request - await ctx.request_info(user_input_request, FunctionApprovalResponseContent) + self._pending_agent_requests[user_input_request.id] = user_input_request # type: ignore[index] + await ctx.request_info(user_input_request, Content) return None return response @@ -362,7 +362,7 @@ async def _run_agent_streaming(self, ctx: WorkflowContext) -> AgentResponse | No run_kwargs: dict[str, Any] = await ctx.get_shared_state(WORKFLOW_RUN_KWARGS_KEY) updates: list[AgentResponseUpdate] = [] - user_input_requests: list[FunctionApprovalRequestContent] = [] 
+ user_input_requests: list[Content] = [] async for update in self._agent.run_stream( self._cache, thread=self._agent_thread, @@ -387,8 +387,8 @@ async def _run_agent_streaming(self, ctx: WorkflowContext) -> AgentResponse | No # Handle any user input requests after the streaming completes if user_input_requests: for user_input_request in user_input_requests: - self._pending_agent_requests[user_input_request.id] = user_input_request - await ctx.request_info(user_input_request, FunctionApprovalResponseContent) + self._pending_agent_requests[user_input_request.id] = user_input_request # type: ignore[index] + await ctx.request_info(user_input_request, Content) return None return response diff --git a/python/packages/core/agent_framework/_workflows/_checkpoint_encoding.py b/python/packages/core/agent_framework/_workflows/_checkpoint_encoding.py index 8c52c7a521..516a4547a0 100644 --- a/python/packages/core/agent_framework/_workflows/_checkpoint_encoding.py +++ b/python/packages/core/agent_framework/_workflows/_checkpoint_encoding.py @@ -146,6 +146,10 @@ def decode_checkpoint_value(value: Any) -> Any: cls = None if cls is not None: + # Verify the class actually supports the model protocol + if not _class_supports_model_protocol(cls): + logger.debug(f"Class {type_key} does not support model protocol; returning raw value") + return decoded_payload if strategy == "to_dict" and hasattr(cls, "from_dict"): with contextlib.suppress(Exception): return cls.from_dict(decoded_payload) @@ -169,6 +173,10 @@ def decode_checkpoint_value(value: Any) -> Any: if module is None: module = importlib.import_module(module_name) cls_dc: Any = getattr(module, class_name) + # Verify the class is actually a dataclass type (not an instance) + if not isinstance(cls_dc, type) or not is_dataclass(cls_dc): + logger.debug(f"Class {type_key_dc} is not a dataclass type; returning raw value") + return decoded_raw constructed = _instantiate_checkpoint_dataclass(cls_dc, decoded_raw) if constructed is not 
None: return constructed @@ -188,6 +196,22 @@ def decode_checkpoint_value(value: Any) -> Any: return value +def _class_supports_model_protocol(cls: type[Any]) -> bool: + """Check if a class type supports the model serialization protocol. + + Checks for pairs of serialization/deserialization methods: + - to_dict/from_dict + - to_json/from_json + """ + has_to_dict = hasattr(cls, "to_dict") and callable(getattr(cls, "to_dict", None)) + has_from_dict = hasattr(cls, "from_dict") and callable(getattr(cls, "from_dict", None)) + + has_to_json = hasattr(cls, "to_json") and callable(getattr(cls, "to_json", None)) + has_from_json = hasattr(cls, "from_json") and callable(getattr(cls, "from_json", None)) + + return (has_to_dict and has_from_dict) or (has_to_json and has_from_json) + + def _supports_model_protocol(obj: object) -> bool: """Detect objects that expose dictionary serialization hooks.""" try: @@ -195,13 +219,7 @@ def _supports_model_protocol(obj: object) -> bool: except Exception: return False - has_to_dict = hasattr(obj, "to_dict") and callable(getattr(obj, "to_dict", None)) # type: ignore[arg-type] - has_from_dict = hasattr(obj_type, "from_dict") and callable(getattr(obj_type, "from_dict", None)) - - has_to_json = hasattr(obj, "to_json") and callable(getattr(obj, "to_json", None)) # type: ignore[arg-type] - has_from_json = hasattr(obj_type, "from_json") and callable(getattr(obj_type, "from_json", None)) - - return (has_to_dict and has_from_dict) or (has_to_json and has_from_json) + return _class_supports_model_protocol(obj_type) def _import_qualified_name(qualname: str) -> type[Any] | None: diff --git a/python/packages/core/agent_framework/_workflows/_handoff.py b/python/packages/core/agent_framework/_workflows/_handoff.py index 79e97dfca8..8d329b618d 100644 --- a/python/packages/core/agent_framework/_workflows/_handoff.py +++ b/python/packages/core/agent_framework/_workflows/_handoff.py @@ -710,9 +710,9 @@ def participants(self, participants: 
Sequence[AgentProtocol]) -> "HandoffBuilder from agent_framework.openai import OpenAIChatClient client = OpenAIChatClient() - triage = client.create_agent(instructions="...", name="triage_agent") - refund = client.create_agent(instructions="...", name="refund_agent") - billing = client.create_agent(instructions="...", name="billing_agent") + triage = client.as_agent(instructions="...", name="triage_agent") + refund = client.as_agent(instructions="...", name="refund_agent") + billing = client.as_agent(instructions="...", name="billing_agent") builder = HandoffBuilder().participants([triage, refund, billing]) builder.with_start_agent(triage) diff --git a/python/packages/core/agent_framework/_workflows/_orchestrator_helpers.py b/python/packages/core/agent_framework/_workflows/_orchestrator_helpers.py index edcffaa530..09f118a6c6 100644 --- a/python/packages/core/agent_framework/_workflows/_orchestrator_helpers.py +++ b/python/packages/core/agent_framework/_workflows/_orchestrator_helpers.py @@ -37,8 +37,6 @@ def clean_conversation_for_handoff(conversation: list[ChatMessage]) -> list[Chat Returns: Cleaned conversation safe for handoff routing """ - from agent_framework import FunctionApprovalRequestContent, FunctionCallContent - cleaned: list[ChatMessage] = [] for msg in conversation: # Skip tool response messages entirely @@ -49,7 +47,7 @@ def clean_conversation_for_handoff(conversation: list[ChatMessage]) -> list[Chat has_tool_content = False if msg.contents: has_tool_content = any( - isinstance(content, (FunctionApprovalRequestContent, FunctionCallContent)) for content in msg.contents + content.type in ("function_approval_request", "function_call") for content in msg.contents ) # If no tool content, keep original diff --git a/python/packages/core/agent_framework/ag_ui/__init__.py b/python/packages/core/agent_framework/ag_ui/__init__.py index 941a586d30..b469bb8a60 100644 --- a/python/packages/core/agent_framework/ag_ui/__init__.py +++ 
b/python/packages/core/agent_framework/ag_ui/__init__.py @@ -12,11 +12,6 @@ "AGUIChatClient", "AGUIEventConverter", "AGUIHttpService", - "ConfirmationStrategy", - "DefaultConfirmationStrategy", - "TaskPlannerConfirmationStrategy", - "RecipeConfirmationStrategy", - "DocumentWriterConfirmationStrategy", ] diff --git a/python/packages/core/agent_framework/ag_ui/__init__.pyi b/python/packages/core/agent_framework/ag_ui/__init__.pyi index 201e1a0256..d7b6acafec 100644 --- a/python/packages/core/agent_framework/ag_ui/__init__.pyi +++ b/python/packages/core/agent_framework/ag_ui/__init__.pyi @@ -5,11 +5,6 @@ from agent_framework_ag_ui import ( AGUIChatClient, AGUIEventConverter, AGUIHttpService, - ConfirmationStrategy, - DefaultConfirmationStrategy, - DocumentWriterConfirmationStrategy, - RecipeConfirmationStrategy, - TaskPlannerConfirmationStrategy, __version__, add_agent_framework_fastapi_endpoint, ) @@ -19,11 +14,6 @@ __all__ = [ "AGUIEventConverter", "AGUIHttpService", "AgentFrameworkAgent", - "ConfirmationStrategy", - "DefaultConfirmationStrategy", - "DocumentWriterConfirmationStrategy", - "RecipeConfirmationStrategy", - "TaskPlannerConfirmationStrategy", "__version__", "add_agent_framework_fastapi_endpoint", ] diff --git a/python/packages/core/agent_framework/azure/__init__.py b/python/packages/core/agent_framework/azure/__init__.py index 3e0b4ad576..93d7dc1e0d 100644 --- a/python/packages/core/agent_framework/azure/__init__.py +++ b/python/packages/core/agent_framework/azure/__init__.py @@ -9,6 +9,7 @@ "AgentResponseCallbackProtocol": ("agent_framework_durabletask", "agent-framework-durabletask"), "AzureAIAgentClient": ("agent_framework_azure_ai", "agent-framework-azure-ai"), "AzureAIAgentOptions": ("agent_framework_azure_ai", "agent-framework-azure-ai"), + "AzureAIProjectAgentOptions": ("agent_framework_azure_ai", "agent-framework-azure-ai"), "AzureAIClient": ("agent_framework_azure_ai", "agent-framework-azure-ai"), "AzureAIProjectAgentProvider": 
("agent_framework_azure_ai", "agent-framework-azure-ai"), "AzureAISearchContextProvider": ("agent_framework_azure_ai_search", "agent-framework-azure-ai-search"), diff --git a/python/packages/core/agent_framework/azure/__init__.pyi b/python/packages/core/agent_framework/azure/__init__.pyi index 6296483719..a819019039 100644 --- a/python/packages/core/agent_framework/azure/__init__.pyi +++ b/python/packages/core/agent_framework/azure/__init__.pyi @@ -4,6 +4,7 @@ from agent_framework_azure_ai import ( AzureAIAgentClient, AzureAIAgentsProvider, AzureAIClient, + AzureAIProjectAgentOptions, AzureAIProjectAgentProvider, AzureAISettings, ) @@ -31,6 +32,7 @@ __all__ = [ "AzureAIAgentClient", "AzureAIAgentsProvider", "AzureAIClient", + "AzureAIProjectAgentOptions", "AzureAIProjectAgentProvider", "AzureAISearchContextProvider", "AzureAISearchSettings", diff --git a/python/packages/core/agent_framework/azure/_chat_client.py b/python/packages/core/agent_framework/azure/_chat_client.py index 248e79ee47..b60054165f 100644 --- a/python/packages/core/agent_framework/azure/_chat_client.py +++ b/python/packages/core/agent_framework/azure/_chat_client.py @@ -13,10 +13,10 @@ from pydantic import ValidationError from agent_framework import ( + Annotation, ChatResponse, ChatResponseUpdate, - CitationAnnotation, - TextContent, + Content, use_chat_middleware, use_function_invocation, ) @@ -267,19 +267,22 @@ class MyOptions(AzureOpenAIChatOptions, total=False): ) @override - def _parse_text_from_openai(self, choice: Choice | ChunkChoice) -> TextContent | None: - """Parse the choice into a TextContent object. + def _parse_text_from_openai(self, choice: Choice | ChunkChoice) -> Content | None: + """Parse the choice into a Content object with type='text'. Overwritten from OpenAIBaseChatClient to deal with Azure On Your Data function. 
For docs see: https://learn.microsoft.com/en-us/azure/ai-foundry/openai/references/on-your-data?tabs=python#context """ message = choice.message if isinstance(choice, Choice) else choice.delta + # When you enable asynchronous content filtering in Azure OpenAI, you may receive empty deltas + if message is None: # type: ignore + return None if hasattr(message, "refusal") and message.refusal: - return TextContent(text=message.refusal, raw_representation=choice) + return Content.from_text(text=message.refusal, raw_representation=choice) if not message.content: return None - text_content = TextContent(text=message.content, raw_representation=choice) + text_content = Content.from_text(text=message.content, raw_representation=choice) if not message.model_extra or "context" not in message.model_extra: return text_content @@ -301,7 +304,8 @@ def _parse_text_from_openai(self, choice: Choice | ChunkChoice) -> TextContent | text_content.annotations = [] for citation in citations: text_content.annotations.append( - CitationAnnotation( + Annotation( + type="citation", title=citation.get("title", ""), url=citation.get("url", ""), snippet=citation.get("content", ""), diff --git a/python/packages/core/agent_framework/observability.py b/python/packages/core/agent_framework/observability.py index 70564c9354..51f2f09e60 100644 --- a/python/packages/core/agent_framework/observability.py +++ b/python/packages/core/agent_framework/observability.py @@ -40,7 +40,7 @@ ChatMessage, ChatResponse, ChatResponseUpdate, - Contents, + Content, FinishReason, ) @@ -1096,7 +1096,12 @@ async def trace_get_response( ) with _get_span(attributes=attributes, span_name_attribute=SpanAttributes.LLM_REQUEST_MODEL) as span: if OBSERVABILITY_SETTINGS.SENSITIVE_DATA_ENABLED and messages: - _capture_messages(span=span, provider_name=provider_name, messages=messages) + _capture_messages( + span=span, + provider_name=provider_name, + messages=messages, + system_instructions=options.get("instructions"), + ) 
start_time_stamp = perf_counter() end_time_stamp: float | None = None try: @@ -1189,6 +1194,7 @@ async def trace_get_streaming_response( span=span, provider_name=provider_name, messages=messages, + system_instructions=options.get("instructions"), ) start_time_stamp = perf_counter() end_time_stamp: float | None = None @@ -1744,8 +1750,10 @@ def _to_otel_message(message: "ChatMessage") -> dict[str, Any]: return {"role": message.role.value, "parts": [_to_otel_part(content) for content in message.contents]} -def _to_otel_part(content: "Contents") -> dict[str, Any] | None: +def _to_otel_part(content: "Content") -> dict[str, Any] | None: """Create a otel representation of a Content.""" + from ._types import _get_data_bytes_as_str + match content.type: case "text": return {"type": "text", "content": content.text} @@ -1761,7 +1769,7 @@ def _to_otel_part(content: "Contents") -> dict[str, Any] | None: case "data": return { "type": "blob", - "content": content.get_data_bytes_as_str(), + "content": _get_data_bytes_as_str(content), "mime_type": content.media_type, "modality": content.media_type.split("/")[0] if content.media_type else None, } @@ -1802,10 +1810,10 @@ def _get_response_attributes( if model_id := getattr(response, "model_id", None): attributes[SpanAttributes.LLM_RESPONSE_MODEL] = model_id if capture_usage and (usage := response.usage_details): - if usage.input_token_count: - attributes[OtelAttr.INPUT_TOKENS] = usage.input_token_count - if usage.output_token_count: - attributes[OtelAttr.OUTPUT_TOKENS] = usage.output_token_count + if usage.get("input_token_count"): + attributes[OtelAttr.INPUT_TOKENS] = usage["input_token_count"] + if usage.get("output_token_count"): + attributes[OtelAttr.OUTPUT_TOKENS] = usage["output_token_count"] if duration: attributes[Meters.LLM_OPERATION_DURATION] = duration return attributes diff --git a/python/packages/core/agent_framework/openai/_assistants_client.py b/python/packages/core/agent_framework/openai/_assistants_client.py index 
d6dd7c251a..12ad1b5797 100644 --- a/python/packages/core/agent_framework/openai/_assistants_client.py +++ b/python/packages/core/agent_framework/openai/_assistants_client.py @@ -9,8 +9,12 @@ Mapping, MutableMapping, MutableSequence, + Sequence, ) -from typing import Any, Generic, Literal, TypedDict, cast +from typing import TYPE_CHECKING, Any, Generic, Literal, TypedDict, cast + +if TYPE_CHECKING: + from .._agents import ChatAgent from openai import AsyncOpenAI from openai.types.beta.threads import ( @@ -28,11 +32,14 @@ from pydantic import ValidationError from .._clients import BaseChatClient -from .._middleware import use_chat_middleware +from .._memory import ContextProvider +from .._middleware import Middleware, use_chat_middleware +from .._threads import ChatMessageStoreProtocol from .._tools import ( AIFunction, HostedCodeInterpreterTool, HostedFileSearchTool, + ToolProtocol, use_function_invocation, ) from .._types import ( @@ -40,15 +47,8 @@ ChatOptions, ChatResponse, ChatResponseUpdate, - CodeInterpreterToolCallContent, - Contents, - FunctionCallContent, - FunctionResultContent, - MCPServerToolCallContent, + Content, Role, - TextContent, - UriContent, - UsageContent, UsageDetails, prepare_function_call_results, ) @@ -409,7 +409,7 @@ async def _create_assistant_stream( thread_id: str | None, assistant_id: str, run_options: dict[str, Any], - tool_results: list[FunctionResultContent] | None, + tool_results: list[Content] | None, ) -> tuple[Any, str]: """Create the assistant stream for processing. 
@@ -519,7 +519,7 @@ async def _process_stream_events(self, stream: Any, thread_id: str) -> AsyncIter and response.data.usage is not None ): usage = response.data.usage - usage_content = UsageContent( + usage_content = Content.from_usage( UsageDetails( input_token_count=usage.prompt_tokens, output_token_count=usage.completion_tokens, @@ -544,9 +544,9 @@ async def _process_stream_events(self, stream: Any, thread_id: str) -> AsyncIter role=Role.ASSISTANT, ) - def _parse_function_calls_from_assistants(self, event_data: Run, response_id: str | None) -> list[Contents]: + def _parse_function_calls_from_assistants(self, event_data: Run, response_id: str | None) -> list[Content]: """Parse function call contents from an assistants tool action event.""" - contents: list[Contents] = [] + contents: list[Content] = [] if event_data.required_action is not None: for tool_call in event_data.required_action.submit_tool_outputs.tool_calls: @@ -556,10 +556,12 @@ def _parse_function_calls_from_assistants(self, event_data: Run, response_id: st if tool_type == "code_interpreter" and getattr(tool_call_any, "code_interpreter", None): code_input = getattr(tool_call_any.code_interpreter, "input", None) inputs = ( - [TextContent(text=code_input, raw_representation=tool_call)] if code_input is not None else None + [Content.from_text(text=code_input, raw_representation=tool_call)] + if code_input is not None + else None ) contents.append( - CodeInterpreterToolCallContent( + Content.from_code_interpreter_tool_call( call_id=call_id, inputs=inputs, raw_representation=tool_call, @@ -567,7 +569,7 @@ def _parse_function_calls_from_assistants(self, event_data: Run, response_id: st ) elif tool_type == "mcp": contents.append( - MCPServerToolCallContent( + Content.from_mcp_server_tool_call( call_id=call_id, tool_name=getattr(tool_call, "name", "") or "", server_name=getattr(tool_call, "server_label", None), @@ -579,7 +581,7 @@ def _parse_function_calls_from_assistants(self, event_data: Run, response_id: 
st function_name = tool_call.function.name function_arguments = json.loads(tool_call.function.arguments) contents.append( - FunctionCallContent( + Content.from_function_call( call_id=call_id, name=function_name, arguments=function_arguments, @@ -593,7 +595,7 @@ def _prepare_options( messages: MutableSequence[ChatMessage], options: dict[str, Any], **kwargs: Any, - ) -> tuple[dict[str, Any], list[FunctionResultContent] | None]: + ) -> tuple[dict[str, Any], list[Content] | None]: from .._types import validate_tool_mode run_options: dict[str, Any] = {**kwargs} @@ -665,7 +667,7 @@ def _prepare_options( } instructions: list[str] = [] - tool_results: list[FunctionResultContent] | None = None + tool_results: list[Content] | None = None additional_messages: list[AdditionalMessage] | None = None @@ -674,21 +676,23 @@ def _prepare_options( # All other messages are added 1:1. for chat_message in messages: if chat_message.role.value in ["system", "developer"]: - for text_content in [content for content in chat_message.contents if isinstance(content, TextContent)]: - instructions.append(text_content.text) + for text_content in [content for content in chat_message.contents if content.type == "text"]: + text = getattr(text_content, "text", None) + if text: + instructions.append(text) continue message_contents: list[MessageContentPartParam] = [] for content in chat_message.contents: - if isinstance(content, TextContent): - message_contents.append(TextContentBlockParam(type="text", text=content.text)) - elif isinstance(content, UriContent) and content.has_top_level_media_type("image"): + if content.type == "text": + message_contents.append(TextContentBlockParam(type="text", text=content.text)) # type: ignore[attr-defined, typeddict-item] + elif content.type == "uri" and content.has_top_level_media_type("image"): message_contents.append( - ImageURLContentBlockParam(type="image_url", image_url=ImageURLParam(url=content.uri)) + ImageURLContentBlockParam(type="image_url", 
image_url=ImageURLParam(url=content.uri)) # type: ignore[attr-defined, typeddict-item] ) - elif isinstance(content, FunctionResultContent): + elif content.type == "function_result": if tool_results is None: tool_results = [] tool_results.append(content) @@ -713,7 +717,7 @@ def _prepare_options( def _prepare_tool_outputs_for_assistants( self, - tool_results: list[FunctionResultContent] | None, + tool_results: list[Content] | None, ) -> tuple[str | None, list[ToolOutput] | None]: """Prepare function results for submission to the assistants API.""" run_id: str | None = None @@ -724,7 +728,7 @@ def _prepare_tool_outputs_for_assistants( # When creating the FunctionCallContent, we created it with a CallId == [runId, callId]. # We need to extract the run ID and ensure that the ToolOutput we send back to Azure # is only the call ID. - run_and_call_ids: list[str] = json.loads(function_result_content.call_id) + run_and_call_ids: list[str] = json.loads(function_result_content.call_id) # type: ignore[arg-type] if ( not run_and_call_ids @@ -761,3 +765,59 @@ def _update_agent_name_and_description(self, agent_name: str | None, description self.assistant_name = agent_name if description and not self.assistant_description: self.assistant_description = description + + @override + def as_agent( + self, + *, + id: str | None = None, + name: str | None = None, + description: str | None = None, + instructions: str | None = None, + tools: ToolProtocol + | Callable[..., Any] + | MutableMapping[str, Any] + | Sequence[ToolProtocol | Callable[..., Any] | MutableMapping[str, Any]] + | None = None, + default_options: TOpenAIAssistantsOptions | None = None, + chat_message_store_factory: Callable[[], ChatMessageStoreProtocol] | None = None, + context_provider: ContextProvider | None = None, + middleware: Sequence[Middleware] | None = None, + **kwargs: Any, + ) -> "ChatAgent[TOpenAIAssistantsOptions]": + """Convert this chat client to a ChatAgent. 
+ + This method creates a ChatAgent instance with this client pre-configured. + It does NOT create an assistant on the OpenAI service - the actual assistant + will be created on the server during the first invocation (run). + + For creating and managing persistent assistants on the server, use + :class:`~agent_framework.openai.OpenAIAssistantProvider` instead. + + Keyword Args: + id: The unique identifier for the agent. Will be created automatically if not provided. + name: The name of the agent. + description: A brief description of the agent's purpose. + instructions: Optional instructions for the agent. + tools: The tools to use for the request. + default_options: A TypedDict containing chat options. + chat_message_store_factory: Factory function to create an instance of ChatMessageStoreProtocol. + context_provider: Context providers to include during agent invocation. + middleware: List of middleware to intercept agent and function invocations. + kwargs: Any additional keyword arguments. + + Returns: + A ChatAgent instance configured with this chat client. 
+ """ + return super().as_agent( + id=id, + name=name, + description=description, + instructions=instructions, + tools=tools, + default_options=default_options, + chat_message_store_factory=chat_message_store_factory, + context_provider=context_provider, + middleware=middleware, + **kwargs, + ) diff --git a/python/packages/core/agent_framework/openai/_chat_client.py b/python/packages/core/agent_framework/openai/_chat_client.py index 2d1ef8b463..2b4023e85a 100644 --- a/python/packages/core/agent_framework/openai/_chat_client.py +++ b/python/packages/core/agent_framework/openai/_chat_client.py @@ -25,18 +25,9 @@ ChatOptions, ChatResponse, ChatResponseUpdate, - Contents, - DataContent, + Content, FinishReason, - FunctionApprovalRequestContent, - FunctionApprovalResponseContent, - FunctionCallContent, - FunctionResultContent, Role, - TextContent, - TextReasoningContent, - UriContent, - UsageContent, UsageDetails, prepare_function_call_results, ) @@ -294,13 +285,13 @@ def _parse_response_from_openai(self, response: ChatCompletion, options: dict[st response_metadata.update(self._get_metadata_from_chat_choice(choice)) if choice.finish_reason: finish_reason = FinishReason(value=choice.finish_reason) - contents: list[Contents] = [] + contents: list[Content] = [] if text_content := self._parse_text_from_openai(choice): contents.append(text_content) if parsed_tool_calls := [tool for tool in self._parse_tool_calls_from_openai(choice)]: contents.extend(parsed_tool_calls) if reasoning_details := getattr(choice.message, "reasoning_details", None): - contents.append(TextReasoningContent(None, protected_data=json.dumps(reasoning_details))) + contents.append(Content.from_text_reasoning(protected_data=json.dumps(reasoning_details))) messages.append(ChatMessage(role="assistant", contents=contents)) return ChatResponse( response_id=response.id, @@ -322,13 +313,17 @@ def _parse_response_update_from_openai( if chunk.usage: return ChatResponseUpdate( role=Role.ASSISTANT, - 
contents=[UsageContent(details=self._parse_usage_from_openai(chunk.usage), raw_representation=chunk)], + contents=[ + Content.from_usage( + usage_details=self._parse_usage_from_openai(chunk.usage), raw_representation=chunk + ) + ], model_id=chunk.model, additional_properties=chunk_metadata, response_id=chunk.id, message_id=chunk.id, ) - contents: list[Contents] = [] + contents: list[Content] = [] finish_reason: FinishReason | None = None for choice in chunk.choices: chunk_metadata.update(self._get_metadata_from_chat_choice(choice)) @@ -339,7 +334,7 @@ def _parse_response_update_from_openai( if text_content := self._parse_text_from_openai(choice): contents.append(text_content) if reasoning_details := getattr(choice.delta, "reasoning_details", None): - contents.append(TextReasoningContent(None, protected_data=json.dumps(reasoning_details))) + contents.append(Content.from_text_reasoning(protected_data=json.dumps(reasoning_details))) return ChatResponseUpdate( created_at=datetime.fromtimestamp(chunk.created, tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.%fZ"), contents=contents, @@ -360,27 +355,27 @@ def _parse_usage_from_openai(self, usage: CompletionUsage) -> UsageDetails: ) if usage.completion_tokens_details: if tokens := usage.completion_tokens_details.accepted_prediction_tokens: - details["completion/accepted_prediction_tokens"] = tokens + details["completion/accepted_prediction_tokens"] = tokens # type: ignore[typeddict-unknown-key] if tokens := usage.completion_tokens_details.audio_tokens: - details["completion/audio_tokens"] = tokens + details["completion/audio_tokens"] = tokens # type: ignore[typeddict-unknown-key] if tokens := usage.completion_tokens_details.reasoning_tokens: - details["completion/reasoning_tokens"] = tokens + details["completion/reasoning_tokens"] = tokens # type: ignore[typeddict-unknown-key] if tokens := usage.completion_tokens_details.rejected_prediction_tokens: - details["completion/rejected_prediction_tokens"] = tokens + 
details["completion/rejected_prediction_tokens"] = tokens # type: ignore[typeddict-unknown-key] if usage.prompt_tokens_details: if tokens := usage.prompt_tokens_details.audio_tokens: - details["prompt/audio_tokens"] = tokens + details["prompt/audio_tokens"] = tokens # type: ignore[typeddict-unknown-key] if tokens := usage.prompt_tokens_details.cached_tokens: - details["prompt/cached_tokens"] = tokens + details["prompt/cached_tokens"] = tokens # type: ignore[typeddict-unknown-key] return details - def _parse_text_from_openai(self, choice: Choice | ChunkChoice) -> TextContent | None: - """Parse the choice into a TextContent object.""" + def _parse_text_from_openai(self, choice: Choice | ChunkChoice) -> Content | None: + """Parse the choice into a Content object with type='text'.""" message = choice.message if isinstance(choice, Choice) else choice.delta if message.content: - return TextContent(text=message.content, raw_representation=choice) + return Content.from_text(text=message.content, raw_representation=choice) if hasattr(message, "refusal") and message.refusal: - return TextContent(text=message.refusal, raw_representation=choice) + return Content.from_text(text=message.refusal, raw_representation=choice) return None def _get_metadata_from_chat_response(self, response: ChatCompletion) -> dict[str, Any]: @@ -401,15 +396,15 @@ def _get_metadata_from_chat_choice(self, choice: Choice | ChunkChoice) -> dict[s "logprobs": getattr(choice, "logprobs", None), } - def _parse_tool_calls_from_openai(self, choice: Choice | ChunkChoice) -> list[Contents]: + def _parse_tool_calls_from_openai(self, choice: Choice | ChunkChoice) -> list[Content]: """Parse tool calls from an OpenAI response choice.""" - resp: list[Contents] = [] + resp: list[Content] = [] content = choice.message if isinstance(choice, Choice) else choice.delta if content and content.tool_calls: for tool in content.tool_calls: if not isinstance(tool, ChatCompletionMessageCustomToolCall) and tool.function: # 
ignoring tool.custom - fcc = FunctionCallContent( + fcc = Content.from_function_call( call_id=tool.id if tool.id else "", name=tool.function.name if tool.function.name else "", arguments=tool.function.arguments if tool.function.arguments else "", @@ -455,7 +450,7 @@ def _prepare_message_for_openai(self, message: ChatMessage) -> list[dict[str, An all_messages: list[dict[str, Any]] = [] for content in message.contents: # Skip approval content - it's internal framework state, not for the LLM - if isinstance(content, (FunctionApprovalRequestContent, FunctionApprovalResponseContent)): + if content.type in ("function_approval_request", "function_approval_response"): continue args: dict[str, Any] = { @@ -467,21 +462,21 @@ def _prepare_message_for_openai(self, message: ChatMessage) -> list[dict[str, An details := message.additional_properties["reasoning_details"] ): args["reasoning_details"] = details - match content: - case FunctionCallContent(): + match content.type: + case "function_call": if all_messages and "tool_calls" in all_messages[-1]: # If the last message already has tool calls, append to it all_messages[-1]["tool_calls"].append(self._prepare_content_for_openai(content)) else: args["tool_calls"] = [self._prepare_content_for_openai(content)] # type: ignore - case FunctionResultContent(): + case "function_result": args["tool_call_id"] = content.call_id # Always include content for tool results - API requires it even if empty # Functions returning None should still have a tool result message args["content"] = ( prepare_function_call_results(content.result) if content.result is not None else "" ) - case TextReasoningContent(protected_data=protected_data) if protected_data is not None: + case "text_reasoning" if (protected_data := content.protected_data) is not None: all_messages[-1]["reasoning_details"] = json.loads(protected_data) case _: if "content" not in args: @@ -492,27 +487,27 @@ def _prepare_message_for_openai(self, message: ChatMessage) -> list[dict[str, 
An all_messages.append(args) return all_messages - def _prepare_content_for_openai(self, content: Contents) -> dict[str, Any]: + def _prepare_content_for_openai(self, content: Content) -> dict[str, Any]: """Prepare content for OpenAI.""" - match content: - case FunctionCallContent(): + match content.type: + case "function_call": args = json.dumps(content.arguments) if isinstance(content.arguments, Mapping) else content.arguments return { "id": content.call_id, "type": "function", "function": {"name": content.name, "arguments": args}, } - case FunctionResultContent(): + case "function_result": return { "tool_call_id": content.call_id, "content": content.result, } - case DataContent() | UriContent() if content.has_top_level_media_type("image"): + case "data" | "uri" if content.has_top_level_media_type("image"): return { "type": "image_url", "image_url": {"url": content.uri}, } - case DataContent() | UriContent() if content.has_top_level_media_type("audio"): + case "data" | "uri" if content.has_top_level_media_type("audio"): if content.media_type and "wav" in content.media_type: audio_format = "wav" elif content.media_type and "mp3" in content.media_type: @@ -523,9 +518,9 @@ def _prepare_content_for_openai(self, content: Contents) -> dict[str, Any]: # Extract base64 data from data URI audio_data = content.uri - if audio_data.startswith("data:"): + if audio_data.startswith("data:"): # type: ignore[union-attr] # Extract just the base64 part after "data:audio/format;base64," - audio_data = audio_data.split(",", 1)[-1] + audio_data = audio_data.split(",", 1)[-1] # type: ignore[union-attr] return { "type": "input_audio", @@ -534,9 +529,7 @@ def _prepare_content_for_openai(self, content: Contents) -> dict[str, Any]: "format": audio_format, }, } - case DataContent() | UriContent() if content.has_top_level_media_type( - "application" - ) and content.uri.startswith("data:"): + case "data" | "uri" if content.has_top_level_media_type("application") and 
content.uri.startswith("data:"): # type: ignore[union-attr] # All application/* media types should be treated as files for OpenAI filename = getattr(content, "filename", None) or ( content.additional_properties.get("filename") diff --git a/python/packages/core/agent_framework/openai/_responses_client.py b/python/packages/core/agent_framework/openai/_responses_client.py index 37a35ae9bc..3d023110cf 100644 --- a/python/packages/core/agent_framework/openai/_responses_client.py +++ b/python/packages/core/agent_framework/openai/_responses_client.py @@ -48,33 +48,16 @@ use_function_invocation, ) from .._types import ( + Annotation, ChatMessage, ChatOptions, ChatResponse, ChatResponseUpdate, - CitationAnnotation, - CodeInterpreterToolCallContent, - CodeInterpreterToolResultContent, - Contents, - DataContent, - FunctionApprovalRequestContent, - FunctionApprovalResponseContent, - FunctionCallContent, - FunctionResultContent, - HostedFileContent, - HostedVectorStoreContent, - ImageGenerationToolCallContent, - ImageGenerationToolResultContent, - MCPServerToolCallContent, - MCPServerToolResultContent, + Content, Role, - TextContent, - TextReasoningContent, TextSpanRegion, - UriContent, - UsageContent, UsageDetails, - _parse_content, + detect_media_type_from_base64, prepare_function_call_results, prepend_instructions_to_messages, validate_tool_mode, @@ -231,7 +214,6 @@ async def _inner_get_response( response = await client.responses.parse(stream=False, **run_options) else: response = await client.responses.create(stream=False, **run_options) - return self._parse_response_from_openai(response, options=options) except BadRequestError as ex: if ex.code == "content_filter": raise OpenAIContentFilterException( @@ -247,6 +229,7 @@ async def _inner_get_response( f"{type(self)} service failed to complete the prompt: {ex}", inner_exception=ex, ) from ex + return self._parse_response_from_openai(response, options=options) @override async def _inner_get_streaming_response( @@ -391,7 
+374,7 @@ def _prepare_tools_for_openai( if tool.inputs: tool_args["file_ids"] = [] for tool_input in tool.inputs: - if isinstance(tool_input, HostedFileContent): + if tool_input.type == "hosted_file": tool_args["file_ids"].append(tool_input.file_id) # type: ignore[attr-defined] if not tool_args["file_ids"]: tool_args.pop("file_ids") @@ -417,7 +400,9 @@ def _prepare_tools_for_openai( if not tool.inputs: raise ValueError("HostedFileSearchTool requires inputs to be specified.") inputs: list[str] = [ - inp.vector_store_id for inp in tool.inputs if isinstance(inp, HostedVectorStoreContent) + inp.vector_store_id # type: ignore[misc] + for inp in tool.inputs + if inp.type == "hosted_vector_store" # type: ignore[attr-defined] ] if not inputs: raise ValueError( @@ -605,8 +590,12 @@ def _check_model_presence(self, options: dict[str, Any]) -> None: options["model"] = self.model_id def _get_current_conversation_id(self, options: dict[str, Any], **kwargs: Any) -> str | None: - """Get the current conversation ID from options dict or kwargs.""" - return options.get("conversation_id") or kwargs.get("conversation_id") + """Get the current conversation ID, preferring kwargs over options. + + This ensures runtime-updated conversation IDs (for example, from tool execution + loops) take precedence over the initial configuration provided in options. + """ + return kwargs.get("conversation_id") or options.get("conversation_id") def _prepare_messages_for_openai(self, chat_messages: Sequence[ChatMessage]) -> list[dict[str, Any]]: """Prepare the chat messages for a request. 
@@ -629,11 +618,11 @@ def _prepare_messages_for_openai(self, chat_messages: Sequence[ChatMessage]) -> for message in chat_messages: for content in message.contents: if ( - isinstance(content, FunctionCallContent) + content.type == "function_call" and content.additional_properties and "fc_id" in content.additional_properties ): - call_id_to_id[content.call_id] = content.additional_properties["fc_id"] + call_id_to_id[content.call_id] = content.additional_properties["fc_id"] # type: ignore[attr-defined, index] list_of_list = [self._prepare_message_for_openai(message, call_id_to_id) for message in chat_messages] # Flatten the list of lists into a single list return list(chain.from_iterable(list_of_list)) @@ -649,18 +638,18 @@ def _prepare_message_for_openai( "role": message.role.value if isinstance(message.role, Role) else message.role, } for content in message.contents: - match content: - case TextReasoningContent(): + match content.type: + case "text_reasoning": # Don't send reasoning content back to model continue - case FunctionResultContent(): + case "function_result": new_args: dict[str, Any] = {} new_args.update(self._prepare_content_for_openai(message.role, content, call_id_to_id)) all_messages.append(new_args) - case FunctionCallContent(): + case "function_call": function_call = self._prepare_content_for_openai(message.role, content, call_id_to_id) all_messages.append(function_call) # type: ignore - case FunctionApprovalResponseContent() | FunctionApprovalRequestContent(): + case "function_approval_response" | "function_approval_request": all_messages.append(self._prepare_content_for_openai(message.role, content, call_id_to_id)) # type: ignore case _: if "content" not in args: @@ -673,17 +662,17 @@ def _prepare_message_for_openai( def _prepare_content_for_openai( self, role: Role, - content: Contents, + content: Content, call_id_to_id: dict[str, str], ) -> dict[str, Any]: """Prepare content for the OpenAI Responses API format.""" - match content: - case 
TextContent(): + match content.type: + case "text": return { "type": "output_text" if role == Role.ASSISTANT else "input_text", "text": content.text, } - case TextReasoningContent(): + case "text_reasoning": ret: dict[str, Any] = { "type": "reasoning", "summary": { @@ -703,7 +692,7 @@ def _prepare_content_for_openai( if encrypted_content := props.get("encrypted_content"): ret["encrypted_content"] = encrypted_content return ret - case DataContent() | UriContent(): + case "data" | "uri": if content.has_top_level_media_type("image"): return { "type": "input_image", @@ -744,7 +733,7 @@ def _prepare_content_for_openai( file_obj["filename"] = filename return file_obj return {} - case FunctionCallContent(): + case "function_call": if not content.call_id: logger.warning(f"FunctionCallContent missing call_id for function '{content.name}'") return {} @@ -761,7 +750,7 @@ def _prepare_content_for_openai( "arguments": content.arguments, "status": None, } - case FunctionResultContent(): + case "function_result": # call_id for the result needs to be the same as the call_id for the function call args: dict[str, Any] = { "call_id": content.call_id, @@ -769,29 +758,29 @@ def _prepare_content_for_openai( "output": prepare_function_call_results(content.result), } return args - case FunctionApprovalRequestContent(): + case "function_approval_request": return { "type": "mcp_approval_request", - "id": content.id, - "arguments": content.function_call.arguments, - "name": content.function_call.name, - "server_label": content.function_call.additional_properties.get("server_label") - if content.function_call.additional_properties + "id": content.id, # type: ignore[union-attr] + "arguments": content.function_call.arguments, # type: ignore[union-attr] + "name": content.function_call.name, # type: ignore[union-attr] + "server_label": content.function_call.additional_properties.get("server_label") # type: ignore[union-attr] + if content.function_call.additional_properties # type: 
ignore[union-attr] else None, } - case FunctionApprovalResponseContent(): + case "function_approval_response": return { "type": "mcp_approval_response", "approval_request_id": content.id, "approve": content.approved, } - case HostedFileContent(): + case "hosted_file": return { "type": "input_file", "file_id": content.file_id, } case _: # should catch UsageDetails and ErrorContent and HostedVectorStoreContent - logger.debug("Unsupported content type passed (type: %s)", type(content)) + logger.debug("Unsupported content type passed (type: %s)", content.type) return {} # region Parse methods @@ -804,7 +793,7 @@ def _parse_response_from_openai( structured_response: BaseModel | None = response.output_parsed if isinstance(response, ParsedResponse) else None # type: ignore[reportUnknownMemberType] metadata: dict[str, Any] = response.metadata or {} - contents: list[Contents] = [] + contents: list[Content] = [] for item in response.output: # type: ignore[reportUnknownMemberType] match item.type: # types: @@ -829,7 +818,7 @@ def _parse_response_from_openai( for message_content in item.content: # type: ignore[reportMissingTypeArgument] match message_content.type: case "output_text": - text_content = TextContent( + text_content = Content.from_text( text=message_content.text, raw_representation=message_content, # type: ignore[reportUnknownArgumentType] ) @@ -840,7 +829,8 @@ def _parse_response_from_openai( match annotation.type: case "file_path": text_content.annotations.append( - CitationAnnotation( + Annotation( + type="citation", file_id=annotation.file_id, additional_properties={ "index": annotation.index, @@ -850,7 +840,8 @@ def _parse_response_from_openai( ) case "file_citation": text_content.annotations.append( - CitationAnnotation( + Annotation( + type="citation", url=annotation.filename, file_id=annotation.file_id, raw_representation=annotation, @@ -861,11 +852,13 @@ def _parse_response_from_openai( ) case "url_citation": text_content.annotations.append( - 
CitationAnnotation( + Annotation( + type="citation", title=annotation.title, url=annotation.url, annotated_regions=[ TextSpanRegion( + type="text_span", start_index=annotation.start_index, end_index=annotation.end_index, ) @@ -875,7 +868,8 @@ def _parse_response_from_openai( ) case "container_file_citation": text_content.annotations.append( - CitationAnnotation( + Annotation( + type="citation", file_id=annotation.file_id, url=annotation.filename, additional_properties={ @@ -883,6 +877,7 @@ def _parse_response_from_openai( }, annotated_regions=[ TextSpanRegion( + type="text_span", start_index=annotation.start_index, end_index=annotation.end_index, ) @@ -898,7 +893,7 @@ def _parse_response_from_openai( contents.append(text_content) case "refusal": contents.append( - TextContent( + Content.from_text( text=message_content.refusal, raw_representation=message_content, ) @@ -910,7 +905,7 @@ def _parse_response_from_openai( if hasattr(item, "summary") and item.summary and index < len(item.summary): additional_properties = {"summary": item.summary[index]} contents.append( - TextReasoningContent( + Content.from_text_reasoning( text=reasoning_content.text, raw_representation=reasoning_content, additional_properties=additional_properties, @@ -919,23 +914,23 @@ def _parse_response_from_openai( if hasattr(item, "summary") and item.summary: for summary in item.summary: contents.append( - TextReasoningContent(text=summary.text, raw_representation=summary) # type: ignore[arg-type] + Content.from_text_reasoning(text=summary.text, raw_representation=summary) # type: ignore[arg-type] ) case "code_interpreter_call": # ResponseOutputCodeInterpreterCall call_id = getattr(item, "call_id", None) or getattr(item, "id", None) - outputs: list["Contents"] = [] + outputs: list["Content"] = [] if item_outputs := getattr(item, "outputs", None): for code_output in item_outputs: if getattr(code_output, "type", None) == "logs": outputs.append( - TextContent( + Content.from_text( 
text=code_output.logs, raw_representation=code_output, ) ) elif getattr(code_output, "type", None) == "image": outputs.append( - UriContent( + Content.from_uri( uri=code_output.url, raw_representation=code_output, media_type="image", @@ -943,14 +938,14 @@ def _parse_response_from_openai( ) if code := getattr(item, "code", None): contents.append( - CodeInterpreterToolCallContent( + Content.from_code_interpreter_tool_call( call_id=call_id, - inputs=[TextContent(text=code, raw_representation=item)], + inputs=[Content.from_text(text=code, raw_representation=item)], raw_representation=item, ) ) contents.append( - CodeInterpreterToolResultContent( + Content.from_code_interpreter_tool_result( call_id=call_id, outputs=outputs, raw_representation=item, @@ -958,7 +953,7 @@ def _parse_response_from_openai( ) case "function_call": # ResponseOutputFunctionCall contents.append( - FunctionCallContent( + Content.from_function_call( call_id=item.call_id if hasattr(item, "call_id") and item.call_id else "", name=item.name if hasattr(item, "name") else "", arguments=item.arguments if hasattr(item, "arguments") else "", @@ -968,9 +963,9 @@ def _parse_response_from_openai( ) case "mcp_approval_request": # ResponseOutputMcpApprovalRequest contents.append( - FunctionApprovalRequestContent( + Content.from_function_approval_request( id=item.id, - function_call=FunctionCallContent( + function_call=Content.from_function_call( call_id=item.id, name=item.name, arguments=item.arguments, @@ -982,7 +977,7 @@ def _parse_response_from_openai( case "mcp_call": call_id = item.id contents.append( - MCPServerToolCallContent( + Content.from_mcp_server_tool_call( call_id=call_id, tool_name=item.name, server_name=item.server_label, @@ -992,31 +987,31 @@ def _parse_response_from_openai( ) if item.output is not None: contents.append( - MCPServerToolResultContent( + Content.from_mcp_server_tool_result( call_id=call_id, - output=[TextContent(text=item.output)], + output=[Content.from_text(text=item.output)], 
raw_representation=item, ) ) case "image_generation_call": # ResponseOutputImageGenerationCall - image_output: DataContent | None = None - if item.result: - base64_data = item.result - image_format = DataContent.detect_image_format_from_base64(base64_data) - image_output = DataContent( - data=base64_data, - media_type=f"image/{image_format}" if image_format else "image/png", + image_output: Content | None = None + if item.result is not None: + # item.result contains raw base64 string + # so we call detect_media_type_from_base64 to get the media type and fallback to image/png + image_output = Content.from_uri( + uri=f"data:{detect_media_type_from_base64(data_str=item.result) or 'image/png'}" + f";base64,{item.result}", raw_representation=item.result, ) image_id = item.id contents.append( - ImageGenerationToolCallContent( + Content.from_image_generation_tool_call( image_id=image_id, raw_representation=item, ) ) contents.append( - ImageGenerationToolResultContent( + Content.from_image_generation_tool_result( image_id=image_id, outputs=image_output, raw_representation=item, @@ -1056,11 +1051,10 @@ def _parse_chunk_from_openai( ) -> ChatResponseUpdate: """Parse an OpenAI Responses API streaming event into a ChatResponseUpdate.""" metadata: dict[str, Any] = {} - contents: list[Contents] = [] + contents: list[Content] = [] conversation_id: str | None = None response_id: str | None = None model = self.model_id - # TODO(peterychang): Add support for other content types match event.type: # types: # ResponseAudioDeltaEvent, @@ -1120,26 +1114,26 @@ def _parse_chunk_from_openai( event_part = event.part match event_part.type: case "output_text": - contents.append(TextContent(text=event_part.text, raw_representation=event)) + contents.append(Content.from_text(text=event_part.text, raw_representation=event)) metadata.update(self._get_metadata_from_response(event_part)) case "refusal": - contents.append(TextContent(text=event_part.refusal, raw_representation=event)) + 
contents.append(Content.from_text(text=event_part.refusal, raw_representation=event)) case _: pass case "response.output_text.delta": - contents.append(TextContent(text=event.delta, raw_representation=event)) + contents.append(Content.from_text(text=event.delta, raw_representation=event)) metadata.update(self._get_metadata_from_response(event)) case "response.reasoning_text.delta": - contents.append(TextReasoningContent(text=event.delta, raw_representation=event)) + contents.append(Content.from_text_reasoning(text=event.delta, raw_representation=event)) metadata.update(self._get_metadata_from_response(event)) case "response.reasoning_text.done": - contents.append(TextReasoningContent(text=event.text, raw_representation=event)) + contents.append(Content.from_text_reasoning(text=event.text, raw_representation=event)) metadata.update(self._get_metadata_from_response(event)) case "response.reasoning_summary_text.delta": - contents.append(TextReasoningContent(text=event.delta, raw_representation=event)) + contents.append(Content.from_text_reasoning(text=event.delta, raw_representation=event)) metadata.update(self._get_metadata_from_response(event)) case "response.reasoning_summary_text.done": - contents.append(TextReasoningContent(text=event.text, raw_representation=event)) + contents.append(Content.from_text_reasoning(text=event.text, raw_representation=event)) metadata.update(self._get_metadata_from_response(event)) case "response.created": response_id = event.response.id @@ -1154,7 +1148,7 @@ def _parse_chunk_from_openai( if event.response.usage: usage = self._parse_usage_from_openai(event.response.usage) if usage: - contents.append(UsageContent(details=usage, raw_representation=event)) + contents.append(Content.from_usage(usage_details=usage, raw_representation=event)) case "response.output_item.added": event_item = event.item match event_item.type: @@ -1179,9 +1173,9 @@ def _parse_chunk_from_openai( ) case "mcp_approval_request": contents.append( - 
FunctionApprovalRequestContent( + Content.from_function_approval_request( id=event_item.id, - function_call=FunctionCallContent( + function_call=Content.from_function_call( call_id=event_item.id, name=event_item.name, arguments=event_item.arguments, @@ -1193,7 +1187,7 @@ def _parse_chunk_from_openai( case "mcp_call": call_id = getattr(event_item, "id", None) or getattr(event_item, "call_id", None) or "" contents.append( - MCPServerToolCallContent( + Content.from_mcp_server_tool_call( call_id=call_id, tool_name=getattr(event_item, "name", "") or "", server_name=getattr(event_item, "server_label", None), @@ -1206,7 +1200,7 @@ def _parse_chunk_from_openai( or getattr(event_item, "output", None) or getattr(event_item, "outputs", None) ) - parsed_output: list[Contents] | None = None + parsed_output: list[Content] | None = None if result_output: normalized = ( result_output @@ -1214,9 +1208,9 @@ def _parse_chunk_from_openai( and not isinstance(result_output, (str, bytes, MutableMapping)) else [result_output] ) - parsed_output = [_parse_content(output_item) for output_item in normalized] + parsed_output = [Content.from_dict(output_item) for output_item in normalized] contents.append( - MCPServerToolResultContent( + Content.from_mcp_server_tool_result( call_id=call_id, output=parsed_output, raw_representation=event_item, @@ -1224,19 +1218,19 @@ def _parse_chunk_from_openai( ) case "code_interpreter_call": # ResponseOutputCodeInterpreterCall call_id = getattr(event_item, "call_id", None) or getattr(event_item, "id", None) - outputs: list[Contents] = [] + outputs: list[Content] = [] if hasattr(event_item, "outputs") and event_item.outputs: for code_output in event_item.outputs: if getattr(code_output, "type", None) == "logs": outputs.append( - TextContent( + Content.from_text( text=cast(Any, code_output).logs, raw_representation=code_output, ) ) elif getattr(code_output, "type", None) == "image": outputs.append( - UriContent( + Content.from_uri( uri=cast(Any, 
code_output).url, raw_representation=code_output, media_type="image", @@ -1244,10 +1238,10 @@ def _parse_chunk_from_openai( ) if hasattr(event_item, "code") and event_item.code: contents.append( - CodeInterpreterToolCallContent( + Content.from_code_interpreter_tool_call( call_id=call_id, inputs=[ - TextContent( + Content.from_text( text=event_item.code, raw_representation=event_item, ) @@ -1256,7 +1250,7 @@ def _parse_chunk_from_openai( ) ) contents.append( - CodeInterpreterToolResultContent( + Content.from_code_interpreter_tool_result( call_id=call_id, outputs=outputs, raw_representation=event_item, @@ -1273,7 +1267,7 @@ def _parse_chunk_from_openai( ): additional_properties = {"summary": event_item.summary[index]} contents.append( - TextReasoningContent( + Content.from_text_reasoning( text=reasoning_content.text, raw_representation=reasoning_content, additional_properties=additional_properties, @@ -1285,7 +1279,7 @@ def _parse_chunk_from_openai( call_id, name = function_call_ids.get(event.output_index, (None, None)) if call_id and name: contents.append( - FunctionCallContent( + Content.from_function_call( call_id=call_id, name=name, arguments=event.delta, @@ -1300,13 +1294,9 @@ def _parse_chunk_from_openai( # Handle streaming partial image generation image_base64 = event.partial_image_b64 partial_index = event.partial_image_index - - # Use helper function to create data URI from base64 - uri, media_type = DataContent.create_data_uri_from_base64(image_base64) - - image_output = DataContent( - uri=uri, - media_type=media_type, + image_output = Content.from_uri( + uri=f"data:{detect_media_type_from_base64(data_str=image_base64) or 'image/png'}" + f";base64,{image_base64}", additional_properties={ "partial_image_index": partial_index, "is_partial_image": True, @@ -1316,13 +1306,13 @@ def _parse_chunk_from_openai( image_id = getattr(event, "item_id", None) contents.append( - ImageGenerationToolCallContent( + Content.from_image_generation_tool_call( image_id=image_id, 
raw_representation=event, ) ) contents.append( - ImageGenerationToolResultContent( + Content.from_image_generation_tool_result( image_id=image_id, outputs=image_output, raw_representation=event, @@ -1343,7 +1333,7 @@ def _get_ann_value(key: str) -> Any: if ann_type == "file_path": if ann_file_id: contents.append( - HostedFileContent( + Content.from_hosted_file( file_id=str(ann_file_id), additional_properties={ "annotation_index": event.annotation_index, @@ -1355,7 +1345,7 @@ def _get_ann_value(key: str) -> Any: elif ann_type == "file_citation": if ann_file_id: contents.append( - HostedFileContent( + Content.from_hosted_file( file_id=str(ann_file_id), additional_properties={ "annotation_index": event.annotation_index, @@ -1368,7 +1358,7 @@ def _get_ann_value(key: str) -> Any: elif ann_type == "container_file_citation": if ann_file_id: contents.append( - HostedFileContent( + Content.from_hosted_file( file_id=str(ann_file_id), additional_properties={ "annotation_index": event.annotation_index, @@ -1402,9 +1392,9 @@ def _parse_usage_from_openai(self, usage: ResponseUsage) -> UsageDetails | None: total_token_count=usage.total_tokens, ) if usage.input_tokens_details and usage.input_tokens_details.cached_tokens: - details["openai.cached_input_tokens"] = usage.input_tokens_details.cached_tokens + details["openai.cached_input_tokens"] = usage.input_tokens_details.cached_tokens # type: ignore[typeddict-unknown-key] if usage.output_tokens_details and usage.output_tokens_details.reasoning_tokens: - details["openai.reasoning_tokens"] = usage.output_tokens_details.reasoning_tokens + details["openai.reasoning_tokens"] = usage.output_tokens_details.reasoning_tokens # type: ignore[typeddict-unknown-key] return details def _get_metadata_from_response(self, output: Any) -> dict[str, Any]: diff --git a/python/packages/core/pyproject.toml b/python/packages/core/pyproject.toml index c47f8eb8e6..830118e3d8 100644 --- a/python/packages/core/pyproject.toml +++ 
b/python/packages/core/pyproject.toml @@ -4,7 +4,7 @@ description = "Microsoft Agent Framework for building AI Agents with Python. Thi authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] readme = "README.md" requires-python = ">=3.10" -version = "1.0.0b260114" +version = "1.0.0b260116" license-files = ["LICENSE"] urls.homepage = "https://aka.ms/agent-framework" urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" diff --git a/python/packages/core/tests/azure/test_azure_assistants_client.py b/python/packages/core/tests/azure/test_azure_assistants_client.py index 6c65dac7c1..776951a9ea 100644 --- a/python/packages/core/tests/azure/test_azure_assistants_client.py +++ b/python/packages/core/tests/azure/test_azure_assistants_client.py @@ -18,7 +18,6 @@ ChatResponse, ChatResponseUpdate, HostedCodeInterpreterTool, - TextContent, ) from agent_framework.azure import AzureOpenAIAssistantsClient from agent_framework.exceptions import ServiceInitializationError @@ -332,7 +331,7 @@ async def test_azure_assistants_client_streaming() -> None: assert chunk is not None assert isinstance(chunk, ChatResponseUpdate) for content in chunk.contents: - if isinstance(content, TextContent) and content.text: + if content.type == "text" and content.text: full_message += content.text assert any(word in full_message.lower() for word in ["sunny", "25", "weather", "seattle"]) @@ -358,7 +357,7 @@ async def test_azure_assistants_client_streaming_tools() -> None: assert chunk is not None assert isinstance(chunk, ChatResponseUpdate) for content in chunk.contents: - if isinstance(content, TextContent) and content.text: + if content.type == "text" and content.text: full_message += content.text assert any(word in full_message.lower() for word in ["sunny", "25", "weather"]) diff --git a/python/packages/core/tests/azure/test_azure_chat_client.py b/python/packages/core/tests/azure/test_azure_chat_client.py index 483b13f14f..84d7d897ff 100644 --- 
a/python/packages/core/tests/azure/test_azure_chat_client.py +++ b/python/packages/core/tests/azure/test_azure_chat_client.py @@ -25,7 +25,6 @@ ChatMessage, ChatResponse, ChatResponseUpdate, - TextContent, ai_function, ) from agent_framework._telemetry import USER_AGENT_KEY @@ -304,9 +303,9 @@ async def test_azure_on_your_data( ) assert len(content.messages) == 1 assert len(content.messages[0].contents) == 1 - assert isinstance(content.messages[0].contents[0], TextContent) + assert content.messages[0].contents[0].type == "text" assert len(content.messages[0].contents[0].annotations) == 1 - assert content.messages[0].contents[0].annotations[0].title == "test title" + assert content.messages[0].contents[0].annotations[0]["title"] == "test title" assert content.messages[0].contents[0].text == "test" mock_create.assert_awaited_once_with( @@ -374,9 +373,9 @@ async def test_azure_on_your_data_string( ) assert len(content.messages) == 1 assert len(content.messages[0].contents) == 1 - assert isinstance(content.messages[0].contents[0], TextContent) + assert content.messages[0].contents[0].type == "text" assert len(content.messages[0].contents[0].annotations) == 1 - assert content.messages[0].contents[0].annotations[0].title == "test title" + assert content.messages[0].contents[0].annotations[0]["title"] == "test title" assert content.messages[0].contents[0].text == "test" mock_create.assert_awaited_once_with( @@ -433,7 +432,7 @@ async def test_azure_on_your_data_fail( ) assert len(content.messages) == 1 assert len(content.messages[0].contents) == 1 - assert isinstance(content.messages[0].contents[0], TextContent) + assert content.messages[0].contents[0].type == "text" assert content.messages[0].contents[0].text == "test" mock_create.assert_awaited_once_with( @@ -592,6 +591,46 @@ async def test_get_streaming( ) +@patch.object(AsyncChatCompletions, "create", new_callable=AsyncMock) +async def test_streaming_with_none_delta( + mock_create: AsyncMock, + 
azure_openai_unit_test_env: dict[str, str], + chat_history: list[ChatMessage], +) -> None: + """Test streaming handles None delta from async content filtering.""" + # First chunk has None delta (simulates async filtering) + chunk_choice_with_none = ChunkChoice.model_construct(index=0, delta=None, finish_reason=None) + chunk_with_none_delta = ChatCompletionChunk.model_construct( + id="test_id", + choices=[chunk_choice_with_none], + created=0, + model="test", + object="chat.completion.chunk", + ) + # Second chunk has actual content + chunk_with_content = ChatCompletionChunk( + id="test_id", + choices=[ChunkChoice(index=0, delta=ChunkChoiceDelta(content="test", role="assistant"), finish_reason="stop")], + created=0, + model="test", + object="chat.completion.chunk", + ) + stream = MagicMock(spec=AsyncStream) + stream.__aiter__.return_value = [chunk_with_none_delta, chunk_with_content] + mock_create.return_value = stream + + chat_history.append(ChatMessage(text="hello world", role="user")) + azure_chat_client = AzureOpenAIChatClient() + + results: list[ChatResponseUpdate] = [] + async for msg in azure_chat_client.get_streaming_response(messages=chat_history): + results.append(msg) + + assert len(results) > 0 + assert any(content.type == "text" and content.text == "test" for msg in results for content in msg.contents) + assert any(msg.contents for msg in results) + + @ai_function def get_story_text() -> str: """Returns a story about Emily and David.""" @@ -689,7 +728,7 @@ async def test_azure_openai_chat_client_streaming() -> None: assert chunk.message_id is not None assert chunk.response_id is not None for content in chunk.contents: - if isinstance(content, TextContent) and content.text: + if content.type == "text" and content.text: full_message += content.text assert "Emily" in full_message or "David" in full_message @@ -715,7 +754,7 @@ async def test_azure_openai_chat_client_streaming_tools() -> None: assert chunk is not None assert isinstance(chunk, 
ChatResponseUpdate) for content in chunk.contents: - if isinstance(content, TextContent) and content.text: + if content.type == "text" and content.text: full_message += content.text assert "Emily" in full_message or "David" in full_message diff --git a/python/packages/core/tests/azure/test_azure_responses_client.py b/python/packages/core/tests/azure/test_azure_responses_client.py index 0e1c17f9a8..b2d4a59ab7 100644 --- a/python/packages/core/tests/azure/test_azure_responses_client.py +++ b/python/packages/core/tests/azure/test_azure_responses_client.py @@ -15,10 +15,10 @@ ChatClientProtocol, ChatMessage, ChatResponse, + Content, HostedCodeInterpreterTool, HostedFileSearchTool, HostedMCPTool, - HostedVectorStoreContent, HostedWebSearchTool, ai_function, ) @@ -48,7 +48,7 @@ async def get_weather(location: Annotated[str, "The location as a city name"]) - return f"The weather in {location} is sunny and 72°F." -async def create_vector_store(client: AzureOpenAIResponsesClient) -> tuple[str, HostedVectorStoreContent]: +async def create_vector_store(client: AzureOpenAIResponsesClient) -> tuple[str, Content]: """Create a vector store with sample documents for testing.""" file = await client.client.files.create( file=("todays_weather.txt", b"The weather today is sunny with a high of 75F."), purpose="assistants" @@ -61,7 +61,7 @@ async def create_vector_store(client: AzureOpenAIResponsesClient) -> tuple[str, if result.last_error is not None: raise Exception(f"Vector store file processing failed with status: {result.last_error.message}") - return file.id, HostedVectorStoreContent(vector_store_id=vector_store.id) + return file.id, Content.from_hosted_vector_store(vector_store_id=vector_store.id) async def delete_vector_store(client: AzureOpenAIResponsesClient, file_id: str, vector_store_id: str) -> None: diff --git a/python/packages/core/tests/core/conftest.py b/python/packages/core/tests/core/conftest.py index 1561392214..f2f6059b91 100644 --- 
a/python/packages/core/tests/core/conftest.py +++ b/python/packages/core/tests/core/conftest.py @@ -20,8 +20,8 @@ ChatMessage, ChatResponse, ChatResponseUpdate, + Content, Role, - TextContent, ToolProtocol, ai_function, use_chat_middleware, @@ -108,8 +108,8 @@ async def get_streaming_response( for update in self.streaming_responses.pop(0): yield update else: - yield ChatResponseUpdate(text=TextContent(text="test streaming response "), role="assistant") - yield ChatResponseUpdate(contents=[TextContent(text="another update")], role="assistant") + yield ChatResponseUpdate(text=Content.from_text(text="test streaming response "), role="assistant") + yield ChatResponseUpdate(contents=[Content.from_text(text="another update")], role="assistant") @use_chat_middleware @@ -233,7 +233,7 @@ async def run( **kwargs: Any, ) -> AgentResponse: logger.debug(f"Running mock agent, with: {messages=}, {thread=}, {kwargs=}") - return AgentResponse(messages=[ChatMessage(role=Role.ASSISTANT, contents=[TextContent("Response")])]) + return AgentResponse(messages=[ChatMessage(role=Role.ASSISTANT, contents=[Content.from_text("Response")])]) async def run_stream( self, @@ -243,7 +243,7 @@ async def run_stream( **kwargs: Any, ) -> AsyncIterable[AgentResponseUpdate]: logger.debug(f"Running mock agent stream, with: {messages=}, {thread=}, {kwargs=}") - yield AgentResponseUpdate(contents=[TextContent("Response")]) + yield AgentResponseUpdate(contents=[Content.from_text("Response")]) def get_new_thread(self) -> AgentThread: return MockAgentThread() diff --git a/python/packages/core/tests/core/test_agents.py b/python/packages/core/tests/core/test_agents.py index ee9054c143..a331f6f75c 100644 --- a/python/packages/core/tests/core/test_agents.py +++ b/python/packages/core/tests/core/test_agents.py @@ -18,12 +18,11 @@ ChatMessage, ChatMessageStore, ChatResponse, + Content, Context, ContextProvider, - FunctionCallContent, HostedCodeInterpreterTool, Role, - TextContent, ai_function, ) from 
agent_framework._mcp import MCPTool @@ -136,7 +135,7 @@ async def test_prepare_thread_does_not_mutate_agent_chat_options(chat_client: Ch async def test_chat_client_agent_update_thread_id(chat_client_base: ChatClientProtocol) -> None: mock_response = ChatResponse( - messages=[ChatMessage(role=Role.ASSISTANT, contents=[TextContent("test response")])], + messages=[ChatMessage(role=Role.ASSISTANT, contents=[Content.from_text("test response")])], conversation_id="123", ) chat_client_base.run_responses = [mock_response] @@ -200,7 +199,9 @@ async def test_chat_client_agent_author_name_is_used_from_response(chat_client_b chat_client_base.run_responses = [ ChatResponse( messages=[ - ChatMessage(role=Role.ASSISTANT, contents=[TextContent("test response")], author_name="TestAuthor") + ChatMessage( + role=Role.ASSISTANT, contents=[Content.from_text("test response")], author_name="TestAuthor" + ) ] ) ] @@ -264,7 +265,7 @@ async def test_chat_agent_context_providers_thread_created(chat_client_base: Cha mock_provider = MockContextProvider() chat_client_base.run_responses = [ ChatResponse( - messages=[ChatMessage(role=Role.ASSISTANT, contents=[TextContent("test response")])], + messages=[ChatMessage(role=Role.ASSISTANT, contents=[Content.from_text("test response")])], conversation_id="test-thread-id", ) ] @@ -345,7 +346,7 @@ async def test_chat_agent_context_providers_with_thread_service_id(chat_client_b mock_provider = MockContextProvider() chat_client_base.run_responses = [ ChatResponse( - messages=[ChatMessage(role=Role.ASSISTANT, contents=[TextContent("test response")])], + messages=[ChatMessage(role=Role.ASSISTANT, contents=[Content.from_text("test response")])], conversation_id="service-thread-123", ) ] @@ -575,7 +576,9 @@ def echo_thread_info(text: str, **kwargs: Any) -> str: # type: ignore[reportUnk ChatResponse( messages=ChatMessage( role="assistant", - contents=[FunctionCallContent(call_id="1", name="echo_thread_info", arguments='{"text": "hello"}')], + contents=[ + 
Content.from_function_call(call_id="1", name="echo_thread_info", arguments='{"text": "hello"}') + ], ) ), ChatResponse(messages=ChatMessage(role="assistant", text="done")), diff --git a/python/packages/core/tests/core/test_as_tool_kwargs_propagation.py b/python/packages/core/tests/core/test_as_tool_kwargs_propagation.py index 8669ecc3d6..39f441eb49 100644 --- a/python/packages/core/tests/core/test_as_tool_kwargs_propagation.py +++ b/python/packages/core/tests/core/test_as_tool_kwargs_propagation.py @@ -5,7 +5,7 @@ from collections.abc import Awaitable, Callable from typing import Any -from agent_framework import ChatAgent, ChatMessage, ChatResponse, FunctionCallContent, agent_middleware +from agent_framework import ChatAgent, ChatMessage, ChatResponse, Content, agent_middleware from agent_framework._middleware import AgentRunContext from .conftest import MockChatClient @@ -113,7 +113,7 @@ async def capture_middleware( ChatMessage( role="assistant", contents=[ - FunctionCallContent( + Content.from_function_call( call_id="call_c_1", name="call_c", arguments='{"task": "Please execute agent_c"}', @@ -170,10 +170,10 @@ async def capture_middleware( await next(context) # Setup mock streaming responses - from agent_framework import ChatResponseUpdate, TextContent + from agent_framework import ChatResponseUpdate chat_client.streaming_responses = [ - [ChatResponseUpdate(text=TextContent(text="Streaming response"), role="assistant")], + [ChatResponseUpdate(text=Content.from_text(text="Streaming response"), role="assistant")], ] sub_agent = ChatAgent( @@ -313,3 +313,44 @@ async def capture_middleware( # Verify second call had its own kwargs (not leaked from first) assert second_call_kwargs.get("session_id") == "session-2" assert second_call_kwargs.get("api_token") == "token-2" + + async def test_as_tool_excludes_conversation_id_from_forwarded_kwargs(self, chat_client: MockChatClient) -> None: + """Test that conversation_id is not forwarded to sub-agent.""" + captured_kwargs: 
dict[str, Any] = {} + + @agent_middleware + async def capture_middleware( + context: AgentRunContext, next: Callable[[AgentRunContext], Awaitable[None]] + ) -> None: + captured_kwargs.update(context.kwargs) + await next(context) + + # Setup mock response + chat_client.responses = [ + ChatResponse(messages=[ChatMessage(role="assistant", text="Response from sub-agent")]), + ] + + sub_agent = ChatAgent( + chat_client=chat_client, + name="sub_agent", + middleware=[capture_middleware], + ) + + tool = sub_agent.as_tool(name="delegate", arg_name="task") + + # Invoke tool with conversation_id in kwargs (simulating parent's conversation state) + await tool.invoke( + arguments=tool.input_model(task="Test delegation"), + conversation_id="conv-parent-456", + api_token="secret-xyz-123", + user_id="user-456", + ) + + # Verify conversation_id was NOT forwarded to sub-agent + assert "conversation_id" not in captured_kwargs, ( + f"conversation_id should not be forwarded, but got: {captured_kwargs}" + ) + + # Verify other kwargs were still forwarded + assert captured_kwargs.get("api_token") == "secret-xyz-123" + assert captured_kwargs.get("user_id") == "user-456" diff --git a/python/packages/core/tests/core/test_function_invocation_logic.py b/python/packages/core/tests/core/test_function_invocation_logic.py index 3aa8586a69..b2de663d03 100644 --- a/python/packages/core/tests/core/test_function_invocation_logic.py +++ b/python/packages/core/tests/core/test_function_invocation_logic.py @@ -11,11 +11,8 @@ ChatMessage, ChatResponse, ChatResponseUpdate, - FunctionApprovalRequestContent, - FunctionCallContent, - FunctionResultContent, + Content, Role, - TextContent, ai_function, ) from agent_framework._middleware import FunctionInvocationContext, FunctionMiddleware @@ -34,7 +31,9 @@ def ai_func(arg1: str) -> str: ChatResponse( messages=ChatMessage( role="assistant", - contents=[FunctionCallContent(call_id="1", name="test_function", arguments='{"arg1": "value1"}')], + contents=[ + 
Content.from_function_call(call_id="1", name="test_function", arguments='{"arg1": "value1"}') + ], ) ), ChatResponse(messages=ChatMessage(role="assistant", text="done")), @@ -43,12 +42,12 @@ def ai_func(arg1: str) -> str: assert exec_counter == 1 assert len(response.messages) == 3 assert response.messages[0].role == Role.ASSISTANT - assert isinstance(response.messages[0].contents[0], FunctionCallContent) + assert response.messages[0].contents[0].type == "function_call" assert response.messages[0].contents[0].name == "test_function" assert response.messages[0].contents[0].arguments == '{"arg1": "value1"}' assert response.messages[0].contents[0].call_id == "1" assert response.messages[1].role == Role.TOOL - assert isinstance(response.messages[1].contents[0], FunctionResultContent) + assert response.messages[1].contents[0].type == "function_result" assert response.messages[1].contents[0].call_id == "1" assert response.messages[1].contents[0].result == "Processed value1" assert response.messages[2].role == Role.ASSISTANT @@ -68,13 +67,17 @@ def ai_func(arg1: str) -> str: ChatResponse( messages=ChatMessage( role="assistant", - contents=[FunctionCallContent(call_id="1", name="test_function", arguments='{"arg1": "value1"}')], + contents=[ + Content.from_function_call(call_id="1", name="test_function", arguments='{"arg1": "value1"}') + ], ) ), ChatResponse( messages=ChatMessage( role="assistant", - contents=[FunctionCallContent(call_id="2", name="test_function", arguments='{"arg1": "value1"}')], + contents=[ + Content.from_function_call(call_id="2", name="test_function", arguments='{"arg1": "value1"}') + ], ) ), ChatResponse(messages=ChatMessage(role="assistant", text="done")), @@ -87,10 +90,10 @@ def ai_func(arg1: str) -> str: assert response.messages[2].role == Role.ASSISTANT assert response.messages[3].role == Role.TOOL assert response.messages[4].role == Role.ASSISTANT - assert isinstance(response.messages[0].contents[0], FunctionCallContent) - assert 
isinstance(response.messages[1].contents[0], FunctionResultContent) - assert isinstance(response.messages[2].contents[0], FunctionCallContent) - assert isinstance(response.messages[3].contents[0], FunctionResultContent) + assert response.messages[0].contents[0].type == "function_call" + assert response.messages[1].contents[0].type == "function_result" + assert response.messages[2].contents[0].type == "function_call" + assert response.messages[3].contents[0].type == "function_result" async def test_base_client_with_streaming_function_calling(chat_client_base: ChatClientProtocol): @@ -105,17 +108,17 @@ def ai_func(arg1: str) -> str: chat_client_base.streaming_responses = [ [ ChatResponseUpdate( - contents=[FunctionCallContent(call_id="1", name="test_function", arguments='{"arg1":')], + contents=[Content.from_function_call(call_id="1", name="test_function", arguments='{"arg1":')], role="assistant", ), ChatResponseUpdate( - contents=[FunctionCallContent(call_id="1", name="test_function", arguments='"value1"}')], + contents=[Content.from_function_call(call_id="1", name="test_function", arguments='"value1"}')], role="assistant", ), ], [ ChatResponseUpdate( - contents=[TextContent(text="Processed value1")], + contents=[Content.from_text(text="Processed value1")], role="assistant", ) ], @@ -150,7 +153,7 @@ def ai_func(user_query: str) -> str: messages=ChatMessage( role="assistant", contents=[ - FunctionCallContent( + Content.from_function_call( call_id="1", name="start_todo_investigation", arguments='{"user_query": "issue"}', @@ -207,7 +210,7 @@ def ai_func(user_query: str) -> str: messages=ChatMessage( role="assistant", contents=[ - FunctionCallContent( + Content.from_function_call( call_id="thread-1", name="start_threaded_investigation", arguments='{"user_query": "issue"}', @@ -334,7 +337,7 @@ def func_with_approval(arg1: str) -> str: function_name = "approval_func" if approval_required else "no_approval_func" # Single function call content - func_call = 
FunctionCallContent(call_id="1", name=function_name, arguments='{"arg1": "value1"}') + func_call = Content.from_function_call(call_id="1", name=function_name, arguments='{"arg1": "value1"}') completion = ChatMessage(role="assistant", text="done") chat_client_base.run_responses = [ @@ -344,23 +347,27 @@ def func_with_approval(arg1: str) -> str: chat_client_base.streaming_responses = [ [ ChatResponseUpdate( - contents=[FunctionCallContent(call_id="1", name=function_name, arguments='{"arg1":')], + contents=[Content.from_function_call(call_id="1", name=function_name, arguments='{"arg1":')], role="assistant", ), ChatResponseUpdate( - contents=[FunctionCallContent(call_id="1", name=function_name, arguments='"value1"}')], + contents=[Content.from_function_call(call_id="1", name=function_name, arguments='"value1"}')], role="assistant", ), ] - ] + ([] if approval_required else [[ChatResponseUpdate(contents=[TextContent(text="done")], role="assistant")]]) + ] + ( + [] + if approval_required + else [[ChatResponseUpdate(contents=[Content.from_text(text="done")], role="assistant")]] + ) else: # num_functions == 2 tools = [func_no_approval, func_with_approval] # Two function calls content func_calls = [ - FunctionCallContent(call_id="1", name="no_approval_func", arguments='{"arg1": "value1"}'), - FunctionCallContent(call_id="2", name="approval_func", arguments='{"arg1": "value2"}'), + Content.from_function_call(call_id="1", name="no_approval_func", arguments='{"arg1": "value1"}'), + Content.from_function_call(call_id="2", name="approval_func", arguments='{"arg1": "value2"}'), ] chat_client_base.run_responses = [ChatResponse(messages=ChatMessage(role="assistant", contents=func_calls))] @@ -405,24 +412,24 @@ def func_with_approval(arg1: str) -> str: assert len(messages) == 1 # Assistant message should have FunctionCallContent + FunctionApprovalRequestContent assert len(messages[0].contents) == 2 - assert isinstance(messages[0].contents[0], FunctionCallContent) - assert 
isinstance(messages[0].contents[1], FunctionApprovalRequestContent) + assert messages[0].contents[0].type == "function_call" + assert messages[0].contents[1].type == "function_approval_request" assert messages[0].contents[1].function_call.name == "approval_func" assert exec_counter == 0 # Function not executed yet else: # Streaming: 2 function call chunks + 1 approval request update (same assistant message) assert len(messages) == 3 - assert isinstance(messages[0].contents[0], FunctionCallContent) - assert isinstance(messages[1].contents[0], FunctionCallContent) - assert isinstance(messages[2].contents[0], FunctionApprovalRequestContent) + assert messages[0].contents[0].type == "function_call" + assert messages[1].contents[0].type == "function_call" + assert messages[2].contents[0].type == "function_approval_request" assert messages[2].contents[0].function_call.name == "approval_func" assert exec_counter == 0 # Function not executed yet else: # Single function without approval: call + result + final if not streaming: assert len(messages) == 3 - assert isinstance(messages[0].contents[0], FunctionCallContent) - assert isinstance(messages[1].contents[0], FunctionResultContent) + assert messages[0].contents[0].type == "function_call" + assert messages[1].contents[0].type == "function_result" assert messages[1].contents[0].result == "Processed value1" assert messages[2].role == Role.ASSISTANT assert messages[2].text == "done" @@ -430,9 +437,9 @@ def func_with_approval(arg1: str) -> str: else: # Streaming has: 2 function call updates + 1 result update + 1 final update assert len(messages) == 4 - assert isinstance(messages[0].contents[0], FunctionCallContent) - assert isinstance(messages[1].contents[0], FunctionCallContent) - assert isinstance(messages[2].contents[0], FunctionResultContent) + assert messages[0].contents[0].type == "function_call" + assert messages[1].contents[0].type == "function_call" + assert messages[2].contents[0].type == "function_result" assert 
messages[3].text == "done" assert exec_counter == 1 else: # num_functions == 2 @@ -443,26 +450,25 @@ def func_with_approval(arg1: str) -> str: assert len(messages) == 1 # Should have: 2 FunctionCallContent + 2 FunctionApprovalRequestContent assert len(messages[0].contents) == 4 - assert isinstance(messages[0].contents[0], FunctionCallContent) - assert isinstance(messages[0].contents[1], FunctionCallContent) + assert messages[0].contents[0].type == "function_call" + assert messages[0].contents[1].type == "function_call" # Both should result in approval requests - approval_requests = [c for c in messages[0].contents if isinstance(c, FunctionApprovalRequestContent)] + approval_requests = [c for c in messages[0].contents if c.type == "function_approval_request"] assert len(approval_requests) == 2 assert exec_counter == 0 # Neither function executed yet else: # Streaming: 2 function call updates + 1 approval request with 2 contents assert len(messages) == 3 - assert isinstance(messages[0].contents[0], FunctionCallContent) - assert isinstance(messages[1].contents[0], FunctionCallContent) + assert messages[0].contents[0].type == "function_call" + assert messages[1].contents[0].type == "function_call" # The approval request message contains both approval requests assert len(messages[2].contents) == 2 - assert all(isinstance(c, FunctionApprovalRequestContent) for c in messages[2].contents) + assert all(c.type == "function_approval_request" for c in messages[2].contents) assert exec_counter == 0 # Neither function executed yet async def test_rejected_approval(chat_client_base: ChatClientProtocol): """Test that rejecting an approval alongside an approved one is handled correctly.""" - from agent_framework import FunctionApprovalResponseContent exec_counter_approved = 0 exec_counter_rejected = 0 @@ -485,8 +491,8 @@ def func_rejected(arg1: str) -> str: messages=ChatMessage( role="assistant", contents=[ - FunctionCallContent(call_id="1", name="approved_func", arguments='{"arg1": 
"value1"}'), - FunctionCallContent(call_id="2", name="rejected_func", arguments='{"arg1": "value2"}'), + Content.from_function_call(call_id="1", name="approved_func", arguments='{"arg1": "value1"}'), + Content.from_function_call(call_id="2", name="rejected_func", arguments='{"arg1": "value2"}'), ], ) ), @@ -501,19 +507,19 @@ def func_rejected(arg1: str) -> str: assert len(response.messages) == 1 # Assistant message should have: 2 FunctionCallContent + 2 FunctionApprovalRequestContent assert len(response.messages[0].contents) == 4 - approval_requests = [c for c in response.messages[0].contents if isinstance(c, FunctionApprovalRequestContent)] + approval_requests = [c for c in response.messages[0].contents if c.type == "function_approval_request"] assert len(approval_requests) == 2 # Approve one and reject the other approval_req_1 = approval_requests[0] approval_req_2 = approval_requests[1] - approved_response = FunctionApprovalResponseContent( + approved_response = Content.from_function_approval_response( id=approval_req_1.id, function_call=approval_req_1.function_call, approved=True, ) - rejected_response = FunctionApprovalResponseContent( + rejected_response = Content.from_function_approval_response( id=approval_req_2.id, function_call=approval_req_2.function_call, approved=False, @@ -533,7 +539,7 @@ def func_rejected(arg1: str) -> str: rejected_result = None for msg in all_messages: for content in msg.contents: - if isinstance(content, FunctionResultContent): + if content.type == "function_result": if content.call_id == "1": approved_result = content elif content.call_id == "2": @@ -553,7 +559,7 @@ def func_rejected(arg1: str) -> str: # This ensures the message format is correct for OpenAI's API for msg in all_messages: for content in msg.contents: - if isinstance(content, FunctionResultContent): + if content.type == "function_result": assert msg.role == Role.TOOL, ( f"Message with FunctionResultContent must have role='tool', got '{msg.role}'" ) @@ -574,7 +580,7 
@@ def func_with_approval(arg1: str) -> str: messages=ChatMessage( role="assistant", contents=[ - FunctionCallContent(call_id="1", name="test_func", arguments='{"arg1": "value1"}'), + Content.from_function_call(call_id="1", name="test_func", arguments='{"arg1": "value1"}'), ], ) ), @@ -588,14 +594,13 @@ def func_with_approval(arg1: str) -> str: assert len(response.messages) == 1 assert response.messages[0].role == Role.ASSISTANT assert len(response.messages[0].contents) == 2 - assert isinstance(response.messages[0].contents[0], FunctionCallContent) - assert isinstance(response.messages[0].contents[1], FunctionApprovalRequestContent) + assert response.messages[0].contents[0].type == "function_call" + assert response.messages[0].contents[1].type == "function_approval_request" assert exec_counter == 0 async def test_persisted_approval_messages_replay_correctly(chat_client_base: ChatClientProtocol): """Approval flow should work when messages are persisted and sent back (thread scenario).""" - from agent_framework import FunctionApprovalResponseContent exec_counter = 0 @@ -610,7 +615,7 @@ def func_with_approval(arg1: str) -> str: messages=ChatMessage( role="assistant", contents=[ - FunctionCallContent(call_id="1", name="test_func", arguments='{"arg1": "value1"}'), + Content.from_function_call(call_id="1", name="test_func", arguments='{"arg1": "value1"}'), ], ) ), @@ -624,13 +629,13 @@ def func_with_approval(arg1: str) -> str: # Store messages (like a thread would) persisted_messages = [ - ChatMessage(role="user", contents=[TextContent(text="hello")]), + ChatMessage(role="user", contents=[Content.from_text(text="hello")]), *response1.messages, ] # Send approval - approval_req = [c for c in response1.messages[0].contents if isinstance(c, FunctionApprovalRequestContent)][0] - approval_response = FunctionApprovalResponseContent( + approval_req = [c for c in response1.messages[0].contents if c.type == "function_approval_request"][0] + approval_response = 
Content.from_function_approval_response( id=approval_req.id, function_call=approval_req.function_call, approved=True, @@ -650,7 +655,6 @@ def func_with_approval(arg1: str) -> str: async def test_no_duplicate_function_calls_after_approval_processing(chat_client_base: ChatClientProtocol): """Processing approval should not create duplicate function calls in messages.""" - from agent_framework import FunctionApprovalResponseContent @ai_function(name="test_func", approval_mode="always_require") def func_with_approval(arg1: str) -> str: @@ -661,7 +665,7 @@ def func_with_approval(arg1: str) -> str: messages=ChatMessage( role="assistant", contents=[ - FunctionCallContent(call_id="1", name="test_func", arguments='{"arg1": "value1"}'), + Content.from_function_call(call_id="1", name="test_func", arguments='{"arg1": "value1"}'), ], ) ), @@ -672,8 +676,8 @@ def func_with_approval(arg1: str) -> str: "hello", options={"tool_choice": "auto", "tools": [func_with_approval]} ) - approval_req = [c for c in response1.messages[0].contents if isinstance(c, FunctionApprovalRequestContent)][0] - approval_response = FunctionApprovalResponseContent( + approval_req = [c for c in response1.messages[0].contents if c.type == "function_approval_request"][0] + approval_response = Content.from_function_approval_response( id=approval_req.id, function_call=approval_req.function_call, approved=True, @@ -687,7 +691,7 @@ def func_with_approval(arg1: str) -> str: 1 for msg in all_messages for content in msg.contents - if isinstance(content, FunctionCallContent) and content.call_id == "1" + if content.type == "function_call" and content.call_id == "1" ) assert function_call_count == 1 @@ -695,7 +699,6 @@ def func_with_approval(arg1: str) -> str: async def test_rejection_result_uses_function_call_id(chat_client_base: ChatClientProtocol): """Rejection error result should use the function call's call_id, not the approval's id.""" - from agent_framework import FunctionApprovalResponseContent 
@ai_function(name="test_func", approval_mode="always_require") def func_with_approval(arg1: str) -> str: @@ -706,7 +709,7 @@ def func_with_approval(arg1: str) -> str: messages=ChatMessage( role="assistant", contents=[ - FunctionCallContent(call_id="call_123", name="test_func", arguments='{"arg1": "value1"}'), + Content.from_function_call(call_id="call_123", name="test_func", arguments='{"arg1": "value1"}'), ], ) ), @@ -717,8 +720,8 @@ def func_with_approval(arg1: str) -> str: "hello", options={"tool_choice": "auto", "tools": [func_with_approval]} ) - approval_req = [c for c in response1.messages[0].contents if isinstance(c, FunctionApprovalRequestContent)][0] - rejection_response = FunctionApprovalResponseContent( + approval_req = [c for c in response1.messages[0].contents if c.type == "function_approval_request"][0] + rejection_response = Content.from_function_approval_response( id=approval_req.id, function_call=approval_req.function_call, approved=False, @@ -729,7 +732,7 @@ def func_with_approval(arg1: str) -> str: # Find the rejection result rejection_result = next( - (content for msg in all_messages for content in msg.contents if isinstance(content, FunctionResultContent)), + (content for msg in all_messages for content in msg.contents if content.type == "function_result"), None, ) @@ -753,13 +756,17 @@ def ai_func(arg1: str) -> str: ChatResponse( messages=ChatMessage( role="assistant", - contents=[FunctionCallContent(call_id="1", name="test_function", arguments='{"arg1": "value1"}')], + contents=[ + Content.from_function_call(call_id="1", name="test_function", arguments='{"arg1": "value1"}') + ], ) ), ChatResponse( messages=ChatMessage( role="assistant", - contents=[FunctionCallContent(call_id="2", name="test_function", arguments='{"arg1": "value2"}')], + contents=[ + Content.from_function_call(call_id="2", name="test_function", arguments='{"arg1": "value2"}') + ], ) ), # Failsafe response when tool_choice is set to "none" @@ -816,25 +823,33 @@ def 
error_func(arg1: str) -> str: ChatResponse( messages=ChatMessage( role="assistant", - contents=[FunctionCallContent(call_id="1", name="error_function", arguments='{"arg1": "value1"}')], + contents=[ + Content.from_function_call(call_id="1", name="error_function", arguments='{"arg1": "value1"}') + ], ) ), ChatResponse( messages=ChatMessage( role="assistant", - contents=[FunctionCallContent(call_id="2", name="error_function", arguments='{"arg1": "value2"}')], + contents=[ + Content.from_function_call(call_id="2", name="error_function", arguments='{"arg1": "value2"}') + ], ) ), ChatResponse( messages=ChatMessage( role="assistant", - contents=[FunctionCallContent(call_id="3", name="error_function", arguments='{"arg1": "value3"}')], + contents=[ + Content.from_function_call(call_id="3", name="error_function", arguments='{"arg1": "value3"}') + ], ) ), ChatResponse( messages=ChatMessage( role="assistant", - contents=[FunctionCallContent(call_id="4", name="error_function", arguments='{"arg1": "value4"}')], + contents=[ + Content.from_function_call(call_id="4", name="error_function", arguments='{"arg1": "value4"}') + ], ) ), ChatResponse(messages=ChatMessage(role="assistant", text="final response")), @@ -850,14 +865,14 @@ def error_func(arg1: str) -> str: content for msg in response.messages for content in msg.contents - if isinstance(content, FunctionResultContent) and content.exception + if content.type == "function_result" and content.exception ] # The first call errors, then the second call errors, hitting the limit # So we get 2 function calls with errors, but the responses show the behavior stopped assert len(error_results) >= 1 # At least one error occurred # Should have stopped making new function calls after hitting the error limit function_calls = [ - content for msg in response.messages for content in msg.contents if isinstance(content, FunctionCallContent) + content for msg in response.messages for content in msg.contents if content.type == "function_call" ] # 
Should have made at most 2 function calls before stopping assert len(function_calls) <= 2 @@ -877,7 +892,9 @@ def known_func(arg1: str) -> str: ChatResponse( messages=ChatMessage( role="assistant", - contents=[FunctionCallContent(call_id="1", name="unknown_function", arguments='{"arg1": "value1"}')], + contents=[ + Content.from_function_call(call_id="1", name="unknown_function", arguments='{"arg1": "value1"}') + ], ) ), ChatResponse(messages=ChatMessage(role="assistant", text="done")), @@ -890,7 +907,7 @@ def known_func(arg1: str) -> str: # Should have a result message indicating the tool wasn't found assert len(response.messages) == 3 - assert isinstance(response.messages[1].contents[0], FunctionResultContent) + assert response.messages[1].contents[0].type == "function_result" result_str = response.messages[1].contents[0].result or response.messages[1].contents[0].exception or "" assert "not found" in result_str.lower() assert exec_counter == 0 # Known function not executed @@ -910,7 +927,9 @@ def known_func(arg1: str) -> str: ChatResponse( messages=ChatMessage( role="assistant", - contents=[FunctionCallContent(call_id="1", name="unknown_function", arguments='{"arg1": "value1"}')], + contents=[ + Content.from_function_call(call_id="1", name="unknown_function", arguments='{"arg1": "value1"}') + ], ) ), ] @@ -946,7 +965,9 @@ def hidden_func(arg1: str) -> str: ChatResponse( messages=ChatMessage( role="assistant", - contents=[FunctionCallContent(call_id="1", name="hidden_function", arguments='{"arg1": "value1"}')], + contents=[ + Content.from_function_call(call_id="1", name="hidden_function", arguments='{"arg1": "value1"}') + ], ) ), ChatResponse(messages=ChatMessage(role="assistant", text="done")), @@ -967,7 +988,7 @@ def hidden_func(arg1: str) -> str: content for msg in response.messages for content in msg.contents - if isinstance(content, FunctionCallContent) and content.name == "hidden_function" + if content.type == "function_call" and content.name == 
"hidden_function" ] assert len(function_calls) >= 1 @@ -983,7 +1004,9 @@ def error_func(arg1: str) -> str: ChatResponse( messages=ChatMessage( role="assistant", - contents=[FunctionCallContent(call_id="1", name="error_function", arguments='{"arg1": "value1"}')], + contents=[ + Content.from_function_call(call_id="1", name="error_function", arguments='{"arg1": "value1"}') + ], ) ), ChatResponse(messages=ChatMessage(role="assistant", text="done")), @@ -996,7 +1019,7 @@ def error_func(arg1: str) -> str: # Should have a generic error message error_result = next( - content for msg in response.messages for content in msg.contents if isinstance(content, FunctionResultContent) + content for msg in response.messages for content in msg.contents if content.type == "function_result" ) assert error_result.result is not None assert error_result.exception is not None @@ -1015,7 +1038,9 @@ def error_func(arg1: str) -> str: ChatResponse( messages=ChatMessage( role="assistant", - contents=[FunctionCallContent(call_id="1", name="error_function", arguments='{"arg1": "value1"}')], + contents=[ + Content.from_function_call(call_id="1", name="error_function", arguments='{"arg1": "value1"}') + ], ) ), ChatResponse(messages=ChatMessage(role="assistant", text="done")), @@ -1028,7 +1053,7 @@ def error_func(arg1: str) -> str: # Should have detailed error message error_result = next( - content for msg in response.messages for content in msg.contents if isinstance(content, FunctionResultContent) + content for msg in response.messages for content in msg.contents if content.type == "function_result" ) assert error_result.result is not None assert error_result.exception is not None @@ -1083,7 +1108,9 @@ def typed_func(arg1: int) -> str: # Expects int, not str ChatResponse( messages=ChatMessage( role="assistant", - contents=[FunctionCallContent(call_id="1", name="typed_function", arguments='{"arg1": "not_an_int"}')], + contents=[ + Content.from_function_call(call_id="1", name="typed_function", 
arguments='{"arg1": "not_an_int"}') + ], ) ), ChatResponse(messages=ChatMessage(role="assistant", text="done")), @@ -1096,7 +1123,7 @@ def typed_func(arg1: int) -> str: # Expects int, not str # Should have detailed validation error error_result = next( - content for msg in response.messages for content in msg.contents if isinstance(content, FunctionResultContent) + content for msg in response.messages for content in msg.contents if content.type == "function_result" ) assert error_result.result is not None assert error_result.exception is not None @@ -1115,7 +1142,9 @@ def typed_func(arg1: int) -> str: # Expects int, not str ChatResponse( messages=ChatMessage( role="assistant", - contents=[FunctionCallContent(call_id="1", name="typed_function", arguments='{"arg1": "not_an_int"}')], + contents=[ + Content.from_function_call(call_id="1", name="typed_function", arguments='{"arg1": "not_an_int"}') + ], ) ), ChatResponse(messages=ChatMessage(role="assistant", text="done")), @@ -1128,7 +1157,7 @@ def typed_func(arg1: int) -> str: # Expects int, not str # Should have generic validation error error_result = next( - content for msg in response.messages for content in msg.contents if isinstance(content, FunctionResultContent) + content for msg in response.messages for content in msg.contents if content.type == "function_result" ) assert error_result.result is not None assert error_result.exception is not None @@ -1138,17 +1167,16 @@ def typed_func(arg1: int) -> str: # Expects int, not str async def test_hosted_tool_approval_response(chat_client_base: ChatClientProtocol): """Test handling of approval responses for hosted tools (tools not in tool_map).""" - from agent_framework import FunctionApprovalResponseContent @ai_function(name="local_function") def local_func(arg1: str) -> str: return f"Local {arg1}" # Create an approval response for a hosted tool that's not in our tool_map - hosted_function_call = FunctionCallContent( + hosted_function_call = Content.from_function_call( 
call_id="hosted_1", name="hosted_function", arguments='{"arg1": "value"}' ) - approval_response = FunctionApprovalResponseContent( + approval_response = Content.from_function_approval_response( id="approval_1", function_call=hosted_function_call, approved=True, @@ -1172,7 +1200,6 @@ def local_func(arg1: str) -> str: async def test_unapproved_tool_execution_raises_exception(chat_client_base: ChatClientProtocol): """Test that attempting to execute an unapproved tool raises ToolException.""" - from agent_framework import FunctionApprovalResponseContent @ai_function(name="test_function", approval_mode="always_require") def test_func(arg1: str) -> str: @@ -1183,7 +1210,7 @@ def test_func(arg1: str) -> str: messages=ChatMessage( role="assistant", contents=[ - FunctionCallContent(call_id="1", name="test_function", arguments='{"arg1": "value1"}'), + Content.from_function_call(call_id="1", name="test_function", arguments='{"arg1": "value1"}'), ], ) ), @@ -1193,10 +1220,10 @@ def test_func(arg1: str) -> str: # Get approval request response1 = await chat_client_base.get_response("hello", options={"tool_choice": "auto", "tools": [test_func]}) - approval_req = [c for c in response1.messages[0].contents if isinstance(c, FunctionApprovalRequestContent)][0] + approval_req = [c for c in response1.messages[0].contents if c.type == "function_approval_request"][0] # Create a rejection response (approved=False) - rejection_response = FunctionApprovalResponseContent( + rejection_response = Content.from_function_approval_response( id=approval_req.id, function_call=approval_req.function_call, approved=False, @@ -1214,8 +1241,7 @@ def test_func(arg1: str) -> str: content for msg in all_messages for content in msg.contents - if isinstance(content, FunctionResultContent) - and "rejected" in (content.result or content.exception or "").lower() + if content.type == "function_result" and "rejected" in (content.result or content.exception or "").lower() ), None, ) @@ -1227,7 +1253,6 @@ async def 
test_approved_function_call_with_error_without_detailed_errors(chat_cl When include_detailed_errors=False. """ - from agent_framework import FunctionApprovalResponseContent exec_counter = 0 @@ -1241,7 +1266,7 @@ def error_func(arg1: str) -> str: ChatResponse( messages=ChatMessage( role="assistant", - contents=[FunctionCallContent(call_id="1", name="error_func", arguments='{"arg1": "value1"}')], + contents=[Content.from_function_call(call_id="1", name="error_func", arguments='{"arg1": "value1"}')], ) ), ChatResponse(messages=ChatMessage(role="assistant", text="done")), @@ -1253,10 +1278,10 @@ def error_func(arg1: str) -> str: # Get approval request response1 = await chat_client_base.get_response("hello", options={"tool_choice": "auto", "tools": [error_func]}) - approval_req = [c for c in response1.messages[0].contents if isinstance(c, FunctionApprovalRequestContent)][0] + approval_req = [c for c in response1.messages[0].contents if c.type == "function_approval_request"][0] # Approve the function - approval_response = FunctionApprovalResponseContent( + approval_response = Content.from_function_approval_response( id=approval_req.id, function_call=approval_req.function_call, approved=True, @@ -1276,7 +1301,7 @@ def error_func(arg1: str) -> str: content for msg in all_messages for content in msg.contents - if isinstance(content, FunctionResultContent) and content.exception is not None + if content.type == "function_result" and content.exception is not None ), None, ) @@ -1291,7 +1316,6 @@ async def test_approved_function_call_with_error_with_detailed_errors(chat_clien When include_detailed_errors=True. 
""" - from agent_framework import FunctionApprovalResponseContent exec_counter = 0 @@ -1305,7 +1329,7 @@ def error_func(arg1: str) -> str: ChatResponse( messages=ChatMessage( role="assistant", - contents=[FunctionCallContent(call_id="1", name="error_func", arguments='{"arg1": "value1"}')], + contents=[Content.from_function_call(call_id="1", name="error_func", arguments='{"arg1": "value1"}')], ) ), ChatResponse(messages=ChatMessage(role="assistant", text="done")), @@ -1317,10 +1341,10 @@ def error_func(arg1: str) -> str: # Get approval request response1 = await chat_client_base.get_response("hello", options={"tool_choice": "auto", "tools": [error_func]}) - approval_req = [c for c in response1.messages[0].contents if isinstance(c, FunctionApprovalRequestContent)][0] + approval_req = [c for c in response1.messages[0].contents if c.type == "function_approval_request"][0] # Approve the function - approval_response = FunctionApprovalResponseContent( + approval_response = Content.from_function_approval_response( id=approval_req.id, function_call=approval_req.function_call, approved=True, @@ -1340,7 +1364,7 @@ def error_func(arg1: str) -> str: content for msg in all_messages for content in msg.contents - if isinstance(content, FunctionResultContent) and content.exception is not None + if content.type == "function_result" and content.exception is not None ), None, ) @@ -1353,7 +1377,6 @@ def error_func(arg1: str) -> str: async def test_approved_function_call_with_validation_error(chat_client_base: ChatClientProtocol): """Test that approved functions with validation errors are handled correctly.""" - from agent_framework import FunctionApprovalResponseContent exec_counter = 0 @@ -1367,7 +1390,9 @@ def typed_func(arg1: int) -> str: # Expects int, not str ChatResponse( messages=ChatMessage( role="assistant", - contents=[FunctionCallContent(call_id="1", name="typed_func", arguments='{"arg1": "not_an_int"}')], + contents=[ + Content.from_function_call(call_id="1", 
name="typed_func", arguments='{"arg1": "not_an_int"}') + ], ) ), ChatResponse(messages=ChatMessage(role="assistant", text="done")), @@ -1379,10 +1404,10 @@ def typed_func(arg1: int) -> str: # Expects int, not str # Get approval request response1 = await chat_client_base.get_response("hello", options={"tool_choice": "auto", "tools": [typed_func]}) - approval_req = [c for c in response1.messages[0].contents if isinstance(c, FunctionApprovalRequestContent)][0] + approval_req = [c for c in response1.messages[0].contents if c.type == "function_approval_request"][0] # Approve the function (even though it will fail validation) - approval_response = FunctionApprovalResponseContent( + approval_response = Content.from_function_approval_response( id=approval_req.id, function_call=approval_req.function_call, approved=True, @@ -1402,7 +1427,7 @@ def typed_func(arg1: int) -> str: # Expects int, not str content for msg in all_messages for content in msg.contents - if isinstance(content, FunctionResultContent) and content.exception is not None + if content.type == "function_result" and content.exception is not None ), None, ) @@ -1413,7 +1438,6 @@ def typed_func(arg1: int) -> str: # Expects int, not str async def test_approved_function_call_successful_execution(chat_client_base: ChatClientProtocol): """Test that approved functions execute successfully when no errors occur.""" - from agent_framework import FunctionApprovalResponseContent exec_counter = 0 @@ -1427,7 +1451,7 @@ def success_func(arg1: str) -> str: ChatResponse( messages=ChatMessage( role="assistant", - contents=[FunctionCallContent(call_id="1", name="success_func", arguments='{"arg1": "value1"}')], + contents=[Content.from_function_call(call_id="1", name="success_func", arguments='{"arg1": "value1"}')], ) ), ChatResponse(messages=ChatMessage(role="assistant", text="done")), @@ -1436,10 +1460,10 @@ def success_func(arg1: str) -> str: # Get approval request response1 = await chat_client_base.get_response("hello", 
options={"tool_choice": "auto", "tools": [success_func]}) - approval_req = [c for c in response1.messages[0].contents if isinstance(c, FunctionApprovalRequestContent)][0] + approval_req = [c for c in response1.messages[0].contents if c.type == "function_approval_request"][0] # Approve the function - approval_response = FunctionApprovalResponseContent( + approval_response = Content.from_function_approval_response( id=approval_req.id, function_call=approval_req.function_call, approved=True, @@ -1459,7 +1483,7 @@ def success_func(arg1: str) -> str: content for msg in all_messages for content in msg.contents - if isinstance(content, FunctionResultContent) and content.exception is None + if content.type == "function_result" and content.exception is None ), None, ) @@ -1486,7 +1510,9 @@ async def test_declaration_only_tool(chat_client_base: ChatClientProtocol): ChatResponse( messages=ChatMessage( role="assistant", - contents=[FunctionCallContent(call_id="1", name="declaration_func", arguments='{"arg1": "value1"}')], + contents=[ + Content.from_function_call(call_id="1", name="declaration_func", arguments='{"arg1": "value1"}') + ], ) ), ChatResponse(messages=ChatMessage(role="assistant", text="done")), @@ -1501,7 +1527,7 @@ async def test_declaration_only_tool(chat_client_base: ChatClientProtocol): content for msg in response.messages for content in msg.contents - if isinstance(content, FunctionCallContent) and content.name == "declaration_func" + if content.type == "function_call" and content.name == "declaration_func" ] assert len(function_calls) >= 1 @@ -1510,7 +1536,7 @@ async def test_declaration_only_tool(chat_client_base: ChatClientProtocol): content for msg in response.messages for content in msg.contents - if isinstance(content, FunctionResultContent) and content.call_id == "1" + if content.type == "function_result" and content.call_id == "1" ] assert len(function_results) == 0 @@ -1540,8 +1566,8 @@ async def func2(arg1: str) -> str: messages=ChatMessage( 
role="assistant", contents=[ - FunctionCallContent(call_id="1", name="func1", arguments='{"arg1": "value1"}'), - FunctionCallContent(call_id="2", name="func2", arguments='{"arg1": "value2"}'), + Content.from_function_call(call_id="1", name="func1", arguments='{"arg1": "value1"}'), + Content.from_function_call(call_id="2", name="func2", arguments='{"arg1": "value2"}'), ], ) ), @@ -1557,9 +1583,7 @@ async def func2(arg1: str) -> str: assert "func2_end" in exec_order # Should have results for both - results = [ - content for msg in response.messages for content in msg.contents if isinstance(content, FunctionResultContent) - ] + results = [content for msg in response.messages for content in msg.contents if content.type == "function_result"] assert len(results) == 2 @@ -1577,7 +1601,9 @@ def plain_function(arg1: str) -> str: ChatResponse( messages=ChatMessage( role="assistant", - contents=[FunctionCallContent(call_id="1", name="plain_function", arguments='{"arg1": "value1"}')], + contents=[ + Content.from_function_call(call_id="1", name="plain_function", arguments='{"arg1": "value1"}') + ], ) ), ChatResponse(messages=ChatMessage(role="assistant", text="done")), @@ -1588,9 +1614,7 @@ def plain_function(arg1: str) -> str: # Function should be executed assert exec_counter == 1 - result = next( - content for msg in response.messages for content in msg.contents if isinstance(content, FunctionResultContent) - ) + result = next(content for msg in response.messages for content in msg.contents if content.type == "function_result") assert result.result == "Plain value1" @@ -1606,7 +1630,9 @@ def test_func(arg1: str) -> str: ChatResponse( messages=ChatMessage( role="assistant", - contents=[FunctionCallContent(call_id="1", name="test_function", arguments='{"arg1": "value1"}')], + contents=[ + Content.from_function_call(call_id="1", name="test_function", arguments='{"arg1": "value1"}') + ], ), conversation_id="conv_123", # Simulate service-side thread ), @@ -1619,9 +1645,7 @@ def 
test_func(arg1: str) -> str: response = await chat_client_base.get_response("hello", options={"tool_choice": "auto", "tools": [test_func]}) # Should have executed the function - results = [ - content for msg in response.messages for content in msg.contents if isinstance(content, FunctionResultContent) - ] + results = [content for msg in response.messages for content in msg.contents if content.type == "function_result"] assert len(results) >= 1 assert response.conversation_id == "conv_123" @@ -1637,7 +1661,9 @@ def test_func(arg1: str) -> str: ChatResponse( messages=ChatMessage( role="assistant", - contents=[FunctionCallContent(call_id="1", name="test_function", arguments='{"arg1": "value1"}')], + contents=[ + Content.from_function_call(call_id="1", name="test_function", arguments='{"arg1": "value1"}') + ], ) ), ChatResponse(messages=ChatMessage(role="assistant", text="done")), @@ -1648,10 +1674,8 @@ def test_func(arg1: str) -> str: # Should have messages with both function call and function result assert len(response.messages) >= 2 # Check that we have both a function call and a function result - has_call = any(isinstance(content, FunctionCallContent) for msg in response.messages for content in msg.contents) - has_result = any( - isinstance(content, FunctionResultContent) for msg in response.messages for content in msg.contents - ) + has_call = any(content.type == "function_call" for msg in response.messages for content in msg.contents) + has_result = any(content.type == "function_result" for msg in response.messages for content in msg.contents) assert has_call assert has_result @@ -1673,13 +1697,17 @@ def sometimes_fails(arg1: str) -> str: ChatResponse( messages=ChatMessage( role="assistant", - contents=[FunctionCallContent(call_id="1", name="sometimes_fails", arguments='{"arg1": "value1"}')], + contents=[ + Content.from_function_call(call_id="1", name="sometimes_fails", arguments='{"arg1": "value1"}') + ], ) ), ChatResponse( messages=ChatMessage( 
role="assistant", - contents=[FunctionCallContent(call_id="2", name="sometimes_fails", arguments='{"arg1": "value2"}')], + contents=[ + Content.from_function_call(call_id="2", name="sometimes_fails", arguments='{"arg1": "value2"}') + ], ) ), ChatResponse(messages=ChatMessage(role="assistant", text="done")), @@ -1692,13 +1720,13 @@ def sometimes_fails(arg1: str) -> str: content for msg in response.messages for content in msg.contents - if isinstance(content, FunctionResultContent) and content.exception + if content.type == "function_result" and content.exception ] success_results = [ content for msg in response.messages for content in msg.contents - if isinstance(content, FunctionResultContent) and content.result + if content.type == "function_result" and content.result ] assert len(error_results) >= 1 @@ -1723,7 +1751,7 @@ def func_with_approval(arg1: str) -> str: chat_client_base.streaming_responses = [ [ ChatResponseUpdate( - contents=[FunctionCallContent(call_id="1", name="test_func", arguments='{"arg1": "value1"}')], + contents=[Content.from_function_call(call_id="1", name="test_func", arguments='{"arg1": "value1"}')], role="assistant", ), ], @@ -1738,10 +1766,7 @@ def func_with_approval(arg1: str) -> str: # Should have function call update and approval request approval_requests = [ - content - for update in updates - for content in update.contents - if isinstance(content, FunctionApprovalRequestContent) + content for update in updates for content in update.contents if content.type == "function_approval_request" ] assert len(approval_requests) == 1 assert approval_requests[0].function_call.name == "test_func" @@ -1762,26 +1787,26 @@ def ai_func(arg1: str) -> str: chat_client_base.streaming_responses = [ [ ChatResponseUpdate( - contents=[FunctionCallContent(call_id="1", name="test_function", arguments='{"arg1":')], + contents=[Content.from_function_call(call_id="1", name="test_function", arguments='{"arg1":')], role="assistant", ), ChatResponseUpdate( - 
contents=[FunctionCallContent(call_id="1", name="test_function", arguments='"value1"}')], + contents=[Content.from_function_call(call_id="1", name="test_function", arguments='"value1"}')], role="assistant", ), ], [ ChatResponseUpdate( - contents=[FunctionCallContent(call_id="2", name="test_function", arguments='{"arg1":')], + contents=[Content.from_function_call(call_id="2", name="test_function", arguments='{"arg1":')], role="assistant", ), ChatResponseUpdate( - contents=[FunctionCallContent(call_id="2", name="test_function", arguments='"value2"}')], + contents=[Content.from_function_call(call_id="2", name="test_function", arguments='"value2"}')], role="assistant", ), ], # Failsafe response when tool_choice is set to "none" - [ChatResponseUpdate(contents=[TextContent(text="giving up on tools")], role="assistant")], + [ChatResponseUpdate(contents=[Content.from_text(text="giving up on tools")], role="assistant")], ] # Set max_iterations to 1 in additional_properties @@ -1811,7 +1836,7 @@ def ai_func(arg1: str) -> str: return f"Processed {arg1}" chat_client_base.streaming_responses = [ - [ChatResponseUpdate(contents=[TextContent(text="response without function calling")], role="assistant")], + [ChatResponseUpdate(contents=[Content.from_text(text="response without function calling")], role="assistant")], ] # Disable function invocation @@ -1840,23 +1865,29 @@ def error_func(arg1: str) -> str: chat_client_base.streaming_responses = [ [ ChatResponseUpdate( - contents=[FunctionCallContent(call_id="1", name="error_function", arguments='{"arg1": "value1"}')], + contents=[ + Content.from_function_call(call_id="1", name="error_function", arguments='{"arg1": "value1"}') + ], role="assistant", ), ], [ ChatResponseUpdate( - contents=[FunctionCallContent(call_id="2", name="error_function", arguments='{"arg1": "value2"}')], + contents=[ + Content.from_function_call(call_id="2", name="error_function", arguments='{"arg1": "value2"}') + ], role="assistant", ), ], [ 
ChatResponseUpdate( - contents=[FunctionCallContent(call_id="3", name="error_function", arguments='{"arg1": "value3"}')], + contents=[ + Content.from_function_call(call_id="3", name="error_function", arguments='{"arg1": "value3"}') + ], role="assistant", ), ], - [ChatResponseUpdate(contents=[TextContent(text="final response")], role="assistant")], + [ChatResponseUpdate(contents=[Content.from_text(text="final response")], role="assistant")], ] # Set max_consecutive_errors to 2 @@ -1873,14 +1904,12 @@ def error_func(arg1: str) -> str: content for update in updates for content in update.contents - if isinstance(content, FunctionResultContent) and content.exception + if content.type == "function_result" and content.exception ] # At least one error occurred assert len(error_results) >= 1 # Should have stopped making new function calls after hitting the error limit - function_calls = [ - content for update in updates for content in update.contents if isinstance(content, FunctionCallContent) - ] + function_calls = [content for update in updates for content in update.contents if content.type == "function_call"] # Should have made at most 2 function calls before stopping assert len(function_calls) <= 2 @@ -1900,11 +1929,13 @@ def known_func(arg1: str) -> str: chat_client_base.streaming_responses = [ [ ChatResponseUpdate( - contents=[FunctionCallContent(call_id="1", name="unknown_function", arguments='{"arg1": "value1"}')], + contents=[ + Content.from_function_call(call_id="1", name="unknown_function", arguments='{"arg1": "value1"}') + ], role="assistant", ), ], - [ChatResponseUpdate(contents=[TextContent(text="done")], role="assistant")], + [ChatResponseUpdate(contents=[Content.from_text(text="done")], role="assistant")], ] # Set terminate_on_unknown_calls to False (default) @@ -1918,7 +1949,7 @@ def known_func(arg1: str) -> str: # Should have a result message indicating the tool wasn't found result_contents = [ - content for update in updates for content in update.contents 
if isinstance(content, FunctionResultContent) + content for update in updates for content in update.contents if content.type == "function_result" ] assert len(result_contents) >= 1 result_str = result_contents[0].result or result_contents[0].exception or "" @@ -1941,7 +1972,9 @@ def known_func(arg1: str) -> str: chat_client_base.streaming_responses = [ [ ChatResponseUpdate( - contents=[FunctionCallContent(call_id="1", name="unknown_function", arguments='{"arg1": "value1"}')], + contents=[ + Content.from_function_call(call_id="1", name="unknown_function", arguments='{"arg1": "value1"}') + ], role="assistant", ), ], @@ -1970,11 +2003,13 @@ def error_func(arg1: str) -> str: chat_client_base.streaming_responses = [ [ ChatResponseUpdate( - contents=[FunctionCallContent(call_id="1", name="error_function", arguments='{"arg1": "value1"}')], + contents=[ + Content.from_function_call(call_id="1", name="error_function", arguments='{"arg1": "value1"}') + ], role="assistant", ), ], - [ChatResponseUpdate(contents=[TextContent(text="done")], role="assistant")], + [ChatResponseUpdate(contents=[Content.from_text(text="done")], role="assistant")], ] # Set include_detailed_errors to True @@ -1988,7 +2023,7 @@ def error_func(arg1: str) -> str: # Should have detailed error message error_result = next( - content for update in updates for content in update.contents if isinstance(content, FunctionResultContent) + content for update in updates for content in update.contents if content.type == "function_result" ) assert error_result.result is not None assert error_result.exception is not None @@ -2008,11 +2043,13 @@ def error_func(arg1: str) -> str: chat_client_base.streaming_responses = [ [ ChatResponseUpdate( - contents=[FunctionCallContent(call_id="1", name="error_function", arguments='{"arg1": "value1"}')], + contents=[ + Content.from_function_call(call_id="1", name="error_function", arguments='{"arg1": "value1"}') + ], role="assistant", ), ], - 
[ChatResponseUpdate(contents=[TextContent(text="done")], role="assistant")], + [ChatResponseUpdate(contents=[Content.from_text(text="done")], role="assistant")], ] # Set include_detailed_errors to False (default) @@ -2026,7 +2063,7 @@ def error_func(arg1: str) -> str: # Should have a generic error message error_result = next( - content for update in updates for content in update.contents if isinstance(content, FunctionResultContent) + content for update in updates for content in update.contents if content.type == "function_result" ) assert error_result.result is not None assert error_result.exception is not None @@ -2044,11 +2081,13 @@ def typed_func(arg1: int) -> str: # Expects int, not str chat_client_base.streaming_responses = [ [ ChatResponseUpdate( - contents=[FunctionCallContent(call_id="1", name="typed_function", arguments='{"arg1": "not_an_int"}')], + contents=[ + Content.from_function_call(call_id="1", name="typed_function", arguments='{"arg1": "not_an_int"}') + ], role="assistant", ), ], - [ChatResponseUpdate(contents=[TextContent(text="done")], role="assistant")], + [ChatResponseUpdate(contents=[Content.from_text(text="done")], role="assistant")], ] # Set include_detailed_errors to True @@ -2062,7 +2101,7 @@ def typed_func(arg1: int) -> str: # Expects int, not str # Should have detailed validation error error_result = next( - content for update in updates for content in update.contents if isinstance(content, FunctionResultContent) + content for update in updates for content in update.contents if content.type == "function_result" ) assert error_result.result is not None assert error_result.exception is not None @@ -2080,11 +2119,13 @@ def typed_func(arg1: int) -> str: # Expects int, not str chat_client_base.streaming_responses = [ [ ChatResponseUpdate( - contents=[FunctionCallContent(call_id="1", name="typed_function", arguments='{"arg1": "not_an_int"}')], + contents=[ + Content.from_function_call(call_id="1", name="typed_function", arguments='{"arg1": 
"not_an_int"}') + ], role="assistant", ), ], - [ChatResponseUpdate(contents=[TextContent(text="done")], role="assistant")], + [ChatResponseUpdate(contents=[Content.from_text(text="done")], role="assistant")], ] # Set include_detailed_errors to False (default) @@ -2098,7 +2139,7 @@ def typed_func(arg1: int) -> str: # Expects int, not str # Should have generic validation error error_result = next( - content for update in updates for content in update.contents if isinstance(content, FunctionResultContent) + content for update in updates for content in update.contents if content.type == "function_result" ) assert error_result.result is not None assert error_result.exception is not None @@ -2129,15 +2170,15 @@ async def func2(arg1: str) -> str: chat_client_base.streaming_responses = [ [ ChatResponseUpdate( - contents=[FunctionCallContent(call_id="1", name="func1", arguments='{"arg1": "value1"}')], + contents=[Content.from_function_call(call_id="1", name="func1", arguments='{"arg1": "value1"}')], role="assistant", ), ChatResponseUpdate( - contents=[FunctionCallContent(call_id="2", name="func2", arguments='{"arg1": "value2"}')], + contents=[Content.from_function_call(call_id="2", name="func2", arguments='{"arg1": "value2"}')], role="assistant", ), ], - [ChatResponseUpdate(contents=[TextContent(text="done")], role="assistant")], + [ChatResponseUpdate(contents=[Content.from_text(text="done")], role="assistant")], ] updates = [] @@ -2153,9 +2194,7 @@ async def func2(arg1: str) -> str: assert "func2_end" in exec_order # Should have results for both - results = [ - content for update in updates for content in update.contents if isinstance(content, FunctionResultContent) - ] + results = [content for update in updates for content in update.contents if content.type == "function_result"] assert len(results) == 2 @@ -2173,7 +2212,7 @@ def func_with_approval(arg1: str) -> str: [ ChatResponseUpdate( contents=[ - FunctionCallContent(call_id="1", name="test_func", arguments='{"arg1": 
"value1"}'), + Content.from_function_call(call_id="1", name="test_func", arguments='{"arg1": "value1"}'), ], role="assistant", ), @@ -2188,10 +2227,7 @@ def func_with_approval(arg1: str) -> str: # Should have updates containing both the call and approval request approval_requests = [ - content - for update in updates - for content in update.contents - if isinstance(content, FunctionApprovalRequestContent) + content for update in updates for content in update.contents if content.type == "function_approval_request" ] assert len(approval_requests) == 1 assert exec_counter == 0 @@ -2213,17 +2249,21 @@ def sometimes_fails(arg1: str) -> str: chat_client_base.streaming_responses = [ [ ChatResponseUpdate( - contents=[FunctionCallContent(call_id="1", name="sometimes_fails", arguments='{"arg1": "value1"}')], + contents=[ + Content.from_function_call(call_id="1", name="sometimes_fails", arguments='{"arg1": "value1"}') + ], role="assistant", ), ], [ ChatResponseUpdate( - contents=[FunctionCallContent(call_id="2", name="sometimes_fails", arguments='{"arg1": "value2"}')], + contents=[ + Content.from_function_call(call_id="2", name="sometimes_fails", arguments='{"arg1": "value2"}') + ], role="assistant", ), ], - [ChatResponseUpdate(contents=[TextContent(text="done")], role="assistant")], + [ChatResponseUpdate(contents=[Content.from_text(text="done")], role="assistant")], ] updates = [] @@ -2237,13 +2277,13 @@ def sometimes_fails(arg1: str) -> str: content for update in updates for content in update.contents - if isinstance(content, FunctionResultContent) and content.exception + if content.type == "function_result" and content.exception ] success_results = [ content for update in updates for content in update.contents - if isinstance(content, FunctionResultContent) and content.result + if content.type == "function_result" and content.result ] assert len(error_results) >= 1 @@ -2278,7 +2318,9 @@ def ai_func(arg1: str) -> str: ChatResponse( messages=ChatMessage( role="assistant", - 
contents=[FunctionCallContent(call_id="1", name="test_function", arguments='{"arg1": "value1"}')], + contents=[ + Content.from_function_call(call_id="1", name="test_function", arguments='{"arg1": "value1"}') + ], ) ), ChatResponse(messages=ChatMessage(role="assistant", text="done")), @@ -2297,9 +2339,9 @@ def ai_func(arg1: str) -> str: # The loop should NOT have continued to call the LLM again assert len(response.messages) == 2 assert response.messages[0].role == Role.ASSISTANT - assert isinstance(response.messages[0].contents[0], FunctionCallContent) + assert response.messages[0].contents[0].type == "function_call" assert response.messages[1].role == Role.TOOL - assert isinstance(response.messages[1].contents[0], FunctionResultContent) + assert response.messages[1].contents[0].type == "function_result" assert response.messages[1].contents[0].result == "terminated by middleware" # Verify the second response is still in the queue (wasn't consumed) @@ -2343,8 +2385,10 @@ def terminating_func(arg1: str) -> str: messages=ChatMessage( role="assistant", contents=[ - FunctionCallContent(call_id="1", name="normal_function", arguments='{"arg1": "value1"}'), - FunctionCallContent(call_id="2", name="terminating_function", arguments='{"arg1": "value2"}'), + Content.from_function_call(call_id="1", name="normal_function", arguments='{"arg1": "value1"}'), + Content.from_function_call( + call_id="2", name="terminating_function", arguments='{"arg1": "value2"}' + ), ], ) ), @@ -2389,13 +2433,15 @@ def ai_func(arg1: str) -> str: chat_client_base.streaming_responses = [ [ ChatResponseUpdate( - contents=[FunctionCallContent(call_id="1", name="test_function", arguments='{"arg1": "value1"}')], + contents=[ + Content.from_function_call(call_id="1", name="test_function", arguments='{"arg1": "value1"}') + ], role="assistant", ), ], [ ChatResponseUpdate( - contents=[TextContent(text="done")], + contents=[Content.from_text(text="done")], role="assistant", ) ], diff --git 
a/python/packages/core/tests/core/test_kwargs_propagation_to_ai_function.py b/python/packages/core/tests/core/test_kwargs_propagation_to_ai_function.py index fc6acb435d..1a206d9646 100644 --- a/python/packages/core/tests/core/test_kwargs_propagation_to_ai_function.py +++ b/python/packages/core/tests/core/test_kwargs_propagation_to_ai_function.py @@ -8,8 +8,7 @@ ChatMessage, ChatResponse, ChatResponseUpdate, - FunctionCallContent, - TextContent, + Content, ai_function, ) from agent_framework._tools import _handle_function_calls_response, _handle_function_calls_streaming_response @@ -42,7 +41,9 @@ async def mock_get_response(self, messages, **kwargs): ChatMessage( role="assistant", contents=[ - FunctionCallContent(call_id="call_1", name="capture_kwargs_tool", arguments='{"x": 42}') + Content.from_function_call( + call_id="call_1", name="capture_kwargs_tool", arguments='{"x": 42}' + ) ], ) ] @@ -94,7 +95,9 @@ async def mock_get_response(self, messages, **kwargs): messages=[ ChatMessage( role="assistant", - contents=[FunctionCallContent(call_id="call_1", name="simple_tool", arguments='{"x": 99}')], + contents=[ + Content.from_function_call(call_id="call_1", name="simple_tool", arguments='{"x": 99}') + ], ) ] ) @@ -136,10 +139,10 @@ async def mock_get_response(self, messages, **kwargs): ChatMessage( role="assistant", contents=[ - FunctionCallContent( + Content.from_function_call( call_id="call_1", name="tracking_tool", arguments='{"name": "first"}' ), - FunctionCallContent( + Content.from_function_call( call_id="call_2", name="tracking_tool", arguments='{"name": "second"}' ), ], @@ -187,7 +190,7 @@ async def mock_get_streaming_response(self, messages, **kwargs): yield ChatResponseUpdate( role="assistant", contents=[ - FunctionCallContent( + Content.from_function_call( call_id="stream_call_1", name="streaming_capture_tool", arguments='{"value": "streaming-test"}', @@ -197,7 +200,9 @@ async def mock_get_streaming_response(self, messages, **kwargs): ) else: # Second call: 
return final response - yield ChatResponseUpdate(text=TextContent(text="Stream complete!"), role="assistant", is_finished=True) + yield ChatResponseUpdate( + text=Content.from_text(text="Stream complete!"), role="assistant", is_finished=True + ) wrapped = _handle_function_calls_streaming_response(mock_get_streaming_response) diff --git a/python/packages/core/tests/core/test_mcp.py b/python/packages/core/tests/core/test_mcp.py index c4e8cb09df..28f9286294 100644 --- a/python/packages/core/tests/core/test_mcp.py +++ b/python/packages/core/tests/core/test_mcp.py @@ -13,14 +13,12 @@ from agent_framework import ( ChatMessage, - DataContent, + Content, MCPStdioTool, MCPStreamableHTTPTool, MCPWebsocketTool, Role, - TextContent, ToolProtocol, - UriContent, ) from agent_framework._mcp import ( MCPTool, @@ -65,7 +63,7 @@ def test_mcp_prompt_message_to_ai_content(): assert isinstance(ai_content, ChatMessage) assert ai_content.role.value == "user" assert len(ai_content.contents) == 1 - assert isinstance(ai_content.contents[0], TextContent) + assert ai_content.contents[0].type == "text" assert ai_content.contents[0].text == "Hello, world!" 
assert ai_content.raw_representation == mcp_message @@ -75,20 +73,20 @@ def test_parse_contents_from_mcp_tool_result(): mcp_result = types.CallToolResult( content=[ types.TextContent(type="text", text="Result text"), - types.ImageContent(type="image", data="xyz", mimeType="image/png"), - types.ImageContent(type="image", data=b"abc", mimeType="image/webp"), + types.ImageContent(type="image", data="eHl6", mimeType="image/png"), # base64 for "xyz" + types.ImageContent(type="image", data="YWJj", mimeType="image/webp"), # base64 for "abc" ] ) ai_contents = _parse_contents_from_mcp_tool_result(mcp_result) assert len(ai_contents) == 3 - assert isinstance(ai_contents[0], TextContent) + assert ai_contents[0].type == "text" assert ai_contents[0].text == "Result text" - assert isinstance(ai_contents[1], DataContent) - assert ai_contents[1].uri == "data:image/png;base64,xyz" + assert ai_contents[1].type == "data" + assert ai_contents[1].uri == "data:image/png;base64,eHl6" assert ai_contents[1].media_type == "image/png" - assert isinstance(ai_contents[2], DataContent) - assert ai_contents[2].uri == "data:image/webp;base64,abc" + assert ai_contents[2].type == "data" + assert ai_contents[2].uri == "data:image/webp;base64,YWJj" assert ai_contents[2].media_type == "image/webp" @@ -103,7 +101,7 @@ def test_mcp_call_tool_result_with_meta_error(): ai_contents = _parse_contents_from_mcp_tool_result(mcp_result) assert len(ai_contents) == 1 - assert isinstance(ai_contents[0], TextContent) + assert ai_contents[0].type == "text" assert ai_contents[0].text == "Error occurred" # Check that _meta data is merged into additional_properties @@ -134,7 +132,7 @@ def test_mcp_call_tool_result_with_meta_arbitrary_data(): ai_contents = _parse_contents_from_mcp_tool_result(mcp_result) assert len(ai_contents) == 1 - assert isinstance(ai_contents[0], TextContent) + assert ai_contents[0].type == "text" assert ai_contents[0].text == "Success result" # Check that _meta data is preserved in 
additional_properties @@ -172,7 +170,7 @@ def test_mcp_call_tool_result_with_meta_none(): ai_contents = _parse_contents_from_mcp_tool_result(mcp_result) assert len(ai_contents) == 1 - assert isinstance(ai_contents[0], TextContent) + assert ai_contents[0].type == "text" assert ai_contents[0].text == "No meta test" # Should handle gracefully when no _meta field exists @@ -187,7 +185,7 @@ def test_mcp_call_tool_result_regression_successful_workflow(): mcp_result = types.CallToolResult( content=[ types.TextContent(type="text", text="Success message"), - types.ImageContent(type="image", data="abc123", mimeType="image/jpeg"), + types.ImageContent(type="image", data="YWJjMTIz", mimeType="image/jpeg"), # base64 for "abc123" ] ) @@ -197,12 +195,12 @@ def test_mcp_call_tool_result_regression_successful_workflow(): assert len(ai_contents) == 2 text_content = ai_contents[0] - assert isinstance(text_content, TextContent) + assert text_content.type == "text" assert text_content.text == "Success message" image_content = ai_contents[1] - assert isinstance(image_content, DataContent) - assert image_content.uri == "data:image/jpeg;base64,abc123" + assert image_content.type == "data" + assert image_content.uri == "data:image/jpeg;base64,YWJjMTIz" assert image_content.media_type == "image/jpeg" # Should have no additional_properties when no _meta field @@ -215,30 +213,31 @@ def test_mcp_content_types_to_ai_content_text(): mcp_content = types.TextContent(type="text", text="Sample text") ai_content = _parse_content_from_mcp(mcp_content)[0] - assert isinstance(ai_content, TextContent) + assert ai_content.type == "text" assert ai_content.text == "Sample text" assert ai_content.raw_representation == mcp_content def test_mcp_content_types_to_ai_content_image(): """Test conversion of MCP image content to AI content.""" - mcp_content = types.ImageContent(type="image", data="abc", mimeType="image/jpeg") - mcp_content = types.ImageContent(type="image", data=b"abc", mimeType="image/jpeg") + # 
MCP can send data as base64 string or as bytes + mcp_content = types.ImageContent(type="image", data="YWJj", mimeType="image/jpeg") # base64 for b"abc" ai_content = _parse_content_from_mcp(mcp_content)[0] - assert isinstance(ai_content, DataContent) - assert ai_content.uri == "data:image/jpeg;base64,abc" + assert ai_content.type == "data" + assert ai_content.uri == "data:image/jpeg;base64,YWJj" assert ai_content.media_type == "image/jpeg" assert ai_content.raw_representation == mcp_content def test_mcp_content_types_to_ai_content_audio(): """Test conversion of MCP audio content to AI content.""" - mcp_content = types.AudioContent(type="audio", data="def", mimeType="audio/wav") + # Use properly padded base64 + mcp_content = types.AudioContent(type="audio", data="ZGVm", mimeType="audio/wav") # base64 for b"def" ai_content = _parse_content_from_mcp(mcp_content)[0] - assert isinstance(ai_content, DataContent) - assert ai_content.uri == "data:audio/wav;base64,def" + assert ai_content.type == "data" + assert ai_content.uri == "data:audio/wav;base64,ZGVm" assert ai_content.media_type == "audio/wav" assert ai_content.raw_representation == mcp_content @@ -253,7 +252,7 @@ def test_mcp_content_types_to_ai_content_resource_link(): ) ai_content = _parse_content_from_mcp(mcp_content)[0] - assert isinstance(ai_content, UriContent) + assert ai_content.type == "uri" assert ai_content.uri == "https://example.com/resource" assert ai_content.media_type == "application/json" assert ai_content.raw_representation == mcp_content @@ -269,7 +268,7 @@ def test_mcp_content_types_to_ai_content_embedded_resource_text(): mcp_content = types.EmbeddedResource(type="resource", resource=text_resource) ai_content = _parse_content_from_mcp(mcp_content)[0] - assert isinstance(ai_content, TextContent) + assert ai_content.type == "text" assert ai_content.text == "Embedded text content" assert ai_content.raw_representation == mcp_content @@ -285,7 +284,7 @@ def 
test_mcp_content_types_to_ai_content_embedded_resource_blob(): mcp_content = types.EmbeddedResource(type="resource", resource=blob_resource) ai_content = _parse_content_from_mcp(mcp_content)[0] - assert isinstance(ai_content, DataContent) + assert ai_content.type == "data" assert ai_content.uri == "data:application/octet-stream;base64,dGVzdCBkYXRh" assert ai_content.media_type == "application/octet-stream" assert ai_content.raw_representation == mcp_content @@ -293,7 +292,7 @@ def test_mcp_content_types_to_ai_content_embedded_resource_blob(): def test_ai_content_to_mcp_content_types_text(): """Test conversion of AI text content to MCP content.""" - ai_content = TextContent(text="Sample text") + ai_content = Content.from_text(text="Sample text") mcp_content = _prepare_content_for_mcp(ai_content) assert isinstance(mcp_content, types.TextContent) @@ -303,7 +302,7 @@ def test_ai_content_to_mcp_content_types_text(): def test_ai_content_to_mcp_content_types_data_image(): """Test conversion of AI data content to MCP content.""" - ai_content = DataContent(uri="data:image/png;base64,xyz", media_type="image/png") + ai_content = Content.from_uri(uri="data:image/png;base64,xyz", media_type="image/png") mcp_content = _prepare_content_for_mcp(ai_content) assert isinstance(mcp_content, types.ImageContent) @@ -314,7 +313,7 @@ def test_ai_content_to_mcp_content_types_data_image(): def test_ai_content_to_mcp_content_types_data_audio(): """Test conversion of AI data content to MCP content.""" - ai_content = DataContent(uri="data:audio/mpeg;base64,xyz", media_type="audio/mpeg") + ai_content = Content.from_uri(uri="data:audio/mpeg;base64,xyz", media_type="audio/mpeg") mcp_content = _prepare_content_for_mcp(ai_content) assert isinstance(mcp_content, types.AudioContent) @@ -325,7 +324,7 @@ def test_ai_content_to_mcp_content_types_data_audio(): def test_ai_content_to_mcp_content_types_data_binary(): """Test conversion of AI data content to MCP content.""" - ai_content = DataContent( + 
ai_content = Content.from_uri( uri="data:application/octet-stream;base64,xyz", media_type="application/octet-stream", ) @@ -339,7 +338,7 @@ def test_ai_content_to_mcp_content_types_data_binary(): def test_ai_content_to_mcp_content_types_uri(): """Test conversion of AI URI content to MCP content.""" - ai_content = UriContent(uri="https://example.com/resource", media_type="application/json") + ai_content = Content.from_uri(uri="https://example.com/resource", media_type="application/json") mcp_content = _prepare_content_for_mcp(ai_content) assert isinstance(mcp_content, types.ResourceLink) @@ -352,8 +351,8 @@ def test_prepare_message_for_mcp(): message = ChatMessage( role="user", contents=[ - TextContent(text="test"), - DataContent(uri="data:image/png;base64,xyz", media_type="image/png"), + Content.from_text(text="test"), + Content.from_uri(uri="data:image/png;base64,xyz", media_type="image/png"), ], ) mcp_contents = _prepare_message_for_mcp(message) @@ -871,7 +870,7 @@ def get_mcp_client(self) -> _AsyncGeneratorContextManager[Any, None]: result = await func.invoke(param="test_value") assert len(result) == 1 - assert isinstance(result[0], TextContent) + assert result[0].type == "text" assert result[0].text == "Tool executed with metadata" # Verify that _meta data is present in additional_properties @@ -920,7 +919,7 @@ def get_mcp_client(self) -> _AsyncGeneratorContextManager[Any, None]: result = await func.invoke(param="test_value") assert len(result) == 1 - assert isinstance(result[0], TextContent) + assert result[0].type == "text" assert result[0].text == "Tool executed successfully" @@ -969,7 +968,7 @@ def get_mcp_client(self) -> _AsyncGeneratorContextManager[Any, None]: result = await func.invoke(params={"customer_id": 251}) assert len(result) == 1 - assert isinstance(result[0], TextContent) + assert result[0].type == "text" # Verify the session.call_tool was called with the correct nested structure server.session.call_tool.assert_called_once() @@ -1413,7 +1412,7 
@@ async def test_mcp_tool_sampling_callback_chat_client_exception(): async def test_mcp_tool_sampling_callback_no_valid_content(): """Test sampling callback when response has no valid content types.""" - from agent_framework import ChatMessage, DataContent, Role + from agent_framework import ChatMessage, Role tool = MCPStdioTool(name="test_tool", command="python") @@ -1424,7 +1423,7 @@ async def test_mcp_tool_sampling_callback_no_valid_content(): ChatMessage( role=Role.ASSISTANT, contents=[ - DataContent( + Content.from_uri( uri="data:application/json;base64,e30K", media_type="application/json", ) @@ -2364,3 +2363,154 @@ async def always_fail(*args, **kwargs): assert mock_connect.call_count >= 1 # Verify error message indicates reconnection failure assert "failed to reconnect" in str(exc_info.value).lower() + + +async def test_mcp_tool_reconnection_handles_cross_task_cancel_scope_error(): + """Test that reconnection gracefully handles anyio cancel scope errors. + + This tests the fix for the bug where calling connect(reset=True) from a + different task than where the connection was originally established would + cause: RuntimeError: Attempted to exit cancel scope in a different task + than it was entered in + + This happens when using multiple MCP tools with AG-UI streaming - the first + tool call succeeds, but when the connection closes, the second tool call + triggers a reconnection from within the streaming loop (a different task). 
+ """ + from contextlib import AsyncExitStack + + from agent_framework._mcp import MCPStdioTool + + # Use load_tools=False and load_prompts=False to avoid triggering them during connect() + tool = MCPStdioTool( + name="test_server", + command="test_command", + args=["arg1"], + load_tools=False, + load_prompts=False, + ) + + # Mock the exit stack to raise the cross-task cancel scope error + mock_exit_stack = AsyncMock(spec=AsyncExitStack) + mock_exit_stack.aclose = AsyncMock( + side_effect=RuntimeError("Attempted to exit cancel scope in a different task than it was entered in") + ) + tool._exit_stack = mock_exit_stack + tool.session = Mock() + tool.is_connected = True + + # Mock get_mcp_client to return a mock transport + mock_transport = (Mock(), Mock()) + mock_context = AsyncMock() + mock_context.__aenter__ = AsyncMock(return_value=mock_transport) + mock_context.__aexit__ = AsyncMock() + + with ( + patch.object(tool, "get_mcp_client", return_value=mock_context), + patch("agent_framework._mcp.ClientSession") as mock_session_class, + ): + mock_session = Mock() + mock_session._request_id = 1 + mock_session.initialize = AsyncMock() + mock_session.set_logging_level = AsyncMock() + mock_session_context = AsyncMock() + mock_session_context.__aenter__ = AsyncMock(return_value=mock_session) + mock_session_context.__aexit__ = AsyncMock() + mock_session_class.return_value = mock_session_context + + # This should NOT raise even though aclose() raised the cancel scope error + # The _safe_close_exit_stack method should catch and log the error + await tool.connect(reset=True) + + # Verify a new exit stack was created (the old mock was replaced) + assert tool._exit_stack is not mock_exit_stack + assert tool.session is not None + assert tool.is_connected is True + + +async def test_mcp_tool_safe_close_reraises_other_runtime_errors(): + """Test that _safe_close_exit_stack re-raises RuntimeErrors that aren't cancel scope related.""" + from contextlib import AsyncExitStack + + from 
agent_framework._mcp import MCPStdioTool + + tool = MCPStdioTool( + name="test_server", + command="test_command", + args=["arg1"], + load_tools=True, + ) + + # Mock the exit stack to raise a different RuntimeError + mock_exit_stack = AsyncMock(spec=AsyncExitStack) + mock_exit_stack.aclose = AsyncMock(side_effect=RuntimeError("Some other runtime error")) + tool._exit_stack = mock_exit_stack + + # This should re-raise the RuntimeError since it's not about cancel scopes + with pytest.raises(RuntimeError) as exc_info: + await tool._safe_close_exit_stack() + + assert "Some other runtime error" in str(exc_info.value) + + +async def test_mcp_tool_safe_close_handles_alternate_cancel_scope_error(): + """Test that _safe_close_exit_stack handles the alternate cancel scope error message. + + anyio has multiple variants of cancel scope errors: + - "Attempted to exit cancel scope in a different task than it was entered in" + - "Attempted to exit a cancel scope that isn't the current task's current cancel scope" + """ + from contextlib import AsyncExitStack + + from agent_framework._mcp import MCPStdioTool + + tool = MCPStdioTool( + name="test_server", + command="test_command", + args=["arg1"], + load_tools=False, + load_prompts=False, + ) + + # Mock the exit stack to raise the alternate cancel scope error + mock_exit_stack = AsyncMock(spec=AsyncExitStack) + mock_exit_stack.aclose = AsyncMock( + side_effect=RuntimeError("Attempted to exit a cancel scope that isn't the current task's current cancel scope") + ) + tool._exit_stack = mock_exit_stack + + # This should NOT raise - the error should be caught and logged + await tool._safe_close_exit_stack() + + # Verify aclose was called + mock_exit_stack.aclose.assert_called_once() + + +async def test_mcp_tool_safe_close_handles_cancelled_error(): + """Test that _safe_close_exit_stack handles asyncio.CancelledError. + + CancelledError can occur during cleanup when anyio cancel scopes are involved. 
+ """ + import asyncio + from contextlib import AsyncExitStack + + from agent_framework._mcp import MCPStdioTool + + tool = MCPStdioTool( + name="test_server", + command="test_command", + args=["arg1"], + load_tools=False, + load_prompts=False, + ) + + # Mock the exit stack to raise CancelledError + mock_exit_stack = AsyncMock(spec=AsyncExitStack) + mock_exit_stack.aclose = AsyncMock(side_effect=asyncio.CancelledError()) + tool._exit_stack = mock_exit_stack + + # This should NOT raise - the CancelledError should be caught and logged + await tool._safe_close_exit_stack() + + # Verify aclose was called + mock_exit_stack.aclose.assert_called_once() diff --git a/python/packages/core/tests/core/test_middleware.py b/python/packages/core/tests/core/test_middleware.py index ebb833f2b4..441896f92b 100644 --- a/python/packages/core/tests/core/test_middleware.py +++ b/python/packages/core/tests/core/test_middleware.py @@ -14,8 +14,8 @@ ChatMessage, ChatResponse, ChatResponseUpdate, + Content, Role, - TextContent, ) from agent_framework._middleware import ( AgentMiddleware, @@ -217,8 +217,8 @@ async def test_execute_stream_no_middleware(self, mock_agent: AgentProtocol) -> context = AgentRunContext(agent=mock_agent, messages=messages) async def final_handler(ctx: AgentRunContext) -> AsyncIterable[AgentResponseUpdate]: - yield AgentResponseUpdate(contents=[TextContent(text="chunk1")]) - yield AgentResponseUpdate(contents=[TextContent(text="chunk2")]) + yield AgentResponseUpdate(contents=[Content.from_text(text="chunk1")]) + yield AgentResponseUpdate(contents=[Content.from_text(text="chunk2")]) updates: list[AgentResponseUpdate] = [] async for update in pipeline.execute_stream(mock_agent, messages, context, final_handler): @@ -250,8 +250,8 @@ async def process( async def final_handler(ctx: AgentRunContext) -> AsyncIterable[AgentResponseUpdate]: execution_order.append("handler_start") - yield AgentResponseUpdate(contents=[TextContent(text="chunk1")]) - yield 
AgentResponseUpdate(contents=[TextContent(text="chunk2")]) + yield AgentResponseUpdate(contents=[Content.from_text(text="chunk1")]) + yield AgentResponseUpdate(contents=[Content.from_text(text="chunk2")]) execution_order.append("handler_end") updates: list[AgentResponseUpdate] = [] @@ -313,8 +313,8 @@ async def test_execute_stream_with_pre_next_termination(self, mock_agent: AgentP async def final_handler(ctx: AgentRunContext) -> AsyncIterable[AgentResponseUpdate]: # Handler should not be executed when terminated before next() execution_order.append("handler_start") - yield AgentResponseUpdate(contents=[TextContent(text="chunk1")]) - yield AgentResponseUpdate(contents=[TextContent(text="chunk2")]) + yield AgentResponseUpdate(contents=[Content.from_text(text="chunk1")]) + yield AgentResponseUpdate(contents=[Content.from_text(text="chunk2")]) execution_order.append("handler_end") updates: list[AgentResponseUpdate] = [] @@ -336,8 +336,8 @@ async def test_execute_stream_with_post_next_termination(self, mock_agent: Agent async def final_handler(ctx: AgentRunContext) -> AsyncIterable[AgentResponseUpdate]: execution_order.append("handler_start") - yield AgentResponseUpdate(contents=[TextContent(text="chunk1")]) - yield AgentResponseUpdate(contents=[TextContent(text="chunk2")]) + yield AgentResponseUpdate(contents=[Content.from_text(text="chunk1")]) + yield AgentResponseUpdate(contents=[Content.from_text(text="chunk2")]) execution_order.append("handler_end") updates: list[AgentResponseUpdate] = [] @@ -609,8 +609,8 @@ async def test_execute_stream_no_middleware(self, mock_chat_client: Any) -> None context = ChatContext(chat_client=mock_chat_client, messages=messages, options=chat_options) async def final_handler(ctx: ChatContext) -> AsyncIterable[ChatResponseUpdate]: - yield ChatResponseUpdate(contents=[TextContent(text="chunk1")]) - yield ChatResponseUpdate(contents=[TextContent(text="chunk2")]) + yield ChatResponseUpdate(contents=[Content.from_text(text="chunk1")]) + yield 
ChatResponseUpdate(contents=[Content.from_text(text="chunk2")]) updates: list[ChatResponseUpdate] = [] async for update in pipeline.execute_stream(mock_chat_client, messages, chat_options, context, final_handler): @@ -641,8 +641,8 @@ async def process(self, context: ChatContext, next: Callable[[ChatContext], Awai async def final_handler(ctx: ChatContext) -> AsyncIterable[ChatResponseUpdate]: execution_order.append("handler_start") - yield ChatResponseUpdate(contents=[TextContent(text="chunk1")]) - yield ChatResponseUpdate(contents=[TextContent(text="chunk2")]) + yield ChatResponseUpdate(contents=[Content.from_text(text="chunk1")]) + yield ChatResponseUpdate(contents=[Content.from_text(text="chunk2")]) execution_order.append("handler_end") updates: list[ChatResponseUpdate] = [] @@ -706,8 +706,8 @@ async def test_execute_stream_with_pre_next_termination(self, mock_chat_client: async def final_handler(ctx: ChatContext) -> AsyncIterable[ChatResponseUpdate]: # Handler should not be executed when terminated before next() execution_order.append("handler_start") - yield ChatResponseUpdate(contents=[TextContent(text="chunk1")]) - yield ChatResponseUpdate(contents=[TextContent(text="chunk2")]) + yield ChatResponseUpdate(contents=[Content.from_text(text="chunk1")]) + yield ChatResponseUpdate(contents=[Content.from_text(text="chunk2")]) execution_order.append("handler_end") updates: list[ChatResponseUpdate] = [] @@ -730,8 +730,8 @@ async def test_execute_stream_with_post_next_termination(self, mock_chat_client: async def final_handler(ctx: ChatContext) -> AsyncIterable[ChatResponseUpdate]: execution_order.append("handler_start") - yield ChatResponseUpdate(contents=[TextContent(text="chunk1")]) - yield ChatResponseUpdate(contents=[TextContent(text="chunk2")]) + yield ChatResponseUpdate(contents=[Content.from_text(text="chunk1")]) + yield ChatResponseUpdate(contents=[Content.from_text(text="chunk2")]) execution_order.append("handler_end") updates: list[ChatResponseUpdate] = [] 
@@ -1264,7 +1264,7 @@ async def final_handler(ctx: AgentRunContext) -> AgentResponse: async def final_stream_handler(ctx: AgentRunContext) -> AsyncIterable[AgentResponseUpdate]: streaming_flags.append(ctx.is_streaming) - yield AgentResponseUpdate(contents=[TextContent(text="chunk")]) + yield AgentResponseUpdate(contents=[Content.from_text(text="chunk")]) updates: list[AgentResponseUpdate] = [] async for update in pipeline.execute_stream(mock_agent, messages, context_stream, final_stream_handler): @@ -1292,9 +1292,9 @@ async def process( async def final_stream_handler(ctx: AgentRunContext) -> AsyncIterable[AgentResponseUpdate]: chunks_processed.append("stream_start") - yield AgentResponseUpdate(contents=[TextContent(text="chunk1")]) + yield AgentResponseUpdate(contents=[Content.from_text(text="chunk1")]) chunks_processed.append("chunk1_yielded") - yield AgentResponseUpdate(contents=[TextContent(text="chunk2")]) + yield AgentResponseUpdate(contents=[Content.from_text(text="chunk2")]) chunks_processed.append("chunk2_yielded") chunks_processed.append("stream_end") @@ -1342,7 +1342,7 @@ async def final_handler(ctx: ChatContext) -> ChatResponse: async def final_stream_handler(ctx: ChatContext) -> AsyncIterable[ChatResponseUpdate]: streaming_flags.append(ctx.is_streaming) - yield ChatResponseUpdate(contents=[TextContent(text="chunk")]) + yield ChatResponseUpdate(contents=[Content.from_text(text="chunk")]) updates: list[ChatResponseUpdate] = [] async for update in pipeline.execute_stream( @@ -1371,9 +1371,9 @@ async def process(self, context: ChatContext, next: Callable[[ChatContext], Awai async def final_stream_handler(ctx: ChatContext) -> AsyncIterable[ChatResponseUpdate]: chunks_processed.append("stream_start") - yield ChatResponseUpdate(contents=[TextContent(text="chunk1")]) + yield ChatResponseUpdate(contents=[Content.from_text(text="chunk1")]) chunks_processed.append("chunk1_yielded") - yield ChatResponseUpdate(contents=[TextContent(text="chunk2")]) + yield 
ChatResponseUpdate(contents=[Content.from_text(text="chunk2")]) chunks_processed.append("chunk2_yielded") chunks_processed.append("stream_end") @@ -1486,7 +1486,7 @@ async def process( async def final_handler(ctx: AgentRunContext) -> AsyncIterable[AgentResponseUpdate]: nonlocal handler_called handler_called = True - yield AgentResponseUpdate(contents=[TextContent(text="should not execute")]) + yield AgentResponseUpdate(contents=[Content.from_text(text="should not execute")]) # When middleware doesn't call next(), streaming should yield no updates updates: list[AgentResponseUpdate] = [] @@ -1617,7 +1617,7 @@ async def process(self, context: ChatContext, next: Callable[[ChatContext], Awai async def final_handler(ctx: ChatContext) -> AsyncIterable[ChatResponseUpdate]: nonlocal handler_called handler_called = True - yield ChatResponseUpdate(contents=[TextContent(text="should not execute")]) + yield ChatResponseUpdate(contents=[Content.from_text(text="should not execute")]) # When middleware doesn't call next(), streaming should yield no updates updates: list[ChatResponseUpdate] = [] diff --git a/python/packages/core/tests/core/test_middleware_context_result.py b/python/packages/core/tests/core/test_middleware_context_result.py index bfcfb48e5f..f939a0f409 100644 --- a/python/packages/core/tests/core/test_middleware_context_result.py +++ b/python/packages/core/tests/core/test_middleware_context_result.py @@ -13,8 +13,8 @@ AgentResponseUpdate, ChatAgent, ChatMessage, + Content, Role, - TextContent, ) from agent_framework._middleware import ( AgentMiddleware, @@ -75,8 +75,8 @@ async def test_agent_middleware_response_override_streaming(self, mock_agent: Ag """Test that agent middleware can override response for streaming execution.""" async def override_stream() -> AsyncIterable[AgentResponseUpdate]: - yield AgentResponseUpdate(contents=[TextContent(text="overridden")]) - yield AgentResponseUpdate(contents=[TextContent(text=" stream")]) + yield 
AgentResponseUpdate(contents=[Content.from_text(text="overridden")]) + yield AgentResponseUpdate(contents=[Content.from_text(text=" stream")]) class StreamResponseOverrideMiddleware(AgentMiddleware): async def process( @@ -92,7 +92,7 @@ async def process( context = AgentRunContext(agent=mock_agent, messages=messages) async def final_handler(ctx: AgentRunContext) -> AsyncIterable[AgentResponseUpdate]: - yield AgentResponseUpdate(contents=[TextContent(text="original")]) + yield AgentResponseUpdate(contents=[Content.from_text(text="original")]) updates: list[AgentResponseUpdate] = [] async for update in pipeline.execute_stream(mock_agent, messages, context, final_handler): @@ -175,9 +175,9 @@ async def test_chat_agent_middleware_streaming_override(self) -> None: mock_chat_client = MockChatClient() async def custom_stream() -> AsyncIterable[AgentResponseUpdate]: - yield AgentResponseUpdate(contents=[TextContent(text="Custom")]) - yield AgentResponseUpdate(contents=[TextContent(text=" streaming")]) - yield AgentResponseUpdate(contents=[TextContent(text=" response!")]) + yield AgentResponseUpdate(contents=[Content.from_text(text="Custom")]) + yield AgentResponseUpdate(contents=[Content.from_text(text=" streaming")]) + yield AgentResponseUpdate(contents=[Content.from_text(text=" response!")]) class ChatAgentStreamOverrideMiddleware(AgentMiddleware): async def process( diff --git a/python/packages/core/tests/core/test_middleware_with_agent.py b/python/packages/core/tests/core/test_middleware_with_agent.py index 5cfea39287..445f13596a 100644 --- a/python/packages/core/tests/core/test_middleware_with_agent.py +++ b/python/packages/core/tests/core/test_middleware_with_agent.py @@ -13,10 +13,8 @@ ChatMiddleware, ChatResponse, ChatResponseUpdate, - FunctionCallContent, - FunctionResultContent, + Content, Role, - TextContent, agent_middleware, chat_middleware, function_middleware, @@ -201,7 +199,9 @@ async def process( ChatMessage( role=Role.ASSISTANT, contents=[ - 
FunctionCallContent(call_id="test_call", name="test_function", arguments={"text": "test"}) + Content.from_function_call( + call_id="test_call", name="test_function", arguments={"text": "test"} + ) ], ) ] @@ -256,7 +256,9 @@ async def process( ChatMessage( role=Role.ASSISTANT, contents=[ - FunctionCallContent(call_id="test_call", name="test_function", arguments={"text": "test"}) + Content.from_function_call( + call_id="test_call", name="test_function", arguments={"text": "test"} + ) ], ) ] @@ -365,8 +367,8 @@ async def process( # Set up mock streaming responses chat_client.streaming_responses = [ [ - ChatResponseUpdate(contents=[TextContent(text="Streaming")], role=Role.ASSISTANT), - ChatResponseUpdate(contents=[TextContent(text=" response")], role=Role.ASSISTANT), + ChatResponseUpdate(contents=[Content.from_text(text="Streaming")], role=Role.ASSISTANT), + ChatResponseUpdate(contents=[Content.from_text(text=" response")], role=Role.ASSISTANT), ] ] @@ -550,7 +552,7 @@ async def process( ChatMessage( role=Role.ASSISTANT, contents=[ - FunctionCallContent( + Content.from_function_call( call_id="call_123", name="sample_tool_function", arguments='{"location": "Seattle"}', @@ -585,8 +587,8 @@ async def process( # Verify function call and result are in the response all_contents = [content for message in response.messages for content in message.contents] - function_calls = [c for c in all_contents if isinstance(c, FunctionCallContent)] - function_results = [c for c in all_contents if isinstance(c, FunctionResultContent)] + function_calls = [c for c in all_contents if c.type == "function_call"] + function_results = [c for c in all_contents if c.type == "function_result"] assert len(function_calls) == 1 assert len(function_results) == 1 @@ -610,7 +612,7 @@ async def tracking_function_middleware( ChatMessage( role=Role.ASSISTANT, contents=[ - FunctionCallContent( + Content.from_function_call( call_id="call_456", name="sample_tool_function", arguments='{"location": "San 
Francisco"}', @@ -644,8 +646,8 @@ async def tracking_function_middleware( # Verify function call and result are in the response all_contents = [content for message in response.messages for content in message.contents] - function_calls = [c for c in all_contents if isinstance(c, FunctionCallContent)] - function_results = [c for c in all_contents if isinstance(c, FunctionResultContent)] + function_calls = [c for c in all_contents if c.type == "function_call"] + function_results = [c for c in all_contents if c.type == "function_result"] assert len(function_calls) == 1 assert len(function_results) == 1 @@ -682,7 +684,7 @@ async def process( ChatMessage( role=Role.ASSISTANT, contents=[ - FunctionCallContent( + Content.from_function_call( call_id="call_789", name="sample_tool_function", arguments='{"location": "New York"}', @@ -723,8 +725,8 @@ async def process( # Verify function call and result are in the response all_contents = [content for message in response.messages for content in message.contents] - function_calls = [c for c in all_contents if isinstance(c, FunctionCallContent)] - function_results = [c for c in all_contents if isinstance(c, FunctionResultContent)] + function_calls = [c for c in all_contents if c.type == "function_call"] + function_results = [c for c in all_contents if c.type == "function_result"] assert len(function_calls) == 1 assert len(function_results) == 1 @@ -769,14 +771,16 @@ async def kwargs_middleware( ChatMessage( role=Role.ASSISTANT, contents=[ - FunctionCallContent( + Content.from_function_call( call_id="test_call", name="sample_tool_function", arguments={"location": "Seattle"} ) ], ) ] ), - ChatResponse(messages=[ChatMessage(role=Role.ASSISTANT, contents=[TextContent("Function completed")])]), + ChatResponse( + messages=[ChatMessage(role=Role.ASSISTANT, contents=[Content.from_text("Function completed")])] + ), ] # Create ChatAgent with function middleware @@ -1076,8 +1080,8 @@ async def process( # Set up mock streaming responses 
chat_client.streaming_responses = [ [ - ChatResponseUpdate(contents=[TextContent(text="Stream")], role=Role.ASSISTANT), - ChatResponseUpdate(contents=[TextContent(text=" response")], role=Role.ASSISTANT), + ChatResponseUpdate(contents=[Content.from_text(text="Stream")], role=Role.ASSISTANT), + ChatResponseUpdate(contents=[Content.from_text(text=" response")], role=Role.ASSISTANT), ] ] @@ -1159,7 +1163,7 @@ def custom_tool(message: str) -> str: ChatMessage( role=Role.ASSISTANT, contents=[ - FunctionCallContent( + Content.from_function_call( call_id="test_call", name="custom_tool", arguments='{"message": "test"}', @@ -1204,8 +1208,8 @@ def custom_tool(message: str) -> str: # Verify function call and result are in the response all_contents = [content for message in response.messages for content in message.contents] - function_calls = [c for c in all_contents if isinstance(c, FunctionCallContent)] - function_results = [c for c in all_contents if isinstance(c, FunctionResultContent)] + function_calls = [c for c in all_contents if c.type == "function_call"] + function_results = [c for c in all_contents if c.type == "function_result"] assert len(function_calls) == 1 assert len(function_results) == 1 @@ -1248,7 +1252,7 @@ def custom_tool(message: str) -> str: ChatMessage( role=Role.ASSISTANT, contents=[ - FunctionCallContent( + Content.from_function_call( call_id="test_call", name="custom_tool", arguments='{"message": "test"}', @@ -1315,7 +1319,7 @@ def custom_tool(message: str) -> str: ChatMessage( role=Role.ASSISTANT, contents=[ - FunctionCallContent( + Content.from_function_call( call_id="test_call", name="custom_tool", arguments='{"message": "test"}', @@ -1365,7 +1369,7 @@ def custom_tool(message: str) -> str: ChatMessage( role=Role.ASSISTANT, contents=[ - FunctionCallContent( + Content.from_function_call( call_id="test_call", name="custom_tool", arguments='{"message": "test"}', @@ -1704,8 +1708,8 @@ async def process(self, context: ChatContext, next: 
Callable[[ChatContext], Awai # Set up mock streaming responses chat_client.streaming_responses = [ [ - ChatResponseUpdate(contents=[TextContent(text="Stream")], role=Role.ASSISTANT), - ChatResponseUpdate(contents=[TextContent(text=" response")], role=Role.ASSISTANT), + ChatResponseUpdate(contents=[Content.from_text(text="Stream")], role=Role.ASSISTANT), + ChatResponseUpdate(contents=[Content.from_text(text=" response")], role=Role.ASSISTANT), ] ] @@ -1806,7 +1810,7 @@ async def function_middleware( ChatMessage( role=Role.ASSISTANT, contents=[ - FunctionCallContent( + Content.from_function_call( call_id="call_456", name="sample_tool_function", arguments='{"location": "San Francisco"}', @@ -1850,8 +1854,8 @@ async def function_middleware( # Verify function call and result are in the response all_contents = [content for message in response.messages for content in message.contents] - function_calls = [c for c in all_contents if isinstance(c, FunctionCallContent)] - function_results = [c for c in all_contents if isinstance(c, FunctionResultContent)] + function_calls = [c for c in all_contents if c.type == "function_call"] + function_results = [c for c in all_contents if c.type == "function_result"] assert len(function_calls) == 1 assert len(function_results) == 1 @@ -1902,3 +1906,59 @@ async def kwargs_middleware( assert modified_kwargs["max_tokens"] == 500 assert modified_kwargs["new_param"] == "added_by_middleware" assert modified_kwargs["custom_param"] == "test_value" # Should still be there + + +class TestMiddlewareWithProtocolOnlyAgent: + """Test use_agent_middleware with agents implementing only AgentProtocol.""" + + async def test_middleware_with_protocol_only_agent(self) -> None: + """Verify middleware works without BaseAgent inheritance for both run and run_stream.""" + from collections.abc import AsyncIterable + + from agent_framework import AgentProtocol, AgentResponse, AgentResponseUpdate, use_agent_middleware + + execution_order: list[str] = [] + + class 
TrackingMiddleware(AgentMiddleware): + async def process( + self, context: AgentRunContext, next: Callable[[AgentRunContext], Awaitable[None]] + ) -> None: + execution_order.append("before") + await next(context) + execution_order.append("after") + + @use_agent_middleware + class ProtocolOnlyAgent: + """Minimal agent implementing only AgentProtocol, not inheriting from BaseAgent.""" + + def __init__(self): + self.id = "protocol-only-agent" + self.name = "Protocol Only Agent" + self.description = "Test agent" + self.middleware = [TrackingMiddleware()] + + async def run(self, messages=None, *, thread=None, **kwargs) -> AgentResponse: + return AgentResponse(messages=[ChatMessage(role=Role.ASSISTANT, text="response")]) + + def run_stream(self, messages=None, *, thread=None, **kwargs) -> AsyncIterable[AgentResponseUpdate]: + async def _stream(): + yield AgentResponseUpdate() + + return _stream() + + def get_new_thread(self, **kwargs): + return None + + agent = ProtocolOnlyAgent() + assert isinstance(agent, AgentProtocol) + + # Test run (non-streaming) + response = await agent.run("test message") + assert response is not None + assert execution_order == ["before", "after"] + + # Test run_stream (streaming) + execution_order.clear() + async for _ in agent.run_stream("test message"): + pass + assert execution_order == ["before", "after"] diff --git a/python/packages/core/tests/core/test_middleware_with_chat.py b/python/packages/core/tests/core/test_middleware_with_chat.py index 9d395284ea..a24d0e8037 100644 --- a/python/packages/core/tests/core/test_middleware_with_chat.py +++ b/python/packages/core/tests/core/test_middleware_with_chat.py @@ -9,7 +9,7 @@ ChatMessage, ChatMiddleware, ChatResponse, - FunctionCallContent, + Content, FunctionInvocationContext, Role, chat_middleware, @@ -349,7 +349,7 @@ def sample_tool(location: str) -> str: ChatMessage( role=Role.ASSISTANT, contents=[ - FunctionCallContent( + Content.from_function_call( call_id="call_1", name="sample_tool", 
arguments={"location": "San Francisco"}, @@ -405,7 +405,7 @@ def sample_tool(location: str) -> str: ChatMessage( role=Role.ASSISTANT, contents=[ - FunctionCallContent( + Content.from_function_call( call_id="call_2", name="sample_tool", arguments={"location": "New York"}, diff --git a/python/packages/core/tests/core/test_observability.py b/python/packages/core/tests/core/test_observability.py index 95f234efd4..88245cfa52 100644 --- a/python/packages/core/tests/core/test_observability.py +++ b/python/packages/core/tests/core/test_observability.py @@ -279,6 +279,133 @@ async def test_chat_client_streaming_observability( assert span.attributes[OtelAttr.OUTPUT_MESSAGES] is not None +@pytest.mark.parametrize("enable_sensitive_data", [True], indirect=True) +async def test_chat_client_observability_with_instructions( + mock_chat_client, span_exporter: InMemorySpanExporter, enable_sensitive_data +): + """Test that system_instructions from options are captured in LLM span.""" + import json + + client = use_instrumentation(mock_chat_client)() + + messages = [ChatMessage(role=Role.USER, text="Test message")] + options = {"model_id": "Test", "instructions": "You are a helpful assistant."} + span_exporter.clear() + response = await client.get_response(messages=messages, options=options) + + assert response is not None + spans = span_exporter.get_finished_spans() + assert len(spans) == 1 + span = spans[0] + + # Verify system_instructions attribute is set + assert OtelAttr.SYSTEM_INSTRUCTIONS in span.attributes + system_instructions = json.loads(span.attributes[OtelAttr.SYSTEM_INSTRUCTIONS]) + assert len(system_instructions) == 1 + assert system_instructions[0]["content"] == "You are a helpful assistant." 
+ + # Verify input_messages contains system message + input_messages = json.loads(span.attributes[OtelAttr.INPUT_MESSAGES]) + assert any(msg.get("role") == "system" for msg in input_messages) + + +@pytest.mark.parametrize("enable_sensitive_data", [True], indirect=True) +async def test_chat_client_streaming_observability_with_instructions( + mock_chat_client, span_exporter: InMemorySpanExporter, enable_sensitive_data +): + """Test streaming telemetry captures system_instructions from options.""" + import json + + client = use_instrumentation(mock_chat_client)() + messages = [ChatMessage(role=Role.USER, text="Test")] + options = {"model_id": "Test", "instructions": "You are a helpful assistant."} + span_exporter.clear() + + updates = [] + async for update in client.get_streaming_response(messages=messages, options=options): + updates.append(update) + + assert len(updates) == 2 + spans = span_exporter.get_finished_spans() + assert len(spans) == 1 + span = spans[0] + + # Verify system_instructions attribute is set + assert OtelAttr.SYSTEM_INSTRUCTIONS in span.attributes + system_instructions = json.loads(span.attributes[OtelAttr.SYSTEM_INSTRUCTIONS]) + assert len(system_instructions) == 1 + assert system_instructions[0]["content"] == "You are a helpful assistant." 
+ + +@pytest.mark.parametrize("enable_sensitive_data", [True], indirect=True) +async def test_chat_client_observability_without_instructions( + mock_chat_client, span_exporter: InMemorySpanExporter, enable_sensitive_data +): + """Test that system_instructions attribute is not set when instructions are not provided.""" + client = use_instrumentation(mock_chat_client)() + + messages = [ChatMessage(role=Role.USER, text="Test message")] + options = {"model_id": "Test"} # No instructions + span_exporter.clear() + response = await client.get_response(messages=messages, options=options) + + assert response is not None + spans = span_exporter.get_finished_spans() + assert len(spans) == 1 + span = spans[0] + + # Verify system_instructions attribute is NOT set + assert OtelAttr.SYSTEM_INSTRUCTIONS not in span.attributes + + +@pytest.mark.parametrize("enable_sensitive_data", [True], indirect=True) +async def test_chat_client_observability_with_empty_instructions( + mock_chat_client, span_exporter: InMemorySpanExporter, enable_sensitive_data +): + """Test that system_instructions attribute is not set when instructions is an empty string.""" + client = use_instrumentation(mock_chat_client)() + + messages = [ChatMessage(role=Role.USER, text="Test message")] + options = {"model_id": "Test", "instructions": ""} # Empty string + span_exporter.clear() + response = await client.get_response(messages=messages, options=options) + + assert response is not None + spans = span_exporter.get_finished_spans() + assert len(spans) == 1 + span = spans[0] + + # Empty string should not set system_instructions + assert OtelAttr.SYSTEM_INSTRUCTIONS not in span.attributes + + +@pytest.mark.parametrize("enable_sensitive_data", [True], indirect=True) +async def test_chat_client_observability_with_list_instructions( + mock_chat_client, span_exporter: InMemorySpanExporter, enable_sensitive_data +): + """Test that list-type instructions are correctly captured.""" + import json + + client = 
use_instrumentation(mock_chat_client)() + + messages = [ChatMessage(role=Role.USER, text="Test message")] + options = {"model_id": "Test", "instructions": ["Instruction 1", "Instruction 2"]} + span_exporter.clear() + response = await client.get_response(messages=messages, options=options) + + assert response is not None + spans = span_exporter.get_finished_spans() + assert len(spans) == 1 + span = spans[0] + + # Verify system_instructions attribute contains both instructions + assert OtelAttr.SYSTEM_INSTRUCTIONS in span.attributes + system_instructions = json.loads(span.attributes[OtelAttr.SYSTEM_INSTRUCTIONS]) + assert len(system_instructions) == 2 + assert system_instructions[0]["content"] == "Instruction 1" + assert system_instructions[1]["content"] == "Instruction 2" + + async def test_chat_client_without_model_id_observability(mock_chat_client, span_exporter: InMemorySpanExporter): """Test telemetry shouldn't fail when the model_id is not provided for unknown reason.""" client = use_instrumentation(mock_chat_client)() diff --git a/python/packages/core/tests/core/test_tools.py b/python/packages/core/tests/core/test_tools.py index 77442be322..ee6103a613 100644 --- a/python/packages/core/tests/core/test_tools.py +++ b/python/packages/core/tests/core/test_tools.py @@ -9,6 +9,7 @@ from agent_framework import ( AIFunction, + Content, HostedCodeInterpreterTool, HostedImageGenerationTool, HostedMCPTool, @@ -639,24 +640,22 @@ def test_parse_inputs_none(): def test_parse_inputs_string(): """Test _parse_inputs with string input.""" - from agent_framework import UriContent result = _parse_inputs("http://example.com") assert len(result) == 1 - assert isinstance(result[0], UriContent) + assert result[0].type == "uri" assert result[0].uri == "http://example.com" assert result[0].media_type == "text/plain" def test_parse_inputs_list_of_strings(): """Test _parse_inputs with list of strings.""" - from agent_framework import UriContent inputs = ["http://example.com", 
"https://test.org"] result = _parse_inputs(inputs) assert len(result) == 2 - assert all(isinstance(item, UriContent) for item in result) + assert all(item.type == "uri" for item in result) assert result[0].uri == "http://example.com" assert result[1].uri == "https://test.org" assert all(item.media_type == "text/plain" for item in result) @@ -664,88 +663,84 @@ def test_parse_inputs_list_of_strings(): def test_parse_inputs_uri_dict(): """Test _parse_inputs with URI dictionary.""" - from agent_framework import UriContent input_dict = {"uri": "http://example.com", "media_type": "application/json"} result = _parse_inputs(input_dict) assert len(result) == 1 - assert isinstance(result[0], UriContent) + assert result[0].type == "uri" assert result[0].uri == "http://example.com" assert result[0].media_type == "application/json" def test_parse_inputs_hosted_file_dict(): """Test _parse_inputs with hosted file dictionary.""" - from agent_framework import HostedFileContent input_dict = {"file_id": "file-123"} result = _parse_inputs(input_dict) assert len(result) == 1 - assert isinstance(result[0], HostedFileContent) + assert result[0].type == "hosted_file" assert result[0].file_id == "file-123" def test_parse_inputs_hosted_vector_store_dict(): """Test _parse_inputs with hosted vector store dictionary.""" - from agent_framework import HostedVectorStoreContent + from agent_framework import Content input_dict = {"vector_store_id": "vs-789"} result = _parse_inputs(input_dict) assert len(result) == 1 - assert isinstance(result[0], HostedVectorStoreContent) + assert isinstance(result[0], Content) + assert result[0].type == "hosted_vector_store" assert result[0].vector_store_id == "vs-789" def test_parse_inputs_data_dict(): """Test _parse_inputs with data dictionary.""" - from agent_framework import DataContent input_dict = {"data": b"test data", "media_type": "application/octet-stream"} result = _parse_inputs(input_dict) assert len(result) == 1 - assert isinstance(result[0], 
DataContent) + assert result[0].type == "data" assert result[0].uri == "data:application/octet-stream;base64,dGVzdCBkYXRh" assert result[0].media_type == "application/octet-stream" def test_parse_inputs_ai_contents_instance(): - """Test _parse_inputs with Contents instance.""" - from agent_framework import TextContent + """Test _parse_inputs with Content instance.""" - text_content = TextContent(text="Hello, world!") + text_content = Content.from_text(text="Hello, world!") result = _parse_inputs(text_content) assert len(result) == 1 - assert isinstance(result[0], TextContent) + assert result[0].type == "text" assert result[0].text == "Hello, world!" def test_parse_inputs_mixed_list(): """Test _parse_inputs with mixed input types.""" - from agent_framework import HostedFileContent, TextContent, UriContent inputs = [ "http://example.com", # string {"uri": "https://test.org", "media_type": "text/html"}, # URI dict {"file_id": "file-456"}, # hosted file dict - TextContent(text="Hello"), # Contents instance + Content.from_text(text="Hello"), # Content instance ] result = _parse_inputs(inputs) assert len(result) == 4 - assert isinstance(result[0], UriContent) + assert result[0].type == "uri" assert result[0].uri == "http://example.com" - assert isinstance(result[1], UriContent) + assert result[1].type == "uri" assert result[1].uri == "https://test.org" assert result[1].media_type == "text/html" - assert isinstance(result[2], HostedFileContent) + assert result[2].type == "hosted_file" assert result[2].file_id == "file-456" - assert isinstance(result[3], TextContent) + assert result[3].type == "text" assert result[3].text == "Hello" @@ -765,55 +760,51 @@ def test_parse_inputs_unsupported_type(): def test_hosted_code_interpreter_tool_with_string_input(): """Test HostedCodeInterpreterTool with string input.""" - from agent_framework import UriContent tool = HostedCodeInterpreterTool(inputs="http://example.com") assert len(tool.inputs) == 1 - assert isinstance(tool.inputs[0], 
UriContent) + assert tool.inputs[0].type == "uri" assert tool.inputs[0].uri == "http://example.com" def test_hosted_code_interpreter_tool_with_dict_inputs(): """Test HostedCodeInterpreterTool with dictionary inputs.""" - from agent_framework import HostedFileContent, UriContent inputs = [{"uri": "http://example.com", "media_type": "text/html"}, {"file_id": "file-123"}] tool = HostedCodeInterpreterTool(inputs=inputs) assert len(tool.inputs) == 2 - assert isinstance(tool.inputs[0], UriContent) + assert tool.inputs[0].type == "uri" assert tool.inputs[0].uri == "http://example.com" assert tool.inputs[0].media_type == "text/html" - assert isinstance(tool.inputs[1], HostedFileContent) + assert tool.inputs[1].type == "hosted_file" assert tool.inputs[1].file_id == "file-123" def test_hosted_code_interpreter_tool_with_ai_contents(): - """Test HostedCodeInterpreterTool with Contents instances.""" - from agent_framework import DataContent, TextContent + """Test HostedCodeInterpreterTool with Content instances.""" - inputs = [TextContent(text="Hello, world!"), DataContent(data=b"test", media_type="text/plain")] + inputs = [Content.from_text(text="Hello, world!"), Content.from_data(data=b"test", media_type="text/plain")] tool = HostedCodeInterpreterTool(inputs=inputs) assert len(tool.inputs) == 2 - assert isinstance(tool.inputs[0], TextContent) + assert tool.inputs[0].type == "text" assert tool.inputs[0].text == "Hello, world!" 
- assert isinstance(tool.inputs[1], DataContent) + assert tool.inputs[1].type == "data" assert tool.inputs[1].media_type == "text/plain" def test_hosted_code_interpreter_tool_with_single_input(): """Test HostedCodeInterpreterTool with single input (not in list).""" - from agent_framework import HostedFileContent input_dict = {"file_id": "file-single"} tool = HostedCodeInterpreterTool(inputs=input_dict) assert len(tool.inputs) == 1 - assert isinstance(tool.inputs[0], HostedFileContent) + assert tool.inputs[0].type == "hosted_file" assert tool.inputs[0].file_id == "file-single" @@ -983,7 +974,7 @@ async def get_streaming_response(self, messages, **kwargs): yield ChatResponseUpdate(contents=[content], role=msg.role) else: # Default response - yield ChatResponseUpdate(contents=["Default response"], role="assistant") + yield ChatResponseUpdate(text="Default response", role="assistant") return MockChatClient() @@ -1006,7 +997,7 @@ def requires_approval_tool(x: int) -> int: async def test_non_streaming_single_function_no_approval(): """Test non-streaming handler with single function call that doesn't require approval.""" - from agent_framework import ChatMessage, ChatResponse, FunctionCallContent + from agent_framework import ChatMessage, ChatResponse from agent_framework._tools import _handle_function_calls_response # Create mock client @@ -1017,11 +1008,11 @@ async def test_non_streaming_single_function_no_approval(): messages=[ ChatMessage( role="assistant", - contents=[FunctionCallContent(call_id="call_1", name="no_approval_tool", arguments='{"x": 5}')], + contents=[Content.from_function_call(call_id="call_1", name="no_approval_tool", arguments='{"x": 5}')], ) ] ) - final_response = ChatResponse(messages=[ChatMessage(role="assistant", contents=["The result is 10"])]) + final_response = ChatResponse(messages=[ChatMessage(role="assistant", text="The result is 10")]) call_count = [0] responses = [initial_response, final_response] @@ -1039,17 +1030,16 @@ async def 
mock_get_response(self, messages, **kwargs): # Verify: should have 3 messages: function call, function result, final answer assert len(result.messages) == 3 - assert isinstance(result.messages[0].contents[0], FunctionCallContent) - from agent_framework import FunctionResultContent + assert result.messages[0].contents[0].type == "function_call" - assert isinstance(result.messages[1].contents[0], FunctionResultContent) + assert result.messages[1].contents[0].type == "function_result" assert result.messages[1].contents[0].result == 10 # 5 * 2 - assert result.messages[2].contents[0] == "The result is 10" + assert result.messages[2].text == "The result is 10" async def test_non_streaming_single_function_requires_approval(): """Test non-streaming handler with single function call that requires approval.""" - from agent_framework import ChatMessage, ChatResponse, FunctionCallContent + from agent_framework import ChatMessage, ChatResponse from agent_framework._tools import _handle_function_calls_response mock_client = type("MockClient", (), {})() @@ -1059,7 +1049,9 @@ async def test_non_streaming_single_function_requires_approval(): messages=[ ChatMessage( role="assistant", - contents=[FunctionCallContent(call_id="call_1", name="requires_approval_tool", arguments='{"x": 5}')], + contents=[ + Content.from_function_call(call_id="call_1", name="requires_approval_tool", arguments='{"x": 5}') + ], ) ] ) @@ -1078,18 +1070,17 @@ async def mock_get_response(self, messages, **kwargs): result = await wrapped(mock_client, messages=[], options={"tools": [requires_approval_tool]}) # Verify: should return 1 message with function call and approval request - from agent_framework import FunctionApprovalRequestContent assert len(result.messages) == 1 assert len(result.messages[0].contents) == 2 - assert isinstance(result.messages[0].contents[0], FunctionCallContent) - assert isinstance(result.messages[0].contents[1], FunctionApprovalRequestContent) + assert 
result.messages[0].contents[0].type == "function_call" + assert result.messages[0].contents[1].type == "function_approval_request" assert result.messages[0].contents[1].function_call.name == "requires_approval_tool" async def test_non_streaming_two_functions_both_no_approval(): """Test non-streaming handler with two function calls, neither requiring approval.""" - from agent_framework import ChatMessage, ChatResponse, FunctionCallContent + from agent_framework import ChatMessage, ChatResponse from agent_framework._tools import _handle_function_calls_response mock_client = type("MockClient", (), {})() @@ -1100,15 +1091,13 @@ async def test_non_streaming_two_functions_both_no_approval(): ChatMessage( role="assistant", contents=[ - FunctionCallContent(call_id="call_1", name="no_approval_tool", arguments='{"x": 5}'), - FunctionCallContent(call_id="call_2", name="no_approval_tool", arguments='{"x": 3}'), + Content.from_function_call(call_id="call_1", name="no_approval_tool", arguments='{"x": 5}'), + Content.from_function_call(call_id="call_2", name="no_approval_tool", arguments='{"x": 3}'), ], ) ] ) - final_response = ChatResponse( - messages=[ChatMessage(role="assistant", contents=["Both tools executed successfully"])] - ) + final_response = ChatResponse(messages=[ChatMessage(role="assistant", text="Both tools executed successfully")]) call_count = [0] responses = [initial_response, final_response] @@ -1124,21 +1113,20 @@ async def mock_get_response(self, messages, **kwargs): result = await wrapped(mock_client, messages=[], options={"tools": [no_approval_tool]}) # Verify: should have function calls, results, and final answer - from agent_framework import FunctionResultContent assert len(result.messages) == 3 # First message has both function calls assert len(result.messages[0].contents) == 2 # Second message has both results assert len(result.messages[1].contents) == 2 - assert all(isinstance(c, FunctionResultContent) for c in result.messages[1].contents) + assert 
all(c.type == "function_result" for c in result.messages[1].contents) assert result.messages[1].contents[0].result == 10 # 5 * 2 assert result.messages[1].contents[1].result == 6 # 3 * 2 async def test_non_streaming_two_functions_both_require_approval(): """Test non-streaming handler with two function calls, both requiring approval.""" - from agent_framework import ChatMessage, ChatResponse, FunctionCallContent + from agent_framework import ChatMessage, ChatResponse from agent_framework._tools import _handle_function_calls_response mock_client = type("MockClient", (), {})() @@ -1149,8 +1137,8 @@ async def test_non_streaming_two_functions_both_require_approval(): ChatMessage( role="assistant", contents=[ - FunctionCallContent(call_id="call_1", name="requires_approval_tool", arguments='{"x": 5}'), - FunctionCallContent(call_id="call_2", name="requires_approval_tool", arguments='{"x": 3}'), + Content.from_function_call(call_id="call_1", name="requires_approval_tool", arguments='{"x": 5}'), + Content.from_function_call(call_id="call_2", name="requires_approval_tool", arguments='{"x": 3}'), ], ) ] @@ -1170,12 +1158,11 @@ async def mock_get_response(self, messages, **kwargs): result = await wrapped(mock_client, messages=[], options={"tools": [requires_approval_tool]}) # Verify: should return 1 message with function calls and approval requests - from agent_framework import FunctionApprovalRequestContent assert len(result.messages) == 1 assert len(result.messages[0].contents) == 4 # 2 function calls + 2 approval requests - function_calls = [c for c in result.messages[0].contents if isinstance(c, FunctionCallContent)] - approval_requests = [c for c in result.messages[0].contents if isinstance(c, FunctionApprovalRequestContent)] + function_calls = [c for c in result.messages[0].contents if c.type == "function_call"] + approval_requests = [c for c in result.messages[0].contents if c.type == "function_approval_request"] assert len(function_calls) == 2 assert 
len(approval_requests) == 2 assert approval_requests[0].function_call.name == "requires_approval_tool" @@ -1184,7 +1171,7 @@ async def mock_get_response(self, messages, **kwargs): async def test_non_streaming_two_functions_mixed_approval(): """Test non-streaming handler with two function calls, one requiring approval.""" - from agent_framework import ChatMessage, ChatResponse, FunctionCallContent + from agent_framework import ChatMessage, ChatResponse from agent_framework._tools import _handle_function_calls_response mock_client = type("MockClient", (), {})() @@ -1195,8 +1182,8 @@ async def test_non_streaming_two_functions_mixed_approval(): ChatMessage( role="assistant", contents=[ - FunctionCallContent(call_id="call_1", name="no_approval_tool", arguments='{"x": 5}'), - FunctionCallContent(call_id="call_2", name="requires_approval_tool", arguments='{"x": 3}'), + Content.from_function_call(call_id="call_1", name="no_approval_tool", arguments='{"x": 5}'), + Content.from_function_call(call_id="call_2", name="requires_approval_tool", arguments='{"x": 3}'), ], ) ] @@ -1216,17 +1203,16 @@ async def mock_get_response(self, messages, **kwargs): result = await wrapped(mock_client, messages=[], options={"tools": [no_approval_tool, requires_approval_tool]}) # Verify: should return approval requests for both (when one needs approval, all are sent for approval) - from agent_framework import FunctionApprovalRequestContent assert len(result.messages) == 1 assert len(result.messages[0].contents) == 4 # 2 function calls + 2 approval requests - approval_requests = [c for c in result.messages[0].contents if isinstance(c, FunctionApprovalRequestContent)] + approval_requests = [c for c in result.messages[0].contents if c.type == "function_approval_request"] assert len(approval_requests) == 2 async def test_streaming_single_function_no_approval(): """Test streaming handler with single function call that doesn't require approval.""" - from agent_framework import ChatResponseUpdate, 
FunctionCallContent + from agent_framework import ChatResponseUpdate from agent_framework._tools import _handle_function_calls_streaming_response mock_client = type("MockClient", (), {})() @@ -1234,11 +1220,11 @@ async def test_streaming_single_function_no_approval(): # Initial response with function call, then final response after function execution initial_updates = [ ChatResponseUpdate( - contents=[FunctionCallContent(call_id="call_1", name="no_approval_tool", arguments='{"x": 5}')], + contents=[Content.from_function_call(call_id="call_1", name="no_approval_tool", arguments='{"x": 5}')], role="assistant", ) ] - final_updates = [ChatResponseUpdate(contents=["The result is 10"], role="assistant")] + final_updates = [ChatResponseUpdate(text="The result is 10", role="assistant")] call_count = [0] updates_list = [initial_updates, final_updates] @@ -1257,22 +1243,23 @@ async def mock_get_streaming_response(self, messages, **kwargs): updates.append(update) # Verify: should have function call update, tool result update (injected), and final update - from agent_framework import FunctionResultContent, Role + from agent_framework import Role assert len(updates) >= 3 # First update is the function call - assert isinstance(updates[0].contents[0], FunctionCallContent) + assert updates[0].contents[0].type == "function_call" # Second update should be the tool result (injected by the wrapper) assert updates[1].role == Role.TOOL - assert isinstance(updates[1].contents[0], FunctionResultContent) + assert updates[1].contents[0].type == "function_result" assert updates[1].contents[0].result == 10 # 5 * 2 # Last update is the final message - assert updates[-1].contents[0] == "The result is 10" + assert updates[-1].contents[0].type == "text" + assert updates[-1].contents[0].text == "The result is 10" async def test_streaming_single_function_requires_approval(): """Test streaming handler with single function call that requires approval.""" - from agent_framework import 
ChatResponseUpdate, FunctionCallContent + from agent_framework import ChatResponseUpdate from agent_framework._tools import _handle_function_calls_streaming_response mock_client = type("MockClient", (), {})() @@ -1280,7 +1267,9 @@ async def test_streaming_single_function_requires_approval(): # Initial response with function call initial_updates = [ ChatResponseUpdate( - contents=[FunctionCallContent(call_id="call_1", name="requires_approval_tool", arguments='{"x": 5}')], + contents=[ + Content.from_function_call(call_id="call_1", name="requires_approval_tool", arguments='{"x": 5}') + ], role="assistant", ) ] @@ -1302,17 +1291,17 @@ async def mock_get_streaming_response(self, messages, **kwargs): updates.append(update) # Verify: should yield function call and then approval request - from agent_framework import FunctionApprovalRequestContent, Role + from agent_framework import Role assert len(updates) == 2 - assert isinstance(updates[0].contents[0], FunctionCallContent) + assert updates[0].contents[0].type == "function_call" assert updates[1].role == Role.ASSISTANT - assert isinstance(updates[1].contents[0], FunctionApprovalRequestContent) + assert updates[1].contents[0].type == "function_approval_request" async def test_streaming_two_functions_both_no_approval(): """Test streaming handler with two function calls, neither requiring approval.""" - from agent_framework import ChatResponseUpdate, FunctionCallContent + from agent_framework import ChatResponseUpdate from agent_framework._tools import _handle_function_calls_streaming_response mock_client = type("MockClient", (), {})() @@ -1320,15 +1309,14 @@ async def test_streaming_two_functions_both_no_approval(): # Initial response with two function calls to the same tool initial_updates = [ ChatResponseUpdate( - contents=[FunctionCallContent(call_id="call_1", name="no_approval_tool", arguments='{"x": 5}')], - role="assistant", - ), - ChatResponseUpdate( - contents=[FunctionCallContent(call_id="call_2", 
name="no_approval_tool", arguments='{"x": 3}')], + contents=[ + Content.from_function_call(call_id="call_1", name="no_approval_tool", arguments='{"x": 5}'), + Content.from_function_call(call_id="call_2", name="no_approval_tool", arguments='{"x": 3}'), + ], role="assistant", ), ] - final_updates = [ChatResponseUpdate(contents=["Both tools executed successfully"], role="assistant")] + final_updates = [ChatResponseUpdate(text="Both tools executed successfully", role="assistant")] call_count = [0] updates_list = [initial_updates, final_updates] @@ -1347,22 +1335,23 @@ async def mock_get_streaming_response(self, messages, **kwargs): updates.append(update) # Verify: should have both function calls, one tool result update with both results, and final message - from agent_framework import FunctionResultContent, Role + from agent_framework import Role - assert len(updates) >= 3 - # First two updates are function calls - assert isinstance(updates[0].contents[0], FunctionCallContent) - assert isinstance(updates[1].contents[0], FunctionCallContent) + assert len(updates) >= 2 + # First update has both function calls + assert len(updates[0].contents) == 2 + assert updates[0].contents[0].type == "function_call" + assert updates[0].contents[1].type == "function_call" # Should have a tool result update with both results tool_updates = [u for u in updates if u.role == Role.TOOL] assert len(tool_updates) == 1 assert len(tool_updates[0].contents) == 2 - assert all(isinstance(c, FunctionResultContent) for c in tool_updates[0].contents) + assert all(c.type == "function_result" for c in tool_updates[0].contents) async def test_streaming_two_functions_both_require_approval(): """Test streaming handler with two function calls, both requiring approval.""" - from agent_framework import ChatResponseUpdate, FunctionCallContent + from agent_framework import ChatResponseUpdate from agent_framework._tools import _handle_function_calls_streaming_response mock_client = type("MockClient", (), {})() 
@@ -1370,11 +1359,15 @@ async def test_streaming_two_functions_both_require_approval(): # Initial response with two function calls to the same tool initial_updates = [ ChatResponseUpdate( - contents=[FunctionCallContent(call_id="call_1", name="requires_approval_tool", arguments='{"x": 5}')], + contents=[ + Content.from_function_call(call_id="call_1", name="requires_approval_tool", arguments='{"x": 5}') + ], role="assistant", ), ChatResponseUpdate( - contents=[FunctionCallContent(call_id="call_2", name="requires_approval_tool", arguments='{"x": 3}')], + contents=[ + Content.from_function_call(call_id="call_2", name="requires_approval_tool", arguments='{"x": 3}') + ], role="assistant", ), ] @@ -1396,20 +1389,20 @@ async def mock_get_streaming_response(self, messages, **kwargs): updates.append(update) # Verify: should yield both function calls and then approval requests - from agent_framework import FunctionApprovalRequestContent, Role + from agent_framework import Role assert len(updates) == 3 - assert isinstance(updates[0].contents[0], FunctionCallContent) - assert isinstance(updates[1].contents[0], FunctionCallContent) + assert updates[0].contents[0].type == "function_call" + assert updates[1].contents[0].type == "function_call" # Assistant update with both approval requests assert updates[2].role == Role.ASSISTANT assert len(updates[2].contents) == 2 - assert all(isinstance(c, FunctionApprovalRequestContent) for c in updates[2].contents) + assert all(c.type == "function_approval_request" for c in updates[2].contents) async def test_streaming_two_functions_mixed_approval(): """Test streaming handler with two function calls, one requiring approval.""" - from agent_framework import ChatResponseUpdate, FunctionCallContent + from agent_framework import ChatResponseUpdate from agent_framework._tools import _handle_function_calls_streaming_response mock_client = type("MockClient", (), {})() @@ -1417,11 +1410,13 @@ async def test_streaming_two_functions_mixed_approval(): 
# Initial response with two function calls initial_updates = [ ChatResponseUpdate( - contents=[FunctionCallContent(call_id="call_1", name="no_approval_tool", arguments='{"x": 5}')], + contents=[Content.from_function_call(call_id="call_1", name="no_approval_tool", arguments='{"x": 5}')], role="assistant", ), ChatResponseUpdate( - contents=[FunctionCallContent(call_id="call_2", name="requires_approval_tool", arguments='{"x": 3}')], + contents=[ + Content.from_function_call(call_id="call_2", name="requires_approval_tool", arguments='{"x": 3}') + ], role="assistant", ), ] @@ -1445,15 +1440,15 @@ async def mock_get_streaming_response(self, messages, **kwargs): updates.append(update) # Verify: should yield both function calls and then approval requests (when one needs approval, all wait) - from agent_framework import FunctionApprovalRequestContent, Role + from agent_framework import Role assert len(updates) == 3 - assert isinstance(updates[0].contents[0], FunctionCallContent) - assert isinstance(updates[1].contents[0], FunctionCallContent) + assert updates[0].contents[0].type == "function_call" + assert updates[1].contents[0].type == "function_call" # Assistant update with both approval requests assert updates[2].role == Role.ASSISTANT assert len(updates[2].contents) == 2 - assert all(isinstance(c, FunctionApprovalRequestContent) for c in updates[2].contents) + assert all(c.type == "function_approval_request" for c in updates[2].contents) async def test_ai_function_with_kwargs_injection(): diff --git a/python/packages/core/tests/core/test_types.py b/python/packages/core/tests/core/test_types.py index c5187fd960..3e5317fdae 100644 --- a/python/packages/core/tests/core/test_types.py +++ b/python/packages/core/tests/core/test_types.py @@ -12,41 +12,24 @@ from agent_framework import ( AgentResponse, AgentResponseUpdate, - BaseContent, + Annotation, ChatMessage, ChatOptions, ChatResponse, ChatResponseUpdate, - CitationAnnotation, - CodeInterpreterToolCallContent, - 
CodeInterpreterToolResultContent, - DataContent, - ErrorContent, + Content, FinishReason, - FunctionApprovalRequestContent, - FunctionApprovalResponseContent, - FunctionCallContent, - FunctionResultContent, - HostedFileContent, - HostedVectorStoreContent, - ImageGenerationToolCallContent, - ImageGenerationToolResultContent, - MCPServerToolCallContent, - MCPServerToolResultContent, Role, - TextContent, - TextReasoningContent, TextSpanRegion, ToolMode, ToolProtocol, - UriContent, - UsageContent, UsageDetails, ai_function, + detect_media_type_from_base64, merge_chat_options, prepare_function_call_results, ) -from agent_framework.exceptions import AdditionItemMismatch, ContentError +from agent_framework.exceptions import ContentError @fixture @@ -83,9 +66,11 @@ def simple_function(x: int, y: int) -> int: def test_text_content_positional(): - """Test the TextContent class to ensure it initializes correctly and inherits from BaseContent.""" + """Test the TextContent class to ensure it initializes correctly and inherits from Content.""" # Create an instance of TextContent - content = TextContent("Hello, world!", raw_representation="Hello, world!", additional_properties={"version": 1}) + content = Content.from_text( + "Hello, world!", raw_representation="Hello, world!", additional_properties={"version": 1} + ) # Check the type and content assert content.type == "text" @@ -93,15 +78,15 @@ def test_text_content_positional(): assert content.raw_representation == "Hello, world!" 
assert content.additional_properties["version"] == 1 # Ensure the instance is of type BaseContent - assert isinstance(content, BaseContent) + assert isinstance(content, Content) # Note: No longer using Pydantic validation, so type assignment should work content.type = "text" # This should work fine now def test_text_content_keyword(): - """Test the TextContent class to ensure it initializes correctly and inherits from BaseContent.""" + """Test the TextContent class to ensure it initializes correctly and inherits from Content.""" # Create an instance of TextContent - content = TextContent( + content = Content.from_text( text="Hello, world!", raw_representation="Hello, world!", additional_properties={"version": 1} ) @@ -111,7 +96,7 @@ def test_text_content_keyword(): assert content.raw_representation == "Hello, world!" assert content.additional_properties["version"] == 1 # Ensure the instance is of type BaseContent - assert isinstance(content, BaseContent) + assert isinstance(content, Content) # Note: No longer using Pydantic validation, so type assignment should work content.type = "text" # This should work fine now @@ -122,109 +107,112 @@ def test_text_content_keyword(): def test_data_content_bytes(): """Test the DataContent class to ensure it initializes correctly.""" # Create an instance of DataContent - content = DataContent(data=b"test", media_type="application/octet-stream", additional_properties={"version": 1}) + content = Content.from_data( + data=b"test", media_type="application/octet-stream", additional_properties={"version": 1} + ) # Check the type and content assert content.type == "data" assert content.uri == "data:application/octet-stream;base64,dGVzdA==" - assert content.has_top_level_media_type("application") is True - assert content.has_top_level_media_type("image") is False + assert content.media_type.startswith("application/") is True + assert content.media_type.startswith("image/") is False assert content.additional_properties["version"] == 1 # 
Ensure the instance is of type BaseContent - assert isinstance(content, BaseContent) + assert isinstance(content, Content) def test_data_content_uri(): - """Test the DataContent class to ensure it initializes correctly with a URI.""" - # Create an instance of DataContent with a URI - content = DataContent(uri="data:application/octet-stream;base64,dGVzdA==", additional_properties={"version": 1}) + """Test the Content.from_uri class to ensure it initializes correctly with a URI.""" + # Create an instance of Content.from_uri with a URI and explicit media_type + content = Content.from_uri( + uri="data:application/octet-stream;base64,dGVzdA==", + media_type="application/octet-stream", + additional_properties={"version": 1}, + ) # Check the type and content assert content.type == "data" assert content.uri == "data:application/octet-stream;base64,dGVzdA==" - # media_type is extracted from URI now + # media_type must be explicitly provided assert content.media_type == "application/octet-stream" - assert content.has_top_level_media_type("application") is True + assert content.media_type.startswith("application/") is True assert content.additional_properties["version"] == 1 # Ensure the instance is of type BaseContent - assert isinstance(content, BaseContent) + assert isinstance(content, Content) def test_data_content_invalid(): """Test the DataContent class to ensure it raises an error for invalid initialization.""" - # Attempt to create an instance of DataContent with invalid data - # not a proper uri - with raises(ValueError): - DataContent(uri="invalid_uri") - # unknown media type - with raises(ValueError): - DataContent(uri="data:application/random;base64,dGVzdA==") - # not valid base64 data would still be accepted by our basic validation - # but it's not a critical issue for now + with pytest.raises(ContentError): + Content.from_uri(uri="invalid_uri", media_type="text/plain") def test_data_content_empty(): """Test the DataContent class to ensure it raises an error for 
empty data.""" - # Attempt to create an instance of DataContent with empty data - with raises(ValueError): - DataContent(data=b"", media_type="application/octet-stream") - - # Attempt to create an instance of DataContent with empty URI - with raises(ValueError): - DataContent(uri="") + data = Content.from_data(data=b"", media_type="application/octet-stream") + assert data.uri == "data:application/octet-stream;base64," + assert data.media_type == "application/octet-stream" def test_data_content_detect_image_format_from_base64(): """Test the detect_image_format_from_base64 static method.""" # Test each supported format png_data = b"\x89PNG\r\n\x1a\n" + b"fake_data" - assert DataContent.detect_image_format_from_base64(base64.b64encode(png_data).decode()) == "png" + assert detect_media_type_from_base64(data_bytes=png_data) == "image/png" + assert detect_media_type_from_base64(data_str=base64.b64encode(png_data).decode()) == "image/png" jpeg_data = b"\xff\xd8\xff\xe0" + b"fake_data" - assert DataContent.detect_image_format_from_base64(base64.b64encode(jpeg_data).decode()) == "jpeg" + assert detect_media_type_from_base64(data_bytes=jpeg_data) == "image/jpeg" + assert detect_media_type_from_base64(data_str=base64.b64encode(jpeg_data).decode()) == "image/jpeg" webp_data = b"RIFF" + b"1234" + b"WEBP" + b"fake_data" - assert DataContent.detect_image_format_from_base64(base64.b64encode(webp_data).decode()) == "webp" - + assert detect_media_type_from_base64(data_str=base64.b64encode(webp_data).decode()) == "image/webp" gif_data = b"GIF89a" + b"fake_data" - assert DataContent.detect_image_format_from_base64(base64.b64encode(gif_data).decode()) == "gif" + assert detect_media_type_from_base64(data_str=base64.b64encode(gif_data).decode()) == "image/gif" # Test fallback behavior unknown_data = b"UNKNOWN_FORMAT" - assert DataContent.detect_image_format_from_base64(base64.b64encode(unknown_data).decode()) == "png" - + assert 
detect_media_type_from_base64(data_str=base64.b64encode(unknown_data).decode()) is None + assert ( + detect_media_type_from_base64( + data_uri=f"data:application/octet-stream;base64,{base64.b64encode(unknown_data).decode()}" + ) + is None + ) + assert detect_media_type_from_base64(data_bytes=unknown_data) is None # Test error handling - assert DataContent.detect_image_format_from_base64("invalid_base64!") == "png" - assert DataContent.detect_image_format_from_base64("") == "png" + with pytest.raises(ValueError, match="Invalid base64 data provided."): + detect_media_type_from_base64(data_str="invalid_base64!") + detect_media_type_from_base64(data_str="") + + with pytest.raises(ValueError, match="Provide exactly one of data_bytes, data_str, or data_uri."): + detect_media_type_from_base64() + detect_media_type_from_base64( + data_bytes=b"data", data_str="data", data_uri="data:application/octet-stream;base64,AAA" + ) + detect_media_type_from_base64(data_bytes=b"data", data_str="data") + detect_media_type_from_base64(data_bytes=b"data", data_uri="data:application/octet-stream;base64,AAA") + detect_media_type_from_base64(data_str="data", data_uri="data:application/octet-stream;base64,AAA") def test_data_content_create_data_uri_from_base64(): """Test the create_data_uri_from_base64 class method.""" # Test with PNG data png_data = b"\x89PNG\r\n\x1a\n" + b"fake_data" - png_base64 = base64.b64encode(png_data).decode() - uri, media_type = DataContent.create_data_uri_from_base64(png_base64) + content = Content.from_data(png_data, media_type=detect_media_type_from_base64(data_bytes=png_data)) - assert uri == f"data:image/png;base64,{png_base64}" - assert media_type == "image/png" + assert content.uri == f"data:image/png;base64,{base64.b64encode(png_data).decode()}" + assert content.media_type == "image/png" # Test with different format jpeg_data = b"\xff\xd8\xff\xe0" + b"fake_data" jpeg_base64 = base64.b64encode(jpeg_data).decode() - uri, media_type = 
DataContent.create_data_uri_from_base64(jpeg_base64) - - assert uri == f"data:image/jpeg;base64,{jpeg_base64}" - assert media_type == "image/jpeg" + content = Content.from_data(jpeg_data, media_type=detect_media_type_from_base64(data_bytes=jpeg_data)) - # Test fallback for unknown format - unknown_data = b"UNKNOWN_FORMAT" - unknown_base64 = base64.b64encode(unknown_data).decode() - uri, media_type = DataContent.create_data_uri_from_base64(unknown_base64) - - assert uri == f"data:image/png;base64,{unknown_base64}" - assert media_type == "image/png" + assert content.uri == f"data:image/jpeg;base64,{jpeg_base64}" + assert content.media_type == "image/jpeg" # region UriContent @@ -232,18 +220,16 @@ def test_data_content_create_data_uri_from_base64(): def test_uri_content(): """Test the UriContent class to ensure it initializes correctly.""" - content = UriContent(uri="http://example.com", media_type="image/jpg", additional_properties={"version": 1}) + content = Content.from_uri(uri="http://example.com", media_type="image/jpg", additional_properties={"version": 1}) # Check the type and content assert content.type == "uri" assert content.uri == "http://example.com" assert content.media_type == "image/jpg" - assert content.has_top_level_media_type("image") is True - assert content.has_top_level_media_type("application") is False + assert content.media_type.startswith("image/") is True + assert content.media_type.startswith("application/") is False assert content.additional_properties["version"] == 1 - - # Ensure the instance is of type BaseContent - assert isinstance(content, BaseContent) + assert isinstance(content, Content) # region: HostedFileContent @@ -251,101 +237,98 @@ def test_uri_content(): def test_hosted_file_content(): """Test the HostedFileContent class to ensure it initializes correctly.""" - content = HostedFileContent(file_id="file-123", additional_properties={"version": 1}) + content = Content.from_hosted_file(file_id="file-123", 
additional_properties={"version": 1}) # Check the type and content assert content.type == "hosted_file" assert content.file_id == "file-123" assert content.additional_properties["version"] == 1 - - # Ensure the instance is of type BaseContent - assert isinstance(content, BaseContent) + assert isinstance(content, Content) def test_hosted_file_content_minimal(): """Test the HostedFileContent class with minimal parameters.""" - content = HostedFileContent(file_id="file-456") + content = Content.from_hosted_file(file_id="file-456") # Check the type and content assert content.type == "hosted_file" assert content.file_id == "file-456" assert content.additional_properties == {} assert content.raw_representation is None - - # Ensure the instance is of type BaseContent - assert isinstance(content, BaseContent) + assert isinstance(content, Content) def test_hosted_file_content_optional_fields(): """HostedFileContent should capture optional media type and name.""" - content = HostedFileContent(file_id="file-789", media_type="image/png", name="plot.png") + content = Content.from_hosted_file(file_id="file-789", media_type="image/png", name="plot.png") assert content.media_type == "image/png" assert content.name == "plot.png" - assert content.has_top_level_media_type("image") - assert content.has_top_level_media_type("application") is False + assert content.media_type.startswith("image/") + assert content.media_type.startswith("application/") is False # region: CodeInterpreter content def test_code_interpreter_tool_call_content_parses_inputs(): - call = CodeInterpreterToolCallContent( + call = Content.from_code_interpreter_tool_call( call_id="call-1", - inputs=[{"type": "text", "text": "print('hi')"}], + inputs=[Content.from_text(text="print('hi')")], ) assert call.type == "code_interpreter_tool_call" assert call.call_id == "call-1" - assert call.inputs and isinstance(call.inputs[0], TextContent) + assert call.inputs and call.inputs[0].type == "text" assert call.inputs[0].text 
== "print('hi')" def test_code_interpreter_tool_result_content_outputs(): - result = CodeInterpreterToolResultContent( + result = Content.from_code_interpreter_tool_result( call_id="call-2", outputs=[ - {"type": "text", "text": "log output"}, - {"type": "uri", "uri": "https://example.com/file.png", "media_type": "image/png"}, + Content.from_text(text="log output"), + Content.from_uri(uri="https://example.com/file.png", media_type="image/png"), ], ) assert result.type == "code_interpreter_tool_result" assert result.call_id == "call-2" assert result.outputs is not None - assert isinstance(result.outputs[0], TextContent) - assert isinstance(result.outputs[1], UriContent) + assert result.outputs[0].type == "text" + assert result.outputs[1].type == "uri" # region: Image generation content def test_image_generation_tool_contents(): - call = ImageGenerationToolCallContent(image_id="img-1") - outputs = [DataContent(data=b"1234", media_type="image/png")] - result = ImageGenerationToolResultContent(image_id="img-1", outputs=outputs) + call = Content.from_image_generation_tool_call(image_id="img-1") + outputs = [Content.from_data(data=b"1234", media_type="image/png")] + result = Content.from_image_generation_tool_result(image_id="img-1", outputs=outputs) assert call.type == "image_generation_tool_call" assert call.image_id == "img-1" assert result.type == "image_generation_tool_result" assert result.image_id == "img-1" - assert result.outputs and isinstance(result.outputs[0], DataContent) + assert result.outputs and result.outputs[0].type == "data" # region: MCP server tool content def test_mcp_server_tool_call_and_result(): - call = MCPServerToolCallContent(call_id="c-1", tool_name="tool", server_name="server", arguments={"x": 1}) + call = Content.from_mcp_server_tool_call(call_id="c-1", tool_name="tool", server_name="server", arguments={"x": 1}) assert call.type == "mcp_server_tool_call" assert call.arguments == {"x": 1} - result = MCPServerToolResultContent(call_id="c-1", 
output=[{"type": "text", "text": "done"}]) + result = Content.from_mcp_server_tool_result(call_id="c-1", output=[{"type": "text", "text": "done"}]) assert result.type == "mcp_server_tool_result" assert result.output - with raises(ValueError): - MCPServerToolCallContent(call_id="", tool_name="tool") + # Empty call_id is allowed, validation happens elsewhere + call2 = Content.from_mcp_server_tool_call(call_id="", tool_name="tool", server_name="server") + assert call2.call_id == "" # region: HostedVectorStoreContent @@ -353,7 +336,7 @@ def test_mcp_server_tool_call_and_result(): def test_hosted_vector_store_content(): """Test the HostedVectorStoreContent class to ensure it initializes correctly.""" - content = HostedVectorStoreContent(vector_store_id="vs-789", additional_properties={"version": 1}) + content = Content.from_hosted_vector_store(vector_store_id="vs-789", additional_properties={"version": 1}) # Check the type and content assert content.type == "hosted_vector_store" @@ -361,13 +344,14 @@ def test_hosted_vector_store_content(): assert content.additional_properties["version"] == 1 # Ensure the instance is of type BaseContent - assert isinstance(content, HostedVectorStoreContent) - assert isinstance(content, BaseContent) + assert isinstance(content, Content) + assert content.type == "hosted_vector_store" + assert isinstance(content, Content) def test_hosted_vector_store_content_minimal(): """Test the HostedVectorStoreContent class with minimal parameters.""" - content = HostedVectorStoreContent(vector_store_id="vs-101112") + content = Content.from_hosted_vector_store(vector_store_id="vs-101112") # Check the type and content assert content.type == "hosted_vector_store" @@ -375,17 +359,13 @@ def test_hosted_vector_store_content_minimal(): assert content.additional_properties == {} assert content.raw_representation is None - # Ensure the instance is of type BaseContent - assert isinstance(content, HostedVectorStoreContent) - assert isinstance(content, 
BaseContent) - # region FunctionCallContent def test_function_call_content(): """Test the FunctionCallContent class to ensure it initializes correctly.""" - content = FunctionCallContent(call_id="1", name="example_function", arguments={"param1": "value1"}) + content = Content.from_function_call(call_id="1", name="example_function", arguments={"param1": "value1"}) # Check the type and content assert content.type == "function_call" @@ -393,42 +373,42 @@ def test_function_call_content(): assert content.arguments == {"param1": "value1"} # Ensure the instance is of type BaseContent - assert isinstance(content, BaseContent) + assert isinstance(content, Content) def test_function_call_content_parse_arguments(): - c1 = FunctionCallContent(call_id="1", name="f", arguments='{"a": 1, "b": 2}') + c1 = Content.from_function_call(call_id="1", name="f", arguments='{"a": 1, "b": 2}') assert c1.parse_arguments() == {"a": 1, "b": 2} - c2 = FunctionCallContent(call_id="1", name="f", arguments="not json") + c2 = Content.from_function_call(call_id="1", name="f", arguments="not json") assert c2.parse_arguments() == {"raw": "not json"} - c3 = FunctionCallContent(call_id="1", name="f", arguments={"x": None}) + c3 = Content.from_function_call(call_id="1", name="f", arguments={"x": None}) assert c3.parse_arguments() == {"x": None} def test_function_call_content_add_merging_and_errors(): # str + str concatenation - a = FunctionCallContent(call_id="1", name="f", arguments="abc") - b = FunctionCallContent(call_id="1", name="f", arguments="def") + a = Content.from_function_call(call_id="1", name="f", arguments="abc") + b = Content.from_function_call(call_id="1", name="f", arguments="def") c = a + b assert isinstance(c.arguments, str) and c.arguments == "abcdef" # dict + dict merge - a = FunctionCallContent(call_id="1", name="f", arguments={"x": 1}) - b = FunctionCallContent(call_id="1", name="f", arguments={"y": 2}) + a = Content.from_function_call(call_id="1", name="f", arguments={"x": 1}) + b 
= Content.from_function_call(call_id="1", name="f", arguments={"y": 2}) c = a + b assert c.arguments == {"x": 1, "y": 2} # incompatible argument types - a = FunctionCallContent(call_id="1", name="f", arguments="abc") - b = FunctionCallContent(call_id="1", name="f", arguments={"y": 2}) + a = Content.from_function_call(call_id="1", name="f", arguments="abc") + b = Content.from_function_call(call_id="1", name="f", arguments={"y": 2}) with raises(TypeError): _ = a + b # incompatible call ids - a = FunctionCallContent(call_id="1", name="f", arguments="abc") - b = FunctionCallContent(call_id="2", name="f", arguments="def") + a = Content.from_function_call(call_id="1", name="f", arguments="abc") + b = Content.from_function_call(call_id="2", name="f", arguments="def") - with raises(AdditionItemMismatch): + with raises(ContentError): _ = a + b @@ -437,14 +417,14 @@ def test_function_call_content_add_merging_and_errors(): def test_function_result_content(): """Test the FunctionResultContent class to ensure it initializes correctly.""" - content = FunctionResultContent(call_id="1", result={"param1": "value1"}) + content = Content.from_function_result(call_id="1", result={"param1": "value1"}) # Check the type and content assert content.type == "function_result" assert content.result == {"param1": "value1"} # Ensure the instance is of type BaseContent - assert isinstance(content, BaseContent) + assert isinstance(content, Content) # region UsageDetails @@ -452,13 +432,15 @@ def test_function_result_content(): def test_usage_details(): usage = UsageDetails(input_token_count=5, output_token_count=10, total_token_count=15) - assert usage.input_token_count == 5 - assert usage.output_token_count == 10 - assert usage.total_token_count == 15 - assert usage.additional_counts == {} + assert usage["input_token_count"] == 5 + assert usage["output_token_count"] == 10 + assert usage["total_token_count"] == 15 + assert usage.get("additional_counts", {}) == {} def 
test_usage_details_addition(): + from agent_framework._types import add_usage_details + usage1 = UsageDetails( input_token_count=5, output_token_count=10, @@ -474,39 +456,38 @@ def test_usage_details_addition(): test3=30, ) - combined_usage = usage1 + usage2 - assert combined_usage.input_token_count == 8 - assert combined_usage.output_token_count == 16 - assert combined_usage.total_token_count == 24 - assert combined_usage.additional_counts["test1"] == 20 - assert combined_usage.additional_counts["test2"] == 20 - assert combined_usage.additional_counts["test3"] == 30 + combined_usage = add_usage_details(usage1, usage2) + assert combined_usage["input_token_count"] == 8 + assert combined_usage["output_token_count"] == 16 + assert combined_usage["total_token_count"] == 24 + assert combined_usage["test1"] == 20 + assert combined_usage["test2"] == 20 + assert combined_usage["test3"] == 30 def test_usage_details_fail(): - with raises(ValueError): - UsageDetails(input_token_count=5, output_token_count=10, total_token_count=15, wrong_type="42.923") + # TypedDict doesn't validate types at runtime, so this test no longer applies + # Creating UsageDetails with wrong types won't raise ValueError + usage = UsageDetails(input_token_count=5, output_token_count=10, total_token_count=15, wrong_type="42.923") # type: ignore[typeddict-item] + assert usage["wrong_type"] == "42.923" # type: ignore[typeddict-item] def test_usage_details_additional_counts(): usage = UsageDetails(input_token_count=5, output_token_count=10, total_token_count=15, **{"test": 1}) - assert usage.additional_counts["test"] == 1 + assert usage.get("test") == 1 def test_usage_details_add_with_none_and_type_errors(): + from agent_framework._types import add_usage_details + u = UsageDetails(input_token_count=1) - # __add__ with None returns self (no change) - v = u + None - assert v is u - # __iadd__ with None leaves unchanged - u2 = UsageDetails(input_token_count=2) - u2 += None - assert u2.input_token_count == 2 - 
# wrong type raises - with raises(ValueError): - _ = u + 42 # type: ignore[arg-type] - with raises(ValueError): - u += 42 # type: ignore[arg-type] + # add_usage_details with None returns the non-None value + v = add_usage_details(u, None) + assert v == u + # add_usage_details with None on left + v2 = add_usage_details(None, u) + assert v2 == u + # TypedDict doesn't support + operator, use add_usage_details # region UserInputRequest and Response @@ -514,28 +495,29 @@ def test_usage_details_add_with_none_and_type_errors(): def test_function_approval_request_and_response_creation(): """Test creating a FunctionApprovalRequestContent and producing a response.""" - fc = FunctionCallContent(call_id="call-1", name="do_something", arguments={"a": 1}) - req = FunctionApprovalRequestContent(id="req-1", function_call=fc) + fc = Content.from_function_call(call_id="call-1", name="do_something", arguments={"a": 1}) + req = Content.from_function_approval_request(id="req-1", function_call=fc) assert req.type == "function_approval_request" assert req.function_call == fc assert req.id == "req-1" - assert isinstance(req, BaseContent) + assert isinstance(req, Content) - resp = req.create_response(True) + resp = req.to_function_approval_response(True) - assert isinstance(resp, FunctionApprovalResponseContent) + assert isinstance(resp, Content) + assert resp.type == "function_approval_response" assert resp.approved is True assert resp.function_call == fc assert resp.id == "req-1" def test_function_approval_serialization_roundtrip(): - fc = FunctionCallContent(call_id="c2", name="f", arguments='{"x":1}') - req = FunctionApprovalRequestContent(id="id-2", function_call=fc, additional_properties={"meta": 1}) + fc = Content.from_function_call(call_id="c2", name="f", arguments='{"x":1}') + req = Content.from_function_approval_request(id="id-2", function_call=fc, additional_properties={"meta": 1}) dumped = req.to_dict() - loaded = FunctionApprovalRequestContent.from_dict(dumped) + loaded = 
Content.from_dict(dumped) # Test that the basic properties match assert loaded.id == req.id @@ -545,15 +527,17 @@ def test_function_approval_serialization_roundtrip(): assert loaded.function_call.arguments == req.function_call.arguments # Skip the BaseModel validation test since we're no longer using Pydantic - # The Contents union will need to be handled differently when we fully migrate + # The Content union will need to be handled differently when we fully migrate def test_function_approval_accepts_mcp_call(): """Ensure FunctionApprovalRequestContent supports MCP server tool calls.""" - mcp_call = MCPServerToolCallContent(call_id="c-mcp", tool_name="tool", server_name="srv", arguments={"x": 1}) - req = FunctionApprovalRequestContent(id="req-mcp", function_call=mcp_call) + mcp_call = Content.from_mcp_server_tool_call( + call_id="c-mcp", tool_name="tool", server_name="srv", arguments={"x": 1} + ) + req = Content.from_function_approval_request(id="req-mcp", function_call=mcp_call) - assert isinstance(req.function_call, MCPServerToolCallContent) + assert isinstance(req.function_call, Content) assert req.function_call.call_id == "c-mcp" @@ -561,47 +545,21 @@ def test_function_approval_accepts_mcp_call(): @mark.parametrize( - "content_type, args", + "args", [ - (TextContent, {"text": "Hello, world!"}), - (DataContent, {"data": b"Hello, world!", "media_type": "text/plain"}), - (UriContent, {"uri": "http://example.com", "media_type": "text/html"}), - (FunctionCallContent, {"call_id": "1", "name": "example_function", "arguments": {}}), - (FunctionResultContent, {"call_id": "1", "result": {}}), - (HostedFileContent, {"file_id": "file-123"}), - (HostedVectorStoreContent, {"vector_store_id": "vs-789"}), + {"type": "text", "text": "Hello, world!"}, + {"type": "uri", "uri": "http://example.com", "media_type": "text/html"}, + {"type": "function_call", "call_id": "1", "name": "example_function", "arguments": {}}, + {"type": "function_result", "call_id": "1", "result": {}}, + 
{"type": "file", "file_id": "file-123"}, + {"type": "vector_store", "vector_store_id": "vs-789"}, ], ) -def test_ai_content_serialization(content_type: type[BaseContent], args: dict): - content = content_type(**args) +def test_ai_content_serialization(args: dict): + content = Content(**args) serialized = content.to_dict() - deserialized = content_type.from_dict(serialized) - # Note: Since we're no longer using Pydantic, we can't do direct equality comparison - # Instead, let's check that the deserialized object has the same attributes - - # Special handling for DataContent which doesn't expose the original 'data' parameter - if content_type == DataContent and "data" in args: - # For DataContent created with data, check uri and media_type instead - assert hasattr(deserialized, "uri") - assert hasattr(deserialized, "media_type") - assert deserialized.media_type == args["media_type"] # type: ignore - # Skip checking the 'data' attribute since it's converted to uri - for key, value in args.items(): - if key != "data": # Skip the 'data' key for DataContent - assert getattr(deserialized, key) == value - else: - # Normal attribute checking for other content types - for key, value in args.items(): - if value: - assert getattr(deserialized, key) == value - - # For now, skip the TestModel validation since it still uses Pydantic - # This would need to be updated when we migrate more classes - # class TestModel(BaseModel): - # content: Contents - # - # test_item = TestModel.model_validate({"content": serialized}) - # assert isinstance(test_item.content, content_type) + deserialized = Content.from_dict(serialized) + assert content == deserialized # region ChatMessage @@ -615,26 +573,26 @@ def test_chat_message_text(): # Check the type and content assert message.role == Role.USER assert len(message.contents) == 1 - assert isinstance(message.contents[0], TextContent) + assert message.contents[0].type == "text" assert message.contents[0].text == "Hello, how are you?" 
assert message.text == "Hello, how are you?" # Ensure the instance is of type BaseContent - assert isinstance(message.contents[0], BaseContent) + assert isinstance(message.contents[0], Content) def test_chat_message_contents(): """Test the ChatMessage class to ensure it initializes correctly with contents.""" # Create a ChatMessage with a role and multiple contents - content1 = TextContent("Hello, how are you?") - content2 = TextContent("I'm fine, thank you!") + content1 = Content.from_text("Hello, how are you?") + content2 = Content.from_text("I'm fine, thank you!") message = ChatMessage(role="user", contents=[content1, content2]) # Check the type and content assert message.role == Role.USER assert len(message.contents) == 2 - assert isinstance(message.contents[0], TextContent) - assert isinstance(message.contents[1], TextContent) + assert message.contents[0].type == "text" + assert message.contents[1].type == "text" assert message.contents[0].text == "Hello, how are you?" assert message.contents[1].text == "I'm fine, thank you!" assert message.text == "Hello, how are you? I'm fine, thank you!" 
@@ -705,28 +663,146 @@ def test_chat_response_with_format_init(): assert response.value.response == "Hello" +def test_chat_response_value_raises_on_invalid_schema(): + """Test that value property raises ValidationError with field constraint details.""" + from typing import Literal + + from pydantic import Field, ValidationError + + class StrictSchema(BaseModel): + id: Literal[5] + name: str = Field(min_length=10) + score: int = Field(gt=0, le=100) + + message = ChatMessage(role="assistant", text='{"id": 1, "name": "test", "score": -5}') + response = ChatResponse(messages=message, response_format=StrictSchema) + + with raises(ValidationError) as exc_info: + _ = response.value + + errors = exc_info.value.errors() + error_fields = {e["loc"][0] for e in errors} + assert "id" in error_fields, "Expected 'id' Literal constraint error" + assert "name" in error_fields, "Expected 'name' min_length constraint error" + assert "score" in error_fields, "Expected 'score' gt constraint error" + + +def test_chat_response_try_parse_value_returns_none_on_invalid(): + """Test that try_parse_value returns None on validation failure with Field constraints.""" + from typing import Literal + + from pydantic import Field + + class StrictSchema(BaseModel): + id: Literal[5] + name: str = Field(min_length=10) + score: int = Field(gt=0, le=100) + + message = ChatMessage(role="assistant", text='{"id": 1, "name": "test", "score": -5}') + response = ChatResponse(messages=message) + + result = response.try_parse_value(StrictSchema) + assert result is None + + +def test_chat_response_try_parse_value_returns_value_on_success(): + """Test that try_parse_value returns parsed value when all constraints pass.""" + from pydantic import Field + + class MySchema(BaseModel): + name: str = Field(min_length=3) + score: int = Field(ge=0, le=100) + + message = ChatMessage(role="assistant", text='{"name": "test", "score": 85}') + response = ChatResponse(messages=message) + + result = 
response.try_parse_value(MySchema) + assert result is not None + assert result.name == "test" + assert result.score == 85 + + +def test_agent_response_value_raises_on_invalid_schema(): + """Test that AgentResponse.value property raises ValidationError with field constraint details.""" + from typing import Literal + + from pydantic import Field, ValidationError + + class StrictSchema(BaseModel): + id: Literal[5] + name: str = Field(min_length=10) + score: int = Field(gt=0, le=100) + + message = ChatMessage(role="assistant", text='{"id": 1, "name": "test", "score": -5}') + response = AgentResponse(messages=message, response_format=StrictSchema) + + with raises(ValidationError) as exc_info: + _ = response.value + + errors = exc_info.value.errors() + error_fields = {e["loc"][0] for e in errors} + assert "id" in error_fields, "Expected 'id' Literal constraint error" + assert "name" in error_fields, "Expected 'name' min_length constraint error" + assert "score" in error_fields, "Expected 'score' gt constraint error" + + +def test_agent_response_try_parse_value_returns_none_on_invalid(): + """Test that AgentResponse.try_parse_value returns None on Field constraint failure.""" + from typing import Literal + + from pydantic import Field + + class StrictSchema(BaseModel): + id: Literal[5] + name: str = Field(min_length=10) + score: int = Field(gt=0, le=100) + + message = ChatMessage(role="assistant", text='{"id": 1, "name": "test", "score": -5}') + response = AgentResponse(messages=message) + + result = response.try_parse_value(StrictSchema) + assert result is None + + +def test_agent_response_try_parse_value_returns_value_on_success(): + """Test that AgentResponse.try_parse_value returns parsed value when all constraints pass.""" + from pydantic import Field + + class MySchema(BaseModel): + name: str = Field(min_length=3) + score: int = Field(ge=0, le=100) + + message = ChatMessage(role="assistant", text='{"name": "test", "score": 85}') + response = 
AgentResponse(messages=message) + + result = response.try_parse_value(MySchema) + assert result is not None + assert result.name == "test" + assert result.score == 85 + + # region ChatResponseUpdate def test_chat_response_update(): """Test the ChatResponseUpdate class to ensure it initializes correctly with a message.""" # Create a ChatMessage - message = TextContent(text="I'm doing well, thank you!") + message = Content.from_text(text="I'm doing well, thank you!") # Create a ChatResponseUpdate with the message response_update = ChatResponseUpdate(contents=[message]) # Check the type and content assert response_update.contents[0].text == "I'm doing well, thank you!" - assert isinstance(response_update.contents[0], TextContent) + assert response_update.contents[0].type == "text" assert response_update.text == "I'm doing well, thank you!" def test_chat_response_updates_to_chat_response_one(): """Test converting ChatResponseUpdate to ChatResponse.""" # Create a ChatMessage - message1 = TextContent("I'm doing well, ") - message2 = TextContent("thank you!") + message1 = Content.from_text("I'm doing well, ") + message2 = Content.from_text("thank you!") # Create a ChatResponseUpdate with the message response_updates = [ @@ -748,8 +824,8 @@ def test_chat_response_updates_to_chat_response_one(): def test_chat_response_updates_to_chat_response_two(): """Test converting ChatResponseUpdate to ChatResponse.""" # Create a ChatMessage - message1 = TextContent("I'm doing well, ") - message2 = TextContent("thank you!") + message1 = Content.from_text("I'm doing well, ") + message2 = Content.from_text("thank you!") # Create a ChatResponseUpdate with the message response_updates = [ @@ -772,13 +848,13 @@ def test_chat_response_updates_to_chat_response_two(): def test_chat_response_updates_to_chat_response_multiple(): """Test converting ChatResponseUpdate to ChatResponse.""" # Create a ChatMessage - message1 = TextContent("I'm doing well, ") - message2 = TextContent("thank you!") + 
message1 = Content.from_text("I'm doing well, ") + message2 = Content.from_text("thank you!") # Create a ChatResponseUpdate with the message response_updates = [ ChatResponseUpdate(text=message1, message_id="1"), - ChatResponseUpdate(contents=[TextReasoningContent(text="Additional context")], message_id="1"), + ChatResponseUpdate(contents=[Content.from_text_reasoning(text="Additional context")], message_id="1"), ChatResponseUpdate(text=message2, message_id="1"), ] @@ -796,15 +872,15 @@ def test_chat_response_updates_to_chat_response_multiple(): def test_chat_response_updates_to_chat_response_multiple_multiple(): """Test converting ChatResponseUpdate to ChatResponse.""" # Create a ChatMessage - message1 = TextContent("I'm doing well, ", raw_representation="I'm doing well, ") - message2 = TextContent("thank you!") + message1 = Content.from_text("I'm doing well, ", raw_representation="I'm doing well, ") + message2 = Content.from_text("thank you!") # Create a ChatResponseUpdate with the message response_updates = [ ChatResponseUpdate(text=message1, message_id="1"), ChatResponseUpdate(text=message2, message_id="1"), - ChatResponseUpdate(contents=[TextReasoningContent(text="Additional context")], message_id="1"), - ChatResponseUpdate(contents=[TextContent(text="More context")], message_id="1"), + ChatResponseUpdate(contents=[Content.from_text_reasoning(text="Additional context")], message_id="1"), + ChatResponseUpdate(contents=[Content.from_text(text="More context")], message_id="1"), ChatResponseUpdate(text="Final part", message_id="1"), ] @@ -818,11 +894,11 @@ def test_chat_response_updates_to_chat_response_multiple_multiple(): assert chat_response.messages[0].contents[0].raw_representation is not None assert len(chat_response.messages[0].contents) == 3 - assert isinstance(chat_response.messages[0].contents[0], TextContent) + assert chat_response.messages[0].contents[0].type == "text" assert chat_response.messages[0].contents[0].text == "I'm doing well, thank you!" 
- assert isinstance(chat_response.messages[0].contents[1], TextReasoningContent) + assert chat_response.messages[0].contents[1].type == "text_reasoning" assert chat_response.messages[0].contents[1].text == "Additional context" - assert isinstance(chat_response.messages[0].contents[2], TextContent) + assert chat_response.messages[0].contents[2].type == "text" assert chat_response.messages[0].contents[2].text == "More contextFinal part" assert chat_response.text == "I'm doing well, thank you! More contextFinal part" @@ -1021,8 +1097,8 @@ def chat_message() -> ChatMessage: @fixture -def text_content() -> TextContent: - return TextContent(text="Test content") +def text_content() -> Content: + return Content.from_text(text="Test content") @fixture @@ -1031,7 +1107,7 @@ def agent_response(chat_message: ChatMessage) -> AgentResponse: @fixture -def agent_response_update(text_content: TextContent) -> AgentResponseUpdate: +def agent_response_update(text_content: Content) -> AgentResponseUpdate: return AgentResponseUpdate(role=Role.ASSISTANT, contents=[text_content]) @@ -1079,7 +1155,7 @@ def test_agent_run_response_str_method(chat_message: ChatMessage) -> None: # region AgentResponseUpdate -def test_agent_run_response_update_init_content_list(text_content: TextContent) -> None: +def test_agent_run_response_update_init_content_list(text_content: Content) -> None: update = AgentResponseUpdate(contents=[text_content, text_content]) assert len(update.contents) == 2 assert update.contents[0] == text_content @@ -1090,7 +1166,7 @@ def test_agent_run_response_update_init_none_content() -> None: assert update.contents == [] -def test_agent_run_response_update_text_property(text_content: TextContent) -> None: +def test_agent_run_response_update_text_property(text_content: Content) -> None: update = AgentResponseUpdate(contents=[text_content, text_content]) assert update.text == "Test contentTest content" @@ -1100,7 +1176,7 @@ def test_agent_run_response_update_text_property_empty() -> 
None: assert update.text == "" -def test_agent_run_response_update_str_method(text_content: TextContent) -> None: +def test_agent_run_response_update_str_method(text_content: Content) -> None: update = AgentResponseUpdate(contents=[text_content]) assert str(update) == "Test content" @@ -1110,7 +1186,7 @@ def test_agent_run_response_update_created_at() -> None: # Test with a properly formatted UTC timestamp utc_timestamp = "2024-12-01T00:31:30.000000Z" update = AgentResponseUpdate( - contents=[TextContent(text="test")], + contents=[Content.from_text(text="test")], role=Role.ASSISTANT, created_at=utc_timestamp, ) @@ -1121,7 +1197,7 @@ def test_agent_run_response_update_created_at() -> None: now_utc = datetime.now(tz=timezone.utc) formatted_utc = now_utc.strftime("%Y-%m-%dT%H:%M:%S.%fZ") update_with_now = AgentResponseUpdate( - contents=[TextContent(text="test")], + contents=[Content.from_text(text="test")], role=Role.ASSISTANT, created_at=formatted_utc, ) @@ -1155,71 +1231,77 @@ def test_agent_run_response_created_at() -> None: def test_error_content_str(): - e1 = ErrorContent(message="Oops", error_code="E1") + e1 = Content.from_error(message="Oops", error_code="E1") assert str(e1) == "Error E1: Oops" - e2 = ErrorContent(message="Oops") + e2 = Content.from_error(message="Oops") assert str(e2) == "Oops" - e3 = ErrorContent() + e3 = Content.from_error() assert str(e3) == "Unknown error" -# region Annotations +# region Annotation def test_annotations_models_and_roundtrip(): - span = TextSpanRegion(start_index=0, end_index=5) - cit = CitationAnnotation(title="Doc", url="http://example.com", snippet="Snippet", annotated_regions=[span]) + span = TextSpanRegion(type="text_span", start_index=0, end_index=5) + cit = Annotation( + type="citation", title="Doc", url="http://example.com", snippet="Snippet", annotated_regions=[span] + ) # Attach to content - content = TextContent(text="hello", additional_properties={"v": 1}) + content = Content.from_text(text="hello", 
additional_properties={"v": 1}) content.annotations = [cit] dumped = content.to_dict() - loaded = TextContent.from_dict(dumped) + loaded = Content.from_dict(dumped) assert isinstance(loaded.annotations, list) assert len(loaded.annotations) == 1 - # After migration from Pydantic, annotations should be properly reconstructed as objects - assert isinstance(loaded.annotations[0], CitationAnnotation) + # After migration from Pydantic, annotations are now TypedDicts (dicts at runtime) + assert isinstance(loaded.annotations[0], dict) # Check the annotation properties loaded_cit = loaded.annotations[0] - assert loaded_cit.type == "citation" - assert loaded_cit.title == "Doc" - assert loaded_cit.url == "http://example.com" - assert loaded_cit.snippet == "Snippet" + assert loaded_cit["type"] == "citation" + assert loaded_cit["title"] == "Doc" + assert loaded_cit["url"] == "http://example.com" + assert loaded_cit["snippet"] == "Snippet" # Check the annotated_regions - assert isinstance(loaded_cit.annotated_regions, list) - assert len(loaded_cit.annotated_regions) == 1 - assert isinstance(loaded_cit.annotated_regions[0], TextSpanRegion) - assert loaded_cit.annotated_regions[0].type == "text_span" - assert loaded_cit.annotated_regions[0].start_index == 0 - assert loaded_cit.annotated_regions[0].end_index == 5 + assert isinstance(loaded_cit["annotated_regions"], list) + assert len(loaded_cit["annotated_regions"]) == 1 + assert isinstance(loaded_cit["annotated_regions"][0], dict) + assert loaded_cit["annotated_regions"][0]["type"] == "text_span" + assert loaded_cit["annotated_regions"][0]["start_index"] == 0 + assert loaded_cit["annotated_regions"][0]["end_index"] == 5 def test_function_call_merge_in_process_update_and_usage_aggregation(): # Two function call chunks with same call_id should merge - u1 = ChatResponseUpdate(contents=[FunctionCallContent(call_id="c1", name="f", arguments="{")], message_id="m") - u2 = ChatResponseUpdate(contents=[FunctionCallContent(call_id="c1", 
name="f", arguments="}")], message_id="m") + u1 = ChatResponseUpdate( + contents=[Content.from_function_call(call_id="c1", name="f", arguments="{")], message_id="m" + ) + u2 = ChatResponseUpdate( + contents=[Content.from_function_call(call_id="c1", name="f", arguments="}")], message_id="m" + ) # plus usage - u3 = ChatResponseUpdate(contents=[UsageContent(UsageDetails(input_token_count=1, output_token_count=2))]) + u3 = ChatResponseUpdate(contents=[Content.from_usage(UsageDetails(input_token_count=1, output_token_count=2))]) resp = ChatResponse.from_chat_response_updates([u1, u2, u3]) assert len(resp.messages) == 1 last_contents = resp.messages[0].contents - assert any(isinstance(c, FunctionCallContent) for c in last_contents) - fcs = [c for c in last_contents if isinstance(c, FunctionCallContent)] + assert any(c.type == "function_call" for c in last_contents) + fcs = [c for c in last_contents if c.type == "function_call"] assert len(fcs) == 1 assert fcs[0].arguments == "{}" assert resp.usage_details is not None - assert resp.usage_details.input_token_count == 1 - assert resp.usage_details.output_token_count == 2 + assert resp.usage_details["input_token_count"] == 1 + assert resp.usage_details["output_token_count"] == 2 def test_function_call_incompatible_ids_are_not_merged(): - u1 = ChatResponseUpdate(contents=[FunctionCallContent(call_id="a", name="f", arguments="x")], message_id="m") - u2 = ChatResponseUpdate(contents=[FunctionCallContent(call_id="b", name="f", arguments="y")], message_id="m") + u1 = ChatResponseUpdate(contents=[Content.from_function_call(call_id="a", name="f", arguments="x")], message_id="m") + u2 = ChatResponseUpdate(contents=[Content.from_function_call(call_id="b", name="f", arguments="y")], message_id="m") resp = ChatResponse.from_chat_response_updates([u1, u2]) - fcs = [c for c in resp.messages[0].contents if isinstance(c, FunctionCallContent)] + fcs = [c for c in resp.messages[0].contents if c.type == "function_call"] assert len(fcs) == 2 
@@ -1261,13 +1343,13 @@ def test_response_update_propagates_fields_and_metadata(): def test_text_coalescing_preserves_first_properties(): - t1 = TextContent("A", raw_representation={"r": 1}, additional_properties={"p": 1}) - t2 = TextContent("B") + t1 = Content.from_text("A", raw_representation={"r": 1}, additional_properties={"p": 1}) + t2 = Content.from_text("B") upd1 = ChatResponseUpdate(text=t1, message_id="x") upd2 = ChatResponseUpdate(text=t2, message_id="x") resp = ChatResponse.from_chat_response_updates([upd1, upd2]) # After coalescing there should be a single TextContent with merged text and preserved props from first - items = [c for c in resp.messages[0].contents if isinstance(c, TextContent)] + items = [c for c in resp.messages[0].contents if c.type == "text"] assert len(items) >= 1 assert items[0].text == "AB" assert items[0].raw_representation == {"r": 1} @@ -1275,9 +1357,9 @@ def test_text_coalescing_preserves_first_properties(): def test_function_call_content_parse_numeric_or_list(): - c_num = FunctionCallContent(call_id="1", name="f", arguments="123") + c_num = Content.from_function_call(call_id="1", name="f", arguments="123") assert c_num.parse_arguments() == {"raw": 123} - c_list = FunctionCallContent(call_id="1", name="f", arguments="[1,2]") + c_list = Content.from_function_call(call_id="1", name="f", arguments="[1,2]") assert c_list.parse_arguments() == {"raw": [1, 2]} @@ -1295,8 +1377,8 @@ def agent_run_response_async() -> AgentResponse: async def test_agent_run_response_from_async_generator(): async def gen(): - yield AgentResponseUpdate(contents=[TextContent("A")]) - yield AgentResponseUpdate(contents=[TextContent("B")]) + yield AgentResponseUpdate(contents=[Content.from_text("A")]) + yield AgentResponseUpdate(contents=[Content.from_text("B")]) r = await AgentResponse.from_agent_response_generator(gen()) assert r.text == "AB" @@ -1309,67 +1391,65 @@ def test_text_content_add_comprehensive_coverage(): """Test TextContent __add__ method with 
various combinations to improve coverage.""" # Test with None raw_representation - t1 = TextContent("Hello", raw_representation=None, annotations=None) - t2 = TextContent(" World", raw_representation=None, annotations=None) + t1 = Content.from_text("Hello", raw_representation=None, annotations=None) + t2 = Content.from_text(" World", raw_representation=None, annotations=None) result = t1 + t2 assert result.text == "Hello World" assert result.raw_representation is None assert result.annotations is None # Test first has raw_representation, second has None - t1 = TextContent("Hello", raw_representation="raw1", annotations=None) - t2 = TextContent(" World", raw_representation=None, annotations=None) + t1 = Content.from_text("Hello", raw_representation="raw1", annotations=None) + t2 = Content.from_text(" World", raw_representation=None, annotations=None) result = t1 + t2 assert result.text == "Hello World" assert result.raw_representation == "raw1" # Test first has None, second has raw_representation - t1 = TextContent("Hello", raw_representation=None, annotations=None) - t2 = TextContent(" World", raw_representation="raw2", annotations=None) + t1 = Content.from_text("Hello", raw_representation=None, annotations=None) + t2 = Content.from_text(" World", raw_representation="raw2", annotations=None) result = t1 + t2 assert result.text == "Hello World" assert result.raw_representation == "raw2" # Test both have raw_representation (non-list) - t1 = TextContent("Hello", raw_representation="raw1", annotations=None) - t2 = TextContent(" World", raw_representation="raw2", annotations=None) + t1 = Content.from_text("Hello", raw_representation="raw1", annotations=None) + t2 = Content.from_text(" World", raw_representation="raw2", annotations=None) result = t1 + t2 assert result.text == "Hello World" assert result.raw_representation == ["raw1", "raw2"] # Test first has list raw_representation, second has single - t1 = TextContent("Hello", raw_representation=["raw1", "raw2"], 
annotations=None) - t2 = TextContent(" World", raw_representation="raw3", annotations=None) + t1 = Content.from_text("Hello", raw_representation=["raw1", "raw2"], annotations=None) + t2 = Content.from_text(" World", raw_representation="raw3", annotations=None) result = t1 + t2 assert result.text == "Hello World" assert result.raw_representation == ["raw1", "raw2", "raw3"] # Test both have list raw_representation - t1 = TextContent("Hello", raw_representation=["raw1", "raw2"], annotations=None) - t2 = TextContent(" World", raw_representation=["raw3", "raw4"], annotations=None) + t1 = Content.from_text("Hello", raw_representation=["raw1", "raw2"], annotations=None) + t2 = Content.from_text(" World", raw_representation=["raw3", "raw4"], annotations=None) result = t1 + t2 assert result.text == "Hello World" assert result.raw_representation == ["raw1", "raw2", "raw3", "raw4"] # Test first has single raw_representation, second has list - t1 = TextContent("Hello", raw_representation="raw1", annotations=None) - t2 = TextContent(" World", raw_representation=["raw2", "raw3"], annotations=None) + t1 = Content.from_text("Hello", raw_representation="raw1", annotations=None) + t2 = Content.from_text(" World", raw_representation=["raw2", "raw3"], annotations=None) result = t1 + t2 assert result.text == "Hello World" assert result.raw_representation == ["raw1", "raw2", "raw3"] def test_text_content_iadd_coverage(): - """Test TextContent __iadd__ method for better coverage.""" + """Test TextContent += operator for better coverage.""" - t1 = TextContent("Hello", raw_representation="raw1", additional_properties={"key1": "val1"}) - t2 = TextContent(" World", raw_representation="raw2", additional_properties={"key2": "val2"}) + t1 = Content.from_text("Hello", raw_representation="raw1", additional_properties={"key1": "val1"}) + t2 = Content.from_text(" World", raw_representation="raw2", additional_properties={"key2": "val2"}) - original_id = id(t1) t1 += t2 - # Should modify in place - 
assert id(t1) == original_id + # Content doesn't implement __iadd__, so += creates a new object via __add__ assert t1.text == "Hello World" assert t1.raw_representation == ["raw1", "raw2"] assert t1.additional_properties == {"key1": "val1", "key2": "val2"} @@ -1378,23 +1458,22 @@ def test_text_content_iadd_coverage(): def test_text_reasoning_content_add_coverage(): """Test TextReasoningContent __add__ method for better coverage.""" - t1 = TextReasoningContent("Thinking 1") - t2 = TextReasoningContent(" Thinking 2") + t1 = Content.from_text_reasoning(text="Thinking 1") + t2 = Content.from_text_reasoning(text=" Thinking 2") result = t1 + t2 assert result.text == "Thinking 1 Thinking 2" def test_text_reasoning_content_iadd_coverage(): - """Test TextReasoningContent __iadd__ method for better coverage.""" + """Test TextReasoningContent += operator for better coverage.""" - t1 = TextReasoningContent("Thinking 1") - t2 = TextReasoningContent(" Thinking 2") + t1 = Content.from_text_reasoning(text="Thinking 1") + t2 = Content.from_text_reasoning(text=" Thinking 2") - original_id = id(t1) t1 += t2 - assert id(t1) == original_id + # Content doesn't implement __iadd__, so += creates a new object via __add__ assert t1.text == "Thinking 1 Thinking 2" @@ -1402,49 +1481,46 @@ def test_comprehensive_to_dict_exclude_options(): """Test to_dict methods with various exclude options for better coverage.""" # Test TextContent with exclude_none - text_content = TextContent("Hello", raw_representation=None, additional_properties={"prop": "val"}) + text_content = Content.from_text("Hello", raw_representation=None, additional_properties={"prop": "val"}) text_dict = text_content.to_dict(exclude_none=True) assert "raw_representation" not in text_dict - assert text_dict["prop"] == "val" + assert text_dict["additional_properties"]["prop"] == "val" # Test with custom exclude set text_dict_exclude = text_content.to_dict(exclude={"additional_properties"}) assert "additional_properties" not in 
text_dict_exclude assert "text" in text_dict_exclude - # Test UsageDetails with additional counts + # Test UsageDetails - it's a TypedDict now, not a class with to_dict usage = UsageDetails(input_token_count=5, custom_count=10) - usage_dict = usage.to_dict() - assert usage_dict["input_token_count"] == 5 - assert usage_dict["custom_count"] == 10 + assert usage["input_token_count"] == 5 + assert usage["custom_count"] == 10 - # Test UsageDetails exclude_none - usage_none = UsageDetails(input_token_count=5, output_token_count=None) - usage_dict_no_none = usage_none.to_dict(exclude_none=True) - assert "output_token_count" not in usage_dict_no_none - assert usage_dict_no_none["input_token_count"] == 5 + # Test UsageDetails exclude_none behavior isn't applicable to TypedDict + # TypedDict doesn't have a to_dict method def test_usage_details_iadd_edge_cases(): - """Test UsageDetails __iadd__ with edge cases for better coverage.""" + """Test UsageDetails addition with edge cases for better coverage.""" + from agent_framework._types import add_usage_details # Test with None values u1 = UsageDetails(input_token_count=None, output_token_count=5, custom1=10) u2 = UsageDetails(input_token_count=3, output_token_count=None, custom2=20) - u1 += u2 - assert u1.input_token_count == 3 - assert u1.output_token_count == 5 - assert u1.additional_counts["custom1"] == 10 - assert u1.additional_counts["custom2"] == 20 + result = add_usage_details(u1, u2) + assert result["input_token_count"] == 3 + assert result["output_token_count"] == 5 + assert result.get("custom1") == 10 + assert result.get("custom2") == 20 # Test merging additional counts u3 = UsageDetails(input_token_count=1, shared_count=5) u4 = UsageDetails(input_token_count=2, shared_count=15) - u3 += u4 - assert u3.input_token_count == 3 - assert u3.additional_counts["shared_count"] == 20 + result2 = add_usage_details(u3, u4) + assert result2["input_token_count"] == 3 + assert result2.get("shared_count") == 20 def 
test_chat_message_from_dict_with_mixed_content(): @@ -1461,9 +1537,9 @@ def test_chat_message_from_dict_with_mixed_content(): message = ChatMessage.from_dict(message_data) assert len(message.contents) == 3 # Unknown type is ignored - assert isinstance(message.contents[0], TextContent) - assert isinstance(message.contents[1], FunctionCallContent) - assert isinstance(message.contents[2], FunctionResultContent) + assert message.contents[0].type == "text" + assert message.contents[1].type == "function_call" + assert message.contents[2].type == "function_result" # Test round-trip message_dict = message.to_dict() @@ -1472,7 +1548,7 @@ def test_chat_message_from_dict_with_mixed_content(): def test_text_content_add_type_error(): """Test TextContent __add__ raises TypeError for incompatible types.""" - t1 = TextContent("Hello") + t1 = Content.from_text("Hello") with raises(TypeError, match="Incompatible type"): t1 + "not a TextContent" @@ -1483,12 +1559,13 @@ def test_comprehensive_serialization_methods(): # Test TextContent with all fields text_data = { + "type": "text", "text": "Hello world", "raw_representation": {"key": "value"}, - "prop": "val", + "additional_properties": {"prop": "val"}, "annotations": None, } - text_content = TextContent.from_dict(text_data) + text_content = Content.from_dict(text_data) assert text_content.text == "Hello world" assert text_content.raw_representation == {"key": "value"} assert text_content.additional_properties == {"prop": "val"} @@ -1496,7 +1573,7 @@ def test_comprehensive_serialization_methods(): # Test round-trip text_dict = text_content.to_dict() assert text_dict["text"] == "Hello world" - assert text_dict["prop"] == "val" + assert text_dict["additional_properties"] == {"prop": "val"} # Note: raw_representation is always excluded from to_dict() output # Test with exclude_none @@ -1504,8 +1581,13 @@ def test_comprehensive_serialization_methods(): assert "annotations" not in text_dict_no_none # Test FunctionResultContent - 
result_data = {"call_id": "call123", "result": "success", "additional_properties": {"meta": "data"}} - result_content = FunctionResultContent.from_dict(result_data) + result_data = { + "type": "function_result", + "call_id": "call123", + "result": "success", + "additional_properties": {"meta": "data"}, + } + result_content = Content.from_dict(result_data) assert result_content.call_id == "call123" assert result_content.result == "success" @@ -1515,9 +1597,9 @@ def test_chat_message_complex_content_serialization(): # Create a message with multiple content types contents = [ - TextContent("Hello"), - FunctionCallContent(call_id="call1", name="func", arguments={"arg": "val"}), - FunctionResultContent(call_id="call1", result="success"), + Content.from_text("Hello"), + Content.from_function_call(call_id="call1", name="func", arguments={"arg": "val"}), + Content.from_function_result(call_id="call1", result="success"), ] message = ChatMessage(role=Role.ASSISTANT, contents=contents) @@ -1532,9 +1614,9 @@ def test_chat_message_complex_content_serialization(): # Test from_dict round-trip reconstructed = ChatMessage.from_dict(message_dict) assert len(reconstructed.contents) == 3 - assert isinstance(reconstructed.contents[0], TextContent) - assert isinstance(reconstructed.contents[1], FunctionCallContent) - assert isinstance(reconstructed.contents[2], FunctionResultContent) + assert reconstructed.contents[0].type == "text" + assert reconstructed.contents[1].type == "function_call" + assert reconstructed.contents[2].type == "function_result" def test_usage_content_serialization_with_details(): @@ -1543,7 +1625,7 @@ def test_usage_content_serialization_with_details(): # Test from_dict with details as dict usage_data = { "type": "usage", - "details": { + "usage_details": { "type": "usage_details", "input_token_count": 10, "output_token_count": 20, @@ -1551,15 +1633,15 @@ def test_usage_content_serialization_with_details(): "custom_count": 5, }, } - usage_content = 
UsageContent.from_dict(usage_data) - assert isinstance(usage_content.details, UsageDetails) - assert usage_content.details.input_token_count == 10 - assert usage_content.details.additional_counts["custom_count"] == 5 + usage_content = Content(**usage_data) + assert isinstance(usage_content.usage_details, dict) + assert usage_content.usage_details["input_token_count"] == 10 + assert usage_content.usage_details["custom_count"] == 5 # Custom fields go directly in UsageDetails # Test to_dict with UsageDetails object usage_dict = usage_content.to_dict() - assert isinstance(usage_dict["details"], dict) - assert usage_dict["details"]["input_token_count"] == 10 + assert isinstance(usage_dict["usage_details"], dict) + assert usage_dict["usage_details"]["input_token_count"] == 10 def test_function_approval_response_content_serialization(): @@ -1577,8 +1659,8 @@ def test_function_approval_response_content_serialization(): "arguments": {"param": "value"}, }, } - response_content = FunctionApprovalResponseContent.from_dict(response_data) - assert isinstance(response_content.function_call, FunctionCallContent) + response_content = Content.from_dict(response_data) + assert response_content.function_call.type == "function_call" assert response_content.function_call.call_id == "call123" # Test to_dict with FunctionCallContent object @@ -1610,7 +1692,7 @@ def test_chat_response_complex_serialization(): assert len(response.messages) == 2 assert isinstance(response.messages[0], ChatMessage) assert isinstance(response.finish_reason, FinishReason) - assert isinstance(response.usage_details, UsageDetails) + assert isinstance(response.usage_details, dict) assert response.model_id == "gpt-4" # Should be stored as model_id # Test to_dict with complex objects @@ -1630,10 +1712,10 @@ def test_chat_response_update_all_content_types(): {"type": "text", "text": "Hello"}, {"type": "data", "data": b"base64data", "media_type": "text/plain"}, {"type": "uri", "uri": "http://example.com", 
"media_type": "text/html"}, - {"type": "error", "error": "An error occurred"}, + {"type": "error", "message": "An error occurred"}, {"type": "function_call", "call_id": "call1", "name": "func", "arguments": {}}, {"type": "function_result", "call_id": "call1", "result": "success"}, - {"type": "usage", "details": {"type": "usage_details", "input_token_count": 1}}, + {"type": "usage", "usage_details": {"input_token_count": 1}}, {"type": "hosted_file", "file_id": "file123"}, {"type": "hosted_vector_store", "vector_store_id": "vs123"}, { @@ -1653,18 +1735,18 @@ def test_chat_response_update_all_content_types(): update = ChatResponseUpdate.from_dict(update_data) assert len(update.contents) == 12 # unknown_type is skipped with warning - assert isinstance(update.contents[0], TextContent) - assert isinstance(update.contents[1], DataContent) - assert isinstance(update.contents[2], UriContent) - assert isinstance(update.contents[3], ErrorContent) - assert isinstance(update.contents[4], FunctionCallContent) - assert isinstance(update.contents[5], FunctionResultContent) - assert isinstance(update.contents[6], UsageContent) - assert isinstance(update.contents[7], HostedFileContent) - assert isinstance(update.contents[8], HostedVectorStoreContent) - assert isinstance(update.contents[9], FunctionApprovalRequestContent) - assert isinstance(update.contents[10], FunctionApprovalResponseContent) - assert isinstance(update.contents[11], TextReasoningContent) + assert update.contents[0].type == "text" + assert update.contents[1].type == "data" + assert update.contents[2].type == "uri" + assert update.contents[3].type == "error" + assert update.contents[4].type == "function_call" + assert update.contents[5].type == "function_result" + assert update.contents[6].type == "usage" + assert update.contents[7].type == "hosted_file" + assert update.contents[8].type == "hosted_vector_store" + assert update.contents[9].type == "function_approval_request" + assert update.contents[10].type == 
"function_approval_response" + assert update.contents[11].type == "text_reasoning" def test_agent_run_response_complex_serialization(): @@ -1686,7 +1768,7 @@ def test_agent_run_response_complex_serialization(): response = AgentResponse.from_dict(response_data) assert len(response.messages) == 2 assert isinstance(response.messages[0], ChatMessage) - assert isinstance(response.usage_details, UsageDetails) + assert isinstance(response.usage_details, dict) # Test to_dict response_dict = response.to_dict() @@ -1703,10 +1785,10 @@ def test_agent_run_response_update_all_content_types(): {"type": "text", "text": "Hello"}, {"type": "data", "data": b"base64data", "media_type": "text/plain"}, {"type": "uri", "uri": "http://example.com", "media_type": "text/html"}, - {"type": "error", "error": "An error occurred"}, + {"type": "error", "message": "An error occurred"}, {"type": "function_call", "call_id": "call1", "name": "func", "arguments": {}}, {"type": "function_result", "call_id": "call1", "result": "success"}, - {"type": "usage", "details": {"type": "usage_details", "input_token_count": 1}}, + {"type": "usage", "usage_details": {"input_token_count": 1}}, {"type": "hosted_file", "file_id": "file123"}, {"type": "hosted_vector_store", "vector_store_id": "vs123"}, { @@ -1750,7 +1832,7 @@ def test_agent_run_response_update_all_content_types(): "content_class,init_kwargs", [ pytest.param( - TextContent, + Content, { "type": "text", "text": "Hello world", @@ -1759,7 +1841,7 @@ def test_agent_run_response_update_all_content_types(): id="text_content", ), pytest.param( - TextReasoningContent, + Content, { "type": "text_reasoning", "text": "Reasoning text", @@ -1768,7 +1850,7 @@ def test_agent_run_response_update_all_content_types(): id="text_reasoning_content", ), pytest.param( - DataContent, + Content, { "type": "data", "uri": "data:text/plain;base64,dGVzdCBkYXRh", @@ -1776,7 +1858,7 @@ def test_agent_run_response_update_all_content_types(): id="data_content_with_uri", ), 
pytest.param( - DataContent, + Content, { "type": "data", "data": b"test data", @@ -1785,7 +1867,7 @@ def test_agent_run_response_update_all_content_types(): id="data_content_with_bytes", ), pytest.param( - UriContent, + Content, { "type": "uri", "uri": "http://example.com", @@ -1794,12 +1876,12 @@ def test_agent_run_response_update_all_content_types(): id="uri_content", ), pytest.param( - HostedFileContent, + Content, {"type": "hosted_file", "file_id": "file-123"}, id="hosted_file_content", ), pytest.param( - HostedVectorStoreContent, + Content, { "type": "hosted_vector_store", "vector_store_id": "vs-789", @@ -1807,7 +1889,7 @@ def test_agent_run_response_update_all_content_types(): id="hosted_vector_store_content", ), pytest.param( - FunctionCallContent, + Content, { "type": "function_call", "call_id": "call-1", @@ -1817,7 +1899,7 @@ def test_agent_run_response_update_all_content_types(): id="function_call_content", ), pytest.param( - FunctionResultContent, + Content, { "type": "function_result", "call_id": "call-1", @@ -1826,7 +1908,7 @@ def test_agent_run_response_update_all_content_types(): id="function_result_content", ), pytest.param( - ErrorContent, + Content, { "type": "error", "message": "Error occurred", @@ -1835,10 +1917,10 @@ def test_agent_run_response_update_all_content_types(): id="error_content", ), pytest.param( - UsageContent, + Content, { "type": "usage", - "details": { + "usage_details": { "type": "usage_details", "input_token_count": 10, "output_token_count": 20, @@ -1848,7 +1930,7 @@ def test_agent_run_response_update_all_content_types(): id="usage_content", ), pytest.param( - FunctionApprovalRequestContent, + Content, { "type": "function_approval_request", "id": "req-1", @@ -1857,7 +1939,7 @@ def test_agent_run_response_update_all_content_types(): id="function_approval_request", ), pytest.param( - FunctionApprovalResponseContent, + Content, { "type": "function_approval_response", "id": "resp-1", @@ -1960,10 +2042,10 @@ def 
test_agent_run_response_update_all_content_types(): ), ], ) -def test_content_roundtrip_serialization(content_class: type[BaseContent], init_kwargs: dict[str, Any]): +def test_content_roundtrip_serialization(content_class: type[Content], init_kwargs: dict[str, Any]): """Test to_dict/from_dict roundtrip for all content types.""" - # Create instance - content = content_class(**init_kwargs) + # Create instance using from_dict to handle nested dict-to-object conversions + content = content_class.from_dict(init_kwargs) # Serialize to dict content_dict = content.to_dict() @@ -1991,7 +2073,7 @@ def test_content_roundtrip_serialization(content_class: type[BaseContent], init_ continue # Special handling for DataContent created with 'data' parameter - if content_class == DataContent and key == "data": + if hasattr(content, "type") and content.type == "data" and key == "data": # DataContent converts 'data' to 'uri', so we skip checking 'data' attribute # Instead we verify that uri and media_type are set correctly assert hasattr(reconstructed, "uri") @@ -2013,108 +2095,48 @@ def test_content_roundtrip_serialization(content_class: type[BaseContent], init_ if isinstance(value[0], dict) and hasattr(reconstructed_value[0], "to_dict"): # Compare each item by serializing the reconstructed object assert len(reconstructed_value) == len(value) - + for orig_dict, recon_obj in zip(value, reconstructed_value): + recon_dict = recon_obj.to_dict() + # Compare all keys from original dict (reconstructed may have extra default fields) + for k, v in orig_dict.items(): + assert k in recon_dict, f"Key '{k}' missing from reconstructed dict" + # For nested lists, recursively compare + if isinstance(v, list) and v and isinstance(v[0], dict): + assert len(recon_dict[k]) == len(v) + for orig_item, recon_item in zip(v, recon_dict[k]): + # Compare essential keys, ignoring fields like additional_properties + for item_key, item_val in orig_item.items(): + assert item_key in recon_item + assert 
recon_item[item_key] == item_val + else: + assert recon_dict[k] == v, f"Value mismatch for key '{k}'" else: assert reconstructed_value == value # Special handling for dicts that get converted to objects (like UsageDetails, FunctionCallContent) elif isinstance(value, dict) and hasattr(reconstructed_value, "to_dict"): - # Compare the dict with the serialized form of the object, excluding 'type' key + # Compare the dict with the serialized form of the object reconstructed_dict = reconstructed_value.to_dict() - if value: - assert len(reconstructed_dict) == len(value) + # Verify all keys from the original dict are in the reconstructed dict + for k, v in value.items(): + assert k in reconstructed_dict, f"Key '{k}' missing from reconstructed dict" + assert reconstructed_dict[k] == v, f"Value mismatch for key '{k}'" else: assert reconstructed_value == value def test_text_content_with_annotations_serialization(): - """Test TextContent with CitationAnnotation and TextSpanRegion roundtrip serialization.""" - # Create TextSpanRegion - region = TextSpanRegion(start_index=0, end_index=5) - - # Create CitationAnnotation with region - citation = CitationAnnotation( - title="Test Citation", - url="http://example.com/citation", - file_id="file-123", - tool_name="test_tool", - snippet="This is a test snippet", - annotated_regions=[region], - additional_properties={"custom": "value"}, - ) - - # Create TextContent with annotation - content = TextContent( - text="Hello world", annotations=[citation], additional_properties={"content_key": "content_val"} - ) - - # Serialize to dict - content_dict = content.to_dict() - - # Verify structure - assert content_dict["type"] == "text" - assert content_dict["text"] == "Hello world" - assert content_dict["content_key"] == "content_val" - assert len(content_dict["annotations"]) == 1 - - # Verify annotation structure - annotation_dict = content_dict["annotations"][0] - assert annotation_dict["type"] == "citation" - assert annotation_dict["title"] == 
"Test Citation" - assert annotation_dict["url"] == "http://example.com/citation" - assert annotation_dict["file_id"] == "file-123" - assert annotation_dict["tool_name"] == "test_tool" - assert annotation_dict["snippet"] == "This is a test snippet" - assert annotation_dict["custom"] == "value" - - # Verify region structure - assert len(annotation_dict["annotated_regions"]) == 1 - region_dict = annotation_dict["annotated_regions"][0] - assert region_dict["type"] == "text_span" - assert region_dict["start_index"] == 0 - assert region_dict["end_index"] == 5 - - # Deserialize from dict - reconstructed = TextContent.from_dict(content_dict) - - # Verify reconstructed content - assert isinstance(reconstructed, TextContent) - assert reconstructed.text == "Hello world" - assert reconstructed.type == "text" - assert reconstructed.additional_properties == {"content_key": "content_val"} - - # Verify reconstructed annotation - assert len(reconstructed.annotations) == 1 # type: ignore[arg-type] - recon_annotation = reconstructed.annotations[0] # type: ignore[index] - assert isinstance(recon_annotation, CitationAnnotation) - assert recon_annotation.title == "Test Citation" - assert recon_annotation.url == "http://example.com/citation" - assert recon_annotation.file_id == "file-123" - assert recon_annotation.tool_name == "test_tool" - assert recon_annotation.snippet == "This is a test snippet" - assert recon_annotation.additional_properties == {"custom": "value"} - - # Verify reconstructed region - assert len(recon_annotation.annotated_regions) == 1 # type: ignore[arg-type] - recon_region = recon_annotation.annotated_regions[0] # type: ignore[index] - assert isinstance(recon_region, TextSpanRegion) - assert recon_region.start_index == 0 - assert recon_region.end_index == 5 - assert recon_region.type == "text_span" - - -def test_text_content_with_multiple_annotations_serialization(): """Test TextContent with multiple annotations roundtrip serialization.""" # Create multiple regions 
- region1 = TextSpanRegion(start_index=0, end_index=5) - region2 = TextSpanRegion(start_index=6, end_index=11) + region1 = TextSpanRegion(type="text_span", start_index=0, end_index=5) + region2 = TextSpanRegion(type="text_span", start_index=6, end_index=11) # Create multiple citations - citation1 = CitationAnnotation(title="Citation 1", url="http://example.com/1", annotated_regions=[region1]) + citation1 = Annotation(type="citation", title="Citation 1", url="http://example.com/1", annotated_regions=[region1]) - citation2 = CitationAnnotation(title="Citation 2", url="http://example.com/2", annotated_regions=[region2]) + citation2 = Annotation(type="citation", title="Citation 2", url="http://example.com/2", annotated_regions=[region2]) # Create TextContent with multiple annotations - content = TextContent(text="Hello world", annotations=[citation1, citation2]) + content = Content.from_text(text="Hello world", annotations=[citation1, citation2]) # Serialize content_dict = content.to_dict() @@ -2125,14 +2147,15 @@ def test_text_content_with_multiple_annotations_serialization(): assert content_dict["annotations"][1]["title"] == "Citation 2" # Deserialize - reconstructed = TextContent.from_dict(content_dict) + reconstructed = Content.from_dict(content_dict) # Verify reconstruction assert len(reconstructed.annotations) == 2 - assert all(isinstance(ann, CitationAnnotation) for ann in reconstructed.annotations) - assert reconstructed.annotations[0].title == "Citation 1" - assert reconstructed.annotations[1].title == "Citation 2" - assert all(isinstance(ann.annotated_regions[0], TextSpanRegion) for ann in reconstructed.annotations) + # Annotation are TypedDicts (dicts at runtime) + assert all(isinstance(ann, dict) for ann in reconstructed.annotations) + assert reconstructed.annotations[0]["title"] == "Citation 1" + assert reconstructed.annotations[1]["title"] == "Citation 2" + assert all(isinstance(ann["annotated_regions"][0], dict) for ann in reconstructed.annotations) # 
region prepare_function_call_results with Pydantic models diff --git a/python/packages/core/tests/openai/test_openai_assistants_client.py b/python/packages/core/tests/openai/test_openai_assistants_client.py index 424c1cc044..a35c554e1d 100644 --- a/python/packages/core/tests/openai/test_openai_assistants_client.py +++ b/python/packages/core/tests/openai/test_openai_assistants_client.py @@ -19,15 +19,10 @@ ChatMessage, ChatResponse, ChatResponseUpdate, - FunctionCallContent, - FunctionResultContent, + Content, HostedCodeInterpreterTool, HostedFileSearchTool, - HostedVectorStoreContent, Role, - TextContent, - UriContent, - UsageContent, ai_function, ) from agent_framework.exceptions import ServiceInitializationError @@ -68,7 +63,7 @@ def create_test_openai_assistants_client( return client -async def create_vector_store(client: OpenAIAssistantsClient) -> tuple[str, HostedVectorStoreContent]: +async def create_vector_store(client: OpenAIAssistantsClient) -> tuple[str, Content]: """Create a vector store with sample documents for testing.""" file = await client.client.files.create( file=("todays_weather.txt", b"The weather today is sunny with a high of 25C."), purpose="user_data" @@ -81,7 +76,7 @@ async def create_vector_store(client: OpenAIAssistantsClient) -> tuple[str, Host if result.last_error is not None: raise Exception(f"Vector store file processing failed with status: {result.last_error.message}") - return file.id, HostedVectorStoreContent(vector_store_id=vector_store.id) + return file.id, Content.from_hosted_vector_store(vector_store_id=vector_store.id) async def delete_vector_store(client: OpenAIAssistantsClient, file_id: str, vector_store_id: str) -> None: @@ -464,7 +459,7 @@ async def test_process_stream_events_requires_action(mock_async_openai: MagicMoc chat_client = create_test_openai_assistants_client(mock_async_openai) # Mock the _parse_function_calls_from_assistants method to return test content - test_function_content = 
FunctionCallContent(call_id="call-123", name="test_func", arguments={"arg": "value"}) + test_function_content = Content.from_function_call(call_id="call-123", name="test_func", arguments={"arg": "value"}) chat_client._parse_function_calls_from_assistants = MagicMock(return_value=[test_function_content]) # type: ignore # Create a mock Run object @@ -578,10 +573,10 @@ async def async_iterator() -> Any: # Check the usage content usage_content = update.contents[0] - assert isinstance(usage_content, UsageContent) - assert usage_content.details.input_token_count == 100 - assert usage_content.details.output_token_count == 50 - assert usage_content.details.total_token_count == 150 + assert usage_content.type == "usage" + assert usage_content.usage_details["input_token_count"] == 100 + assert usage_content.usage_details["output_token_count"] == 50 + assert usage_content.usage_details["total_token_count"] == 150 assert update.raw_representation == mock_run @@ -609,11 +604,86 @@ def test_parse_function_calls_from_assistants_basic(mock_async_openai: MagicMock # Test that one function call content was created assert len(contents) == 1 - assert isinstance(contents[0], FunctionCallContent) + assert contents[0].type == "function_call" assert contents[0].name == "get_weather" assert contents[0].arguments == {"location": "Seattle"} +def test_parse_run_step_with_code_interpreter_tool_call(mock_async_openai: MagicMock) -> None: + """Test _parse_run_step_tool_call with code_interpreter type creates CodeInterpreterToolCallContent.""" + client = create_test_openai_assistants_client( + mock_async_openai, + model_id="test-model", + assistant_id="test-assistant", + ) + + # Mock a run with required_action containing code_interpreter tool call + mock_run = MagicMock() + mock_run.id = "run_123" + mock_run.status = "requires_action" + + mock_tool_call = MagicMock() + mock_tool_call.id = "call_code_123" + mock_tool_call.type = "code_interpreter" + mock_code_interpreter = MagicMock() + 
mock_code_interpreter.input = "print('Hello, World!')" + mock_tool_call.code_interpreter = mock_code_interpreter + + mock_required_action = MagicMock() + mock_required_action.submit_tool_outputs = MagicMock() + mock_required_action.submit_tool_outputs.tool_calls = [mock_tool_call] + mock_run.required_action = mock_required_action + + # Parse the run step + contents = client._parse_function_calls_from_assistants(mock_run, "response_123") + + # Should have CodeInterpreterToolCallContent + assert len(contents) == 1 + assert contents[0].type == "code_interpreter_tool_call" + assert contents[0].call_id == '["response_123", "call_code_123"]' + assert contents[0].inputs is not None + assert len(contents[0].inputs) == 1 + assert contents[0].inputs[0].type == "text" + assert contents[0].inputs[0].text == "print('Hello, World!')" + + +def test_parse_run_step_with_mcp_tool_call(mock_async_openai: MagicMock) -> None: + """Test _parse_run_step_tool_call with mcp type creates MCPServerToolCallContent.""" + client = create_test_openai_assistants_client( + mock_async_openai, + model_id="test-model", + assistant_id="test-assistant", + ) + + # Mock a run with required_action containing mcp tool call + mock_run = MagicMock() + mock_run.id = "run_456" + mock_run.status = "requires_action" + + mock_tool_call = MagicMock() + mock_tool_call.id = "call_mcp_456" + mock_tool_call.type = "mcp" + mock_tool_call.name = "fetch_data" + mock_tool_call.server_label = "DataServer" + mock_tool_call.args = {"key": "value"} + + mock_required_action = MagicMock() + mock_required_action.submit_tool_outputs = MagicMock() + mock_required_action.submit_tool_outputs.tool_calls = [mock_tool_call] + mock_run.required_action = mock_required_action + + # Parse the run step + contents = client._parse_function_calls_from_assistants(mock_run, "response_456") + + # Should have MCPServerToolCallContent + assert len(contents) == 1 + assert contents[0].type == "mcp_server_tool_call" + assert contents[0].call_id == 
'["response_456", "call_mcp_456"]' + assert contents[0].tool_name == "fetch_data" + assert contents[0].server_name == "DataServer" + assert contents[0].arguments == {"key": "value"} + + def test_prepare_options_basic(mock_async_openai: MagicMock) -> None: """Test _prepare_options with basic chat options.""" chat_client = create_test_openai_assistants_client(mock_async_openai) @@ -830,7 +900,7 @@ def test_prepare_options_with_image_content(mock_async_openai: MagicMock) -> Non chat_client = create_test_openai_assistants_client(mock_async_openai) # Create message with image content - image_content = UriContent(uri="https://example.com/image.jpg", media_type="image/jpeg") + image_content = Content.from_uri(uri="https://example.com/image.jpg", media_type="image/jpeg") messages = [ChatMessage(role=Role.USER, contents=[image_content])] # Call the method @@ -861,7 +931,7 @@ def test_prepare_tool_outputs_for_assistants_valid(mock_async_openai: MagicMock) chat_client = create_test_openai_assistants_client(mock_async_openai) call_id = json.dumps(["run-123", "call-456"]) - function_result = FunctionResultContent(call_id=call_id, result="Function executed successfully") + function_result = Content.from_function_result(call_id=call_id, result="Function executed successfully") run_id, tool_outputs = chat_client._prepare_tool_outputs_for_assistants([function_result]) # type: ignore @@ -881,8 +951,8 @@ def test_prepare_tool_outputs_for_assistants_mismatched_run_ids( # Create function results with different run IDs call_id1 = json.dumps(["run-123", "call-456"]) call_id2 = json.dumps(["run-789", "call-xyz"]) # Different run ID - function_result1 = FunctionResultContent(call_id=call_id1, result="Result 1") - function_result2 = FunctionResultContent(call_id=call_id2, result="Result 2") + function_result1 = Content.from_function_result(call_id=call_id1, result="Result 1") + function_result2 = Content.from_function_result(call_id=call_id2, result="Result 2") run_id, tool_outputs = 
chat_client._prepare_tool_outputs_for_assistants([function_result1, function_result2]) # type: ignore @@ -1006,7 +1076,7 @@ async def test_streaming() -> None: assert chunk is not None assert isinstance(chunk, ChatResponseUpdate) for content in chunk.contents: - if isinstance(content, TextContent) and content.text: + if content.type == "text" and content.text: full_message += content.text assert any(word in full_message.lower() for word in ["sunny", "25", "weather", "seattle"]) @@ -1035,7 +1105,7 @@ async def test_streaming_tools() -> None: assert chunk is not None assert isinstance(chunk, ChatResponseUpdate) for content in chunk.contents: - if isinstance(content, TextContent) and content.text: + if content.type == "text" and content.text: full_message += content.text assert any(word in full_message.lower() for word in ["sunny", "25", "weather"]) @@ -1121,7 +1191,7 @@ async def test_file_search_streaming() -> None: assert chunk is not None assert isinstance(chunk, ChatResponseUpdate) for content in chunk.contents: - if isinstance(content, TextContent) and content.text: + if content.type == "text" and content.text: full_message += content.text await delete_vector_store(openai_assistants_client, file_id, vector_store.vector_store_id) diff --git a/python/packages/core/tests/openai/test_openai_chat_client.py b/python/packages/core/tests/openai/test_openai_chat_client.py index 1f1d624345..a4ffbde03d 100644 --- a/python/packages/core/tests/openai/test_openai_chat_client.py +++ b/python/packages/core/tests/openai/test_openai_chat_client.py @@ -7,6 +7,8 @@ import pytest from openai import BadRequestError +from openai.types.chat.chat_completion import ChatCompletion, Choice +from openai.types.chat.chat_completion_message import ChatCompletionMessage from pydantic import BaseModel from pytest import param @@ -14,8 +16,7 @@ ChatClientProtocol, ChatMessage, ChatResponse, - DataContent, - FunctionResultContent, + Content, HostedWebSearchTool, ToolProtocol, ai_function, @@ 
-282,7 +283,9 @@ def test_function_result_falsy_values_handling(openai_unit_test_env: dict[str, s client = OpenAIChatClient() # Test with empty list (falsy but not None) - message_with_empty_list = ChatMessage(role="tool", contents=[FunctionResultContent(call_id="call-123", result=[])]) + message_with_empty_list = ChatMessage( + role="tool", contents=[Content.from_function_result(call_id="call-123", result=[])] + ) openai_messages = client._prepare_message_for_openai(message_with_empty_list) assert len(openai_messages) == 1 @@ -290,7 +293,7 @@ def test_function_result_falsy_values_handling(openai_unit_test_env: dict[str, s # Test with empty string (falsy but not None) message_with_empty_string = ChatMessage( - role="tool", contents=[FunctionResultContent(call_id="call-456", result="")] + role="tool", contents=[Content.from_function_result(call_id="call-456", result="")] ) openai_messages = client._prepare_message_for_openai(message_with_empty_string) @@ -298,7 +301,9 @@ def test_function_result_falsy_values_handling(openai_unit_test_env: dict[str, s assert openai_messages[0]["content"] == "" # Empty string should be preserved # Test with False (falsy but not None) - message_with_false = ChatMessage(role="tool", contents=[FunctionResultContent(call_id="call-789", result=False)]) + message_with_false = ChatMessage( + role="tool", contents=[Content.from_function_result(call_id="call-789", result=False)] + ) openai_messages = client._prepare_message_for_openai(message_with_false) assert len(openai_messages) == 1 @@ -317,7 +322,7 @@ def test_function_result_exception_handling(openai_unit_test_env: dict[str, str] message_with_exception = ChatMessage( role="tool", contents=[ - FunctionResultContent(call_id="call-123", result="Error: Function failed.", exception=test_exception) + Content.from_function_result(call_id="call-123", result="Error: Function failed.", exception=test_exception) ], ) @@ -339,7 +344,7 @@ def 
test_prepare_content_for_openai_data_content_image(openai_unit_test_env: dic client = OpenAIChatClient() # Test DataContent with image media type - image_data_content = DataContent( + image_data_content = Content.from_uri( uri="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8/5+hHgAHggJ/PchI7wAAAABJRU5ErkJggg==", media_type="image/png", ) @@ -351,7 +356,7 @@ def test_prepare_content_for_openai_data_content_image(openai_unit_test_env: dic assert result["image_url"]["url"] == image_data_content.uri # Test DataContent with non-image media type should use default model_dump - text_data_content = DataContent(uri="data:text/plain;base64,SGVsbG8gV29ybGQ=", media_type="text/plain") + text_data_content = Content.from_uri(uri="data:text/plain;base64,SGVsbG8gV29ybGQ=", media_type="text/plain") result = client._prepare_content_for_openai(text_data_content) # type: ignore @@ -361,7 +366,7 @@ def test_prepare_content_for_openai_data_content_image(openai_unit_test_env: dic assert result["media_type"] == "text/plain" # Test DataContent with audio media type - audio_data_content = DataContent( + audio_data_content = Content.from_uri( uri="data:audio/wav;base64,UklGRjBEAABXQVZFZm10IBAAAAABAAEAQB8AAEAfAAABAAgAZGF0YQwEAAAAAAAAAAAA", media_type="audio/wav", ) @@ -375,7 +380,9 @@ def test_prepare_content_for_openai_data_content_image(openai_unit_test_env: dic assert result["input_audio"]["format"] == "wav" # Test DataContent with MP3 audio - mp3_data_content = DataContent(uri="data:audio/mp3;base64,//uQAAAAWGluZwAAAA8AAAACAAACcQ==", media_type="audio/mp3") + mp3_data_content = Content.from_uri( + uri="data:audio/mp3;base64,//uQAAAAWGluZwAAAA8AAAACAAACcQ==", media_type="audio/mp3" + ) result = client._prepare_content_for_openai(mp3_data_content) # type: ignore @@ -391,7 +398,7 @@ def test_prepare_content_for_openai_document_file_mapping(openai_unit_test_env: client = OpenAIChatClient() # Test PDF without filename - should omit filename in OpenAI payload 
- pdf_data_content = DataContent( + pdf_data_content = Content.from_uri( uri="data:application/pdf;base64,JVBERi0xLjQKJcfsj6IKNSAwIG9iago8PC9UeXBlL0NhdGFsb2cvUGFnZXMgMiAwIFI+PgplbmRvYmoKMiAwIG9iago8PC9UeXBlL1BhZ2VzL0tpZHNbMyAwIFJdL0NvdW50IDE+PgplbmRvYmoKMyAwIG9iago8PC9UeXBlL1BhZ2UvTWVkaWFCb3ggWzAgMCA2MTIgNzkyXS9QYXJlbnQgMiAwIFIvUmVzb3VyY2VzPDwvRm9udDw8L0YxIDQgMCBSPj4+Pi9Db250ZW50cyA1IDAgUj4+CmVuZG9iago0IDAgb2JqCjw8L1R5cGUvRm9udC9TdWJ0eXBlL1R5cGUxL0Jhc2VGb250L0hlbHZldGljYT4+CmVuZG9iago1IDAgb2JqCjw8L0xlbmd0aCA0ND4+CnN0cmVhbQpCVApxCjcwIDUwIFRECi9GMSA4IFRmCihIZWxsbyBXb3JsZCEpIFRqCkVUCmVuZHN0cmVhbQplbmRvYmoKeHJlZgowIDYKMDAwMDAwMDAwMCA2NTUzNSBmIAowMDAwMDAwMDA5IDAwMDAwIG4gCjAwMDAwMDAwNTggMDAwMDAgbiAKMDAwMDAwMDExNSAwMDAwMCBuIAowMDAwMDAwMjQ1IDAwMDAwIG4gCjAwMDAwMDAzMDcgMDAwMDAgbiAKdHJhaWxlcgo8PC9TaXplIDYvUm9vdCAxIDAgUj4+CnN0YXJ0eHJlZgo0MDUKJSVFT0Y=", media_type="application/pdf", ) @@ -407,7 +414,7 @@ def test_prepare_content_for_openai_document_file_mapping(openai_unit_test_env: assert result["file"]["file_data"] == pdf_data_content.uri # Test PDF with custom filename via additional_properties - pdf_with_filename = DataContent( + pdf_with_filename = Content.from_uri( uri="data:application/pdf;base64,JVBERi0xLjQ=", media_type="application/pdf", additional_properties={"filename": "report.pdf"}, @@ -441,7 +448,7 @@ def test_prepare_content_for_openai_document_file_mapping(openai_unit_test_env: for case in test_cases: # Test without filename - doc_content = DataContent( + doc_content = Content.from_uri( uri=f"data:{case['media_type']};base64,{case['base64']}", media_type=case["media_type"], ) @@ -454,7 +461,7 @@ def test_prepare_content_for_openai_document_file_mapping(openai_unit_test_env: assert result["file"]["file_data"] == doc_content.uri # Test with filename - should now use file format with filename - doc_with_filename = DataContent( + doc_with_filename = Content.from_uri( uri=f"data:{case['media_type']};base64,{case['base64']}", media_type=case["media_type"], 
additional_properties={"filename": case["filename"]}, @@ -468,7 +475,7 @@ def test_prepare_content_for_openai_document_file_mapping(openai_unit_test_env: assert result["file"]["file_data"] == doc_with_filename.uri # Test edge case: empty additional_properties dict - pdf_empty_props = DataContent( + pdf_empty_props = Content.from_uri( uri="data:application/pdf;base64,JVBERi0xLjQ=", media_type="application/pdf", additional_properties={}, @@ -480,7 +487,7 @@ def test_prepare_content_for_openai_document_file_mapping(openai_unit_test_env: assert "filename" not in result["file"] # Test edge case: None filename in additional_properties - pdf_none_filename = DataContent( + pdf_none_filename = Content.from_uri( uri="data:application/pdf;base64,JVBERi0xLjQ=", media_type="application/pdf", additional_properties={"filename": None}, @@ -492,6 +499,430 @@ def test_prepare_content_for_openai_document_file_mapping(openai_unit_test_env: assert "filename" not in result["file"] # None filename should be omitted +def test_parse_text_reasoning_content_from_response(openai_unit_test_env: dict[str, str]) -> None: + """Test that TextReasoningContent is correctly parsed from OpenAI response with reasoning_details.""" + + client = OpenAIChatClient() + + # Mock response with reasoning_details + mock_reasoning_details = { + "effort": "high", + "summary": "Analyzed the problem carefully", + "content": [{"type": "reasoning_text", "text": "Step-by-step thinking..."}], + } + + mock_response = ChatCompletion( + id="test-response", + object="chat.completion", + created=1234567890, + model="gpt-5", + choices=[ + Choice( + index=0, + message=ChatCompletionMessage( + role="assistant", + content="The answer is 42.", + reasoning_details=mock_reasoning_details, + ), + finish_reason="stop", + ) + ], + ) + + response = client._parse_response_from_openai(mock_response, {}) + + # Should have both text and reasoning content + assert len(response.messages) == 1 + message = response.messages[0] + assert 
len(message.contents) == 2 + + # First should be text content + assert message.contents[0].type == "text" + assert message.contents[0].text == "The answer is 42." + + # Second should be reasoning content with protected_data + assert message.contents[1].type == "text_reasoning" + assert message.contents[1].protected_data is not None + parsed_details = json.loads(message.contents[1].protected_data) + assert parsed_details == mock_reasoning_details + + +def test_parse_text_reasoning_content_from_streaming_chunk(openai_unit_test_env: dict[str, str]) -> None: + """Test that TextReasoningContent is correctly parsed from streaming OpenAI chunk with reasoning_details.""" + from openai.types.chat.chat_completion_chunk import ChatCompletionChunk + from openai.types.chat.chat_completion_chunk import Choice as ChunkChoice + from openai.types.chat.chat_completion_chunk import ChoiceDelta as ChunkChoiceDelta + + client = OpenAIChatClient() + + # Mock streaming chunk with reasoning_details + mock_reasoning_details = { + "type": "reasoning", + "content": "Analyzing the question...", + } + + mock_chunk = ChatCompletionChunk( + id="test-chunk", + object="chat.completion.chunk", + created=1234567890, + model="gpt-5", + choices=[ + ChunkChoice( + index=0, + delta=ChunkChoiceDelta( + role="assistant", + content="Partial answer", + reasoning_details=mock_reasoning_details, + ), + finish_reason=None, + ) + ], + ) + + update = client._parse_response_update_from_openai(mock_chunk) + + # Should have both text and reasoning content + assert len(update.contents) == 2 + + # First should be text content + assert update.contents[0].type == "text" + assert update.contents[0].text == "Partial answer" + + # Second should be reasoning content + assert update.contents[1].type == "text_reasoning" + assert update.contents[1].protected_data is not None + parsed_details = json.loads(update.contents[1].protected_data) + assert parsed_details == mock_reasoning_details + + +def 
test_prepare_message_with_text_reasoning_content(openai_unit_test_env: dict[str, str]) -> None: + """Test that TextReasoningContent with protected_data is correctly prepared for OpenAI.""" + client = OpenAIChatClient() + + # Create message with text_reasoning content that has protected_data + # text_reasoning is meant to be added to an existing message, so include text content first + mock_reasoning_data = { + "effort": "medium", + "summary": "Quick analysis", + } + + reasoning_content = Content.from_text_reasoning(text=None, protected_data=json.dumps(mock_reasoning_data)) + + # Message must have other content first for reasoning to attach to + message = ChatMessage( + role="assistant", + contents=[ + Content.from_text(text="The answer is 42."), + reasoning_content, + ], + ) + + prepared = client._prepare_message_for_openai(message) + + # Should have one message with reasoning_details attached + assert len(prepared) == 1 + assert "reasoning_details" in prepared[0] + assert prepared[0]["reasoning_details"] == mock_reasoning_data + # Should also have the text content + assert prepared[0]["content"][0]["type"] == "text" + assert prepared[0]["content"][0]["text"] == "The answer is 42." 
+ + +def test_function_approval_content_is_skipped_in_preparation(openai_unit_test_env: dict[str, str]) -> None: + """Test that function approval request and response content are skipped.""" + client = OpenAIChatClient() + + # Create approval request + function_call = Content.from_function_call( + call_id="call_123", + name="dangerous_action", + arguments='{"confirm": true}', + ) + + approval_request = Content.from_function_approval_request( + id="approval_001", + function_call=function_call, + ) + + # Create approval response + approval_response = Content.from_function_approval_response( + approved=False, + id="approval_001", + function_call=function_call, + ) + + # Test that approval request is skipped + message_with_request = ChatMessage(role="assistant", contents=[approval_request]) + prepared_request = client._prepare_message_for_openai(message_with_request) + assert len(prepared_request) == 0 # Should be empty - approval content is skipped + + # Test that approval response is skipped + message_with_response = ChatMessage(role="user", contents=[approval_response]) + prepared_response = client._prepare_message_for_openai(message_with_response) + assert len(prepared_response) == 0 # Should be empty - approval content is skipped + + # Test with mixed content - approval should be skipped, text should remain + mixed_message = ChatMessage( + role="assistant", + contents=[ + Content.from_text(text="I need approval for this action."), + approval_request, + ], + ) + prepared_mixed = client._prepare_message_for_openai(mixed_message) + assert len(prepared_mixed) == 1 # Only text content should remain + assert prepared_mixed[0]["content"][0]["type"] == "text" + assert prepared_mixed[0]["content"][0]["text"] == "I need approval for this action." 
+ + +def test_usage_content_in_streaming_response(openai_unit_test_env: dict[str, str]) -> None: + """Test that UsageContent is correctly parsed from streaming response with usage data.""" + from openai.types.chat.chat_completion_chunk import ChatCompletionChunk + from openai.types.completion_usage import CompletionUsage + + client = OpenAIChatClient() + + # Mock streaming chunk with usage data (typically last chunk) + mock_usage = CompletionUsage( + prompt_tokens=100, + completion_tokens=50, + total_tokens=150, + ) + + mock_chunk = ChatCompletionChunk( + id="test-chunk", + object="chat.completion.chunk", + created=1234567890, + model="gpt-4o", + choices=[], # Empty choices when sending usage + usage=mock_usage, + ) + + update = client._parse_response_update_from_openai(mock_chunk) + + # Should have usage content + assert len(update.contents) == 1 + assert update.contents[0].type == "usage" + + usage_content = update.contents[0] + assert isinstance(usage_content.usage_details, dict) + assert usage_content.usage_details["input_token_count"] == 100 + assert usage_content.usage_details["output_token_count"] == 50 + assert usage_content.usage_details["total_token_count"] == 150 + + +def test_parse_text_with_refusal(openai_unit_test_env: dict[str, str]) -> None: + """Test that refusal content is parsed correctly.""" + from openai.types.chat.chat_completion import ChatCompletion, Choice + from openai.types.chat.chat_completion_message import ChatCompletionMessage + + client = OpenAIChatClient() + + # Mock response with refusal + mock_response = ChatCompletion( + id="test-response", + object="chat.completion", + created=1234567890, + model="gpt-4o", + choices=[ + Choice( + index=0, + message=ChatCompletionMessage( + role="assistant", + content=None, + refusal="I cannot provide that information.", + ), + finish_reason="stop", + ) + ], + ) + + response = client._parse_response_from_openai(mock_response, {}) + + # Should have text content with refusal message + assert 
len(response.messages) == 1 + message = response.messages[0] + assert len(message.contents) == 1 + assert message.contents[0].type == "text" + assert message.contents[0].text == "I cannot provide that information." + + +def test_prepare_options_without_model_id(openai_unit_test_env: dict[str, str]) -> None: + """Test that prepare_options raises error when model_id is not set.""" + client = OpenAIChatClient() + client.model_id = None # Remove model_id + + messages = [ChatMessage(role="user", text="test")] + + with pytest.raises(ValueError, match="model_id must be a non-empty string"): + client._prepare_options(messages, {}) + + +def test_prepare_options_without_messages(openai_unit_test_env: dict[str, str]) -> None: + """Test that prepare_options raises error when messages are missing.""" + from agent_framework.exceptions import ServiceInvalidRequestError + + client = OpenAIChatClient() + + with pytest.raises(ServiceInvalidRequestError, match="Messages are required"): + client._prepare_options([], {}) + + +def test_prepare_tools_with_web_search_no_location(openai_unit_test_env: dict[str, str]) -> None: + """Test preparing web search tool without user location.""" + client = OpenAIChatClient() + + # Web search tool without additional_properties + web_search_tool = HostedWebSearchTool() + + result = client._prepare_tools_for_openai([web_search_tool]) + + # Should have empty web_search_options (no location) + assert "web_search_options" in result + assert result["web_search_options"] == {} + + +def test_prepare_options_with_instructions(openai_unit_test_env: dict[str, str]) -> None: + """Test that instructions are prepended as system message.""" + client = OpenAIChatClient() + + messages = [ChatMessage(role="user", text="Hello")] + options = {"instructions": "You are a helpful assistant."} + + prepared_options = client._prepare_options(messages, options) + + # Should have messages with system message prepended + assert "messages" in prepared_options + assert 
len(prepared_options["messages"]) == 2 + assert prepared_options["messages"][0]["role"] == "system" + assert prepared_options["messages"][0]["content"][0]["text"] == "You are a helpful assistant." + + +def test_prepare_message_with_author_name(openai_unit_test_env: dict[str, str]) -> None: + """Test that author_name is included in prepared message.""" + client = OpenAIChatClient() + + message = ChatMessage( + role="user", + author_name="TestUser", + contents=[Content.from_text(text="Hello")], + ) + + prepared = client._prepare_message_for_openai(message) + + assert len(prepared) == 1 + assert prepared[0]["name"] == "TestUser" + + +def test_prepare_message_with_tool_result_author_name(openai_unit_test_env: dict[str, str]) -> None: + """Test that author_name is not included for TOOL role messages.""" + client = OpenAIChatClient() + + # Tool messages should not have 'name' field (it's for function name instead) + message = ChatMessage( + role="tool", + author_name="ShouldNotAppear", + contents=[Content.from_function_result(call_id="call_123", result="result")], + ) + + prepared = client._prepare_message_for_openai(message) + + assert len(prepared) == 1 + # Should not have 'name' field for tool messages + assert "name" not in prepared[0] + + +def test_tool_choice_required_with_function_name(openai_unit_test_env: dict[str, str]) -> None: + """Test that tool_choice with required mode and function name is correctly prepared.""" + client = OpenAIChatClient() + + messages = [ChatMessage(role="user", text="test")] + options = { + "tools": [get_weather], + "tool_choice": {"mode": "required", "required_function_name": "get_weather"}, + } + + prepared_options = client._prepare_options(messages, options) + + # Should format tool_choice correctly + assert "tool_choice" in prepared_options + assert prepared_options["tool_choice"]["type"] == "function" + assert prepared_options["tool_choice"]["function"]["name"] == "get_weather" + + +def 
test_response_format_dict_passthrough(openai_unit_test_env: dict[str, str]) -> None: + """Test that response_format as dict is passed through directly.""" + client = OpenAIChatClient() + + messages = [ChatMessage(role="user", text="test")] + custom_format = { + "type": "json_schema", + "json_schema": {"name": "Test", "schema": {"type": "object"}}, + } + options = {"response_format": custom_format} + + prepared_options = client._prepare_options(messages, options) + + # Should pass through the dict directly + assert prepared_options["response_format"] == custom_format + + +def test_multiple_function_calls_in_single_message(openai_unit_test_env: dict[str, str]) -> None: + """Test that multiple function calls in a message are correctly prepared.""" + client = OpenAIChatClient() + + # Create message with multiple function calls + message = ChatMessage( + role="assistant", + contents=[ + Content.from_function_call(call_id="call_1", name="func_1", arguments='{"a": 1}'), + Content.from_function_call(call_id="call_2", name="func_2", arguments='{"b": 2}'), + ], + ) + + prepared = client._prepare_message_for_openai(message) + + # Should have one message with multiple tool_calls + assert len(prepared) == 1 + assert "tool_calls" in prepared[0] + assert len(prepared[0]["tool_calls"]) == 2 + assert prepared[0]["tool_calls"][0]["id"] == "call_1" + assert prepared[0]["tool_calls"][1]["id"] == "call_2" + + +def test_prepare_options_removes_parallel_tool_calls_when_no_tools(openai_unit_test_env: dict[str, str]) -> None: + """Test that parallel_tool_calls is removed when no tools are present.""" + client = OpenAIChatClient() + + messages = [ChatMessage(role="user", text="test")] + options = {"allow_multiple_tool_calls": True} + + prepared_options = client._prepare_options(messages, options) + + # Should not have parallel_tool_calls when no tools + assert "parallel_tool_calls" not in prepared_options + + +async def test_streaming_exception_handling(openai_unit_test_env: dict[str, str]) 
-> None: + """Test that streaming errors are properly handled.""" + client = OpenAIChatClient() + messages = [ChatMessage(role="user", text="test")] + + # Create a mock error during streaming + mock_error = Exception("Streaming error") + + with ( + patch.object(client.client.chat.completions, "create", side_effect=mock_error), + pytest.raises(ServiceResponseException), + ): + + async def consume_stream(): + async for _ in client._inner_get_streaming_response(messages=messages, options={}): # type: ignore + pass + + await consume_stream() + + # region Integration Tests diff --git a/python/packages/core/tests/openai/test_openai_responses_client.py b/python/packages/core/tests/openai/test_openai_responses_client.py index c91297d7df..834af317c8 100644 --- a/python/packages/core/tests/openai/test_openai_responses_client.py +++ b/python/packages/core/tests/openai/test_openai_responses_client.py @@ -33,26 +33,13 @@ ChatOptions, ChatResponse, ChatResponseUpdate, - CodeInterpreterToolCallContent, - CodeInterpreterToolResultContent, - DataContent, - FunctionApprovalRequestContent, - FunctionApprovalResponseContent, - FunctionCallContent, - FunctionResultContent, + Content, HostedCodeInterpreterTool, - HostedFileContent, HostedFileSearchTool, HostedImageGenerationTool, HostedMCPTool, - HostedVectorStoreContent, HostedWebSearchTool, - ImageGenerationToolCallContent, - ImageGenerationToolResultContent, Role, - TextContent, - TextReasoningContent, - UriContent, ai_function, ) from agent_framework.exceptions import ( @@ -81,7 +68,7 @@ class OutputStruct(BaseModel): async def create_vector_store( client: OpenAIResponsesClient, -) -> tuple[str, HostedVectorStoreContent]: +) -> tuple[str, Content]: """Create a vector store with sample documents for testing.""" file = await client.client.files.create( file=("todays_weather.txt", b"The weather today is sunny with a high of 75F."), @@ -99,7 +86,7 @@ async def create_vector_store( if result.last_error is not None: raise 
Exception(f"Vector store file processing failed with status: {result.last_error.message}") - return file.id, HostedVectorStoreContent(vector_store_id=vector_store.id) + return file.id, Content.from_hosted_vector_store(vector_store_id=vector_store.id) async def delete_vector_store(client: OpenAIResponsesClient, file_id: str, vector_store_id: str) -> None: @@ -285,7 +272,7 @@ def test_file_search_tool_with_invalid_inputs() -> None: client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") # Test with invalid inputs type (should trigger ValueError) - file_search_tool = HostedFileSearchTool(inputs=[HostedFileContent(file_id="invalid")]) + file_search_tool = HostedFileSearchTool(inputs=[Content.from_hosted_file(file_id="invalid")]) # Should raise an error due to invalid inputs with pytest.raises(ValueError, match="HostedFileSearchTool requires inputs to be of type"): @@ -314,7 +301,7 @@ def test_code_interpreter_tool_variations() -> None: # Test code interpreter with files code_tool_with_files = HostedCodeInterpreterTool( - inputs=[HostedFileContent(file_id="file1"), HostedFileContent(file_id="file2")] + inputs=[Content.from_hosted_file(file_id="file1"), Content.from_hosted_file(file_id="file2")] ) with pytest.raises(ServiceResponseException): @@ -367,14 +354,14 @@ def test_chat_message_parsing_with_function_calls() -> None: client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") # Create messages with function call and result content - function_call = FunctionCallContent( + function_call = Content.from_function_call( call_id="test-call-id", name="test_function", arguments='{"param": "value"}', additional_properties={"fc_id": "test-fc-id"}, ) - function_result = FunctionResultContent(call_id="test-call-id", result="Function executed successfully") + function_result = Content.from_function_result(call_id="test-call-id", result="Function executed successfully") messages = [ ChatMessage(role="user", text="Call a function"), @@ -516,7 
+503,7 @@ def test_response_content_creation_with_annotations() -> None: response = client._parse_response_from_openai(mock_response, options={}) # type: ignore assert len(response.messages[0].contents) >= 1 - assert isinstance(response.messages[0].contents[0], TextContent) + assert response.messages[0].contents[0].type == "text" assert response.messages[0].contents[0].text == "Text with annotations." assert response.messages[0].contents[0].annotations is not None @@ -547,7 +534,7 @@ def test_response_content_creation_with_refusal() -> None: response = client._parse_response_from_openai(mock_response, options={}) # type: ignore assert len(response.messages[0].contents) == 1 - assert isinstance(response.messages[0].contents[0], TextContent) + assert response.messages[0].contents[0].type == "text" assert response.messages[0].contents[0].text == "I cannot provide that information." @@ -577,7 +564,7 @@ def test_response_content_creation_with_reasoning() -> None: response = client._parse_response_from_openai(mock_response, options={}) # type: ignore assert len(response.messages[0].contents) == 2 - assert isinstance(response.messages[0].contents[0], TextReasoningContent) + assert response.messages[0].contents[0].type == "text_reasoning" assert response.messages[0].contents[0].text == "Reasoning step" @@ -614,13 +601,13 @@ def test_response_content_creation_with_code_interpreter() -> None: assert len(response.messages[0].contents) == 2 call_content, result_content = response.messages[0].contents - assert isinstance(call_content, CodeInterpreterToolCallContent) + assert call_content.type == "code_interpreter_tool_call" assert call_content.inputs is not None - assert isinstance(call_content.inputs[0], TextContent) - assert isinstance(result_content, CodeInterpreterToolResultContent) + assert call_content.inputs[0].type == "text" + assert result_content.type == "code_interpreter_tool_result" assert result_content.outputs is not None - assert any(isinstance(out, TextContent) 
for out in result_content.outputs) - assert any(isinstance(out, UriContent) for out in result_content.outputs) + assert any(out.type == "text" for out in result_content.outputs) + assert any(out.type == "uri" for out in result_content.outputs) def test_response_content_creation_with_function_call() -> None: @@ -648,13 +635,460 @@ def test_response_content_creation_with_function_call() -> None: response = client._parse_response_from_openai(mock_response, options={}) # type: ignore assert len(response.messages[0].contents) == 1 - assert isinstance(response.messages[0].contents[0], FunctionCallContent) + assert response.messages[0].contents[0].type == "function_call" function_call = response.messages[0].contents[0] assert function_call.call_id == "call_123" assert function_call.name == "get_weather" assert function_call.arguments == '{"location": "Seattle"}' +def test_prepare_content_for_openai_function_approval_response() -> None: + """Test _prepare_content_for_openai with function approval response content.""" + client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + + # Test approved response + function_call = Content.from_function_call( + call_id="call_123", + name="send_email", + arguments='{"to": "user@example.com"}', + ) + approval_response = Content.from_function_approval_response( + approved=True, + id="approval_001", + function_call=function_call, + ) + + result = client._prepare_content_for_openai(Role.ASSISTANT, approval_response, {}) + + assert result["type"] == "mcp_approval_response" + assert result["approval_request_id"] == "approval_001" + assert result["approve"] is True + + +def test_prepare_content_for_openai_error_content() -> None: + """Test _prepare_content_for_openai with error content.""" + client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + + error_content = Content.from_error( + message="Operation failed", + error_code="ERR_123", + error_details="Invalid parameter", + ) + + result = 
client._prepare_content_for_openai(Role.ASSISTANT, error_content, {}) + + # ErrorContent should return empty dict (logged but not sent) + assert result == {} + + +def test_prepare_content_for_openai_usage_content() -> None: + """Test _prepare_content_for_openai with usage content.""" + client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + + usage_content = Content.from_usage( + usage_details={ + "input_token_count": 100, + "output_token_count": 50, + "total_token_count": 150, + } + ) + + result = client._prepare_content_for_openai(Role.ASSISTANT, usage_content, {}) + + # UsageContent should return empty dict (logged but not sent) + assert result == {} + + +def test_prepare_content_for_openai_hosted_vector_store_content() -> None: + """Test _prepare_content_for_openai with hosted vector store content.""" + client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + + vector_store_content = Content.from_hosted_vector_store( + vector_store_id="vs_123", + ) + + result = client._prepare_content_for_openai(Role.ASSISTANT, vector_store_content, {}) + + # HostedVectorStoreContent should return empty dict (logged but not sent) + assert result == {} + + +def test_parse_response_from_openai_with_mcp_server_tool_result() -> None: + """Test _parse_response_from_openai with MCP server tool result.""" + client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + + mock_response = MagicMock() + mock_response.output_parsed = None + mock_response.metadata = {} + mock_response.usage = None + mock_response.id = "resp-id" + mock_response.model = "test-model" + mock_response.created_at = 1000000000 + + # Mock MCP call item with result + mock_mcp_item = MagicMock() + mock_mcp_item.type = "mcp_call" + mock_mcp_item.id = "mcp_call_123" + mock_mcp_item.name = "get_data" + mock_mcp_item.arguments = {"key": "value"} + mock_mcp_item.server_label = "TestServer" + mock_mcp_item.result = [{"content": [{"type": "text", "text": "MCP result"}]}] 
+ + mock_response.output = [mock_mcp_item] + + response = client._parse_response_from_openai(mock_response, options={}) # type: ignore + + # Should have both call and result content + assert len(response.messages[0].contents) == 2 + call_content, result_content = response.messages[0].contents + + assert call_content.type == "mcp_server_tool_call" + assert call_content.call_id == "mcp_call_123" + assert call_content.tool_name == "get_data" + assert call_content.server_name == "TestServer" + + assert result_content.type == "mcp_server_tool_result" + assert result_content.call_id == "mcp_call_123" + assert result_content.output is not None + + +def test_parse_chunk_from_openai_with_mcp_call_result() -> None: + """Test _parse_chunk_from_openai with MCP call output.""" + client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + + # Mock event with MCP call that has output + mock_event = MagicMock() + mock_event.type = "response.output_item.added" + + mock_item = MagicMock() + mock_item.type = "mcp_call" + mock_item.id = "mcp_call_456" + mock_item.call_id = "call_456" + mock_item.name = "fetch_resource" + mock_item.server_label = "ResourceServer" + mock_item.arguments = {"resource_id": "123"} + # Use proper content structure that _parse_content can handle + mock_item.result = [{"type": "text", "text": "test result"}] + + mock_event.item = mock_item + mock_event.output_index = 0 + + function_call_ids: dict[int, tuple[str, str]] = {} + + update = client._parse_chunk_from_openai(mock_event, options={}, function_call_ids=function_call_ids) + + # Should have both call and result in contents + assert len(update.contents) == 2 + call_content, result_content = update.contents + + assert call_content.type == "mcp_server_tool_call" + assert call_content.call_id in ["mcp_call_456", "call_456"] + assert call_content.tool_name == "fetch_resource" + + assert result_content.type == "mcp_server_tool_result" + assert result_content.call_id in ["mcp_call_456", 
"call_456"] + # Verify the output was parsed + assert result_content.output is not None + + +def test_prepare_message_for_openai_with_function_approval_response() -> None: + """Test _prepare_message_for_openai with function approval response content in messages.""" + client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + + function_call = Content.from_function_call( + call_id="call_789", + name="execute_command", + arguments='{"command": "ls"}', + ) + + approval_response = Content.from_function_approval_response( + approved=True, + id="approval_003", + function_call=function_call, + ) + + message = ChatMessage(role="user", contents=[approval_response]) + call_id_to_id: dict[str, str] = {} + + result = client._prepare_message_for_openai(message, call_id_to_id) + + # FunctionApprovalResponseContent is added directly, not nested in args with role + assert len(result) == 1 + prepared_message = result[0] + assert prepared_message["type"] == "mcp_approval_response" + assert prepared_message["approval_request_id"] == "approval_003" + assert prepared_message["approve"] is True + + +def test_chat_message_with_error_content() -> None: + """Test that error content in messages is handled properly.""" + client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + + error_content = Content.from_error( + message="Test error", + error_code="TEST_ERR", + ) + + message = ChatMessage(role="assistant", contents=[error_content]) + call_id_to_id: dict[str, str] = {} + + result = client._prepare_message_for_openai(message, call_id_to_id) + + # Message should be prepared with empty content list since ErrorContent returns {} + assert len(result) == 1 + prepared_message = result[0] + assert prepared_message["role"] == "assistant" + # Content should be a list with empty dict since ErrorContent returns {} + assert prepared_message.get("content") == [{}] + + +def test_chat_message_with_usage_content() -> None: + """Test that usage content in messages is 
handled properly.""" + client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + + usage_content = Content.from_usage( + usage_details={ + "input_token_count": 200, + "output_token_count": 100, + "total_token_count": 300, + } + ) + + message = ChatMessage(role="assistant", contents=[usage_content]) + call_id_to_id: dict[str, str] = {} + + result = client._prepare_message_for_openai(message, call_id_to_id) + + # Message should be prepared with empty content list since UsageContent returns {} + assert len(result) == 1 + prepared_message = result[0] + assert prepared_message["role"] == "assistant" + # Content should be a list with empty dict since UsageContent returns {} + assert prepared_message.get("content") == [{}] + + +def test_hosted_file_content_preparation() -> None: + """Test _prepare_content_for_openai with hosted file content.""" + client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + + hosted_file = Content.from_hosted_file( + file_id="file_abc123", + media_type="application/pdf", + name="document.pdf", + ) + + result = client._prepare_content_for_openai(Role.USER, hosted_file, {}) + + assert result["type"] == "input_file" + assert result["file_id"] == "file_abc123" + + +def test_function_approval_response_with_mcp_tool_call() -> None: + """Test function approval response content with MCP server tool call content.""" + client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + + mcp_call = Content.from_mcp_server_tool_call( + call_id="mcp_call_999", + tool_name="sensitive_action", + server_name="SecureServer", + arguments={"action": "delete"}, + ) + + approval_response = Content.from_function_approval_response( + approved=False, + id="approval_mcp_001", + function_call=mcp_call, + ) + + result = client._prepare_content_for_openai(Role.ASSISTANT, approval_response, {}) + + assert result["type"] == "mcp_approval_response" + assert result["approval_request_id"] == "approval_mcp_001" + assert 
result["approve"] is False + + +def test_response_format_with_conflicting_definitions() -> None: + """Test that conflicting response_format definitions raise an error.""" + client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + + # Mock response_format and text_config that conflict + response_format = {"type": "json_schema", "format": {"type": "json_schema", "name": "Test", "schema": {}}} + text_config = {"format": {"type": "json_object"}} + + with pytest.raises(ServiceInvalidRequestError, match="Conflicting response_format definitions"): + client._prepare_response_and_text_format(response_format=response_format, text_config=text_config) + + +def test_response_format_json_object_type() -> None: + """Test response_format with json_object type.""" + client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + + response_format = {"type": "json_object"} + + _, text_config = client._prepare_response_and_text_format(response_format=response_format, text_config=None) + + assert text_config is not None + assert text_config["format"]["type"] == "json_object" + + +def test_response_format_text_type() -> None: + """Test response_format with text type.""" + client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + + response_format = {"type": "text"} + + _, text_config = client._prepare_response_and_text_format(response_format=response_format, text_config=None) + + assert text_config is not None + assert text_config["format"]["type"] == "text" + + +def test_response_format_with_format_key() -> None: + """Test response_format that already has a format key.""" + client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + + response_format = {"format": {"type": "json_schema", "name": "MySchema", "schema": {"type": "object"}}} + + _, text_config = client._prepare_response_and_text_format(response_format=response_format, text_config=None) + + assert text_config is not None + assert text_config["format"]["type"] 
== "json_schema" + assert text_config["format"]["name"] == "MySchema" + + +def test_response_format_json_schema_no_name_uses_title() -> None: + """Test json_schema response_format without name uses title from schema.""" + client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + + response_format = { + "type": "json_schema", + "json_schema": {"schema": {"title": "MyTitle", "type": "object", "properties": {}}}, + } + + _, text_config = client._prepare_response_and_text_format(response_format=response_format, text_config=None) + + assert text_config is not None + assert text_config["format"]["name"] == "MyTitle" + + +def test_response_format_json_schema_with_strict() -> None: + """Test json_schema response_format with strict mode.""" + client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + + response_format = { + "type": "json_schema", + "json_schema": {"name": "StrictSchema", "schema": {"type": "object"}, "strict": True}, + } + + _, text_config = client._prepare_response_and_text_format(response_format=response_format, text_config=None) + + assert text_config is not None + assert text_config["format"]["strict"] is True + + +def test_response_format_json_schema_with_description() -> None: + """Test json_schema response_format with description.""" + client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + + response_format = { + "type": "json_schema", + "json_schema": { + "name": "DescribedSchema", + "schema": {"type": "object"}, + "description": "A test schema", + }, + } + + _, text_config = client._prepare_response_and_text_format(response_format=response_format, text_config=None) + + assert text_config is not None + assert text_config["format"]["description"] == "A test schema" + + +def test_response_format_json_schema_missing_schema() -> None: + """Test json_schema response_format without schema raises error.""" + client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + + response_format 
= {"type": "json_schema", "json_schema": {"name": "NoSchema"}} + + with pytest.raises(ServiceInvalidRequestError, match="json_schema response_format requires a schema"): + client._prepare_response_and_text_format(response_format=response_format, text_config=None) + + +def test_response_format_unsupported_type() -> None: + """Test unsupported response_format type raises error.""" + client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + + response_format = {"type": "unsupported_format"} + + with pytest.raises(ServiceInvalidRequestError, match="Unsupported response_format"): + client._prepare_response_and_text_format(response_format=response_format, text_config=None) + + +def test_response_format_invalid_type() -> None: + """Test invalid response_format type raises error.""" + client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + + response_format = "invalid" # Not a Pydantic model or mapping + + with pytest.raises(ServiceInvalidRequestError, match="response_format must be a Pydantic model or mapping"): + client._prepare_response_and_text_format(response_format=response_format, text_config=None) # type: ignore + + +def test_parse_response_with_store_false() -> None: + """Test _get_conversation_id returns None when store is False.""" + client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + + mock_response = MagicMock() + mock_response.id = "resp_123" + mock_response.conversation = MagicMock() + mock_response.conversation.id = "conv_456" + + conversation_id = client._get_conversation_id(mock_response, store=False) + + assert conversation_id is None + + +def test_parse_response_uses_response_id_when_no_conversation() -> None: + """Test _get_conversation_id returns response ID when no conversation exists.""" + client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + + mock_response = MagicMock() + mock_response.id = "resp_789" + mock_response.conversation = None + + conversation_id = 
client._get_conversation_id(mock_response, store=True) + + assert conversation_id == "resp_789" + + +def test_streaming_chunk_with_usage_only() -> None: + """Test streaming chunk that only contains usage info.""" + client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + chat_options = ChatOptions() + function_call_ids: dict[int, tuple[str, str]] = {} + + mock_event = MagicMock() + mock_event.type = "response.completed" + mock_event.response = MagicMock() + mock_event.response.id = "resp_usage" + mock_event.response.model = "test-model" + mock_event.response.conversation = None + mock_event.response.usage = MagicMock() + mock_event.response.usage.input_tokens = 50 + mock_event.response.usage.output_tokens = 25 + mock_event.response.usage.total_tokens = 75 + mock_event.response.usage.input_tokens_details = None + mock_event.response.usage.output_tokens_details = None + + update = client._parse_chunk_from_openai(mock_event, chat_options, function_call_ids) + + # Should have usage content + assert len(update.contents) == 1 + assert update.contents[0].type == "usage" + assert update.contents[0].usage_details["total_token_count"] == 75 + + def test_prepare_tools_for_openai_with_hosted_mcp() -> None: """Test that HostedMCPTool is converted to the correct response tool dict.""" client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") @@ -708,7 +1142,7 @@ def test_parse_response_from_openai_with_mcp_approval_request() -> None: response = client._parse_response_from_openai(mock_response, options={}) # type: ignore - assert isinstance(response.messages[0].contents[0], FunctionApprovalRequestContent) + assert response.messages[0].contents[0].type == "function_approval_request" req = response.messages[0].contents[0] assert req.id == "approval-1" assert req.function_call.name == "do_sensitive_action" @@ -874,8 +1308,8 @@ def test_parse_chunk_from_openai_with_mcp_approval_request() -> None: mock_event.item = mock_item update = 
client._parse_chunk_from_openai(mock_event, chat_options, function_call_ids) - assert any(isinstance(c, FunctionApprovalRequestContent) for c in update.contents) - fa = next(c for c in update.contents if isinstance(c, FunctionApprovalRequestContent)) + assert any(c.type == "function_approval_request" for c in update.contents) + fa = next(c for c in update.contents if c.type == "function_approval_request") assert fa.id == "approval-stream-1" assert fa.function_call.name == "do_stream_action" @@ -925,12 +1359,12 @@ async def test_end_to_end_mcp_approval_flow(span_exporter) -> None: with patch.object(client.client.responses, "create", side_effect=[mock_response1, mock_response2]) as mock_create: # First call: get the approval request response = await client.get_response(messages=[ChatMessage(role="user", text="Trigger approval")]) - assert isinstance(response.messages[0].contents[0], FunctionApprovalRequestContent) + assert response.messages[0].contents[0].type == "function_approval_request" req = response.messages[0].contents[0] assert req.id == "approval-1" # Build a user approval and send it (include required function_call) - approval = FunctionApprovalResponseContent(approved=True, id=req.id, function_call=req.function_call) + approval = Content.from_function_approval_response(approved=True, id=req.id, function_call=req.function_call) approval_message = ChatMessage(role="user", contents=[approval]) _ = await client.get_response(messages=[approval_message]) @@ -961,9 +1395,9 @@ def test_usage_details_basic() -> None: details = client._parse_usage_from_openai(mock_usage) # type: ignore assert details is not None - assert details.input_token_count == 100 - assert details.output_token_count == 50 - assert details.total_token_count == 150 + assert details["input_token_count"] == 100 + assert details["output_token_count"] == 50 + assert details["total_token_count"] == 150 def test_usage_details_with_cached_tokens() -> None: @@ -980,8 +1414,8 @@ def 
test_usage_details_with_cached_tokens() -> None: details = client._parse_usage_from_openai(mock_usage) # type: ignore assert details is not None - assert details.input_token_count == 200 - assert details.additional_counts["openai.cached_input_tokens"] == 25 + assert details["input_token_count"] == 200 + assert details["openai.cached_input_tokens"] == 25 def test_usage_details_with_reasoning_tokens() -> None: @@ -998,8 +1432,8 @@ def test_usage_details_with_reasoning_tokens() -> None: details = client._parse_usage_from_openai(mock_usage) # type: ignore assert details is not None - assert details.output_token_count == 80 - assert details.additional_counts["openai.reasoning_tokens"] == 30 + assert details["output_token_count"] == 80 + assert details["openai.reasoning_tokens"] == 30 def test_get_metadata_from_response() -> None: @@ -1098,7 +1532,7 @@ def test_streaming_annotation_added_with_file_path() -> None: assert len(response.contents) == 1 content = response.contents[0] - assert isinstance(content, HostedFileContent) + assert content.type == "hosted_file" assert content.file_id == "file-abc123" assert content.additional_properties is not None assert content.additional_properties.get("annotation_index") == 0 @@ -1125,7 +1559,7 @@ def test_streaming_annotation_added_with_file_citation() -> None: assert len(response.contents) == 1 content = response.contents[0] - assert isinstance(content, HostedFileContent) + assert content.type == "hosted_file" assert content.file_id == "file-xyz789" assert content.additional_properties is not None assert content.additional_properties.get("filename") == "sample.txt" @@ -1154,7 +1588,7 @@ def test_streaming_annotation_added_with_container_file_citation() -> None: assert len(response.contents) == 1 content = response.contents[0] - assert isinstance(content, HostedFileContent) + assert content.type == "hosted_file" assert content.file_id == "file-container123" assert content.additional_properties is not None assert 
content.additional_properties.get("container_id") == "container-456" @@ -1228,7 +1662,7 @@ def test_prepare_content_for_openai_image_content() -> None: client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") # Test image content with detail parameter and file_id - image_content_with_detail = UriContent( + image_content_with_detail = Content.from_uri( uri="https://example.com/image.jpg", media_type="image/jpeg", additional_properties={"detail": "high", "file_id": "file_123"}, @@ -1240,7 +1674,7 @@ def test_prepare_content_for_openai_image_content() -> None: assert result["file_id"] == "file_123" # Test image content without additional properties (defaults) - image_content_basic = UriContent(uri="https://example.com/basic.png", media_type="image/png") + image_content_basic = Content.from_uri(uri="https://example.com/basic.png", media_type="image/png") result = client._prepare_content_for_openai(Role.USER, image_content_basic, {}) # type: ignore assert result["type"] == "input_image" assert result["detail"] == "auto" @@ -1252,14 +1686,14 @@ def test_prepare_content_for_openai_audio_content() -> None: client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") # Test WAV audio content - wav_content = UriContent(uri="data:audio/wav;base64,abc123", media_type="audio/wav") + wav_content = Content.from_uri(uri="data:audio/wav;base64,abc123", media_type="audio/wav") result = client._prepare_content_for_openai(Role.USER, wav_content, {}) # type: ignore assert result["type"] == "input_audio" assert result["input_audio"]["data"] == "data:audio/wav;base64,abc123" assert result["input_audio"]["format"] == "wav" # Test MP3 audio content - mp3_content = UriContent(uri="data:audio/mp3;base64,def456", media_type="audio/mp3") + mp3_content = Content.from_uri(uri="data:audio/mp3;base64,def456", media_type="audio/mp3") result = client._prepare_content_for_openai(Role.USER, mp3_content, {}) # type: ignore assert result["type"] == "input_audio" assert 
result["input_audio"]["format"] == "mp3" @@ -1270,12 +1704,12 @@ def test_prepare_content_for_openai_unsupported_content() -> None: client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") # Test unsupported audio format - unsupported_audio = UriContent(uri="data:audio/ogg;base64,ghi789", media_type="audio/ogg") + unsupported_audio = Content.from_uri(uri="data:audio/ogg;base64,ghi789", media_type="audio/ogg") result = client._prepare_content_for_openai(Role.USER, unsupported_audio, {}) # type: ignore assert result == {} # Test non-media content - text_uri_content = UriContent(uri="https://example.com/document.txt", media_type="text/plain") + text_uri_content = Content.from_uri(uri="https://example.com/document.txt", media_type="text/plain") result = client._prepare_content_for_openai(Role.USER, text_uri_content, {}) # type: ignore assert result == {} @@ -1299,11 +1733,9 @@ def test_parse_chunk_from_openai_code_interpreter() -> None: result = client._parse_chunk_from_openai(mock_event_image, chat_options, function_call_ids) # type: ignore assert len(result.contents) == 1 - assert isinstance(result.contents[0], CodeInterpreterToolResultContent) + assert result.contents[0].type == "code_interpreter_tool_result" assert result.contents[0].outputs - assert any( - isinstance(out, UriContent) and out.uri == "https://example.com/plot.png" for out in result.contents[0].outputs - ) + assert any(out.type == "uri" and out.uri == "https://example.com/plot.png" for out in result.contents[0].outputs) def test_parse_chunk_from_openai_reasoning() -> None: @@ -1324,7 +1756,7 @@ def test_parse_chunk_from_openai_reasoning() -> None: result = client._parse_chunk_from_openai(mock_event_reasoning, chat_options, function_call_ids) # type: ignore assert len(result.contents) == 1 - assert isinstance(result.contents[0], TextReasoningContent) + assert result.contents[0].type == "text_reasoning" assert result.contents[0].text == "Analyzing the problem step by step..." 
if result.contents[0].additional_properties: assert result.contents[0].additional_properties["summary"] == "Problem analysis summary" @@ -1335,7 +1767,7 @@ def test_prepare_content_for_openai_text_reasoning_comprehensive() -> None: client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") # Test TextReasoningContent with all additional properties - comprehensive_reasoning = TextReasoningContent( + comprehensive_reasoning = Content.from_text_reasoning( text="Comprehensive reasoning summary", additional_properties={ "status": "in_progress", @@ -1371,7 +1803,7 @@ def test_streaming_reasoning_text_delta_event() -> None: response = client._parse_chunk_from_openai(event, chat_options, function_call_ids) # type: ignore assert len(response.contents) == 1 - assert isinstance(response.contents[0], TextReasoningContent) + assert response.contents[0].type == "text_reasoning" assert response.contents[0].text == "reasoning delta" assert response.contents[0].raw_representation == event mock_metadata.assert_called_once_with(event) @@ -1396,7 +1828,7 @@ def test_streaming_reasoning_text_done_event() -> None: response = client._parse_chunk_from_openai(event, chat_options, function_call_ids) # type: ignore assert len(response.contents) == 1 - assert isinstance(response.contents[0], TextReasoningContent) + assert response.contents[0].type == "text_reasoning" assert response.contents[0].text == "complete reasoning" assert response.contents[0].raw_representation == event mock_metadata.assert_called_once_with(event) @@ -1422,7 +1854,7 @@ def test_streaming_reasoning_summary_text_delta_event() -> None: response = client._parse_chunk_from_openai(event, chat_options, function_call_ids) # type: ignore assert len(response.contents) == 1 - assert isinstance(response.contents[0], TextReasoningContent) + assert response.contents[0].type == "text_reasoning" assert response.contents[0].text == "summary delta" assert response.contents[0].raw_representation == event 
mock_metadata.assert_called_once_with(event) @@ -1447,7 +1879,7 @@ def test_streaming_reasoning_summary_text_done_event() -> None: response = client._parse_chunk_from_openai(event, chat_options, function_call_ids) # type: ignore assert len(response.contents) == 1 - assert isinstance(response.contents[0], TextReasoningContent) + assert response.contents[0].type == "text_reasoning" assert response.contents[0].text == "complete summary" assert response.contents[0].raw_representation == event mock_metadata.assert_called_once_with(event) @@ -1488,8 +1920,8 @@ def test_streaming_reasoning_events_preserve_metadata() -> None: assert reasoning_response.additional_properties == {"test": "metadata"} # Content types should be different - assert isinstance(text_response.contents[0], TextContent) - assert isinstance(reasoning_response.contents[0], TextReasoningContent) + assert text_response.contents[0].type == "text" + assert reasoning_response.contents[0].type == "text_reasoning" def test_parse_response_from_openai_image_generation_raw_base64(): @@ -1521,11 +1953,11 @@ def test_parse_response_from_openai_image_generation_raw_base64(): # Verify the response contains call + result with DataContent output assert len(response.messages[0].contents) == 2 call_content, result_content = response.messages[0].contents - assert isinstance(call_content, ImageGenerationToolCallContent) - assert isinstance(result_content, ImageGenerationToolResultContent) + assert call_content.type == "image_generation_tool_call" + assert result_content.type == "image_generation_tool_result" assert result_content.outputs data_out = result_content.outputs - assert isinstance(data_out, DataContent) + assert data_out.type == "data" assert data_out.uri.startswith("data:image/png;base64,") assert data_out.media_type == "image/png" @@ -1558,11 +1990,11 @@ def test_parse_response_from_openai_image_generation_existing_data_uri(): # Verify the response contains call + result with DataContent output assert 
len(response.messages[0].contents) == 2 call_content, result_content = response.messages[0].contents - assert isinstance(call_content, ImageGenerationToolCallContent) - assert isinstance(result_content, ImageGenerationToolResultContent) + assert call_content.type == "image_generation_tool_call" + assert result_content.type == "image_generation_tool_result" assert result_content.outputs data_out = result_content.outputs - assert isinstance(data_out, DataContent) + assert data_out.type == "data" assert data_out.uri == f"data:image/webp;base64,{valid_webp_base64}" assert data_out.media_type == "image/webp" @@ -1591,9 +2023,9 @@ def test_parse_response_from_openai_image_generation_format_detection(): with patch.object(client, "_get_metadata_from_response", return_value={}): response_jpeg = client._parse_response_from_openai(mock_response_jpeg, options={}) # type: ignore result_contents = response_jpeg.messages[0].contents - assert isinstance(result_contents[1], ImageGenerationToolResultContent) + assert result_contents[1].type == "image_generation_tool_result" outputs = result_contents[1].outputs - assert outputs and isinstance(outputs, DataContent) + assert outputs and outputs.type == "data" assert outputs.media_type == "image/jpeg" assert "data:image/jpeg;base64," in outputs.uri @@ -1617,7 +2049,7 @@ def test_parse_response_from_openai_image_generation_format_detection(): with patch.object(client, "_get_metadata_from_response", return_value={}): response_webp = client._parse_response_from_openai(mock_response_webp, options={}) # type: ignore outputs_webp = response_webp.messages[0].contents[1].outputs - assert outputs_webp and isinstance(outputs_webp, DataContent) + assert outputs_webp and outputs_webp.type == "data" assert outputs_webp.media_type == "image/webp" assert "data:image/webp;base64," in outputs_webp.uri @@ -1650,7 +2082,7 @@ def test_parse_response_from_openai_image_generation_fallback(): # Verify it falls back to PNG format for unrecognized binary data 
assert len(response.messages[0].contents) == 2 result_content = response.messages[0].contents[1] - assert isinstance(result_content, ImageGenerationToolResultContent) + assert result_content.type == "image_generation_tool_result" assert result_content.outputs content = result_content.outputs assert content.media_type == "image/png" @@ -1682,6 +2114,20 @@ async def test_prepare_options_store_parameter_handling() -> None: assert "previous_response_id" not in options +async def test_conversation_id_precedence_kwargs_over_options() -> None: + """When both kwargs and options contain conversation_id, kwargs wins.""" + client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + messages = [ChatMessage(role="user", text="Hello")] + + # options has a stale response id, kwargs carries the freshest one + opts = {"conversation_id": "resp_old_123"} + run_opts = await client._prepare_options(messages, opts, conversation_id="resp_new_456") # type: ignore + + # Verify kwargs takes precedence and maps to previous_response_id for resp_* IDs + assert run_opts.get("previous_response_id") == "resp_new_456" + assert "conversation" not in run_opts + + def test_with_callable_api_key() -> None: """Test OpenAIResponsesClient initialization with callable API key.""" @@ -1944,7 +2390,7 @@ async def test_integration_streaming_file_search() -> None: assert chunk is not None assert isinstance(chunk, ChatResponseUpdate) for content in chunk.contents: - if isinstance(content, TextContent) and content.text: + if content.type == "text" and content.text: full_message += content.text await delete_vector_store(openai_responses_client, file_id, vector_store.vector_store_id) diff --git a/python/packages/core/tests/test_observability_datetime.py b/python/packages/core/tests/test_observability_datetime.py index 6ad3d77e1a..2510a5b355 100644 --- a/python/packages/core/tests/test_observability_datetime.py +++ b/python/packages/core/tests/test_observability_datetime.py @@ -5,7 +5,7 @@ import 
json from datetime import datetime -from agent_framework._types import FunctionResultContent +from agent_framework import Content from agent_framework.observability import _to_otel_part @@ -14,7 +14,7 @@ def test_datetime_in_tool_results() -> None: Reproduces issue #2219 where datetime objects caused TypeError. """ - content = FunctionResultContent( + content = Content.from_function_result( call_id="test-call", result={"timestamp": datetime(2025, 11, 16, 10, 30, 0)}, ) diff --git a/python/packages/core/tests/workflow/test_agent_executor.py b/python/packages/core/tests/workflow/test_agent_executor.py index 95225cb4a3..0fa2bfd952 100644 --- a/python/packages/core/tests/workflow/test_agent_executor.py +++ b/python/packages/core/tests/workflow/test_agent_executor.py @@ -11,9 +11,9 @@ BaseAgent, ChatMessage, ChatMessageStore, + Content, Role, SequentialBuilder, - TextContent, WorkflowOutputEvent, WorkflowRunState, WorkflowStatusEvent, @@ -49,7 +49,7 @@ async def run_stream( # type: ignore[override] **kwargs: Any, ) -> AsyncIterable[AgentResponseUpdate]: self.call_count += 1 - yield AgentResponseUpdate(contents=[TextContent(text=f"Response #{self.call_count}: {self.name}")]) + yield AgentResponseUpdate(contents=[Content.from_text(text=f"Response #{self.call_count}: {self.name}")]) async def test_agent_executor_checkpoint_stores_and_restores_state() -> None: diff --git a/python/packages/core/tests/workflow/test_agent_executor_tool_calls.py b/python/packages/core/tests/workflow/test_agent_executor_tool_calls.py index ecf8b3d635..ac861d34b2 100644 --- a/python/packages/core/tests/workflow/test_agent_executor_tool_calls.py +++ b/python/packages/core/tests/workflow/test_agent_executor_tool_calls.py @@ -19,12 +19,9 @@ ChatMessage, ChatResponse, ChatResponseUpdate, - FunctionApprovalRequestContent, - FunctionCallContent, - FunctionResultContent, + Content, RequestInfoEvent, Role, - TextContent, WorkflowBuilder, WorkflowContext, WorkflowOutputEvent, @@ -60,14 +57,14 @@ async def 
run_stream( """Simulate streaming with tool calls and results.""" # First update: some text yield AgentResponseUpdate( - contents=[TextContent(text="Let me search for that...")], + contents=[Content.from_text(text="Let me search for that...")], role=Role.ASSISTANT, ) # Second update: tool call (no text!) yield AgentResponseUpdate( contents=[ - FunctionCallContent( + Content.from_function_call( call_id="call_123", name="search", arguments={"query": "weather"}, @@ -79,7 +76,7 @@ async def run_stream( # Third update: tool result (no text!) yield AgentResponseUpdate( contents=[ - FunctionResultContent( + Content.from_function_result( call_id="call_123", result={"temperature": 72, "condition": "sunny"}, ) @@ -89,7 +86,7 @@ async def run_stream( # Fourth update: final text response yield AgentResponseUpdate( - contents=[TextContent(text="The weather is sunny, 72°F.")], + contents=[Content.from_text(text="The weather is sunny, 72°F.")], role=Role.ASSISTANT, ) @@ -113,25 +110,25 @@ async def test_agent_executor_emits_tool_calls_in_streaming_mode() -> None: # First event: text update assert events[0].data is not None - assert isinstance(events[0].data.contents[0], TextContent) + assert events[0].data.contents[0].type == "text" assert "Let me search" in events[0].data.contents[0].text # Second event: function call assert events[1].data is not None - assert isinstance(events[1].data.contents[0], FunctionCallContent) + assert events[1].data.contents[0].type == "function_call" func_call = events[1].data.contents[0] assert func_call.call_id == "call_123" assert func_call.name == "search" # Third event: function result assert events[2].data is not None - assert isinstance(events[2].data.contents[0], FunctionResultContent) + assert events[2].data.contents[0].type == "function_result" func_result = events[2].data.contents[0] assert func_result.call_id == "call_123" # Fourth event: final text assert events[3].data is not None - assert isinstance(events[3].data.contents[0], 
TextContent) + assert events[3].data.contents[0].type == "text" assert "sunny" in events[3].data.contents[0].text @@ -161,10 +158,10 @@ async def get_response( messages=ChatMessage( role="assistant", contents=[ - FunctionCallContent( + Content.from_function_call( call_id="1", name="mock_tool_requiring_approval", arguments='{"query": "test"}' ), - FunctionCallContent( + Content.from_function_call( call_id="2", name="mock_tool_requiring_approval", arguments='{"query": "test"}' ), ], @@ -175,7 +172,7 @@ async def get_response( messages=ChatMessage( role="assistant", contents=[ - FunctionCallContent( + Content.from_function_call( call_id="1", name="mock_tool_requiring_approval", arguments='{"query": "test"}' ) ], @@ -196,10 +193,10 @@ async def get_streaming_response( if self._parallel_request: yield ChatResponseUpdate( contents=[ - FunctionCallContent( + Content.from_function_call( call_id="1", name="mock_tool_requiring_approval", arguments='{"query": "test"}' ), - FunctionCallContent( + Content.from_function_call( call_id="2", name="mock_tool_requiring_approval", arguments='{"query": "test"}' ), ], @@ -208,15 +205,15 @@ async def get_streaming_response( else: yield ChatResponseUpdate( contents=[ - FunctionCallContent( + Content.from_function_call( call_id="1", name="mock_tool_requiring_approval", arguments='{"query": "test"}' ) ], role="assistant", ) else: - yield ChatResponseUpdate(text=TextContent(text="Tool executed "), role="assistant") - yield ChatResponseUpdate(contents=[TextContent(text="successfully.")], role="assistant") + yield ChatResponseUpdate(text=Content.from_text(text="Tool executed "), role="assistant") + yield ChatResponseUpdate(contents=[Content.from_text(text="successfully.")], role="assistant") self._iteration += 1 @@ -243,12 +240,14 @@ async def test_agent_executor_tool_call_with_approval() -> None: # Assert assert len(events.get_request_info_events()) == 1 approval_request = events.get_request_info_events()[0] - assert 
isinstance(approval_request.data, FunctionApprovalRequestContent) + assert approval_request.data.type == "function_approval_request" assert approval_request.data.function_call.name == "mock_tool_requiring_approval" assert approval_request.data.function_call.arguments == '{"query": "test"}' # Act - events = await workflow.send_responses({approval_request.request_id: approval_request.data.create_response(True)}) + events = await workflow.send_responses({ + approval_request.request_id: approval_request.data.to_function_approval_response(True) + }) # Assert final_response = events.get_outputs() @@ -276,14 +275,14 @@ async def test_agent_executor_tool_call_with_approval_streaming() -> None: # Assert assert len(request_info_events) == 1 approval_request = request_info_events[0] - assert isinstance(approval_request.data, FunctionApprovalRequestContent) + assert approval_request.data.type == "function_approval_request" assert approval_request.data.function_call.name == "mock_tool_requiring_approval" assert approval_request.data.function_call.arguments == '{"query": "test"}' # Act output: str | None = None async for event in workflow.send_responses_streaming({ - approval_request.request_id: approval_request.data.create_response(True) + approval_request.request_id: approval_request.data.to_function_approval_response(True) }): if isinstance(event, WorkflowOutputEvent): output = event.data @@ -310,13 +309,13 @@ async def test_agent_executor_parallel_tool_call_with_approval() -> None: # Assert assert len(events.get_request_info_events()) == 2 for approval_request in events.get_request_info_events(): - assert isinstance(approval_request.data, FunctionApprovalRequestContent) + assert approval_request.data.type == "function_approval_request" assert approval_request.data.function_call.name == "mock_tool_requiring_approval" assert approval_request.data.function_call.arguments == '{"query": "test"}' # Act responses = { - approval_request.request_id: 
approval_request.data.create_response(True) # type: ignore + approval_request.request_id: approval_request.data.to_function_approval_response(True) # type: ignore for approval_request in events.get_request_info_events() } events = await workflow.send_responses(responses) @@ -347,13 +346,13 @@ async def test_agent_executor_parallel_tool_call_with_approval_streaming() -> No # Assert assert len(request_info_events) == 2 for approval_request in request_info_events: - assert isinstance(approval_request.data, FunctionApprovalRequestContent) + assert approval_request.data.type == "function_approval_request" assert approval_request.data.function_call.name == "mock_tool_requiring_approval" assert approval_request.data.function_call.arguments == '{"query": "test"}' # Act responses = { - approval_request.request_id: approval_request.data.create_response(True) # type: ignore + approval_request.request_id: approval_request.data.to_function_approval_response(True) # type: ignore for approval_request in request_info_events } diff --git a/python/packages/core/tests/workflow/test_checkpoint_decode.py b/python/packages/core/tests/workflow/test_checkpoint_decode.py index b126eafacf..431c70cc3c 100644 --- a/python/packages/core/tests/workflow/test_checkpoint_decode.py +++ b/python/packages/core/tests/workflow/test_checkpoint_decode.py @@ -3,7 +3,10 @@ from dataclasses import dataclass # noqa: I001 from typing import Any, cast + from agent_framework._workflows._checkpoint_encoding import ( + DATACLASS_MARKER, + MODEL_MARKER, decode_checkpoint_value, encode_checkpoint_value, ) @@ -126,3 +129,110 @@ def test_encode_decode_nested_structures() -> None: assert response.data == "first response" assert isinstance(response.original_request, SampleRequest) assert response.original_request.request_id == "req-1" + + +def test_encode_allows_marker_key_without_value_key() -> None: + """Test that encoding a dict with only the marker key (no 'value') is allowed.""" + dict_with_marker_only = { + 
MODEL_MARKER: "some.module:FakeClass", + "other_key": "test", + } + encoded = encode_checkpoint_value(dict_with_marker_only) + assert MODEL_MARKER in encoded + assert "other_key" in encoded + + +def test_encode_allows_value_key_without_marker_key() -> None: + """Test that encoding a dict with only 'value' key (no marker) is allowed.""" + dict_with_value_only = { + "value": {"data": "test"}, + "other_key": "test", + } + encoded = encode_checkpoint_value(dict_with_value_only) + assert "value" in encoded + assert "other_key" in encoded + + +def test_encode_allows_marker_with_value_key() -> None: + """Test that encoding a dict with marker and 'value' keys is allowed. + + This is allowed because legitimate encoded data may contain these keys, + and security is enforced at deserialization time by validating class types. + """ + dict_with_both = { + MODEL_MARKER: "some.module:SomeClass", + "value": {"data": "test"}, + "strategy": "to_dict", + } + encoded = encode_checkpoint_value(dict_with_both) + assert MODEL_MARKER in encoded + assert "value" in encoded + + +class NotADataclass: + """A regular class that is not a dataclass.""" + + def __init__(self, value: str) -> None: + self.value = value + + def get_value(self) -> str: + return self.value + + +class NotAModel: + """A regular class that does not support the model protocol.""" + + def __init__(self, value: str) -> None: + self.value = value + + def get_value(self) -> str: + return self.value + + +def test_decode_rejects_non_dataclass_with_dataclass_marker() -> None: + """Test that decode returns raw value when marked class is not a dataclass.""" + # Manually construct a payload that claims NotADataclass is a dataclass + fake_payload = { + DATACLASS_MARKER: f"{NotADataclass.__module__}:{NotADataclass.__name__}", + "value": {"value": "test_value"}, + } + + decoded = decode_checkpoint_value(fake_payload) + + # Should return the raw decoded value, not an instance of NotADataclass + assert isinstance(decoded, dict) + assert 
decoded["value"] == "test_value" + + +def test_decode_rejects_non_model_with_model_marker() -> None: + """Test that decode returns raw value when marked class doesn't support model protocol.""" + # Manually construct a payload that claims NotAModel supports the model protocol + fake_payload = { + MODEL_MARKER: f"{NotAModel.__module__}:{NotAModel.__name__}", + "strategy": "to_dict", + "value": {"value": "test_value"}, + } + + decoded = decode_checkpoint_value(fake_payload) + + # Should return the raw decoded value, not an instance of NotAModel + assert isinstance(decoded, dict) + assert decoded["value"] == "test_value" + + +def test_encode_allows_nested_dict_with_marker_keys() -> None: + """Test that encoding allows nested dicts containing marker patterns. + + Security is enforced at deserialization time, not serialization time, + so legitimate encoded data can contain markers at any nesting level. + """ + nested_data = { + "outer": { + MODEL_MARKER: "some.module:SomeClass", + "value": {"data": "test"}, + } + } + + encoded = encode_checkpoint_value(nested_data) + assert "outer" in encoded + assert MODEL_MARKER in encoded["outer"] diff --git a/python/packages/core/tests/workflow/test_checkpoint_encode.py b/python/packages/core/tests/workflow/test_checkpoint_encode.py new file mode 100644 index 0000000000..3f4db1f864 --- /dev/null +++ b/python/packages/core/tests/workflow/test_checkpoint_encode.py @@ -0,0 +1,423 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +from dataclasses import dataclass +from typing import Any + +from agent_framework._workflows._checkpoint_encoding import ( + _CYCLE_SENTINEL, + DATACLASS_MARKER, + MODEL_MARKER, + encode_checkpoint_value, +) + + +@dataclass +class SimpleDataclass: + """A simple dataclass for testing encoding.""" + + name: str + value: int + + +@dataclass +class NestedDataclass: + """A dataclass with nested dataclass field.""" + + outer_name: str + inner: SimpleDataclass + + +class ModelWithToDict: + """A class that implements to_dict/from_dict protocol.""" + + def __init__(self, data: str) -> None: + self.data = data + + def to_dict(self) -> dict[str, Any]: + return {"data": self.data} + + @classmethod + def from_dict(cls, d: dict[str, Any]) -> "ModelWithToDict": + return cls(data=d["data"]) + + +class ModelWithToJson: + """A class that implements to_json/from_json protocol.""" + + def __init__(self, data: str) -> None: + self.data = data + + def to_json(self) -> str: + return f'{{"data": "{self.data}"}}' + + @classmethod + def from_json(cls, json_str: str) -> "ModelWithToJson": + import json + + d = json.loads(json_str) + return cls(data=d["data"]) + + +class UnknownObject: + """A class that doesn't support any serialization protocol.""" + + def __init__(self, value: str) -> None: + self.value = value + + def __str__(self) -> str: + return f"UnknownObject({self.value})" + + +# --- Tests for primitive encoding --- + + +def test_encode_string() -> None: + """Test encoding a string value.""" + result = encode_checkpoint_value("hello") + assert result == "hello" + + +def test_encode_integer() -> None: + """Test encoding an integer value.""" + result = encode_checkpoint_value(42) + assert result == 42 + + +def test_encode_float() -> None: + """Test encoding a float value.""" + result = encode_checkpoint_value(3.14) + assert result == 3.14 + + +def test_encode_boolean_true() -> None: + """Test encoding a True boolean value.""" + result = encode_checkpoint_value(True) + assert result 
is True + + +def test_encode_boolean_false() -> None: + """Test encoding a False boolean value.""" + result = encode_checkpoint_value(False) + assert result is False + + +def test_encode_none() -> None: + """Test encoding a None value.""" + result = encode_checkpoint_value(None) + assert result is None + + +# --- Tests for collection encoding --- + + +def test_encode_empty_dict() -> None: + """Test encoding an empty dictionary.""" + result = encode_checkpoint_value({}) + assert result == {} + + +def test_encode_simple_dict() -> None: + """Test encoding a simple dictionary with primitive values.""" + data = {"name": "test", "count": 5, "active": True} + result = encode_checkpoint_value(data) + assert result == {"name": "test", "count": 5, "active": True} + + +def test_encode_dict_with_non_string_keys() -> None: + """Test encoding a dictionary with non-string keys (converted to strings).""" + data = {1: "one", 2: "two"} + result = encode_checkpoint_value(data) + assert result == {"1": "one", "2": "two"} + + +def test_encode_empty_list() -> None: + """Test encoding an empty list.""" + result = encode_checkpoint_value([]) + assert result == [] + + +def test_encode_simple_list() -> None: + """Test encoding a simple list with primitive values.""" + data = [1, 2, 3, "four"] + result = encode_checkpoint_value(data) + assert result == [1, 2, 3, "four"] + + +def test_encode_tuple() -> None: + """Test encoding a tuple (converted to list).""" + data = (1, 2, 3) + result = encode_checkpoint_value(data) + assert result == [1, 2, 3] + + +def test_encode_set() -> None: + """Test encoding a set (converted to list).""" + data = {1, 2, 3} + result = encode_checkpoint_value(data) + assert isinstance(result, list) + assert sorted(result) == [1, 2, 3] + + +def test_encode_nested_dict() -> None: + """Test encoding a nested dictionary structure.""" + data = { + "outer": { + "inner": { + "value": 42, + } + } + } + result = encode_checkpoint_value(data) + assert result == {"outer": 
{"inner": {"value": 42}}} + + +def test_encode_list_of_dicts() -> None: + """Test encoding a list containing dictionaries.""" + data = [{"a": 1}, {"b": 2}] + result = encode_checkpoint_value(data) + assert result == [{"a": 1}, {"b": 2}] + + +# --- Tests for dataclass encoding --- + + +def test_encode_simple_dataclass() -> None: + """Test encoding a simple dataclass.""" + obj = SimpleDataclass(name="test", value=42) + result = encode_checkpoint_value(obj) + + assert isinstance(result, dict) + assert DATACLASS_MARKER in result + assert "value" in result + assert result["value"] == {"name": "test", "value": 42} + + +def test_encode_nested_dataclass() -> None: + """Test encoding a dataclass with nested dataclass fields.""" + inner = SimpleDataclass(name="inner", value=10) + outer = NestedDataclass(outer_name="outer", inner=inner) + result = encode_checkpoint_value(outer) + + assert isinstance(result, dict) + assert DATACLASS_MARKER in result + assert "value" in result + + outer_value = result["value"] + assert outer_value["outer_name"] == "outer" + assert DATACLASS_MARKER in outer_value["inner"] + + +def test_encode_list_of_dataclasses() -> None: + """Test encoding a list containing dataclass instances.""" + data = [ + SimpleDataclass(name="first", value=1), + SimpleDataclass(name="second", value=2), + ] + result = encode_checkpoint_value(data) + + assert isinstance(result, list) + assert len(result) == 2 + for item in result: + assert DATACLASS_MARKER in item + + +def test_encode_dict_with_dataclass_values() -> None: + """Test encoding a dictionary with dataclass values.""" + data = { + "item1": SimpleDataclass(name="first", value=1), + "item2": SimpleDataclass(name="second", value=2), + } + result = encode_checkpoint_value(data) + + assert isinstance(result, dict) + assert DATACLASS_MARKER in result["item1"] + assert DATACLASS_MARKER in result["item2"] + + +# --- Tests for model protocol encoding --- + + +def test_encode_model_with_to_dict() -> None: + """Test 
encoding an object implementing to_dict/from_dict protocol.""" + obj = ModelWithToDict(data="test_data") + result = encode_checkpoint_value(obj) + + assert isinstance(result, dict) + assert MODEL_MARKER in result + assert result["strategy"] == "to_dict" + assert result["value"] == {"data": "test_data"} + + +def test_encode_model_with_to_json() -> None: + """Test encoding an object implementing to_json/from_json protocol.""" + obj = ModelWithToJson(data="test_data") + result = encode_checkpoint_value(obj) + + assert isinstance(result, dict) + assert MODEL_MARKER in result + assert result["strategy"] == "to_json" + assert '"data": "test_data"' in result["value"] + + +# --- Tests for unknown object encoding --- + + +def test_encode_unknown_object_fallback_to_string() -> None: + """Test that unknown objects are encoded as strings.""" + obj = UnknownObject(value="test") + result = encode_checkpoint_value(obj) + + assert isinstance(result, str) + assert "UnknownObject" in result + + +# --- Tests for cycle detection --- + + +def test_encode_dict_with_self_reference() -> None: + """Test that dict self-references are detected and handled.""" + data: dict[str, Any] = {"name": "test"} + data["self"] = data # Create circular reference + + result = encode_checkpoint_value(data) + assert result["name"] == "test" + assert result["self"] == _CYCLE_SENTINEL + + +def test_encode_list_with_self_reference() -> None: + """Test that list self-references are detected and handled.""" + data: list[Any] = [1, 2] + data.append(data) # Create circular reference + + result = encode_checkpoint_value(data) + assert result[0] == 1 + assert result[1] == 2 + assert result[2] == _CYCLE_SENTINEL + + +# --- Tests for reserved keyword handling --- +# Note: Security is enforced at deserialization time by validating class types, +# not at serialization time. This allows legitimate encoded data to be re-encoded. 
+ + +def test_encode_allows_dict_with_model_marker_and_value() -> None: + """Test that encoding a dict with MODEL_MARKER and 'value' is allowed. + + Security is enforced at deserialization time, not serialization time. + """ + data = { + MODEL_MARKER: "some.module:SomeClass", + "value": {"data": "test"}, + } + result = encode_checkpoint_value(data) + assert MODEL_MARKER in result + assert "value" in result + + +def test_encode_allows_dict_with_dataclass_marker_and_value() -> None: + """Test that encoding a dict with DATACLASS_MARKER and 'value' is allowed. + + Security is enforced at deserialization time, not serialization time. + """ + data = { + DATACLASS_MARKER: "some.module:SomeClass", + "value": {"field": "test"}, + } + result = encode_checkpoint_value(data) + assert DATACLASS_MARKER in result + assert "value" in result + + +def test_encode_allows_nested_dict_with_marker_keys() -> None: + """Test that encoding nested dict with marker keys is allowed. + + Security is enforced at deserialization time, not serialization time. 
+ """ + nested_data = { + "outer": { + MODEL_MARKER: "some.module:SomeClass", + "value": {"data": "test"}, + } + } + result = encode_checkpoint_value(nested_data) + assert "outer" in result + assert MODEL_MARKER in result["outer"] + + +def test_encode_allows_marker_without_value() -> None: + """Test that a dict with marker key but without 'value' key is allowed.""" + data = { + MODEL_MARKER: "some.module:SomeClass", + "other_key": "allowed", + } + result = encode_checkpoint_value(data) + assert MODEL_MARKER in result + assert result["other_key"] == "allowed" + + +def test_encode_allows_value_without_marker() -> None: + """Test that a dict with 'value' key but without marker is allowed.""" + data = { + "value": {"nested": "data"}, + "other_key": "allowed", + } + result = encode_checkpoint_value(data) + assert "value" in result + assert result["other_key"] == "allowed" + + +# --- Tests for max depth protection --- + + +def test_encode_deep_nesting_triggers_max_depth() -> None: + """Test that very deep nesting triggers max depth protection.""" + # Create a deeply nested structure (over 100 levels) + data: dict[str, Any] = {"level": 0} + current = data + for i in range(105): + current["nested"] = {"level": i + 1} + current = current["nested"] + + result = encode_checkpoint_value(data) + + # Navigate to find the max_depth sentinel + current_result = result + found_max_depth = False + for _ in range(110): + if isinstance(current_result, dict) and "nested" in current_result: + current_result = current_result["nested"] + if current_result == "": + found_max_depth = True + break + else: + break + + assert found_max_depth, "Expected sentinel to be found in deeply nested structure" + + +# --- Tests for mixed complex structures --- + + +def test_encode_complex_mixed_structure() -> None: + """Test encoding a complex structure with mixed types.""" + data = { + "string_value": "hello", + "int_value": 42, + "float_value": 3.14, + "bool_value": True, + "none_value": None, + 
"list_value": [1, 2, 3], + "nested_dict": {"a": 1, "b": 2}, + "dataclass_value": SimpleDataclass(name="test", value=100), + } + + result = encode_checkpoint_value(data) + + assert result["string_value"] == "hello" + assert result["int_value"] == 42 + assert result["float_value"] == 3.14 + assert result["bool_value"] is True + assert result["none_value"] is None + assert result["list_value"] == [1, 2, 3] + assert result["nested_dict"] == {"a": 1, "b": 2} + assert DATACLASS_MARKER in result["dataclass_value"] diff --git a/python/packages/core/tests/workflow/test_full_conversation.py b/python/packages/core/tests/workflow/test_full_conversation.py index b1a3194468..9a8f4bd9c9 100644 --- a/python/packages/core/tests/workflow/test_full_conversation.py +++ b/python/packages/core/tests/workflow/test_full_conversation.py @@ -14,10 +14,10 @@ AgentThread, BaseAgent, ChatMessage, + Content, Executor, Role, SequentialBuilder, - TextContent, WorkflowBuilder, WorkflowContext, WorkflowRunState, @@ -50,7 +50,7 @@ async def run_stream( # type: ignore[override] **kwargs: Any, ) -> AsyncIterable[AgentResponseUpdate]: # This agent does not support streaming; yield a single complete response - yield AgentResponseUpdate(contents=[TextContent(text=self._reply_text)]) + yield AgentResponseUpdate(contents=[Content.from_text(text=self._reply_text)]) class _CaptureFullConversation(Executor): @@ -136,7 +136,7 @@ async def run_stream( # type: ignore[override] elif isinstance(m, str): norm.append(ChatMessage(role=Role.USER, text=m)) self._last_messages = norm - yield AgentResponseUpdate(contents=[TextContent(text=self._reply_text)]) + yield AgentResponseUpdate(contents=[Content.from_text(text=self._reply_text)]) async def test_sequential_adapter_uses_full_conversation() -> None: diff --git a/python/packages/core/tests/workflow/test_group_chat.py b/python/packages/core/tests/workflow/test_group_chat.py index c65f19d599..330a887a30 100644 --- 
a/python/packages/core/tests/workflow/test_group_chat.py +++ b/python/packages/core/tests/workflow/test_group_chat.py @@ -17,6 +17,7 @@ ChatMessage, ChatResponse, ChatResponseUpdate, + Content, GroupChatBuilder, GroupChatState, MagenticContext, @@ -25,7 +26,6 @@ MagenticProgressLedgerItem, RequestInfoEvent, Role, - TextContent, WorkflowOutputEvent, WorkflowRunState, WorkflowStatusEvent, @@ -57,7 +57,7 @@ def run_stream( # type: ignore[override] ) -> AsyncIterable[AgentResponseUpdate]: async def _stream() -> AsyncIterable[AgentResponseUpdate]: yield AgentResponseUpdate( - contents=[TextContent(text=self._reply_text)], role=Role.ASSISTANT, author_name=self.name + contents=[Content.from_text(text=self._reply_text)], role=Role.ASSISTANT, author_name=self.name ) return _stream() @@ -141,7 +141,7 @@ def run_stream( async def _stream_initial() -> AsyncIterable[AgentResponseUpdate]: yield AgentResponseUpdate( contents=[ - TextContent( + Content.from_text( text=( '{"terminate": false, "reason": "Selecting agent", ' '"next_speaker": "agent", "final_message": null}' @@ -157,7 +157,7 @@ async def _stream_initial() -> AsyncIterable[AgentResponseUpdate]: async def _stream_final() -> AsyncIterable[AgentResponseUpdate]: yield AgentResponseUpdate( contents=[ - TextContent( + Content.from_text( text=( '{"terminate": true, "reason": "Task complete", ' '"next_speaker": null, "final_message": "agent manager final"}' diff --git a/python/packages/core/tests/workflow/test_handoff.py b/python/packages/core/tests/workflow/test_handoff.py index 268f89d513..83531139c7 100644 --- a/python/packages/core/tests/workflow/test_handoff.py +++ b/python/packages/core/tests/workflow/test_handoff.py @@ -11,12 +11,11 @@ ChatMessage, ChatResponse, ChatResponseUpdate, - FunctionCallContent, + Content, HandoffAgentUserRequest, HandoffBuilder, RequestInfoEvent, Role, - TextContent, WorkflowEvent, WorkflowOutputEvent, resolve_agent_id, @@ -74,14 +73,16 @@ def _build_reply_contents( agent_name: str, 
handoff_to: str | None, call_id: str | None, -) -> list[TextContent | FunctionCallContent]: - contents: list[TextContent | FunctionCallContent] = [] +) -> list[Content]: + contents: list[Content] = [] if handoff_to and call_id: contents.append( - FunctionCallContent(call_id=call_id, name=f"handoff_to_{handoff_to}", arguments={"handoff_to": handoff_to}) + Content.from_function_call( + call_id=call_id, name=f"handoff_to_{handoff_to}", arguments={"handoff_to": handoff_to} + ) ) text = f"{agent_name} reply" - contents.append(TextContent(text=text)) + contents.append(Content.from_text(text=text)) return contents diff --git a/python/packages/core/tests/workflow/test_magentic.py b/python/packages/core/tests/workflow/test_magentic.py index 7e4a5bb48e..24c830968f 100644 --- a/python/packages/core/tests/workflow/test_magentic.py +++ b/python/packages/core/tests/workflow/test_magentic.py @@ -15,6 +15,7 @@ AgentThread, BaseAgent, ChatMessage, + Content, Executor, GroupChatRequestMessage, MagenticBuilder, @@ -28,7 +29,6 @@ RequestInfoEvent, Role, StandardMagenticManager, - TextContent, Workflow, WorkflowCheckpoint, WorkflowCheckpointException, @@ -172,7 +172,7 @@ def run_stream( # type: ignore[override] ) -> AsyncIterable[AgentResponseUpdate]: async def _stream() -> AsyncIterable[AgentResponseUpdate]: yield AgentResponseUpdate( - contents=[TextContent(text=self._reply_text)], role=Role.ASSISTANT, author_name=self.name + contents=[Content.from_text(text=self._reply_text)], role=Role.ASSISTANT, author_name=self.name ) return _stream() @@ -541,7 +541,7 @@ def __init__(self, name: str | None = None) -> None: async def run_stream(self, messages=None, *, thread=None, **kwargs): # type: ignore[override] yield AgentResponseUpdate( - contents=[TextContent(text="thread-ok")], + contents=[Content.from_text(text="thread-ok")], author_name=self.name, role=Role.ASSISTANT, ) @@ -563,7 +563,7 @@ def __init__(self) -> None: async def run_stream(self, messages=None, *, thread=None, **kwargs): # 
type: ignore[override] yield AgentResponseUpdate( - contents=[TextContent(text="assistants-ok")], + contents=[Content.from_text(text="assistants-ok")], author_name=self.name, role=Role.ASSISTANT, ) diff --git a/python/packages/core/tests/workflow/test_sequential.py b/python/packages/core/tests/workflow/test_sequential.py index d104eb8a02..a685db73db 100644 --- a/python/packages/core/tests/workflow/test_sequential.py +++ b/python/packages/core/tests/workflow/test_sequential.py @@ -12,10 +12,10 @@ AgentThread, BaseAgent, ChatMessage, + Content, Executor, Role, SequentialBuilder, - TextContent, TypeCompatibilityError, WorkflowContext, WorkflowOutputEvent, @@ -46,7 +46,7 @@ async def run_stream( # type: ignore[override] **kwargs: Any, ) -> AsyncIterable[AgentResponseUpdate]: # Minimal async generator with one assistant update - yield AgentResponseUpdate(contents=[TextContent(text=f"{self.name} reply")]) + yield AgentResponseUpdate(contents=[Content.from_text(text=f"{self.name} reply")]) class _SummarizerExec(Executor): diff --git a/python/packages/core/tests/workflow/test_workflow.py b/python/packages/core/tests/workflow/test_workflow.py index 5a0f54a24e..6b08b7b22a 100644 --- a/python/packages/core/tests/workflow/test_workflow.py +++ b/python/packages/core/tests/workflow/test_workflow.py @@ -18,12 +18,12 @@ AgentThread, BaseAgent, ChatMessage, + Content, Executor, FileCheckpointStorage, Message, RequestInfoEvent, Role, - TextContent, WorkflowBuilder, WorkflowCheckpointException, WorkflowContext, @@ -881,7 +881,7 @@ async def run_stream( """Streaming run - yields incremental updates.""" # Simulate streaming by yielding character by character for char in self._reply_text: - yield AgentResponseUpdate(contents=[TextContent(text=char)]) + yield AgentResponseUpdate(contents=[Content.from_text(text=char)]) async def test_agent_streaming_vs_non_streaming() -> None: diff --git a/python/packages/core/tests/workflow/test_workflow_agent.py 
b/python/packages/core/tests/workflow/test_workflow_agent.py index 7e47a82c9c..9514efdf74 100644 --- a/python/packages/core/tests/workflow/test_workflow_agent.py +++ b/python/packages/core/tests/workflow/test_workflow_agent.py @@ -14,16 +14,9 @@ AgentThread, ChatMessage, ChatMessageStore, - DataContent, + Content, Executor, - FunctionApprovalRequestContent, - FunctionApprovalResponseContent, - FunctionCallContent, - FunctionResultContent, Role, - TextContent, - UriContent, - UsageContent, UsageDetails, WorkflowAgent, WorkflowBuilder, @@ -44,17 +37,15 @@ def __init__(self, id: str, response_text: str, emit_streaming: bool = False): @handler async def handle_message(self, message: list[ChatMessage], ctx: WorkflowContext[list[ChatMessage]]) -> None: - input_text = ( - message[0].contents[0].text if message and isinstance(message[0].contents[0], TextContent) else "no input" - ) + input_text = message[0].contents[0].text if message and message[0].contents[0].type == "text" else "no input" response_text = f"{self.response_text}: {input_text}" # Create response message for both streaming and non-streaming cases - response_message = ChatMessage(role=Role.ASSISTANT, contents=[TextContent(text=response_text)]) + response_message = ChatMessage(role=Role.ASSISTANT, contents=[Content.from_text(text=response_text)]) # Emit update event. 
streaming_update = AgentResponseUpdate( - contents=[TextContent(text=response_text)], role=Role.ASSISTANT, message_id=str(uuid.uuid4()) + contents=[Content.from_text(text=response_text)], role=Role.ASSISTANT, message_id=str(uuid.uuid4()) ) await ctx.add_event(AgentRunUpdateEvent(executor_id=self.id, data=streaming_update)) @@ -76,7 +67,7 @@ async def handle_request_response( ) -> None: # Handle the response and emit completion response update = AgentResponseUpdate( - contents=[TextContent(text="Request completed successfully")], + contents=[Content.from_text(text="Request completed successfully")], role=Role.ASSISTANT, message_id=str(uuid.uuid4()), ) @@ -99,10 +90,10 @@ async def handle_message(self, messages: list[ChatMessage], ctx: WorkflowContext message_count = len(messages) response_text = f"Received {message_count} messages" - response_message = ChatMessage(role=Role.ASSISTANT, contents=[TextContent(text=response_text)]) + response_message = ChatMessage(role=Role.ASSISTANT, contents=[Content.from_text(text=response_text)]) streaming_update = AgentResponseUpdate( - contents=[TextContent(text=response_text)], role=Role.ASSISTANT, message_id=str(uuid.uuid4()) + contents=[Content.from_text(text=response_text)], role=Role.ASSISTANT, message_id=str(uuid.uuid4()) ) await ctx.add_event(AgentRunUpdateEvent(executor_id=self.id, data=streaming_update)) await ctx.send_message([response_message]) @@ -134,7 +125,7 @@ async def test_end_to_end_basic_workflow(self): for message in result.messages: first_content = message.contents[0] - if isinstance(first_content, TextContent): + if first_content.type == "text": text = first_content.text if text.startswith("Step1:"): step1_messages.append(message) @@ -172,11 +163,11 @@ async def test_end_to_end_basic_workflow_streaming(self): # Verify we got a streaming update assert updates[0].contents is not None - first_content: TextContent = updates[0].contents[0] # type: ignore[assignment] - second_content: TextContent = 
updates[1].contents[0] # type: ignore[assignment] - assert isinstance(first_content, TextContent) + first_content: Content = updates[0].contents[0] # type: ignore[assignment] + second_content: Content = updates[1].contents[0] # type: ignore[assignment] + assert first_content.type == "text" assert "Streaming1: Test input" in first_content.text - assert isinstance(second_content, TextContent) + assert second_content.type == "text" assert "Streaming2: Streaming1: Test input" in second_content.text async def test_end_to_end_request_info_handling(self): @@ -200,17 +191,15 @@ async def test_end_to_end_request_info_handling(self): approval_update: AgentResponseUpdate | None = None for update in updates: - if any(isinstance(content, FunctionApprovalRequestContent) for content in update.contents): + if any(content.type == "function_approval_request" for content in update.contents): approval_update = update break assert approval_update is not None, "Should have received a request_info approval request" - function_call = next( - content for content in approval_update.contents if isinstance(content, FunctionCallContent) - ) + function_call = next(content for content in approval_update.contents if content.type == "function_call") approval_request = next( - content for content in approval_update.contents if isinstance(content, FunctionApprovalRequestContent) + content for content in approval_update.contents if content.type == "function_approval_request" ) # Verify the function call has expected structure @@ -233,10 +222,10 @@ async def test_end_to_end_request_info_handling(self): data="User provided answer", ).to_dict() - approval_response = FunctionApprovalResponseContent( + approval_response = Content.from_function_approval_response( approved=True, id=approval_request.id, - function_call=FunctionCallContent( + function_call=Content.from_function_call( call_id=function_call.call_id, name=function_call.name, arguments=response_args, @@ -306,7 +295,7 @@ async def 
yielding_executor(messages: list[ChatMessage], ctx: WorkflowContext) - workflow = WorkflowBuilder().set_start_executor(yielding_executor).build() # Run directly - should return WorkflowOutputEvent in result - direct_result = await workflow.run([ChatMessage(role=Role.USER, contents=[TextContent(text="hello")])]) + direct_result = await workflow.run([ChatMessage(role=Role.USER, contents=[Content.from_text(text="hello")])]) direct_outputs = direct_result.get_outputs() assert len(direct_outputs) == 1 assert direct_outputs[0] == "processed: hello" @@ -340,14 +329,14 @@ async def yielding_executor(messages: list[ChatMessage], ctx: WorkflowContext) - assert "second output" in texts async def test_workflow_as_agent_yield_output_with_content_types(self) -> None: - """Test that yield_output preserves different content types (TextContent, DataContent, etc.).""" + """Test that yield_output preserves different content types (Content, Content, etc.).""" @executor async def content_yielding_executor(messages: list[ChatMessage], ctx: WorkflowContext) -> None: # Yield different content types - await ctx.yield_output(TextContent(text="text content")) - await ctx.yield_output(DataContent(data=b"binary data", media_type="application/octet-stream")) - await ctx.yield_output(UriContent(uri="https://example.com/image.png", media_type="image/png")) + await ctx.yield_output(Content.from_text(text="text content")) + await ctx.yield_output(Content.from_data(data=b"binary data", media_type="application/octet-stream")) + await ctx.yield_output(Content.from_uri(uri="https://example.com/image.png", media_type="image/png")) workflow = WorkflowBuilder().set_start_executor(content_yielding_executor).build() agent = workflow.as_agent("content-test-agent") @@ -358,13 +347,13 @@ async def content_yielding_executor(messages: list[ChatMessage], ctx: WorkflowCo assert len(result.messages) == 3 # Verify each content type is preserved - assert isinstance(result.messages[0].contents[0], TextContent) + 
assert result.messages[0].contents[0].type == "text" assert result.messages[0].contents[0].text == "text content" - assert isinstance(result.messages[1].contents[0], DataContent) + assert result.messages[1].contents[0].type == "data" assert result.messages[1].contents[0].media_type == "application/octet-stream" - assert isinstance(result.messages[2].contents[0], UriContent) + assert result.messages[2].contents[0].type == "uri" assert result.messages[2].contents[0].uri == "https://example.com/image.png" async def test_workflow_as_agent_yield_output_with_chat_message(self) -> None: @@ -374,7 +363,7 @@ async def test_workflow_as_agent_yield_output_with_chat_message(self) -> None: async def chat_message_executor(messages: list[ChatMessage], ctx: WorkflowContext) -> None: msg = ChatMessage( role=Role.ASSISTANT, - contents=[TextContent(text="response text")], + contents=[Content.from_text(text="response text")], author_name="custom-author", ) await ctx.yield_output(msg) @@ -404,7 +393,7 @@ def __str__(self) -> str: async def raw_yielding_executor(messages: list[ChatMessage], ctx: WorkflowContext) -> None: # Yield different types of data await ctx.yield_output("simple string") - await ctx.yield_output(TextContent(text="text content")) + await ctx.yield_output(Content.from_text(text="text content")) custom = CustomData(42) await ctx.yield_output(custom) @@ -420,7 +409,7 @@ async def raw_yielding_executor(messages: list[ChatMessage], ctx: WorkflowContex # Verify raw_representation is set for each update assert updates[0].raw_representation == "simple string" - assert isinstance(updates[1].raw_representation, TextContent) + assert updates[1].raw_representation.type == "text" assert updates[1].raw_representation.text == "text content" assert isinstance(updates[2].raw_representation, CustomData) assert updates[2].raw_representation.value == 42 @@ -428,19 +417,19 @@ async def raw_yielding_executor(messages: list[ChatMessage], ctx: WorkflowContex async def 
test_workflow_as_agent_yield_output_with_list_of_chat_messages(self) -> None: """Test that yield_output with list[ChatMessage] extracts contents from all messages. - Note: TextContent items are coalesced by _finalize_response, so multiple text contents - become a single merged TextContent in the final response. + Note: Content items are coalesced by _finalize_response, so multiple text contents + become a single merged Content in the final response. """ @executor async def list_yielding_executor(messages: list[ChatMessage], ctx: WorkflowContext) -> None: # Yield a list of ChatMessages (as SequentialBuilder does) msg_list = [ - ChatMessage(role=Role.USER, contents=[TextContent(text="first message")]), - ChatMessage(role=Role.ASSISTANT, contents=[TextContent(text="second message")]), + ChatMessage(role=Role.USER, contents=[Content.from_text(text="first message")]), + ChatMessage(role=Role.ASSISTANT, contents=[Content.from_text(text="second message")]), ChatMessage( role=Role.ASSISTANT, - contents=[TextContent(text="third"), TextContent(text="fourth")], + contents=[Content.from_text(text="third"), Content.from_text(text="fourth")], ), ] await ctx.yield_output(msg_list) @@ -455,7 +444,7 @@ async def list_yielding_executor(messages: list[ChatMessage], ctx: WorkflowConte assert len(updates) == 1 assert len(updates[0].contents) == 4 - texts = [c.text for c in updates[0].contents if isinstance(c, TextContent)] + texts = [c.text for c in updates[0].contents if c.type == "text"] assert texts == ["first message", "second message", "third", "fourth"] # Verify run() coalesces text contents (expected behavior) @@ -463,7 +452,7 @@ async def list_yielding_executor(messages: list[ChatMessage], ctx: WorkflowConte assert isinstance(result, AgentResponse) assert len(result.messages) == 1 - # TextContent items are coalesced into one + # Content items are coalesced into one assert len(result.messages[0].contents) == 1 assert result.messages[0].text == "first messagesecond 
messagethirdfourth" @@ -599,7 +588,7 @@ async def run_stream( ) -> AsyncIterable[AgentResponseUpdate]: for word in self._response_text.split(): yield AgentResponseUpdate( - contents=[TextContent(text=word + " ")], + contents=[Content.from_text(text=word + " ")], role=Role.ASSISTANT, author_name=self._name, ) @@ -672,7 +661,7 @@ async def run_stream( self, messages: Any, *, thread: AgentThread | None = None, **kwargs: Any ) -> AsyncIterable[AgentResponseUpdate]: yield AgentResponseUpdate( - contents=[TextContent(text=self._response_text)], + contents=[Content.from_text(text=self._response_text)], role=Role.ASSISTANT, author_name=self._name, ) @@ -738,7 +727,7 @@ class AuthorNameExecutor(Executor): async def handle_message(self, message: list[ChatMessage], ctx: WorkflowContext[list[ChatMessage]]) -> None: # Emit update with explicit author_name update = AgentResponseUpdate( - contents=[TextContent(text="Response with author")], + contents=[Content.from_text(text="Response with author")], role=Role.ASSISTANT, author_name="custom_author_name", # Explicitly set message_id=str(uuid.uuid4()), @@ -790,7 +779,7 @@ def test_merge_updates_ordering_by_response_and_message_id(self): updates = [ # Response B, Message 2 (latest in resp B) AgentResponseUpdate( - contents=[TextContent(text="RespB-Msg2")], + contents=[Content.from_text(text="RespB-Msg2")], role=Role.ASSISTANT, response_id="resp-b", message_id="msg-2", @@ -798,7 +787,7 @@ def test_merge_updates_ordering_by_response_and_message_id(self): ), # Response A, Message 1 (earliest overall) AgentResponseUpdate( - contents=[TextContent(text="RespA-Msg1")], + contents=[Content.from_text(text="RespA-Msg1")], role=Role.ASSISTANT, response_id="resp-a", message_id="msg-1", @@ -806,7 +795,7 @@ def test_merge_updates_ordering_by_response_and_message_id(self): ), # Response B, Message 1 (earlier in resp B) AgentResponseUpdate( - contents=[TextContent(text="RespB-Msg1")], + contents=[Content.from_text(text="RespB-Msg1")], 
role=Role.ASSISTANT, response_id="resp-b", message_id="msg-1", @@ -814,7 +803,7 @@ def test_merge_updates_ordering_by_response_and_message_id(self): ), # Response A, Message 2 (later in resp A) AgentResponseUpdate( - contents=[TextContent(text="RespA-Msg2")], + contents=[Content.from_text(text="RespA-Msg2")], role=Role.ASSISTANT, response_id="resp-a", message_id="msg-2", @@ -822,7 +811,7 @@ def test_merge_updates_ordering_by_response_and_message_id(self): ), # Global dangling update (no response_id) - should go at end AgentResponseUpdate( - contents=[TextContent(text="Global-Dangling")], + contents=[Content.from_text(text="Global-Dangling")], role=Role.ASSISTANT, response_id=None, message_id="msg-global", @@ -841,9 +830,7 @@ def test_merge_updates_ordering_by_response_and_message_id(self): # Verify ordering: responses are processed by response_id groups, # within each group messages are chronologically ordered, # global dangling goes at the end - message_texts = [ - msg.contents[0].text if isinstance(msg.contents[0], TextContent) else "" for msg in result.messages - ] + message_texts = [msg.contents[0].text if msg.contents[0].type == "text" else "" for msg in result.messages] # The exact order depends on dict iteration order for response_ids, # but within each response group, chronological order should be maintained @@ -894,9 +881,9 @@ def test_merge_updates_metadata_aggregation(self): updates = [ AgentResponseUpdate( contents=[ - TextContent(text="First"), - UsageContent( - details=UsageDetails(input_token_count=10, output_token_count=5, total_token_count=15) + Content.from_text(text="First"), + Content.from_usage( + usage_details={"input_token_count": 10, "output_token_count": 5, "total_token_count": 15} ), ], role=Role.ASSISTANT, @@ -907,9 +894,9 @@ def test_merge_updates_metadata_aggregation(self): ), AgentResponseUpdate( contents=[ - TextContent(text="Second"), - UsageContent( - details=UsageDetails(input_token_count=20, output_token_count=8, 
total_token_count=28) + Content.from_text(text="Second"), + Content.from_usage( + usage_details={"input_token_count": 20, "output_token_count": 8, "total_token_count": 28} ), ], role=Role.ASSISTANT, @@ -920,8 +907,10 @@ def test_merge_updates_metadata_aggregation(self): ), AgentResponseUpdate( contents=[ - TextContent(text="Third"), - UsageContent(details=UsageDetails(input_token_count=5, output_token_count=3, total_token_count=8)), + Content.from_text(text="Third"), + Content.from_usage( + usage_details={"input_token_count": 5, "output_token_count": 3, "total_token_count": 8} + ), ], role=Role.ASSISTANT, response_id="resp-1", # Same response_id as first @@ -985,7 +974,7 @@ def test_merge_updates_function_result_ordering_github_2977(self): updates = [ # User question AgentResponseUpdate( - contents=[TextContent(text="What is the weather?")], + contents=[Content.from_text(text="What is the weather?")], role=Role.USER, response_id="resp-1", message_id="msg-1", @@ -993,7 +982,9 @@ def test_merge_updates_function_result_ordering_github_2977(self): ), # Assistant with function call AgentResponseUpdate( - contents=[FunctionCallContent(call_id=call_id, name="get_weather", arguments='{"location": "NYC"}')], + contents=[ + Content.from_function_call(call_id=call_id, name="get_weather", arguments='{"location": "NYC"}') + ], role=Role.ASSISTANT, response_id="resp-1", message_id="msg-2", @@ -1002,7 +993,7 @@ def test_merge_updates_function_result_ordering_github_2977(self): # Function result: no response_id previously caused this to go to global_dangling # and be placed at the end (the bug); fix now correctly associates via call_id AgentResponseUpdate( - contents=[FunctionResultContent(call_id=call_id, result="Sunny, 72F")], + contents=[Content.from_function_result(call_id=call_id, result="Sunny, 72F")], role=Role.TOOL, response_id=None, message_id="msg-3", @@ -1010,7 +1001,7 @@ def test_merge_updates_function_result_ordering_github_2977(self): ), # Final assistant answer 
AgentResponseUpdate( - contents=[TextContent(text="The weather in NYC is sunny and 72F.")], + contents=[Content.from_text(text="The weather in NYC is sunny and 72F.")], role=Role.ASSISTANT, response_id="resp-1", message_id="msg-4", @@ -1026,11 +1017,11 @@ def test_merge_updates_function_result_ordering_github_2977(self): content_sequence = [] for msg in result.messages: for content in msg.contents: - if isinstance(content, TextContent): + if content.type == "text": content_sequence.append(("text", msg.role)) - elif isinstance(content, FunctionCallContent): + elif content.type == "function_call": content_sequence.append(("function_call", msg.role)) - elif isinstance(content, FunctionResultContent): + elif content.type == "function_result": content_sequence.append(("function_result", msg.role)) # Verify correct ordering: user -> function_call -> function_result -> assistant_answer @@ -1051,10 +1042,10 @@ def test_merge_updates_function_result_ordering_github_2977(self): function_result_idx = None for i, msg in enumerate(result.messages): for content in msg.contents: - if isinstance(content, FunctionCallContent): + if content.type == "function_call": function_call_idx = i assert content.call_id == call_id - elif isinstance(content, FunctionResultContent): + elif content.type == "function_result": function_result_idx = i assert content.call_id == call_id @@ -1081,7 +1072,7 @@ def test_merge_updates_multiple_function_results_ordering_github_2977(self): updates = [ # User question AgentResponseUpdate( - contents=[TextContent(text="What's the weather and time?")], + contents=[Content.from_text(text="What's the weather and time?")], role=Role.USER, response_id="resp-1", message_id="msg-1", @@ -1089,7 +1080,9 @@ def test_merge_updates_multiple_function_results_ordering_github_2977(self): ), # Assistant with first function call AgentResponseUpdate( - contents=[FunctionCallContent(call_id=call_id_1, name="get_weather", arguments='{"location": "NYC"}')], + contents=[ + 
Content.from_function_call(call_id=call_id_1, name="get_weather", arguments='{"location": "NYC"}') + ], role=Role.ASSISTANT, response_id="resp-1", message_id="msg-2", @@ -1097,7 +1090,9 @@ def test_merge_updates_multiple_function_results_ordering_github_2977(self): ), # Assistant with second function call AgentResponseUpdate( - contents=[FunctionCallContent(call_id=call_id_2, name="get_time", arguments='{"timezone": "EST"}')], + contents=[ + Content.from_function_call(call_id=call_id_2, name="get_time", arguments='{"timezone": "EST"}') + ], role=Role.ASSISTANT, response_id="resp-1", message_id="msg-3", @@ -1105,7 +1100,7 @@ def test_merge_updates_multiple_function_results_ordering_github_2977(self): ), # Second function result arrives first (no response_id) AgentResponseUpdate( - contents=[FunctionResultContent(call_id=call_id_2, result="3:00 PM EST")], + contents=[Content.from_function_result(call_id=call_id_2, result="3:00 PM EST")], role=Role.TOOL, response_id=None, message_id="msg-4", @@ -1113,7 +1108,7 @@ def test_merge_updates_multiple_function_results_ordering_github_2977(self): ), # First function result arrives second (no response_id) AgentResponseUpdate( - contents=[FunctionResultContent(call_id=call_id_1, result="Sunny, 72F")], + contents=[Content.from_function_result(call_id=call_id_1, result="Sunny, 72F")], role=Role.TOOL, response_id=None, message_id="msg-5", @@ -1121,7 +1116,7 @@ def test_merge_updates_multiple_function_results_ordering_github_2977(self): ), # Final assistant answer AgentResponseUpdate( - contents=[TextContent(text="It's sunny (72F) and 3 PM in NYC.")], + contents=[Content.from_text(text="It's sunny (72F) and 3 PM in NYC.")], role=Role.ASSISTANT, response_id="resp-1", message_id="msg-6", @@ -1137,11 +1132,11 @@ def test_merge_updates_multiple_function_results_ordering_github_2977(self): content_sequence = [] for msg in result.messages: for content in msg.contents: - if isinstance(content, TextContent): + if content.type == "text": 
content_sequence.append(("text", None)) - elif isinstance(content, FunctionCallContent): + elif content.type == "function_call": content_sequence.append(("function_call", content.call_id)) - elif isinstance(content, FunctionResultContent): + elif content.type == "function_result": content_sequence.append(("function_result", content.call_id)) # Verify all function results appear before the final assistant text @@ -1172,7 +1167,7 @@ def test_merge_updates_function_result_no_matching_call(self): """ updates = [ AgentResponseUpdate( - contents=[TextContent(text="Hello")], + contents=[Content.from_text(text="Hello")], role=Role.USER, response_id="resp-1", message_id="msg-1", @@ -1180,14 +1175,14 @@ def test_merge_updates_function_result_no_matching_call(self): ), # Function result with no matching call AgentResponseUpdate( - contents=[FunctionResultContent(call_id="orphan_call_id", result="orphan result")], + contents=[Content.from_function_result(call_id="orphan_call_id", result="orphan result")], role=Role.TOOL, response_id=None, message_id="msg-2", created_at="2024-01-01T12:00:01Z", ), AgentResponseUpdate( - contents=[TextContent(text="Goodbye")], + contents=[Content.from_text(text="Goodbye")], role=Role.ASSISTANT, response_id="resp-1", message_id="msg-3", @@ -1203,9 +1198,9 @@ def test_merge_updates_function_result_no_matching_call(self): content_types = [] for msg in result.messages: for content in msg.contents: - if isinstance(content, TextContent): + if content.type == "text": content_types.append("text") - elif isinstance(content, FunctionResultContent): + elif content.type == "function_result": content_types.append("function_result") # Order: text (user), text (assistant), function_result (orphan at end) diff --git a/python/packages/core/tests/workflow/test_workflow_kwargs.py b/python/packages/core/tests/workflow/test_workflow_kwargs.py index 75c34f9d95..14ec9f43ec 100644 --- a/python/packages/core/tests/workflow/test_workflow_kwargs.py +++ 
b/python/packages/core/tests/workflow/test_workflow_kwargs.py @@ -12,12 +12,12 @@ BaseAgent, ChatMessage, ConcurrentBuilder, + Content, GroupChatBuilder, GroupChatState, HandoffBuilder, Role, SequentialBuilder, - TextContent, WorkflowRunState, WorkflowStatusEvent, ai_function, @@ -67,7 +67,7 @@ async def run_stream( **kwargs: Any, ) -> AsyncIterable[AgentResponseUpdate]: self.captured_kwargs.append(dict(kwargs)) - yield AgentResponseUpdate(contents=[TextContent(text=f"{self.name} response")]) + yield AgentResponseUpdate(contents=[Content.from_text(text=f"{self.name} response")]) # region Sequential Builder Tests diff --git a/python/packages/declarative/agent_framework_declarative/_loader.py b/python/packages/declarative/agent_framework_declarative/_loader.py index 30b6bab521..6181be5f62 100644 --- a/python/packages/declarative/agent_framework_declarative/_loader.py +++ b/python/packages/declarative/agent_framework_declarative/_loader.py @@ -2,19 +2,18 @@ from collections.abc import Callable, Mapping from pathlib import Path -from typing import Any, Literal, TypedDict +from typing import Any, Literal, TypedDict, cast import yaml from agent_framework import ( AIFunction, ChatAgent, ChatClientProtocol, + Content, HostedCodeInterpreterTool, - HostedFileContent, HostedFileSearchTool, HostedMCPSpecificApproval, HostedMCPTool, - HostedVectorStoreContent, HostedWebSearchTool, ToolProtocol, ) @@ -89,6 +88,11 @@ class ProviderTypeMapping(TypedDict, total=True): "name": "AzureAIClient", "model_id_field": "model_deployment_name", }, + "AzureAI.ProjectProvider": { + "package": "agent_framework.azure", + "name": "AzureAIProjectAgentProvider", + "model_id_field": "model", + }, "Anthropic.Chat": { "package": "agent_framework.anthropic", "name": "AnthropicChatClient", @@ -448,6 +452,175 @@ def create_agent_from_dict(self, agent_def: dict[str, Any]) -> ChatAgent: **chat_options, ) + async def create_agent_from_yaml_path_async(self, yaml_path: str | Path) -> ChatAgent: + """Async 
version: Create a ChatAgent from a YAML file path. + + Use this method when the provider requires async initialization, such as + AzureAI.ProjectProvider which creates agents on the Azure AI Agent Service. + + Args: + yaml_path: Path to the YAML file representation of a PromptAgent. + + Returns: + The ``ChatAgent`` instance created from the YAML file. + + Examples: + .. code-block:: python + + from agent_framework_declarative import AgentFactory + + factory = AgentFactory( + client_kwargs={"credential": credential}, + default_provider="AzureAI.ProjectProvider", + ) + agent = await factory.create_agent_from_yaml_path_async("agent.yaml") + """ + if not isinstance(yaml_path, Path): + yaml_path = Path(yaml_path) + if not yaml_path.exists(): + raise DeclarativeLoaderError(f"YAML file not found at path: {yaml_path}") + yaml_str = yaml_path.read_text() + return await self.create_agent_from_yaml_async(yaml_str) + + async def create_agent_from_yaml_async(self, yaml_str: str) -> ChatAgent: + """Async version: Create a ChatAgent from a YAML string. + + Use this method when the provider requires async initialization, such as + AzureAI.ProjectProvider which creates agents on the Azure AI Agent Service. + + Args: + yaml_str: YAML string representation of a PromptAgent. + + Returns: + The ``ChatAgent`` instance created from the YAML string. + + Examples: + .. code-block:: python + + from agent_framework_declarative import AgentFactory + + yaml_content = ''' + kind: Prompt + name: MyAgent + instructions: You are a helpful assistant. + model: + id: gpt-4o + provider: AzureAI.ProjectProvider + ''' + + factory = AgentFactory(client_kwargs={"credential": credential}) + agent = await factory.create_agent_from_yaml_async(yaml_content) + """ + return await self.create_agent_from_dict_async(yaml.safe_load(yaml_str)) + + async def create_agent_from_dict_async(self, agent_def: dict[str, Any]) -> ChatAgent: + """Async version: Create a ChatAgent from a dictionary definition. 
+ + Use this method when the provider requires async initialization, such as + AzureAI.ProjectProvider which creates agents on the Azure AI Agent Service. + + Args: + agent_def: Dictionary representation of a PromptAgent. + + Returns: + The ``ChatAgent`` instance created from the dictionary. + + Examples: + .. code-block:: python + + from agent_framework_declarative import AgentFactory + + agent_def = { + "kind": "Prompt", + "name": "MyAgent", + "instructions": "You are a helpful assistant.", + "model": { + "id": "gpt-4o", + "provider": "AzureAI.ProjectProvider", + }, + } + + factory = AgentFactory(client_kwargs={"credential": credential}) + agent = await factory.create_agent_from_dict_async(agent_def) + """ + # Set safe_mode context before parsing YAML to control PowerFx environment variable access + _safe_mode_context.set(self.safe_mode) + prompt_agent = agent_schema_dispatch(agent_def) + if not isinstance(prompt_agent, PromptAgent): + raise DeclarativeLoaderError("Only definitions for a PromptAgent are supported for agent creation.") + + # Check if we're using a provider-based approach (like AzureAIProjectAgentProvider) + mapping = self._retrieve_provider_configuration(prompt_agent.model) if prompt_agent.model else None + if mapping and mapping["name"] == "AzureAIProjectAgentProvider": + return await self._create_agent_with_provider(prompt_agent, mapping) + + # Fall back to standard ChatClient approach + client = self._get_client(prompt_agent) + chat_options = self._parse_chat_options(prompt_agent.model) + if tools := self._parse_tools(prompt_agent.tools): + chat_options["tools"] = tools + if output_schema := prompt_agent.outputSchema: + chat_options["response_format"] = _create_model_from_json_schema("agent", output_schema.to_json_schema()) + return ChatAgent( + chat_client=client, + name=prompt_agent.name, + description=prompt_agent.description, + instructions=prompt_agent.instructions, + **chat_options, + ) + + async def _create_agent_with_provider(self, 
prompt_agent: PromptAgent, mapping: ProviderTypeMapping) -> ChatAgent: + """Create a ChatAgent using AzureAIProjectAgentProvider. + + This method handles the special case where we use a provider that creates + agents on a remote service (like Azure AI Agent Service) and returns + ChatAgent instances directly. + """ + # Import the provider class + module_name = mapping["package"] + class_name = mapping["name"] + module = __import__(module_name, fromlist=[class_name]) + provider_class = getattr(module, class_name) + + # Build provider kwargs from client_kwargs and connection info + provider_kwargs: dict[str, Any] = {} + provider_kwargs.update(self.client_kwargs) + + # Handle connection settings for the model + if prompt_agent.model and prompt_agent.model.connection: + match prompt_agent.model.connection: + case RemoteConnection() | AnonymousConnection(): + if prompt_agent.model.connection.endpoint: + provider_kwargs["project_endpoint"] = prompt_agent.model.connection.endpoint + case ApiKeyConnection(): + if prompt_agent.model.connection.endpoint: + provider_kwargs["project_endpoint"] = prompt_agent.model.connection.endpoint + + # Create the provider and use it to create the agent + provider = provider_class(**provider_kwargs) + + # Parse tools + tools = self._parse_tools(prompt_agent.tools) if prompt_agent.tools else None + + # Parse response format + response_format = None + if prompt_agent.outputSchema: + response_format = _create_model_from_json_schema("agent", prompt_agent.outputSchema.to_json_schema()) + + # Create the agent using the provider + # The provider's create_agent returns a ChatAgent directly + return cast( + ChatAgent, + await provider.create_agent( + name=prompt_agent.name, + model=prompt_agent.model.id if prompt_agent.model else None, + instructions=prompt_agent.instructions, + description=prompt_agent.description, + tools=tools, + response_format=response_format, + ), + ) + def _get_client(self, prompt_agent: PromptAgent) -> ChatClientProtocol: 
"""Create the ChatClientProtocol instance based on the PromptAgent model.""" if not prompt_agent.model: @@ -565,14 +738,14 @@ def _parse_tool(self, tool_resource: Tool) -> ToolProtocol: if tool_resource.filters: add_props["filters"] = tool_resource.filters return HostedFileSearchTool( - inputs=[HostedVectorStoreContent(id) for id in tool_resource.vectorStoreIds or []], + inputs=[Content.from_hosted_vector_store(id) for id in tool_resource.vectorStoreIds or []], description=tool_resource.description, max_results=tool_resource.maximumResultCount, additional_properties=add_props, ) case CodeInterpreterTool(): return HostedCodeInterpreterTool( - inputs=[HostedFileContent(file_id=file) for file in tool_resource.fileIds or []], + inputs=[Content.from_hosted_file(file_id=file) for file in tool_resource.fileIds or []], description=tool_resource.description, ) case McpTool(): @@ -594,12 +767,46 @@ def _parse_tool(self, tool_resource: Tool) -> ToolProtocol: ) if not approval_mode: approval_mode = None + + # Handle connection settings + headers: dict[str, str] | None = None + additional_properties: dict[str, Any] | None = None + + if tool_resource.connection is not None: + match tool_resource.connection: + case ApiKeyConnection(): + if tool_resource.connection.apiKey: + headers = {"Authorization": f"Bearer {tool_resource.connection.apiKey}"} + case RemoteConnection(): + additional_properties = { + "connection": { + "kind": tool_resource.connection.kind, + "name": tool_resource.connection.name, + "authenticationMode": tool_resource.connection.authenticationMode, + "endpoint": tool_resource.connection.endpoint, + } + } + case ReferenceConnection(): + additional_properties = { + "connection": { + "kind": tool_resource.connection.kind, + "name": tool_resource.connection.name, + "authenticationMode": tool_resource.connection.authenticationMode, + } + } + case AnonymousConnection(): + pass + case _: + raise ValueError(f"Unsupported connection kind: {tool_resource.connection.kind}") 
+ return HostedMCPTool( name=tool_resource.name, # type: ignore description=tool_resource.description, url=tool_resource.url, # type: ignore allowed_tools=tool_resource.allowedTools, approval_mode=approval_mode, + headers=headers, + additional_properties=additional_properties, ) case _: raise ValueError(f"Unsupported tool kind: {tool_resource.kind}") diff --git a/python/packages/declarative/agent_framework_declarative/_workflows/_executors_agents.py b/python/packages/declarative/agent_framework_declarative/_workflows/_executors_agents.py index 669f662a9b..18685ef401 100644 --- a/python/packages/declarative/agent_framework_declarative/_workflows/_executors_agents.py +++ b/python/packages/declarative/agent_framework_declarative/_workflows/_executors_agents.py @@ -21,8 +21,7 @@ from agent_framework import ( ChatMessage, - FunctionCallContent, - FunctionResultContent, + Content, WorkflowContext, handler, response_handler, @@ -191,7 +190,7 @@ def _validate_conversation_history(messages: list[ChatMessage], agent_name: str) if not hasattr(msg, "contents") or msg.contents is None: continue for content in msg.contents: - if isinstance(content, FunctionCallContent) and content.call_id: + if content.type == "function_call" and content.call_id: tool_call_ids.add(content.call_id) logger.debug( "Agent '%s': Found tool call '%s' (id=%s) in message %d", @@ -200,7 +199,7 @@ def _validate_conversation_history(messages: list[ChatMessage], agent_name: str) content.call_id, i, ) - elif isinstance(content, FunctionResultContent) and content.call_id: + elif content.type == "function_result" and content.call_id: tool_result_ids.add(content.call_id) logger.debug( "Agent '%s': Found tool result for call_id=%s in message %d", @@ -265,7 +264,7 @@ class AgentResult: response: str agent_name: str messages: list[ChatMessage] = field(default_factory=lambda: cast(list[ChatMessage], [])) - tool_calls: list[FunctionCallContent] = field(default_factory=lambda: cast(list[FunctionCallContent], [])) + 
tool_calls: list[Content] = field(default_factory=lambda: cast(list[Content], [])) error: str | None = None @@ -311,7 +310,7 @@ async def on_request(request: AgentExternalInputRequest) -> ExternalInputRespons agent_response: str iteration: int = 0 messages: list[ChatMessage] = field(default_factory=lambda: cast(list[ChatMessage], [])) - function_calls: list[FunctionCallContent] = field(default_factory=lambda: cast(list[FunctionCallContent], [])) + function_calls: list[Content] = field(default_factory=lambda: cast(list[Content], [])) @dataclass @@ -342,9 +341,7 @@ class AgentExternalInputResponse: user_input: str messages: list[ChatMessage] = field(default_factory=lambda: cast(list[ChatMessage], [])) - function_results: dict[str, FunctionResultContent] = field( - default_factory=lambda: cast(dict[str, FunctionResultContent], {}) - ) + function_results: dict[str, Content] = field(default_factory=lambda: cast(dict[str, Content], {})) @dataclass @@ -641,7 +638,7 @@ async def _invoke_agent_and_store_results( """ accumulated_response = "" all_messages: list[ChatMessage] = [] - tool_calls: list[FunctionCallContent] = [] + tool_calls: list[Content] = [] # Add user input to conversation history first (via state.append only) if input_text: @@ -679,7 +676,7 @@ async def _invoke_agent_and_store_results( all_messages = list(cast(list[ChatMessage], result_messages)) result_tool_calls: Any = getattr(result, "tool_calls", None) if result_tool_calls is not None: - tool_calls = list(cast(list[FunctionCallContent], result_tool_calls)) + tool_calls = list(cast(list[Content], result_tool_calls)) else: raise RuntimeError(f"Agent '{agent_name}' has no run or run_stream method") diff --git a/python/packages/declarative/agent_framework_declarative/_workflows/_factory.py b/python/packages/declarative/agent_framework_declarative/_workflows/_factory.py index 812f256828..1e8dab9f30 100644 --- a/python/packages/declarative/agent_framework_declarative/_workflows/_factory.py +++ 
b/python/packages/declarative/agent_framework_declarative/_workflows/_factory.py @@ -72,7 +72,7 @@ class WorkflowFactory: # Pre-register agents for InvokeAzureAgent actions chat_client = AzureOpenAIChatClient() - agent = chat_client.create_agent(name="MyAgent", instructions="You are helpful.") + agent = chat_client.as_agent(name="MyAgent", instructions="You are helpful.") factory = WorkflowFactory(agents={"MyAgent": agent}) workflow = factory.create_workflow_from_yaml_path("workflow.yaml") @@ -115,8 +115,8 @@ def __init__( # With pre-registered agents client = AzureOpenAIChatClient() agents = { - "WriterAgent": client.create_agent(name="Writer", instructions="Write content."), - "ReviewerAgent": client.create_agent(name="Reviewer", instructions="Review content."), + "WriterAgent": client.as_agent(name="Writer", instructions="Write content."), + "ReviewerAgent": client.as_agent(name="Reviewer", instructions="Review content."), } factory = WorkflowFactory(agents=agents) @@ -533,14 +533,14 @@ def register_agent(self, name: str, agent: AgentProtocol | AgentExecutor) -> "Wo WorkflowFactory() .register_agent( "Writer", - client.create_agent( + client.as_agent( name="Writer", instructions="Write content.", ), ) .register_agent( "Reviewer", - client.create_agent( + client.as_agent( name="Reviewer", instructions="Review content.", ), diff --git a/python/packages/declarative/pyproject.toml b/python/packages/declarative/pyproject.toml index 052d4a60ac..f18e211461 100644 --- a/python/packages/declarative/pyproject.toml +++ b/python/packages/declarative/pyproject.toml @@ -4,7 +4,7 @@ description = "Declarative specification support for Microsoft Agent Framework." 
authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] readme = "README.md" requires-python = ">=3.10" -version = "1.0.0b260114" +version = "1.0.0b260116" license-files = ["LICENSE"] urls.homepage = "https://aka.ms/agent-framework" urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" diff --git a/python/packages/declarative/tests/test_declarative_loader.py b/python/packages/declarative/tests/test_declarative_loader.py index b0afb7d5cc..2d31a66d58 100644 --- a/python/packages/declarative/tests/test_declarative_loader.py +++ b/python/packages/declarative/tests/test_declarative_loader.py @@ -692,3 +692,230 @@ def test_agent_factory_safe_mode_false_resolves_api_key(self, monkeypatch): assert result.model.connection.apiKey == "secret-key-123" finally: _safe_mode_context.reset(token) + + +class TestAgentFactoryMcpToolConnection: + """Tests for MCP tool connection handling in AgentFactory._parse_tool.""" + + def _get_mcp_tools(self, agent): + """Helper to get MCP tools from agent's default_options.""" + from agent_framework import HostedMCPTool + + tools = agent.default_options.get("tools", []) + return [t for t in tools if isinstance(t, HostedMCPTool)] + + def test_mcp_tool_with_api_key_connection_sets_headers(self): + """Test that MCP tool with ApiKeyConnection sets headers correctly.""" + from unittest.mock import MagicMock + + from agent_framework_declarative import AgentFactory + + yaml_content = """ +kind: Prompt +name: TestAgent +instructions: Test agent +tools: + - kind: mcp + name: my-mcp-tool + url: https://api.example.com/mcp + connection: + kind: key + apiKey: my-secret-api-key +""" + + mock_client = MagicMock() + mock_client.create_agent.return_value = MagicMock() + + factory = AgentFactory(chat_client=mock_client) + agent = factory.create_agent_from_yaml(yaml_content) + + # Find the MCP tool in the agent's tools + mcp_tools = self._get_mcp_tools(agent) + assert len(mcp_tools) == 1 + mcp_tool = mcp_tools[0] + + # Verify 
headers are set with the API key + assert mcp_tool.headers is not None + assert mcp_tool.headers == {"Authorization": "Bearer my-secret-api-key"} + + def test_mcp_tool_with_remote_connection_sets_additional_properties(self): + """Test that MCP tool with RemoteConnection sets additional_properties correctly.""" + from unittest.mock import MagicMock + + from agent_framework_declarative import AgentFactory + + yaml_content = """ +kind: Prompt +name: TestAgent +instructions: Test agent +tools: + - kind: mcp + name: github-mcp + url: https://api.githubcopilot.com/mcp + connection: + kind: remote + authenticationMode: oauth + name: github-mcp-oauth-connection +""" + + mock_client = MagicMock() + mock_client.create_agent.return_value = MagicMock() + + factory = AgentFactory(chat_client=mock_client) + agent = factory.create_agent_from_yaml(yaml_content) + + # Find the MCP tool in the agent's tools + mcp_tools = self._get_mcp_tools(agent) + assert len(mcp_tools) == 1 + mcp_tool = mcp_tools[0] + + # Verify additional_properties are set with connection info + assert mcp_tool.additional_properties is not None + assert "connection" in mcp_tool.additional_properties + conn = mcp_tool.additional_properties["connection"] + assert conn["kind"] == "remote" + assert conn["authenticationMode"] == "oauth" + assert conn["name"] == "github-mcp-oauth-connection" + + def test_mcp_tool_with_reference_connection_sets_additional_properties(self): + """Test that MCP tool with ReferenceConnection sets additional_properties correctly.""" + from unittest.mock import MagicMock + + from agent_framework_declarative import AgentFactory + + yaml_content = """ +kind: Prompt +name: TestAgent +instructions: Test agent +tools: + - kind: mcp + name: ref-mcp-tool + url: https://api.example.com/mcp + connection: + kind: reference + name: my-connection-ref + target: /connections/my-connection +""" + + mock_client = MagicMock() + mock_client.create_agent.return_value = MagicMock() + + factory = 
AgentFactory(chat_client=mock_client) + agent = factory.create_agent_from_yaml(yaml_content) + + # Find the MCP tool in the agent's tools + mcp_tools = self._get_mcp_tools(agent) + assert len(mcp_tools) == 1 + mcp_tool = mcp_tools[0] + + # Verify additional_properties are set with connection info + assert mcp_tool.additional_properties is not None + assert "connection" in mcp_tool.additional_properties + conn = mcp_tool.additional_properties["connection"] + assert conn["kind"] == "reference" + assert conn["name"] == "my-connection-ref" + + def test_mcp_tool_with_anonymous_connection_no_headers_or_properties(self): + """Test that MCP tool with AnonymousConnection doesn't set headers or additional_properties.""" + from unittest.mock import MagicMock + + from agent_framework_declarative import AgentFactory + + yaml_content = """ +kind: Prompt +name: TestAgent +instructions: Test agent +tools: + - kind: mcp + name: anon-mcp-tool + url: https://api.example.com/mcp + connection: + kind: anonymous +""" + + mock_client = MagicMock() + mock_client.create_agent.return_value = MagicMock() + + factory = AgentFactory(chat_client=mock_client) + agent = factory.create_agent_from_yaml(yaml_content) + + # Find the MCP tool in the agent's tools + mcp_tools = self._get_mcp_tools(agent) + assert len(mcp_tools) == 1 + mcp_tool = mcp_tools[0] + + # Verify no headers or additional_properties are set + assert mcp_tool.headers is None + assert mcp_tool.additional_properties is None + + def test_mcp_tool_without_connection_preserves_existing_behavior(self): + """Test that MCP tool without connection works as before (no headers or additional_properties).""" + from unittest.mock import MagicMock + + from agent_framework_declarative import AgentFactory + + yaml_content = """ +kind: Prompt +name: TestAgent +instructions: Test agent +tools: + - kind: mcp + name: simple-mcp-tool + url: https://api.example.com/mcp + approvalMode: never +""" + + mock_client = MagicMock() + 
mock_client.create_agent.return_value = MagicMock() + + factory = AgentFactory(chat_client=mock_client) + agent = factory.create_agent_from_yaml(yaml_content) + + # Find the MCP tool in the agent's tools + mcp_tools = self._get_mcp_tools(agent) + assert len(mcp_tools) == 1 + mcp_tool = mcp_tools[0] + + # Verify tool is created correctly without connection + assert mcp_tool.name == "simple-mcp-tool" + assert str(mcp_tool.url) == "https://api.example.com/mcp" + assert mcp_tool.approval_mode == "never_require" + assert mcp_tool.headers is None + assert mcp_tool.additional_properties is None + + def test_mcp_tool_with_remote_connection_with_endpoint(self): + """Test that MCP tool with RemoteConnection including endpoint sets it in additional_properties.""" + from unittest.mock import MagicMock + + from agent_framework_declarative import AgentFactory + + yaml_content = """ +kind: Prompt +name: TestAgent +instructions: Test agent +tools: + - kind: mcp + name: endpoint-mcp-tool + url: https://api.example.com/mcp + connection: + kind: remote + authenticationMode: oauth + name: my-oauth-connection + endpoint: https://auth.example.com +""" + + mock_client = MagicMock() + mock_client.create_agent.return_value = MagicMock() + + factory = AgentFactory(chat_client=mock_client) + agent = factory.create_agent_from_yaml(yaml_content) + + # Find the MCP tool in the agent's tools + mcp_tools = self._get_mcp_tools(agent) + assert len(mcp_tools) == 1 + mcp_tool = mcp_tools[0] + + # Verify additional_properties include endpoint + assert mcp_tool.additional_properties is not None + conn = mcp_tool.additional_properties["connection"] + assert conn["endpoint"] == "https://auth.example.com" diff --git a/python/packages/devui/agent_framework_devui/_conversations.py b/python/packages/devui/agent_framework_devui/_conversations.py index 86db2172e1..868ca3e162 100644 --- a/python/packages/devui/agent_framework_devui/_conversations.py +++ 
b/python/packages/devui/agent_framework_devui/_conversations.py @@ -321,7 +321,7 @@ async def add_items(self, conversation_id: str, items: list[dict[str, Any]]) -> # Convert ChatMessage contents to OpenAI TextContent format message_content = [] for content_item in msg.contents: - if hasattr(content_item, "type") and content_item.type == "text": + if content_item.type == "text": # Extract text from TextContent object text_value = getattr(content_item, "text", "") message_content.append(TextContent(type="text", text=text_value)) diff --git a/python/packages/devui/agent_framework_devui/_executor.py b/python/packages/devui/agent_framework_devui/_executor.py index 585036bef9..cf4fa0066f 100644 --- a/python/packages/devui/agent_framework_devui/_executor.py +++ b/python/packages/devui/agent_framework_devui/_executor.py @@ -7,7 +7,7 @@ from collections.abc import AsyncGenerator from typing import Any -from agent_framework import AgentProtocol +from agent_framework import AgentProtocol, Content from agent_framework._workflows._events import RequestInfoEvent from ._conversations import ConversationStore, InMemoryConversationStore @@ -602,7 +602,7 @@ def _convert_input_to_chat_message(self, input_data: Any) -> Any: """ # Import Agent Framework types try: - from agent_framework import ChatMessage, DataContent, Role, TextContent + from agent_framework import ChatMessage, Role except ImportError: # Fallback to string extraction if Agent Framework not available return self._extract_user_message_fallback(input_data) @@ -613,14 +613,12 @@ def _convert_input_to_chat_message(self, input_data: Any) -> Any: # Handle OpenAI ResponseInputParam (List[ResponseInputItemParam]) if isinstance(input_data, list): - return self._convert_openai_input_to_chat_message(input_data, ChatMessage, TextContent, DataContent, Role) + return self._convert_openai_input_to_chat_message(input_data, ChatMessage, Role) # Fallback for other formats return self._extract_user_message_fallback(input_data) - def 
_convert_openai_input_to_chat_message( - self, input_items: list[Any], ChatMessage: Any, TextContent: Any, DataContent: Any, Role: Any - ) -> Any: + def _convert_openai_input_to_chat_message(self, input_items: list[Any], ChatMessage: Any, Role: Any) -> Any: """Convert OpenAI ResponseInputParam to Agent Framework ChatMessage. Processes text, images, files, and other content types from OpenAI format @@ -629,14 +627,12 @@ def _convert_openai_input_to_chat_message( Args: input_items: List of OpenAI ResponseInputItemParam objects (dicts or objects) ChatMessage: ChatMessage class for creating chat messages - TextContent: TextContent class for text content - DataContent: DataContent class for data/media content Role: Role enum for message roles Returns: ChatMessage with converted content """ - contents = [] + contents: list[Content] = [] # Process each input item for item in input_items: @@ -649,7 +645,7 @@ def _convert_openai_input_to_chat_message( # Handle both string content and list content if isinstance(message_content, str): - contents.append(TextContent(text=message_content)) + contents.append(Content.from_text(text=message_content)) elif isinstance(message_content, list): for content_item in message_content: # Handle dict content items @@ -658,7 +654,7 @@ def _convert_openai_input_to_chat_message( if content_type == "input_text": text = content_item.get("text", "") - contents.append(TextContent(text=text)) + contents.append(Content.from_text(text=text)) elif content_type == "input_image": image_url = content_item.get("image_url", "") @@ -676,7 +672,7 @@ def _convert_openai_input_to_chat_message( media_type = "image/png" else: media_type = "image/png" - contents.append(DataContent(uri=image_url, media_type=media_type)) + contents.append(Content.from_uri(uri=image_url, media_type=media_type)) elif content_type == "input_file": # Handle file input @@ -710,7 +706,7 @@ def _convert_openai_input_to_chat_message( # Assume file_data is base64, create data URI data_uri = 
f"data:{media_type};base64,{file_data}" contents.append( - DataContent( + Content.from_uri( uri=data_uri, media_type=media_type, additional_properties=additional_props, @@ -718,7 +714,7 @@ def _convert_openai_input_to_chat_message( ) elif file_url: contents.append( - DataContent( + Content.from_uri( uri=file_url, media_type=media_type, additional_properties=additional_props, @@ -728,21 +724,19 @@ def _convert_openai_input_to_chat_message( elif content_type == "function_approval_response": # Handle function approval response (DevUI extension) try: - from agent_framework import FunctionApprovalResponseContent, FunctionCallContent - request_id = content_item.get("request_id", "") approved = content_item.get("approved", False) function_call_data = content_item.get("function_call", {}) # Create FunctionCallContent from the function_call data - function_call = FunctionCallContent( + function_call = Content.from_function_call( call_id=function_call_data.get("id", ""), name=function_call_data.get("name", ""), arguments=function_call_data.get("arguments", {}), ) # Create FunctionApprovalResponseContent with correct signature - approval_response = FunctionApprovalResponseContent( + approval_response = Content.from_function_approval_response( approved, # positional argument id=request_id, # keyword argument 'id', NOT 'request_id' function_call=function_call, # FunctionCallContent object @@ -764,7 +758,7 @@ def _convert_openai_input_to_chat_message( # If no contents found, create a simple text message if not contents: - contents.append(TextContent(text="")) + contents.append(Content.from_text(text="")) chat_message = ChatMessage(role=Role.USER, contents=contents) diff --git a/python/packages/devui/agent_framework_devui/_mapper.py b/python/packages/devui/agent_framework_devui/_mapper.py index 71bbda9b85..f11a6811ce 100644 --- a/python/packages/devui/agent_framework_devui/_mapper.py +++ b/python/packages/devui/agent_framework_devui/_mapper.py @@ -12,7 +12,7 @@ from typing import 
Any, Union from uuid import uuid4 -from agent_framework import ChatMessage, TextContent +from agent_framework import ChatMessage, Content from openai.types.responses import ( Response, ResponseContentPartAddedEvent, @@ -92,7 +92,7 @@ def _serialize_content_recursive(value: Any) -> Any: if isinstance(value, (list, tuple)): serialized = [_serialize_content_recursive(item) for item in value] # For single-item lists containing text Content, extract just the text - # This handles the MCP case where result = [TextContent(text="Hello")] + # This handles the MCP case where result = [Content.from_text(text="Hello")] # and we want output = "Hello" not output = '[{"type": "text", "text": "Hello"}]' if len(serialized) == 1 and isinstance(serialized[0], dict) and serialized[0].get("type") == "text": return serialized[0].get("text", "") @@ -127,18 +127,18 @@ def __init__(self, max_contexts: int = 1000) -> None: # Register content type mappers for all 12 Agent Framework content types self.content_mappers = { - "TextContent": self._map_text_content, - "TextReasoningContent": self._map_reasoning_content, - "FunctionCallContent": self._map_function_call_content, - "FunctionResultContent": self._map_function_result_content, - "ErrorContent": self._map_error_content, - "UsageContent": self._map_usage_content, - "DataContent": self._map_data_content, - "UriContent": self._map_uri_content, - "HostedFileContent": self._map_hosted_file_content, - "HostedVectorStoreContent": self._map_hosted_vector_store_content, - "FunctionApprovalRequestContent": self._map_approval_request_content, - "FunctionApprovalResponseContent": self._map_approval_response_content, + "text": self._map_text_content, + "text_reasoning": self._map_reasoning_content, + "function_call": self._map_function_call_content, + "function_result": self._map_function_result_content, + "error": self._map_error_content, + "usage": self._map_usage_content, + "data": self._map_data_content, + "uri": self._map_uri_content, + 
"hosted_file": self._map_hosted_file_content, + "hosted_vector_store": self._map_hosted_vector_store_content, + "function_approval_request": self._map_approval_request_content, + "function_approval_response": self._map_approval_response_content, } async def convert_event(self, raw_event: Any, request: AgentFrameworkRequest) -> Sequence[Any]: @@ -603,7 +603,7 @@ async def _convert_agent_update(self, update: Any, context: dict[str, Any]) -> S return events # Check if we're streaming text content - has_text_content = any(isinstance(content, TextContent) for content in update.contents) + has_text_content = any(content.type == "text" for content in update.contents) # Check if we're in an executor context with an existing item executor_id = context.get("current_executor_id") @@ -647,10 +647,8 @@ async def _convert_agent_update(self, update: Any, context: dict[str, Any]) -> S # Process each content item for content in update.contents: - content_type = content.__class__.__name__ - # Special handling for TextContent to use proper delta events - if content_type == "TextContent" and "current_message_id" in context: + if content.type == "text" and "current_message_id" in context: # Stream text content via proper delta events events.append( ResponseTextDeltaEvent( @@ -663,9 +661,9 @@ async def _convert_agent_update(self, update: Any, context: dict[str, Any]) -> S sequence_number=self._next_sequence(context), ) ) - elif content_type in self.content_mappers: + elif content.type in self.content_mappers: # Use existing mappers for other content types - mapped_events = await self.content_mappers[content_type](content, context) + mapped_events = await self.content_mappers[content.type](content, context) if mapped_events is not None: # Handle None returns (e.g., UsageContent) if isinstance(mapped_events, list): events.extend(mapped_events) @@ -676,7 +674,7 @@ async def _convert_agent_update(self, update: Any, context: dict[str, Any]) -> S events.append(await 
self._create_unknown_content_event(content, context)) # Don't increment content_index for text deltas within the same part - if content_type != "TextContent": + if content.type != "text": context["content_index"] = context.get("content_index", 0) + 1 except Exception as e: @@ -708,10 +706,8 @@ async def _convert_agent_response(self, response: Any, context: dict[str, Any]) for message in messages: if hasattr(message, "contents") and message.contents: for content in message.contents: - content_type = content.__class__.__name__ - - if content_type in self.content_mappers: - mapped_events = await self.content_mappers[content_type](content, context) + if content.type in self.content_mappers: + mapped_events = await self.content_mappers[content.type](content, context) if mapped_events is not None: # Handle None returns (e.g., UsageContent) if isinstance(mapped_events, list): events.extend(mapped_events) @@ -726,9 +722,7 @@ async def _convert_agent_response(self, response: Any, context: dict[str, Any]) # Add usage information if present usage_details = getattr(response, "usage_details", None) if usage_details: - from agent_framework import UsageContent - - usage_content = UsageContent(details=usage_details) + usage_content = Content.from_usage(usage_details=usage_details) await self._map_usage_content(usage_content, context) # Note: _map_usage_content returns None - it accumulates usage for final Response.usage @@ -1421,11 +1415,11 @@ async def _map_usage_content(self, content: Any, context: dict[str, Any]) -> Non Returns: None - no event emitted (usage goes in final Response.usage) """ - # Extract usage from UsageContent.details (UsageDetails object) - details = getattr(content, "details", None) - total_tokens = getattr(details, "total_token_count", 0) or 0 - prompt_tokens = getattr(details, "input_token_count", 0) or 0 - completion_tokens = getattr(details, "output_token_count", 0) or 0 + # Extract usage from UsageContent.usage_details (UsageDetails object) + details = 
content.usage_details or {} + total_tokens = details.get("total_token_count", 0) + prompt_tokens = details.get("input_token_count", 0) + completion_tokens = details.get("output_token_count", 0) # Accumulate for final Response.usage request_id = context.get("request_id", "default") diff --git a/python/packages/devui/agent_framework_devui/ui/assets/index.css b/python/packages/devui/agent_framework_devui/ui/assets/index.css index 504bd04d6c..0c68fc8ab0 100644 --- a/python/packages/devui/agent_framework_devui/ui/assets/index.css +++ b/python/packages/devui/agent_framework_devui/ui/assets/index.css @@ -1 +1 @@ -/*! tailwindcss v4.1.12 | MIT License | https://tailwindcss.com */@layer properties{@supports (((-webkit-hyphens:none)) and (not (margin-trim:inline))) or ((-moz-orient:inline) and (not (color:rgb(from red r g b)))){*,:before,:after,::backdrop{--tw-translate-x:0;--tw-translate-y:0;--tw-translate-z:0;--tw-scale-x:1;--tw-scale-y:1;--tw-scale-z:1;--tw-rotate-x:initial;--tw-rotate-y:initial;--tw-rotate-z:initial;--tw-skew-x:initial;--tw-skew-y:initial;--tw-space-y-reverse:0;--tw-space-x-reverse:0;--tw-border-style:solid;--tw-leading:initial;--tw-font-weight:initial;--tw-tracking:initial;--tw-shadow:0 0 #0000;--tw-shadow-color:initial;--tw-shadow-alpha:100%;--tw-inset-shadow:0 0 #0000;--tw-inset-shadow-color:initial;--tw-inset-shadow-alpha:100%;--tw-ring-color:initial;--tw-ring-shadow:0 0 #0000;--tw-inset-ring-color:initial;--tw-inset-ring-shadow:0 0 #0000;--tw-ring-inset:initial;--tw-ring-offset-width:0px;--tw-ring-offset-color:#fff;--tw-ring-offset-shadow:0 0 
#0000;--tw-outline-style:solid;--tw-blur:initial;--tw-brightness:initial;--tw-contrast:initial;--tw-grayscale:initial;--tw-hue-rotate:initial;--tw-invert:initial;--tw-opacity:initial;--tw-saturate:initial;--tw-sepia:initial;--tw-drop-shadow:initial;--tw-drop-shadow-color:initial;--tw-drop-shadow-alpha:100%;--tw-drop-shadow-size:initial;--tw-backdrop-blur:initial;--tw-backdrop-brightness:initial;--tw-backdrop-contrast:initial;--tw-backdrop-grayscale:initial;--tw-backdrop-hue-rotate:initial;--tw-backdrop-invert:initial;--tw-backdrop-opacity:initial;--tw-backdrop-saturate:initial;--tw-backdrop-sepia:initial;--tw-duration:initial;--tw-ease:initial;--tw-animation-delay:0s;--tw-animation-direction:normal;--tw-animation-duration:initial;--tw-animation-fill-mode:none;--tw-animation-iteration-count:1;--tw-enter-blur:0;--tw-enter-opacity:1;--tw-enter-rotate:0;--tw-enter-scale:1;--tw-enter-translate-x:0;--tw-enter-translate-y:0;--tw-exit-blur:0;--tw-exit-opacity:1;--tw-exit-rotate:0;--tw-exit-scale:1;--tw-exit-translate-x:0;--tw-exit-translate-y:0}}}@layer theme{:root,:host{--font-sans:ui-sans-serif,system-ui,sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";--font-mono:ui-monospace,SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace;--color-red-50:oklch(97.1% .013 17.38);--color-red-100:oklch(93.6% .032 17.717);--color-red-200:oklch(88.5% .062 18.334);--color-red-400:oklch(70.4% .191 22.216);--color-red-500:oklch(63.7% .237 25.331);--color-red-600:oklch(57.7% .245 27.325);--color-red-700:oklch(50.5% .213 27.518);--color-red-800:oklch(44.4% .177 26.899);--color-red-900:oklch(39.6% .141 25.723);--color-red-950:oklch(25.8% .092 26.042);--color-orange-50:oklch(98% .016 73.684);--color-orange-100:oklch(95.4% .038 75.164);--color-orange-200:oklch(90.1% .076 70.697);--color-orange-300:oklch(83.7% .128 66.29);--color-orange-400:oklch(75% .183 55.934);--color-orange-500:oklch(70.5% .213 
47.604);--color-orange-600:oklch(64.6% .222 41.116);--color-orange-700:oklch(55.3% .195 38.402);--color-orange-800:oklch(47% .157 37.304);--color-orange-900:oklch(40.8% .123 38.172);--color-orange-950:oklch(26.6% .079 36.259);--color-amber-50:oklch(98.7% .022 95.277);--color-amber-100:oklch(96.2% .059 95.617);--color-amber-200:oklch(92.4% .12 95.746);--color-amber-300:oklch(87.9% .169 91.605);--color-amber-400:oklch(82.8% .189 84.429);--color-amber-500:oklch(76.9% .188 70.08);--color-amber-600:oklch(66.6% .179 58.318);--color-amber-700:oklch(55.5% .163 48.998);--color-amber-800:oklch(47.3% .137 46.201);--color-amber-900:oklch(41.4% .112 45.904);--color-amber-950:oklch(27.9% .077 45.635);--color-yellow-100:oklch(97.3% .071 103.193);--color-yellow-200:oklch(94.5% .129 101.54);--color-yellow-400:oklch(85.2% .199 91.936);--color-yellow-600:oklch(68.1% .162 75.834);--color-yellow-700:oklch(55.4% .135 66.442);--color-green-50:oklch(98.2% .018 155.826);--color-green-100:oklch(96.2% .044 156.743);--color-green-200:oklch(92.5% .084 155.995);--color-green-300:oklch(87.1% .15 154.449);--color-green-400:oklch(79.2% .209 151.711);--color-green-500:oklch(72.3% .219 149.579);--color-green-600:oklch(62.7% .194 149.214);--color-green-700:oklch(52.7% .154 150.069);--color-green-800:oklch(44.8% .119 151.328);--color-green-900:oklch(39.3% .095 152.535);--color-green-950:oklch(26.6% .065 152.934);--color-emerald-50:oklch(97.9% .021 166.113);--color-emerald-100:oklch(95% .052 163.051);--color-emerald-200:oklch(90.5% .093 164.15);--color-emerald-400:oklch(76.5% .177 163.223);--color-emerald-500:oklch(69.6% .17 162.48);--color-emerald-600:oklch(59.6% .145 163.225);--color-emerald-700:oklch(50.8% .118 165.612);--color-emerald-800:oklch(43.2% .095 166.913);--color-blue-50:oklch(97% .014 254.604);--color-blue-100:oklch(93.2% .032 255.585);--color-blue-200:oklch(88.2% .059 254.128);--color-blue-300:oklch(80.9% .105 251.813);--color-blue-400:oklch(70.7% .165 
254.624);--color-blue-500:oklch(62.3% .214 259.815);--color-blue-600:oklch(54.6% .245 262.881);--color-blue-700:oklch(48.8% .243 264.376);--color-blue-800:oklch(42.4% .199 265.638);--color-blue-900:oklch(37.9% .146 265.522);--color-blue-950:oklch(28.2% .091 267.935);--color-purple-50:oklch(97.7% .014 308.299);--color-purple-100:oklch(94.6% .033 307.174);--color-purple-200:oklch(90.2% .063 306.703);--color-purple-400:oklch(71.4% .203 305.504);--color-purple-500:oklch(62.7% .265 303.9);--color-purple-600:oklch(55.8% .288 302.321);--color-purple-800:oklch(43.8% .218 303.724);--color-purple-900:oklch(38.1% .176 304.987);--color-gray-50:oklch(98.5% .002 247.839);--color-gray-100:oklch(96.7% .003 264.542);--color-gray-200:oklch(92.8% .006 264.531);--color-gray-300:oklch(87.2% .01 258.338);--color-gray-400:oklch(70.7% .022 261.325);--color-gray-500:oklch(55.1% .027 264.364);--color-gray-600:oklch(44.6% .03 256.802);--color-gray-700:oklch(37.3% .034 259.733);--color-gray-800:oklch(27.8% .033 256.848);--color-gray-900:oklch(21% .034 264.665);--color-black:#000;--color-white:#fff;--spacing:.25rem;--container-md:28rem;--container-lg:32rem;--container-2xl:42rem;--container-3xl:48rem;--container-4xl:56rem;--container-5xl:64rem;--container-6xl:72rem;--container-7xl:80rem;--text-xs:.75rem;--text-xs--line-height:calc(1/.75);--text-sm:.875rem;--text-sm--line-height:calc(1.25/.875);--text-base:1rem;--text-base--line-height: 1.5 ;--text-lg:1.125rem;--text-lg--line-height:calc(1.75/1.125);--text-xl:1.25rem;--text-xl--line-height:calc(1.75/1.25);--text-2xl:1.5rem;--text-2xl--line-height:calc(2/1.5);--font-weight-medium:500;--font-weight-semibold:600;--font-weight-bold:700;--tracking-tight:-.025em;--tracking-wide:.025em;--tracking-wider:.05em;--tracking-widest:.1em;--leading-tight:1.25;--leading-relaxed:1.625;--drop-shadow-lg:0 4px 4px #00000026;--ease-out:cubic-bezier(0,0,.2,1);--ease-in-out:cubic-bezier(.4,0,.2,1);--animate-spin:spin 1s linear infinite;--animate-pulse:pulse 2s 
cubic-bezier(.4,0,.6,1)infinite;--animate-bounce:bounce 1s infinite;--blur-sm:8px;--default-transition-duration:.15s;--default-transition-timing-function:cubic-bezier(.4,0,.2,1);--default-font-family:var(--font-sans);--default-mono-font-family:var(--font-mono)}}@layer base{*,:after,:before,::backdrop{box-sizing:border-box;border:0 solid;margin:0;padding:0}::file-selector-button{box-sizing:border-box;border:0 solid;margin:0;padding:0}html,:host{-webkit-text-size-adjust:100%;tab-size:4;line-height:1.5;font-family:var(--default-font-family,ui-sans-serif,system-ui,sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji");font-feature-settings:var(--default-font-feature-settings,normal);font-variation-settings:var(--default-font-variation-settings,normal);-webkit-tap-highlight-color:transparent}hr{height:0;color:inherit;border-top-width:1px}abbr:where([title]){-webkit-text-decoration:underline dotted;text-decoration:underline dotted}h1,h2,h3,h4,h5,h6{font-size:inherit;font-weight:inherit}a{color:inherit;-webkit-text-decoration:inherit;text-decoration:inherit}b,strong{font-weight:bolder}code,kbd,samp,pre{font-family:var(--default-mono-font-family,ui-monospace,SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier 
New",monospace);font-feature-settings:var(--default-mono-font-feature-settings,normal);font-variation-settings:var(--default-mono-font-variation-settings,normal);font-size:1em}small{font-size:80%}sub,sup{vertical-align:baseline;font-size:75%;line-height:0;position:relative}sub{bottom:-.25em}sup{top:-.5em}table{text-indent:0;border-color:inherit;border-collapse:collapse}:-moz-focusring{outline:auto}progress{vertical-align:baseline}summary{display:list-item}ol,ul,menu{list-style:none}img,svg,video,canvas,audio,iframe,embed,object{vertical-align:middle;display:block}img,video{max-width:100%;height:auto}button,input,select,optgroup,textarea{font:inherit;font-feature-settings:inherit;font-variation-settings:inherit;letter-spacing:inherit;color:inherit;opacity:1;background-color:#0000;border-radius:0}::file-selector-button{font:inherit;font-feature-settings:inherit;font-variation-settings:inherit;letter-spacing:inherit;color:inherit;opacity:1;background-color:#0000;border-radius:0}:where(select:is([multiple],[size])) optgroup{font-weight:bolder}:where(select:is([multiple],[size])) optgroup option{padding-inline-start:20px}::file-selector-button{margin-inline-end:4px}::placeholder{opacity:1}@supports (not ((-webkit-appearance:-apple-pay-button))) or (contain-intrinsic-size:1px){::placeholder{color:currentColor}@supports (color:color-mix(in lab,red,red)){::placeholder{color:color-mix(in oklab,currentcolor 
50%,transparent)}}}textarea{resize:vertical}::-webkit-search-decoration{-webkit-appearance:none}::-webkit-date-and-time-value{min-height:1lh;text-align:inherit}::-webkit-datetime-edit{display:inline-flex}::-webkit-datetime-edit-fields-wrapper{padding:0}::-webkit-datetime-edit{padding-block:0}::-webkit-datetime-edit-year-field{padding-block:0}::-webkit-datetime-edit-month-field{padding-block:0}::-webkit-datetime-edit-day-field{padding-block:0}::-webkit-datetime-edit-hour-field{padding-block:0}::-webkit-datetime-edit-minute-field{padding-block:0}::-webkit-datetime-edit-second-field{padding-block:0}::-webkit-datetime-edit-millisecond-field{padding-block:0}::-webkit-datetime-edit-meridiem-field{padding-block:0}::-webkit-calendar-picker-indicator{line-height:1}:-moz-ui-invalid{box-shadow:none}button,input:where([type=button],[type=reset],[type=submit]){appearance:button}::file-selector-button{appearance:button}::-webkit-inner-spin-button{height:auto}::-webkit-outer-spin-button{height:auto}[hidden]:where(:not([hidden=until-found])){display:none!important}*{border-color:var(--border);outline-color:var(--ring)}@supports (color:color-mix(in lab,red,red)){*{outline-color:color-mix(in oklab,var(--ring)50%,transparent)}}body{background-color:var(--background);color:var(--foreground)}}@layer components;@layer 
utilities{.\@container\/card-header{container:card-header/inline-size}.pointer-events-none{pointer-events:none}.collapse{visibility:collapse}.visible{visibility:visible}.sr-only{clip:rect(0,0,0,0);white-space:nowrap;border-width:0;width:1px;height:1px;margin:-1px;padding:0;position:absolute;overflow:hidden}.absolute{position:absolute}.fixed{position:fixed}.relative{position:relative}.static{position:static}.inset-0{inset:calc(var(--spacing)*0)}.inset-2{inset:calc(var(--spacing)*2)}.inset-y-0{inset-block:calc(var(--spacing)*0)}.top-0{top:calc(var(--spacing)*0)}.top-1{top:calc(var(--spacing)*1)}.top-2{top:calc(var(--spacing)*2)}.top-4{top:calc(var(--spacing)*4)}.top-\[30px\]{top:30px}.-right-2{right:calc(var(--spacing)*-2)}.right-0{right:calc(var(--spacing)*0)}.right-1{right:calc(var(--spacing)*1)}.right-2{right:calc(var(--spacing)*2)}.right-4{right:calc(var(--spacing)*4)}.bottom-0{bottom:calc(var(--spacing)*0)}.bottom-24{bottom:calc(var(--spacing)*24)}.-left-2{left:calc(var(--spacing)*-2)}.left-0{left:calc(var(--spacing)*0)}.left-1\/2{left:50%}.left-2{left:calc(var(--spacing)*2)}.left-\[18px\]{left:18px}.z-10{z-index:10}.z-20{z-index:20}.z-50{z-index:50}.col-start-2{grid-column-start:2}.row-span-2{grid-row:span 2/span 2}.row-start-1{grid-row-start:1}.container{width:100%}@media (min-width:40rem){.container{max-width:40rem}}@media (min-width:48rem){.container{max-width:48rem}}@media (min-width:64rem){.container{max-width:64rem}}@media (min-width:80rem){.container{max-width:80rem}}@media (min-width:96rem){.container{max-width:96rem}}.container\!{width:100%!important}@media (min-width:40rem){.container\!{max-width:40rem!important}}@media (min-width:48rem){.container\!{max-width:48rem!important}}@media (min-width:64rem){.container\!{max-width:64rem!important}}@media (min-width:80rem){.container\!{max-width:80rem!important}}@media 
(min-width:96rem){.container\!{max-width:96rem!important}}.m-2{margin:calc(var(--spacing)*2)}.-mx-1{margin-inline:calc(var(--spacing)*-1)}.mx-0\.5{margin-inline:calc(var(--spacing)*.5)}.mx-4{margin-inline:calc(var(--spacing)*4)}.mx-auto{margin-inline:auto}.my-1{margin-block:calc(var(--spacing)*1)}.my-2{margin-block:calc(var(--spacing)*2)}.my-3{margin-block:calc(var(--spacing)*3)}.my-4{margin-block:calc(var(--spacing)*4)}.mt-0{margin-top:calc(var(--spacing)*0)}.mt-0\.5{margin-top:calc(var(--spacing)*.5)}.mt-1{margin-top:calc(var(--spacing)*1)}.mt-2{margin-top:calc(var(--spacing)*2)}.mt-3{margin-top:calc(var(--spacing)*3)}.mt-4{margin-top:calc(var(--spacing)*4)}.mt-12{margin-top:calc(var(--spacing)*12)}.mr-1{margin-right:calc(var(--spacing)*1)}.mr-2{margin-right:calc(var(--spacing)*2)}.mb-1{margin-bottom:calc(var(--spacing)*1)}.mb-2{margin-bottom:calc(var(--spacing)*2)}.mb-3{margin-bottom:calc(var(--spacing)*3)}.mb-4{margin-bottom:calc(var(--spacing)*4)}.mb-6{margin-bottom:calc(var(--spacing)*6)}.mb-8{margin-bottom:calc(var(--spacing)*8)}.ml-0{margin-left:calc(var(--spacing)*0)}.ml-1{margin-left:calc(var(--spacing)*1)}.ml-2{margin-left:calc(var(--spacing)*2)}.ml-3{margin-left:calc(var(--spacing)*3)}.ml-4{margin-left:calc(var(--spacing)*4)}.ml-5{margin-left:calc(var(--spacing)*5)}.ml-6{margin-left:calc(var(--spacing)*6)}.ml-auto{margin-left:auto}.line-clamp-2{-webkit-line-clamp:2;-webkit-box-orient:vertical;display:-webkit-box;overflow:hidden}.line-clamp-3{-webkit-line-clamp:3;-webkit-box-orient:vertical;display:-webkit-box;overflow:hidden}.block{display:block}.contents{display:contents}.flex{display:flex}.grid{display:grid}.hidden{display:none}.inline{display:inline}.inline-block{display:inline-block}.inline-flex{display:inline-flex}.table{display:table}.field-sizing-content{field-sizing:content}.size-2{width:calc(var(--spacing)*2);height:calc(var(--spacing)*2)}.size-3\.5{width:calc(var(--spacing)*3.5);height:calc(var(--spacing)*3.5)}.size-4{width:calc(var(--spacing)*
4);height:calc(var(--spacing)*4)}.size-9{width:calc(var(--spacing)*9);height:calc(var(--spacing)*9)}.\!h-2{height:calc(var(--spacing)*2)!important}.h-0{height:calc(var(--spacing)*0)}.h-0\.5{height:calc(var(--spacing)*.5)}.h-1{height:calc(var(--spacing)*1)}.h-2{height:calc(var(--spacing)*2)}.h-2\.5{height:calc(var(--spacing)*2.5)}.h-3{height:calc(var(--spacing)*3)}.h-3\.5{height:calc(var(--spacing)*3.5)}.h-4{height:calc(var(--spacing)*4)}.h-5{height:calc(var(--spacing)*5)}.h-6{height:calc(var(--spacing)*6)}.h-7{height:calc(var(--spacing)*7)}.h-8{height:calc(var(--spacing)*8)}.h-9{height:calc(var(--spacing)*9)}.h-10{height:calc(var(--spacing)*10)}.h-12{height:calc(var(--spacing)*12)}.h-14{height:calc(var(--spacing)*14)}.h-16{height:calc(var(--spacing)*16)}.h-32{height:calc(var(--spacing)*32)}.h-\[1\.2rem\]{height:1.2rem}.h-\[1px\]{height:1px}.h-\[85vh\]{height:85vh}.h-\[500px\]{height:500px}.h-\[calc\(100\%\+8px\)\]{height:calc(100% + 8px)}.h-\[calc\(100vh-3\.5rem\)\]{height:calc(100vh - 3.5rem)}.h-\[calc\(100vh-3\.7rem\)\]{height:calc(100vh - 
3.7rem)}.h-\[var\(--radix-select-trigger-height\)\]{height:var(--radix-select-trigger-height)}.h-full{height:100%}.h-px{height:1px}.h-screen{height:100vh}.max-h-\(--radix-dropdown-menu-content-available-height\){max-height:var(--radix-dropdown-menu-content-available-height)}.max-h-\(--radix-select-content-available-height\){max-height:var(--radix-select-content-available-height)}.max-h-32{max-height:calc(var(--spacing)*32)}.max-h-40{max-height:calc(var(--spacing)*40)}.max-h-48{max-height:calc(var(--spacing)*48)}.max-h-60{max-height:calc(var(--spacing)*60)}.max-h-64{max-height:calc(var(--spacing)*64)}.max-h-\[85vh\]{max-height:85vh}.max-h-\[90vh\]{max-height:90vh}.max-h-\[200px\]{max-height:200px}.max-h-\[400px\]{max-height:400px}.max-h-none{max-height:none}.max-h-screen{max-height:100vh}.\!min-h-0{min-height:calc(var(--spacing)*0)!important}.min-h-0{min-height:calc(var(--spacing)*0)}.min-h-16{min-height:calc(var(--spacing)*16)}.min-h-\[36px\]{min-height:36px}.min-h-\[40px\]{min-height:40px}.min-h-\[50vh\]{min-height:50vh}.min-h-\[400px\]{min-height:400px}.min-h-screen{min-height:100vh}.\!w-2{width:calc(var(--spacing)*2)!important}.w-1{width:calc(var(--spacing)*1)}.w-2{width:calc(var(--spacing)*2)}.w-2\.5{width:calc(var(--spacing)*2.5)}.w-3{width:calc(var(--spacing)*3)}.w-3\.5{width:calc(var(--spacing)*3.5)}.w-4{width:calc(var(--spacing)*4)}.w-5{width:calc(var(--spacing)*5)}.w-6{width:calc(var(--spacing)*6)}.w-8{width:calc(var(--spacing)*8)}.w-9{width:calc(var(--spacing)*9)}.w-10{width:calc(var(--spacing)*10)}.w-12{width:calc(var(--spacing)*12)}.w-16{width:calc(var(--spacing)*16)}.w-20{width:calc(var(--spacing)*20)}.w-56{width:calc(var(--spacing)*56)}.w-64{width:calc(var(--spacing)*64)}.w-80{width:calc(var(--spacing)*80)}.w-\[1\.2rem\]{width:1.2rem}.w-\[1px\]{width:1px}.w-\[28rem\]{width:28rem}.w-\[90vw\]{width:90vw}.w-\[600px\]{width:600px}.w-\[800px\]{width:800px}.w-fit{width:fit-content}.w-full{width:100%}.w-px{width:1px}.max-w-2xl{max-width:var(--container-2xl)}.
max-w-3xl{max-width:var(--container-3xl)}.max-w-4xl{max-width:var(--container-4xl)}.max-w-6xl{max-width:var(--container-6xl)}.max-w-7xl{max-width:var(--container-7xl)}.max-w-\[80\%\]{max-width:80%}.max-w-\[90vw\]{max-width:90vw}.max-w-\[200px\]{max-width:200px}.max-w-full{max-width:100%}.max-w-lg{max-width:var(--container-lg)}.max-w-md{max-width:var(--container-md)}.max-w-none{max-width:none}.\!min-w-0{min-width:calc(var(--spacing)*0)!important}.min-w-0{min-width:calc(var(--spacing)*0)}.min-w-\[1\.25rem\]{min-width:1.25rem}.min-w-\[8rem\]{min-width:8rem}.min-w-\[50px\]{min-width:50px}.min-w-\[80px\]{min-width:80px}.min-w-\[300px\]{min-width:300px}.min-w-\[400px\]{min-width:400px}.min-w-\[800px\]{min-width:800px}.min-w-\[var\(--radix-select-trigger-width\)\]{min-width:var(--radix-select-trigger-width)}.min-w-full{min-width:100%}.flex-1{flex:1}.flex-shrink-0,.shrink-0{flex-shrink:0}.origin-\(--radix-dropdown-menu-content-transform-origin\){transform-origin:var(--radix-dropdown-menu-content-transform-origin)}.origin-\(--radix-select-content-transform-origin\){transform-origin:var(--radix-select-content-transform-origin)}.origin-bottom{transform-origin:bottom}.-translate-x-1\/2{--tw-translate-x: -50% 
;translate:var(--tw-translate-x)var(--tw-translate-y)}.translate-x-0{--tw-translate-x:calc(var(--spacing)*0);translate:var(--tw-translate-x)var(--tw-translate-y)}.translate-x-4{--tw-translate-x:calc(var(--spacing)*4);translate:var(--tw-translate-x)var(--tw-translate-y)}.scale-0{--tw-scale-x:0%;--tw-scale-y:0%;--tw-scale-z:0%;scale:var(--tw-scale-x)var(--tw-scale-y)}.scale-75{--tw-scale-x:75%;--tw-scale-y:75%;--tw-scale-z:75%;scale:var(--tw-scale-x)var(--tw-scale-y)}.scale-100{--tw-scale-x:100%;--tw-scale-y:100%;--tw-scale-z:100%;scale:var(--tw-scale-x)var(--tw-scale-y)}.rotate-0{rotate:none}.rotate-90{rotate:90deg}.transform{transform:var(--tw-rotate-x,)var(--tw-rotate-y,)var(--tw-rotate-z,)var(--tw-skew-x,)var(--tw-skew-y,)}.animate-bounce{animation:var(--animate-bounce)}.animate-in{animation:enter var(--tw-animation-duration,var(--tw-duration,.15s))var(--tw-ease,ease)var(--tw-animation-delay,0s)var(--tw-animation-iteration-count,1)var(--tw-animation-direction,normal)var(--tw-animation-fill-mode,none)}.animate-pulse{animation:var(--animate-pulse)}.animate-spin{animation:var(--animate-spin)}.cursor-col-resize{cursor:col-resize}.cursor-default{cursor:default}.cursor-pointer{cursor:pointer}.touch-none{touch-action:none}.resize{resize:both}.resize-none{resize:none}.scroll-my-1{scroll-margin-block:calc(var(--spacing)*1)}.list-inside{list-style-position:inside}.list-decimal{list-style-type:decimal}.list-disc{list-style-type:disc}.list-none{list-style-type:none}.auto-rows-min{grid-auto-rows:min-content}.grid-cols-1{grid-template-columns:repeat(1,minmax(0,1fr))}.grid-cols-2{grid-template-columns:repeat(2,minmax(0,1fr))}.grid-cols-3{grid-template-columns:repeat(3,minmax(0,1fr))}.grid-cols-\[auto_auto_1fr_auto\]{grid-template-columns:auto auto 1fr auto}.grid-rows-\[auto_auto\]{grid-template-rows:auto 
auto}.flex-col{flex-direction:column}.flex-row-reverse{flex-direction:row-reverse}.flex-wrap{flex-wrap:wrap}.items-center{align-items:center}.items-end{align-items:flex-end}.items-start{align-items:flex-start}.items-stretch{align-items:stretch}.justify-between{justify-content:space-between}.justify-center{justify-content:center}.justify-end{justify-content:flex-end}.gap-0{gap:calc(var(--spacing)*0)}.gap-1{gap:calc(var(--spacing)*1)}.gap-1\.5{gap:calc(var(--spacing)*1.5)}.gap-2{gap:calc(var(--spacing)*2)}.gap-3{gap:calc(var(--spacing)*3)}.gap-4{gap:calc(var(--spacing)*4)}.gap-6{gap:calc(var(--spacing)*6)}:where(.space-y-0\.5>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*.5)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*.5)*calc(1 - var(--tw-space-y-reverse)))}:where(.space-y-1>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*1)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*1)*calc(1 - var(--tw-space-y-reverse)))}:where(.space-y-1\.5>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*1.5)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*1.5)*calc(1 - var(--tw-space-y-reverse)))}:where(.space-y-2>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*2)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*2)*calc(1 - var(--tw-space-y-reverse)))}:where(.space-y-3>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*3)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*3)*calc(1 - var(--tw-space-y-reverse)))}:where(.space-y-4>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*4)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*4)*calc(1 - 
var(--tw-space-y-reverse)))}:where(.space-y-6>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*6)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*6)*calc(1 - var(--tw-space-y-reverse)))}.gap-x-4{column-gap:calc(var(--spacing)*4)}:where(.space-x-1>:not(:last-child)){--tw-space-x-reverse:0;margin-inline-start:calc(calc(var(--spacing)*1)*var(--tw-space-x-reverse));margin-inline-end:calc(calc(var(--spacing)*1)*calc(1 - var(--tw-space-x-reverse)))}:where(.space-x-2>:not(:last-child)){--tw-space-x-reverse:0;margin-inline-start:calc(calc(var(--spacing)*2)*var(--tw-space-x-reverse));margin-inline-end:calc(calc(var(--spacing)*2)*calc(1 - var(--tw-space-x-reverse)))}.gap-y-1{row-gap:calc(var(--spacing)*1)}.self-start{align-self:flex-start}.justify-self-end{justify-self:flex-end}.truncate{text-overflow:ellipsis;white-space:nowrap;overflow:hidden}.overflow-auto{overflow:auto}.overflow-hidden{overflow:hidden}.overflow-x-auto{overflow-x:auto}.overflow-x-hidden{overflow-x:hidden}.overflow-y-auto{overflow-y:auto}.\!rounded-full{border-radius:3.40282e38px!important}.rounded{border-radius:.25rem}.rounded-\[4px\]{border-radius:4px}.rounded-\[inherit\]{border-radius:inherit}.rounded-full{border-radius:3.40282e38px}.rounded-lg{border-radius:var(--radius)}.rounded-md{border-radius:calc(var(--radius) - 2px)}.rounded-none{border-radius:0}.rounded-sm{border-radius:calc(var(--radius) - 
4px)}.rounded-l-none{border-top-left-radius:0;border-bottom-left-radius:0}.rounded-r-none{border-top-right-radius:0;border-bottom-right-radius:0}.\!border{border-style:var(--tw-border-style)!important;border-width:1px!important}.border{border-style:var(--tw-border-style);border-width:1px}.border-2{border-style:var(--tw-border-style);border-width:2px}.border-t{border-top-style:var(--tw-border-style);border-top-width:1px}.border-r{border-right-style:var(--tw-border-style);border-right-width:1px}.border-b{border-bottom-style:var(--tw-border-style);border-bottom-width:1px}.border-l{border-left-style:var(--tw-border-style);border-left-width:1px}.border-l-0{border-left-style:var(--tw-border-style);border-left-width:0}.border-l-2{border-left-style:var(--tw-border-style);border-left-width:2px}.border-l-4{border-left-style:var(--tw-border-style);border-left-width:4px}.border-dashed{--tw-border-style:dashed;border-style:dashed}.\!border-gray-600{border-color:var(--color-gray-600)!important}.border-\[\#643FB2\]{border-color:#643fb2}.border-\[\#643FB2\]\/20{border-color:#643fb233}.border-\[\#643FB2\]\/30{border-color:#643fb24d}.border-amber-200{border-color:var(--color-amber-200)}.border-blue-200{border-color:var(--color-blue-200)}.border-blue-300{border-color:var(--color-blue-300)}.border-blue-400{border-color:var(--color-blue-400)}.border-blue-500{border-color:var(--color-blue-500)}.border-border,.border-border\/50{border-color:var(--border)}@supports (color:color-mix(in lab,red,red)){.border-border\/50{border-color:color-mix(in oklab,var(--border)50%,transparent)}}.border-current\/30{border-color:currentColor}@supports (color:color-mix(in lab,red,red)){.border-current\/30{border-color:color-mix(in oklab,currentcolor 30%,transparent)}}.border-destructive\/30{border-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.border-destructive\/30{border-color:color-mix(in 
oklab,var(--destructive)30%,transparent)}}.border-foreground\/5{border-color:var(--foreground)}@supports (color:color-mix(in lab,red,red)){.border-foreground\/5{border-color:color-mix(in oklab,var(--foreground)5%,transparent)}}.border-foreground\/10{border-color:var(--foreground)}@supports (color:color-mix(in lab,red,red)){.border-foreground\/10{border-color:color-mix(in oklab,var(--foreground)10%,transparent)}}.border-foreground\/20{border-color:var(--foreground)}@supports (color:color-mix(in lab,red,red)){.border-foreground\/20{border-color:color-mix(in oklab,var(--foreground)20%,transparent)}}.border-gray-200{border-color:var(--color-gray-200)}.border-gray-300{border-color:var(--color-gray-300)}.border-gray-400{border-color:var(--color-gray-400)}.border-gray-500\/20{border-color:#6a728233}@supports (color:color-mix(in lab,red,red)){.border-gray-500\/20{border-color:color-mix(in oklab,var(--color-gray-500)20%,transparent)}}.border-green-200{border-color:var(--color-green-200)}.border-green-500{border-color:var(--color-green-500)}.border-green-500\/20{border-color:#00c75833}@supports (color:color-mix(in lab,red,red)){.border-green-500\/20{border-color:color-mix(in oklab,var(--color-green-500)20%,transparent)}}.border-green-500\/40{border-color:#00c75866}@supports (color:color-mix(in lab,red,red)){.border-green-500\/40{border-color:color-mix(in oklab,var(--color-green-500)40%,transparent)}}.border-green-600\/20{border-color:#00a54433}@supports (color:color-mix(in lab,red,red)){.border-green-600\/20{border-color:color-mix(in oklab,var(--color-green-600)20%,transparent)}}.border-input{border-color:var(--input)}.border-muted,.border-muted\/50{border-color:var(--muted)}@supports (color:color-mix(in lab,red,red)){.border-muted\/50{border-color:color-mix(in 
oklab,var(--muted)50%,transparent)}}.border-orange-200{border-color:var(--color-orange-200)}.border-orange-300{border-color:var(--color-orange-300)}.border-orange-500{border-color:var(--color-orange-500)}.border-orange-500\/20{border-color:#fe6e0033}@supports (color:color-mix(in lab,red,red)){.border-orange-500\/20{border-color:color-mix(in oklab,var(--color-orange-500)20%,transparent)}}.border-orange-500\/40{border-color:#fe6e0066}@supports (color:color-mix(in lab,red,red)){.border-orange-500\/40{border-color:color-mix(in oklab,var(--color-orange-500)40%,transparent)}}.border-orange-600\/20{border-color:#f0510033}@supports (color:color-mix(in lab,red,red)){.border-orange-600\/20{border-color:color-mix(in oklab,var(--color-orange-600)20%,transparent)}}.border-primary,.border-primary\/20{border-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.border-primary\/20{border-color:color-mix(in oklab,var(--primary)20%,transparent)}}.border-red-200{border-color:var(--color-red-200)}.border-red-500{border-color:var(--color-red-500)}.border-red-500\/20{border-color:#fb2c3633}@supports (color:color-mix(in lab,red,red)){.border-red-500\/20{border-color:color-mix(in oklab,var(--color-red-500)20%,transparent)}}.border-transparent{border-color:#0000}.border-yellow-200{border-color:var(--color-yellow-200)}.border-t-transparent{border-top-color:#0000}.border-l-transparent{border-left-color:#0000}.bg-\[\#643FB2\]{background-color:#643fb2}.bg-\[\#643FB2\]\/10{background-color:#643fb21a}.bg-accent\/10{background-color:var(--accent)}@supports (color:color-mix(in lab,red,red)){.bg-accent\/10{background-color:color-mix(in oklab,var(--accent)10%,transparent)}}.bg-amber-50{background-color:var(--color-amber-50)}.bg-amber-500{background-color:var(--color-amber-500)}.bg-amber-500\/10{background-color:#f99c001a}@supports (color:color-mix(in lab,red,red)){.bg-amber-500\/10{background-color:color-mix(in 
oklab,var(--color-amber-500)10%,transparent)}}.bg-background,.bg-background\/50{background-color:var(--background)}@supports (color:color-mix(in lab,red,red)){.bg-background\/50{background-color:color-mix(in oklab,var(--background)50%,transparent)}}.bg-black{background-color:var(--color-black)}.bg-black\/50{background-color:#00000080}@supports (color:color-mix(in lab,red,red)){.bg-black\/50{background-color:color-mix(in oklab,var(--color-black)50%,transparent)}}.bg-black\/60{background-color:#0009}@supports (color:color-mix(in lab,red,red)){.bg-black\/60{background-color:color-mix(in oklab,var(--color-black)60%,transparent)}}.bg-blue-50{background-color:var(--color-blue-50)}.bg-blue-50\/80{background-color:#eff6ffcc}@supports (color:color-mix(in lab,red,red)){.bg-blue-50\/80{background-color:color-mix(in oklab,var(--color-blue-50)80%,transparent)}}.bg-blue-50\/95{background-color:#eff6fff2}@supports (color:color-mix(in lab,red,red)){.bg-blue-50\/95{background-color:color-mix(in oklab,var(--color-blue-50)95%,transparent)}}.bg-blue-100{background-color:var(--color-blue-100)}.bg-blue-500{background-color:var(--color-blue-500)}.bg-blue-500\/5{background-color:#3080ff0d}@supports (color:color-mix(in lab,red,red)){.bg-blue-500\/5{background-color:color-mix(in oklab,var(--color-blue-500)5%,transparent)}}.bg-blue-500\/10{background-color:#3080ff1a}@supports (color:color-mix(in lab,red,red)){.bg-blue-500\/10{background-color:color-mix(in oklab,var(--color-blue-500)10%,transparent)}}.bg-blue-600{background-color:var(--color-blue-600)}.bg-border{background-color:var(--border)}.bg-card{background-color:var(--card)}.bg-current{background-color:currentColor}.bg-destructive,.bg-destructive\/10{background-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.bg-destructive\/10{background-color:color-mix(in 
oklab,var(--destructive)10%,transparent)}}.bg-emerald-500{background-color:var(--color-emerald-500)}.bg-foreground\/5{background-color:var(--foreground)}@supports (color:color-mix(in lab,red,red)){.bg-foreground\/5{background-color:color-mix(in oklab,var(--foreground)5%,transparent)}}.bg-foreground\/10{background-color:var(--foreground)}@supports (color:color-mix(in lab,red,red)){.bg-foreground\/10{background-color:color-mix(in oklab,var(--foreground)10%,transparent)}}.bg-gray-50{background-color:var(--color-gray-50)}.bg-gray-100{background-color:var(--color-gray-100)}.bg-gray-200{background-color:var(--color-gray-200)}.bg-gray-400{background-color:var(--color-gray-400)}.bg-gray-500\/10{background-color:#6a72821a}@supports (color:color-mix(in lab,red,red)){.bg-gray-500\/10{background-color:color-mix(in oklab,var(--color-gray-500)10%,transparent)}}.bg-gray-900\/90{background-color:#101828e6}@supports (color:color-mix(in lab,red,red)){.bg-gray-900\/90{background-color:color-mix(in oklab,var(--color-gray-900)90%,transparent)}}.bg-green-50{background-color:var(--color-green-50)}.bg-green-100{background-color:var(--color-green-100)}.bg-green-500{background-color:var(--color-green-500)}.bg-green-500\/5{background-color:#00c7580d}@supports (color:color-mix(in lab,red,red)){.bg-green-500\/5{background-color:color-mix(in oklab,var(--color-green-500)5%,transparent)}}.bg-green-500\/10{background-color:#00c7581a}@supports (color:color-mix(in lab,red,red)){.bg-green-500\/10{background-color:color-mix(in oklab,var(--color-green-500)10%,transparent)}}.bg-muted{background-color:var(--muted)}.bg-muted-foreground\/20{background-color:var(--muted-foreground)}@supports (color:color-mix(in lab,red,red)){.bg-muted-foreground\/20{background-color:color-mix(in oklab,var(--muted-foreground)20%,transparent)}}.bg-muted-foreground\/30{background-color:var(--muted-foreground)}@supports (color:color-mix(in lab,red,red)){.bg-muted-foreground\/30{background-color:color-mix(in 
oklab,var(--muted-foreground)30%,transparent)}}.bg-muted\/30{background-color:var(--muted)}@supports (color:color-mix(in lab,red,red)){.bg-muted\/30{background-color:color-mix(in oklab,var(--muted)30%,transparent)}}.bg-muted\/50{background-color:var(--muted)}@supports (color:color-mix(in lab,red,red)){.bg-muted\/50{background-color:color-mix(in oklab,var(--muted)50%,transparent)}}.bg-orange-50{background-color:var(--color-orange-50)}.bg-orange-50\/50{background-color:#fff7ed80}@supports (color:color-mix(in lab,red,red)){.bg-orange-50\/50{background-color:color-mix(in oklab,var(--color-orange-50)50%,transparent)}}.bg-orange-100{background-color:var(--color-orange-100)}.bg-orange-100\/50{background-color:#ffedd580}@supports (color:color-mix(in lab,red,red)){.bg-orange-100\/50{background-color:color-mix(in oklab,var(--color-orange-100)50%,transparent)}}.bg-orange-500{background-color:var(--color-orange-500)}.bg-orange-500\/5{background-color:#fe6e000d}@supports (color:color-mix(in lab,red,red)){.bg-orange-500\/5{background-color:color-mix(in oklab,var(--color-orange-500)5%,transparent)}}.bg-orange-500\/10{background-color:#fe6e001a}@supports (color:color-mix(in lab,red,red)){.bg-orange-500\/10{background-color:color-mix(in oklab,var(--color-orange-500)10%,transparent)}}.bg-popover{background-color:var(--popover)}.bg-primary,.bg-primary\/10{background-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.bg-primary\/10{background-color:color-mix(in oklab,var(--primary)10%,transparent)}}.bg-primary\/30{background-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.bg-primary\/30{background-color:color-mix(in oklab,var(--primary)30%,transparent)}}.bg-primary\/40{background-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.bg-primary\/40{background-color:color-mix(in 
oklab,var(--primary)40%,transparent)}}.bg-purple-50{background-color:var(--color-purple-50)}.bg-purple-100{background-color:var(--color-purple-100)}.bg-purple-500{background-color:var(--color-purple-500)}.bg-red-50{background-color:var(--color-red-50)}.bg-red-100{background-color:var(--color-red-100)}.bg-red-500{background-color:var(--color-red-500)}.bg-red-500\/10{background-color:#fb2c361a}@supports (color:color-mix(in lab,red,red)){.bg-red-500\/10{background-color:color-mix(in oklab,var(--color-red-500)10%,transparent)}}.bg-secondary{background-color:var(--secondary)}.bg-transparent{background-color:#0000}.bg-white{background-color:var(--color-white)}.bg-white\/60{background-color:#fff9}@supports (color:color-mix(in lab,red,red)){.bg-white\/60{background-color:color-mix(in oklab,var(--color-white)60%,transparent)}}.bg-white\/90{background-color:#ffffffe6}@supports (color:color-mix(in lab,red,red)){.bg-white\/90{background-color:color-mix(in oklab,var(--color-white)90%,transparent)}}.bg-yellow-100{background-color:var(--color-yellow-100)}.fill-current{fill:currentColor}.object-cover{object-fit:cover}.p-0{padding:calc(var(--spacing)*0)}.p-1{padding:calc(var(--spacing)*1)}.p-1\.5{padding:calc(var(--spacing)*1.5)}.p-2{padding:calc(var(--spacing)*2)}.p-3{padding:calc(var(--spacing)*3)}.p-4{padding:calc(var(--spacing)*4)}.p-6{padding:calc(var(--spacing)*6)}.p-8{padding:calc(var(--spacing)*8)}.p-\[1px\]{padding:1px}.px-1{padding-inline:calc(var(--spacing)*1)}.px-1\.5{padding-inline:calc(var(--spacing)*1.5)}.px-2{padding-inline:calc(var(--spacing)*2)}.px-2\.5{padding-inline:calc(var(--spacing)*2.5)}.px-3{padding-inline:calc(var(--spacing)*3)}.px-4{padding-inline:calc(var(--spacing)*4)}.px-6{padding-inline:calc(var(--spacing)*6)}.px-8{padding-inline:calc(var(--spacing)*8)}.py-0{padding-block:calc(var(--spacing)*0)}.py-0\.5{padding-block:calc(var(--spacing)*.5)}.py-1{padding-block:calc(var(--spacing)*1)}.py-1\.5{padding-block:calc(var(--spacing)*1.5)}.py-2{padding-block:ca
lc(var(--spacing)*2)}.py-2\.5{padding-block:calc(var(--spacing)*2.5)}.py-3{padding-block:calc(var(--spacing)*3)}.py-4{padding-block:calc(var(--spacing)*4)}.py-6{padding-block:calc(var(--spacing)*6)}.py-8{padding-block:calc(var(--spacing)*8)}.pt-0{padding-top:calc(var(--spacing)*0)}.pt-1{padding-top:calc(var(--spacing)*1)}.pt-2{padding-top:calc(var(--spacing)*2)}.pt-3{padding-top:calc(var(--spacing)*3)}.pt-4{padding-top:calc(var(--spacing)*4)}.pt-6{padding-top:calc(var(--spacing)*6)}.pt-8{padding-top:calc(var(--spacing)*8)}.pr-2{padding-right:calc(var(--spacing)*2)}.pr-4{padding-right:calc(var(--spacing)*4)}.pr-8{padding-right:calc(var(--spacing)*8)}.pb-2{padding-bottom:calc(var(--spacing)*2)}.pb-3{padding-bottom:calc(var(--spacing)*3)}.pb-4{padding-bottom:calc(var(--spacing)*4)}.pb-6{padding-bottom:calc(var(--spacing)*6)}.pl-2{padding-left:calc(var(--spacing)*2)}.pl-3{padding-left:calc(var(--spacing)*3)}.pl-4{padding-left:calc(var(--spacing)*4)}.pl-5{padding-left:calc(var(--spacing)*5)}.pl-8{padding-left:calc(var(--spacing)*8)}.text-center{text-align:center}.text-left{text-align:left}.text-right{text-align:right}.font-mono{font-family:var(--font-mono)}.text-2xl{font-size:var(--text-2xl);line-height:var(--tw-leading,var(--text-2xl--line-height))}.text-base{font-size:var(--text-base);line-height:var(--tw-leading,var(--text-base--line-height))}.text-lg{font-size:var(--text-lg);line-height:var(--tw-leading,var(--text-lg--line-height))}.text-sm{font-size:var(--text-sm);line-height:var(--tw-leading,var(--text-sm--line-height))}.text-xl{font-size:var(--text-xl);line-height:var(--tw-leading,var(--text-xl--line-height))}.text-xs{font-size:var(--text-xs);line-height:var(--tw-leading,var(--text-xs--line-height))}.text-\[10px\]{font-size:10px}.leading-none{--tw-leading:1;line-height:1}.leading-relaxed{--tw-leading:var(--leading-relaxed);line-height:var(--leading-relaxed)}.leading-tight{--tw-leading:var(--leading-tight);line-height:var(--leading-tight)}.font-bold{--tw-font-weigh
t:var(--font-weight-bold);font-weight:var(--font-weight-bold)}.font-medium{--tw-font-weight:var(--font-weight-medium);font-weight:var(--font-weight-medium)}.font-semibold{--tw-font-weight:var(--font-weight-semibold);font-weight:var(--font-weight-semibold)}.tracking-tight{--tw-tracking:var(--tracking-tight);letter-spacing:var(--tracking-tight)}.tracking-wide{--tw-tracking:var(--tracking-wide);letter-spacing:var(--tracking-wide)}.tracking-wider{--tw-tracking:var(--tracking-wider);letter-spacing:var(--tracking-wider)}.tracking-widest{--tw-tracking:var(--tracking-widest);letter-spacing:var(--tracking-widest)}.break-words{overflow-wrap:break-word}.break-all{word-break:break-all}.whitespace-nowrap{white-space:nowrap}.whitespace-pre-wrap{white-space:pre-wrap}.text-\[\#643FB2\]{color:#643fb2}.text-amber-500{color:var(--color-amber-500)}.text-amber-600{color:var(--color-amber-600)}.text-amber-600\/80{color:#dd7400cc}@supports (color:color-mix(in lab,red,red)){.text-amber-600\/80{color:color-mix(in oklab,var(--color-amber-600)80%,transparent)}}.text-amber-700{color:var(--color-amber-700)}.text-amber-800{color:var(--color-amber-800)}.text-amber-900{color:var(--color-amber-900)}.text-blue-500{color:var(--color-blue-500)}.text-blue-500\/80{color:#3080ffcc}@supports (color:color-mix(in lab,red,red)){.text-blue-500\/80{color:color-mix(in oklab,var(--color-blue-500)80%,transparent)}}.text-blue-600{color:var(--color-blue-600)}.text-blue-700{color:var(--color-blue-700)}.text-blue-800{color:var(--color-blue-800)}.text-blue-900{color:var(--color-blue-900)}.text-card-foreground{color:var(--card-foreground)}.text-current{color:currentColor}.text-destructive,.text-destructive\/70{color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.text-destructive\/70{color:color-mix(in oklab,var(--destructive)70%,transparent)}}.text-destructive\/90{color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.text-destructive\/90{color:color-mix(in 
oklab,var(--destructive)90%,transparent)}}.text-emerald-600{color:var(--color-emerald-600)}.text-foreground{color:var(--foreground)}.text-gray-300{color:var(--color-gray-300)}.text-gray-400{color:var(--color-gray-400)}.text-gray-500{color:var(--color-gray-500)}.text-gray-600{color:var(--color-gray-600)}.text-gray-700{color:var(--color-gray-700)}.text-gray-900{color:var(--color-gray-900)}.text-green-500{color:var(--color-green-500)}.text-green-600{color:var(--color-green-600)}.text-green-700{color:var(--color-green-700)}.text-green-800{color:var(--color-green-800)}.text-green-900{color:var(--color-green-900)}.text-muted-foreground,.text-muted-foreground\/60{color:var(--muted-foreground)}@supports (color:color-mix(in lab,red,red)){.text-muted-foreground\/60{color:color-mix(in oklab,var(--muted-foreground)60%,transparent)}}.text-muted-foreground\/70{color:var(--muted-foreground)}@supports (color:color-mix(in lab,red,red)){.text-muted-foreground\/70{color:color-mix(in oklab,var(--muted-foreground)70%,transparent)}}.text-muted-foreground\/80{color:var(--muted-foreground)}@supports (color:color-mix(in lab,red,red)){.text-muted-foreground\/80{color:color-mix(in 
oklab,var(--muted-foreground)80%,transparent)}}.text-orange-500{color:var(--color-orange-500)}.text-orange-600{color:var(--color-orange-600)}.text-orange-700{color:var(--color-orange-700)}.text-orange-800{color:var(--color-orange-800)}.text-orange-900{color:var(--color-orange-900)}.text-popover-foreground{color:var(--popover-foreground)}.text-primary{color:var(--primary)}.text-primary-foreground{color:var(--primary-foreground)}.text-purple-500{color:var(--color-purple-500)}.text-purple-600{color:var(--color-purple-600)}.text-purple-800{color:var(--color-purple-800)}.text-red-400{color:var(--color-red-400)}.text-red-500{color:var(--color-red-500)}.text-red-600{color:var(--color-red-600)}.text-red-700{color:var(--color-red-700)}.text-red-800{color:var(--color-red-800)}.text-secondary-foreground{color:var(--secondary-foreground)}.text-white{color:var(--color-white)}.text-yellow-600{color:var(--color-yellow-600)}.text-yellow-700{color:var(--color-yellow-700)}.capitalize{text-transform:capitalize}.lowercase{text-transform:lowercase}.uppercase{text-transform:uppercase}.italic{font-style:italic}.underline-offset-4{text-underline-offset:4px}.opacity-0{opacity:0}.opacity-50{opacity:.5}.opacity-60{opacity:.6}.opacity-70{opacity:.7}.opacity-80{opacity:.8}.opacity-100{opacity:1}.shadow{--tw-shadow:0 1px 3px 0 var(--tw-shadow-color,#0000001a),0 1px 2px -1px var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.shadow-lg{--tw-shadow:0 10px 15px -3px var(--tw-shadow-color,#0000001a),0 4px 6px -4px var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.shadow-md{--tw-shadow:0 4px 6px -1px var(--tw-shadow-color,#0000001a),0 2px 4px -2px 
var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.shadow-sm{--tw-shadow:0 1px 3px 0 var(--tw-shadow-color,#0000001a),0 1px 2px -1px var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.shadow-xs{--tw-shadow:0 1px 2px 0 var(--tw-shadow-color,#0000000d);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.ring-0{--tw-ring-shadow:var(--tw-ring-inset,)0 0 0 calc(0px + var(--tw-ring-offset-width))var(--tw-ring-color,currentcolor);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.ring-2{--tw-ring-shadow:var(--tw-ring-inset,)0 0 0 calc(2px + var(--tw-ring-offset-width))var(--tw-ring-color,currentcolor);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.shadow-\[\#643FB2\]\/20{--tw-shadow-color:#643fb233}@supports (color:color-mix(in lab,red,red)){.shadow-\[\#643FB2\]\/20{--tw-shadow-color:color-mix(in oklab,oklab(47.4316% .069152 -.159147/.2) var(--tw-shadow-alpha),transparent)}}.shadow-green-500\/20{--tw-shadow-color:#00c75833}@supports (color:color-mix(in lab,red,red)){.shadow-green-500\/20{--tw-shadow-color:color-mix(in oklab,color-mix(in oklab,var(--color-green-500)20%,transparent)var(--tw-shadow-alpha),transparent)}}.shadow-orange-500\/20{--tw-shadow-color:#fe6e0033}@supports (color:color-mix(in lab,red,red)){.shadow-orange-500\/20{--tw-shadow-color:color-mix(in oklab,color-mix(in oklab,var(--color-orange-500)20%,transparent)var(--tw-shadow-alpha),transparent)}}.shadow-primary\/25{--tw-shadow-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.shadow-primary\/25{--tw-shadow-color:color-mix(in 
oklab,color-mix(in oklab,var(--primary)25%,transparent)var(--tw-shadow-alpha),transparent)}}.shadow-red-500\/20{--tw-shadow-color:#fb2c3633}@supports (color:color-mix(in lab,red,red)){.shadow-red-500\/20{--tw-shadow-color:color-mix(in oklab,color-mix(in oklab,var(--color-red-500)20%,transparent)var(--tw-shadow-alpha),transparent)}}.ring-blue-500{--tw-ring-color:var(--color-blue-500)}.ring-blue-500\/20{--tw-ring-color:#3080ff33}@supports (color:color-mix(in lab,red,red)){.ring-blue-500\/20{--tw-ring-color:color-mix(in oklab,var(--color-blue-500)20%,transparent)}}.ring-offset-2{--tw-ring-offset-width:2px;--tw-ring-offset-shadow:var(--tw-ring-inset,)0 0 0 var(--tw-ring-offset-width)var(--tw-ring-offset-color)}.ring-offset-background{--tw-ring-offset-color:var(--background)}.outline-hidden{--tw-outline-style:none;outline-style:none}@media (forced-colors:active){.outline-hidden{outline-offset:2px;outline:2px solid #0000}}.outline{outline-style:var(--tw-outline-style);outline-width:1px}.drop-shadow-lg{--tw-drop-shadow-size:drop-shadow(0 4px 4px 
var(--tw-drop-shadow-color,#00000026));--tw-drop-shadow:drop-shadow(var(--drop-shadow-lg));filter:var(--tw-blur,)var(--tw-brightness,)var(--tw-contrast,)var(--tw-grayscale,)var(--tw-hue-rotate,)var(--tw-invert,)var(--tw-saturate,)var(--tw-sepia,)var(--tw-drop-shadow,)}.filter{filter:var(--tw-blur,)var(--tw-brightness,)var(--tw-contrast,)var(--tw-grayscale,)var(--tw-hue-rotate,)var(--tw-invert,)var(--tw-saturate,)var(--tw-sepia,)var(--tw-drop-shadow,)}.backdrop-blur-sm{--tw-backdrop-blur:blur(var(--blur-sm));-webkit-backdrop-filter:var(--tw-backdrop-blur,)var(--tw-backdrop-brightness,)var(--tw-backdrop-contrast,)var(--tw-backdrop-grayscale,)var(--tw-backdrop-hue-rotate,)var(--tw-backdrop-invert,)var(--tw-backdrop-opacity,)var(--tw-backdrop-saturate,)var(--tw-backdrop-sepia,);backdrop-filter:var(--tw-backdrop-blur,)var(--tw-backdrop-brightness,)var(--tw-backdrop-contrast,)var(--tw-backdrop-grayscale,)var(--tw-backdrop-hue-rotate,)var(--tw-backdrop-invert,)var(--tw-backdrop-opacity,)var(--tw-backdrop-saturate,)var(--tw-backdrop-sepia,)}.transition{transition-property:color,background-color,border-color,outline-color,text-decoration-color,fill,stroke,--tw-gradient-from,--tw-gradient-via,--tw-gradient-to,opacity,box-shadow,transform,translate,scale,rotate,filter,-webkit-backdrop-filter,backdrop-filter,display,visibility,content-visibility,overlay,pointer-events;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-\[color\,box-shadow\]{transition-property:color,box-shadow;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-all{transition-property:all;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-colors{transition-proper
ty:color,background-color,border-color,outline-color,text-decoration-color,fill,stroke,--tw-gradient-from,--tw-gradient-via,--tw-gradient-to;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-opacity{transition-property:opacity;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-shadow{transition-property:box-shadow;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-transform{transition-property:transform,translate,scale,rotate;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-none{transition-property:none}.duration-150{--tw-duration:.15s;transition-duration:.15s}.duration-200{--tw-duration:.2s;transition-duration:.2s}.duration-300{--tw-duration:.3s;transition-duration:.3s}.ease-in-out{--tw-ease:var(--ease-in-out);transition-timing-function:var(--ease-in-out)}.ease-out{--tw-ease:var(--ease-out);transition-timing-function:var(--ease-out)}.fade-in-0{--tw-enter-opacity:0}.outline-none{--tw-outline-style:none;outline-style:none}.select-none{-webkit-user-select:none;user-select:none}.zoom-in-95{--tw-enter-scale:.95}.\[animation-delay\:-0\.3s\]{animation-delay:-.3s}.\[animation-delay\:-0\.15s\]{animation-delay:-.15s}.fade-in{--tw-enter-opacity:0}.running{animation-play-state:running}.slide-in-from-bottom-2{--tw-enter-translate-y:calc(2*var(--spacing))}.group-open\:rotate-90:is(:where(.group):is([open],:popover-open,:open) *){rotate:90deg}.group-open\:rotate-180:is(:where(.group):is([open],:popover-open,:open) *){rotate:180deg}@media (hover:hover){.group-hover\:bg-primary:is(:where(.group):hover 
*){background-color:var(--primary)}.group-hover\:opacity-100:is(:where(.group):hover *){opacity:1}.group-hover\:shadow-md:is(:where(.group):hover *){--tw-shadow:0 4px 6px -1px var(--tw-shadow-color,#0000001a),0 2px 4px -2px var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.group-hover\:shadow-primary\/20:is(:where(.group):hover *){--tw-shadow-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.group-hover\:shadow-primary\/20:is(:where(.group):hover *){--tw-shadow-color:color-mix(in oklab,color-mix(in oklab,var(--primary)20%,transparent)var(--tw-shadow-alpha),transparent)}}}.group-data-\[disabled\=true\]\:pointer-events-none:is(:where(.group)[data-disabled=true] *){pointer-events:none}.group-data-\[disabled\=true\]\:opacity-50:is(:where(.group)[data-disabled=true] *){opacity:.5}.peer-disabled\:cursor-not-allowed:is(:where(.peer):disabled~*){cursor:not-allowed}.peer-disabled\:opacity-50:is(:where(.peer):disabled~*){opacity:.5}.selection\:bg-primary ::selection{background-color:var(--primary)}.selection\:bg-primary::selection{background-color:var(--primary)}.selection\:text-primary-foreground 
::selection{color:var(--primary-foreground)}.selection\:text-primary-foreground::selection{color:var(--primary-foreground)}.file\:inline-flex::file-selector-button{display:inline-flex}.file\:h-7::file-selector-button{height:calc(var(--spacing)*7)}.file\:border-0::file-selector-button{border-style:var(--tw-border-style);border-width:0}.file\:bg-transparent::file-selector-button{background-color:#0000}.file\:text-sm::file-selector-button{font-size:var(--text-sm);line-height:var(--tw-leading,var(--text-sm--line-height))}.file\:font-medium::file-selector-button{--tw-font-weight:var(--font-weight-medium);font-weight:var(--font-weight-medium)}.file\:text-foreground::file-selector-button{color:var(--foreground)}.placeholder\:text-muted-foreground::placeholder{color:var(--muted-foreground)}.first\:mt-0:first-child{margin-top:calc(var(--spacing)*0)}.last\:border-0:last-child{border-style:var(--tw-border-style);border-width:0}.last\:border-r-0:last-child{border-right-style:var(--tw-border-style);border-right-width:0}.last\:border-b-0:last-child{border-bottom-style:var(--tw-border-style);border-bottom-width:0}@media (hover:hover){.hover\:scale-y-\[1\.15\]:hover{--tw-scale-y:1.15;scale:var(--tw-scale-x)var(--tw-scale-y)}.hover\:border-gray-300:hover{border-color:var(--color-gray-300)}.hover\:border-muted-foreground\/30:hover{border-color:var(--muted-foreground)}@supports (color:color-mix(in lab,red,red)){.hover\:border-muted-foreground\/30:hover{border-color:color-mix(in oklab,var(--muted-foreground)30%,transparent)}}.hover\:bg-accent:hover,.hover\:bg-accent\/50:hover{background-color:var(--accent)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-accent\/50:hover{background-color:color-mix(in oklab,var(--accent)50%,transparent)}}.hover\:bg-amber-100:hover{background-color:var(--color-amber-100)}.hover\:bg-blue-700:hover{background-color:var(--color-blue-700)}.hover\:bg-destructive\/80:hover{background-color:var(--destructive)}@supports (color:color-mix(in 
lab,red,red)){.hover\:bg-destructive\/80:hover{background-color:color-mix(in oklab,var(--destructive)80%,transparent)}}.hover\:bg-destructive\/90:hover{background-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-destructive\/90:hover{background-color:color-mix(in oklab,var(--destructive)90%,transparent)}}.hover\:bg-muted:hover,.hover\:bg-muted\/30:hover{background-color:var(--muted)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-muted\/30:hover{background-color:color-mix(in oklab,var(--muted)30%,transparent)}}.hover\:bg-muted\/50:hover{background-color:var(--muted)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-muted\/50:hover{background-color:color-mix(in oklab,var(--muted)50%,transparent)}}.hover\:bg-muted\/70:hover{background-color:var(--muted)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-muted\/70:hover{background-color:color-mix(in oklab,var(--muted)70%,transparent)}}.hover\:bg-orange-100:hover{background-color:var(--color-orange-100)}.hover\:bg-primary\/20:hover{background-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-primary\/20:hover{background-color:color-mix(in oklab,var(--primary)20%,transparent)}}.hover\:bg-primary\/80:hover{background-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-primary\/80:hover{background-color:color-mix(in oklab,var(--primary)80%,transparent)}}.hover\:bg-primary\/90:hover{background-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-primary\/90:hover{background-color:color-mix(in oklab,var(--primary)90%,transparent)}}.hover\:bg-red-50:hover{background-color:var(--color-red-50)}.hover\:bg-secondary\/80:hover{background-color:var(--secondary)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-secondary\/80:hover{background-color:color-mix(in 
oklab,var(--secondary)80%,transparent)}}.hover\:bg-white:hover{background-color:var(--color-white)}.hover\:text-accent-foreground:hover{color:var(--accent-foreground)}.hover\:text-destructive\/80:hover{color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.hover\:text-destructive\/80:hover{color:color-mix(in oklab,var(--destructive)80%,transparent)}}.hover\:text-foreground:hover{color:var(--foreground)}.hover\:text-orange-900:hover{color:var(--color-orange-900)}.hover\:text-primary:hover{color:var(--primary)}.hover\:text-red-600:hover{color:var(--color-red-600)}.hover\:underline:hover{text-decoration-line:underline}.hover\:opacity-70:hover{opacity:.7}.hover\:opacity-100:hover{opacity:1}.hover\:shadow-md:hover{--tw-shadow:0 4px 6px -1px var(--tw-shadow-color,#0000001a),0 2px 4px -2px var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.hover\:brightness-110:hover{--tw-brightness:brightness(110%);filter:var(--tw-blur,)var(--tw-brightness,)var(--tw-contrast,)var(--tw-grayscale,)var(--tw-hue-rotate,)var(--tw-invert,)var(--tw-saturate,)var(--tw-sepia,)var(--tw-drop-shadow,)}}.focus\:bg-accent:focus{background-color:var(--accent)}.focus\:text-accent-foreground:focus{color:var(--accent-foreground)}.focus\:ring-2:focus{--tw-ring-shadow:var(--tw-ring-inset,)0 0 0 calc(2px + var(--tw-ring-offset-width))var(--tw-ring-color,currentcolor);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.focus\:ring-ring:focus{--tw-ring-color:var(--ring)}.focus\:ring-offset-2:focus{--tw-ring-offset-width:2px;--tw-ring-offset-shadow:var(--tw-ring-inset,)0 0 0 
var(--tw-ring-offset-width)var(--tw-ring-offset-color)}.focus\:outline-none:focus{--tw-outline-style:none;outline-style:none}.focus-visible\:border-ring:focus-visible{border-color:var(--ring)}.focus-visible\:ring-1:focus-visible{--tw-ring-shadow:var(--tw-ring-inset,)0 0 0 calc(1px + var(--tw-ring-offset-width))var(--tw-ring-color,currentcolor);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.focus-visible\:ring-2:focus-visible{--tw-ring-shadow:var(--tw-ring-inset,)0 0 0 calc(2px + var(--tw-ring-offset-width))var(--tw-ring-color,currentcolor);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.focus-visible\:ring-\[3px\]:focus-visible{--tw-ring-shadow:var(--tw-ring-inset,)0 0 0 calc(3px + var(--tw-ring-offset-width))var(--tw-ring-color,currentcolor);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.focus-visible\:ring-destructive\/20:focus-visible{--tw-ring-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.focus-visible\:ring-destructive\/20:focus-visible{--tw-ring-color:color-mix(in oklab,var(--destructive)20%,transparent)}}.focus-visible\:ring-ring:focus-visible,.focus-visible\:ring-ring\/50:focus-visible{--tw-ring-color:var(--ring)}@supports (color:color-mix(in lab,red,red)){.focus-visible\:ring-ring\/50:focus-visible{--tw-ring-color:color-mix(in oklab,var(--ring)50%,transparent)}}.focus-visible\:ring-offset-2:focus-visible{--tw-ring-offset-width:2px;--tw-ring-offset-shadow:var(--tw-ring-inset,)0 0 0 
var(--tw-ring-offset-width)var(--tw-ring-offset-color)}.focus-visible\:ring-offset-background:focus-visible{--tw-ring-offset-color:var(--background)}.focus-visible\:outline-none:focus-visible{--tw-outline-style:none;outline-style:none}.disabled\:pointer-events-none:disabled{pointer-events:none}.disabled\:cursor-not-allowed:disabled{cursor:not-allowed}.disabled\:opacity-50:disabled{opacity:.5}.has-data-\[slot\=card-action\]\:grid-cols-\[1fr_auto\]:has([data-slot=card-action]){grid-template-columns:1fr auto}.has-\[\>svg\]\:px-2\.5:has(>svg){padding-inline:calc(var(--spacing)*2.5)}.has-\[\>svg\]\:px-3:has(>svg){padding-inline:calc(var(--spacing)*3)}.has-\[\>svg\]\:px-4:has(>svg){padding-inline:calc(var(--spacing)*4)}.aria-invalid\:border-destructive[aria-invalid=true]{border-color:var(--destructive)}.aria-invalid\:ring-destructive\/20[aria-invalid=true]{--tw-ring-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.aria-invalid\:ring-destructive\/20[aria-invalid=true]{--tw-ring-color:color-mix(in 
oklab,var(--destructive)20%,transparent)}}.data-\[disabled\]\:pointer-events-none[data-disabled]{pointer-events:none}.data-\[disabled\]\:opacity-50[data-disabled]{opacity:.5}.data-\[inset\]\:pl-8[data-inset]{padding-left:calc(var(--spacing)*8)}.data-\[placeholder\]\:text-muted-foreground[data-placeholder]{color:var(--muted-foreground)}.data-\[side\=bottom\]\:translate-y-1[data-side=bottom]{--tw-translate-y:calc(var(--spacing)*1);translate:var(--tw-translate-x)var(--tw-translate-y)}.data-\[side\=bottom\]\:slide-in-from-top-2[data-side=bottom]{--tw-enter-translate-y:calc(2*var(--spacing)*-1)}.data-\[side\=left\]\:-translate-x-1[data-side=left]{--tw-translate-x:calc(var(--spacing)*-1);translate:var(--tw-translate-x)var(--tw-translate-y)}.data-\[side\=left\]\:slide-in-from-right-2[data-side=left]{--tw-enter-translate-x:calc(2*var(--spacing))}.data-\[side\=right\]\:translate-x-1[data-side=right]{--tw-translate-x:calc(var(--spacing)*1);translate:var(--tw-translate-x)var(--tw-translate-y)}.data-\[side\=right\]\:slide-in-from-left-2[data-side=right]{--tw-enter-translate-x:calc(2*var(--spacing)*-1)}.data-\[side\=top\]\:-translate-y-1[data-side=top]{--tw-translate-y:calc(var(--spacing)*-1);translate:var(--tw-translate-x)var(--tw-translate-y)}.data-\[side\=top\]\:slide-in-from-bottom-2[data-side=top]{--tw-enter-translate-y:calc(2*var(--spacing))}.data-\[size\=default\]\:h-9[data-size=default]{height:calc(var(--spacing)*9)}.data-\[size\=sm\]\:h-8[data-size=sm]{height:calc(var(--spacing)*8)}:is(.\*\:data-\[slot\=select-value\]\:line-clamp-1>*)[data-slot=select-value]{-webkit-line-clamp:1;-webkit-box-orient:vertical;display:-webkit-box;overflow:hidden}:is(.\*\:data-\[slot\=select-value\]\:flex>*)[data-slot=select-value]{display:flex}:is(.\*\:data-\[slot\=select-value\]\:items-center>*)[data-slot=select-value]{align-items:center}:is(.\*\:data-\[slot\=select-value\]\:gap-2>*)[data-slot=select-value]{gap:calc(var(--spacing)*2)}.data-\[state\=active\]\:bg-background[data-state=active
]{background-color:var(--background)}.data-\[state\=active\]\:text-foreground[data-state=active]{color:var(--foreground)}.data-\[state\=active\]\:shadow[data-state=active]{--tw-shadow:0 1px 3px 0 var(--tw-shadow-color,#0000001a),0 1px 2px -1px var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.data-\[state\=checked\]\:translate-x-4[data-state=checked]{--tw-translate-x:calc(var(--spacing)*4);translate:var(--tw-translate-x)var(--tw-translate-y)}.data-\[state\=checked\]\:border-primary[data-state=checked]{border-color:var(--primary)}.data-\[state\=checked\]\:bg-primary[data-state=checked]{background-color:var(--primary)}.data-\[state\=checked\]\:text-primary-foreground[data-state=checked]{color:var(--primary-foreground)}.data-\[state\=closed\]\:animate-out[data-state=closed]{animation:exit var(--tw-animation-duration,var(--tw-duration,.15s))var(--tw-ease,ease)var(--tw-animation-delay,0s)var(--tw-animation-iteration-count,1)var(--tw-animation-direction,normal)var(--tw-animation-fill-mode,none)}.data-\[state\=closed\]\:fade-out-0[data-state=closed]{--tw-exit-opacity:0}.data-\[state\=closed\]\:zoom-out-95[data-state=closed]{--tw-exit-scale:.95}.data-\[state\=open\]\:animate-in[data-state=open]{animation:enter 
var(--tw-animation-duration,var(--tw-duration,.15s))var(--tw-ease,ease)var(--tw-animation-delay,0s)var(--tw-animation-iteration-count,1)var(--tw-animation-direction,normal)var(--tw-animation-fill-mode,none)}.data-\[state\=open\]\:bg-accent[data-state=open]{background-color:var(--accent)}.data-\[state\=open\]\:text-accent-foreground[data-state=open]{color:var(--accent-foreground)}.data-\[state\=open\]\:fade-in-0[data-state=open]{--tw-enter-opacity:0}.data-\[state\=open\]\:zoom-in-95[data-state=open]{--tw-enter-scale:.95}.data-\[state\=unchecked\]\:translate-x-0[data-state=unchecked]{--tw-translate-x:calc(var(--spacing)*0);translate:var(--tw-translate-x)var(--tw-translate-y)}.data-\[state\=unchecked\]\:bg-input[data-state=unchecked]{background-color:var(--input)}.data-\[variant\=destructive\]\:text-destructive[data-variant=destructive]{color:var(--destructive)}.data-\[variant\=destructive\]\:focus\:bg-destructive\/10[data-variant=destructive]:focus{background-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.data-\[variant\=destructive\]\:focus\:bg-destructive\/10[data-variant=destructive]:focus{background-color:color-mix(in oklab,var(--destructive)10%,transparent)}}.data-\[variant\=destructive\]\:focus\:text-destructive[data-variant=destructive]:focus{color:var(--destructive)}@media (min-width:40rem){.sm\:col-span-2{grid-column:span 2/span 2}.sm\:w-64{width:calc(var(--spacing)*64)}.sm\:max-w-lg{max-width:var(--container-lg)}.sm\:flex-none{flex:none}.sm\:grid-cols-2{grid-template-columns:repeat(2,minmax(0,1fr))}.sm\:flex-row{flex-direction:row}.sm\:items-center{align-items:center}}@media (min-width:48rem){.md\:col-span-2{grid-column:span 2/span 
2}.md\:col-start-2{grid-column-start:2}.md\:inline{display:inline}.md\:max-w-2xl{max-width:var(--container-2xl)}.md\:grid-cols-2{grid-template-columns:repeat(2,minmax(0,1fr))}.md\:gap-8{gap:calc(var(--spacing)*8)}.md\:text-sm{font-size:var(--text-sm);line-height:var(--tw-leading,var(--text-sm--line-height))}}@media (min-width:64rem){.lg\:col-span-3{grid-column:span 3/span 3}.lg\:max-w-4xl{max-width:var(--container-4xl)}.lg\:grid-cols-3{grid-template-columns:repeat(3,minmax(0,1fr))}.lg\:flex-row{flex-direction:row}.lg\:items-center{align-items:center}.lg\:justify-between{justify-content:space-between}}@media (min-width:80rem){.xl\:col-span-2{grid-column:span 2/span 2}.xl\:col-span-4{grid-column:span 4/span 4}.xl\:max-w-5xl{max-width:var(--container-5xl)}.xl\:grid-cols-4{grid-template-columns:repeat(4,minmax(0,1fr))}}.dark\:scale-0:is(.dark *){--tw-scale-x:0%;--tw-scale-y:0%;--tw-scale-z:0%;scale:var(--tw-scale-x)var(--tw-scale-y)}.dark\:scale-100:is(.dark *){--tw-scale-x:100%;--tw-scale-y:100%;--tw-scale-z:100%;scale:var(--tw-scale-x)var(--tw-scale-y)}.dark\:-rotate-90:is(.dark *){rotate:-90deg}.dark\:rotate-0:is(.dark *){rotate:none}.dark\:\!border-gray-500:is(.dark *){border-color:var(--color-gray-500)!important}.dark\:\!border-gray-600:is(.dark *){border-color:var(--color-gray-600)!important}.dark\:border-\[\#8B5CF6\]:is(.dark *){border-color:#8b5cf6}.dark\:border-\[\#8B5CF6\]\/20:is(.dark *){border-color:#8b5cf633}.dark\:border-\[\#8B5CF6\]\/30:is(.dark *){border-color:#8b5cf64d}.dark\:border-amber-800:is(.dark *){border-color:var(--color-amber-800)}.dark\:border-amber-900:is(.dark *){border-color:var(--color-amber-900)}.dark\:border-blue-400:is(.dark *){border-color:var(--color-blue-400)}.dark\:border-blue-500:is(.dark *){border-color:var(--color-blue-500)}.dark\:border-blue-700:is(.dark *){border-color:var(--color-blue-700)}.dark\:border-blue-800:is(.dark *){border-color:var(--color-blue-800)}.dark\:border-gray-500:is(.dark 
*){border-color:var(--color-gray-500)}.dark\:border-gray-600:is(.dark *){border-color:var(--color-gray-600)}.dark\:border-gray-700:is(.dark *){border-color:var(--color-gray-700)}.dark\:border-green-400:is(.dark *){border-color:var(--color-green-400)}.dark\:border-green-800:is(.dark *){border-color:var(--color-green-800)}.dark\:border-input:is(.dark *){border-color:var(--input)}.dark\:border-orange-400:is(.dark *){border-color:var(--color-orange-400)}.dark\:border-orange-700:is(.dark *){border-color:var(--color-orange-700)}.dark\:border-orange-800:is(.dark *){border-color:var(--color-orange-800)}.dark\:border-red-400:is(.dark *){border-color:var(--color-red-400)}.dark\:border-red-800:is(.dark *){border-color:var(--color-red-800)}.dark\:\!bg-gray-800\/90:is(.dark *){background-color:#1e2939e6!important}@supports (color:color-mix(in lab,red,red)){.dark\:\!bg-gray-800\/90:is(.dark *){background-color:color-mix(in oklab,var(--color-gray-800)90%,transparent)!important}}.dark\:bg-\[\#8B5CF6\]:is(.dark *){background-color:#8b5cf6}.dark\:bg-\[\#8B5CF6\]\/10:is(.dark *){background-color:#8b5cf61a}.dark\:bg-amber-600:is(.dark *){background-color:var(--color-amber-600)}.dark\:bg-amber-950\/20:is(.dark *){background-color:#46190133}@supports (color:color-mix(in lab,red,red)){.dark\:bg-amber-950\/20:is(.dark *){background-color:color-mix(in oklab,var(--color-amber-950)20%,transparent)}}.dark\:bg-amber-950\/50:is(.dark *){background-color:#46190180}@supports (color:color-mix(in lab,red,red)){.dark\:bg-amber-950\/50:is(.dark *){background-color:color-mix(in oklab,var(--color-amber-950)50%,transparent)}}.dark\:bg-blue-500\/10:is(.dark *){background-color:#3080ff1a}@supports (color:color-mix(in lab,red,red)){.dark\:bg-blue-500\/10:is(.dark *){background-color:color-mix(in oklab,var(--color-blue-500)10%,transparent)}}.dark\:bg-blue-600:is(.dark *){background-color:var(--color-blue-600)}.dark\:bg-blue-900:is(.dark 
*){background-color:var(--color-blue-900)}.dark\:bg-blue-900\/20:is(.dark *){background-color:#1c398e33}@supports (color:color-mix(in lab,red,red)){.dark\:bg-blue-900\/20:is(.dark *){background-color:color-mix(in oklab,var(--color-blue-900)20%,transparent)}}.dark\:bg-blue-950\/20:is(.dark *){background-color:#16245633}@supports (color:color-mix(in lab,red,red)){.dark\:bg-blue-950\/20:is(.dark *){background-color:color-mix(in oklab,var(--color-blue-950)20%,transparent)}}.dark\:bg-blue-950\/40:is(.dark *){background-color:#16245666}@supports (color:color-mix(in lab,red,red)){.dark\:bg-blue-950\/40:is(.dark *){background-color:color-mix(in oklab,var(--color-blue-950)40%,transparent)}}.dark\:bg-blue-950\/50:is(.dark *){background-color:#16245680}@supports (color:color-mix(in lab,red,red)){.dark\:bg-blue-950\/50:is(.dark *){background-color:color-mix(in oklab,var(--color-blue-950)50%,transparent)}}.dark\:bg-blue-950\/95:is(.dark *){background-color:#162456f2}@supports (color:color-mix(in lab,red,red)){.dark\:bg-blue-950\/95:is(.dark *){background-color:color-mix(in oklab,var(--color-blue-950)95%,transparent)}}.dark\:bg-card:is(.dark *){background-color:var(--card)}.dark\:bg-destructive\/60:is(.dark *){background-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.dark\:bg-destructive\/60:is(.dark *){background-color:color-mix(in oklab,var(--destructive)60%,transparent)}}.dark\:bg-emerald-600:is(.dark *){background-color:var(--color-emerald-600)}.dark\:bg-foreground\/10:is(.dark *){background-color:var(--foreground)}@supports (color:color-mix(in lab,red,red)){.dark\:bg-foreground\/10:is(.dark *){background-color:color-mix(in oklab,var(--foreground)10%,transparent)}}.dark\:bg-gray-500:is(.dark *){background-color:var(--color-gray-500)}.dark\:bg-gray-800:is(.dark *){background-color:var(--color-gray-800)}.dark\:bg-gray-800\/90:is(.dark *){background-color:#1e2939e6}@supports (color:color-mix(in lab,red,red)){.dark\:bg-gray-800\/90:is(.dark 
*){background-color:color-mix(in oklab,var(--color-gray-800)90%,transparent)}}.dark\:bg-gray-900:is(.dark *){background-color:var(--color-gray-900)}.dark\:bg-gray-900\/30:is(.dark *){background-color:#1018284d}@supports (color:color-mix(in lab,red,red)){.dark\:bg-gray-900\/30:is(.dark *){background-color:color-mix(in oklab,var(--color-gray-900)30%,transparent)}}.dark\:bg-green-400:is(.dark *){background-color:var(--color-green-400)}.dark\:bg-green-500\/10:is(.dark *){background-color:#00c7581a}@supports (color:color-mix(in lab,red,red)){.dark\:bg-green-500\/10:is(.dark *){background-color:color-mix(in oklab,var(--color-green-500)10%,transparent)}}.dark\:bg-green-900:is(.dark *){background-color:var(--color-green-900)}.dark\:bg-green-950:is(.dark *){background-color:var(--color-green-950)}.dark\:bg-green-950\/20:is(.dark *){background-color:#032e1533}@supports (color:color-mix(in lab,red,red)){.dark\:bg-green-950\/20:is(.dark *){background-color:color-mix(in oklab,var(--color-green-950)20%,transparent)}}.dark\:bg-green-950\/50:is(.dark *){background-color:#032e1580}@supports (color:color-mix(in lab,red,red)){.dark\:bg-green-950\/50:is(.dark *){background-color:color-mix(in oklab,var(--color-green-950)50%,transparent)}}.dark\:bg-input\/30:is(.dark *){background-color:var(--input)}@supports (color:color-mix(in lab,red,red)){.dark\:bg-input\/30:is(.dark *){background-color:color-mix(in oklab,var(--input)30%,transparent)}}.dark\:bg-orange-400:is(.dark *){background-color:var(--color-orange-400)}.dark\:bg-orange-500\/10:is(.dark *){background-color:#fe6e001a}@supports (color:color-mix(in lab,red,red)){.dark\:bg-orange-500\/10:is(.dark *){background-color:color-mix(in oklab,var(--color-orange-500)10%,transparent)}}.dark\:bg-orange-600:is(.dark *){background-color:var(--color-orange-600)}.dark\:bg-orange-900:is(.dark *){background-color:var(--color-orange-900)}.dark\:bg-orange-950:is(.dark *){background-color:var(--color-orange-950)}.dark\:bg-orange-950\/20:is(.dark 
*){background-color:#44130633}@supports (color:color-mix(in lab,red,red)){.dark\:bg-orange-950\/20:is(.dark *){background-color:color-mix(in oklab,var(--color-orange-950)20%,transparent)}}.dark\:bg-orange-950\/30:is(.dark *){background-color:#4413064d}@supports (color:color-mix(in lab,red,red)){.dark\:bg-orange-950\/30:is(.dark *){background-color:color-mix(in oklab,var(--color-orange-950)30%,transparent)}}.dark\:bg-orange-950\/50:is(.dark *){background-color:#44130680}@supports (color:color-mix(in lab,red,red)){.dark\:bg-orange-950\/50:is(.dark *){background-color:color-mix(in oklab,var(--color-orange-950)50%,transparent)}}.dark\:bg-purple-600:is(.dark *){background-color:var(--color-purple-600)}.dark\:bg-purple-900:is(.dark *){background-color:var(--color-purple-900)}.dark\:bg-red-400:is(.dark *){background-color:var(--color-red-400)}.dark\:bg-red-900:is(.dark *){background-color:var(--color-red-900)}.dark\:bg-red-950:is(.dark *){background-color:var(--color-red-950)}.dark\:bg-red-950\/20:is(.dark *){background-color:#46080933}@supports (color:color-mix(in lab,red,red)){.dark\:bg-red-950\/20:is(.dark *){background-color:color-mix(in oklab,var(--color-red-950)20%,transparent)}}.dark\:text-\[\#8B5CF6\]:is(.dark *){color:#8b5cf6}.dark\:text-amber-100:is(.dark *){color:var(--color-amber-100)}.dark\:text-amber-200:is(.dark *){color:var(--color-amber-200)}.dark\:text-amber-300:is(.dark *){color:var(--color-amber-300)}.dark\:text-amber-400:is(.dark *){color:var(--color-amber-400)}.dark\:text-amber-400\/80:is(.dark *){color:#fcbb00cc}@supports (color:color-mix(in lab,red,red)){.dark\:text-amber-400\/80:is(.dark *){color:color-mix(in oklab,var(--color-amber-400)80%,transparent)}}.dark\:text-amber-500:is(.dark *){color:var(--color-amber-500)}.dark\:text-blue-100:is(.dark *){color:var(--color-blue-100)}.dark\:text-blue-200:is(.dark *){color:var(--color-blue-200)}.dark\:text-blue-300:is(.dark *){color:var(--color-blue-300)}.dark\:text-blue-400:is(.dark 
*){color:var(--color-blue-400)}.dark\:text-blue-400\/70:is(.dark *){color:#54a2ffb3}@supports (color:color-mix(in lab,red,red)){.dark\:text-blue-400\/70:is(.dark *){color:color-mix(in oklab,var(--color-blue-400)70%,transparent)}}.dark\:text-blue-500:is(.dark *){color:var(--color-blue-500)}.dark\:text-emerald-400:is(.dark *){color:var(--color-emerald-400)}.dark\:text-gray-100:is(.dark *){color:var(--color-gray-100)}.dark\:text-gray-300:is(.dark *){color:var(--color-gray-300)}.dark\:text-gray-400:is(.dark *){color:var(--color-gray-400)}.dark\:text-green-100:is(.dark *){color:var(--color-green-100)}.dark\:text-green-200:is(.dark *){color:var(--color-green-200)}.dark\:text-green-300:is(.dark *){color:var(--color-green-300)}.dark\:text-green-400:is(.dark *){color:var(--color-green-400)}.dark\:text-orange-100:is(.dark *){color:var(--color-orange-100)}.dark\:text-orange-200:is(.dark *){color:var(--color-orange-200)}.dark\:text-orange-300:is(.dark *){color:var(--color-orange-300)}.dark\:text-orange-400:is(.dark *){color:var(--color-orange-400)}.dark\:text-purple-200:is(.dark *){color:var(--color-purple-200)}.dark\:text-purple-400:is(.dark *){color:var(--color-purple-400)}.dark\:text-red-200:is(.dark *){color:var(--color-red-200)}.dark\:text-red-400:is(.dark *){color:var(--color-red-400)}.dark\:text-yellow-400:is(.dark *){color:var(--color-yellow-400)}.dark\:opacity-30:is(.dark *){opacity:.3}@media (hover:hover){.dark\:hover\:border-gray-600:is(.dark *):hover{border-color:var(--color-gray-600)}.dark\:hover\:bg-accent\/50:is(.dark *):hover{background-color:var(--accent)}@supports (color:color-mix(in lab,red,red)){.dark\:hover\:bg-accent\/50:is(.dark *):hover{background-color:color-mix(in oklab,var(--accent)50%,transparent)}}.dark\:hover\:bg-amber-950\/30:is(.dark *):hover{background-color:#4619014d}@supports (color:color-mix(in lab,red,red)){.dark\:hover\:bg-amber-950\/30:is(.dark *):hover{background-color:color-mix(in 
oklab,var(--color-amber-950)30%,transparent)}}.dark\:hover\:bg-gray-800:is(.dark *):hover{background-color:var(--color-gray-800)}.dark\:hover\:bg-input\/50:is(.dark *):hover{background-color:var(--input)}@supports (color:color-mix(in lab,red,red)){.dark\:hover\:bg-input\/50:is(.dark *):hover{background-color:color-mix(in oklab,var(--input)50%,transparent)}}.dark\:hover\:bg-orange-950\/40:is(.dark *):hover{background-color:#44130666}@supports (color:color-mix(in lab,red,red)){.dark\:hover\:bg-orange-950\/40:is(.dark *):hover{background-color:color-mix(in oklab,var(--color-orange-950)40%,transparent)}}.dark\:hover\:bg-red-900\/20:is(.dark *):hover{background-color:#82181a33}@supports (color:color-mix(in lab,red,red)){.dark\:hover\:bg-red-900\/20:is(.dark *):hover{background-color:color-mix(in oklab,var(--color-red-900)20%,transparent)}}.dark\:hover\:text-orange-200:is(.dark *):hover{color:var(--color-orange-200)}}.dark\:focus-visible\:ring-destructive\/40:is(.dark *):focus-visible{--tw-ring-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.dark\:focus-visible\:ring-destructive\/40:is(.dark *):focus-visible{--tw-ring-color:color-mix(in oklab,var(--destructive)40%,transparent)}}.dark\:aria-invalid\:ring-destructive\/40:is(.dark *)[aria-invalid=true]{--tw-ring-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.dark\:aria-invalid\:ring-destructive\/40:is(.dark *)[aria-invalid=true]{--tw-ring-color:color-mix(in oklab,var(--destructive)40%,transparent)}}.dark\:data-\[state\=checked\]\:bg-primary:is(.dark *)[data-state=checked]{background-color:var(--primary)}.dark\:data-\[variant\=destructive\]\:focus\:bg-destructive\/20:is(.dark *)[data-variant=destructive]:focus{background-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.dark\:data-\[variant\=destructive\]\:focus\:bg-destructive\/20:is(.dark *)[data-variant=destructive]:focus{background-color:color-mix(in 
oklab,var(--destructive)20%,transparent)}}.\[\&_p\]\:leading-relaxed p{--tw-leading:var(--leading-relaxed);line-height:var(--leading-relaxed)}.\[\&_svg\]\:pointer-events-none svg{pointer-events:none}.\[\&_svg\]\:shrink-0 svg{flex-shrink:0}.\[\&_svg\:not\(\[class\*\=\'size-\'\]\)\]\:size-4 svg:not([class*=size-]){width:calc(var(--spacing)*4);height:calc(var(--spacing)*4)}.\[\&_svg\:not\(\[class\*\=\'text-\'\]\)\]\:text-muted-foreground svg:not([class*=text-]){color:var(--muted-foreground)}.\[\.border-b\]\:pb-6.border-b{padding-bottom:calc(var(--spacing)*6)}.\[\.border-t\]\:pt-6.border-t{padding-top:calc(var(--spacing)*6)}:is(.\*\:\[span\]\:last\:flex>*):is(span):last-child{display:flex}:is(.\*\:\[span\]\:last\:items-center>*):is(span):last-child{align-items:center}:is(.\*\:\[span\]\:last\:gap-2>*):is(span):last-child{gap:calc(var(--spacing)*2)}:is(.data-\[variant\=destructive\]\:\*\:\[svg\]\:\!text-destructive[data-variant=destructive]>*):is(svg){color:var(--destructive)!important}.\[\&\>svg\]\:absolute>svg{position:absolute}.\[\&\>svg\]\:top-4>svg{top:calc(var(--spacing)*4)}.\[\&\>svg\]\:left-4>svg{left:calc(var(--spacing)*4)}.\[\&\>svg\]\:text-foreground>svg{color:var(--foreground)}.\[\&\>svg\+div\]\:translate-y-\[-3px\]>svg+div{--tw-translate-y:-3px;translate:var(--tw-translate-x)var(--tw-translate-y)}.\[\&\>svg\~\*\]\:pl-7>svg~*{padding-left:calc(var(--spacing)*7)}}@property --tw-animation-delay{syntax:"*";inherits:false;initial-value:0s}@property --tw-animation-direction{syntax:"*";inherits:false;initial-value:normal}@property --tw-animation-duration{syntax:"*";inherits:false}@property --tw-animation-fill-mode{syntax:"*";inherits:false;initial-value:none}@property --tw-animation-iteration-count{syntax:"*";inherits:false;initial-value:1}@property --tw-enter-blur{syntax:"*";inherits:false;initial-value:0}@property --tw-enter-opacity{syntax:"*";inherits:false;initial-value:1}@property --tw-enter-rotate{syntax:"*";inherits:false;initial-value:0}@property 
--tw-enter-scale{syntax:"*";inherits:false;initial-value:1}@property --tw-enter-translate-x{syntax:"*";inherits:false;initial-value:0}@property --tw-enter-translate-y{syntax:"*";inherits:false;initial-value:0}@property --tw-exit-blur{syntax:"*";inherits:false;initial-value:0}@property --tw-exit-opacity{syntax:"*";inherits:false;initial-value:1}@property --tw-exit-rotate{syntax:"*";inherits:false;initial-value:0}@property --tw-exit-scale{syntax:"*";inherits:false;initial-value:1}@property --tw-exit-translate-x{syntax:"*";inherits:false;initial-value:0}@property --tw-exit-translate-y{syntax:"*";inherits:false;initial-value:0}:root{--radius:.625rem;--background:oklch(100% 0 0);--foreground:oklch(14.5% 0 0);--card:oklch(100% 0 0);--card-foreground:oklch(14.5% 0 0);--popover:oklch(100% 0 0);--popover-foreground:oklch(14.5% 0 0);--primary:oklch(48% .18 290);--primary-foreground:oklch(98.5% 0 0);--secondary:oklch(97% 0 0);--secondary-foreground:oklch(20.5% 0 0);--muted:oklch(97% 0 0);--muted-foreground:oklch(55.6% 0 0);--accent:oklch(97% 0 0);--accent-foreground:oklch(20.5% 0 0);--destructive:oklch(57.7% .245 27.325);--border:oklch(92.2% 0 0);--input:oklch(92.2% 0 0);--ring:oklch(70.8% 0 0);--chart-1:oklch(64.6% .222 41.116);--chart-2:oklch(60% .118 184.704);--chart-3:oklch(39.8% .07 227.392);--chart-4:oklch(82.8% .189 84.429);--chart-5:oklch(76.9% .188 70.08);--sidebar:oklch(98.5% 0 0);--sidebar-foreground:oklch(14.5% 0 0);--sidebar-primary:oklch(20.5% 0 0);--sidebar-primary-foreground:oklch(98.5% 0 0);--sidebar-accent:oklch(97% 0 0);--sidebar-accent-foreground:oklch(20.5% 0 0);--sidebar-border:oklch(92.2% 0 0);--sidebar-ring:oklch(70.8% 0 0)}.dark{--background:oklch(14.5% 0 0);--foreground:oklch(98.5% 0 0);--card:oklch(20.5% 0 0);--card-foreground:oklch(98.5% 0 0);--popover:oklch(20.5% 0 0);--popover-foreground:oklch(98.5% 0 0);--primary:oklch(62% .2 290);--primary-foreground:oklch(98.5% 0 0);--secondary:oklch(26.9% 0 0);--secondary-foreground:oklch(98.5% 0 
0);--muted:oklch(26.9% 0 0);--muted-foreground:oklch(70.8% 0 0);--accent:oklch(26.9% 0 0);--accent-foreground:oklch(98.5% 0 0);--destructive:oklch(70.4% .191 22.216);--border:oklch(100% 0 0/.1);--input:oklch(100% 0 0/.15);--ring:oklch(55.6% 0 0);--chart-1:oklch(48.8% .243 264.376);--chart-2:oklch(69.6% .17 162.48);--chart-3:oklch(76.9% .188 70.08);--chart-4:oklch(62.7% .265 303.9);--chart-5:oklch(64.5% .246 16.439);--sidebar:oklch(20.5% 0 0);--sidebar-foreground:oklch(98.5% 0 0);--sidebar-primary:oklch(48.8% .243 264.376);--sidebar-primary-foreground:oklch(98.5% 0 0);--sidebar-accent:oklch(26.9% 0 0);--sidebar-accent-foreground:oklch(98.5% 0 0);--sidebar-border:oklch(100% 0 0/.1);--sidebar-ring:oklch(55.6% 0 0)}.workflow-chat-view .border-green-200{border-color:var(--color-emerald-200)}.workflow-chat-view .bg-green-50{background-color:var(--color-emerald-50)}.workflow-chat-view .bg-green-100{background-color:var(--color-emerald-100)}.workflow-chat-view .text-green-600{color:var(--color-emerald-600)}.workflow-chat-view .text-green-700{color:var(--color-emerald-700)}.workflow-chat-view .text-green-800{color:var(--color-emerald-800)}.highlight-attention{animation:1s ease-out highlight-flash}@keyframes highlight-flash{0%{background-color:#fb923c4d;transform:scale(1.02)}to{background-color:#0000;transform:scale(1)}}.hil-waiting-glow{animation:2s infinite pulse-glow;box-shadow:0 0 #fb923c66,inset 0 0 0 1px #fb923c33}@keyframes pulse-glow{0%,to{box-shadow:0 0 #fb923c66,inset 0 0 0 1px #fb923c33}50%{box-shadow:0 0 20px 5px #fb923c33,inset 0 0 0 2px #fb923c4d}}@property --tw-translate-x{syntax:"*";inherits:false;initial-value:0}@property --tw-translate-y{syntax:"*";inherits:false;initial-value:0}@property --tw-translate-z{syntax:"*";inherits:false;initial-value:0}@property --tw-scale-x{syntax:"*";inherits:false;initial-value:1}@property --tw-scale-y{syntax:"*";inherits:false;initial-value:1}@property --tw-scale-z{syntax:"*";inherits:false;initial-value:1}@property 
--tw-rotate-x{syntax:"*";inherits:false}@property --tw-rotate-y{syntax:"*";inherits:false}@property --tw-rotate-z{syntax:"*";inherits:false}@property --tw-skew-x{syntax:"*";inherits:false}@property --tw-skew-y{syntax:"*";inherits:false}@property --tw-space-y-reverse{syntax:"*";inherits:false;initial-value:0}@property --tw-space-x-reverse{syntax:"*";inherits:false;initial-value:0}@property --tw-border-style{syntax:"*";inherits:false;initial-value:solid}@property --tw-leading{syntax:"*";inherits:false}@property --tw-font-weight{syntax:"*";inherits:false}@property --tw-tracking{syntax:"*";inherits:false}@property --tw-shadow{syntax:"*";inherits:false;initial-value:0 0 #0000}@property --tw-shadow-color{syntax:"*";inherits:false}@property --tw-shadow-alpha{syntax:"";inherits:false;initial-value:100%}@property --tw-inset-shadow{syntax:"*";inherits:false;initial-value:0 0 #0000}@property --tw-inset-shadow-color{syntax:"*";inherits:false}@property --tw-inset-shadow-alpha{syntax:"";inherits:false;initial-value:100%}@property --tw-ring-color{syntax:"*";inherits:false}@property --tw-ring-shadow{syntax:"*";inherits:false;initial-value:0 0 #0000}@property --tw-inset-ring-color{syntax:"*";inherits:false}@property --tw-inset-ring-shadow{syntax:"*";inherits:false;initial-value:0 0 #0000}@property --tw-ring-inset{syntax:"*";inherits:false}@property --tw-ring-offset-width{syntax:"";inherits:false;initial-value:0}@property --tw-ring-offset-color{syntax:"*";inherits:false;initial-value:#fff}@property --tw-ring-offset-shadow{syntax:"*";inherits:false;initial-value:0 0 #0000}@property --tw-outline-style{syntax:"*";inherits:false;initial-value:solid}@property --tw-blur{syntax:"*";inherits:false}@property --tw-brightness{syntax:"*";inherits:false}@property --tw-contrast{syntax:"*";inherits:false}@property --tw-grayscale{syntax:"*";inherits:false}@property --tw-hue-rotate{syntax:"*";inherits:false}@property --tw-invert{syntax:"*";inherits:false}@property 
--tw-opacity{syntax:"*";inherits:false}@property --tw-saturate{syntax:"*";inherits:false}@property --tw-sepia{syntax:"*";inherits:false}@property --tw-drop-shadow{syntax:"*";inherits:false}@property --tw-drop-shadow-color{syntax:"*";inherits:false}@property --tw-drop-shadow-alpha{syntax:"";inherits:false;initial-value:100%}@property --tw-drop-shadow-size{syntax:"*";inherits:false}@property --tw-backdrop-blur{syntax:"*";inherits:false}@property --tw-backdrop-brightness{syntax:"*";inherits:false}@property --tw-backdrop-contrast{syntax:"*";inherits:false}@property --tw-backdrop-grayscale{syntax:"*";inherits:false}@property --tw-backdrop-hue-rotate{syntax:"*";inherits:false}@property --tw-backdrop-invert{syntax:"*";inherits:false}@property --tw-backdrop-opacity{syntax:"*";inherits:false}@property --tw-backdrop-saturate{syntax:"*";inherits:false}@property --tw-backdrop-sepia{syntax:"*";inherits:false}@property --tw-duration{syntax:"*";inherits:false}@property --tw-ease{syntax:"*";inherits:false}@keyframes spin{to{transform:rotate(360deg)}}@keyframes pulse{50%{opacity:.5}}@keyframes bounce{0%,to{animation-timing-function:cubic-bezier(.8,0,1,1);transform:translateY(-25%)}50%{animation-timing-function:cubic-bezier(0,0,.2,1);transform:none}}@keyframes enter{0%{opacity:var(--tw-enter-opacity,1);transform:translate3d(var(--tw-enter-translate-x,0),var(--tw-enter-translate-y,0),0)scale3d(var(--tw-enter-scale,1),var(--tw-enter-scale,1),var(--tw-enter-scale,1))rotate(var(--tw-enter-rotate,0));filter:blur(var(--tw-enter-blur,0))}}@keyframes exit{to{opacity:var(--tw-exit-opacity,1);transform:translate3d(var(--tw-exit-translate-x,0),var(--tw-exit-translate-y,0),0)scale3d(var(--tw-exit-scale,1),var(--tw-exit-scale,1),var(--tw-exit-scale,1))rotate(var(--tw-exit-rotate,0));filter:blur(var(--tw-exit-blur,0))}}.react-flow{direction:ltr;--xy-edge-stroke-default: #b1b1b7;--xy-edge-stroke-width-default: 1;--xy-edge-stroke-selected-default: #555;--xy-connectionline-stroke-default: 
#b1b1b7;--xy-connectionline-stroke-width-default: 1;--xy-attribution-background-color-default: rgba(255, 255, 255, .5);--xy-minimap-background-color-default: #fff;--xy-minimap-mask-background-color-default: rgba(240, 240, 240, .6);--xy-minimap-mask-stroke-color-default: transparent;--xy-minimap-mask-stroke-width-default: 1;--xy-minimap-node-background-color-default: #e2e2e2;--xy-minimap-node-stroke-color-default: transparent;--xy-minimap-node-stroke-width-default: 2;--xy-background-color-default: transparent;--xy-background-pattern-dots-color-default: #91919a;--xy-background-pattern-lines-color-default: #eee;--xy-background-pattern-cross-color-default: #e2e2e2;background-color:var(--xy-background-color, var(--xy-background-color-default));--xy-node-color-default: inherit;--xy-node-border-default: 1px solid #1a192b;--xy-node-background-color-default: #fff;--xy-node-group-background-color-default: rgba(240, 240, 240, .25);--xy-node-boxshadow-hover-default: 0 1px 4px 1px rgba(0, 0, 0, .08);--xy-node-boxshadow-selected-default: 0 0 0 .5px #1a192b;--xy-node-border-radius-default: 3px;--xy-handle-background-color-default: #1a192b;--xy-handle-border-color-default: #fff;--xy-selection-background-color-default: rgba(0, 89, 220, .08);--xy-selection-border-default: 1px dotted rgba(0, 89, 220, .8);--xy-controls-button-background-color-default: #fefefe;--xy-controls-button-background-color-hover-default: #f4f4f4;--xy-controls-button-color-default: inherit;--xy-controls-button-color-hover-default: inherit;--xy-controls-button-border-color-default: #eee;--xy-controls-box-shadow-default: 0 0 2px 1px rgba(0, 0, 0, .08);--xy-edge-label-background-color-default: #ffffff;--xy-edge-label-color-default: inherit;--xy-resize-background-color-default: #3367d9}.react-flow.dark{--xy-edge-stroke-default: #3e3e3e;--xy-edge-stroke-width-default: 1;--xy-edge-stroke-selected-default: #727272;--xy-connectionline-stroke-default: #b1b1b7;--xy-connectionline-stroke-width-default: 
1;--xy-attribution-background-color-default: rgba(150, 150, 150, .25);--xy-minimap-background-color-default: #141414;--xy-minimap-mask-background-color-default: rgba(60, 60, 60, .6);--xy-minimap-mask-stroke-color-default: transparent;--xy-minimap-mask-stroke-width-default: 1;--xy-minimap-node-background-color-default: #2b2b2b;--xy-minimap-node-stroke-color-default: transparent;--xy-minimap-node-stroke-width-default: 2;--xy-background-color-default: #141414;--xy-background-pattern-dots-color-default: #777;--xy-background-pattern-lines-color-default: #777;--xy-background-pattern-cross-color-default: #777;--xy-node-color-default: #f8f8f8;--xy-node-border-default: 1px solid #3c3c3c;--xy-node-background-color-default: #1e1e1e;--xy-node-group-background-color-default: rgba(240, 240, 240, .25);--xy-node-boxshadow-hover-default: 0 1px 4px 1px rgba(255, 255, 255, .08);--xy-node-boxshadow-selected-default: 0 0 0 .5px #999;--xy-handle-background-color-default: #bebebe;--xy-handle-border-color-default: #1e1e1e;--xy-selection-background-color-default: rgba(200, 200, 220, .08);--xy-selection-border-default: 1px dotted rgba(200, 200, 220, .8);--xy-controls-button-background-color-default: #2b2b2b;--xy-controls-button-background-color-hover-default: #3e3e3e;--xy-controls-button-color-default: #f8f8f8;--xy-controls-button-color-hover-default: #fff;--xy-controls-button-border-color-default: #5b5b5b;--xy-controls-box-shadow-default: 0 0 2px 1px rgba(0, 0, 0, .08);--xy-edge-label-background-color-default: #141414;--xy-edge-label-color-default: #f8f8f8}.react-flow__background{background-color:var(--xy-background-color-props, var(--xy-background-color, var(--xy-background-color-default)));pointer-events:none;z-index:-1}.react-flow__container{position:absolute;width:100%;height:100%;top:0;left:0}.react-flow__pane{z-index:1}.react-flow__pane.draggable{cursor:grab}.react-flow__pane.dragging{cursor:grabbing}.react-flow__pane.selection{cursor:pointer}.react-flow__viewport{transform-origin:0 
0;z-index:2;pointer-events:none}.react-flow__renderer{z-index:4}.react-flow__selection{z-index:6}.react-flow__nodesselection-rect:focus,.react-flow__nodesselection-rect:focus-visible{outline:none}.react-flow__edge-path{stroke:var(--xy-edge-stroke, var(--xy-edge-stroke-default));stroke-width:var(--xy-edge-stroke-width, var(--xy-edge-stroke-width-default));fill:none}.react-flow__connection-path{stroke:var(--xy-connectionline-stroke, var(--xy-connectionline-stroke-default));stroke-width:var(--xy-connectionline-stroke-width, var(--xy-connectionline-stroke-width-default));fill:none}.react-flow .react-flow__edges{position:absolute}.react-flow .react-flow__edges svg{overflow:visible;position:absolute;pointer-events:none}.react-flow__edge{pointer-events:visibleStroke}.react-flow__edge.selectable{cursor:pointer}.react-flow__edge.animated path{stroke-dasharray:5;animation:dashdraw .5s linear infinite}.react-flow__edge.animated path.react-flow__edge-interaction{stroke-dasharray:none;animation:none}.react-flow__edge.inactive{pointer-events:none}.react-flow__edge.selected,.react-flow__edge:focus,.react-flow__edge:focus-visible{outline:none}.react-flow__edge.selected .react-flow__edge-path,.react-flow__edge.selectable:focus .react-flow__edge-path,.react-flow__edge.selectable:focus-visible .react-flow__edge-path{stroke:var(--xy-edge-stroke-selected, var(--xy-edge-stroke-selected-default))}.react-flow__edge-textwrapper{pointer-events:all}.react-flow__edge .react-flow__edge-text{pointer-events:none;-webkit-user-select:none;-moz-user-select:none;user-select:none}.react-flow__arrowhead polyline{stroke:var(--xy-edge-stroke, var(--xy-edge-stroke-default))}.react-flow__arrowhead polyline.arrowclosed{fill:var(--xy-edge-stroke, var(--xy-edge-stroke-default))}.react-flow__connection{pointer-events:none}.react-flow__connection .animated{stroke-dasharray:5;animation:dashdraw .5s linear 
infinite}svg.react-flow__connectionline{z-index:1001;overflow:visible;position:absolute}.react-flow__nodes{pointer-events:none;transform-origin:0 0}.react-flow__node{position:absolute;-webkit-user-select:none;-moz-user-select:none;user-select:none;pointer-events:all;transform-origin:0 0;box-sizing:border-box;cursor:default}.react-flow__node.selectable{cursor:pointer}.react-flow__node.draggable{cursor:grab;pointer-events:all}.react-flow__node.draggable.dragging{cursor:grabbing}.react-flow__nodesselection{z-index:3;transform-origin:left top;pointer-events:none}.react-flow__nodesselection-rect{position:absolute;pointer-events:all;cursor:grab}.react-flow__handle{position:absolute;pointer-events:none;min-width:5px;min-height:5px;width:6px;height:6px;background-color:var(--xy-handle-background-color, var(--xy-handle-background-color-default));border:1px solid var(--xy-handle-border-color, var(--xy-handle-border-color-default));border-radius:100%}.react-flow__handle.connectingfrom{pointer-events:all}.react-flow__handle.connectionindicator{pointer-events:all;cursor:crosshair}.react-flow__handle-bottom{top:auto;left:50%;bottom:0;transform:translate(-50%,50%)}.react-flow__handle-top{top:0;left:50%;transform:translate(-50%,-50%)}.react-flow__handle-left{top:50%;left:0;transform:translate(-50%,-50%)}.react-flow__handle-right{top:50%;right:0;transform:translate(50%,-50%)}.react-flow__edgeupdater{cursor:move;pointer-events:all}.react-flow__pane.selection .react-flow__panel{pointer-events:none}.react-flow__panel{position:absolute;z-index:5;margin:15px}.react-flow__panel.top{top:0}.react-flow__panel.bottom{bottom:0}.react-flow__panel.top.center,.react-flow__panel.bottom.center{left:50%;transform:translate(-15px) translate(-50%)}.react-flow__panel.left{left:0}.react-flow__panel.right{right:0}.react-flow__panel.left.center,.react-flow__panel.right.center{top:50%;transform:translateY(-15px) 
translateY(-50%)}.react-flow__attribution{font-size:10px;background:var(--xy-attribution-background-color, var(--xy-attribution-background-color-default));padding:2px 3px;margin:0}.react-flow__attribution a{text-decoration:none;color:#999}@keyframes dashdraw{0%{stroke-dashoffset:10}}.react-flow__edgelabel-renderer{position:absolute;width:100%;height:100%;pointer-events:none;-webkit-user-select:none;-moz-user-select:none;user-select:none;left:0;top:0}.react-flow__viewport-portal{position:absolute;width:100%;height:100%;left:0;top:0;-webkit-user-select:none;-moz-user-select:none;user-select:none}.react-flow__minimap{background:var( --xy-minimap-background-color-props, var(--xy-minimap-background-color, var(--xy-minimap-background-color-default)) )}.react-flow__minimap-svg{display:block}.react-flow__minimap-mask{fill:var( --xy-minimap-mask-background-color-props, var(--xy-minimap-mask-background-color, var(--xy-minimap-mask-background-color-default)) );stroke:var( --xy-minimap-mask-stroke-color-props, var(--xy-minimap-mask-stroke-color, var(--xy-minimap-mask-stroke-color-default)) );stroke-width:var( --xy-minimap-mask-stroke-width-props, var(--xy-minimap-mask-stroke-width, var(--xy-minimap-mask-stroke-width-default)) )}.react-flow__minimap-node{fill:var( --xy-minimap-node-background-color-props, var(--xy-minimap-node-background-color, var(--xy-minimap-node-background-color-default)) );stroke:var( --xy-minimap-node-stroke-color-props, var(--xy-minimap-node-stroke-color, var(--xy-minimap-node-stroke-color-default)) );stroke-width:var( --xy-minimap-node-stroke-width-props, var(--xy-minimap-node-stroke-width, var(--xy-minimap-node-stroke-width-default)) )}.react-flow__background-pattern.dots{fill:var( --xy-background-pattern-color-props, var(--xy-background-pattern-color, var(--xy-background-pattern-dots-color-default)) )}.react-flow__background-pattern.lines{stroke:var( --xy-background-pattern-color-props, var(--xy-background-pattern-color, 
var(--xy-background-pattern-lines-color-default)) )}.react-flow__background-pattern.cross{stroke:var( --xy-background-pattern-color-props, var(--xy-background-pattern-color, var(--xy-background-pattern-cross-color-default)) )}.react-flow__controls{display:flex;flex-direction:column;box-shadow:var(--xy-controls-box-shadow, var(--xy-controls-box-shadow-default))}.react-flow__controls.horizontal{flex-direction:row}.react-flow__controls-button{display:flex;justify-content:center;align-items:center;height:26px;width:26px;padding:4px;border:none;background:var(--xy-controls-button-background-color, var(--xy-controls-button-background-color-default));border-bottom:1px solid var( --xy-controls-button-border-color-props, var(--xy-controls-button-border-color, var(--xy-controls-button-border-color-default)) );color:var( --xy-controls-button-color-props, var(--xy-controls-button-color, var(--xy-controls-button-color-default)) );cursor:pointer;-webkit-user-select:none;-moz-user-select:none;user-select:none}.react-flow__controls-button svg{width:100%;max-width:12px;max-height:12px;fill:currentColor}.react-flow__edge.updating .react-flow__edge-path{stroke:#777}.react-flow__edge-text{font-size:10px}.react-flow__node.selectable:focus,.react-flow__node.selectable:focus-visible{outline:none}.react-flow__node-input,.react-flow__node-default,.react-flow__node-output,.react-flow__node-group{padding:10px;border-radius:var(--xy-node-border-radius, var(--xy-node-border-radius-default));width:150px;font-size:12px;color:var(--xy-node-color, var(--xy-node-color-default));text-align:center;border:var(--xy-node-border, var(--xy-node-border-default));background-color:var(--xy-node-background-color, var(--xy-node-background-color-default))}.react-flow__node-input.selectable:hover,.react-flow__node-default.selectable:hover,.react-flow__node-output.selectable:hover,.react-flow__node-group.selectable:hover{box-shadow:var(--xy-node-boxshadow-hover, 
var(--xy-node-boxshadow-hover-default))}.react-flow__node-input.selectable.selected,.react-flow__node-input.selectable:focus,.react-flow__node-input.selectable:focus-visible,.react-flow__node-default.selectable.selected,.react-flow__node-default.selectable:focus,.react-flow__node-default.selectable:focus-visible,.react-flow__node-output.selectable.selected,.react-flow__node-output.selectable:focus,.react-flow__node-output.selectable:focus-visible,.react-flow__node-group.selectable.selected,.react-flow__node-group.selectable:focus,.react-flow__node-group.selectable:focus-visible{box-shadow:var(--xy-node-boxshadow-selected, var(--xy-node-boxshadow-selected-default))}.react-flow__node-group{background-color:var(--xy-node-group-background-color, var(--xy-node-group-background-color-default))}.react-flow__nodesselection-rect,.react-flow__selection{background:var(--xy-selection-background-color, var(--xy-selection-background-color-default));border:var(--xy-selection-border, var(--xy-selection-border-default))}.react-flow__nodesselection-rect:focus,.react-flow__nodesselection-rect:focus-visible,.react-flow__selection:focus,.react-flow__selection:focus-visible{outline:none}.react-flow__controls-button:hover{background:var( --xy-controls-button-background-color-hover-props, var(--xy-controls-button-background-color-hover, var(--xy-controls-button-background-color-hover-default)) );color:var( --xy-controls-button-color-hover-props, var(--xy-controls-button-color-hover, var(--xy-controls-button-color-hover-default)) )}.react-flow__controls-button:disabled{pointer-events:none}.react-flow__controls-button:disabled svg{fill-opacity:.4}.react-flow__controls-button:last-child{border-bottom:none}.react-flow__controls.horizontal .react-flow__controls-button{border-bottom:none;border-right:1px solid var( --xy-controls-button-border-color-props, var(--xy-controls-button-border-color, var(--xy-controls-button-border-color-default)) )}.react-flow__controls.horizontal 
.react-flow__controls-button:last-child{border-right:none}.react-flow__resize-control{position:absolute}.react-flow__resize-control.left,.react-flow__resize-control.right{cursor:ew-resize}.react-flow__resize-control.top,.react-flow__resize-control.bottom{cursor:ns-resize}.react-flow__resize-control.top.left,.react-flow__resize-control.bottom.right{cursor:nwse-resize}.react-flow__resize-control.bottom.left,.react-flow__resize-control.top.right{cursor:nesw-resize}.react-flow__resize-control.handle{width:5px;height:5px;border:1px solid #fff;border-radius:1px;background-color:var(--xy-resize-background-color, var(--xy-resize-background-color-default));translate:-50% -50%}.react-flow__resize-control.handle.left{left:0;top:50%}.react-flow__resize-control.handle.right{left:100%;top:50%}.react-flow__resize-control.handle.top{left:50%;top:0}.react-flow__resize-control.handle.bottom{left:50%;top:100%}.react-flow__resize-control.handle.top.left,.react-flow__resize-control.handle.bottom.left{left:0}.react-flow__resize-control.handle.top.right,.react-flow__resize-control.handle.bottom.right{left:100%}.react-flow__resize-control.line{border-color:var(--xy-resize-background-color, var(--xy-resize-background-color-default));border-width:0;border-style:solid}.react-flow__resize-control.line.left,.react-flow__resize-control.line.right{width:1px;transform:translate(-50%);top:0;height:100%}.react-flow__resize-control.line.left{left:0;border-left-width:1px}.react-flow__resize-control.line.right{left:100%;border-right-width:1px}.react-flow__resize-control.line.top,.react-flow__resize-control.line.bottom{height:1px;transform:translateY(-50%);left:0;width:100%}.react-flow__resize-control.line.top{top:0;border-top-width:1px}.react-flow__resize-control.line.bottom{border-bottom-width:1px;top:100%}.react-flow__edge-textbg{fill:var(--xy-edge-label-background-color, var(--xy-edge-label-background-color-default))}.react-flow__edge-text{fill:var(--xy-edge-label-color, 
var(--xy-edge-label-color-default))} +/*! tailwindcss v4.1.12 | MIT License | https://tailwindcss.com */@layer properties{@supports (((-webkit-hyphens:none)) and (not (margin-trim:inline))) or ((-moz-orient:inline) and (not (color:rgb(from red r g b)))){*,:before,:after,::backdrop{--tw-translate-x:0;--tw-translate-y:0;--tw-translate-z:0;--tw-scale-x:1;--tw-scale-y:1;--tw-scale-z:1;--tw-rotate-x:initial;--tw-rotate-y:initial;--tw-rotate-z:initial;--tw-skew-x:initial;--tw-skew-y:initial;--tw-space-y-reverse:0;--tw-space-x-reverse:0;--tw-border-style:solid;--tw-leading:initial;--tw-font-weight:initial;--tw-tracking:initial;--tw-shadow:0 0 #0000;--tw-shadow-color:initial;--tw-shadow-alpha:100%;--tw-inset-shadow:0 0 #0000;--tw-inset-shadow-color:initial;--tw-inset-shadow-alpha:100%;--tw-ring-color:initial;--tw-ring-shadow:0 0 #0000;--tw-inset-ring-color:initial;--tw-inset-ring-shadow:0 0 #0000;--tw-ring-inset:initial;--tw-ring-offset-width:0px;--tw-ring-offset-color:#fff;--tw-ring-offset-shadow:0 0 
#0000;--tw-outline-style:solid;--tw-blur:initial;--tw-brightness:initial;--tw-contrast:initial;--tw-grayscale:initial;--tw-hue-rotate:initial;--tw-invert:initial;--tw-opacity:initial;--tw-saturate:initial;--tw-sepia:initial;--tw-drop-shadow:initial;--tw-drop-shadow-color:initial;--tw-drop-shadow-alpha:100%;--tw-drop-shadow-size:initial;--tw-backdrop-blur:initial;--tw-backdrop-brightness:initial;--tw-backdrop-contrast:initial;--tw-backdrop-grayscale:initial;--tw-backdrop-hue-rotate:initial;--tw-backdrop-invert:initial;--tw-backdrop-opacity:initial;--tw-backdrop-saturate:initial;--tw-backdrop-sepia:initial;--tw-duration:initial;--tw-ease:initial;--tw-animation-delay:0s;--tw-animation-direction:normal;--tw-animation-duration:initial;--tw-animation-fill-mode:none;--tw-animation-iteration-count:1;--tw-enter-blur:0;--tw-enter-opacity:1;--tw-enter-rotate:0;--tw-enter-scale:1;--tw-enter-translate-x:0;--tw-enter-translate-y:0;--tw-exit-blur:0;--tw-exit-opacity:1;--tw-exit-rotate:0;--tw-exit-scale:1;--tw-exit-translate-x:0;--tw-exit-translate-y:0}}}@layer theme{:root,:host{--font-sans:ui-sans-serif,system-ui,sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";--font-mono:ui-monospace,SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace;--color-red-50:oklch(97.1% .013 17.38);--color-red-100:oklch(93.6% .032 17.717);--color-red-200:oklch(88.5% .062 18.334);--color-red-400:oklch(70.4% .191 22.216);--color-red-500:oklch(63.7% .237 25.331);--color-red-600:oklch(57.7% .245 27.325);--color-red-700:oklch(50.5% .213 27.518);--color-red-800:oklch(44.4% .177 26.899);--color-red-900:oklch(39.6% .141 25.723);--color-red-950:oklch(25.8% .092 26.042);--color-orange-50:oklch(98% .016 73.684);--color-orange-100:oklch(95.4% .038 75.164);--color-orange-200:oklch(90.1% .076 70.697);--color-orange-300:oklch(83.7% .128 66.29);--color-orange-400:oklch(75% .183 55.934);--color-orange-500:oklch(70.5% .213 
47.604);--color-orange-600:oklch(64.6% .222 41.116);--color-orange-700:oklch(55.3% .195 38.402);--color-orange-800:oklch(47% .157 37.304);--color-orange-900:oklch(40.8% .123 38.172);--color-orange-950:oklch(26.6% .079 36.259);--color-amber-50:oklch(98.7% .022 95.277);--color-amber-100:oklch(96.2% .059 95.617);--color-amber-200:oklch(92.4% .12 95.746);--color-amber-300:oklch(87.9% .169 91.605);--color-amber-400:oklch(82.8% .189 84.429);--color-amber-500:oklch(76.9% .188 70.08);--color-amber-600:oklch(66.6% .179 58.318);--color-amber-700:oklch(55.5% .163 48.998);--color-amber-800:oklch(47.3% .137 46.201);--color-amber-900:oklch(41.4% .112 45.904);--color-amber-950:oklch(27.9% .077 45.635);--color-yellow-100:oklch(97.3% .071 103.193);--color-yellow-200:oklch(94.5% .129 101.54);--color-yellow-400:oklch(85.2% .199 91.936);--color-yellow-600:oklch(68.1% .162 75.834);--color-yellow-700:oklch(55.4% .135 66.442);--color-green-50:oklch(98.2% .018 155.826);--color-green-100:oklch(96.2% .044 156.743);--color-green-200:oklch(92.5% .084 155.995);--color-green-300:oklch(87.1% .15 154.449);--color-green-400:oklch(79.2% .209 151.711);--color-green-500:oklch(72.3% .219 149.579);--color-green-600:oklch(62.7% .194 149.214);--color-green-700:oklch(52.7% .154 150.069);--color-green-800:oklch(44.8% .119 151.328);--color-green-900:oklch(39.3% .095 152.535);--color-green-950:oklch(26.6% .065 152.934);--color-emerald-50:oklch(97.9% .021 166.113);--color-emerald-100:oklch(95% .052 163.051);--color-emerald-200:oklch(90.5% .093 164.15);--color-emerald-400:oklch(76.5% .177 163.223);--color-emerald-500:oklch(69.6% .17 162.48);--color-emerald-600:oklch(59.6% .145 163.225);--color-emerald-700:oklch(50.8% .118 165.612);--color-emerald-800:oklch(43.2% .095 166.913);--color-blue-50:oklch(97% .014 254.604);--color-blue-100:oklch(93.2% .032 255.585);--color-blue-200:oklch(88.2% .059 254.128);--color-blue-300:oklch(80.9% .105 251.813);--color-blue-400:oklch(70.7% .165 
254.624);--color-blue-500:oklch(62.3% .214 259.815);--color-blue-600:oklch(54.6% .245 262.881);--color-blue-700:oklch(48.8% .243 264.376);--color-blue-800:oklch(42.4% .199 265.638);--color-blue-900:oklch(37.9% .146 265.522);--color-blue-950:oklch(28.2% .091 267.935);--color-purple-50:oklch(97.7% .014 308.299);--color-purple-100:oklch(94.6% .033 307.174);--color-purple-200:oklch(90.2% .063 306.703);--color-purple-400:oklch(71.4% .203 305.504);--color-purple-500:oklch(62.7% .265 303.9);--color-purple-600:oklch(55.8% .288 302.321);--color-purple-800:oklch(43.8% .218 303.724);--color-purple-900:oklch(38.1% .176 304.987);--color-gray-50:oklch(98.5% .002 247.839);--color-gray-100:oklch(96.7% .003 264.542);--color-gray-200:oklch(92.8% .006 264.531);--color-gray-300:oklch(87.2% .01 258.338);--color-gray-400:oklch(70.7% .022 261.325);--color-gray-500:oklch(55.1% .027 264.364);--color-gray-600:oklch(44.6% .03 256.802);--color-gray-700:oklch(37.3% .034 259.733);--color-gray-800:oklch(27.8% .033 256.848);--color-gray-900:oklch(21% .034 264.665);--color-black:#000;--color-white:#fff;--spacing:.25rem;--container-md:28rem;--container-lg:32rem;--container-2xl:42rem;--container-3xl:48rem;--container-4xl:56rem;--container-5xl:64rem;--container-6xl:72rem;--container-7xl:80rem;--text-xs:.75rem;--text-xs--line-height:calc(1/.75);--text-sm:.875rem;--text-sm--line-height:calc(1.25/.875);--text-base:1rem;--text-base--line-height: 1.5 ;--text-lg:1.125rem;--text-lg--line-height:calc(1.75/1.125);--text-xl:1.25rem;--text-xl--line-height:calc(1.75/1.25);--text-2xl:1.5rem;--text-2xl--line-height:calc(2/1.5);--font-weight-medium:500;--font-weight-semibold:600;--font-weight-bold:700;--tracking-tight:-.025em;--tracking-wide:.025em;--tracking-wider:.05em;--tracking-widest:.1em;--leading-tight:1.25;--leading-relaxed:1.625;--drop-shadow-lg:0 4px 4px #00000026;--ease-out:cubic-bezier(0,0,.2,1);--ease-in-out:cubic-bezier(.4,0,.2,1);--animate-spin:spin 1s linear infinite;--animate-pulse:pulse 2s 
cubic-bezier(.4,0,.6,1)infinite;--animate-bounce:bounce 1s infinite;--blur-sm:8px;--default-transition-duration:.15s;--default-transition-timing-function:cubic-bezier(.4,0,.2,1);--default-font-family:var(--font-sans);--default-mono-font-family:var(--font-mono)}}@layer base{*,:after,:before,::backdrop{box-sizing:border-box;border:0 solid;margin:0;padding:0}::file-selector-button{box-sizing:border-box;border:0 solid;margin:0;padding:0}html,:host{-webkit-text-size-adjust:100%;tab-size:4;line-height:1.5;font-family:var(--default-font-family,ui-sans-serif,system-ui,sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji");font-feature-settings:var(--default-font-feature-settings,normal);font-variation-settings:var(--default-font-variation-settings,normal);-webkit-tap-highlight-color:transparent}hr{height:0;color:inherit;border-top-width:1px}abbr:where([title]){-webkit-text-decoration:underline dotted;text-decoration:underline dotted}h1,h2,h3,h4,h5,h6{font-size:inherit;font-weight:inherit}a{color:inherit;-webkit-text-decoration:inherit;text-decoration:inherit}b,strong{font-weight:bolder}code,kbd,samp,pre{font-family:var(--default-mono-font-family,ui-monospace,SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier 
New",monospace);font-feature-settings:var(--default-mono-font-feature-settings,normal);font-variation-settings:var(--default-mono-font-variation-settings,normal);font-size:1em}small{font-size:80%}sub,sup{vertical-align:baseline;font-size:75%;line-height:0;position:relative}sub{bottom:-.25em}sup{top:-.5em}table{text-indent:0;border-color:inherit;border-collapse:collapse}:-moz-focusring{outline:auto}progress{vertical-align:baseline}summary{display:list-item}ol,ul,menu{list-style:none}img,svg,video,canvas,audio,iframe,embed,object{vertical-align:middle;display:block}img,video{max-width:100%;height:auto}button,input,select,optgroup,textarea{font:inherit;font-feature-settings:inherit;font-variation-settings:inherit;letter-spacing:inherit;color:inherit;opacity:1;background-color:#0000;border-radius:0}::file-selector-button{font:inherit;font-feature-settings:inherit;font-variation-settings:inherit;letter-spacing:inherit;color:inherit;opacity:1;background-color:#0000;border-radius:0}:where(select:is([multiple],[size])) optgroup{font-weight:bolder}:where(select:is([multiple],[size])) optgroup option{padding-inline-start:20px}::file-selector-button{margin-inline-end:4px}::placeholder{opacity:1}@supports (not ((-webkit-appearance:-apple-pay-button))) or (contain-intrinsic-size:1px){::placeholder{color:currentColor}@supports (color:color-mix(in lab,red,red)){::placeholder{color:color-mix(in oklab,currentcolor 
50%,transparent)}}}textarea{resize:vertical}::-webkit-search-decoration{-webkit-appearance:none}::-webkit-date-and-time-value{min-height:1lh;text-align:inherit}::-webkit-datetime-edit{display:inline-flex}::-webkit-datetime-edit-fields-wrapper{padding:0}::-webkit-datetime-edit{padding-block:0}::-webkit-datetime-edit-year-field{padding-block:0}::-webkit-datetime-edit-month-field{padding-block:0}::-webkit-datetime-edit-day-field{padding-block:0}::-webkit-datetime-edit-hour-field{padding-block:0}::-webkit-datetime-edit-minute-field{padding-block:0}::-webkit-datetime-edit-second-field{padding-block:0}::-webkit-datetime-edit-millisecond-field{padding-block:0}::-webkit-datetime-edit-meridiem-field{padding-block:0}::-webkit-calendar-picker-indicator{line-height:1}:-moz-ui-invalid{box-shadow:none}button,input:where([type=button],[type=reset],[type=submit]){appearance:button}::file-selector-button{appearance:button}::-webkit-inner-spin-button{height:auto}::-webkit-outer-spin-button{height:auto}[hidden]:where(:not([hidden=until-found])){display:none!important}*{border-color:var(--border);outline-color:var(--ring)}@supports (color:color-mix(in lab,red,red)){*{outline-color:color-mix(in oklab,var(--ring)50%,transparent)}}body{background-color:var(--background);color:var(--foreground)}}@layer components;@layer 
utilities{.\@container\/card-header{container:card-header/inline-size}.pointer-events-none{pointer-events:none}.collapse{visibility:collapse}.visible{visibility:visible}.sr-only{clip:rect(0,0,0,0);white-space:nowrap;border-width:0;width:1px;height:1px;margin:-1px;padding:0;position:absolute;overflow:hidden}.absolute{position:absolute}.fixed{position:fixed}.relative{position:relative}.static{position:static}.inset-0{inset:calc(var(--spacing)*0)}.inset-2{inset:calc(var(--spacing)*2)}.inset-y-0{inset-block:calc(var(--spacing)*0)}.top-0{top:calc(var(--spacing)*0)}.top-1{top:calc(var(--spacing)*1)}.top-2{top:calc(var(--spacing)*2)}.top-4{top:calc(var(--spacing)*4)}.top-\[30px\]{top:30px}.-right-2{right:calc(var(--spacing)*-2)}.right-0{right:calc(var(--spacing)*0)}.right-1{right:calc(var(--spacing)*1)}.right-2{right:calc(var(--spacing)*2)}.right-4{right:calc(var(--spacing)*4)}.bottom-0{bottom:calc(var(--spacing)*0)}.bottom-24{bottom:calc(var(--spacing)*24)}.-left-2{left:calc(var(--spacing)*-2)}.left-0{left:calc(var(--spacing)*0)}.left-1\/2{left:50%}.left-2{left:calc(var(--spacing)*2)}.left-\[18px\]{left:18px}.z-10{z-index:10}.z-20{z-index:20}.z-50{z-index:50}.col-start-2{grid-column-start:2}.row-span-2{grid-row:span 2/span 2}.row-start-1{grid-row-start:1}.container{width:100%}@media (min-width:40rem){.container{max-width:40rem}}@media (min-width:48rem){.container{max-width:48rem}}@media (min-width:64rem){.container{max-width:64rem}}@media (min-width:80rem){.container{max-width:80rem}}@media (min-width:96rem){.container{max-width:96rem}}.container\!{width:100%!important}@media (min-width:40rem){.container\!{max-width:40rem!important}}@media (min-width:48rem){.container\!{max-width:48rem!important}}@media (min-width:64rem){.container\!{max-width:64rem!important}}@media (min-width:80rem){.container\!{max-width:80rem!important}}@media 
(min-width:96rem){.container\!{max-width:96rem!important}}.m-2{margin:calc(var(--spacing)*2)}.-mx-1{margin-inline:calc(var(--spacing)*-1)}.mx-0\.5{margin-inline:calc(var(--spacing)*.5)}.mx-4{margin-inline:calc(var(--spacing)*4)}.mx-auto{margin-inline:auto}.my-1{margin-block:calc(var(--spacing)*1)}.my-2{margin-block:calc(var(--spacing)*2)}.my-3{margin-block:calc(var(--spacing)*3)}.my-4{margin-block:calc(var(--spacing)*4)}.mt-0{margin-top:calc(var(--spacing)*0)}.mt-0\.5{margin-top:calc(var(--spacing)*.5)}.mt-1{margin-top:calc(var(--spacing)*1)}.mt-2{margin-top:calc(var(--spacing)*2)}.mt-3{margin-top:calc(var(--spacing)*3)}.mt-4{margin-top:calc(var(--spacing)*4)}.mt-12{margin-top:calc(var(--spacing)*12)}.mr-1{margin-right:calc(var(--spacing)*1)}.mr-2{margin-right:calc(var(--spacing)*2)}.mb-1{margin-bottom:calc(var(--spacing)*1)}.mb-2{margin-bottom:calc(var(--spacing)*2)}.mb-3{margin-bottom:calc(var(--spacing)*3)}.mb-4{margin-bottom:calc(var(--spacing)*4)}.mb-6{margin-bottom:calc(var(--spacing)*6)}.mb-8{margin-bottom:calc(var(--spacing)*8)}.ml-0{margin-left:calc(var(--spacing)*0)}.ml-1{margin-left:calc(var(--spacing)*1)}.ml-2{margin-left:calc(var(--spacing)*2)}.ml-3{margin-left:calc(var(--spacing)*3)}.ml-4{margin-left:calc(var(--spacing)*4)}.ml-5{margin-left:calc(var(--spacing)*5)}.ml-6{margin-left:calc(var(--spacing)*6)}.ml-auto{margin-left:auto}.line-clamp-2{-webkit-line-clamp:2;-webkit-box-orient:vertical;display:-webkit-box;overflow:hidden}.line-clamp-3{-webkit-line-clamp:3;-webkit-box-orient:vertical;display:-webkit-box;overflow:hidden}.block{display:block}.contents{display:contents}.flex{display:flex}.grid{display:grid}.hidden{display:none}.inline{display:inline}.inline-block{display:inline-block}.inline-flex{display:inline-flex}.table{display:table}.field-sizing-content{field-sizing:content}.size-2{width:calc(var(--spacing)*2);height:calc(var(--spacing)*2)}.size-3\.5{width:calc(var(--spacing)*3.5);height:calc(var(--spacing)*3.5)}.size-4{width:calc(var(--spacing)*
4);height:calc(var(--spacing)*4)}.size-9{width:calc(var(--spacing)*9);height:calc(var(--spacing)*9)}.\!h-2{height:calc(var(--spacing)*2)!important}.h-0{height:calc(var(--spacing)*0)}.h-0\.5{height:calc(var(--spacing)*.5)}.h-1{height:calc(var(--spacing)*1)}.h-2{height:calc(var(--spacing)*2)}.h-2\.5{height:calc(var(--spacing)*2.5)}.h-3{height:calc(var(--spacing)*3)}.h-3\.5{height:calc(var(--spacing)*3.5)}.h-4{height:calc(var(--spacing)*4)}.h-5{height:calc(var(--spacing)*5)}.h-6{height:calc(var(--spacing)*6)}.h-7{height:calc(var(--spacing)*7)}.h-8{height:calc(var(--spacing)*8)}.h-9{height:calc(var(--spacing)*9)}.h-10{height:calc(var(--spacing)*10)}.h-12{height:calc(var(--spacing)*12)}.h-14{height:calc(var(--spacing)*14)}.h-16{height:calc(var(--spacing)*16)}.h-32{height:calc(var(--spacing)*32)}.h-\[1\.2rem\]{height:1.2rem}.h-\[1px\]{height:1px}.h-\[85vh\]{height:85vh}.h-\[500px\]{height:500px}.h-\[calc\(100\%\+8px\)\]{height:calc(100% + 8px)}.h-\[calc\(100vh-3\.5rem\)\]{height:calc(100vh - 3.5rem)}.h-\[calc\(100vh-3\.7rem\)\]{height:calc(100vh - 
3.7rem)}.h-\[var\(--radix-select-trigger-height\)\]{height:var(--radix-select-trigger-height)}.h-full{height:100%}.h-px{height:1px}.h-screen{height:100vh}.max-h-\(--radix-dropdown-menu-content-available-height\){max-height:var(--radix-dropdown-menu-content-available-height)}.max-h-\(--radix-select-content-available-height\){max-height:var(--radix-select-content-available-height)}.max-h-32{max-height:calc(var(--spacing)*32)}.max-h-40{max-height:calc(var(--spacing)*40)}.max-h-48{max-height:calc(var(--spacing)*48)}.max-h-60{max-height:calc(var(--spacing)*60)}.max-h-64{max-height:calc(var(--spacing)*64)}.max-h-\[85vh\]{max-height:85vh}.max-h-\[90vh\]{max-height:90vh}.max-h-\[200px\]{max-height:200px}.max-h-\[400px\]{max-height:400px}.max-h-none{max-height:none}.max-h-screen{max-height:100vh}.\!min-h-0{min-height:calc(var(--spacing)*0)!important}.min-h-0{min-height:calc(var(--spacing)*0)}.min-h-16{min-height:calc(var(--spacing)*16)}.min-h-\[36px\]{min-height:36px}.min-h-\[40px\]{min-height:40px}.min-h-\[50vh\]{min-height:50vh}.min-h-\[400px\]{min-height:400px}.min-h-screen{min-height:100vh}.\!w-2{width:calc(var(--spacing)*2)!important}.w-1{width:calc(var(--spacing)*1)}.w-2{width:calc(var(--spacing)*2)}.w-2\.5{width:calc(var(--spacing)*2.5)}.w-3{width:calc(var(--spacing)*3)}.w-3\.5{width:calc(var(--spacing)*3.5)}.w-4{width:calc(var(--spacing)*4)}.w-5{width:calc(var(--spacing)*5)}.w-6{width:calc(var(--spacing)*6)}.w-8{width:calc(var(--spacing)*8)}.w-9{width:calc(var(--spacing)*9)}.w-10{width:calc(var(--spacing)*10)}.w-12{width:calc(var(--spacing)*12)}.w-16{width:calc(var(--spacing)*16)}.w-20{width:calc(var(--spacing)*20)}.w-56{width:calc(var(--spacing)*56)}.w-64{width:calc(var(--spacing)*64)}.w-80{width:calc(var(--spacing)*80)}.w-\[1\.2rem\]{width:1.2rem}.w-\[1px\]{width:1px}.w-\[28rem\]{width:28rem}.w-\[90vw\]{width:90vw}.w-\[600px\]{width:600px}.w-\[800px\]{width:800px}.w-fit{width:fit-content}.w-full{width:100%}.w-px{width:1px}.max-w-2xl{max-width:var(--container-2xl)}.
max-w-3xl{max-width:var(--container-3xl)}.max-w-4xl{max-width:var(--container-4xl)}.max-w-6xl{max-width:var(--container-6xl)}.max-w-7xl{max-width:var(--container-7xl)}.max-w-\[80\%\]{max-width:80%}.max-w-\[90vw\]{max-width:90vw}.max-w-\[200px\]{max-width:200px}.max-w-full{max-width:100%}.max-w-lg{max-width:var(--container-lg)}.max-w-md{max-width:var(--container-md)}.\!min-w-0{min-width:calc(var(--spacing)*0)!important}.min-w-0{min-width:calc(var(--spacing)*0)}.min-w-\[1\.25rem\]{min-width:1.25rem}.min-w-\[8rem\]{min-width:8rem}.min-w-\[50px\]{min-width:50px}.min-w-\[80px\]{min-width:80px}.min-w-\[300px\]{min-width:300px}.min-w-\[400px\]{min-width:400px}.min-w-\[800px\]{min-width:800px}.min-w-\[var\(--radix-select-trigger-width\)\]{min-width:var(--radix-select-trigger-width)}.min-w-full{min-width:100%}.flex-1{flex:1}.flex-shrink-0,.shrink-0{flex-shrink:0}.origin-\(--radix-dropdown-menu-content-transform-origin\){transform-origin:var(--radix-dropdown-menu-content-transform-origin)}.origin-\(--radix-select-content-transform-origin\){transform-origin:var(--radix-select-content-transform-origin)}.origin-bottom{transform-origin:bottom}.-translate-x-1\/2{--tw-translate-x: -50% ;translate:var(--tw-translate-x)var(--tw-translate-y)}.translate-x-0{--tw-translate-x:calc(var(--spacing)*0);translate:var(--tw-translate-x)var(--tw-translate-y)}.translate-x-4{--tw-translate-x:calc(var(--spacing)*4);translate:var(--tw-translate-x)var(--tw-translate-y)}.scale-0{--tw-scale-x:0%;--tw-scale-y:0%;--tw-scale-z:0%;scale:var(--tw-scale-x)var(--tw-scale-y)}.scale-75{--tw-scale-x:75%;--tw-scale-y:75%;--tw-scale-z:75%;scale:var(--tw-scale-x)var(--tw-scale-y)}.scale-100{--tw-scale-x:100%;--tw-scale-y:100%;--tw-scale-z:100%;scale:var(--tw-scale-x)var(--tw-scale-y)}.rotate-0{rotate:none}.rotate-90{rotate:90deg}.transform{transform:var(--tw-rotate-x,)var(--tw-rotate-y,)var(--tw-rotate-z,)var(--tw-skew-x,)var(--tw-skew-y,)}.animate-bounce{animation:var(--animate-bounce)}.animate-in{animation:enter 
var(--tw-animation-duration,var(--tw-duration,.15s))var(--tw-ease,ease)var(--tw-animation-delay,0s)var(--tw-animation-iteration-count,1)var(--tw-animation-direction,normal)var(--tw-animation-fill-mode,none)}.animate-pulse{animation:var(--animate-pulse)}.animate-spin{animation:var(--animate-spin)}.cursor-col-resize{cursor:col-resize}.cursor-default{cursor:default}.cursor-pointer{cursor:pointer}.touch-none{touch-action:none}.resize{resize:both}.resize-none{resize:none}.scroll-my-1{scroll-margin-block:calc(var(--spacing)*1)}.list-inside{list-style-position:inside}.list-decimal{list-style-type:decimal}.list-disc{list-style-type:disc}.list-none{list-style-type:none}.auto-rows-min{grid-auto-rows:min-content}.grid-cols-1{grid-template-columns:repeat(1,minmax(0,1fr))}.grid-cols-2{grid-template-columns:repeat(2,minmax(0,1fr))}.grid-cols-3{grid-template-columns:repeat(3,minmax(0,1fr))}.grid-cols-\[auto_auto_1fr_auto\]{grid-template-columns:auto auto 1fr auto}.grid-rows-\[auto_auto\]{grid-template-rows:auto auto}.flex-col{flex-direction:column}.flex-row-reverse{flex-direction:row-reverse}.flex-wrap{flex-wrap:wrap}.items-center{align-items:center}.items-end{align-items:flex-end}.items-start{align-items:flex-start}.items-stretch{align-items:stretch}.justify-between{justify-content:space-between}.justify-center{justify-content:center}.justify-end{justify-content:flex-end}.gap-0{gap:calc(var(--spacing)*0)}.gap-1{gap:calc(var(--spacing)*1)}.gap-1\.5{gap:calc(var(--spacing)*1.5)}.gap-2{gap:calc(var(--spacing)*2)}.gap-3{gap:calc(var(--spacing)*3)}.gap-4{gap:calc(var(--spacing)*4)}.gap-6{gap:calc(var(--spacing)*6)}:where(.space-y-0\.5>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*.5)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*.5)*calc(1 - 
var(--tw-space-y-reverse)))}:where(.space-y-1>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*1)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*1)*calc(1 - var(--tw-space-y-reverse)))}:where(.space-y-1\.5>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*1.5)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*1.5)*calc(1 - var(--tw-space-y-reverse)))}:where(.space-y-2>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*2)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*2)*calc(1 - var(--tw-space-y-reverse)))}:where(.space-y-3>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*3)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*3)*calc(1 - var(--tw-space-y-reverse)))}:where(.space-y-4>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*4)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*4)*calc(1 - var(--tw-space-y-reverse)))}:where(.space-y-6>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*6)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*6)*calc(1 - var(--tw-space-y-reverse)))}.gap-x-4{column-gap:calc(var(--spacing)*4)}:where(.space-x-1>:not(:last-child)){--tw-space-x-reverse:0;margin-inline-start:calc(calc(var(--spacing)*1)*var(--tw-space-x-reverse));margin-inline-end:calc(calc(var(--spacing)*1)*calc(1 - var(--tw-space-x-reverse)))}:where(.space-x-2>:not(:last-child)){--tw-space-x-reverse:0;margin-inline-start:calc(calc(var(--spacing)*2)*var(--tw-space-x-reverse));margin-inline-end:calc(calc(var(--spacing)*2)*calc(1 - 
var(--tw-space-x-reverse)))}.gap-y-1{row-gap:calc(var(--spacing)*1)}.self-start{align-self:flex-start}.justify-self-end{justify-self:flex-end}.truncate{text-overflow:ellipsis;white-space:nowrap;overflow:hidden}.overflow-auto{overflow:auto}.overflow-hidden{overflow:hidden}.overflow-x-auto{overflow-x:auto}.overflow-x-hidden{overflow-x:hidden}.overflow-y-auto{overflow-y:auto}.\!rounded-full{border-radius:3.40282e38px!important}.rounded{border-radius:.25rem}.rounded-\[4px\]{border-radius:4px}.rounded-\[inherit\]{border-radius:inherit}.rounded-full{border-radius:3.40282e38px}.rounded-lg{border-radius:var(--radius)}.rounded-md{border-radius:calc(var(--radius) - 2px)}.rounded-none{border-radius:0}.rounded-sm{border-radius:calc(var(--radius) - 4px)}.rounded-l-none{border-top-left-radius:0;border-bottom-left-radius:0}.rounded-r-none{border-top-right-radius:0;border-bottom-right-radius:0}.\!border{border-style:var(--tw-border-style)!important;border-width:1px!important}.border{border-style:var(--tw-border-style);border-width:1px}.border-2{border-style:var(--tw-border-style);border-width:2px}.border-t{border-top-style:var(--tw-border-style);border-top-width:1px}.border-r{border-right-style:var(--tw-border-style);border-right-width:1px}.border-b{border-bottom-style:var(--tw-border-style);border-bottom-width:1px}.border-l{border-left-style:var(--tw-border-style);border-left-width:1px}.border-l-0{border-left-style:var(--tw-border-style);border-left-width:0}.border-l-2{border-left-style:var(--tw-border-style);border-left-width:2px}.border-l-4{border-left-style:var(--tw-border-style);border-left-width:4px}.border-dashed{--tw-border-style:dashed;border-style:dashed}.\!border-gray-600{border-color:var(--color-gray-600)!important}.border-\[\#643FB2\]{border-color:#643fb2}.border-\[\#643FB2\]\/20{border-color:#643fb233}.border-\[\#643FB2\]\/30{border-color:#643fb24d}.border-amber-200{border-color:var(--color-amber-200)}.border-blue-200{border-color:var(--color-blue-200)}.border-blue-30
0{border-color:var(--color-blue-300)}.border-blue-400{border-color:var(--color-blue-400)}.border-blue-500{border-color:var(--color-blue-500)}.border-border,.border-border\/50{border-color:var(--border)}@supports (color:color-mix(in lab,red,red)){.border-border\/50{border-color:color-mix(in oklab,var(--border)50%,transparent)}}.border-current\/30{border-color:currentColor}@supports (color:color-mix(in lab,red,red)){.border-current\/30{border-color:color-mix(in oklab,currentcolor 30%,transparent)}}.border-destructive\/30{border-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.border-destructive\/30{border-color:color-mix(in oklab,var(--destructive)30%,transparent)}}.border-foreground\/5{border-color:var(--foreground)}@supports (color:color-mix(in lab,red,red)){.border-foreground\/5{border-color:color-mix(in oklab,var(--foreground)5%,transparent)}}.border-foreground\/10{border-color:var(--foreground)}@supports (color:color-mix(in lab,red,red)){.border-foreground\/10{border-color:color-mix(in oklab,var(--foreground)10%,transparent)}}.border-foreground\/20{border-color:var(--foreground)}@supports (color:color-mix(in lab,red,red)){.border-foreground\/20{border-color:color-mix(in oklab,var(--foreground)20%,transparent)}}.border-gray-200{border-color:var(--color-gray-200)}.border-gray-300{border-color:var(--color-gray-300)}.border-gray-400{border-color:var(--color-gray-400)}.border-gray-500\/20{border-color:#6a728233}@supports (color:color-mix(in lab,red,red)){.border-gray-500\/20{border-color:color-mix(in oklab,var(--color-gray-500)20%,transparent)}}.border-green-200{border-color:var(--color-green-200)}.border-green-500{border-color:var(--color-green-500)}.border-green-500\/20{border-color:#00c75833}@supports (color:color-mix(in lab,red,red)){.border-green-500\/20{border-color:color-mix(in oklab,var(--color-green-500)20%,transparent)}}.border-green-500\/40{border-color:#00c75866}@supports (color:color-mix(in 
lab,red,red)){.border-green-500\/40{border-color:color-mix(in oklab,var(--color-green-500)40%,transparent)}}.border-green-600\/20{border-color:#00a54433}@supports (color:color-mix(in lab,red,red)){.border-green-600\/20{border-color:color-mix(in oklab,var(--color-green-600)20%,transparent)}}.border-input{border-color:var(--input)}.border-muted,.border-muted\/50{border-color:var(--muted)}@supports (color:color-mix(in lab,red,red)){.border-muted\/50{border-color:color-mix(in oklab,var(--muted)50%,transparent)}}.border-orange-200{border-color:var(--color-orange-200)}.border-orange-300{border-color:var(--color-orange-300)}.border-orange-500{border-color:var(--color-orange-500)}.border-orange-500\/20{border-color:#fe6e0033}@supports (color:color-mix(in lab,red,red)){.border-orange-500\/20{border-color:color-mix(in oklab,var(--color-orange-500)20%,transparent)}}.border-orange-500\/40{border-color:#fe6e0066}@supports (color:color-mix(in lab,red,red)){.border-orange-500\/40{border-color:color-mix(in oklab,var(--color-orange-500)40%,transparent)}}.border-orange-600\/20{border-color:#f0510033}@supports (color:color-mix(in lab,red,red)){.border-orange-600\/20{border-color:color-mix(in oklab,var(--color-orange-600)20%,transparent)}}.border-primary,.border-primary\/20{border-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.border-primary\/20{border-color:color-mix(in oklab,var(--primary)20%,transparent)}}.border-red-200{border-color:var(--color-red-200)}.border-red-500{border-color:var(--color-red-500)}.border-red-500\/20{border-color:#fb2c3633}@supports (color:color-mix(in lab,red,red)){.border-red-500\/20{border-color:color-mix(in 
oklab,var(--color-red-500)20%,transparent)}}.border-transparent{border-color:#0000}.border-yellow-200{border-color:var(--color-yellow-200)}.border-t-transparent{border-top-color:#0000}.border-l-transparent{border-left-color:#0000}.bg-\[\#643FB2\]{background-color:#643fb2}.bg-\[\#643FB2\]\/10{background-color:#643fb21a}.bg-accent\/10{background-color:var(--accent)}@supports (color:color-mix(in lab,red,red)){.bg-accent\/10{background-color:color-mix(in oklab,var(--accent)10%,transparent)}}.bg-amber-50{background-color:var(--color-amber-50)}.bg-amber-500{background-color:var(--color-amber-500)}.bg-amber-500\/10{background-color:#f99c001a}@supports (color:color-mix(in lab,red,red)){.bg-amber-500\/10{background-color:color-mix(in oklab,var(--color-amber-500)10%,transparent)}}.bg-background,.bg-background\/50{background-color:var(--background)}@supports (color:color-mix(in lab,red,red)){.bg-background\/50{background-color:color-mix(in oklab,var(--background)50%,transparent)}}.bg-black{background-color:var(--color-black)}.bg-black\/50{background-color:#00000080}@supports (color:color-mix(in lab,red,red)){.bg-black\/50{background-color:color-mix(in oklab,var(--color-black)50%,transparent)}}.bg-black\/60{background-color:#0009}@supports (color:color-mix(in lab,red,red)){.bg-black\/60{background-color:color-mix(in oklab,var(--color-black)60%,transparent)}}.bg-blue-50{background-color:var(--color-blue-50)}.bg-blue-50\/80{background-color:#eff6ffcc}@supports (color:color-mix(in lab,red,red)){.bg-blue-50\/80{background-color:color-mix(in oklab,var(--color-blue-50)80%,transparent)}}.bg-blue-50\/95{background-color:#eff6fff2}@supports (color:color-mix(in lab,red,red)){.bg-blue-50\/95{background-color:color-mix(in oklab,var(--color-blue-50)95%,transparent)}}.bg-blue-100{background-color:var(--color-blue-100)}.bg-blue-500{background-color:var(--color-blue-500)}.bg-blue-500\/5{background-color:#3080ff0d}@supports (color:color-mix(in 
lab,red,red)){.bg-blue-500\/5{background-color:color-mix(in oklab,var(--color-blue-500)5%,transparent)}}.bg-blue-500\/10{background-color:#3080ff1a}@supports (color:color-mix(in lab,red,red)){.bg-blue-500\/10{background-color:color-mix(in oklab,var(--color-blue-500)10%,transparent)}}.bg-blue-600{background-color:var(--color-blue-600)}.bg-border{background-color:var(--border)}.bg-card{background-color:var(--card)}.bg-current{background-color:currentColor}.bg-destructive,.bg-destructive\/10{background-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.bg-destructive\/10{background-color:color-mix(in oklab,var(--destructive)10%,transparent)}}.bg-emerald-500{background-color:var(--color-emerald-500)}.bg-foreground\/5{background-color:var(--foreground)}@supports (color:color-mix(in lab,red,red)){.bg-foreground\/5{background-color:color-mix(in oklab,var(--foreground)5%,transparent)}}.bg-foreground\/10{background-color:var(--foreground)}@supports (color:color-mix(in lab,red,red)){.bg-foreground\/10{background-color:color-mix(in oklab,var(--foreground)10%,transparent)}}.bg-gray-50{background-color:var(--color-gray-50)}.bg-gray-100{background-color:var(--color-gray-100)}.bg-gray-200{background-color:var(--color-gray-200)}.bg-gray-400{background-color:var(--color-gray-400)}.bg-gray-500\/10{background-color:#6a72821a}@supports (color:color-mix(in lab,red,red)){.bg-gray-500\/10{background-color:color-mix(in oklab,var(--color-gray-500)10%,transparent)}}.bg-gray-900\/90{background-color:#101828e6}@supports (color:color-mix(in lab,red,red)){.bg-gray-900\/90{background-color:color-mix(in oklab,var(--color-gray-900)90%,transparent)}}.bg-green-50{background-color:var(--color-green-50)}.bg-green-100{background-color:var(--color-green-100)}.bg-green-500{background-color:var(--color-green-500)}.bg-green-500\/5{background-color:#00c7580d}@supports (color:color-mix(in lab,red,red)){.bg-green-500\/5{background-color:color-mix(in 
oklab,var(--color-green-500)5%,transparent)}}.bg-green-500\/10{background-color:#00c7581a}@supports (color:color-mix(in lab,red,red)){.bg-green-500\/10{background-color:color-mix(in oklab,var(--color-green-500)10%,transparent)}}.bg-muted{background-color:var(--muted)}.bg-muted-foreground\/20{background-color:var(--muted-foreground)}@supports (color:color-mix(in lab,red,red)){.bg-muted-foreground\/20{background-color:color-mix(in oklab,var(--muted-foreground)20%,transparent)}}.bg-muted-foreground\/30{background-color:var(--muted-foreground)}@supports (color:color-mix(in lab,red,red)){.bg-muted-foreground\/30{background-color:color-mix(in oklab,var(--muted-foreground)30%,transparent)}}.bg-muted\/30{background-color:var(--muted)}@supports (color:color-mix(in lab,red,red)){.bg-muted\/30{background-color:color-mix(in oklab,var(--muted)30%,transparent)}}.bg-muted\/50{background-color:var(--muted)}@supports (color:color-mix(in lab,red,red)){.bg-muted\/50{background-color:color-mix(in oklab,var(--muted)50%,transparent)}}.bg-orange-50{background-color:var(--color-orange-50)}.bg-orange-50\/50{background-color:#fff7ed80}@supports (color:color-mix(in lab,red,red)){.bg-orange-50\/50{background-color:color-mix(in oklab,var(--color-orange-50)50%,transparent)}}.bg-orange-100{background-color:var(--color-orange-100)}.bg-orange-100\/50{background-color:#ffedd580}@supports (color:color-mix(in lab,red,red)){.bg-orange-100\/50{background-color:color-mix(in oklab,var(--color-orange-100)50%,transparent)}}.bg-orange-500{background-color:var(--color-orange-500)}.bg-orange-500\/5{background-color:#fe6e000d}@supports (color:color-mix(in lab,red,red)){.bg-orange-500\/5{background-color:color-mix(in oklab,var(--color-orange-500)5%,transparent)}}.bg-orange-500\/10{background-color:#fe6e001a}@supports (color:color-mix(in lab,red,red)){.bg-orange-500\/10{background-color:color-mix(in 
oklab,var(--color-orange-500)10%,transparent)}}.bg-popover{background-color:var(--popover)}.bg-primary,.bg-primary\/10{background-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.bg-primary\/10{background-color:color-mix(in oklab,var(--primary)10%,transparent)}}.bg-primary\/30{background-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.bg-primary\/30{background-color:color-mix(in oklab,var(--primary)30%,transparent)}}.bg-primary\/40{background-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.bg-primary\/40{background-color:color-mix(in oklab,var(--primary)40%,transparent)}}.bg-purple-50{background-color:var(--color-purple-50)}.bg-purple-100{background-color:var(--color-purple-100)}.bg-purple-500{background-color:var(--color-purple-500)}.bg-red-50{background-color:var(--color-red-50)}.bg-red-100{background-color:var(--color-red-100)}.bg-red-500{background-color:var(--color-red-500)}.bg-red-500\/10{background-color:#fb2c361a}@supports (color:color-mix(in lab,red,red)){.bg-red-500\/10{background-color:color-mix(in oklab,var(--color-red-500)10%,transparent)}}.bg-secondary{background-color:var(--secondary)}.bg-transparent{background-color:#0000}.bg-white{background-color:var(--color-white)}.bg-white\/60{background-color:#fff9}@supports (color:color-mix(in lab,red,red)){.bg-white\/60{background-color:color-mix(in oklab,var(--color-white)60%,transparent)}}.bg-white\/90{background-color:#ffffffe6}@supports (color:color-mix(in lab,red,red)){.bg-white\/90{background-color:color-mix(in 
oklab,var(--color-white)90%,transparent)}}.bg-yellow-100{background-color:var(--color-yellow-100)}.fill-current{fill:currentColor}.object-cover{object-fit:cover}.p-0{padding:calc(var(--spacing)*0)}.p-1{padding:calc(var(--spacing)*1)}.p-1\.5{padding:calc(var(--spacing)*1.5)}.p-2{padding:calc(var(--spacing)*2)}.p-3{padding:calc(var(--spacing)*3)}.p-4{padding:calc(var(--spacing)*4)}.p-6{padding:calc(var(--spacing)*6)}.p-8{padding:calc(var(--spacing)*8)}.p-\[1px\]{padding:1px}.px-1{padding-inline:calc(var(--spacing)*1)}.px-1\.5{padding-inline:calc(var(--spacing)*1.5)}.px-2{padding-inline:calc(var(--spacing)*2)}.px-2\.5{padding-inline:calc(var(--spacing)*2.5)}.px-3{padding-inline:calc(var(--spacing)*3)}.px-4{padding-inline:calc(var(--spacing)*4)}.px-6{padding-inline:calc(var(--spacing)*6)}.px-8{padding-inline:calc(var(--spacing)*8)}.py-0{padding-block:calc(var(--spacing)*0)}.py-0\.5{padding-block:calc(var(--spacing)*.5)}.py-1{padding-block:calc(var(--spacing)*1)}.py-1\.5{padding-block:calc(var(--spacing)*1.5)}.py-2{padding-block:calc(var(--spacing)*2)}.py-2\.5{padding-block:calc(var(--spacing)*2.5)}.py-3{padding-block:calc(var(--spacing)*3)}.py-4{padding-block:calc(var(--spacing)*4)}.py-6{padding-block:calc(var(--spacing)*6)}.py-8{padding-block:calc(var(--spacing)*8)}.pt-0{padding-top:calc(var(--spacing)*0)}.pt-1{padding-top:calc(var(--spacing)*1)}.pt-2{padding-top:calc(var(--spacing)*2)}.pt-3{padding-top:calc(var(--spacing)*3)}.pt-4{padding-top:calc(var(--spacing)*4)}.pt-6{padding-top:calc(var(--spacing)*6)}.pt-8{padding-top:calc(var(--spacing)*8)}.pt-9{padding-top:calc(var(--spacing)*9)}.pr-2{padding-right:calc(var(--spacing)*2)}.pr-4{padding-right:calc(var(--spacing)*4)}.pr-8{padding-right:calc(var(--spacing)*8)}.pb-2{padding-bottom:calc(var(--spacing)*2)}.pb-3{padding-bottom:calc(var(--spacing)*3)}.pb-4{padding-bottom:calc(var(--spacing)*4)}.pb-6{padding-bottom:calc(var(--spacing)*6)}.pl-2{padding-left:calc(var(--spacing)*2)}.pl-3{padding-left:calc(var(--spacing)*3)}
.pl-4{padding-left:calc(var(--spacing)*4)}.pl-5{padding-left:calc(var(--spacing)*5)}.pl-8{padding-left:calc(var(--spacing)*8)}.text-center{text-align:center}.text-left{text-align:left}.text-right{text-align:right}.font-mono{font-family:var(--font-mono)}.text-2xl{font-size:var(--text-2xl);line-height:var(--tw-leading,var(--text-2xl--line-height))}.text-base{font-size:var(--text-base);line-height:var(--tw-leading,var(--text-base--line-height))}.text-lg{font-size:var(--text-lg);line-height:var(--tw-leading,var(--text-lg--line-height))}.text-sm{font-size:var(--text-sm);line-height:var(--tw-leading,var(--text-sm--line-height))}.text-xl{font-size:var(--text-xl);line-height:var(--tw-leading,var(--text-xl--line-height))}.text-xs{font-size:var(--text-xs);line-height:var(--tw-leading,var(--text-xs--line-height))}.text-\[10px\]{font-size:10px}.leading-none{--tw-leading:1;line-height:1}.leading-relaxed{--tw-leading:var(--leading-relaxed);line-height:var(--leading-relaxed)}.leading-tight{--tw-leading:var(--leading-tight);line-height:var(--leading-tight)}.font-bold{--tw-font-weight:var(--font-weight-bold);font-weight:var(--font-weight-bold)}.font-medium{--tw-font-weight:var(--font-weight-medium);font-weight:var(--font-weight-medium)}.font-semibold{--tw-font-weight:var(--font-weight-semibold);font-weight:var(--font-weight-semibold)}.tracking-tight{--tw-tracking:var(--tracking-tight);letter-spacing:var(--tracking-tight)}.tracking-wide{--tw-tracking:var(--tracking-wide);letter-spacing:var(--tracking-wide)}.tracking-wider{--tw-tracking:var(--tracking-wider);letter-spacing:var(--tracking-wider)}.tracking-widest{--tw-tracking:var(--tracking-widest);letter-spacing:var(--tracking-widest)}.break-words{overflow-wrap:break-word}.break-all{word-break:break-all}.whitespace-nowrap{white-space:nowrap}.whitespace-pre-wrap{white-space:pre-wrap}.text-\[\#643FB2\]{color:#643fb2}.text-amber-500{color:var(--color-amber-500)}.text-amber-600{color:var(--color-amber-600)}.text-amber-600\/80{color:#dd740
0cc}@supports (color:color-mix(in lab,red,red)){.text-amber-600\/80{color:color-mix(in oklab,var(--color-amber-600)80%,transparent)}}.text-amber-700{color:var(--color-amber-700)}.text-amber-800{color:var(--color-amber-800)}.text-amber-900{color:var(--color-amber-900)}.text-blue-500{color:var(--color-blue-500)}.text-blue-500\/80{color:#3080ffcc}@supports (color:color-mix(in lab,red,red)){.text-blue-500\/80{color:color-mix(in oklab,var(--color-blue-500)80%,transparent)}}.text-blue-600{color:var(--color-blue-600)}.text-blue-700{color:var(--color-blue-700)}.text-blue-800{color:var(--color-blue-800)}.text-blue-900{color:var(--color-blue-900)}.text-card-foreground{color:var(--card-foreground)}.text-current{color:currentColor}.text-destructive,.text-destructive\/70{color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.text-destructive\/70{color:color-mix(in oklab,var(--destructive)70%,transparent)}}.text-destructive\/90{color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.text-destructive\/90{color:color-mix(in oklab,var(--destructive)90%,transparent)}}.text-emerald-600{color:var(--color-emerald-600)}.text-foreground{color:var(--foreground)}.text-gray-300{color:var(--color-gray-300)}.text-gray-400{color:var(--color-gray-400)}.text-gray-500{color:var(--color-gray-500)}.text-gray-600{color:var(--color-gray-600)}.text-gray-700{color:var(--color-gray-700)}.text-gray-900{color:var(--color-gray-900)}.text-green-500{color:var(--color-green-500)}.text-green-600{color:var(--color-green-600)}.text-green-700{color:var(--color-green-700)}.text-green-800{color:var(--color-green-800)}.text-green-900{color:var(--color-green-900)}.text-muted-foreground,.text-muted-foreground\/60{color:var(--muted-foreground)}@supports (color:color-mix(in lab,red,red)){.text-muted-foreground\/60{color:color-mix(in oklab,var(--muted-foreground)60%,transparent)}}.text-muted-foreground\/70{color:var(--muted-foreground)}@supports (color:color-mix(in 
lab,red,red)){.text-muted-foreground\/70{color:color-mix(in oklab,var(--muted-foreground)70%,transparent)}}.text-muted-foreground\/80{color:var(--muted-foreground)}@supports (color:color-mix(in lab,red,red)){.text-muted-foreground\/80{color:color-mix(in oklab,var(--muted-foreground)80%,transparent)}}.text-orange-500{color:var(--color-orange-500)}.text-orange-600{color:var(--color-orange-600)}.text-orange-700{color:var(--color-orange-700)}.text-orange-800{color:var(--color-orange-800)}.text-orange-900{color:var(--color-orange-900)}.text-popover-foreground{color:var(--popover-foreground)}.text-primary{color:var(--primary)}.text-primary-foreground{color:var(--primary-foreground)}.text-purple-500{color:var(--color-purple-500)}.text-purple-600{color:var(--color-purple-600)}.text-purple-800{color:var(--color-purple-800)}.text-red-400{color:var(--color-red-400)}.text-red-500{color:var(--color-red-500)}.text-red-600{color:var(--color-red-600)}.text-red-700{color:var(--color-red-700)}.text-red-800{color:var(--color-red-800)}.text-secondary-foreground{color:var(--secondary-foreground)}.text-white{color:var(--color-white)}.text-yellow-600{color:var(--color-yellow-600)}.text-yellow-700{color:var(--color-yellow-700)}.capitalize{text-transform:capitalize}.lowercase{text-transform:lowercase}.uppercase{text-transform:uppercase}.italic{font-style:italic}.underline-offset-4{text-underline-offset:4px}.opacity-0{opacity:0}.opacity-50{opacity:.5}.opacity-60{opacity:.6}.opacity-70{opacity:.7}.opacity-80{opacity:.8}.opacity-100{opacity:1}.shadow{--tw-shadow:0 1px 3px 0 var(--tw-shadow-color,#0000001a),0 1px 2px -1px var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.shadow-lg{--tw-shadow:0 10px 15px -3px var(--tw-shadow-color,#0000001a),0 4px 6px -4px 
var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.shadow-md{--tw-shadow:0 4px 6px -1px var(--tw-shadow-color,#0000001a),0 2px 4px -2px var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.shadow-sm{--tw-shadow:0 1px 3px 0 var(--tw-shadow-color,#0000001a),0 1px 2px -1px var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.shadow-xs{--tw-shadow:0 1px 2px 0 var(--tw-shadow-color,#0000000d);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.ring-0{--tw-ring-shadow:var(--tw-ring-inset,)0 0 0 calc(0px + var(--tw-ring-offset-width))var(--tw-ring-color,currentcolor);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.ring-2{--tw-ring-shadow:var(--tw-ring-inset,)0 0 0 calc(2px + var(--tw-ring-offset-width))var(--tw-ring-color,currentcolor);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.shadow-\[\#643FB2\]\/20{--tw-shadow-color:#643fb233}@supports (color:color-mix(in lab,red,red)){.shadow-\[\#643FB2\]\/20{--tw-shadow-color:color-mix(in oklab,oklab(47.4316% .069152 -.159147/.2) var(--tw-shadow-alpha),transparent)}}.shadow-green-500\/20{--tw-shadow-color:#00c75833}@supports (color:color-mix(in lab,red,red)){.shadow-green-500\/20{--tw-shadow-color:color-mix(in oklab,color-mix(in oklab,var(--color-green-500)20%,transparent)var(--tw-shadow-alpha),transparent)}}.shadow-orange-500\/20{--tw-shadow-color:#fe6e0033}@supports (color:color-mix(in lab,red,red)){.shadow-orange-500\/20{--tw-shadow-color:color-mix(in 
oklab,color-mix(in oklab,var(--color-orange-500)20%,transparent)var(--tw-shadow-alpha),transparent)}}.shadow-primary\/25{--tw-shadow-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.shadow-primary\/25{--tw-shadow-color:color-mix(in oklab,color-mix(in oklab,var(--primary)25%,transparent)var(--tw-shadow-alpha),transparent)}}.shadow-red-500\/20{--tw-shadow-color:#fb2c3633}@supports (color:color-mix(in lab,red,red)){.shadow-red-500\/20{--tw-shadow-color:color-mix(in oklab,color-mix(in oklab,var(--color-red-500)20%,transparent)var(--tw-shadow-alpha),transparent)}}.ring-blue-500{--tw-ring-color:var(--color-blue-500)}.ring-blue-500\/20{--tw-ring-color:#3080ff33}@supports (color:color-mix(in lab,red,red)){.ring-blue-500\/20{--tw-ring-color:color-mix(in oklab,var(--color-blue-500)20%,transparent)}}.ring-offset-2{--tw-ring-offset-width:2px;--tw-ring-offset-shadow:var(--tw-ring-inset,)0 0 0 var(--tw-ring-offset-width)var(--tw-ring-offset-color)}.ring-offset-background{--tw-ring-offset-color:var(--background)}.outline-hidden{--tw-outline-style:none;outline-style:none}@media (forced-colors:active){.outline-hidden{outline-offset:2px;outline:2px solid #0000}}.outline{outline-style:var(--tw-outline-style);outline-width:1px}.drop-shadow-lg{--tw-drop-shadow-size:drop-shadow(0 4px 4px 
var(--tw-drop-shadow-color,#00000026));--tw-drop-shadow:drop-shadow(var(--drop-shadow-lg));filter:var(--tw-blur,)var(--tw-brightness,)var(--tw-contrast,)var(--tw-grayscale,)var(--tw-hue-rotate,)var(--tw-invert,)var(--tw-saturate,)var(--tw-sepia,)var(--tw-drop-shadow,)}.filter{filter:var(--tw-blur,)var(--tw-brightness,)var(--tw-contrast,)var(--tw-grayscale,)var(--tw-hue-rotate,)var(--tw-invert,)var(--tw-saturate,)var(--tw-sepia,)var(--tw-drop-shadow,)}.backdrop-blur-sm{--tw-backdrop-blur:blur(var(--blur-sm));-webkit-backdrop-filter:var(--tw-backdrop-blur,)var(--tw-backdrop-brightness,)var(--tw-backdrop-contrast,)var(--tw-backdrop-grayscale,)var(--tw-backdrop-hue-rotate,)var(--tw-backdrop-invert,)var(--tw-backdrop-opacity,)var(--tw-backdrop-saturate,)var(--tw-backdrop-sepia,);backdrop-filter:var(--tw-backdrop-blur,)var(--tw-backdrop-brightness,)var(--tw-backdrop-contrast,)var(--tw-backdrop-grayscale,)var(--tw-backdrop-hue-rotate,)var(--tw-backdrop-invert,)var(--tw-backdrop-opacity,)var(--tw-backdrop-saturate,)var(--tw-backdrop-sepia,)}.transition{transition-property:color,background-color,border-color,outline-color,text-decoration-color,fill,stroke,--tw-gradient-from,--tw-gradient-via,--tw-gradient-to,opacity,box-shadow,transform,translate,scale,rotate,filter,-webkit-backdrop-filter,backdrop-filter,display,visibility,content-visibility,overlay,pointer-events;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-\[color\,box-shadow\]{transition-property:color,box-shadow;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-all{transition-property:all;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-colors{transition-proper
ty:color,background-color,border-color,outline-color,text-decoration-color,fill,stroke,--tw-gradient-from,--tw-gradient-via,--tw-gradient-to;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-opacity{transition-property:opacity;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-shadow{transition-property:box-shadow;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-transform{transition-property:transform,translate,scale,rotate;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-none{transition-property:none}.duration-150{--tw-duration:.15s;transition-duration:.15s}.duration-200{--tw-duration:.2s;transition-duration:.2s}.duration-300{--tw-duration:.3s;transition-duration:.3s}.ease-in-out{--tw-ease:var(--ease-in-out);transition-timing-function:var(--ease-in-out)}.ease-out{--tw-ease:var(--ease-out);transition-timing-function:var(--ease-out)}.fade-in-0{--tw-enter-opacity:0}.outline-none{--tw-outline-style:none;outline-style:none}.select-none{-webkit-user-select:none;user-select:none}.zoom-in-95{--tw-enter-scale:.95}.\[animation-delay\:-0\.3s\]{animation-delay:-.3s}.\[animation-delay\:-0\.15s\]{animation-delay:-.15s}.fade-in{--tw-enter-opacity:0}.running{animation-play-state:running}.slide-in-from-bottom-2{--tw-enter-translate-y:calc(2*var(--spacing))}.group-open\:rotate-90:is(:where(.group):is([open],:popover-open,:open) *){rotate:90deg}.group-open\:rotate-180:is(:where(.group):is([open],:popover-open,:open) *){rotate:180deg}@media (hover:hover){.group-hover\:bg-primary:is(:where(.group):hover 
*){background-color:var(--primary)}.group-hover\:opacity-100:is(:where(.group):hover *){opacity:1}.group-hover\:shadow-md:is(:where(.group):hover *){--tw-shadow:0 4px 6px -1px var(--tw-shadow-color,#0000001a),0 2px 4px -2px var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.group-hover\:shadow-primary\/20:is(:where(.group):hover *){--tw-shadow-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.group-hover\:shadow-primary\/20:is(:where(.group):hover *){--tw-shadow-color:color-mix(in oklab,color-mix(in oklab,var(--primary)20%,transparent)var(--tw-shadow-alpha),transparent)}}}.group-data-\[disabled\=true\]\:pointer-events-none:is(:where(.group)[data-disabled=true] *){pointer-events:none}.group-data-\[disabled\=true\]\:opacity-50:is(:where(.group)[data-disabled=true] *){opacity:.5}.peer-disabled\:cursor-not-allowed:is(:where(.peer):disabled~*){cursor:not-allowed}.peer-disabled\:opacity-50:is(:where(.peer):disabled~*){opacity:.5}.selection\:bg-primary ::selection{background-color:var(--primary)}.selection\:bg-primary::selection{background-color:var(--primary)}.selection\:text-primary-foreground 
::selection{color:var(--primary-foreground)}.selection\:text-primary-foreground::selection{color:var(--primary-foreground)}.file\:inline-flex::file-selector-button{display:inline-flex}.file\:h-7::file-selector-button{height:calc(var(--spacing)*7)}.file\:border-0::file-selector-button{border-style:var(--tw-border-style);border-width:0}.file\:bg-transparent::file-selector-button{background-color:#0000}.file\:text-sm::file-selector-button{font-size:var(--text-sm);line-height:var(--tw-leading,var(--text-sm--line-height))}.file\:font-medium::file-selector-button{--tw-font-weight:var(--font-weight-medium);font-weight:var(--font-weight-medium)}.file\:text-foreground::file-selector-button{color:var(--foreground)}.placeholder\:text-muted-foreground::placeholder{color:var(--muted-foreground)}.first\:mt-0:first-child{margin-top:calc(var(--spacing)*0)}.last\:border-0:last-child{border-style:var(--tw-border-style);border-width:0}.last\:border-r-0:last-child{border-right-style:var(--tw-border-style);border-right-width:0}.last\:border-b-0:last-child{border-bottom-style:var(--tw-border-style);border-bottom-width:0}@media (hover:hover){.hover\:scale-y-\[1\.15\]:hover{--tw-scale-y:1.15;scale:var(--tw-scale-x)var(--tw-scale-y)}.hover\:border-gray-300:hover{border-color:var(--color-gray-300)}.hover\:border-muted-foreground\/30:hover{border-color:var(--muted-foreground)}@supports (color:color-mix(in lab,red,red)){.hover\:border-muted-foreground\/30:hover{border-color:color-mix(in oklab,var(--muted-foreground)30%,transparent)}}.hover\:bg-accent:hover,.hover\:bg-accent\/50:hover{background-color:var(--accent)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-accent\/50:hover{background-color:color-mix(in oklab,var(--accent)50%,transparent)}}.hover\:bg-amber-100:hover{background-color:var(--color-amber-100)}.hover\:bg-blue-700:hover{background-color:var(--color-blue-700)}.hover\:bg-destructive\/80:hover{background-color:var(--destructive)}@supports (color:color-mix(in 
lab,red,red)){.hover\:bg-destructive\/80:hover{background-color:color-mix(in oklab,var(--destructive)80%,transparent)}}.hover\:bg-destructive\/90:hover{background-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-destructive\/90:hover{background-color:color-mix(in oklab,var(--destructive)90%,transparent)}}.hover\:bg-muted:hover,.hover\:bg-muted\/30:hover{background-color:var(--muted)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-muted\/30:hover{background-color:color-mix(in oklab,var(--muted)30%,transparent)}}.hover\:bg-muted\/50:hover{background-color:var(--muted)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-muted\/50:hover{background-color:color-mix(in oklab,var(--muted)50%,transparent)}}.hover\:bg-muted\/70:hover{background-color:var(--muted)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-muted\/70:hover{background-color:color-mix(in oklab,var(--muted)70%,transparent)}}.hover\:bg-orange-100:hover{background-color:var(--color-orange-100)}.hover\:bg-primary\/20:hover{background-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-primary\/20:hover{background-color:color-mix(in oklab,var(--primary)20%,transparent)}}.hover\:bg-primary\/80:hover{background-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-primary\/80:hover{background-color:color-mix(in oklab,var(--primary)80%,transparent)}}.hover\:bg-primary\/90:hover{background-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-primary\/90:hover{background-color:color-mix(in oklab,var(--primary)90%,transparent)}}.hover\:bg-red-50:hover{background-color:var(--color-red-50)}.hover\:bg-secondary\/80:hover{background-color:var(--secondary)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-secondary\/80:hover{background-color:color-mix(in 
oklab,var(--secondary)80%,transparent)}}.hover\:bg-white:hover{background-color:var(--color-white)}.hover\:text-accent-foreground:hover{color:var(--accent-foreground)}.hover\:text-destructive\/80:hover{color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.hover\:text-destructive\/80:hover{color:color-mix(in oklab,var(--destructive)80%,transparent)}}.hover\:text-foreground:hover{color:var(--foreground)}.hover\:text-orange-900:hover{color:var(--color-orange-900)}.hover\:text-primary:hover{color:var(--primary)}.hover\:text-red-600:hover{color:var(--color-red-600)}.hover\:underline:hover{text-decoration-line:underline}.hover\:opacity-70:hover{opacity:.7}.hover\:opacity-100:hover{opacity:1}.hover\:shadow-md:hover{--tw-shadow:0 4px 6px -1px var(--tw-shadow-color,#0000001a),0 2px 4px -2px var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.hover\:brightness-110:hover{--tw-brightness:brightness(110%);filter:var(--tw-blur,)var(--tw-brightness,)var(--tw-contrast,)var(--tw-grayscale,)var(--tw-hue-rotate,)var(--tw-invert,)var(--tw-saturate,)var(--tw-sepia,)var(--tw-drop-shadow,)}}.focus\:bg-accent:focus{background-color:var(--accent)}.focus\:text-accent-foreground:focus{color:var(--accent-foreground)}.focus\:ring-2:focus{--tw-ring-shadow:var(--tw-ring-inset,)0 0 0 calc(2px + var(--tw-ring-offset-width))var(--tw-ring-color,currentcolor);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.focus\:ring-ring:focus{--tw-ring-color:var(--ring)}.focus\:ring-offset-2:focus{--tw-ring-offset-width:2px;--tw-ring-offset-shadow:var(--tw-ring-inset,)0 0 0 
var(--tw-ring-offset-width)var(--tw-ring-offset-color)}.focus\:outline-none:focus{--tw-outline-style:none;outline-style:none}.focus-visible\:border-ring:focus-visible{border-color:var(--ring)}.focus-visible\:ring-1:focus-visible{--tw-ring-shadow:var(--tw-ring-inset,)0 0 0 calc(1px + var(--tw-ring-offset-width))var(--tw-ring-color,currentcolor);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.focus-visible\:ring-2:focus-visible{--tw-ring-shadow:var(--tw-ring-inset,)0 0 0 calc(2px + var(--tw-ring-offset-width))var(--tw-ring-color,currentcolor);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.focus-visible\:ring-\[3px\]:focus-visible{--tw-ring-shadow:var(--tw-ring-inset,)0 0 0 calc(3px + var(--tw-ring-offset-width))var(--tw-ring-color,currentcolor);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.focus-visible\:ring-destructive\/20:focus-visible{--tw-ring-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.focus-visible\:ring-destructive\/20:focus-visible{--tw-ring-color:color-mix(in oklab,var(--destructive)20%,transparent)}}.focus-visible\:ring-ring:focus-visible,.focus-visible\:ring-ring\/50:focus-visible{--tw-ring-color:var(--ring)}@supports (color:color-mix(in lab,red,red)){.focus-visible\:ring-ring\/50:focus-visible{--tw-ring-color:color-mix(in oklab,var(--ring)50%,transparent)}}.focus-visible\:ring-offset-2:focus-visible{--tw-ring-offset-width:2px;--tw-ring-offset-shadow:var(--tw-ring-inset,)0 0 0 
var(--tw-ring-offset-width)var(--tw-ring-offset-color)}.focus-visible\:ring-offset-background:focus-visible{--tw-ring-offset-color:var(--background)}.focus-visible\:outline-none:focus-visible{--tw-outline-style:none;outline-style:none}.disabled\:pointer-events-none:disabled{pointer-events:none}.disabled\:cursor-not-allowed:disabled{cursor:not-allowed}.disabled\:opacity-50:disabled{opacity:.5}.has-data-\[slot\=card-action\]\:grid-cols-\[1fr_auto\]:has([data-slot=card-action]){grid-template-columns:1fr auto}.has-\[\>svg\]\:px-2\.5:has(>svg){padding-inline:calc(var(--spacing)*2.5)}.has-\[\>svg\]\:px-3:has(>svg){padding-inline:calc(var(--spacing)*3)}.has-\[\>svg\]\:px-4:has(>svg){padding-inline:calc(var(--spacing)*4)}.aria-invalid\:border-destructive[aria-invalid=true]{border-color:var(--destructive)}.aria-invalid\:ring-destructive\/20[aria-invalid=true]{--tw-ring-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.aria-invalid\:ring-destructive\/20[aria-invalid=true]{--tw-ring-color:color-mix(in 
oklab,var(--destructive)20%,transparent)}}.data-\[disabled\]\:pointer-events-none[data-disabled]{pointer-events:none}.data-\[disabled\]\:opacity-50[data-disabled]{opacity:.5}.data-\[inset\]\:pl-8[data-inset]{padding-left:calc(var(--spacing)*8)}.data-\[placeholder\]\:text-muted-foreground[data-placeholder]{color:var(--muted-foreground)}.data-\[side\=bottom\]\:translate-y-1[data-side=bottom]{--tw-translate-y:calc(var(--spacing)*1);translate:var(--tw-translate-x)var(--tw-translate-y)}.data-\[side\=bottom\]\:slide-in-from-top-2[data-side=bottom]{--tw-enter-translate-y:calc(2*var(--spacing)*-1)}.data-\[side\=left\]\:-translate-x-1[data-side=left]{--tw-translate-x:calc(var(--spacing)*-1);translate:var(--tw-translate-x)var(--tw-translate-y)}.data-\[side\=left\]\:slide-in-from-right-2[data-side=left]{--tw-enter-translate-x:calc(2*var(--spacing))}.data-\[side\=right\]\:translate-x-1[data-side=right]{--tw-translate-x:calc(var(--spacing)*1);translate:var(--tw-translate-x)var(--tw-translate-y)}.data-\[side\=right\]\:slide-in-from-left-2[data-side=right]{--tw-enter-translate-x:calc(2*var(--spacing)*-1)}.data-\[side\=top\]\:-translate-y-1[data-side=top]{--tw-translate-y:calc(var(--spacing)*-1);translate:var(--tw-translate-x)var(--tw-translate-y)}.data-\[side\=top\]\:slide-in-from-bottom-2[data-side=top]{--tw-enter-translate-y:calc(2*var(--spacing))}.data-\[size\=default\]\:h-9[data-size=default]{height:calc(var(--spacing)*9)}.data-\[size\=sm\]\:h-8[data-size=sm]{height:calc(var(--spacing)*8)}:is(.\*\:data-\[slot\=select-value\]\:line-clamp-1>*)[data-slot=select-value]{-webkit-line-clamp:1;-webkit-box-orient:vertical;display:-webkit-box;overflow:hidden}:is(.\*\:data-\[slot\=select-value\]\:flex>*)[data-slot=select-value]{display:flex}:is(.\*\:data-\[slot\=select-value\]\:items-center>*)[data-slot=select-value]{align-items:center}:is(.\*\:data-\[slot\=select-value\]\:gap-2>*)[data-slot=select-value]{gap:calc(var(--spacing)*2)}.data-\[state\=active\]\:bg-background[data-state=active
]{background-color:var(--background)}.data-\[state\=active\]\:text-foreground[data-state=active]{color:var(--foreground)}.data-\[state\=active\]\:shadow[data-state=active]{--tw-shadow:0 1px 3px 0 var(--tw-shadow-color,#0000001a),0 1px 2px -1px var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.data-\[state\=checked\]\:translate-x-4[data-state=checked]{--tw-translate-x:calc(var(--spacing)*4);translate:var(--tw-translate-x)var(--tw-translate-y)}.data-\[state\=checked\]\:border-primary[data-state=checked]{border-color:var(--primary)}.data-\[state\=checked\]\:bg-primary[data-state=checked]{background-color:var(--primary)}.data-\[state\=checked\]\:text-primary-foreground[data-state=checked]{color:var(--primary-foreground)}.data-\[state\=closed\]\:animate-out[data-state=closed]{animation:exit var(--tw-animation-duration,var(--tw-duration,.15s))var(--tw-ease,ease)var(--tw-animation-delay,0s)var(--tw-animation-iteration-count,1)var(--tw-animation-direction,normal)var(--tw-animation-fill-mode,none)}.data-\[state\=closed\]\:fade-out-0[data-state=closed]{--tw-exit-opacity:0}.data-\[state\=closed\]\:zoom-out-95[data-state=closed]{--tw-exit-scale:.95}.data-\[state\=open\]\:animate-in[data-state=open]{animation:enter 
var(--tw-animation-duration,var(--tw-duration,.15s))var(--tw-ease,ease)var(--tw-animation-delay,0s)var(--tw-animation-iteration-count,1)var(--tw-animation-direction,normal)var(--tw-animation-fill-mode,none)}.data-\[state\=open\]\:bg-accent[data-state=open]{background-color:var(--accent)}.data-\[state\=open\]\:text-accent-foreground[data-state=open]{color:var(--accent-foreground)}.data-\[state\=open\]\:fade-in-0[data-state=open]{--tw-enter-opacity:0}.data-\[state\=open\]\:zoom-in-95[data-state=open]{--tw-enter-scale:.95}.data-\[state\=unchecked\]\:translate-x-0[data-state=unchecked]{--tw-translate-x:calc(var(--spacing)*0);translate:var(--tw-translate-x)var(--tw-translate-y)}.data-\[state\=unchecked\]\:bg-input[data-state=unchecked]{background-color:var(--input)}.data-\[variant\=destructive\]\:text-destructive[data-variant=destructive]{color:var(--destructive)}.data-\[variant\=destructive\]\:focus\:bg-destructive\/10[data-variant=destructive]:focus{background-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.data-\[variant\=destructive\]\:focus\:bg-destructive\/10[data-variant=destructive]:focus{background-color:color-mix(in oklab,var(--destructive)10%,transparent)}}.data-\[variant\=destructive\]\:focus\:text-destructive[data-variant=destructive]:focus{color:var(--destructive)}@media (min-width:40rem){.sm\:w-64{width:calc(var(--spacing)*64)}.sm\:max-w-lg{max-width:var(--container-lg)}.sm\:flex-none{flex:none}.sm\:grid-cols-2{grid-template-columns:repeat(2,minmax(0,1fr))}.sm\:flex-row{flex-direction:row}.sm\:items-center{align-items:center}}@media (min-width:48rem){.md\:col-span-2{grid-column:span 2/span 2}.md\:col-start-2{grid-column-start:2}.md\:inline{display:inline}.md\:max-w-2xl{max-width:var(--container-2xl)}.md\:grid-cols-2{grid-template-columns:repeat(2,minmax(0,1fr))}.md\:text-sm{font-size:var(--text-sm);line-height:var(--tw-leading,var(--text-sm--line-height))}}@media (min-width:64rem){.lg\:col-span-3{grid-column:span 3/span 
3}.lg\:max-w-4xl{max-width:var(--container-4xl)}.lg\:grid-cols-3{grid-template-columns:repeat(3,minmax(0,1fr))}.lg\:flex-row{flex-direction:row}.lg\:items-center{align-items:center}.lg\:justify-between{justify-content:space-between}}@media (min-width:80rem){.xl\:col-span-2{grid-column:span 2/span 2}.xl\:col-span-4{grid-column:span 4/span 4}.xl\:max-w-5xl{max-width:var(--container-5xl)}.xl\:grid-cols-4{grid-template-columns:repeat(4,minmax(0,1fr))}}.dark\:scale-0:is(.dark *){--tw-scale-x:0%;--tw-scale-y:0%;--tw-scale-z:0%;scale:var(--tw-scale-x)var(--tw-scale-y)}.dark\:scale-100:is(.dark *){--tw-scale-x:100%;--tw-scale-y:100%;--tw-scale-z:100%;scale:var(--tw-scale-x)var(--tw-scale-y)}.dark\:-rotate-90:is(.dark *){rotate:-90deg}.dark\:rotate-0:is(.dark *){rotate:none}.dark\:\!border-gray-500:is(.dark *){border-color:var(--color-gray-500)!important}.dark\:\!border-gray-600:is(.dark *){border-color:var(--color-gray-600)!important}.dark\:border-\[\#8B5CF6\]:is(.dark *){border-color:#8b5cf6}.dark\:border-\[\#8B5CF6\]\/20:is(.dark *){border-color:#8b5cf633}.dark\:border-\[\#8B5CF6\]\/30:is(.dark *){border-color:#8b5cf64d}.dark\:border-amber-800:is(.dark *){border-color:var(--color-amber-800)}.dark\:border-amber-900:is(.dark *){border-color:var(--color-amber-900)}.dark\:border-blue-400:is(.dark *){border-color:var(--color-blue-400)}.dark\:border-blue-500:is(.dark *){border-color:var(--color-blue-500)}.dark\:border-blue-700:is(.dark *){border-color:var(--color-blue-700)}.dark\:border-blue-800:is(.dark *){border-color:var(--color-blue-800)}.dark\:border-gray-500:is(.dark *){border-color:var(--color-gray-500)}.dark\:border-gray-600:is(.dark *){border-color:var(--color-gray-600)}.dark\:border-gray-700:is(.dark *){border-color:var(--color-gray-700)}.dark\:border-green-400:is(.dark *){border-color:var(--color-green-400)}.dark\:border-green-800:is(.dark *){border-color:var(--color-green-800)}.dark\:border-input:is(.dark 
*){border-color:var(--input)}.dark\:border-orange-400:is(.dark *){border-color:var(--color-orange-400)}.dark\:border-orange-700:is(.dark *){border-color:var(--color-orange-700)}.dark\:border-orange-800:is(.dark *){border-color:var(--color-orange-800)}.dark\:border-red-400:is(.dark *){border-color:var(--color-red-400)}.dark\:border-red-800:is(.dark *){border-color:var(--color-red-800)}.dark\:\!bg-gray-800\/90:is(.dark *){background-color:#1e2939e6!important}@supports (color:color-mix(in lab,red,red)){.dark\:\!bg-gray-800\/90:is(.dark *){background-color:color-mix(in oklab,var(--color-gray-800)90%,transparent)!important}}.dark\:bg-\[\#8B5CF6\]:is(.dark *){background-color:#8b5cf6}.dark\:bg-\[\#8B5CF6\]\/10:is(.dark *){background-color:#8b5cf61a}.dark\:bg-amber-600:is(.dark *){background-color:var(--color-amber-600)}.dark\:bg-amber-950\/20:is(.dark *){background-color:#46190133}@supports (color:color-mix(in lab,red,red)){.dark\:bg-amber-950\/20:is(.dark *){background-color:color-mix(in oklab,var(--color-amber-950)20%,transparent)}}.dark\:bg-amber-950\/50:is(.dark *){background-color:#46190180}@supports (color:color-mix(in lab,red,red)){.dark\:bg-amber-950\/50:is(.dark *){background-color:color-mix(in oklab,var(--color-amber-950)50%,transparent)}}.dark\:bg-blue-500\/10:is(.dark *){background-color:#3080ff1a}@supports (color:color-mix(in lab,red,red)){.dark\:bg-blue-500\/10:is(.dark *){background-color:color-mix(in oklab,var(--color-blue-500)10%,transparent)}}.dark\:bg-blue-600:is(.dark *){background-color:var(--color-blue-600)}.dark\:bg-blue-900:is(.dark *){background-color:var(--color-blue-900)}.dark\:bg-blue-900\/20:is(.dark *){background-color:#1c398e33}@supports (color:color-mix(in lab,red,red)){.dark\:bg-blue-900\/20:is(.dark *){background-color:color-mix(in oklab,var(--color-blue-900)20%,transparent)}}.dark\:bg-blue-950\/20:is(.dark *){background-color:#16245633}@supports (color:color-mix(in lab,red,red)){.dark\:bg-blue-950\/20:is(.dark 
*){background-color:color-mix(in oklab,var(--color-blue-950)20%,transparent)}}.dark\:bg-blue-950\/40:is(.dark *){background-color:#16245666}@supports (color:color-mix(in lab,red,red)){.dark\:bg-blue-950\/40:is(.dark *){background-color:color-mix(in oklab,var(--color-blue-950)40%,transparent)}}.dark\:bg-blue-950\/50:is(.dark *){background-color:#16245680}@supports (color:color-mix(in lab,red,red)){.dark\:bg-blue-950\/50:is(.dark *){background-color:color-mix(in oklab,var(--color-blue-950)50%,transparent)}}.dark\:bg-blue-950\/95:is(.dark *){background-color:#162456f2}@supports (color:color-mix(in lab,red,red)){.dark\:bg-blue-950\/95:is(.dark *){background-color:color-mix(in oklab,var(--color-blue-950)95%,transparent)}}.dark\:bg-card:is(.dark *){background-color:var(--card)}.dark\:bg-destructive\/60:is(.dark *){background-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.dark\:bg-destructive\/60:is(.dark *){background-color:color-mix(in oklab,var(--destructive)60%,transparent)}}.dark\:bg-emerald-600:is(.dark *){background-color:var(--color-emerald-600)}.dark\:bg-foreground\/10:is(.dark *){background-color:var(--foreground)}@supports (color:color-mix(in lab,red,red)){.dark\:bg-foreground\/10:is(.dark *){background-color:color-mix(in oklab,var(--foreground)10%,transparent)}}.dark\:bg-gray-500:is(.dark *){background-color:var(--color-gray-500)}.dark\:bg-gray-800:is(.dark *){background-color:var(--color-gray-800)}.dark\:bg-gray-800\/90:is(.dark *){background-color:#1e2939e6}@supports (color:color-mix(in lab,red,red)){.dark\:bg-gray-800\/90:is(.dark *){background-color:color-mix(in oklab,var(--color-gray-800)90%,transparent)}}.dark\:bg-gray-900:is(.dark *){background-color:var(--color-gray-900)}.dark\:bg-gray-900\/30:is(.dark *){background-color:#1018284d}@supports (color:color-mix(in lab,red,red)){.dark\:bg-gray-900\/30:is(.dark *){background-color:color-mix(in oklab,var(--color-gray-900)30%,transparent)}}.dark\:bg-green-400:is(.dark 
*){background-color:var(--color-green-400)}.dark\:bg-green-500\/10:is(.dark *){background-color:#00c7581a}@supports (color:color-mix(in lab,red,red)){.dark\:bg-green-500\/10:is(.dark *){background-color:color-mix(in oklab,var(--color-green-500)10%,transparent)}}.dark\:bg-green-900:is(.dark *){background-color:var(--color-green-900)}.dark\:bg-green-950:is(.dark *){background-color:var(--color-green-950)}.dark\:bg-green-950\/20:is(.dark *){background-color:#032e1533}@supports (color:color-mix(in lab,red,red)){.dark\:bg-green-950\/20:is(.dark *){background-color:color-mix(in oklab,var(--color-green-950)20%,transparent)}}.dark\:bg-green-950\/50:is(.dark *){background-color:#032e1580}@supports (color:color-mix(in lab,red,red)){.dark\:bg-green-950\/50:is(.dark *){background-color:color-mix(in oklab,var(--color-green-950)50%,transparent)}}.dark\:bg-input\/30:is(.dark *){background-color:var(--input)}@supports (color:color-mix(in lab,red,red)){.dark\:bg-input\/30:is(.dark *){background-color:color-mix(in oklab,var(--input)30%,transparent)}}.dark\:bg-orange-400:is(.dark *){background-color:var(--color-orange-400)}.dark\:bg-orange-500\/10:is(.dark *){background-color:#fe6e001a}@supports (color:color-mix(in lab,red,red)){.dark\:bg-orange-500\/10:is(.dark *){background-color:color-mix(in oklab,var(--color-orange-500)10%,transparent)}}.dark\:bg-orange-600:is(.dark *){background-color:var(--color-orange-600)}.dark\:bg-orange-900:is(.dark *){background-color:var(--color-orange-900)}.dark\:bg-orange-950:is(.dark *){background-color:var(--color-orange-950)}.dark\:bg-orange-950\/20:is(.dark *){background-color:#44130633}@supports (color:color-mix(in lab,red,red)){.dark\:bg-orange-950\/20:is(.dark *){background-color:color-mix(in oklab,var(--color-orange-950)20%,transparent)}}.dark\:bg-orange-950\/30:is(.dark *){background-color:#4413064d}@supports (color:color-mix(in lab,red,red)){.dark\:bg-orange-950\/30:is(.dark *){background-color:color-mix(in 
oklab,var(--color-orange-950)30%,transparent)}}.dark\:bg-orange-950\/50:is(.dark *){background-color:#44130680}@supports (color:color-mix(in lab,red,red)){.dark\:bg-orange-950\/50:is(.dark *){background-color:color-mix(in oklab,var(--color-orange-950)50%,transparent)}}.dark\:bg-purple-600:is(.dark *){background-color:var(--color-purple-600)}.dark\:bg-purple-900:is(.dark *){background-color:var(--color-purple-900)}.dark\:bg-red-400:is(.dark *){background-color:var(--color-red-400)}.dark\:bg-red-900:is(.dark *){background-color:var(--color-red-900)}.dark\:bg-red-950:is(.dark *){background-color:var(--color-red-950)}.dark\:bg-red-950\/20:is(.dark *){background-color:#46080933}@supports (color:color-mix(in lab,red,red)){.dark\:bg-red-950\/20:is(.dark *){background-color:color-mix(in oklab,var(--color-red-950)20%,transparent)}}.dark\:text-\[\#8B5CF6\]:is(.dark *){color:#8b5cf6}.dark\:text-amber-100:is(.dark *){color:var(--color-amber-100)}.dark\:text-amber-200:is(.dark *){color:var(--color-amber-200)}.dark\:text-amber-300:is(.dark *){color:var(--color-amber-300)}.dark\:text-amber-400:is(.dark *){color:var(--color-amber-400)}.dark\:text-amber-400\/80:is(.dark *){color:#fcbb00cc}@supports (color:color-mix(in lab,red,red)){.dark\:text-amber-400\/80:is(.dark *){color:color-mix(in oklab,var(--color-amber-400)80%,transparent)}}.dark\:text-amber-500:is(.dark *){color:var(--color-amber-500)}.dark\:text-blue-100:is(.dark *){color:var(--color-blue-100)}.dark\:text-blue-200:is(.dark *){color:var(--color-blue-200)}.dark\:text-blue-300:is(.dark *){color:var(--color-blue-300)}.dark\:text-blue-400:is(.dark *){color:var(--color-blue-400)}.dark\:text-blue-400\/70:is(.dark *){color:#54a2ffb3}@supports (color:color-mix(in lab,red,red)){.dark\:text-blue-400\/70:is(.dark *){color:color-mix(in oklab,var(--color-blue-400)70%,transparent)}}.dark\:text-blue-500:is(.dark *){color:var(--color-blue-500)}.dark\:text-emerald-400:is(.dark 
*){color:var(--color-emerald-400)}.dark\:text-gray-100:is(.dark *){color:var(--color-gray-100)}.dark\:text-gray-300:is(.dark *){color:var(--color-gray-300)}.dark\:text-gray-400:is(.dark *){color:var(--color-gray-400)}.dark\:text-green-100:is(.dark *){color:var(--color-green-100)}.dark\:text-green-200:is(.dark *){color:var(--color-green-200)}.dark\:text-green-300:is(.dark *){color:var(--color-green-300)}.dark\:text-green-400:is(.dark *){color:var(--color-green-400)}.dark\:text-orange-100:is(.dark *){color:var(--color-orange-100)}.dark\:text-orange-200:is(.dark *){color:var(--color-orange-200)}.dark\:text-orange-300:is(.dark *){color:var(--color-orange-300)}.dark\:text-orange-400:is(.dark *){color:var(--color-orange-400)}.dark\:text-purple-200:is(.dark *){color:var(--color-purple-200)}.dark\:text-purple-400:is(.dark *){color:var(--color-purple-400)}.dark\:text-red-200:is(.dark *){color:var(--color-red-200)}.dark\:text-red-400:is(.dark *){color:var(--color-red-400)}.dark\:text-yellow-400:is(.dark *){color:var(--color-yellow-400)}.dark\:opacity-30:is(.dark *){opacity:.3}@media (hover:hover){.dark\:hover\:border-gray-600:is(.dark *):hover{border-color:var(--color-gray-600)}.dark\:hover\:bg-accent\/50:is(.dark *):hover{background-color:var(--accent)}@supports (color:color-mix(in lab,red,red)){.dark\:hover\:bg-accent\/50:is(.dark *):hover{background-color:color-mix(in oklab,var(--accent)50%,transparent)}}.dark\:hover\:bg-amber-950\/30:is(.dark *):hover{background-color:#4619014d}@supports (color:color-mix(in lab,red,red)){.dark\:hover\:bg-amber-950\/30:is(.dark *):hover{background-color:color-mix(in oklab,var(--color-amber-950)30%,transparent)}}.dark\:hover\:bg-gray-800:is(.dark *):hover{background-color:var(--color-gray-800)}.dark\:hover\:bg-input\/50:is(.dark *):hover{background-color:var(--input)}@supports (color:color-mix(in lab,red,red)){.dark\:hover\:bg-input\/50:is(.dark *):hover{background-color:color-mix(in 
oklab,var(--input)50%,transparent)}}.dark\:hover\:bg-orange-950\/40:is(.dark *):hover{background-color:#44130666}@supports (color:color-mix(in lab,red,red)){.dark\:hover\:bg-orange-950\/40:is(.dark *):hover{background-color:color-mix(in oklab,var(--color-orange-950)40%,transparent)}}.dark\:hover\:bg-red-900\/20:is(.dark *):hover{background-color:#82181a33}@supports (color:color-mix(in lab,red,red)){.dark\:hover\:bg-red-900\/20:is(.dark *):hover{background-color:color-mix(in oklab,var(--color-red-900)20%,transparent)}}.dark\:hover\:text-orange-200:is(.dark *):hover{color:var(--color-orange-200)}}.dark\:focus-visible\:ring-destructive\/40:is(.dark *):focus-visible{--tw-ring-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.dark\:focus-visible\:ring-destructive\/40:is(.dark *):focus-visible{--tw-ring-color:color-mix(in oklab,var(--destructive)40%,transparent)}}.dark\:aria-invalid\:ring-destructive\/40:is(.dark *)[aria-invalid=true]{--tw-ring-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.dark\:aria-invalid\:ring-destructive\/40:is(.dark *)[aria-invalid=true]{--tw-ring-color:color-mix(in oklab,var(--destructive)40%,transparent)}}.dark\:data-\[state\=checked\]\:bg-primary:is(.dark *)[data-state=checked]{background-color:var(--primary)}.dark\:data-\[variant\=destructive\]\:focus\:bg-destructive\/20:is(.dark *)[data-variant=destructive]:focus{background-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.dark\:data-\[variant\=destructive\]\:focus\:bg-destructive\/20:is(.dark *)[data-variant=destructive]:focus{background-color:color-mix(in oklab,var(--destructive)20%,transparent)}}.\[\&_p\]\:leading-relaxed p{--tw-leading:var(--leading-relaxed);line-height:var(--leading-relaxed)}.\[\&_svg\]\:pointer-events-none svg{pointer-events:none}.\[\&_svg\]\:shrink-0 svg{flex-shrink:0}.\[\&_svg\:not\(\[class\*\=\'size-\'\]\)\]\:size-4 
svg:not([class*=size-]){width:calc(var(--spacing)*4);height:calc(var(--spacing)*4)}.\[\&_svg\:not\(\[class\*\=\'text-\'\]\)\]\:text-muted-foreground svg:not([class*=text-]){color:var(--muted-foreground)}.\[\.border-b\]\:pb-6.border-b{padding-bottom:calc(var(--spacing)*6)}.\[\.border-t\]\:pt-6.border-t{padding-top:calc(var(--spacing)*6)}:is(.\*\:\[span\]\:last\:flex>*):is(span):last-child{display:flex}:is(.\*\:\[span\]\:last\:items-center>*):is(span):last-child{align-items:center}:is(.\*\:\[span\]\:last\:gap-2>*):is(span):last-child{gap:calc(var(--spacing)*2)}:is(.data-\[variant\=destructive\]\:\*\:\[svg\]\:\!text-destructive[data-variant=destructive]>*):is(svg){color:var(--destructive)!important}.\[\&\>svg\]\:absolute>svg{position:absolute}.\[\&\>svg\]\:top-4>svg{top:calc(var(--spacing)*4)}.\[\&\>svg\]\:left-4>svg{left:calc(var(--spacing)*4)}.\[\&\>svg\]\:text-foreground>svg{color:var(--foreground)}.\[\&\>svg\+div\]\:translate-y-\[-3px\]>svg+div{--tw-translate-y:-3px;translate:var(--tw-translate-x)var(--tw-translate-y)}.\[\&\>svg\~\*\]\:pl-7>svg~*{padding-left:calc(var(--spacing)*7)}}@property --tw-animation-delay{syntax:"*";inherits:false;initial-value:0s}@property --tw-animation-direction{syntax:"*";inherits:false;initial-value:normal}@property --tw-animation-duration{syntax:"*";inherits:false}@property --tw-animation-fill-mode{syntax:"*";inherits:false;initial-value:none}@property --tw-animation-iteration-count{syntax:"*";inherits:false;initial-value:1}@property --tw-enter-blur{syntax:"*";inherits:false;initial-value:0}@property --tw-enter-opacity{syntax:"*";inherits:false;initial-value:1}@property --tw-enter-rotate{syntax:"*";inherits:false;initial-value:0}@property --tw-enter-scale{syntax:"*";inherits:false;initial-value:1}@property --tw-enter-translate-x{syntax:"*";inherits:false;initial-value:0}@property --tw-enter-translate-y{syntax:"*";inherits:false;initial-value:0}@property --tw-exit-blur{syntax:"*";inherits:false;initial-value:0}@property 
--tw-exit-opacity{syntax:"*";inherits:false;initial-value:1}@property --tw-exit-rotate{syntax:"*";inherits:false;initial-value:0}@property --tw-exit-scale{syntax:"*";inherits:false;initial-value:1}@property --tw-exit-translate-x{syntax:"*";inherits:false;initial-value:0}@property --tw-exit-translate-y{syntax:"*";inherits:false;initial-value:0}:root{--radius:.625rem;--background:oklch(100% 0 0);--foreground:oklch(14.5% 0 0);--card:oklch(100% 0 0);--card-foreground:oklch(14.5% 0 0);--popover:oklch(100% 0 0);--popover-foreground:oklch(14.5% 0 0);--primary:oklch(48% .18 290);--primary-foreground:oklch(98.5% 0 0);--secondary:oklch(97% 0 0);--secondary-foreground:oklch(20.5% 0 0);--muted:oklch(97% 0 0);--muted-foreground:oklch(55.6% 0 0);--accent:oklch(97% 0 0);--accent-foreground:oklch(20.5% 0 0);--destructive:oklch(57.7% .245 27.325);--border:oklch(92.2% 0 0);--input:oklch(92.2% 0 0);--ring:oklch(70.8% 0 0);--chart-1:oklch(64.6% .222 41.116);--chart-2:oklch(60% .118 184.704);--chart-3:oklch(39.8% .07 227.392);--chart-4:oklch(82.8% .189 84.429);--chart-5:oklch(76.9% .188 70.08);--sidebar:oklch(98.5% 0 0);--sidebar-foreground:oklch(14.5% 0 0);--sidebar-primary:oklch(20.5% 0 0);--sidebar-primary-foreground:oklch(98.5% 0 0);--sidebar-accent:oklch(97% 0 0);--sidebar-accent-foreground:oklch(20.5% 0 0);--sidebar-border:oklch(92.2% 0 0);--sidebar-ring:oklch(70.8% 0 0)}.dark{--background:oklch(14.5% 0 0);--foreground:oklch(98.5% 0 0);--card:oklch(20.5% 0 0);--card-foreground:oklch(98.5% 0 0);--popover:oklch(20.5% 0 0);--popover-foreground:oklch(98.5% 0 0);--primary:oklch(62% .2 290);--primary-foreground:oklch(98.5% 0 0);--secondary:oklch(26.9% 0 0);--secondary-foreground:oklch(98.5% 0 0);--muted:oklch(26.9% 0 0);--muted-foreground:oklch(70.8% 0 0);--accent:oklch(26.9% 0 0);--accent-foreground:oklch(98.5% 0 0);--destructive:oklch(70.4% .191 22.216);--border:oklch(100% 0 0/.1);--input:oklch(100% 0 0/.15);--ring:oklch(55.6% 0 0);--chart-1:oklch(48.8% .243 
264.376);--chart-2:oklch(69.6% .17 162.48);--chart-3:oklch(76.9% .188 70.08);--chart-4:oklch(62.7% .265 303.9);--chart-5:oklch(64.5% .246 16.439);--sidebar:oklch(20.5% 0 0);--sidebar-foreground:oklch(98.5% 0 0);--sidebar-primary:oklch(48.8% .243 264.376);--sidebar-primary-foreground:oklch(98.5% 0 0);--sidebar-accent:oklch(26.9% 0 0);--sidebar-accent-foreground:oklch(98.5% 0 0);--sidebar-border:oklch(100% 0 0/.1);--sidebar-ring:oklch(55.6% 0 0)}.workflow-chat-view .border-green-200{border-color:var(--color-emerald-200)}.workflow-chat-view .bg-green-50{background-color:var(--color-emerald-50)}.workflow-chat-view .bg-green-100{background-color:var(--color-emerald-100)}.workflow-chat-view .text-green-600{color:var(--color-emerald-600)}.workflow-chat-view .text-green-700{color:var(--color-emerald-700)}.workflow-chat-view .text-green-800{color:var(--color-emerald-800)}.highlight-attention{animation:1s ease-out highlight-flash}@keyframes highlight-flash{0%{background-color:#fb923c4d;transform:scale(1.02)}to{background-color:#0000;transform:scale(1)}}.hil-waiting-glow{animation:2s infinite pulse-glow;box-shadow:0 0 #fb923c66,inset 0 0 0 1px #fb923c33}@keyframes pulse-glow{0%,to{box-shadow:0 0 #fb923c66,inset 0 0 0 1px #fb923c33}50%{box-shadow:0 0 20px 5px #fb923c33,inset 0 0 0 2px #fb923c4d}}@property --tw-translate-x{syntax:"*";inherits:false;initial-value:0}@property --tw-translate-y{syntax:"*";inherits:false;initial-value:0}@property --tw-translate-z{syntax:"*";inherits:false;initial-value:0}@property --tw-scale-x{syntax:"*";inherits:false;initial-value:1}@property --tw-scale-y{syntax:"*";inherits:false;initial-value:1}@property --tw-scale-z{syntax:"*";inherits:false;initial-value:1}@property --tw-rotate-x{syntax:"*";inherits:false}@property --tw-rotate-y{syntax:"*";inherits:false}@property --tw-rotate-z{syntax:"*";inherits:false}@property --tw-skew-x{syntax:"*";inherits:false}@property --tw-skew-y{syntax:"*";inherits:false}@property 
--tw-space-y-reverse{syntax:"*";inherits:false;initial-value:0}@property --tw-space-x-reverse{syntax:"*";inherits:false;initial-value:0}@property --tw-border-style{syntax:"*";inherits:false;initial-value:solid}@property --tw-leading{syntax:"*";inherits:false}@property --tw-font-weight{syntax:"*";inherits:false}@property --tw-tracking{syntax:"*";inherits:false}@property --tw-shadow{syntax:"*";inherits:false;initial-value:0 0 #0000}@property --tw-shadow-color{syntax:"*";inherits:false}@property --tw-shadow-alpha{syntax:"";inherits:false;initial-value:100%}@property --tw-inset-shadow{syntax:"*";inherits:false;initial-value:0 0 #0000}@property --tw-inset-shadow-color{syntax:"*";inherits:false}@property --tw-inset-shadow-alpha{syntax:"";inherits:false;initial-value:100%}@property --tw-ring-color{syntax:"*";inherits:false}@property --tw-ring-shadow{syntax:"*";inherits:false;initial-value:0 0 #0000}@property --tw-inset-ring-color{syntax:"*";inherits:false}@property --tw-inset-ring-shadow{syntax:"*";inherits:false;initial-value:0 0 #0000}@property --tw-ring-inset{syntax:"*";inherits:false}@property --tw-ring-offset-width{syntax:"";inherits:false;initial-value:0}@property --tw-ring-offset-color{syntax:"*";inherits:false;initial-value:#fff}@property --tw-ring-offset-shadow{syntax:"*";inherits:false;initial-value:0 0 #0000}@property --tw-outline-style{syntax:"*";inherits:false;initial-value:solid}@property --tw-blur{syntax:"*";inherits:false}@property --tw-brightness{syntax:"*";inherits:false}@property --tw-contrast{syntax:"*";inherits:false}@property --tw-grayscale{syntax:"*";inherits:false}@property --tw-hue-rotate{syntax:"*";inherits:false}@property --tw-invert{syntax:"*";inherits:false}@property --tw-opacity{syntax:"*";inherits:false}@property --tw-saturate{syntax:"*";inherits:false}@property --tw-sepia{syntax:"*";inherits:false}@property --tw-drop-shadow{syntax:"*";inherits:false}@property --tw-drop-shadow-color{syntax:"*";inherits:false}@property 
--tw-drop-shadow-alpha{syntax:"";inherits:false;initial-value:100%}@property --tw-drop-shadow-size{syntax:"*";inherits:false}@property --tw-backdrop-blur{syntax:"*";inherits:false}@property --tw-backdrop-brightness{syntax:"*";inherits:false}@property --tw-backdrop-contrast{syntax:"*";inherits:false}@property --tw-backdrop-grayscale{syntax:"*";inherits:false}@property --tw-backdrop-hue-rotate{syntax:"*";inherits:false}@property --tw-backdrop-invert{syntax:"*";inherits:false}@property --tw-backdrop-opacity{syntax:"*";inherits:false}@property --tw-backdrop-saturate{syntax:"*";inherits:false}@property --tw-backdrop-sepia{syntax:"*";inherits:false}@property --tw-duration{syntax:"*";inherits:false}@property --tw-ease{syntax:"*";inherits:false}@keyframes spin{to{transform:rotate(360deg)}}@keyframes pulse{50%{opacity:.5}}@keyframes bounce{0%,to{animation-timing-function:cubic-bezier(.8,0,1,1);transform:translateY(-25%)}50%{animation-timing-function:cubic-bezier(0,0,.2,1);transform:none}}@keyframes enter{0%{opacity:var(--tw-enter-opacity,1);transform:translate3d(var(--tw-enter-translate-x,0),var(--tw-enter-translate-y,0),0)scale3d(var(--tw-enter-scale,1),var(--tw-enter-scale,1),var(--tw-enter-scale,1))rotate(var(--tw-enter-rotate,0));filter:blur(var(--tw-enter-blur,0))}}@keyframes exit{to{opacity:var(--tw-exit-opacity,1);transform:translate3d(var(--tw-exit-translate-x,0),var(--tw-exit-translate-y,0),0)scale3d(var(--tw-exit-scale,1),var(--tw-exit-scale,1),var(--tw-exit-scale,1))rotate(var(--tw-exit-rotate,0));filter:blur(var(--tw-exit-blur,0))}}.react-flow{direction:ltr;--xy-edge-stroke-default: #b1b1b7;--xy-edge-stroke-width-default: 1;--xy-edge-stroke-selected-default: #555;--xy-connectionline-stroke-default: #b1b1b7;--xy-connectionline-stroke-width-default: 1;--xy-attribution-background-color-default: rgba(255, 255, 255, .5);--xy-minimap-background-color-default: #fff;--xy-minimap-mask-background-color-default: rgba(240, 240, 240, 
.6);--xy-minimap-mask-stroke-color-default: transparent;--xy-minimap-mask-stroke-width-default: 1;--xy-minimap-node-background-color-default: #e2e2e2;--xy-minimap-node-stroke-color-default: transparent;--xy-minimap-node-stroke-width-default: 2;--xy-background-color-default: transparent;--xy-background-pattern-dots-color-default: #91919a;--xy-background-pattern-lines-color-default: #eee;--xy-background-pattern-cross-color-default: #e2e2e2;background-color:var(--xy-background-color, var(--xy-background-color-default));--xy-node-color-default: inherit;--xy-node-border-default: 1px solid #1a192b;--xy-node-background-color-default: #fff;--xy-node-group-background-color-default: rgba(240, 240, 240, .25);--xy-node-boxshadow-hover-default: 0 1px 4px 1px rgba(0, 0, 0, .08);--xy-node-boxshadow-selected-default: 0 0 0 .5px #1a192b;--xy-node-border-radius-default: 3px;--xy-handle-background-color-default: #1a192b;--xy-handle-border-color-default: #fff;--xy-selection-background-color-default: rgba(0, 89, 220, .08);--xy-selection-border-default: 1px dotted rgba(0, 89, 220, .8);--xy-controls-button-background-color-default: #fefefe;--xy-controls-button-background-color-hover-default: #f4f4f4;--xy-controls-button-color-default: inherit;--xy-controls-button-color-hover-default: inherit;--xy-controls-button-border-color-default: #eee;--xy-controls-box-shadow-default: 0 0 2px 1px rgba(0, 0, 0, .08);--xy-edge-label-background-color-default: #ffffff;--xy-edge-label-color-default: inherit;--xy-resize-background-color-default: #3367d9}.react-flow.dark{--xy-edge-stroke-default: #3e3e3e;--xy-edge-stroke-width-default: 1;--xy-edge-stroke-selected-default: #727272;--xy-connectionline-stroke-default: #b1b1b7;--xy-connectionline-stroke-width-default: 1;--xy-attribution-background-color-default: rgba(150, 150, 150, .25);--xy-minimap-background-color-default: #141414;--xy-minimap-mask-background-color-default: rgba(60, 60, 60, .6);--xy-minimap-mask-stroke-color-default: 
transparent;--xy-minimap-mask-stroke-width-default: 1;--xy-minimap-node-background-color-default: #2b2b2b;--xy-minimap-node-stroke-color-default: transparent;--xy-minimap-node-stroke-width-default: 2;--xy-background-color-default: #141414;--xy-background-pattern-dots-color-default: #777;--xy-background-pattern-lines-color-default: #777;--xy-background-pattern-cross-color-default: #777;--xy-node-color-default: #f8f8f8;--xy-node-border-default: 1px solid #3c3c3c;--xy-node-background-color-default: #1e1e1e;--xy-node-group-background-color-default: rgba(240, 240, 240, .25);--xy-node-boxshadow-hover-default: 0 1px 4px 1px rgba(255, 255, 255, .08);--xy-node-boxshadow-selected-default: 0 0 0 .5px #999;--xy-handle-background-color-default: #bebebe;--xy-handle-border-color-default: #1e1e1e;--xy-selection-background-color-default: rgba(200, 200, 220, .08);--xy-selection-border-default: 1px dotted rgba(200, 200, 220, .8);--xy-controls-button-background-color-default: #2b2b2b;--xy-controls-button-background-color-hover-default: #3e3e3e;--xy-controls-button-color-default: #f8f8f8;--xy-controls-button-color-hover-default: #fff;--xy-controls-button-border-color-default: #5b5b5b;--xy-controls-box-shadow-default: 0 0 2px 1px rgba(0, 0, 0, .08);--xy-edge-label-background-color-default: #141414;--xy-edge-label-color-default: #f8f8f8}.react-flow__background{background-color:var(--xy-background-color-props, var(--xy-background-color, var(--xy-background-color-default)));pointer-events:none;z-index:-1}.react-flow__container{position:absolute;width:100%;height:100%;top:0;left:0}.react-flow__pane{z-index:1}.react-flow__pane.draggable{cursor:grab}.react-flow__pane.dragging{cursor:grabbing}.react-flow__pane.selection{cursor:pointer}.react-flow__viewport{transform-origin:0 
0;z-index:2;pointer-events:none}.react-flow__renderer{z-index:4}.react-flow__selection{z-index:6}.react-flow__nodesselection-rect:focus,.react-flow__nodesselection-rect:focus-visible{outline:none}.react-flow__edge-path{stroke:var(--xy-edge-stroke, var(--xy-edge-stroke-default));stroke-width:var(--xy-edge-stroke-width, var(--xy-edge-stroke-width-default));fill:none}.react-flow__connection-path{stroke:var(--xy-connectionline-stroke, var(--xy-connectionline-stroke-default));stroke-width:var(--xy-connectionline-stroke-width, var(--xy-connectionline-stroke-width-default));fill:none}.react-flow .react-flow__edges{position:absolute}.react-flow .react-flow__edges svg{overflow:visible;position:absolute;pointer-events:none}.react-flow__edge{pointer-events:visibleStroke}.react-flow__edge.selectable{cursor:pointer}.react-flow__edge.animated path{stroke-dasharray:5;animation:dashdraw .5s linear infinite}.react-flow__edge.animated path.react-flow__edge-interaction{stroke-dasharray:none;animation:none}.react-flow__edge.inactive{pointer-events:none}.react-flow__edge.selected,.react-flow__edge:focus,.react-flow__edge:focus-visible{outline:none}.react-flow__edge.selected .react-flow__edge-path,.react-flow__edge.selectable:focus .react-flow__edge-path,.react-flow__edge.selectable:focus-visible .react-flow__edge-path{stroke:var(--xy-edge-stroke-selected, var(--xy-edge-stroke-selected-default))}.react-flow__edge-textwrapper{pointer-events:all}.react-flow__edge .react-flow__edge-text{pointer-events:none;-webkit-user-select:none;-moz-user-select:none;user-select:none}.react-flow__arrowhead polyline{stroke:var(--xy-edge-stroke, var(--xy-edge-stroke-default))}.react-flow__arrowhead polyline.arrowclosed{fill:var(--xy-edge-stroke, var(--xy-edge-stroke-default))}.react-flow__connection{pointer-events:none}.react-flow__connection .animated{stroke-dasharray:5;animation:dashdraw .5s linear 
infinite}svg.react-flow__connectionline{z-index:1001;overflow:visible;position:absolute}.react-flow__nodes{pointer-events:none;transform-origin:0 0}.react-flow__node{position:absolute;-webkit-user-select:none;-moz-user-select:none;user-select:none;pointer-events:all;transform-origin:0 0;box-sizing:border-box;cursor:default}.react-flow__node.selectable{cursor:pointer}.react-flow__node.draggable{cursor:grab;pointer-events:all}.react-flow__node.draggable.dragging{cursor:grabbing}.react-flow__nodesselection{z-index:3;transform-origin:left top;pointer-events:none}.react-flow__nodesselection-rect{position:absolute;pointer-events:all;cursor:grab}.react-flow__handle{position:absolute;pointer-events:none;min-width:5px;min-height:5px;width:6px;height:6px;background-color:var(--xy-handle-background-color, var(--xy-handle-background-color-default));border:1px solid var(--xy-handle-border-color, var(--xy-handle-border-color-default));border-radius:100%}.react-flow__handle.connectingfrom{pointer-events:all}.react-flow__handle.connectionindicator{pointer-events:all;cursor:crosshair}.react-flow__handle-bottom{top:auto;left:50%;bottom:0;transform:translate(-50%,50%)}.react-flow__handle-top{top:0;left:50%;transform:translate(-50%,-50%)}.react-flow__handle-left{top:50%;left:0;transform:translate(-50%,-50%)}.react-flow__handle-right{top:50%;right:0;transform:translate(50%,-50%)}.react-flow__edgeupdater{cursor:move;pointer-events:all}.react-flow__pane.selection .react-flow__panel{pointer-events:none}.react-flow__panel{position:absolute;z-index:5;margin:15px}.react-flow__panel.top{top:0}.react-flow__panel.bottom{bottom:0}.react-flow__panel.top.center,.react-flow__panel.bottom.center{left:50%;transform:translate(-15px) translate(-50%)}.react-flow__panel.left{left:0}.react-flow__panel.right{right:0}.react-flow__panel.left.center,.react-flow__panel.right.center{top:50%;transform:translateY(-15px) 
translateY(-50%)}.react-flow__attribution{font-size:10px;background:var(--xy-attribution-background-color, var(--xy-attribution-background-color-default));padding:2px 3px;margin:0}.react-flow__attribution a{text-decoration:none;color:#999}@keyframes dashdraw{0%{stroke-dashoffset:10}}.react-flow__edgelabel-renderer{position:absolute;width:100%;height:100%;pointer-events:none;-webkit-user-select:none;-moz-user-select:none;user-select:none;left:0;top:0}.react-flow__viewport-portal{position:absolute;width:100%;height:100%;left:0;top:0;-webkit-user-select:none;-moz-user-select:none;user-select:none}.react-flow__minimap{background:var( --xy-minimap-background-color-props, var(--xy-minimap-background-color, var(--xy-minimap-background-color-default)) )}.react-flow__minimap-svg{display:block}.react-flow__minimap-mask{fill:var( --xy-minimap-mask-background-color-props, var(--xy-minimap-mask-background-color, var(--xy-minimap-mask-background-color-default)) );stroke:var( --xy-minimap-mask-stroke-color-props, var(--xy-minimap-mask-stroke-color, var(--xy-minimap-mask-stroke-color-default)) );stroke-width:var( --xy-minimap-mask-stroke-width-props, var(--xy-minimap-mask-stroke-width, var(--xy-minimap-mask-stroke-width-default)) )}.react-flow__minimap-node{fill:var( --xy-minimap-node-background-color-props, var(--xy-minimap-node-background-color, var(--xy-minimap-node-background-color-default)) );stroke:var( --xy-minimap-node-stroke-color-props, var(--xy-minimap-node-stroke-color, var(--xy-minimap-node-stroke-color-default)) );stroke-width:var( --xy-minimap-node-stroke-width-props, var(--xy-minimap-node-stroke-width, var(--xy-minimap-node-stroke-width-default)) )}.react-flow__background-pattern.dots{fill:var( --xy-background-pattern-color-props, var(--xy-background-pattern-color, var(--xy-background-pattern-dots-color-default)) )}.react-flow__background-pattern.lines{stroke:var( --xy-background-pattern-color-props, var(--xy-background-pattern-color, 
var(--xy-background-pattern-lines-color-default)) )}.react-flow__background-pattern.cross{stroke:var( --xy-background-pattern-color-props, var(--xy-background-pattern-color, var(--xy-background-pattern-cross-color-default)) )}.react-flow__controls{display:flex;flex-direction:column;box-shadow:var(--xy-controls-box-shadow, var(--xy-controls-box-shadow-default))}.react-flow__controls.horizontal{flex-direction:row}.react-flow__controls-button{display:flex;justify-content:center;align-items:center;height:26px;width:26px;padding:4px;border:none;background:var(--xy-controls-button-background-color, var(--xy-controls-button-background-color-default));border-bottom:1px solid var( --xy-controls-button-border-color-props, var(--xy-controls-button-border-color, var(--xy-controls-button-border-color-default)) );color:var( --xy-controls-button-color-props, var(--xy-controls-button-color, var(--xy-controls-button-color-default)) );cursor:pointer;-webkit-user-select:none;-moz-user-select:none;user-select:none}.react-flow__controls-button svg{width:100%;max-width:12px;max-height:12px;fill:currentColor}.react-flow__edge.updating .react-flow__edge-path{stroke:#777}.react-flow__edge-text{font-size:10px}.react-flow__node.selectable:focus,.react-flow__node.selectable:focus-visible{outline:none}.react-flow__node-input,.react-flow__node-default,.react-flow__node-output,.react-flow__node-group{padding:10px;border-radius:var(--xy-node-border-radius, var(--xy-node-border-radius-default));width:150px;font-size:12px;color:var(--xy-node-color, var(--xy-node-color-default));text-align:center;border:var(--xy-node-border, var(--xy-node-border-default));background-color:var(--xy-node-background-color, var(--xy-node-background-color-default))}.react-flow__node-input.selectable:hover,.react-flow__node-default.selectable:hover,.react-flow__node-output.selectable:hover,.react-flow__node-group.selectable:hover{box-shadow:var(--xy-node-boxshadow-hover, 
var(--xy-node-boxshadow-hover-default))}.react-flow__node-input.selectable.selected,.react-flow__node-input.selectable:focus,.react-flow__node-input.selectable:focus-visible,.react-flow__node-default.selectable.selected,.react-flow__node-default.selectable:focus,.react-flow__node-default.selectable:focus-visible,.react-flow__node-output.selectable.selected,.react-flow__node-output.selectable:focus,.react-flow__node-output.selectable:focus-visible,.react-flow__node-group.selectable.selected,.react-flow__node-group.selectable:focus,.react-flow__node-group.selectable:focus-visible{box-shadow:var(--xy-node-boxshadow-selected, var(--xy-node-boxshadow-selected-default))}.react-flow__node-group{background-color:var(--xy-node-group-background-color, var(--xy-node-group-background-color-default))}.react-flow__nodesselection-rect,.react-flow__selection{background:var(--xy-selection-background-color, var(--xy-selection-background-color-default));border:var(--xy-selection-border, var(--xy-selection-border-default))}.react-flow__nodesselection-rect:focus,.react-flow__nodesselection-rect:focus-visible,.react-flow__selection:focus,.react-flow__selection:focus-visible{outline:none}.react-flow__controls-button:hover{background:var( --xy-controls-button-background-color-hover-props, var(--xy-controls-button-background-color-hover, var(--xy-controls-button-background-color-hover-default)) );color:var( --xy-controls-button-color-hover-props, var(--xy-controls-button-color-hover, var(--xy-controls-button-color-hover-default)) )}.react-flow__controls-button:disabled{pointer-events:none}.react-flow__controls-button:disabled svg{fill-opacity:.4}.react-flow__controls-button:last-child{border-bottom:none}.react-flow__controls.horizontal .react-flow__controls-button{border-bottom:none;border-right:1px solid var( --xy-controls-button-border-color-props, var(--xy-controls-button-border-color, var(--xy-controls-button-border-color-default)) )}.react-flow__controls.horizontal 
.react-flow__controls-button:last-child{border-right:none}.react-flow__resize-control{position:absolute}.react-flow__resize-control.left,.react-flow__resize-control.right{cursor:ew-resize}.react-flow__resize-control.top,.react-flow__resize-control.bottom{cursor:ns-resize}.react-flow__resize-control.top.left,.react-flow__resize-control.bottom.right{cursor:nwse-resize}.react-flow__resize-control.bottom.left,.react-flow__resize-control.top.right{cursor:nesw-resize}.react-flow__resize-control.handle{width:5px;height:5px;border:1px solid #fff;border-radius:1px;background-color:var(--xy-resize-background-color, var(--xy-resize-background-color-default));translate:-50% -50%}.react-flow__resize-control.handle.left{left:0;top:50%}.react-flow__resize-control.handle.right{left:100%;top:50%}.react-flow__resize-control.handle.top{left:50%;top:0}.react-flow__resize-control.handle.bottom{left:50%;top:100%}.react-flow__resize-control.handle.top.left,.react-flow__resize-control.handle.bottom.left{left:0}.react-flow__resize-control.handle.top.right,.react-flow__resize-control.handle.bottom.right{left:100%}.react-flow__resize-control.line{border-color:var(--xy-resize-background-color, var(--xy-resize-background-color-default));border-width:0;border-style:solid}.react-flow__resize-control.line.left,.react-flow__resize-control.line.right{width:1px;transform:translate(-50%);top:0;height:100%}.react-flow__resize-control.line.left{left:0;border-left-width:1px}.react-flow__resize-control.line.right{left:100%;border-right-width:1px}.react-flow__resize-control.line.top,.react-flow__resize-control.line.bottom{height:1px;transform:translateY(-50%);left:0;width:100%}.react-flow__resize-control.line.top{top:0;border-top-width:1px}.react-flow__resize-control.line.bottom{border-bottom-width:1px;top:100%}.react-flow__edge-textbg{fill:var(--xy-edge-label-background-color, var(--xy-edge-label-background-color-default))}.react-flow__edge-text{fill:var(--xy-edge-label-color, 
var(--xy-edge-label-color-default))} diff --git a/python/packages/devui/agent_framework_devui/ui/assets/index.js b/python/packages/devui/agent_framework_devui/ui/assets/index.js index 44836a5b86..6ee0ee4c01 100644 --- a/python/packages/devui/agent_framework_devui/ui/assets/index.js +++ b/python/packages/devui/agent_framework_devui/ui/assets/index.js @@ -1,4 +1,4 @@ -function WE(e,n){for(var r=0;ra[l]})}}}return Object.freeze(Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}))}(function(){const n=document.createElement("link").relList;if(n&&n.supports&&n.supports("modulepreload"))return;for(const l of document.querySelectorAll('link[rel="modulepreload"]'))a(l);new MutationObserver(l=>{for(const c of l)if(c.type==="childList")for(const d of c.addedNodes)d.tagName==="LINK"&&d.rel==="modulepreload"&&a(d)}).observe(document,{childList:!0,subtree:!0});function r(l){const c={};return l.integrity&&(c.integrity=l.integrity),l.referrerPolicy&&(c.referrerPolicy=l.referrerPolicy),l.crossOrigin==="use-credentials"?c.credentials="include":l.crossOrigin==="anonymous"?c.credentials="omit":c.credentials="same-origin",c}function a(l){if(l.ep)return;l.ep=!0;const c=r(l);fetch(l.href,c)}})();function Dp(e){return e&&e.__esModule&&Object.prototype.hasOwnProperty.call(e,"default")?e.default:e}var ih={exports:{}},Fi={};/** +function KE(e,n){for(var r=0;ra[l]})}}}return Object.freeze(Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}))}(function(){const n=document.createElement("link").relList;if(n&&n.supports&&n.supports("modulepreload"))return;for(const l of document.querySelectorAll('link[rel="modulepreload"]'))a(l);new MutationObserver(l=>{for(const c of l)if(c.type==="childList")for(const d of c.addedNodes)d.tagName==="LINK"&&d.rel==="modulepreload"&&a(d)}).observe(document,{childList:!0,subtree:!0});function r(l){const c={};return 
l.integrity&&(c.integrity=l.integrity),l.referrerPolicy&&(c.referrerPolicy=l.referrerPolicy),l.crossOrigin==="use-credentials"?c.credentials="include":l.crossOrigin==="anonymous"?c.credentials="omit":c.credentials="same-origin",c}function a(l){if(l.ep)return;l.ep=!0;const c=r(l);fetch(l.href,c)}})();function Cp(e){return e&&e.__esModule&&Object.prototype.hasOwnProperty.call(e,"default")?e.default:e}var eh={exports:{}},qi={};/** * @license React * react-jsx-runtime.production.js * @@ -6,7 +6,7 @@ function WE(e,n){for(var r=0;r>>1,C=k[$];if(0>>1;$l(V,I))Jl(ce,V)?(k[$]=ce,k[J]=I,$=J):(k[$]=V,k[Y]=I,$=Y);else if(Jl(ce,I))k[$]=ce,k[J]=I,$=J;else break e}}return L}function l(k,L){var I=k.sortIndex-L.sortIndex;return I!==0?I:k.id-L.id}if(e.unstable_now=void 0,typeof performance=="object"&&typeof performance.now=="function"){var c=performance;e.unstable_now=function(){return c.now()}}else{var d=Date,f=d.now();e.unstable_now=function(){return d.now()-f}}var m=[],h=[],g=1,y=null,x=3,b=!1,j=!1,N=!1,S=!1,_=typeof setTimeout=="function"?setTimeout:null,A=typeof clearTimeout=="function"?clearTimeout:null,E=typeof setImmediate<"u"?setImmediate:null;function M(k){for(var L=r(h);L!==null;){if(L.callback===null)a(h);else if(L.startTime<=k)a(h),L.sortIndex=L.expirationTime,n(m,L);else break;L=r(h)}}function R(k){if(N=!1,M(k),!j)if(r(m)!==null)j=!0,D||(D=!0,G());else{var L=r(h);L!==null&&B(R,L.startTime-k)}}var D=!1,O=-1,H=5,q=-1;function Z(){return S?!0:!(e.unstable_now()-qk&&Z());){var $=y.callback;if(typeof $=="function"){y.callback=null,x=y.priorityLevel;var C=$(y.expirationTime<=k);if(k=e.unstable_now(),typeof C=="function"){y.callback=C,M(k),L=!0;break t}y===r(m)&&a(m),M(k)}else a(m);y=r(m)}if(y!==null)L=!0;else{var P=r(h);P!==null&&B(R,P.startTime-k),L=!1}}break e}finally{y=null,x=I,b=!1}L=void 0}}finally{L?G():D=!1}}}var G;if(typeof E=="function")G=function(){E(Q)};else if(typeof MessageChannel<"u"){var ne=new 
MessageChannel,U=ne.port2;ne.port1.onmessage=Q,G=function(){U.postMessage(null)}}else G=function(){_(Q,0)};function B(k,L){O=_(function(){k(e.unstable_now())},L)}e.unstable_IdlePriority=5,e.unstable_ImmediatePriority=1,e.unstable_LowPriority=4,e.unstable_NormalPriority=3,e.unstable_Profiling=null,e.unstable_UserBlockingPriority=2,e.unstable_cancelCallback=function(k){k.callback=null},e.unstable_forceFrameRate=function(k){0>k||125$?(k.sortIndex=I,n(h,k),r(m)===null&&k===r(h)&&(N?(A(O),O=-1):N=!0,B(R,I-$))):(k.sortIndex=C,n(m,k),j||b||(j=!0,D||(D=!0,G()))),k},e.unstable_shouldYield=Z,e.unstable_wrapCallback=function(k){var L=x;return function(){var I=x;x=L;try{return k.apply(this,arguments)}finally{x=I}}}})(dh)),dh}var bv;function tC(){return bv||(bv=1,uh.exports=eC()),uh.exports}var fh={exports:{}},Jt={};/** + */var yv;function tC(){return yv||(yv=1,(function(e){function n(R,L){var I=R.length;R.push(L);e:for(;0>>1,C=R[P];if(0>>1;P<$;){var Y=2*(P+1)-1,V=R[Y],J=Y+1,ce=R[J];if(0>l(V,I))Jl(ce,V)?(R[P]=ce,R[J]=I,P=J):(R[P]=V,R[Y]=I,P=Y);else if(Jl(ce,I))R[P]=ce,R[J]=I,P=J;else break e}}return L}function l(R,L){var I=R.sortIndex-L.sortIndex;return I!==0?I:R.id-L.id}if(e.unstable_now=void 0,typeof performance=="object"&&typeof performance.now=="function"){var c=performance;e.unstable_now=function(){return c.now()}}else{var d=Date,f=d.now();e.unstable_now=function(){return d.now()-f}}var m=[],h=[],g=1,x=null,y=3,b=!1,j=!1,N=!1,S=!1,_=typeof setTimeout=="function"?setTimeout:null,A=typeof clearTimeout=="function"?clearTimeout:null,E=typeof setImmediate<"u"?setImmediate:null;function M(R){for(var L=r(h);L!==null;){if(L.callback===null)a(h);else if(L.startTime<=R)a(h),L.sortIndex=L.expirationTime,n(m,L);else break;L=r(h)}}function T(R){if(N=!1,M(R),!j)if(r(m)!==null)j=!0,D||(D=!0,G());else{var L=r(h);L!==null&&U(T,L.startTime-R)}}var D=!1,z=-1,H=5,q=-1;function X(){return S?!0:!(e.unstable_now()-qR&&X());){var P=x.callback;if(typeof 
P=="function"){x.callback=null,y=x.priorityLevel;var C=P(x.expirationTime<=R);if(R=e.unstable_now(),typeof C=="function"){x.callback=C,M(R),L=!0;break t}x===r(m)&&a(m),M(R)}else a(m);x=r(m)}if(x!==null)L=!0;else{var $=r(h);$!==null&&U(T,$.startTime-R),L=!1}}break e}finally{x=null,y=I,b=!1}L=void 0}}finally{L?G():D=!1}}}var G;if(typeof E=="function")G=function(){E(W)};else if(typeof MessageChannel<"u"){var ne=new MessageChannel,B=ne.port2;ne.port1.onmessage=W,G=function(){B.postMessage(null)}}else G=function(){_(W,0)};function U(R,L){z=_(function(){R(e.unstable_now())},L)}e.unstable_IdlePriority=5,e.unstable_ImmediatePriority=1,e.unstable_LowPriority=4,e.unstable_NormalPriority=3,e.unstable_Profiling=null,e.unstable_UserBlockingPriority=2,e.unstable_cancelCallback=function(R){R.callback=null},e.unstable_forceFrameRate=function(R){0>R||125P?(R.sortIndex=I,n(h,R),r(m)===null&&R===r(h)&&(N?(A(z),z=-1):N=!0,U(T,I-P))):(R.sortIndex=C,n(m,R),j||b||(j=!0,D||(D=!0,G()))),R},e.unstable_shouldYield=X,e.unstable_wrapCallback=function(R){var L=y;return function(){var I=y;y=L;try{return R.apply(this,arguments)}finally{y=I}}}})(rh)),rh}var vv;function nC(){return vv||(vv=1,sh.exports=tC()),sh.exports}var oh={exports:{}},Jt={};/** * @license React * react-dom.production.js * @@ -30,7 +30,7 @@ function WE(e,n){for(var r=0;r"u"||typeof __REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE!="function"))try{__REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE(e)}catch(n){console.error(n)}}return e(),fh.exports=nC(),fh.exports}/** + */var bv;function sC(){if(bv)return Jt;bv=1;var e=wl();function n(m){var h="https://react.dev/errors/"+m;if(1"u"||typeof __REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE!="function"))try{__REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE(e)}catch(n){console.error(n)}}return e(),oh.exports=sC(),oh.exports}/** * @license React * react-dom-client.production.js * @@ -38,20 +38,20 @@ function WE(e,n){for(var r=0;rC||(t.current=$[C],$[C]=null,C--)}function V(t,s){C++,$[C]=t.current,t.current=s}var 
J=P(null),ce=P(null),fe=P(null),ee=P(null);function ie(t,s){switch(V(fe,s),V(ce,t),V(J,null),s.nodeType){case 9:case 11:t=(t=s.documentElement)&&(t=t.namespaceURI)?Vy(t):0;break;default:if(t=s.tagName,s=s.namespaceURI)s=Vy(s),t=qy(s,t);else switch(t){case"svg":t=1;break;case"math":t=2;break;default:t=0}}Y(J),V(J,t)}function ge(){Y(J),Y(ce),Y(fe)}function Ee(t){t.memoizedState!==null&&V(ee,t);var s=J.current,i=qy(s,t.type);s!==i&&(V(ce,t),V(J,i))}function Ne(t){ce.current===t&&(Y(J),Y(ce)),ee.current===t&&(Y(ee),Hi._currentValue=I)}var ve=Object.prototype.hasOwnProperty,ze=e.unstable_scheduleCallback,re=e.unstable_cancelCallback,K=e.unstable_shouldYield,me=e.unstable_requestPaint,be=e.unstable_now,Ce=e.unstable_getCurrentPriorityLevel,we=e.unstable_ImmediatePriority,Me=e.unstable_UserBlockingPriority,je=e.unstable_NormalPriority,Se=e.unstable_LowPriority,Ke=e.unstable_IdlePriority,tt=e.log,Ue=e.unstable_setDisableYieldValue,_e=null,xe=null;function $e(t){if(typeof tt=="function"&&Ue(t),xe&&typeof xe.setStrictMode=="function")try{xe.setStrictMode(_e,t)}catch{}}var Ge=Math.clz32?Math.clz32:Co,qt=Math.log,rn=Math.LN2;function Co(t){return t>>>=0,t===0?32:31-(qt(t)/rn|0)|0}var es=256,bs=4194304;function pe(t){var s=t&42;if(s!==0)return s;switch(t&-t){case 1:return 1;case 2:return 2;case 4:return 4;case 8:return 8;case 16:return 16;case 32:return 32;case 64:return 64;case 128:return 128;case 256:case 512:case 1024:case 2048:case 4096:case 8192:case 16384:case 32768:case 65536:case 131072:case 262144:case 524288:case 1048576:case 2097152:return t&4194048;case 4194304:case 8388608:case 16777216:case 33554432:return t&62914560;case 67108864:return 67108864;case 134217728:return 134217728;case 268435456:return 268435456;case 536870912:return 536870912;case 1073741824:return 0;default:return t}}function Ae(t,s,i){var u=t.pendingLanes;if(u===0)return 0;var p=0,v=t.suspendedLanes,T=t.pingedLanes;t=t.warmLanes;var z=u&134217727;return 
z!==0?(u=z&~v,u!==0?p=pe(u):(T&=z,T!==0?p=pe(T):i||(i=z&~t,i!==0&&(p=pe(i))))):(z=u&~v,z!==0?p=pe(z):T!==0?p=pe(T):i||(i=u&~t,i!==0&&(p=pe(i)))),p===0?0:s!==0&&s!==p&&(s&v)===0&&(v=p&-p,i=s&-s,v>=i||v===32&&(i&4194048)!==0)?s:p}function Ie(t,s){return(t.pendingLanes&~(t.suspendedLanes&~t.pingedLanes)&s)===0}function Ot(t,s){switch(t){case 1:case 2:case 4:case 8:case 64:return s+250;case 16:case 32:case 128:case 256:case 512:case 1024:case 2048:case 4096:case 8192:case 16384:case 32768:case 65536:case 131072:case 262144:case 524288:case 1048576:case 2097152:return s+5e3;case 4194304:case 8388608:case 16777216:case 33554432:return-1;case 67108864:case 134217728:case 268435456:case 536870912:case 1073741824:return-1;default:return-1}}function Ft(){var t=es;return es<<=1,(es&4194048)===0&&(es=256),t}function Pe(){var t=bs;return bs<<=1,(bs&62914560)===0&&(bs=4194304),t}function ye(t){for(var s=[],i=0;31>i;i++)s.push(t);return s}function dt(t,s){t.pendingLanes|=s,s!==268435456&&(t.suspendedLanes=0,t.pingedLanes=0,t.warmLanes=0)}function Ct(t,s,i,u,p,v){var T=t.pendingLanes;t.pendingLanes=i,t.suspendedLanes=0,t.pingedLanes=0,t.warmLanes=0,t.expiredLanes&=i,t.entangledLanes&=i,t.errorRecoveryDisabledLanes&=i,t.shellSuspendCounter=0;var z=t.entanglements,F=t.expirationTimes,se=t.hiddenUpdates;for(i=T&~i;0C||(t.current=P[C],P[C]=null,C--)}function V(t,s){C++,P[C]=t.current,t.current=s}var J=$(null),ce=$(null),fe=$(null),ee=$(null);function ie(t,s){switch(V(fe,s),V(ce,t),V(J,null),s.nodeType){case 9:case 11:t=(t=s.documentElement)&&(t=t.namespaceURI)?By(t):0;break;default:if(t=s.tagName,s=s.namespaceURI)s=By(s),t=Vy(s,t);else switch(t){case"svg":t=1;break;case"math":t=2;break;default:t=0}}Y(J),V(J,t)}function ge(){Y(J),Y(ce),Y(fe)}function Ee(t){t.memoizedState!==null&&V(ee,t);var s=J.current,i=Vy(s,t.type);s!==i&&(V(ce,t),V(J,i))}function Ne(t){ce.current===t&&(Y(J),Y(ce)),ee.current===t&&(Y(ee),Pi._currentValue=I)}var 
ve=Object.prototype.hasOwnProperty,ze=e.unstable_scheduleCallback,re=e.unstable_cancelCallback,Q=e.unstable_shouldYield,me=e.unstable_requestPaint,be=e.unstable_now,Ce=e.unstable_getCurrentPriorityLevel,we=e.unstable_ImmediatePriority,Me=e.unstable_UserBlockingPriority,je=e.unstable_NormalPriority,Se=e.unstable_LowPriority,Ke=e.unstable_IdlePriority,tt=e.log,Be=e.unstable_setDisableYieldValue,_e=null,xe=null;function $e(t){if(typeof tt=="function"&&Be(t),xe&&typeof xe.setStrictMode=="function")try{xe.setStrictMode(_e,t)}catch{}}var Ge=Math.clz32?Math.clz32:_o,qt=Math.log,rn=Math.LN2;function _o(t){return t>>>=0,t===0?32:31-(qt(t)/rn|0)|0}var Jn=256,vs=4194304;function pe(t){var s=t&42;if(s!==0)return s;switch(t&-t){case 1:return 1;case 2:return 2;case 4:return 4;case 8:return 8;case 16:return 16;case 32:return 32;case 64:return 64;case 128:return 128;case 256:case 512:case 1024:case 2048:case 4096:case 8192:case 16384:case 32768:case 65536:case 131072:case 262144:case 524288:case 1048576:case 2097152:return t&4194048;case 4194304:case 8388608:case 16777216:case 33554432:return t&62914560;case 67108864:return 67108864;case 134217728:return 134217728;case 268435456:return 268435456;case 536870912:return 536870912;case 1073741824:return 0;default:return t}}function Ae(t,s,i){var u=t.pendingLanes;if(u===0)return 0;var p=0,v=t.suspendedLanes,k=t.pingedLanes;t=t.warmLanes;var O=u&134217727;return O!==0?(u=O&~v,u!==0?p=pe(u):(k&=O,k!==0?p=pe(k):i||(i=O&~t,i!==0&&(p=pe(i))))):(O=u&~v,O!==0?p=pe(O):k!==0?p=pe(k):i||(i=u&~t,i!==0&&(p=pe(i)))),p===0?0:s!==0&&s!==p&&(s&v)===0&&(v=p&-p,i=s&-s,v>=i||v===32&&(i&4194048)!==0)?s:p}function Ie(t,s){return(t.pendingLanes&~(t.suspendedLanes&~t.pingedLanes)&s)===0}function Ot(t,s){switch(t){case 1:case 2:case 4:case 8:case 64:return s+250;case 16:case 32:case 128:case 256:case 512:case 1024:case 2048:case 4096:case 8192:case 16384:case 32768:case 65536:case 131072:case 262144:case 524288:case 1048576:case 2097152:return s+5e3;case 
4194304:case 8388608:case 16777216:case 33554432:return-1;case 67108864:case 134217728:case 268435456:case 536870912:case 1073741824:return-1;default:return-1}}function Ft(){var t=Jn;return Jn<<=1,(Jn&4194048)===0&&(Jn=256),t}function Pe(){var t=vs;return vs<<=1,(vs&62914560)===0&&(vs=4194304),t}function ye(t){for(var s=[],i=0;31>i;i++)s.push(t);return s}function dt(t,s){t.pendingLanes|=s,s!==268435456&&(t.suspendedLanes=0,t.pingedLanes=0,t.warmLanes=0)}function _t(t,s,i,u,p,v){var k=t.pendingLanes;t.pendingLanes=i,t.suspendedLanes=0,t.pingedLanes=0,t.warmLanes=0,t.expiredLanes&=i,t.entangledLanes&=i,t.errorRecoveryDisabledLanes&=i,t.shellSuspendCounter=0;var O=t.entanglements,F=t.expirationTimes,se=t.hiddenUpdates;for(i=k&~i;0)":-1p||F[u]!==se[p]){var ue=` -`+F[u].replace(" at new "," at ");return t.displayName&&ue.includes("")&&(ue=ue.replace("",t.displayName)),ue}while(1<=u&&0<=p);break}}}finally{Za=!1,Error.prepareStackTrace=i}return(i=t?t.displayName||t.name:"")?Ss(i):""}function nf(t){switch(t.tag){case 26:case 27:case 5:return Ss(t.type);case 16:return Ss("Lazy");case 13:return Ss("Suspense");case 19:return Ss("SuspenseList");case 0:case 15:return Wa(t.type,!1);case 11:return Wa(t.type.render,!1);case 1:return Wa(t.type,!0);case 31:return Ss("Activity");default:return""}}function ql(t){try{var s="";do s+=nf(t),t=t.return;while(t);return s}catch(i){return` +`+F[u].replace(" at new "," at ");return t.displayName&&ue.includes("")&&(ue=ue.replace("",t.displayName)),ue}while(1<=u&&0<=p);break}}}finally{Xa=!1,Error.prepareStackTrace=i}return(i=t?t.displayName||t.name:"")?js(i):""}function Zd(t){switch(t.tag){case 26:case 27:case 5:return js(t.type);case 16:return js("Lazy");case 13:return js("Suspense");case 19:return js("SuspenseList");case 0:case 15:return Za(t.type,!1);case 11:return Za(t.type.render,!1);case 1:return Za(t.type,!0);case 31:return js("Activity");default:return""}}function Vl(t){try{var s="";do s+=Zd(t),t=t.return;while(t);return 
s}catch(i){return` Error generating stack: `+i.message+` -`+i.stack}}function on(t){switch(typeof t){case"bigint":case"boolean":case"number":case"string":case"undefined":return t;case"object":return t;default:return""}}function Fl(t){var s=t.type;return(t=t.nodeName)&&t.toLowerCase()==="input"&&(s==="checkbox"||s==="radio")}function sf(t){var s=Fl(t)?"checked":"value",i=Object.getOwnPropertyDescriptor(t.constructor.prototype,s),u=""+t[s];if(!t.hasOwnProperty(s)&&typeof i<"u"&&typeof i.get=="function"&&typeof i.set=="function"){var p=i.get,v=i.set;return Object.defineProperty(t,s,{configurable:!0,get:function(){return p.call(this)},set:function(T){u=""+T,v.call(this,T)}}),Object.defineProperty(t,s,{enumerable:i.enumerable}),{getValue:function(){return u},setValue:function(T){u=""+T},stopTracking:function(){t._valueTracker=null,delete t[s]}}}}function Ao(t){t._valueTracker||(t._valueTracker=sf(t))}function Ka(t){if(!t)return!1;var s=t._valueTracker;if(!s)return!0;var i=s.getValue(),u="";return t&&(u=Fl(t)?t.checked?"true":"false":t.value),t=u,t!==i?(s.setValue(t),!0):!1}function Mo(t){if(t=t||(typeof document<"u"?document:void 0),typeof t>"u")return null;try{return t.activeElement||t.body}catch{return t.body}}var rf=/[\n"\\]/g;function an(t){return t.replace(rf,function(s){return"\\"+s.charCodeAt(0).toString(16)+" "})}function Fr(t,s,i,u,p,v,T,z){t.name="",T!=null&&typeof T!="function"&&typeof T!="symbol"&&typeof T!="boolean"?t.type=T:t.removeAttribute("type"),s!=null?T==="number"?(s===0&&t.value===""||t.value!=s)&&(t.value=""+on(s)):t.value!==""+on(s)&&(t.value=""+on(s)):T!=="submit"&&T!=="reset"||t.removeAttribute("value"),s!=null?Qa(t,T,on(s)):i!=null?Qa(t,T,on(i)):u!=null&&t.removeAttribute("value"),p==null&&v!=null&&(t.defaultChecked=!!v),p!=null&&(t.checked=p&&typeof p!="function"&&typeof p!="symbol"),z!=null&&typeof z!="function"&&typeof z!="symbol"&&typeof z!="boolean"?t.name=""+on(z):t.removeAttribute("name")}function Yl(t,s,i,u,p,v,T,z){if(v!=null&&typeof 
v!="function"&&typeof v!="symbol"&&typeof v!="boolean"&&(t.type=v),s!=null||i!=null){if(!(v!=="submit"&&v!=="reset"||s!=null))return;i=i!=null?""+on(i):"",s=s!=null?""+on(s):i,z||s===t.value||(t.value=s),t.defaultValue=s}u=u??p,u=typeof u!="function"&&typeof u!="symbol"&&!!u,t.checked=z?t.checked:!!u,t.defaultChecked=!!u,T!=null&&typeof T!="function"&&typeof T!="symbol"&&typeof T!="boolean"&&(t.name=T)}function Qa(t,s,i){s==="number"&&Mo(t.ownerDocument)===t||t.defaultValue===""+i||(t.defaultValue=""+i)}function _s(t,s,i,u){if(t=t.options,s){s={};for(var p=0;p"u"||typeof window.document>"u"||typeof window.document.createElement>"u"),uf=!1;if(Es)try{var ei={};Object.defineProperty(ei,"passive",{get:function(){uf=!0}}),window.addEventListener("test",ei,ei),window.removeEventListener("test",ei,ei)}catch{uf=!1}var tr=null,df=null,Xl=null;function Gg(){if(Xl)return Xl;var t,s=df,i=s.length,u,p="value"in tr?tr.value:tr.textContent,v=p.length;for(t=0;t=si),Jg=" ",ex=!1;function tx(t,s){switch(t){case"keyup":return b_.indexOf(s.keyCode)!==-1;case"keydown":return s.keyCode!==229;case"keypress":case"mousedown":case"focusout":return!0;default:return!1}}function nx(t){return t=t.detail,typeof t=="object"&&"data"in t?t.data:null}var zo=!1;function N_(t,s){switch(t){case"compositionend":return nx(s);case"keypress":return s.which!==32?null:(ex=!0,Jg);case"textInput":return t=s.data,t===Jg&&ex?null:t;default:return null}}function j_(t,s){if(zo)return t==="compositionend"||!gf&&tx(t,s)?(t=Gg(),Xl=df=tr=null,zo=!1,t):null;switch(t){case"paste":return null;case"keypress":if(!(s.ctrlKey||s.altKey||s.metaKey)||s.ctrlKey&&s.altKey){if(s.char&&1=s)return{node:i,offset:s-t};t=u}e:{for(;i;){if(i.nextSibling){i=i.nextSibling;break e}i=i.parentNode}i=void 0}i=ux(i)}}function fx(t,s){return t&&s?t===s?!0:t&&t.nodeType===3?!1:s&&s.nodeType===3?fx(t,s.parentNode):"contains"in t?t.contains(s):t.compareDocumentPosition?!!(t.compareDocumentPosition(s)&16):!1:!1}function 
mx(t){t=t!=null&&t.ownerDocument!=null&&t.ownerDocument.defaultView!=null?t.ownerDocument.defaultView:window;for(var s=Mo(t.document);s instanceof t.HTMLIFrameElement;){try{var i=typeof s.contentWindow.location.href=="string"}catch{i=!1}if(i)t=s.contentWindow;else break;s=Mo(t.document)}return s}function vf(t){var s=t&&t.nodeName&&t.nodeName.toLowerCase();return s&&(s==="input"&&(t.type==="text"||t.type==="search"||t.type==="tel"||t.type==="url"||t.type==="password")||s==="textarea"||t.contentEditable==="true")}var M_=Es&&"documentMode"in document&&11>=document.documentMode,Io=null,bf=null,ii=null,wf=!1;function hx(t,s,i){var u=i.window===i?i.document:i.nodeType===9?i:i.ownerDocument;wf||Io==null||Io!==Mo(u)||(u=Io,"selectionStart"in u&&vf(u)?u={start:u.selectionStart,end:u.selectionEnd}:(u=(u.ownerDocument&&u.ownerDocument.defaultView||window).getSelection(),u={anchorNode:u.anchorNode,anchorOffset:u.anchorOffset,focusNode:u.focusNode,focusOffset:u.focusOffset}),ii&&ai(ii,u)||(ii=u,u=$c(bf,"onSelect"),0>=T,p-=T,ks=1<<32-Ge(s)+p|i<v?v:8;var T=k.T,z={};k.T=z,am(t,!1,s,i);try{var F=p(),se=k.S;if(se!==null&&se(z,F),F!==null&&typeof F=="object"&&typeof F.then=="function"){var ue=H_(F,u);Ni(t,s,ue,vn(t))}else Ni(t,s,u,vn(t))}catch(he){Ni(t,s,{then:function(){},status:"rejected",reason:he},vn())}finally{L.p=v,k.T=T}}function F_(){}function rm(t,s,i,u){if(t.tag!==5)throw Error(a(476));var p=p0(t).queue;h0(t,p,s,I,i===null?F_:function(){return g0(t),i(u)})}function p0(t){var s=t.memoizedState;if(s!==null)return s;s={memoizedState:I,baseState:I,baseQueue:null,queue:{pending:null,lanes:0,dispatch:null,lastRenderedReducer:Rs,lastRenderedState:I},next:null};var i={};return s.next={memoizedState:i,baseState:i,baseQueue:null,queue:{pending:null,lanes:0,dispatch:null,lastRenderedReducer:Rs,lastRenderedState:i},next:null},t.memoizedState=s,t=t.alternate,t!==null&&(t.memoizedState=s),s}function g0(t){var s=p0(t).next.queue;Ni(t,s,{},vn())}function om(){return Qt(Hi)}function 
x0(){return Rt().memoizedState}function y0(){return Rt().memoizedState}function Y_(t){for(var s=t.return;s!==null;){switch(s.tag){case 24:case 3:var i=vn();t=rr(i);var u=or(s,t,i);u!==null&&(bn(u,s,i),gi(u,s,i)),s={cache:zf()},t.payload=s;return}s=s.return}}function G_(t,s,i){var u=vn();i={lane:u,revertLane:0,action:i,hasEagerState:!1,eagerState:null,next:null},yc(t)?b0(s,i):(i=_f(t,s,i,u),i!==null&&(bn(i,t,u),w0(i,s,u)))}function v0(t,s,i){var u=vn();Ni(t,s,i,u)}function Ni(t,s,i,u){var p={lane:u,revertLane:0,action:i,hasEagerState:!1,eagerState:null,next:null};if(yc(t))b0(s,p);else{var v=t.alternate;if(t.lanes===0&&(v===null||v.lanes===0)&&(v=s.lastRenderedReducer,v!==null))try{var T=s.lastRenderedState,z=v(T,i);if(p.hasEagerState=!0,p.eagerState=z,hn(z,T))return tc(t,s,p,0),vt===null&&ec(),!1}catch{}finally{}if(i=_f(t,s,p,u),i!==null)return bn(i,t,u),w0(i,s,u),!0}return!1}function am(t,s,i,u){if(u={lane:2,revertLane:$m(),action:u,hasEagerState:!1,eagerState:null,next:null},yc(t)){if(s)throw Error(a(479))}else s=_f(t,i,u,2),s!==null&&bn(s,t,2)}function yc(t){var s=t.alternate;return t===Qe||s!==null&&s===Qe}function b0(t,s){Yo=fc=!0;var i=t.pending;i===null?s.next=s:(s.next=i.next,i.next=s),t.pending=s}function w0(t,s,i){if((i&4194048)!==0){var u=s.lanes;u&=t.pendingLanes,i|=u,s.lanes=i,kn(t,i)}}var vc={readContext:Qt,use:hc,useCallback:kt,useContext:kt,useEffect:kt,useImperativeHandle:kt,useLayoutEffect:kt,useInsertionEffect:kt,useMemo:kt,useReducer:kt,useRef:kt,useState:kt,useDebugValue:kt,useDeferredValue:kt,useTransition:kt,useSyncExternalStore:kt,useId:kt,useHostTransitionStatus:kt,useFormState:kt,useActionState:kt,useOptimistic:kt,useMemoCache:kt,useCacheRefresh:kt},N0={readContext:Qt,use:hc,useCallback:function(t,s){return cn().memoizedState=[t,s===void 0?null:s],t},useContext:Qt,useEffect:o0,useImperativeHandle:function(t,s,i){i=i!=null?i.concat([t]):null,xc(4194308,4,c0.bind(null,s,t),i)},useLayoutEffect:function(t,s){return 
xc(4194308,4,t,s)},useInsertionEffect:function(t,s){xc(4,2,t,s)},useMemo:function(t,s){var i=cn();s=s===void 0?null:s;var u=t();if(so){$e(!0);try{t()}finally{$e(!1)}}return i.memoizedState=[u,s],u},useReducer:function(t,s,i){var u=cn();if(i!==void 0){var p=i(s);if(so){$e(!0);try{i(s)}finally{$e(!1)}}}else p=s;return u.memoizedState=u.baseState=p,t={pending:null,lanes:0,dispatch:null,lastRenderedReducer:t,lastRenderedState:p},u.queue=t,t=t.dispatch=G_.bind(null,Qe,t),[u.memoizedState,t]},useRef:function(t){var s=cn();return t={current:t},s.memoizedState=t},useState:function(t){t=em(t);var s=t.queue,i=v0.bind(null,Qe,s);return s.dispatch=i,[t.memoizedState,i]},useDebugValue:nm,useDeferredValue:function(t,s){var i=cn();return sm(i,t,s)},useTransition:function(){var t=em(!1);return t=h0.bind(null,Qe,t.queue,!0,!1),cn().memoizedState=t,[!1,t]},useSyncExternalStore:function(t,s,i){var u=Qe,p=cn();if(ct){if(i===void 0)throw Error(a(407));i=i()}else{if(i=s(),vt===null)throw Error(a(349));(it&124)!==0||Vx(u,s,i)}p.memoizedState=i;var v={value:i,getSnapshot:s};return p.queue=v,o0(Fx.bind(null,u,v,t),[t]),u.flags|=2048,Xo(9,gc(),qx.bind(null,u,v,i,s),null),i},useId:function(){var t=cn(),s=vt.identifierPrefix;if(ct){var i=Ts,u=ks;i=(u&~(1<<32-Ge(u)-1)).toString(32)+i,s="«"+s+"R"+i,i=mc++,0qe?(Vt=He,He=null):Vt=He.sibling;var lt=oe(W,He,te[qe],de);if(lt===null){He===null&&(He=Vt);break}t&&He&<.alternate===null&&s(W,He),X=v(lt,X,qe),et===null?Re=lt:et.sibling=lt,et=lt,He=Vt}if(qe===te.length)return i(W,He),ct&&Kr(W,qe),Re;if(He===null){for(;qeqe?(Vt=He,He=null):Vt=He.sibling;var Nr=oe(W,He,lt.value,de);if(Nr===null){He===null&&(He=Vt);break}t&&He&&Nr.alternate===null&&s(W,He),X=v(Nr,X,qe),et===null?Re=Nr:et.sibling=Nr,et=Nr,He=Vt}if(lt.done)return i(W,He),ct&&Kr(W,qe),Re;if(He===null){for(;!lt.done;qe++,lt=te.next())lt=he(W,lt.value,de),lt!==null&&(X=v(lt,X,qe),et===null?Re=lt:et.sibling=lt,et=lt);return 
ct&&Kr(W,qe),Re}for(He=u(He);!lt.done;qe++,lt=te.next())lt=ae(He,W,qe,lt.value,de),lt!==null&&(t&<.alternate!==null&&He.delete(lt.key===null?qe:lt.key),X=v(lt,X,qe),et===null?Re=lt:et.sibling=lt,et=lt);return t&&He.forEach(function(ZE){return s(W,ZE)}),ct&&Kr(W,qe),Re}function gt(W,X,te,de){if(typeof te=="object"&&te!==null&&te.type===j&&te.key===null&&(te=te.props.children),typeof te=="object"&&te!==null){switch(te.$$typeof){case x:e:{for(var Re=te.key;X!==null;){if(X.key===Re){if(Re=te.type,Re===j){if(X.tag===7){i(W,X.sibling),de=p(X,te.props.children),de.return=W,W=de;break e}}else if(X.elementType===Re||typeof Re=="object"&&Re!==null&&Re.$$typeof===H&&S0(Re)===X.type){i(W,X.sibling),de=p(X,te.props),Si(de,te),de.return=W,W=de;break e}i(W,X);break}else s(W,X);X=X.sibling}te.type===j?(de=Zr(te.props.children,W.mode,de,te.key),de.return=W,W=de):(de=sc(te.type,te.key,te.props,null,W.mode,de),Si(de,te),de.return=W,W=de)}return T(W);case b:e:{for(Re=te.key;X!==null;){if(X.key===Re)if(X.tag===4&&X.stateNode.containerInfo===te.containerInfo&&X.stateNode.implementation===te.implementation){i(W,X.sibling),de=p(X,te.children||[]),de.return=W,W=de;break e}else{i(W,X);break}else s(W,X);X=X.sibling}de=kf(te,W.mode,de),de.return=W,W=de}return T(W);case H:return Re=te._init,te=Re(te._payload),gt(W,X,te,de)}if(B(te))return Fe(W,X,te,de);if(G(te)){if(Re=G(te),typeof Re!="function")throw Error(a(150));return te=Re.call(te),Ve(W,X,te,de)}if(typeof te.then=="function")return gt(W,X,bc(te),de);if(te.$$typeof===E)return gt(W,X,ic(W,te),de);wc(W,te)}return typeof te=="string"&&te!==""||typeof te=="number"||typeof te=="bigint"?(te=""+te,X!==null&&X.tag===6?(i(W,X.sibling),de=p(X,te),de.return=W,W=de):(i(W,X),de=Cf(te,W.mode,de),de.return=W,W=de),T(W)):i(W,X)}return function(W,X,te,de){try{ji=0;var Re=gt(W,X,te,de);return Zo=null,Re}catch(He){if(He===hi||He===cc)throw He;var et=pn(29,He,null,W.mode);return et.lanes=de,et.return=W,et}finally{}}}var 
Wo=_0(!0),E0=_0(!1),Dn=P(null),ss=null;function ir(t){var s=t.alternate;V(It,It.current&1),V(Dn,t),ss===null&&(s===null||Fo.current!==null||s.memoizedState!==null)&&(ss=t)}function C0(t){if(t.tag===22){if(V(It,It.current),V(Dn,t),ss===null){var s=t.alternate;s!==null&&s.memoizedState!==null&&(ss=t)}}else lr()}function lr(){V(It,It.current),V(Dn,Dn.current)}function Ds(t){Y(Dn),ss===t&&(ss=null),Y(It)}var It=P(0);function Nc(t){for(var s=t;s!==null;){if(s.tag===13){var i=s.memoizedState;if(i!==null&&(i=i.dehydrated,i===null||i.data==="$?"||Wm(i)))return s}else if(s.tag===19&&s.memoizedProps.revealOrder!==void 0){if((s.flags&128)!==0)return s}else if(s.child!==null){s.child.return=s,s=s.child;continue}if(s===t)break;for(;s.sibling===null;){if(s.return===null||s.return===t)return null;s=s.return}s.sibling.return=s.return,s=s.sibling}return null}function im(t,s,i,u){s=t.memoizedState,i=i(u,s),i=i==null?s:g({},s,i),t.memoizedState=i,t.lanes===0&&(t.updateQueue.baseState=i)}var lm={enqueueSetState:function(t,s,i){t=t._reactInternals;var u=vn(),p=rr(u);p.payload=s,i!=null&&(p.callback=i),s=or(t,p,u),s!==null&&(bn(s,t,u),gi(s,t,u))},enqueueReplaceState:function(t,s,i){t=t._reactInternals;var u=vn(),p=rr(u);p.tag=1,p.payload=s,i!=null&&(p.callback=i),s=or(t,p,u),s!==null&&(bn(s,t,u),gi(s,t,u))},enqueueForceUpdate:function(t,s){t=t._reactInternals;var i=vn(),u=rr(i);u.tag=2,s!=null&&(u.callback=s),s=or(t,u,i),s!==null&&(bn(s,t,i),gi(s,t,i))}};function k0(t,s,i,u,p,v,T){return t=t.stateNode,typeof t.shouldComponentUpdate=="function"?t.shouldComponentUpdate(u,v,T):s.prototype&&s.prototype.isPureReactComponent?!ai(i,u)||!ai(p,v):!0}function T0(t,s,i,u){t=s.state,typeof s.componentWillReceiveProps=="function"&&s.componentWillReceiveProps(i,u),typeof s.UNSAFE_componentWillReceiveProps=="function"&&s.UNSAFE_componentWillReceiveProps(i,u),s.state!==t&&lm.enqueueReplaceState(s,s.state,null)}function ro(t,s){var i=s;if("ref"in s){i={};for(var u in 
s)u!=="ref"&&(i[u]=s[u])}if(t=t.defaultProps){i===s&&(i=g({},i));for(var p in t)i[p]===void 0&&(i[p]=t[p])}return i}var jc=typeof reportError=="function"?reportError:function(t){if(typeof window=="object"&&typeof window.ErrorEvent=="function"){var s=new window.ErrorEvent("error",{bubbles:!0,cancelable:!0,message:typeof t=="object"&&t!==null&&typeof t.message=="string"?String(t.message):String(t),error:t});if(!window.dispatchEvent(s))return}else if(typeof process=="object"&&typeof process.emit=="function"){process.emit("uncaughtException",t);return}console.error(t)};function A0(t){jc(t)}function M0(t){console.error(t)}function R0(t){jc(t)}function Sc(t,s){try{var i=t.onUncaughtError;i(s.value,{componentStack:s.stack})}catch(u){setTimeout(function(){throw u})}}function D0(t,s,i){try{var u=t.onCaughtError;u(i.value,{componentStack:i.stack,errorBoundary:s.tag===1?s.stateNode:null})}catch(p){setTimeout(function(){throw p})}}function cm(t,s,i){return i=rr(i),i.tag=3,i.payload={element:null},i.callback=function(){Sc(t,s)},i}function O0(t){return t=rr(t),t.tag=3,t}function z0(t,s,i,u){var p=i.type.getDerivedStateFromError;if(typeof p=="function"){var v=u.value;t.payload=function(){return p(v)},t.callback=function(){D0(s,i,u)}}var T=i.stateNode;T!==null&&typeof T.componentDidCatch=="function"&&(t.callback=function(){D0(s,i,u),typeof p!="function"&&(hr===null?hr=new Set([this]):hr.add(this));var z=u.stack;this.componentDidCatch(u.value,{componentStack:z!==null?z:""})})}function Z_(t,s,i,u,p){if(i.flags|=32768,u!==null&&typeof u=="object"&&typeof u.then=="function"){if(s=i.alternate,s!==null&&di(s,i,p,!0),i=Dn.current,i!==null){switch(i.tag){case 13:return ss===null?Dm():i.alternate===null&&_t===0&&(_t=3),i.flags&=-257,i.flags|=65536,i.lanes=p,u===$f?i.flags|=16384:(s=i.updateQueue,s===null?i.updateQueue=new Set([u]):s.add(u),zm(t,u,p)),!1;case 22:return i.flags|=65536,u===$f?i.flags|=16384:(s=i.updateQueue,s===null?(s={transitions:null,markerInstances:null,retryQueue:new 
Set([u])},i.updateQueue=s):(i=s.retryQueue,i===null?s.retryQueue=new Set([u]):i.add(u)),zm(t,u,p)),!1}throw Error(a(435,i.tag))}return zm(t,u,p),Dm(),!1}if(ct)return s=Dn.current,s!==null?((s.flags&65536)===0&&(s.flags|=256),s.flags|=65536,s.lanes=p,u!==Mf&&(t=Error(a(422),{cause:u}),ui(Tn(t,i)))):(u!==Mf&&(s=Error(a(423),{cause:u}),ui(Tn(s,i))),t=t.current.alternate,t.flags|=65536,p&=-p,t.lanes|=p,u=Tn(u,i),p=cm(t.stateNode,u,p),Bf(t,p),_t!==4&&(_t=2)),!1;var v=Error(a(520),{cause:u});if(v=Tn(v,i),Mi===null?Mi=[v]:Mi.push(v),_t!==4&&(_t=2),s===null)return!0;u=Tn(u,i),i=s;do{switch(i.tag){case 3:return i.flags|=65536,t=p&-p,i.lanes|=t,t=cm(i.stateNode,u,t),Bf(i,t),!1;case 1:if(s=i.type,v=i.stateNode,(i.flags&128)===0&&(typeof s.getDerivedStateFromError=="function"||v!==null&&typeof v.componentDidCatch=="function"&&(hr===null||!hr.has(v))))return i.flags|=65536,p&=-p,i.lanes|=p,p=O0(p),z0(p,t,i,u),Bf(i,p),!1}i=i.return}while(i!==null);return!1}var I0=Error(a(461)),Bt=!1;function Yt(t,s,i,u){s.child=t===null?E0(s,null,i,u):Wo(s,t.child,i,u)}function L0(t,s,i,u,p){i=i.render;var v=s.ref;if("ref"in u){var T={};for(var z in u)z!=="ref"&&(T[z]=u[z])}else T=u;return to(s),u=Yf(t,s,i,T,v,p),z=Gf(),t!==null&&!Bt?(Xf(t,s,p),Os(t,s,p)):(ct&&z&&Tf(s),s.flags|=1,Yt(t,s,u,p),s.child)}function $0(t,s,i,u,p){if(t===null){var v=i.type;return typeof v=="function"&&!Ef(v)&&v.defaultProps===void 0&&i.compare===null?(s.tag=15,s.type=v,P0(t,s,v,u,p)):(t=sc(i.type,null,u,s,s.mode,p),t.ref=s.ref,t.return=s,s.child=t)}if(v=t.child,!xm(t,p)){var T=v.memoizedProps;if(i=i.compare,i=i!==null?i:ai,i(T,u)&&t.ref===s.ref)return Os(t,s,p)}return s.flags|=1,t=Cs(v,u),t.ref=s.ref,t.return=s,s.child=t}function P0(t,s,i,u,p){if(t!==null){var v=t.memoizedProps;if(ai(v,u)&&t.ref===s.ref)if(Bt=!1,s.pendingProps=u=v,xm(t,p))(t.flags&131072)!==0&&(Bt=!0);else return s.lanes=t.lanes,Os(t,s,p)}return um(t,s,i,u,p)}function H0(t,s,i){var 
u=s.pendingProps,p=u.children,v=t!==null?t.memoizedState:null;if(u.mode==="hidden"){if((s.flags&128)!==0){if(u=v!==null?v.baseLanes|i:i,t!==null){for(p=s.child=t.child,v=0;p!==null;)v=v|p.lanes|p.childLanes,p=p.sibling;s.childLanes=v&~u}else s.childLanes=0,s.child=null;return B0(t,s,u,i)}if((i&536870912)!==0)s.memoizedState={baseLanes:0,cachePool:null},t!==null&&lc(s,v!==null?v.cachePool:null),v!==null?Px(s,v):Vf(),C0(s);else return s.lanes=s.childLanes=536870912,B0(t,s,v!==null?v.baseLanes|i:i,i)}else v!==null?(lc(s,v.cachePool),Px(s,v),lr(),s.memoizedState=null):(t!==null&&lc(s,null),Vf(),lr());return Yt(t,s,p,i),s.child}function B0(t,s,i,u){var p=Lf();return p=p===null?null:{parent:zt._currentValue,pool:p},s.memoizedState={baseLanes:i,cachePool:p},t!==null&&lc(s,null),Vf(),C0(s),t!==null&&di(t,s,u,!0),null}function _c(t,s){var i=s.ref;if(i===null)t!==null&&t.ref!==null&&(s.flags|=4194816);else{if(typeof i!="function"&&typeof i!="object")throw Error(a(284));(t===null||t.ref!==i)&&(s.flags|=4194816)}}function um(t,s,i,u,p){return to(s),i=Yf(t,s,i,u,void 0,p),u=Gf(),t!==null&&!Bt?(Xf(t,s,p),Os(t,s,p)):(ct&&u&&Tf(s),s.flags|=1,Yt(t,s,i,p),s.child)}function U0(t,s,i,u,p,v){return to(s),s.updateQueue=null,i=Bx(s,u,i,p),Hx(t),u=Gf(),t!==null&&!Bt?(Xf(t,s,v),Os(t,s,v)):(ct&&u&&Tf(s),s.flags|=1,Yt(t,s,i,v),s.child)}function V0(t,s,i,u,p){if(to(s),s.stateNode===null){var v=Ho,T=i.contextType;typeof T=="object"&&T!==null&&(v=Qt(T)),v=new i(u,v),s.memoizedState=v.state!==null&&v.state!==void 0?v.state:null,v.updater=lm,s.stateNode=v,v._reactInternals=s,v=s.stateNode,v.props=u,v.state=s.memoizedState,v.refs={},Pf(s),T=i.contextType,v.context=typeof T=="object"&&T!==null?Qt(T):Ho,v.state=s.memoizedState,T=i.getDerivedStateFromProps,typeof T=="function"&&(im(s,i,T,u),v.state=s.memoizedState),typeof i.getDerivedStateFromProps=="function"||typeof v.getSnapshotBeforeUpdate=="function"||typeof v.UNSAFE_componentWillMount!="function"&&typeof 
v.componentWillMount!="function"||(T=v.state,typeof v.componentWillMount=="function"&&v.componentWillMount(),typeof v.UNSAFE_componentWillMount=="function"&&v.UNSAFE_componentWillMount(),T!==v.state&&lm.enqueueReplaceState(v,v.state,null),yi(s,u,v,p),xi(),v.state=s.memoizedState),typeof v.componentDidMount=="function"&&(s.flags|=4194308),u=!0}else if(t===null){v=s.stateNode;var z=s.memoizedProps,F=ro(i,z);v.props=F;var se=v.context,ue=i.contextType;T=Ho,typeof ue=="object"&&ue!==null&&(T=Qt(ue));var he=i.getDerivedStateFromProps;ue=typeof he=="function"||typeof v.getSnapshotBeforeUpdate=="function",z=s.pendingProps!==z,ue||typeof v.UNSAFE_componentWillReceiveProps!="function"&&typeof v.componentWillReceiveProps!="function"||(z||se!==T)&&T0(s,v,u,T),sr=!1;var oe=s.memoizedState;v.state=oe,yi(s,u,v,p),xi(),se=s.memoizedState,z||oe!==se||sr?(typeof he=="function"&&(im(s,i,he,u),se=s.memoizedState),(F=sr||k0(s,i,F,u,oe,se,T))?(ue||typeof v.UNSAFE_componentWillMount!="function"&&typeof v.componentWillMount!="function"||(typeof v.componentWillMount=="function"&&v.componentWillMount(),typeof v.UNSAFE_componentWillMount=="function"&&v.UNSAFE_componentWillMount()),typeof v.componentDidMount=="function"&&(s.flags|=4194308)):(typeof v.componentDidMount=="function"&&(s.flags|=4194308),s.memoizedProps=u,s.memoizedState=se),v.props=u,v.state=se,v.context=T,u=F):(typeof v.componentDidMount=="function"&&(s.flags|=4194308),u=!1)}else{v=s.stateNode,Hf(t,s),T=s.memoizedProps,ue=ro(i,T),v.props=ue,he=s.pendingProps,oe=v.context,se=i.contextType,F=Ho,typeof se=="object"&&se!==null&&(F=Qt(se)),z=i.getDerivedStateFromProps,(se=typeof z=="function"||typeof v.getSnapshotBeforeUpdate=="function")||typeof v.UNSAFE_componentWillReceiveProps!="function"&&typeof v.componentWillReceiveProps!="function"||(T!==he||oe!==F)&&T0(s,v,u,F),sr=!1,oe=s.memoizedState,v.state=oe,yi(s,u,v,p),xi();var ae=s.memoizedState;T!==he||oe!==ae||sr||t!==null&&t.dependencies!==null&&ac(t.dependencies)?(typeof 
z=="function"&&(im(s,i,z,u),ae=s.memoizedState),(ue=sr||k0(s,i,ue,u,oe,ae,F)||t!==null&&t.dependencies!==null&&ac(t.dependencies))?(se||typeof v.UNSAFE_componentWillUpdate!="function"&&typeof v.componentWillUpdate!="function"||(typeof v.componentWillUpdate=="function"&&v.componentWillUpdate(u,ae,F),typeof v.UNSAFE_componentWillUpdate=="function"&&v.UNSAFE_componentWillUpdate(u,ae,F)),typeof v.componentDidUpdate=="function"&&(s.flags|=4),typeof v.getSnapshotBeforeUpdate=="function"&&(s.flags|=1024)):(typeof v.componentDidUpdate!="function"||T===t.memoizedProps&&oe===t.memoizedState||(s.flags|=4),typeof v.getSnapshotBeforeUpdate!="function"||T===t.memoizedProps&&oe===t.memoizedState||(s.flags|=1024),s.memoizedProps=u,s.memoizedState=ae),v.props=u,v.state=ae,v.context=F,u=ue):(typeof v.componentDidUpdate!="function"||T===t.memoizedProps&&oe===t.memoizedState||(s.flags|=4),typeof v.getSnapshotBeforeUpdate!="function"||T===t.memoizedProps&&oe===t.memoizedState||(s.flags|=1024),u=!1)}return v=u,_c(t,s),u=(s.flags&128)!==0,v||u?(v=s.stateNode,i=u&&typeof i.getDerivedStateFromError!="function"?null:v.render(),s.flags|=1,t!==null&&u?(s.child=Wo(s,t.child,null,p),s.child=Wo(s,null,i,p)):Yt(t,s,i,p),s.memoizedState=v.state,t=s.child):t=Os(t,s,p),t}function q0(t,s,i,u){return ci(),s.flags|=256,Yt(t,s,i,u),s.child}var dm={dehydrated:null,treeContext:null,retryLane:0,hydrationErrors:null};function fm(t){return{baseLanes:t,cachePool:Mx()}}function mm(t,s,i){return t=t!==null?t.childLanes&~i:0,s&&(t|=On),t}function F0(t,s,i){var u=s.pendingProps,p=!1,v=(s.flags&128)!==0,T;if((T=v)||(T=t!==null&&t.memoizedState===null?!1:(It.current&2)!==0),T&&(p=!0,s.flags&=-129),T=(s.flags&32)!==0,s.flags&=-33,t===null){if(ct){if(p?ir(s):lr(),ct){var z=St,F;if(F=z){e:{for(F=z,z=ns;F.nodeType!==8;){if(!z){z=null;break e}if(F=qn(F.nextSibling),F===null){z=null;break 
e}}z=F}z!==null?(s.memoizedState={dehydrated:z,treeContext:Wr!==null?{id:ks,overflow:Ts}:null,retryLane:536870912,hydrationErrors:null},F=pn(18,null,null,0),F.stateNode=z,F.return=s,s.child=F,tn=s,St=null,F=!0):F=!1}F||Jr(s)}if(z=s.memoizedState,z!==null&&(z=z.dehydrated,z!==null))return Wm(z)?s.lanes=32:s.lanes=536870912,null;Ds(s)}return z=u.children,u=u.fallback,p?(lr(),p=s.mode,z=Ec({mode:"hidden",children:z},p),u=Zr(u,p,i,null),z.return=s,u.return=s,z.sibling=u,s.child=z,p=s.child,p.memoizedState=fm(i),p.childLanes=mm(t,T,i),s.memoizedState=dm,u):(ir(s),hm(s,z))}if(F=t.memoizedState,F!==null&&(z=F.dehydrated,z!==null)){if(v)s.flags&256?(ir(s),s.flags&=-257,s=pm(t,s,i)):s.memoizedState!==null?(lr(),s.child=t.child,s.flags|=128,s=null):(lr(),p=u.fallback,z=s.mode,u=Ec({mode:"visible",children:u.children},z),p=Zr(p,z,i,null),p.flags|=2,u.return=s,p.return=s,u.sibling=p,s.child=u,Wo(s,t.child,null,i),u=s.child,u.memoizedState=fm(i),u.childLanes=mm(t,T,i),s.memoizedState=dm,s=p);else if(ir(s),Wm(z)){if(T=z.nextSibling&&z.nextSibling.dataset,T)var se=T.dgst;T=se,u=Error(a(419)),u.stack="",u.digest=T,ui({value:u,source:null,stack:null}),s=pm(t,s,i)}else if(Bt||di(t,s,i,!1),T=(i&t.childLanes)!==0,Bt||T){if(T=vt,T!==null&&(u=i&-i,u=(u&42)!==0?1:mn(u),u=(u&(T.suspendedLanes|i))!==0?0:u,u!==0&&u!==F.retryLane))throw F.retryLane=u,Po(t,u),bn(T,t,u),I0;z.data==="$?"||Dm(),s=pm(t,s,i)}else z.data==="$?"?(s.flags|=192,s.child=t.child,s=null):(t=F.treeContext,St=qn(z.nextSibling),tn=s,ct=!0,Qr=null,ns=!1,t!==null&&(Mn[Rn++]=ks,Mn[Rn++]=Ts,Mn[Rn++]=Wr,ks=t.id,Ts=t.overflow,Wr=s),s=hm(s,u.children),s.flags|=4096);return s}return 
p?(lr(),p=u.fallback,z=s.mode,F=t.child,se=F.sibling,u=Cs(F,{mode:"hidden",children:u.children}),u.subtreeFlags=F.subtreeFlags&65011712,se!==null?p=Cs(se,p):(p=Zr(p,z,i,null),p.flags|=2),p.return=s,u.return=s,u.sibling=p,s.child=u,u=p,p=s.child,z=t.child.memoizedState,z===null?z=fm(i):(F=z.cachePool,F!==null?(se=zt._currentValue,F=F.parent!==se?{parent:se,pool:se}:F):F=Mx(),z={baseLanes:z.baseLanes|i,cachePool:F}),p.memoizedState=z,p.childLanes=mm(t,T,i),s.memoizedState=dm,u):(ir(s),i=t.child,t=i.sibling,i=Cs(i,{mode:"visible",children:u.children}),i.return=s,i.sibling=null,t!==null&&(T=s.deletions,T===null?(s.deletions=[t],s.flags|=16):T.push(t)),s.child=i,s.memoizedState=null,i)}function hm(t,s){return s=Ec({mode:"visible",children:s},t.mode),s.return=t,t.child=s}function Ec(t,s){return t=pn(22,t,null,s),t.lanes=0,t.stateNode={_visibility:1,_pendingMarkers:null,_retryCache:null,_transitions:null},t}function pm(t,s,i){return Wo(s,t.child,null,i),t=hm(s,s.pendingProps.children),t.flags|=2,s.memoizedState=null,t}function Y0(t,s,i){t.lanes|=s;var u=t.alternate;u!==null&&(u.lanes|=s),Df(t.return,s,i)}function gm(t,s,i,u,p){var v=t.memoizedState;v===null?t.memoizedState={isBackwards:s,rendering:null,renderingStartTime:0,last:u,tail:i,tailMode:p}:(v.isBackwards=s,v.rendering=null,v.renderingStartTime=0,v.last=u,v.tail=i,v.tailMode=p)}function G0(t,s,i){var u=s.pendingProps,p=u.revealOrder,v=u.tail;if(Yt(t,s,u.children,i),u=It.current,(u&2)!==0)u=u&1|2,s.flags|=128;else{if(t!==null&&(t.flags&128)!==0)e:for(t=s.child;t!==null;){if(t.tag===13)t.memoizedState!==null&&Y0(t,i,s);else if(t.tag===19)Y0(t,i,s);else if(t.child!==null){t.child.return=t,t=t.child;continue}if(t===s)break e;for(;t.sibling===null;){if(t.return===null||t.return===s)break 
e;t=t.return}t.sibling.return=t.return,t=t.sibling}u&=1}switch(V(It,u),p){case"forwards":for(i=s.child,p=null;i!==null;)t=i.alternate,t!==null&&Nc(t)===null&&(p=i),i=i.sibling;i=p,i===null?(p=s.child,s.child=null):(p=i.sibling,i.sibling=null),gm(s,!1,p,i,v);break;case"backwards":for(i=null,p=s.child,s.child=null;p!==null;){if(t=p.alternate,t!==null&&Nc(t)===null){s.child=p;break}t=p.sibling,p.sibling=i,i=p,p=t}gm(s,!0,i,null,v);break;case"together":gm(s,!1,null,null,void 0);break;default:s.memoizedState=null}return s.child}function Os(t,s,i){if(t!==null&&(s.dependencies=t.dependencies),mr|=s.lanes,(i&s.childLanes)===0)if(t!==null){if(di(t,s,i,!1),(i&s.childLanes)===0)return null}else return null;if(t!==null&&s.child!==t.child)throw Error(a(153));if(s.child!==null){for(t=s.child,i=Cs(t,t.pendingProps),s.child=i,i.return=s;t.sibling!==null;)t=t.sibling,i=i.sibling=Cs(t,t.pendingProps),i.return=s;i.sibling=null}return s.child}function xm(t,s){return(t.lanes&s)!==0?!0:(t=t.dependencies,!!(t!==null&&ac(t)))}function W_(t,s,i){switch(s.tag){case 3:ie(s,s.stateNode.containerInfo),nr(s,zt,t.memoizedState.cache),ci();break;case 27:case 5:Ee(s);break;case 4:ie(s,s.stateNode.containerInfo);break;case 10:nr(s,s.type,s.memoizedProps.value);break;case 13:var u=s.memoizedState;if(u!==null)return u.dehydrated!==null?(ir(s),s.flags|=128,null):(i&s.child.childLanes)!==0?F0(t,s,i):(ir(s),t=Os(t,s,i),t!==null?t.sibling:null);ir(s);break;case 19:var p=(t.flags&128)!==0;if(u=(i&s.childLanes)!==0,u||(di(t,s,i,!1),u=(i&s.childLanes)!==0),p){if(u)return G0(t,s,i);s.flags|=128}if(p=s.memoizedState,p!==null&&(p.rendering=null,p.tail=null,p.lastEffect=null),V(It,It.current),u)break;return null;case 22:case 23:return s.lanes=0,H0(t,s,i);case 24:nr(s,zt,t.memoizedState.cache)}return Os(t,s,i)}function X0(t,s,i){if(t!==null)if(t.memoizedProps!==s.pendingProps)Bt=!0;else{if(!xm(t,i)&&(s.flags&128)===0)return Bt=!1,W_(t,s,i);Bt=(t.flags&131072)!==0}else 
Bt=!1,ct&&(s.flags&1048576)!==0&&Sx(s,oc,s.index);switch(s.lanes=0,s.tag){case 16:e:{t=s.pendingProps;var u=s.elementType,p=u._init;if(u=p(u._payload),s.type=u,typeof u=="function")Ef(u)?(t=ro(u,t),s.tag=1,s=V0(null,s,u,t,i)):(s.tag=0,s=um(null,s,u,t,i));else{if(u!=null){if(p=u.$$typeof,p===M){s.tag=11,s=L0(null,s,u,t,i);break e}else if(p===O){s.tag=14,s=$0(null,s,u,t,i);break e}}throw s=U(u)||u,Error(a(306,s,""))}}return s;case 0:return um(t,s,s.type,s.pendingProps,i);case 1:return u=s.type,p=ro(u,s.pendingProps),V0(t,s,u,p,i);case 3:e:{if(ie(s,s.stateNode.containerInfo),t===null)throw Error(a(387));u=s.pendingProps;var v=s.memoizedState;p=v.element,Hf(t,s),yi(s,u,null,i);var T=s.memoizedState;if(u=T.cache,nr(s,zt,u),u!==v.cache&&Of(s,[zt],i,!0),xi(),u=T.element,v.isDehydrated)if(v={element:u,isDehydrated:!1,cache:T.cache},s.updateQueue.baseState=v,s.memoizedState=v,s.flags&256){s=q0(t,s,u,i);break e}else if(u!==p){p=Tn(Error(a(424)),s),ui(p),s=q0(t,s,u,i);break e}else{switch(t=s.stateNode.containerInfo,t.nodeType){case 9:t=t.body;break;default:t=t.nodeName==="HTML"?t.ownerDocument.body:t}for(St=qn(t.firstChild),tn=s,ct=!0,Qr=null,ns=!0,i=E0(s,null,u,i),s.child=i;i;)i.flags=i.flags&-3|4096,i=i.sibling}else{if(ci(),u===p){s=Os(t,s,i);break e}Yt(t,s,u,i)}s=s.child}return s;case 26:return _c(t,s),t===null?(i=Qy(s.type,null,s.pendingProps,null))?s.memoizedState=i:ct||(i=s.type,t=s.pendingProps,u=Hc(fe.current).createElement(i),u[Ht]=s,u[Kt]=t,Xt(u,i,t),At(u),s.stateNode=u):s.memoizedState=Qy(s.type,t.memoizedProps,s.pendingProps,t.memoizedState),null;case 27:return Ee(s),t===null&&ct&&(u=s.stateNode=Zy(s.type,s.pendingProps,fe.current),tn=s,ns=!0,p=St,xr(s.type)?(Km=p,St=qn(u.firstChild)):St=p),Yt(t,s,s.pendingProps.children,i),_c(t,s),t===null&&(s.flags|=4194304),s.child;case 5:return 
t===null&&ct&&((p=u=St)&&(u=SE(u,s.type,s.pendingProps,ns),u!==null?(s.stateNode=u,tn=s,St=qn(u.firstChild),ns=!1,p=!0):p=!1),p||Jr(s)),Ee(s),p=s.type,v=s.pendingProps,T=t!==null?t.memoizedProps:null,u=v.children,Gm(p,v)?u=null:T!==null&&Gm(p,T)&&(s.flags|=32),s.memoizedState!==null&&(p=Yf(t,s,U_,null,null,i),Hi._currentValue=p),_c(t,s),Yt(t,s,u,i),s.child;case 6:return t===null&&ct&&((t=i=St)&&(i=_E(i,s.pendingProps,ns),i!==null?(s.stateNode=i,tn=s,St=null,t=!0):t=!1),t||Jr(s)),null;case 13:return F0(t,s,i);case 4:return ie(s,s.stateNode.containerInfo),u=s.pendingProps,t===null?s.child=Wo(s,null,u,i):Yt(t,s,u,i),s.child;case 11:return L0(t,s,s.type,s.pendingProps,i);case 7:return Yt(t,s,s.pendingProps,i),s.child;case 8:return Yt(t,s,s.pendingProps.children,i),s.child;case 12:return Yt(t,s,s.pendingProps.children,i),s.child;case 10:return u=s.pendingProps,nr(s,s.type,u.value),Yt(t,s,u.children,i),s.child;case 9:return p=s.type._context,u=s.pendingProps.children,to(s),p=Qt(p),u=u(p),s.flags|=1,Yt(t,s,u,i),s.child;case 14:return $0(t,s,s.type,s.pendingProps,i);case 15:return P0(t,s,s.type,s.pendingProps,i);case 19:return G0(t,s,i);case 31:return u=s.pendingProps,i=s.mode,u={mode:u.mode,children:u.children},t===null?(i=Ec(u,i),i.ref=s.ref,s.child=i,i.return=s,s=i):(i=Cs(t.child,u),i.ref=s.ref,s.child=i,i.return=s,s=i),s;case 22:return H0(t,s,i);case 24:return to(s),u=Qt(zt),t===null?(p=Lf(),p===null&&(p=vt,v=zf(),p.pooledCache=v,v.refCount++,v!==null&&(p.pooledCacheLanes|=i),p=v),s.memoizedState={parent:u,cache:p},Pf(s),nr(s,zt,p)):((t.lanes&i)!==0&&(Hf(t,s),yi(s,null,null,i),xi()),p=t.memoizedState,v=s.memoizedState,p.parent!==u?(p={parent:u,cache:u},s.memoizedState=p,s.lanes===0&&(s.memoizedState=s.updateQueue.baseState=p),nr(s,zt,u)):(u=v.cache,nr(s,zt,u),u!==p.cache&&Of(s,[zt],i,!0))),Yt(t,s,s.pendingProps.children,i),s.child;case 29:throw s.pendingProps}throw Error(a(156,s.tag))}function zs(t){t.flags|=4}function 
Z0(t,s){if(s.type!=="stylesheet"||(s.state.loading&4)!==0)t.flags&=-16777217;else if(t.flags|=16777216,!sv(s)){if(s=Dn.current,s!==null&&((it&4194048)===it?ss!==null:(it&62914560)!==it&&(it&536870912)===0||s!==ss))throw pi=$f,Rx;t.flags|=8192}}function Cc(t,s){s!==null&&(t.flags|=4),t.flags&16384&&(s=t.tag!==22?Pe():536870912,t.lanes|=s,ea|=s)}function _i(t,s){if(!ct)switch(t.tailMode){case"hidden":s=t.tail;for(var i=null;s!==null;)s.alternate!==null&&(i=s),s=s.sibling;i===null?t.tail=null:i.sibling=null;break;case"collapsed":i=t.tail;for(var u=null;i!==null;)i.alternate!==null&&(u=i),i=i.sibling;u===null?s||t.tail===null?t.tail=null:t.tail.sibling=null:u.sibling=null}}function jt(t){var s=t.alternate!==null&&t.alternate.child===t.child,i=0,u=0;if(s)for(var p=t.child;p!==null;)i|=p.lanes|p.childLanes,u|=p.subtreeFlags&65011712,u|=p.flags&65011712,p.return=t,p=p.sibling;else for(p=t.child;p!==null;)i|=p.lanes|p.childLanes,u|=p.subtreeFlags,u|=p.flags,p.return=t,p=p.sibling;return t.subtreeFlags|=u,t.childLanes=i,s}function K_(t,s,i){var u=s.pendingProps;switch(Af(s),s.tag){case 31:case 16:case 15:case 0:case 11:case 7:case 8:case 12:case 9:case 14:return jt(s),null;case 1:return jt(s),null;case 3:return i=s.stateNode,u=null,t!==null&&(u=t.memoizedState.cache),s.memoizedState.cache!==u&&(s.flags|=2048),Ms(zt),ge(),i.pendingContext&&(i.context=i.pendingContext,i.pendingContext=null),(t===null||t.child===null)&&(li(s)?zs(s):t===null||t.memoizedState.isDehydrated&&(s.flags&256)===0||(s.flags|=1024,Cx())),jt(s),null;case 26:return i=s.memoizedState,t===null?(zs(s),i!==null?(jt(s),Z0(s,i)):(jt(s),s.flags&=-16777217)):i?i!==t.memoizedState?(zs(s),jt(s),Z0(s,i)):(jt(s),s.flags&=-16777217):(t.memoizedProps!==u&&zs(s),jt(s),s.flags&=-16777217),null;case 27:Ne(s),i=fe.current;var p=s.type;if(t!==null&&s.stateNode!=null)t.memoizedProps!==u&&zs(s);else{if(!u){if(s.stateNode===null)throw Error(a(166));return 
jt(s),null}t=J.current,li(s)?_x(s):(t=Zy(p,u,i),s.stateNode=t,zs(s))}return jt(s),null;case 5:if(Ne(s),i=s.type,t!==null&&s.stateNode!=null)t.memoizedProps!==u&&zs(s);else{if(!u){if(s.stateNode===null)throw Error(a(166));return jt(s),null}if(t=J.current,li(s))_x(s);else{switch(p=Hc(fe.current),t){case 1:t=p.createElementNS("http://www.w3.org/2000/svg",i);break;case 2:t=p.createElementNS("http://www.w3.org/1998/Math/MathML",i);break;default:switch(i){case"svg":t=p.createElementNS("http://www.w3.org/2000/svg",i);break;case"math":t=p.createElementNS("http://www.w3.org/1998/Math/MathML",i);break;case"script":t=p.createElement("div"),t.innerHTML="