diff --git a/sdk/ai/azure-ai-agents/CHANGELOG.md b/sdk/ai/azure-ai-agents/CHANGELOG.md index 0eabaf89afe0..7604d0862c38 100644 --- a/sdk/ai/azure-ai-agents/CHANGELOG.md +++ b/sdk/ai/azure-ai-agents/CHANGELOG.md @@ -8,11 +8,14 @@ ### Features Added - Add `RunStepDetailsActivity`, describing MCP function parameters. +- Add `RunStepDeltaCustomBingGroundingToolCall`, describing `BingCustomSearchTool` updates in streaming scenario. ### Bugs Fixed ### Sample updates +- Bing Grounding and Bing Custom Search samples were fixed to correctly present references. + ## 1.2.0b2 (2025-08-12) ### Features Added diff --git a/sdk/ai/azure-ai-agents/README.md b/sdk/ai/azure-ai-agents/README.md index 4c724661c0b8..7302a8570047 100644 --- a/sdk/ai/azure-ai-agents/README.md +++ b/sdk/ai/azure-ai-agents/README.md @@ -369,7 +369,7 @@ Here is an example: ```python -conn_id = os.environ["AZURE_BING_CONNECTION_ID"] +conn_id = project_client.connections.get(os.environ["BING_CONNECTION_NAME"]).id # Initialize agent bing tool and add the connection id bing = BingGroundingTool(connection_id=conn_id) @@ -474,9 +474,7 @@ The tool approval flow looks like this: # Create and process agent run in thread with MCP tools mcp_tool.update_headers("SuperSecret", "123456") # mcp_tool.set_approval_mode("never") # Uncomment to disable approval requirement -run = agents_client.runs.create( - thread_id=thread.id, agent_id=agent.id, tool_resources=mcp_tool.resources -) +run = agents_client.runs.create(thread_id=thread.id, agent_id=agent.id, tool_resources=mcp_tool.resources) print(f"Created run, ID: {run.id}") while run.status in ["queued", "in_progress", "requires_action"]: diff --git a/sdk/ai/azure-ai-agents/apiview-properties.json b/sdk/ai/azure-ai-agents/apiview-properties.json index ee40bdfb9621..797a90ba7833 100644 --- a/sdk/ai/azure-ai-agents/apiview-properties.json +++ b/sdk/ai/azure-ai-agents/apiview-properties.json @@ -136,6 +136,7 @@ "azure.ai.agents.models.RunStepDeltaCodeInterpreterLogOutput": 
"Azure.AI.Agents.RunStepDeltaCodeInterpreterLogOutput", "azure.ai.agents.models.RunStepDeltaCodeInterpreterToolCall": "Azure.AI.Agents.RunStepDeltaCodeInterpreterToolCall", "azure.ai.agents.models.RunStepDeltaConnectedAgentToolCall": "Azure.AI.Agents.RunStepDeltaConnectedAgentToolCall", + "azure.ai.agents.models.RunStepDeltaCustomBingGroundingToolCall": "Azure.AI.Agents.RunStepDeltaCustomBingGroundingToolCall", "azure.ai.agents.models.RunStepDeltaDeepResearchToolCall": "Azure.AI.Agents.RunStepDeltaDeepResearchToolCall", "azure.ai.agents.models.RunStepDeltaDetail": "Azure.AI.Agents.RunStepDeltaDetail", "azure.ai.agents.models.RunStepDeltaFileSearchToolCall": "Azure.AI.Agents.RunStepDeltaFileSearchToolCall", diff --git a/sdk/ai/azure-ai-agents/assets.json b/sdk/ai/azure-ai-agents/assets.json index 7e8976743aa2..8784d77ce5af 100644 --- a/sdk/ai/azure-ai-agents/assets.json +++ b/sdk/ai/azure-ai-agents/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "python", "TagPrefix": "python/ai/azure-ai-agents", - "Tag": "python/ai/azure-ai-agents_ad0998d8d9" + "Tag": "python/ai/azure-ai-agents_6e57db9f8e" } diff --git a/sdk/ai/azure-ai-agents/azure/ai/agents/models/__init__.py b/sdk/ai/azure-ai-agents/azure/ai/agents/models/__init__.py index a05e78ae67c5..0c88b03c2d2f 100644 --- a/sdk/ai/azure-ai-agents/azure/ai/agents/models/__init__.py +++ b/sdk/ai/azure-ai-agents/azure/ai/agents/models/__init__.py @@ -145,6 +145,7 @@ RunStepDeltaCodeInterpreterOutput, RunStepDeltaCodeInterpreterToolCall, RunStepDeltaConnectedAgentToolCall, + RunStepDeltaCustomBingGroundingToolCall, RunStepDeltaDeepResearchToolCall, RunStepDeltaDetail, RunStepDeltaFileSearchToolCall, @@ -382,6 +383,7 @@ "RunStepDeltaCodeInterpreterOutput", "RunStepDeltaCodeInterpreterToolCall", "RunStepDeltaConnectedAgentToolCall", + "RunStepDeltaCustomBingGroundingToolCall", "RunStepDeltaDeepResearchToolCall", "RunStepDeltaDetail", "RunStepDeltaFileSearchToolCall", diff --git 
a/sdk/ai/azure-ai-agents/azure/ai/agents/models/_models.py b/sdk/ai/azure-ai-agents/azure/ai/agents/models/_models.py index 7aa7ca6468cf..b5e5339d8c7c 100644 --- a/sdk/ai/azure-ai-agents/azure/ai/agents/models/_models.py +++ b/sdk/ai/azure-ai-agents/azure/ai/agents/models/_models.py @@ -4425,7 +4425,8 @@ class RunStepBingCustomSearchToolCall(RunStepToolCall, discriminator="bing_custo :ivar type: The object type, which is always 'bing_custom_search'. Required. Default value is "bing_custom_search". :vartype type: str - :ivar bing_custom_search: Reserved for future use. Required. + :ivar bing_custom_search: The dictionary with request and response from Custom Bing Grounding + search tool. Required. :vartype bing_custom_search: dict[str, str] """ @@ -4433,7 +4434,7 @@ class RunStepBingCustomSearchToolCall(RunStepToolCall, discriminator="bing_custo """The object type, which is always 'bing_custom_search'. Required. Default value is \"bing_custom_search\".""" bing_custom_search: Dict[str, str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """Reserved for future use. Required.""" + """The dictionary with request and response from Custom Bing Grounding search tool. Required.""" @overload def __init__( @@ -4465,7 +4466,8 @@ class RunStepBingGroundingToolCall(RunStepToolCall, discriminator="bing_groundin :ivar type: The object type, which is always 'bing_grounding'. Required. Default value is "bing_grounding". :vartype type: str - :ivar bing_grounding: Reserved for future use. Required. + :ivar bing_grounding: The dictionary with request and response from Bing Grounding search tool. + Required. :vartype bing_grounding: dict[str, str] """ @@ -4473,7 +4475,7 @@ class RunStepBingGroundingToolCall(RunStepToolCall, discriminator="bing_groundin """The object type, which is always 'bing_grounding'. Required. 
Default value is \"bing_grounding\".""" bing_grounding: Dict[str, str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """Reserved for future use. Required.""" + """The dictionary with request and response from Bing Grounding search tool. Required.""" @overload def __init__( @@ -4985,10 +4987,11 @@ class RunStepDeltaToolCall(_Model): call details. You probably want to use the sub-classes and not this class directly. Known sub-classes are: - RunStepDeltaAzureAISearchToolCall, RunStepDeltaBingGroundingToolCall, - RunStepDeltaCodeInterpreterToolCall, RunStepDeltaConnectedAgentToolCall, - RunStepDeltaDeepResearchToolCall, RunStepDeltaFileSearchToolCall, RunStepDeltaFunctionToolCall, - RunStepDeltaMcpToolCall, RunStepDeltaOpenAPIToolCall + RunStepDeltaAzureAISearchToolCall, RunStepDeltaCustomBingGroundingToolCall, + RunStepDeltaBingGroundingToolCall, RunStepDeltaCodeInterpreterToolCall, + RunStepDeltaConnectedAgentToolCall, RunStepDeltaDeepResearchToolCall, + RunStepDeltaFileSearchToolCall, RunStepDeltaFunctionToolCall, RunStepDeltaMcpToolCall, + RunStepDeltaOpenAPIToolCall :ivar index: The index of the tool call detail in the run step's tool_calls array. Required. :vartype index: int @@ -5078,7 +5081,8 @@ class RunStepDeltaBingGroundingToolCall(RunStepDeltaToolCall, discriminator="bin :ivar type: The object type, which is always "bing_grounding". Required. Default value is "bing_grounding". :vartype type: str - :ivar bing_grounding: Reserved for future use. Required. + :ivar bing_grounding: The dictionary with request and response from Bing Grounding search tool. + Required. :vartype bing_grounding: dict[str, str] """ @@ -5086,7 +5090,7 @@ class RunStepDeltaBingGroundingToolCall(RunStepDeltaToolCall, discriminator="bin """The object type, which is always \"bing_grounding\". Required. 
Default value is \"bing_grounding\".""" bing_grounding: Dict[str, str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) - """Reserved for future use. Required.""" + """The dictionary with request and response from Bing Grounding search tool. Required.""" @overload def __init__( @@ -5418,6 +5422,47 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, type="connected_agent", **kwargs) +class RunStepDeltaCustomBingGroundingToolCall(RunStepDeltaToolCall, discriminator="bing_custom_search"): + """Represents the Custom Bing Grounding tool call in a streaming run step. + + :ivar index: The index of the tool call detail in the run step's tool_calls array. Required. + :vartype index: int + :ivar id: The ID of the tool call, used when submitting outputs to the run. Required. + :vartype id: str + :ivar type: The object type, which is always 'bing_custom_search'. Required. Default value is + "bing_custom_search". + :vartype type: str + :ivar bing_custom_search: The dictionary with request and response from Custom Bing Grounding + search tool. Required. + :vartype bing_custom_search: dict[str, str] + """ + + type: Literal["bing_custom_search"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The object type, which is always 'bing_custom_search'. Required. Default value is + \"bing_custom_search\".""" + bing_custom_search: Dict[str, str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """The dictionary with request and response from Custom Bing Grounding search tool. Required.""" + + @overload + def __init__( + self, + *, + index: int, + id: str, # pylint: disable=redefined-builtin + bing_custom_search: Dict[str, str], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="bing_custom_search", **kwargs) + + class RunStepDeltaDeepResearchToolCall(RunStepDeltaToolCall, discriminator="deep_research"): """Represents the Deep research in a streaming run step. diff --git a/sdk/ai/azure-ai-agents/azure_ai_agents_tests.env b/sdk/ai/azure-ai-agents/azure_ai_agents_tests.env index 8ade71bdd73d..882df17ae56e 100644 --- a/sdk/ai/azure-ai-agents/azure_ai_agents_tests.env +++ b/sdk/ai/azure-ai-agents/azure_ai_agents_tests.env @@ -7,7 +7,7 @@ ######################################################################################################################## # Agents tests -# +# AZURE_AI_AGENTS_TESTS_PROJECT_CONNECTION_STRING= AZURE_AI_AGENTS_TESTS_PROJECT_ENDPOINT= AZURE_AI_AGENTS_TESTS_DATA_PATH= @@ -17,4 +17,6 @@ AZURE_AI_AGENTS_TESTS_SEARCH_CONNECTION_ID= AZURE_AI_AGENTS_TESTS_IS_TEST_RUN=True AZURE_AI_AGENTS_TESTS_BING_CONNECTION_ID= AZURE_AI_AGENTS_TESTS_PLAYWRIGHT_CONNECTION_ID= -AZURE_AI_AGENTS_TESTS_DEEP_RESEARCH_MODEL= \ No newline at end of file +AZURE_AI_AGENTS_TESTS_DEEP_RESEARCH_MODEL= +AZURE_AI_AGENTS_TESTS_BING_CUSTOM_CONNECTION_ID= +AZURE_AI_AGENTS_TESTS_BING_CONFIGURATION_NAME= \ No newline at end of file diff --git a/sdk/ai/azure-ai-agents/samples/agents_streaming/sample_agents_stream_eventhandler_with_bing_grounding.py b/sdk/ai/azure-ai-agents/samples/agents_streaming/sample_agents_stream_eventhandler_with_bing_grounding.py index 727a23483cc9..bc1ff7cb54e0 100644 --- a/sdk/ai/azure-ai-agents/samples/agents_streaming/sample_agents_stream_eventhandler_with_bing_grounding.py +++ b/sdk/ai/azure-ai-agents/samples/agents_streaming/sample_agents_stream_eventhandler_with_bing_grounding.py @@ -21,11 +21,12 @@ page of your Azure AI Foundry portal. 
2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. - 3) AZURE_BING_CONNECTION_ID - The connection id of the Bing connection, as found in the "Connected resources" tab - in your Azure AI Foundry project. + 3) BING_CONNECTION_NAME - The name of a connection to the Bing search resource as it is + listed in Azure AI Foundry connected resources. """ import os +import re from typing import Any from azure.identity import DefaultAzureCredential from azure.ai.projects import AIProjectClient @@ -45,7 +46,9 @@ class MyEventHandler(AgentEventHandler): def on_message_delta(self, delta: "MessageDeltaChunk") -> None: - print(f"Text delta received: {delta.text}") + # Do not print reference text as we will show actual citation instead. + if re.match(r"\u3010(.+)\u3011", delta.text) is None: + print(f"Text delta received: {delta.text}") if delta.delta.content and isinstance(delta.delta.content[0], MessageDeltaTextContent): delta_text_content = delta.delta.content[0] if delta_text_content.text and delta_text_content.text.annotations: @@ -85,7 +88,7 @@ def on_unhandled_event(self, event_type: str, event_data: Any) -> None: with project_client: agents_client = project_client.agents - bing_connection_id = os.environ["AZURE_BING_CONNECTION_ID"] + bing_connection_id = project_client.connections.get(os.environ["BING_CONNECTION_NAME"]).id print(f"Bing Connection ID: {bing_connection_id}") # Initialize agent bing tool and add the connection id @@ -117,7 +120,12 @@ def on_unhandled_event(self, event_type: str, event_data: Any) -> None: response_message = agents_client.messages.get_last_message_by_role(thread_id=thread.id, role=MessageRole.AGENT) if response_message: + responses = [] for text_message in response_message.text_messages: - print(f"Agent response: {text_message.text.value}") + responses.append(text_message.text.value) + message = " ".join(responses) for annotation in 
response_message.url_citation_annotations: - print(f"URL Citation: [{annotation.url_citation.title}]({annotation.url_citation.url})") + message = message.replace( + annotation.text, f" [{annotation.url_citation.title}]({annotation.url_citation.url})" + ) + print(f"Agent response: {message}") diff --git a/sdk/ai/azure-ai-agents/samples/agents_streaming/sample_agents_stream_iteration_with_bing_grounding.py b/sdk/ai/azure-ai-agents/samples/agents_streaming/sample_agents_stream_iteration_with_bing_grounding.py index bf72fe403740..43e1bafead5e 100644 --- a/sdk/ai/azure-ai-agents/samples/agents_streaming/sample_agents_stream_iteration_with_bing_grounding.py +++ b/sdk/ai/azure-ai-agents/samples/agents_streaming/sample_agents_stream_iteration_with_bing_grounding.py @@ -21,11 +21,12 @@ page of your Azure AI Foundry portal. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. - 3) AZURE_BING_CONNECTION_ID - The ID of the Bing connection, as found in the "Connected resources" tab - in your Azure AI Foundry project. + 3) BING_CONNECTION_NAME - The name of a connection to the Bing search resource as it is + listed in Azure AI Foundry connected resources. """ import os +import re from azure.ai.projects import AIProjectClient from azure.ai.agents.models import AgentStreamEvent, RunStepDeltaChunk from azure.ai.agents.models import ( @@ -48,7 +49,7 @@ with project_client: agents_client = project_client.agents - bing_connection_id = os.environ["AZURE_BING_CONNECTION_ID"] + bing_connection_id = project_client.connections.get(os.environ["BING_CONNECTION_NAME"]).id bing = BingGroundingTool(connection_id=bing_connection_id) print(f"Bing Connection ID: {bing_connection_id}") @@ -69,12 +70,15 @@ print(f"Created message, message ID {message.id}") # Process Agent run and stream events back to the client. It may take a few minutes for the agent to complete the run. 
+ reference_text = re.compile(r"\u3010(.+)\u3011") with agents_client.runs.stream(thread_id=thread.id, agent_id=agent.id) as stream: for event_type, event_data, _ in stream: if isinstance(event_data, MessageDeltaChunk): - print(f"Text delta received: {event_data.text}") + # Do not print reference text as we will show actual citation instead. + if reference_text.match(event_data.text) is None: + print(f"Text delta received: {event_data.text}") if event_data.delta.content and isinstance(event_data.delta.content[0], MessageDeltaTextContent): delta_text_content = event_data.delta.content[0] if delta_text_content.text and delta_text_content.text.annotations: @@ -113,7 +117,12 @@ response_message = agents_client.messages.get_last_message_by_role(thread_id=thread.id, role=MessageRole.AGENT) if response_message: + responses = [] for text_message in response_message.text_messages: - print(f"Agent response: {text_message.text.value}") + responses.append(text_message.text.value) + message = " ".join(responses) for annotation in response_message.url_citation_annotations: - print(f"URL Citation: [{annotation.url_citation.title}]({annotation.url_citation.url})") + message = message.replace( + annotation.text, f" [{annotation.url_citation.title}]({annotation.url_citation.url})" + ) + print(f"Agent response: {message}") diff --git a/sdk/ai/azure-ai-agents/samples/agents_streaming/sample_agents_stream_iteration_with_mcp.py b/sdk/ai/azure-ai-agents/samples/agents_streaming/sample_agents_stream_iteration_with_mcp.py index 02850212ef32..6ec8161d4dda 100644 --- a/sdk/ai/azure-ai-agents/samples/agents_streaming/sample_agents_stream_iteration_with_mcp.py +++ b/sdk/ai/azure-ai-agents/samples/agents_streaming/sample_agents_stream_iteration_with_mcp.py @@ -93,9 +93,7 @@ # Process Agent run and stream events back to the client. It may take a few minutes for the agent to complete the run. 
mcp_tool.update_headers("SuperSecret", "123456") # mcp_tool.set_approval_mode("never") # Uncomment to disable approval requirement - with agents_client.runs.stream( - thread_id=thread.id, agent_id=agent.id, tool_resources=mcp_tool.resources - ) as stream: + with agents_client.runs.stream(thread_id=thread.id, agent_id=agent.id, tool_resources=mcp_tool.resources) as stream: for event_type, event_data, _ in stream: @@ -201,9 +199,7 @@ agents_client.delete_agent(agent.id) print("Deleted agent") - response_message = agents_client.messages.get_last_message_by_role( - thread_id=thread.id, role=MessageRole.AGENT - ) + response_message = agents_client.messages.get_last_message_by_role(thread_id=thread.id, role=MessageRole.AGENT) if response_message: for text_message in response_message.text_messages: print(f"Agent response: {text_message.text.value}") diff --git a/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_bing_custom_search.py b/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_bing_custom_search.py index 38b32e93e575..a94c31ec2d8c 100644 --- a/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_bing_custom_search.py +++ b/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_bing_custom_search.py @@ -23,24 +23,26 @@ page of your Azure AI Foundry portal. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. - 3) BING_CUSTOM_CONNECTION_ID - The ID of the Bing Custom Search connection, in the format of: - /subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/providers/Microsoft.MachineLearningServices/workspaces/{workspace-name}/connections/{connection-name} + 3) BING_CUSTOM_CONNECTION_NAME - The name of a connection to the custom search Bing resource as it is + listed in Azure AI Foundry connected resources. 
+ 4) BING_CONFIGURATION_NAME - the name of a search configuration in Grounding with Bing Custom Search + resource. """ import os from azure.ai.projects import AIProjectClient from azure.identity import DefaultAzureCredential -from azure.ai.agents.models import BingCustomSearchTool +from azure.ai.agents.models import BingCustomSearchTool, ListSortOrder project_client = AIProjectClient( endpoint=os.environ["PROJECT_ENDPOINT"], credential=DefaultAzureCredential(), ) -conn_id = os.environ["BING_CUSTOM_CONNECTION_ID"] +conn_id = project_client.connections.get(os.environ["BING_CUSTOM_CONNECTION_NAME"]).id # Initialize Bing Custom Search tool with connection id and instance name -bing_custom_tool = BingCustomSearchTool(connection_id=conn_id, instance_name="") +bing_custom_tool = BingCustomSearchTool(connection_id=conn_id, instance_name=os.environ["BING_CONFIGURATION_NAME"]) # Create Agent with the Bing Custom Search tool and process Agent run with project_client: @@ -78,10 +80,15 @@ print("Deleted agent") # Fetch and log all messages - messages = agents_client.messages.list(thread_id=thread.id) + messages = agents_client.messages.list(thread_id=thread.id, order=ListSortOrder.ASCENDING) for msg in messages: if msg.text_messages: + responses = [] for text_message in msg.text_messages: - print(f"Agent response: {text_message.text.value}") + responses.append(text_message.text.value) + message = " ".join(responses) for annotation in msg.url_citation_annotations: - print(f"URL Citation: [{annotation.url_citation.title}]({annotation.url_citation.url})") + message = message.replace( + annotation.text, f" [{annotation.url_citation.title}]({annotation.url_citation.url})" + ) + print(f"{msg.role}: {message}") diff --git a/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_bing_grounding.py b/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_bing_grounding.py index f042a17da650..b56463973d06 100644 --- 
a/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_bing_grounding.py +++ b/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_bing_grounding.py @@ -21,8 +21,8 @@ page of your Azure AI Foundry portal. 2) MODEL_DEPLOYMENT_NAME - The deployment name of the AI model, as found under the "Name" column in the "Models + endpoints" tab in your Azure AI Foundry project. - 3) AZURE_BING_CONNECTION_ID - The ID of the Bing connection, in the format of: - /subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/providers/Microsoft.MachineLearningServices/workspaces/{workspace-name}/connections/{connection-name} + 3) BING_CONNECTION_NAME - The name of a connection to the Bing resource as it is + listed in Azure AI Foundry connected resources. """ import os @@ -37,7 +37,7 @@ ) # [START create_agent_with_bing_grounding_tool] -conn_id = os.environ["AZURE_BING_CONNECTION_ID"] +conn_id = project_client.connections.get(os.environ["BING_CONNECTION_NAME"]).id # Initialize agent bing tool and add the connection id bing = BingGroundingTool(connection_id=conn_id) @@ -100,7 +100,12 @@ # Print the Agent's response message with optional citation response_message = agents_client.messages.get_last_message_by_role(thread_id=thread.id, role=MessageRole.AGENT) if response_message: + responses = [] for text_message in response_message.text_messages: - print(f"Agent response: {text_message.text.value}") + responses.append(text_message.text.value) + message = " ".join(responses) for annotation in response_message.url_citation_annotations: - print(f"URL Citation: [{annotation.url_citation.title}]({annotation.url_citation.url})") + message = message.replace( + annotation.text, f" [{annotation.url_citation.title}]({annotation.url_citation.url})" + ) + print(f"Agent response: {message}") diff --git a/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_mcp.py b/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_mcp.py index bbaa61d58212..dbc40f03bd9d 100644 --- 
a/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_mcp.py +++ b/sdk/ai/azure-ai-agents/samples/agents_tools/sample_agents_mcp.py @@ -93,9 +93,7 @@ # Create and process agent run in thread with MCP tools mcp_tool.update_headers("SuperSecret", "123456") # mcp_tool.set_approval_mode("never") # Uncomment to disable approval requirement - run = agents_client.runs.create( - thread_id=thread.id, agent_id=agent.id, tool_resources=mcp_tool.resources - ) + run = agents_client.runs.create(thread_id=thread.id, agent_id=agent.id, tool_resources=mcp_tool.resources) print(f"Created run, ID: {run.id}") while run.status in ["queued", "in_progress", "requires_action"]: diff --git a/sdk/ai/azure-ai-agents/tests/conftest.py b/sdk/ai/azure-ai-agents/tests/conftest.py index dfc7b135408a..56383765fad9 100644 --- a/sdk/ai/azure-ai-agents/tests/conftest.py +++ b/sdk/ai/azure-ai-agents/tests/conftest.py @@ -153,6 +153,12 @@ def azure_workspace_triad_sanitizer(): value="/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.CognitiveServices/accounts/00000/projects/00000/connections/00000", ) + # Sanitize the custom bing grounding. 
+ add_body_key_sanitizer( + json_path="tools[*].bing_custom_search.search_configurations[*].connection_id", + value="/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.CognitiveServices/accounts/00000/projects/00000/connections/00000", + ) + # Sanitize deep research tool bing connection ID add_body_key_sanitizer( json_path="tools[*].deep_research.bing_grounding_connections[*].connection_id", diff --git a/sdk/ai/azure-ai-agents/tests/test_agents_client.py b/sdk/ai/azure-ai-agents/tests/test_agents_client.py index d8b652dc5c36..ba1638d918ea 100644 --- a/sdk/ai/azure-ai-agents/tests/test_agents_client.py +++ b/sdk/ai/azure-ai-agents/tests/test_agents_client.py @@ -7,9 +7,9 @@ from typing import Any, Dict, Optional, Type import os +import re import json import jsonref -import logging import tempfile import time import pytest @@ -28,6 +28,7 @@ AzureAISearchTool, AzureFunctionStorageQueue, AzureFunctionTool, + BingCustomSearchTool, BingGroundingTool, BrowserAutomationTool, CodeInterpreterTool, @@ -53,12 +54,14 @@ ResponseFormatJsonSchemaType, RunAdditionalFieldList, RunStepAzureAISearchToolCall, + RunStepBingCustomSearchToolCall, RunStepBingGroundingToolCall, RunStepBrowserAutomationToolCall, RunStepConnectedAgentToolCall, RunStepDeepResearchToolCall, RunStepDeltaAzureAISearchToolCall, RunStepDeltaChunk, + RunStepDeltaCustomBingGroundingToolCall, RunStepDeltaBingGroundingToolCall, RunStepDeltaFileSearchToolCall, RunStepDeltaOpenAPIToolCall, @@ -2790,7 +2793,7 @@ def test_azure_ai_search_tool(self, **kwargs): instructions="You are a helpful agent that can search for information using Azure AI Search.", prompt="What is the temperature rating of the cozynights sleeping bag?", expected_class=RunStepAzureAISearchToolCall, - specific_message_text="60", + agent_message_regex="60", uri_annotation=MessageTextUrlCitationDetails( url="www.microsoft.com", title="product_info_7.md", @@ -3114,7 +3117,7 @@ def test_azure_function_call(self, 
**kwargs): prompt="What is the most prevalent element in the universe? What would foo say?", # TODO: Implement the run step for AzureFunction. expected_class=None, - specific_message_text="bar", + agent_message_regex="bar", ) @agentClientPreparer() @@ -3146,7 +3149,7 @@ def test_browser_automation_tool(self, **kwargs): # load a VM and open a browser. Use a large polling interval to avoid tons of REST API calls in test recordings. polling_interval=60, expected_class=RunStepBrowserAutomationToolCall, - specific_message_text="the year-to-date (ytd) stock price change for microsoft (msft) is", + agent_message_regex="the year-to-date [(]ytd[)] stock price change for microsoft [(]msft[)] is", ) @agentClientPreparer() @@ -3436,6 +3439,56 @@ def test_bing_grounding_tool_streaming(self, **kwargs): ), ) + @agentClientPreparer() + @recorded_by_proxy + def test_custom_bing_grounding_tool(self, **kwargs): + """Test Bing grounding tool call in non-streaming Scenario.""" + with self.create_client(by_endpoint=True, **kwargs) as client: + model_name = "gpt-4o" + bing_custom_tool = BingCustomSearchTool( + connection_id=kwargs.get("azure_ai_agents_tests_bing_custom_connection_id"), + instance_name=kwargs.get("azure_ai_agents_tests_bing_configuration_name"), + ) + + self._do_test_tool( + client=client, + model_name=model_name, + tool_to_test=bing_custom_tool, + instructions="You are helpful agent", + prompt="How many medals did the USA win in the 2024 summer olympics?", + expected_class=RunStepBingCustomSearchToolCall, + agent_message_regex="40.+gold.+44 silver.+42.+bronze", + uri_annotation=MessageTextUrlCitationDetails( + url="*", + title="*", + ), + ) + + @agentClientPreparer() + @recorded_by_proxy + def test_custom_bing_grounding_tool_streaming(self, **kwargs): + """Test Bing grounding tool call in streaming Scenario.""" + with self.create_client(by_endpoint=True, **kwargs) as client: + model_name = "gpt-4o" + bing_custom_tool = BingCustomSearchTool( + 
connection_id=kwargs.get("azure_ai_agents_tests_bing_custom_connection_id"), + instance_name=kwargs.get("azure_ai_agents_tests_bing_configuration_name"), + ) + + self._do_test_tool_streaming( + client=client, + model_name=model_name, + tool_to_test=bing_custom_tool, + instructions="You are helpful agent", + prompt="How many medals did the USA win in the 2024 summer olympics?", + expected_delta_class=RunStepDeltaCustomBingGroundingToolCall, + agent_message_regex="40.+gold.+44 silver.+42.+bronze", + uri_annotation=MessageTextUrlCitationDetails( + url="*", + title="*", + ), + ) + def _do_test_tool( self, client, @@ -3446,7 +3499,7 @@ def _do_test_tool( expected_class, headers=None, polling_interval=1, - specific_message_text=None, + agent_message_regex=None, minimal_text_length=1, uri_annotation=None, file_annotation=None, @@ -3467,7 +3520,7 @@ def _do_test_tool( :param headers: The headers used to call the agents. For example: {"x-ms-enable-preview": "true"} :param polling_interval: The polling interval (useful, when we need to wait longer times). - :param specific_message_text: The specific text to search in the messages. Must be all lower-case. + :param agent_message_regex: The regular expression to search in the messages. Must be all lower-case. :param minimal_text_length: The minimal length of a text. :param uri_annotation: The URI annotation, which have to present in response. :param file_annotation: The file annotation, which have to present in response. @@ -3515,8 +3568,8 @@ def _do_test_tool( # Search for the specific message when asked. text = "\n".join([t.text.value.lower() for t in text_messages]) - if specific_message_text: - assert specific_message_text in text, f"{specific_message_text} was not found in {text}." + if agent_message_regex: + assert re.findall(agent_message_regex, text), f"{agent_message_regex} was not found in {text}." # Search for the specific URL and title in the message annotation. 
if uri_annotation is not None: @@ -3563,6 +3616,7 @@ def _do_test_tool_streaming( headers: Dict[str, str] = None, uri_annotation: MessageTextUrlCitationDetails = None, file_annotation: MessageTextFileCitationDetails = None, + agent_message_regex=None, ): """ The helper method to test the non-interactive tools in the streaming scenarios. @@ -3576,6 +3630,7 @@ def _do_test_tool_streaming( For example: {"x-ms-enable-preview": "true"} :param uri_annotation: The URI annotation, which have to present in response. :param file_annotation: The file annotation, which have to present in response. + :param agent_message_regex: The regular expression to search in the messages. Must be all lower-case. """ if headers is None: headers = {} @@ -3605,6 +3660,8 @@ def _do_test_tool_streaming( # Annotation checks has_uri_annotation = uri_annotation is None has_file_annotation = file_annotation is None + # Agent message regex + has_agent_message_regex = agent_message_regex is None for event_type, event_data, _ in stream: if isinstance(event_data, MessageDeltaChunk): @@ -3623,6 +3680,9 @@ def _do_test_tool_streaming( has_file_annotation = has_file_annotation or self._has_file_annotation( event_data, file_annotation ) + for content in event_data.content: + if not has_agent_message_regex and isinstance(content, MessageTextContent): + has_agent_message_regex = re.findall(agent_message_regex, content.text.value) elif isinstance(event_data, RunStepDeltaChunk): if expected_delta_class is not None: @@ -3653,6 +3713,7 @@ def _do_test_tool_streaming( assert got_expected_delta, f"The delta tool call of type {expected_delta_class} was not found." assert is_completed, "The stream was not completed." assert is_run_step_created, "No run steps were created." + assert has_agent_message_regex, f"The text {agent_message_regex} was not found." 
assert ( has_uri_annotation diff --git a/sdk/ai/azure-ai-agents/tests/test_agents_client_async.py b/sdk/ai/azure-ai-agents/tests/test_agents_client_async.py index a7604629bbd2..a0ace0461e8c 100644 --- a/sdk/ai/azure-ai-agents/tests/test_agents_client_async.py +++ b/sdk/ai/azure-ai-agents/tests/test_agents_client_async.py @@ -8,8 +8,8 @@ import json import jsonref -import logging import os +import re import pytest import io import time @@ -22,6 +22,7 @@ AzureFunctionStorageQueue, AgentStreamEvent, AgentThread, + BingCustomSearchTool, BingGroundingTool, BrowserAutomationTool, CodeInterpreterTool, @@ -46,6 +47,8 @@ ResponseFormatJsonSchemaType, RunAdditionalFieldList, RunStepDeltaAzureAISearchToolCall, + RunStepDeltaCustomBingGroundingToolCall, + RunStepBingCustomSearchToolCall, RunStepBingGroundingToolCall, RunStepBrowserAutomationToolCall, RunStepConnectedAgentToolCall, @@ -2708,7 +2711,7 @@ async def test_azure_function_call(self, **kwargs): prompt="What is the most prevalent element in the universe? What would foo say?", # TODO: Implement the run step for AzureFunction. expected_class=None, - specific_message_text="bar", + agent_message_regex="bar", ) @agentClientPreparer() @@ -2965,7 +2968,7 @@ async def test_azure_ai_search_tool(self, **kwargs): instructions="You are a helpful agent that can search for information using Azure AI Search.", prompt="What is the temperature rating of the cozynights sleeping bag?", expected_class=RunStepAzureAISearchToolCall, - specific_message_text="60", + agent_message_regex="60", uri_annotation=MessageTextUrlCitationDetails( url="www.microsoft.com", title="product_info_7.md", @@ -3021,7 +3024,7 @@ async def test_browser_automation_tool(self, **kwargs): # load a VM and open a browser. Use a large polling interval to avoid tons of REST API calls in test recordings. 
polling_interval=60, expected_class=RunStepBrowserAutomationToolCall, - specific_message_text="the year-to-date (ytd) stock price change for microsoft (msft) is", + agent_message_regex="the year-to-date [(]ytd[)] stock price change for microsoft [(]msft[)] is", ) @agentClientPreparer() @@ -3220,12 +3223,14 @@ async def test_bing_grounding_tool(self, **kwargs): """Test Bing grounding tool call in non-streaming Scenario.""" async with self.create_client(by_endpoint=True, **kwargs) as client: model_name = "gpt-4o" - openapi_tool = BingGroundingTool(connection_id=kwargs.get("azure_ai_agents_tests_bing_connection_id")) + bing_grounding_tool = BingGroundingTool( + connection_id=kwargs.get("azure_ai_agents_tests_bing_connection_id") + ) await self._do_test_tool( client=client, model_name=model_name, - tool_to_test=openapi_tool, + tool_to_test=bing_grounding_tool, instructions="You are helpful agent", prompt="How does wikipedia explain Euler's Identity?", expected_class=RunStepBingGroundingToolCall, @@ -3241,12 +3246,14 @@ async def test_bing_grounding_tool_streaming(self, **kwargs): """Test Bing grounding tool call in streaming Scenario.""" async with self.create_client(by_endpoint=True, **kwargs) as client: model_name = "gpt-4o" - openapi_tool = BingGroundingTool(connection_id=kwargs.get("azure_ai_agents_tests_bing_connection_id")) + bing_grounding_tool = BingGroundingTool( + connection_id=kwargs.get("azure_ai_agents_tests_bing_connection_id") + ) await self._do_test_tool_streaming( client=client, model_name=model_name, - tool_to_test=openapi_tool, + tool_to_test=bing_grounding_tool, instructions="You are helpful agent", prompt="How does wikipedia explain Euler's Identity?", expected_delta_class=RunStepDeltaBingGroundingToolCall, @@ -3256,6 +3263,56 @@ + @agentClientPreparer() + @recorded_by_proxy_async + async def test_custom_bing_grounding_tool(self, **kwargs): + """Test Bing custom search tool call in 
non-streaming Scenario.""" + async with self.create_client(by_endpoint=True, **kwargs) as client: + model_name = "gpt-4o" + bing_custom_tool = BingCustomSearchTool( + connection_id=kwargs.get("azure_ai_agents_tests_bing_custom_connection_id"), + instance_name=kwargs.get("azure_ai_agents_tests_bing_configuration_name"), + ) + + await self._do_test_tool( + client=client, + model_name=model_name, + tool_to_test=bing_custom_tool, + instructions="You are helpful agent", + prompt="How many medals did the USA win in the 2024 summer olympics?", + expected_class=RunStepBingCustomSearchToolCall, + agent_message_regex="40.+gold.+44 silver.+42.+bronze", + uri_annotation=MessageTextUrlCitationDetails( + url="*", + title="*", + ), + ) + + @agentClientPreparer() + @recorded_by_proxy_async + async def test_custom_bing_grounding_tool_streaming(self, **kwargs): + """Test Bing custom search tool call in streaming Scenario.""" + async with self.create_client(by_endpoint=True, **kwargs) as client: + model_name = "gpt-4o" + bing_custom_tool = BingCustomSearchTool( + connection_id=kwargs.get("azure_ai_agents_tests_bing_custom_connection_id"), + instance_name=kwargs.get("azure_ai_agents_tests_bing_configuration_name"), + ) + + await self._do_test_tool_streaming( + client=client, + model_name=model_name, + tool_to_test=bing_custom_tool, + instructions="You are helpful agent", + prompt="How many medals did the USA win in the 2024 summer olympics?", + expected_delta_class=RunStepDeltaCustomBingGroundingToolCall, + agent_message_regex="40.+gold.+44 silver.+42.+bronze", + uri_annotation=MessageTextUrlCitationDetails( + url="*", + title="*", + ), + ) + async def _do_test_tool( self, client, @@ -3266,7 +3323,7 @@ expected_class, headers=None, polling_interval=1, - specific_message_text=None, + agent_message_regex=None, minimal_text_length=1, uri_annotation=None, file_annotation=None, @@ -3287,7 +3344,7 @@ :param headers: The headers used to call the 
agents. For example: {"x-ms-enable-preview": "true"} :param polling_interval: The polling interval (useful, when we need to wait longer times). - :param specific_message_text: The specific text to search in the messages. Must be all lower-case. + :param agent_message_regex: The regular expression to search in the messages. Must be all lower-case. :param minimal_text_length: The minimal length of a text. :param uri_annotation: The URI annotation, which have to present in response. :param file_annotation: The file annotation, which have to present in response. @@ -3335,8 +3392,8 @@ async def _do_test_tool( # Search for the specific message when asked. text = "\n".join([t.text.value.lower() for t in text_messages]) - if specific_message_text: - assert specific_message_text in text, f"{specific_message_text} was not found in {text}." + if agent_message_regex: + assert re.findall(agent_message_regex, text), f"{agent_message_regex} was not found in {text}." # Search for the specific URL and title in the message annotation. if uri_annotation is not None: @@ -3383,6 +3440,7 @@ async def _do_test_tool_streaming( headers=None, uri_annotation=None, file_annotation=None, + agent_message_regex=None, ): """ The helper method to test the non-interactive tools in the streaming scenarios. @@ -3396,6 +3454,7 @@ async def _do_test_tool_streaming( For example: {"x-ms-enable-preview": "true"} :param uri_annotation: The URI annotation, which have to present in response. :param file_annotation: The file annotation, which have to present in response. + :param agent_message_regex: The regular expression to search in the messages. Must be all lower-case. 
""" if headers is None: headers = {} @@ -3424,6 +3483,8 @@ async def _do_test_tool_streaming( # Annotation checks has_uri_annotation = uri_annotation is None has_file_annotation = file_annotation is None + # Agent message regex + has_agent_message_regex = agent_message_regex is None async for event_type, event_data, _ in stream: if isinstance(event_data, MessageDeltaChunk): @@ -3442,6 +3503,9 @@ async def _do_test_tool_streaming( has_file_annotation = has_file_annotation or self._has_file_annotation( event_data, file_annotation ) + for content in event_data.content: + if not has_agent_message_regex and isinstance(content, MessageTextContent): + has_agent_message_regex = re.findall(agent_message_regex, content.text.value) elif isinstance(event_data, RunStepDeltaChunk): if expected_delta_class is not None: @@ -3472,6 +3536,7 @@ async def _do_test_tool_streaming( assert got_expected_delta, f"The delta tool call of type {expected_delta_class} was not found." assert is_completed, "The stream was not completed." assert is_run_step_created, "No run steps were created." + assert has_agent_message_regex, f"The text {agent_message_regex} was not found." 
assert ( has_uri_annotation diff --git a/sdk/ai/azure-ai-agents/tests/test_agents_client_base.py b/sdk/ai/azure-ai-agents/tests/test_agents_client_base.py index d77fb9a28271..a71f2d2c1fdb 100644 --- a/sdk/ai/azure-ai-agents/tests/test_agents_client_base.py +++ b/sdk/ai/azure-ai-agents/tests/test_agents_client_base.py @@ -32,6 +32,8 @@ azure_ai_agents_tests_playwright_connection_id="/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.CognitiveServices/accounts/00000/projects/00000/connections/00000", azure_ai_agents_tests_deep_research_model="gpt-4o-deep-research", azure_ai_agents_tests_is_test_run="True", + azure_ai_agents_tests_bing_custom_connection_id="/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.CognitiveServices/accounts/00000/projects/00000/connections/00000", + azure_ai_agents_tests_bing_configuration_name="sample_configuration", ) # Set to True to enable SDK logging diff --git a/sdk/ai/azure-ai-agents/tsp-location.yaml b/sdk/ai/azure-ai-agents/tsp-location.yaml index f00316254952..b6f6e67d620e 100644 --- a/sdk/ai/azure-ai-agents/tsp-location.yaml +++ b/sdk/ai/azure-ai-agents/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai/Azure.AI.Agents -commit: 5470a13587ba49e4fb0980073ca6ba468ef837c3 +commit: 2bd336d8503a20e419b1564ca871e11ed91bf55e repo: Azure/azure-rest-api-specs additionalDirectories: