diff --git a/.gitignore b/.gitignore index 899a8b9ecc..f3b78125fd 100644 --- a/.gitignore +++ b/.gitignore @@ -209,13 +209,14 @@ WARP.md **/tmpclaude* # Azurite storage emulator files -*/__azurite_db_blob__.json -*/__azurite_db_blob_extent__.json -*/__azurite_db_queue__.json -*/__azurite_db_queue_extent__.json -*/__azurite_db_table__.json +*/__azurite_db_blob__.json* +*/__azurite_db_blob_extent__.json* +*/__azurite_db_queue__.json* +*/__azurite_db_queue_extent__.json* +*/__azurite_db_table__.json* */__blobstorage__/ */__queuestorage__/ +*/AzuriteConfig # Azure Functions local settings local.settings.json diff --git a/python/packages/azurefunctions/agent_framework_azurefunctions/__init__.py b/python/packages/azurefunctions/agent_framework_azurefunctions/__init__.py index 5684d1ec1b..9b3d3db27a 100644 --- a/python/packages/azurefunctions/agent_framework_azurefunctions/__init__.py +++ b/python/packages/azurefunctions/agent_framework_azurefunctions/__init__.py @@ -3,8 +3,6 @@ import importlib.metadata from ._app import AgentFunctionApp -from ._callbacks import AgentCallbackContext, AgentResponseCallbackProtocol -from ._orchestration import DurableAIAgent try: __version__ = importlib.metadata.version(__name__) @@ -12,9 +10,6 @@ __version__ = "0.0.0" # Fallback for development mode __all__ = [ - "AgentCallbackContext", "AgentFunctionApp", - "AgentResponseCallbackProtocol", - "DurableAIAgent", "__version__", ] diff --git a/python/packages/azurefunctions/agent_framework_azurefunctions/_app.py b/python/packages/azurefunctions/agent_framework_azurefunctions/_app.py index 5e64a3feaf..7a49214b33 100644 --- a/python/packages/azurefunctions/agent_framework_azurefunctions/_app.py +++ b/python/packages/azurefunctions/agent_framework_azurefunctions/_app.py @@ -8,16 +8,16 @@ import json import re +import uuid from collections.abc import Callable, Mapping from dataclasses import dataclass +from datetime import datetime, timezone from typing import TYPE_CHECKING, Any, TypeVar, cast import azure.durable_functions as df import azure.functions as func from agent_framework import AgentProtocol, get_logger - -from ._callbacks import AgentResponseCallbackProtocol -from ._constants import ( +from agent_framework_durabletask import ( DEFAULT_MAX_POLL_RETRIES, DEFAULT_POLL_INTERVAL_SECONDS, MIMETYPE_APPLICATION_JSON, @@ -28,12 +28,17 @@ THREAD_ID_HEADER, WAIT_FOR_RESPONSE_FIELD, WAIT_FOR_RESPONSE_HEADER, + AgentResponseCallbackProtocol, + AgentSessionId, + ApiResponseFields, + DurableAgentState, + DurableAIAgent, + RunRequest, ) -from ._durable_agent_state import DurableAgentState + from ._entities import create_agent_entity from ._errors import IncomingRequestError -from ._models import AgentSessionId, RunRequest -from ._orchestration import AgentOrchestrationContextType, DurableAIAgent +from ._orchestration import AgentOrchestrationContextType, AgentTask, AzureFunctionsAgentExecutor logger = get_logger("agent_framework.azurefunctions") @@ -294,7 +299,7 @@ def get_agent( self, context: AgentOrchestrationContextType, agent_name: str, - ) -> DurableAIAgent: + ) -> DurableAIAgent[AgentTask]: """Return a DurableAIAgent proxy for a registered agent. Args: @@ -305,14 +310,15 @@ def get_agent( ValueError: If the requested agent has not been registered. Returns: - DurableAIAgent wrapper bound to the orchestration context. + DurableAIAgent[AgentTask] wrapper bound to the orchestration context. 
""" normalized_name = str(agent_name) if normalized_name not in self._agent_metadata: raise ValueError(f"Agent '{normalized_name}' is not registered with this app.") - return DurableAIAgent(context, normalized_name) + executor = AzureFunctionsAgentExecutor(context) + return DurableAIAgent(executor, normalized_name) def _setup_agent_functions( self, @@ -375,8 +381,6 @@ async def http_start(req: func.HttpRequest, client: df.DurableOrchestrationClien "enable_tool_calls": true|false (optional, default: true) } """ - logger.debug(f"[HTTP Trigger] Received request on route: /api/agents/{agent_name}/run") - request_response_format: str = REQUEST_RESPONSE_FORMAT_JSON thread_id: str | None = None @@ -385,9 +389,9 @@ async def http_start(req: func.HttpRequest, client: df.DurableOrchestrationClien thread_id = self._resolve_thread_id(req=req, req_body=req_body) wait_for_response = self._should_wait_for_response(req=req, req_body=req_body) - logger.debug(f"[HTTP Trigger] Message: {message}") - logger.debug(f"[HTTP Trigger] Thread ID: {thread_id}") - logger.debug(f"[HTTP Trigger] wait_for_response: {wait_for_response}") + logger.debug( + f"[HTTP Trigger] Message: {message}, Thread ID: {thread_id}, wait_for_response: {wait_for_response}" + ) if not message: logger.warning("[HTTP Trigger] Request rejected: Missing message") @@ -401,15 +405,18 @@ async def http_start(req: func.HttpRequest, client: df.DurableOrchestrationClien session_id = self._create_session_id(agent_name, thread_id) correlation_id = self._generate_unique_id() - logger.debug(f"[HTTP Trigger] Using session ID: {session_id}") - logger.debug(f"[HTTP Trigger] Generated correlation ID: {correlation_id}") - logger.debug("[HTTP Trigger] Calling entity to run agent...") + logger.debug( + f"[HTTP Trigger] Calling entity to run agent using session ID: {session_id} " + f"and correlation ID: {correlation_id}" + ) - entity_instance_id = session_id.to_entity_id() + entity_instance_id = df.EntityId( + name=session_id.entity_name, + key=session_id.key, + ) run_request = self._build_request_data( req_body, message, - thread_id, correlation_id, request_response_format, ) @@ -622,14 +629,16 @@ async def _handle_mcp_tool_invocation( session_id = AgentSessionId.with_random_key(agent_name) # Build entity instance ID - entity_instance_id = session_id.to_entity_id() + entity_instance_id = df.EntityId( + name=session_id.entity_name, + key=session_id.key, + ) # Create run request correlation_id = self._generate_unique_id() run_request = self._build_request_data( req_body={"message": query, "role": "user"}, message=query, - thread_id=str(session_id), correlation_id=correlation_id, request_response_format=REQUEST_RESPONSE_FORMAT_TEXT, ) @@ -781,7 +790,7 @@ async def _poll_entity_for_response( agent_response = state.try_get_agent_response(correlation_id) if agent_response: result = self._build_success_result( - response_data=agent_response, + response_message=agent_response.text, message=message, thread_id=thread_id, correlation_id=correlation_id, @@ -827,23 +836,22 @@ async def _build_timeout_result(self, message: str, thread_id: str, correlation_ ) def _build_success_result( - self, response_data: dict[str, Any], message: str, thread_id: str, correlation_id: str, state: DurableAgentState + self, response_message: str, message: str, thread_id: str, correlation_id: str, state: DurableAgentState ) -> dict[str, Any]: """Build the success result returned to the HTTP caller.""" return self._build_response_payload( - response=response_data.get("content"), + 
response=response_message, message=message, thread_id=thread_id, status="success", correlation_id=correlation_id, - extra_fields={"message_count": response_data.get("message_count", state.message_count)}, + extra_fields={ApiResponseFields.MESSAGE_COUNT: state.message_count}, ) def _build_request_data( self, req_body: dict[str, Any], message: str, - thread_id: str, correlation_id: str, request_response_format: str, ) -> dict[str, Any]: @@ -857,8 +865,8 @@ def _build_request_data( request_response_format=request_response_format, response_format=req_body.get("response_format"), enable_tool_calls=enable_tool_calls, - thread_id=thread_id, correlation_id=correlation_id, + created_at=datetime.now(timezone.utc), ).to_dict() def _build_accepted_response(self, message: str, thread_id: str, correlation_id: str) -> dict[str, Any]: @@ -910,15 +918,13 @@ def _convert_payload_to_text(self, payload: dict[str, Any]) -> str: def _generate_unique_id(self) -> str: """Generate a new unique identifier.""" - import uuid - return uuid.uuid4().hex - def _create_session_id(self, func_name: str, thread_id: str | None) -> AgentSessionId: + def _create_session_id(self, agent_name: str, thread_id: str | None) -> AgentSessionId: """Create a session identifier using the provided thread id or a random value.""" if thread_id: - return AgentSessionId(name=func_name, key=thread_id) - return AgentSessionId.with_random_key(name=func_name) + return AgentSessionId(name=agent_name, key=thread_id) + return AgentSessionId.with_random_key(name=agent_name) def _resolve_thread_id(self, req: func.HttpRequest, req_body: dict[str, Any]) -> str: """Retrieve the thread identifier from request body or query parameters.""" diff --git a/python/packages/azurefunctions/agent_framework_azurefunctions/_entities.py b/python/packages/azurefunctions/agent_framework_azurefunctions/_entities.py index ba6040b99b..fb874692b0 100644 --- a/python/packages/azurefunctions/agent_framework_azurefunctions/_entities.py +++ b/python/packages/azurefunctions/agent_framework_azurefunctions/_entities.py @@ -8,346 +8,41 @@ """ import asyncio -import inspect -from collections.abc import AsyncIterable, Callable +from collections.abc import Callable from typing import Any, cast import azure.durable_functions as df -from agent_framework import ( - AgentProtocol, - AgentResponse, - AgentResponseUpdate, - ChatMessage, - Content, - Role, - get_logger, +from agent_framework import AgentProtocol, get_logger +from agent_framework_durabletask import ( + AgentEntity, + AgentEntityStateProviderMixin, + AgentResponseCallbackProtocol, ) -from ._callbacks import AgentCallbackContext, AgentResponseCallbackProtocol -from ._durable_agent_state import ( - DurableAgentState, - DurableAgentStateData, - DurableAgentStateEntry, - DurableAgentStateRequest, - DurableAgentStateResponse, -) -from ._models import RunRequest - logger = get_logger("agent_framework.azurefunctions.entities") -class AgentEntity: - """Durable entity that manages agent execution and conversation state. - - This entity: - - Maintains conversation history - - Executes agent with messages - - Stores agent responses - - Handles tool execution - - Operations: - - run: Execute the agent with a message - - run_agent: (Deprecated) Execute the agent with a message - - reset: Clear conversation history +class AzureFunctionEntityStateProvider(AgentEntityStateProviderMixin): + """Azure Functions Durable Entity state provider for AgentEntity. 
-    Attributes:
-        agent: The AgentProtocol instance
-        state: The DurableAgentState managing conversation history
+    This class uses the Durable Entity context from the `azure-functions-durable` package
+    to get and set the state of the agent entity.
     """
 
-    agent: AgentProtocol
-    state: DurableAgentState
-
-    def __init__(
-        self,
-        agent: AgentProtocol,
-        callback: AgentResponseCallbackProtocol | None = None,
-    ):
-        """Initialize the agent entity.
-
-        Args:
-            agent: The Microsoft Agent Framework agent instance (must implement AgentProtocol)
-            callback: Optional callback invoked during streaming updates and final responses
-        """
-        self.agent = agent
-        self.state = DurableAgentState()
-        self.callback = callback
-
-        logger.debug(f"[AgentEntity] Initialized with agent type: {type(agent).__name__}")
-
-    def _is_error_response(self, entry: DurableAgentStateEntry) -> bool:
-        """Check if a conversation history entry is an error response.
-
-        Error responses should be kept in history for tracking but not sent to the agent
-        since Azure OpenAI doesn't support 'error' content type.
-
-        Args:
-            entry: A conversation history entry (DurableAgentStateEntry or dict)
-
-        Returns:
-            True if the entry is a response containing error content, False otherwise
-        """
-        if isinstance(entry, DurableAgentStateResponse):
-            return entry.is_error
-        return False
-
-    async def run_agent(
-        self,
-        context: df.DurableEntityContext,
-        request: RunRequest | dict[str, Any] | str,
-    ) -> AgentResponse:
-        """(Deprecated) Execute the agent with a message directly in the entity.
-
-        Args:
-            context: Entity context
-            request: RunRequest object, dict, or string message (for backward compatibility)
-
-        Returns:
-            AgentResponse enriched with execution metadata.
-        """
-        return await self.run(context, request)
-
-    async def run(
-        self,
-        context: df.DurableEntityContext,
-        request: RunRequest | dict[str, Any] | str,
-    ) -> AgentResponse:
-        """Execute the agent with a message directly in the entity.
-
-        Args:
-            context: Entity context
-            request: RunRequest object, dict, or string message (for backward compatibility)
-
-        Returns:
-            AgentResponse enriched with execution metadata.
- """ - if isinstance(request, str): - run_request = RunRequest(message=request, role=Role.USER) - elif isinstance(request, dict): - run_request = RunRequest.from_dict(request) - else: - run_request = request - - message = run_request.message - thread_id = run_request.thread_id - correlation_id = run_request.correlation_id - if not thread_id: - raise ValueError("RunRequest must include a thread_id") - if not correlation_id: - raise ValueError("RunRequest must include a correlation_id") - response_format = run_request.response_format - enable_tool_calls = run_request.enable_tool_calls - - state_request = DurableAgentStateRequest.from_run_request(run_request) - self.state.data.conversation_history.append(state_request) - - logger.debug(f"[AgentEntity.run] Received Message: {state_request}") - - try: - # Build messages from conversation history, excluding error responses - # Error responses are kept in history for tracking but not sent to the agent - chat_messages: list[ChatMessage] = [ - m.to_chat_message() - for entry in self.state.data.conversation_history - if not self._is_error_response(entry) - for m in entry.messages - ] - - run_kwargs: dict[str, Any] = {"messages": chat_messages, "options": {}} - if not enable_tool_calls: - run_kwargs["options"]["tools"] = None - if response_format: - run_kwargs["options"]["response_format"] = response_format - - agent_response: AgentResponse = await self._invoke_agent( - run_kwargs=run_kwargs, - correlation_id=correlation_id, - thread_id=thread_id, - request_message=message, - ) - - logger.debug( - "[AgentEntity.run] Agent invocation completed - response type: %s", - type(agent_response).__name__, - ) - - try: - response_text = agent_response.text if agent_response.text else "No response" - logger.debug(f"Response: {response_text[:100]}...") - except Exception as extraction_error: - logger.error( - "Error extracting response text: %s", - extraction_error, - exc_info=True, - ) - - state_response = DurableAgentStateResponse.from_run_response(correlation_id, agent_response) - self.state.data.conversation_history.append(state_response) - - logger.debug("[AgentEntity.run] AgentResponse stored in conversation history") + def __init__(self, context: df.DurableEntityContext) -> None: + self._context = context - return agent_response + def _get_state_dict(self) -> dict[str, Any]: + raw_state = self._context.get_state(lambda: {}) + if not isinstance(raw_state, dict): + return {} + return cast(dict[str, Any], raw_state) - except Exception as exc: - logger.exception("[AgentEntity.run] Agent execution failed.") - - # Create error message - error_message = ChatMessage( - role=Role.ASSISTANT, contents=[Content.from_error(message=str(exc), error_code=type(exc).__name__)] - ) - - error_response = AgentResponse(messages=[error_message]) - - # Create and store error response in conversation history - error_state_response = DurableAgentStateResponse.from_run_response(correlation_id, error_response) - error_state_response.is_error = True - self.state.data.conversation_history.append(error_state_response) - - return error_response - - async def _invoke_agent( - self, - run_kwargs: dict[str, Any], - correlation_id: str, - thread_id: str, - request_message: str, - ) -> AgentResponse: - """Execute the agent, preferring streaming when available.""" - callback_context: AgentCallbackContext | None = None - if self.callback is not None: - callback_context = self._build_callback_context( - correlation_id=correlation_id, - thread_id=thread_id, - request_message=request_message, - 
) - - run_stream_callable = getattr(self.agent, "run_stream", None) - if callable(run_stream_callable): - try: - stream_candidate = run_stream_callable(**run_kwargs) - if inspect.isawaitable(stream_candidate): - stream_candidate = await stream_candidate - - return await self._consume_stream( - stream=cast(AsyncIterable[AgentResponseUpdate], stream_candidate), - callback_context=callback_context, - ) - except TypeError as type_error: - if "__aiter__" not in str(type_error): - raise - logger.debug( - "run_stream returned a non-async result; falling back to run(): %s", - type_error, - ) - except Exception as stream_error: - logger.warning( - "run_stream failed; falling back to run(): %s", - stream_error, - exc_info=True, - ) - else: - logger.debug("Agent does not expose run_stream; falling back to run().") - - agent_response = await self._invoke_non_stream(run_kwargs) - await self._notify_final_response(agent_response, callback_context) - return agent_response - - async def _consume_stream( - self, - stream: AsyncIterable[AgentResponseUpdate], - callback_context: AgentCallbackContext | None = None, - ) -> AgentResponse: - """Consume streaming responses and build the final AgentResponse.""" - updates: list[AgentResponseUpdate] = [] - - async for update in stream: - updates.append(update) - await self._notify_stream_update(update, callback_context) - - if updates: - response = AgentResponse.from_agent_run_response_updates(updates) - else: - logger.debug("[AgentEntity] No streaming updates received; creating empty response") - response = AgentResponse(messages=[]) - - await self._notify_final_response(response, callback_context) - return response - - async def _invoke_non_stream(self, run_kwargs: dict[str, Any]) -> AgentResponse: - """Invoke the agent without streaming support.""" - run_callable = getattr(self.agent, "run", None) - if run_callable is None or not callable(run_callable): - raise AttributeError("Agent does not implement run() method") + def _set_state_dict(self, state: dict[str, Any]) -> None: + self._context.set_state(state) - result = run_callable(**run_kwargs) - if inspect.isawaitable(result): - result = await result - - if not isinstance(result, AgentResponse): - raise TypeError(f"Agent run() must return an AgentResponse instance; received {type(result).__name__}") - - return result - - async def _notify_stream_update( - self, - update: AgentResponseUpdate, - context: AgentCallbackContext | None, - ) -> None: - """Invoke the streaming callback if one is registered.""" - if self.callback is None or context is None: - return - - try: - callback_result = self.callback.on_streaming_response_update(update, context) - if inspect.isawaitable(callback_result): - await callback_result - except Exception as exc: - logger.warning( - "[AgentEntity] Streaming callback raised an exception: %s", - exc, - exc_info=True, - ) - - async def _notify_final_response( - self, - response: AgentResponse, - context: AgentCallbackContext | None, - ) -> None: - """Invoke the final response callback if one is registered.""" - if self.callback is None or context is None: - return - - try: - callback_result = self.callback.on_agent_response(response, context) - if inspect.isawaitable(callback_result): - await callback_result - except Exception as exc: - logger.warning( - "[AgentEntity] Response callback raised an exception: %s", - exc, - exc_info=True, - ) - - def _build_callback_context( - self, - correlation_id: str, - thread_id: str, - request_message: str, - ) -> AgentCallbackContext: - """Create the 
callback context provided to consumers.""" - agent_name = getattr(self.agent, "name", None) or type(self.agent).__name__ - return AgentCallbackContext( - agent_name=agent_name, - correlation_id=correlation_id, - thread_id=thread_id, - request_message=request_message, - ) - - def reset(self, context: df.DurableEntityContext) -> None: - """Reset the entity state (clear conversation history).""" - logger.debug("[AgentEntity.reset] Resetting entity state") - self.state.data = DurableAgentStateData(conversation_history=[]) - logger.debug("[AgentEntity.reset] State reset complete") + def _get_thread_id_from_entity(self) -> str: + return str(self._context.entity_key) def create_agent_entity( @@ -368,19 +63,10 @@ async def _entity_coroutine(context: df.DurableEntityContext) -> None: """Async handler that executes the entity operations.""" try: logger.debug("[entity_function] Entity triggered") - logger.debug(f"[entity_function] Operation: {context.operation_name}") - - current_state = context.get_state(lambda: None) - logger.debug("Retrieved state: %s", str(current_state)[:100]) - entity = AgentEntity(agent, callback) + logger.debug("[entity_function] Operation: %s", context.operation_name) - if current_state is not None: - entity.state = DurableAgentState.from_dict(current_state) - logger.debug( - "[entity_function] Restored entity from state (message_count: %s)", entity.state.message_count - ) - else: - logger.debug("[entity_function] Created new entity instance") + state_provider = AzureFunctionEntityStateProvider(context) + entity = AgentEntity(agent, callback, state_provider=state_provider) operation = context.operation_name @@ -394,21 +80,18 @@ async def _entity_coroutine(context: df.DurableEntityContext) -> None: # Fall back to treating input as message string request = "" if input_data is None else str(cast(object, input_data)) - result = await entity.run(context, request) + result = await entity.run(request) context.set_result(result.to_dict()) elif operation == "reset": - entity.reset(context) + entity.reset() context.set_result({"status": "reset"}) else: logger.error("[entity_function] Unknown operation: %s", operation) context.set_result({"error": f"Unknown operation: {operation}"}) - serialized_state = entity.state.to_dict() - logger.debug("State dict: %s", serialized_state) - context.set_state(serialized_state) - logger.info(f"[entity_function] Operation {operation} completed successfully") + logger.info("[entity_function] Operation %s completed successfully", operation) except Exception as exc: logger.exception("[entity_function] Error executing entity operation %s", exc) diff --git a/python/packages/azurefunctions/agent_framework_azurefunctions/_orchestration.py b/python/packages/azurefunctions/agent_framework_azurefunctions/_orchestration.py index 724ba9903c..4e55fe1819 100644 --- a/python/packages/azurefunctions/agent_framework_azurefunctions/_orchestration.py +++ b/python/packages/azurefunctions/agent_framework_azurefunctions/_orchestration.py @@ -5,24 +5,22 @@ This module provides support for using agents inside Durable Function orchestrations. 
""" -import uuid -from collections.abc import AsyncIterator, Callable, Sequence -from typing import TYPE_CHECKING, Any, TypeAlias, cast - -from agent_framework import ( - AgentProtocol, - AgentResponse, - AgentResponseUpdate, - AgentThread, - ChatMessage, - get_logger, +from collections.abc import Callable +from typing import TYPE_CHECKING, Any, TypeAlias + +import azure.durable_functions as df +from agent_framework import AgentThread, get_logger +from agent_framework_durabletask import ( + DurableAgentExecutor, + RunRequest, + ensure_response_format, + load_agent_response, ) from azure.durable_functions.models import TaskBase +from azure.durable_functions.models.actions.NoOpAction import NoOpAction from azure.durable_functions.models.Task import CompoundTask, TaskState from pydantic import BaseModel -from ._models import AgentSessionId, DurableAgentThread, RunRequest - logger = get_logger("agent_framework.azurefunctions.orchestration") CompoundActionConstructor: TypeAlias = Callable[[list[Any]], Any] | None @@ -45,6 +43,25 @@ def __init__( _TypedCompoundTask = CompoundTask +class PreCompletedTask(TaskBase): # type: ignore[misc] + """A simple task that is already completed with a result. + + Used for fire-and-forget mode where we want to return immediately + with an acceptance response without waiting for entity processing. + """ + + def __init__(self, result: Any): + """Initialize with a completed result. + + Args: + result: The result value for this completed task + """ + # Initialize with a NoOp action since we don't need actual orchestration actions + super().__init__(-1, NoOpAction()) + # Immediately mark as completed with the result + self.set_value(is_error=False, value=result) + + class AgentTask(_TypedCompoundTask): """A custom Task that wraps entity calls and provides typed AgentResponse results. @@ -65,10 +82,13 @@ def __init__( response_format: Optional Pydantic model for response parsing correlation_id: Correlation ID for logging """ - super().__init__([entity_task]) + # Set instance variables BEFORE calling super().__init__ + # because super().__init__ may trigger try_set_value for pre-completed tasks self._response_format = response_format self._correlation_id = correlation_id + super().__init__([entity_task]) + # Override action_repr to expose the inner task's action directly # This ensures compatibility with ReplaySchema V3 which expects Action objects. 
self.action_repr = entity_task.action_repr @@ -95,10 +115,10 @@ def try_set_value(self, child: TaskBase) -> None: ) try: - response = self._load_agent_response(raw_result) + response = load_agent_response(raw_result) if self._response_format is not None: - self._ensure_response_format( + ensure_response_format( self._response_format, self._correlation_id, response, @@ -118,230 +138,84 @@ def try_set_value(self, child: TaskBase) -> None: self._first_error = child.result self.set_value(is_error=True, value=self._first_error) - def _load_agent_response(self, agent_response: AgentResponse | dict[str, Any] | None) -> AgentResponse: - """Convert raw payloads into AgentResponse instance.""" - if agent_response is None: - raise ValueError("agent_response cannot be None") - logger.debug("[load_agent_response] Loading agent response of type: %s", type(agent_response)) +class AzureFunctionsAgentExecutor(DurableAgentExecutor[AgentTask]): + """Executor that executes durable agents inside Azure Functions orchestrations.""" - if isinstance(agent_response, AgentResponse): - return agent_response - if isinstance(agent_response, dict): - logger.debug("[load_agent_response] Converting dict payload using AgentResponse.from_dict") - return AgentResponse.from_dict(agent_response) + def __init__(self, context: AgentOrchestrationContextType): + self.context = context - raise TypeError(f"Unsupported type for agent_response: {type(agent_response)}") + def generate_unique_id(self) -> str: + return str(self.context.new_uuid()) - def _ensure_response_format( + def get_run_request( self, - response_format: type[BaseModel] | None, - correlation_id: str, - response: AgentResponse, - ) -> None: - """Ensure the AgentResponse value is parsed into the expected response_format.""" - if response_format is not None and not isinstance(response.value, response_format): - response.try_parse_value(response_format) - - logger.debug( - "[DurableAIAgent] Loaded AgentResponse.value for correlation_id %s with type: %s", - correlation_id, - type(response.value).__name__, - ) - - -class DurableAIAgent(AgentProtocol): - """A durable agent implementation that uses entity methods to interact with agent entities. - - This class implements AgentProtocol and provides methods to work with Azure Durable Functions - orchestrations, which use generators and yield instead of async/await. - - Key methods: - - get_new_thread(): Create a new conversation thread - - run(): Execute the agent and return a Task for yielding in orchestrations + message: str, + *, + options: dict[str, Any] | None = None, + ) -> RunRequest: + """Get the current run request from the orchestration context. - Note: The run() method is NOT async. It returns a Task directly that must be - yielded in orchestrations to wait for the entity call to complete. + Args: + message: The message to send to the agent + options: Optional options dictionary. Supported keys include + ``response_format``, ``enable_tool_calls``, and ``wait_for_response``. + Additional keys are forwarded to the agent execution. 
-        Example usage in orchestration:
-            writer = app.get_agent(context, "WriterAgent")
-            thread = writer.get_new_thread()  # NOT yielded - returns immediately
+        Returns:
+            RunRequest: The populated run request for this invocation
 
-            response = yield writer.run(  # Yielded - waits for entity call
-                message="Write a haiku about coding",
-                thread=thread
-            )
-        """
+        """
+        # Build the request via the base implementation, then attach the
+        # orchestration's instance_id so it can be stored in the agent's entity state.
 
-    def __init__(self, context: AgentOrchestrationContextType, agent_name: str):
-        """Initialize the DurableAIAgent.
+        request = super().get_run_request(message, options=options)
+        request.orchestration_id = self.context.instance_id
+        return request
 
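[Editor note] This diff deletes the worked orchestration example from the old `DurableAIAgent` docstring. For reference, a minimal sketch of the same blocking flow against the new executor-backed proxy; it assumes the shared `agent_framework_durabletask.DurableAIAgent` keeps the `get_new_thread()`/`run()` surface shown in the docstrings removed below, and the agent name is illustrative:

```python
@app.orchestration_trigger(context_name="context")
def writer_orchestration(context):
    # get_agent returns the shared DurableAIAgent proxy, now backed by
    # AzureFunctionsAgentExecutor (see the _app.py changes above).
    writer = app.get_agent(context, "WriterAgent")
    thread = writer.get_new_thread()  # not yielded - returns immediately
    response = yield writer.run(  # yielded - waits for the entity call
        "Write a haiku about coding",
        thread=thread,
    )
    return response.text
```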
-        Args:
-            context: The orchestration context
-            agent_name: Name of the agent (used to construct entity ID)
-        """
-        self.context = context
-        self.agent_name = agent_name
-        self.id = str(uuid.uuid4())
-        self.name = agent_name
-        self.description = f"Durable agent proxy for {agent_name}"
-        logger.debug("[DurableAIAgent] Initialized for agent: %s", agent_name)
-
-    # We return an AgentTask here which is a TaskBase subclass.
-    # This is an intentional deviation from AgentProtocol which defines run() as async.
-    # The AgentTask can be yielded in Durable Functions orchestrations and will provide
-    # a typed AgentResponse result.
-    def run(  # type: ignore[override]
+    def run_durable_agent(
         self,
-        messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None,
-        *,
+        agent_name: str,
+        run_request: RunRequest,
         thread: AgentThread | None = None,
-        options: dict[str, Any] | None = None,
-        **kwargs: Any,
     ) -> AgentTask:
-        """Execute the agent with messages and return an AgentTask for orchestrations.
 
-        This method implements AgentProtocol and returns an AgentTask (subclass of TaskBase)
-        that can be yielded in Durable Functions orchestrations. The task's result will be
-        a typed AgentResponse.
+        # Resolve session
+        session_id = self._create_session_id(agent_name, thread)
 
-        Args:
-            messages: The message(s) to send to the agent
-            thread: Optional agent thread for conversation context
-            options: Optional dict containing chat options like response_format, tools, etc.
-            **kwargs: Additional arguments (enable_tool_calls)
-
-        Returns:
-            An AgentTask that resolves to an AgentResponse when yielded
-
-        Example:
-            @app.orchestration_trigger(context_name="context")
-            def my_orchestration(context):
-                agent = app.get_agent(context, "MyAgent")
-                thread = agent.get_new_thread()
-                response = yield agent.run("Hello", thread=thread, options={"response_format": MyModel})
-                # response is typed as AgentResponse
-        """
-        message_str = self._normalize_messages(messages)
-
-        # Extract options from the options dict (aligned with ChatAgent pattern)
-        opts = options or {}
-        response_format: type[BaseModel] | None = opts.get("response_format")
-        enable_tool_calls = opts.get("enable_tool_calls", kwargs.get("enable_tool_calls", True))
+        entity_id = df.EntityId(
+            name=session_id.entity_name,
+            key=session_id.key,
+        )
 
-        # Get the session ID for the entity
-        if isinstance(thread, DurableAgentThread) and thread.session_id is not None:
-            session_id = thread.session_id
-        else:
-            # Create a unique session ID for each call when no thread is provided
-            # This ensures each call gets its own conversation context
-            session_key = str(self.context.new_uuid())
-            session_id = AgentSessionId(name=self.agent_name, key=session_key)
-            logger.debug("[DurableAIAgent] No thread provided, created unique session_id: %s", session_id)
-
-        # Create entity ID from session ID
-        entity_id = session_id.to_entity_id()
-
-        # Generate a deterministic correlation ID for this call
-        # This is required by the entity and must be unique per call
-        correlation_id = str(self.context.new_uuid())
         logger.debug(
-            "[DurableAIAgent] Using correlation_id: %s for entity_id: %s for session_id: %s",
-            correlation_id,
+            "[AzureFunctionsAgentExecutor] correlation_id: %s entity_id: %s session_id: %s",
+            run_request.correlation_id,
             entity_id,
             session_id,
         )
 
-        # Prepare the request using RunRequest model
-        # Include the orchestration's instance_id so it can be stored in the agent's entity state
-        run_request = RunRequest(
-            message=message_str,
-            enable_tool_calls=enable_tool_calls,
-            correlation_id=correlation_id,
-            thread_id=session_id.key,
-            response_format=response_format,
-            orchestration_id=self.context.instance_id,
-        )
+        # Branch based on wait_for_response
+        if not run_request.wait_for_response:
+            # Fire-and-forget mode: signal entity and return pre-completed task
+            logger.debug(
+                "[AzureFunctionsAgentExecutor] Fire-and-forget mode: signaling entity (correlation: %s)",
+                run_request.correlation_id,
+            )
+            self.context.signal_entity(entity_id, "run", run_request.to_dict())
 
-        logger.debug("[DurableAIAgent] Calling entity %s with message: %s", entity_id, message_str[:100])
+            # Create acceptance response using base class helper
+            acceptance_response = self._create_acceptance_response(run_request.correlation_id)
 
-        # Call the entity to get the underlying task
-        entity_task = self.context.call_entity(entity_id, "run", run_request.to_dict())
+            # Create a pre-completed task with the acceptance response
+            entity_task = PreCompletedTask(acceptance_response)
+        else:
+            # Blocking mode: call entity and wait for response
+            entity_task = self.context.call_entity(entity_id, "run", run_request.to_dict())
 
-        # Wrap it in an AgentTask that will convert the result to AgentResponse
-        agent_task = AgentTask(
+        return AgentTask(
             entity_task=entity_task,
-            response_format=response_format,
-            correlation_id=correlation_id,
+            response_format=run_request.response_format,
+            correlation_id=run_request.correlation_id,
         )
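[Editor note] A sketch of the new fire-and-forget path for reference; it assumes the shared proxy forwards `options["wait_for_response"]` into `RunRequest` as the `get_run_request` docstring describes, and the agent name is illustrative. The yielded value is the acceptance payload built by `_create_acceptance_response`, not the agent's answer:

```python
@app.orchestration_trigger(context_name="context")
def notify_orchestration(context):
    notifier = app.get_agent(context, "NotifierAgent")
    # With wait_for_response=False, the executor signals the entity
    # (signal_entity) and wraps a PreCompletedTask, so this yield completes
    # immediately with the acceptance payload instead of blocking on the run.
    ack = yield notifier.run("Send the weekly digest", options={"wait_for_response": False})
    return ack
```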
Created AgentTask for correlation_id %s", - correlation_id, - ) - - return agent_task - - def run_stream( - self, - messages: str | ChatMessage | Sequence[str | ChatMessage] | None = None, - *, - thread: AgentThread | None = None, - **kwargs: Any, - ) -> AsyncIterator[AgentResponseUpdate]: - """Run the agent with streaming (not supported for durable agents). - - Raises: - NotImplementedError: Streaming is not supported for durable agents. - """ - raise NotImplementedError("Streaming is not supported for durable agents in orchestrations.") - - def get_new_thread(self, **kwargs: Any) -> AgentThread: - """Create a new agent thread for this orchestration instance. - - Each call creates a unique thread with its own conversation context. - The session ID is deterministic (uses context.new_uuid()) to ensure - orchestration replay works correctly. - - Returns: - A new AgentThread instance with a unique session ID - """ - # Generate a deterministic unique key for this thread - # Using context.new_uuid() ensures the same GUID is generated during replay - session_key = str(self.context.new_uuid()) - - # Create AgentSessionId with agent name and session key - session_id = AgentSessionId(name=self.agent_name, key=session_key) - - thread = DurableAgentThread.from_session_id(session_id, **kwargs) - - logger.debug("[DurableAIAgent] Created new thread with session_id: %s", session_id) - return thread - - def _messages_to_string(self, messages: list[ChatMessage]) -> str: - """Convert a list of ChatMessage objects to a single string. - - Args: - messages: List of ChatMessage objects - - Returns: - Concatenated string of message contents - """ - return "\n".join([msg.text or "" for msg in messages]) - - def _normalize_messages(self, messages: str | ChatMessage | Sequence[str | ChatMessage] | None) -> str: - """Convert supported message inputs to a single string.""" - if messages is None: - return "" - if isinstance(messages, str): - return messages - if isinstance(messages, ChatMessage): - return messages.text or "" - if isinstance(messages, list): - if not messages: - return "" - first_item = messages[0] - if isinstance(first_item, str): - return "\n".join(cast(list[str], messages)) - return self._messages_to_string(cast(list[ChatMessage], messages)) - return str(messages) diff --git a/python/packages/azurefunctions/agent_framework_azurefunctions/py.typed b/python/packages/azurefunctions/agent_framework_azurefunctions/py.typed new file mode 100644 index 0000000000..e69de29bb2 diff --git a/python/packages/azurefunctions/pyproject.toml b/python/packages/azurefunctions/pyproject.toml index 3ad3651c0c..2fe65dc801 100644 --- a/python/packages/azurefunctions/pyproject.toml +++ b/python/packages/azurefunctions/pyproject.toml @@ -23,6 +23,7 @@ classifiers = [ ] dependencies = [ "agent-framework-core", + "agent-framework-durabletask", "azure-functions", "azure-functions-durable", ] diff --git a/python/packages/azurefunctions/tests/integration_tests/test_01_single_agent.py b/python/packages/azurefunctions/tests/integration_tests/test_01_single_agent.py index 2f152552e2..7af3a3b653 100644 --- a/python/packages/azurefunctions/tests/integration_tests/test_01_single_agent.py +++ b/python/packages/azurefunctions/tests/integration_tests/test_01_single_agent.py @@ -15,10 +15,9 @@ """ import pytest +from agent_framework_durabletask import THREAD_ID_HEADER from testutils import SampleTestHelper, skip_if_azure_functions_integration_tests_disabled -from agent_framework_azurefunctions._constants import THREAD_ID_HEADER - # 
Module-level markers - applied to all tests in this file pytestmark = [ pytest.mark.sample("01_single_agent"), diff --git a/python/packages/azurefunctions/tests/test_app.py b/python/packages/azurefunctions/tests/test_app.py index b4b0428f43..f8b414fc34 100644 --- a/python/packages/azurefunctions/tests/test_app.py +++ b/python/packages/azurefunctions/tests/test_app.py @@ -2,6 +2,8 @@ """Unit tests for AgentFunctionApp.""" +# pyright: reportPrivateUsage=false + import json from collections.abc import Awaitable, Callable from typing import Any, TypeVar @@ -11,20 +13,42 @@ import azure.functions as func import pytest from agent_framework import AgentResponse, ChatMessage - -from agent_framework_azurefunctions import AgentFunctionApp -from agent_framework_azurefunctions._app import WAIT_FOR_RESPONSE_FIELD, WAIT_FOR_RESPONSE_HEADER -from agent_framework_azurefunctions._constants import ( +from agent_framework_durabletask import ( MIMETYPE_APPLICATION_JSON, MIMETYPE_TEXT_PLAIN, THREAD_ID_HEADER, + WAIT_FOR_RESPONSE_FIELD, + WAIT_FOR_RESPONSE_HEADER, + AgentEntity, + AgentEntityStateProviderMixin, + DurableAgentState, ) -from agent_framework_azurefunctions._durable_agent_state import DurableAgentState -from agent_framework_azurefunctions._entities import AgentEntity, create_agent_entity + +from agent_framework_azurefunctions import AgentFunctionApp +from agent_framework_azurefunctions._entities import create_agent_entity TFunc = TypeVar("TFunc", bound=Callable[..., Any]) +def _identity_decorator(func: TFunc) -> TFunc: + return func + + +class _InMemoryStateProvider(AgentEntityStateProviderMixin): + def __init__(self, *, thread_id: str = "test-thread", initial_state: dict[str, Any] | None = None) -> None: + self._thread_id = thread_id + self._state_dict: dict[str, Any] = initial_state or {} + + def _get_state_dict(self) -> dict[str, Any]: + return self._state_dict + + def _set_state_dict(self, state: dict[str, Any]) -> None: + self._state_dict = state + + def _get_thread_id_from_entity(self) -> str: + return self._thread_id + + class TestAgentFunctionAppInit: """Test suite for AgentFunctionApp initialization.""" @@ -88,7 +112,7 @@ def test_add_agent_uses_specific_callback(self) -> None: app.add_agent(mock_agent, callback=specific_callback) setup_mock.assert_called_once() - _, _, passed_callback, enable_http_endpoint, enable_mcp_tool_trigger = setup_mock.call_args[0] + _, _, passed_callback, enable_http_endpoint, _enable_mcp_tool_trigger = setup_mock.call_args[0] assert passed_callback is specific_callback assert enable_http_endpoint is True @@ -104,7 +128,7 @@ def test_default_callback_applied_when_no_specific(self) -> None: app.add_agent(mock_agent) setup_mock.assert_called_once() - _, _, passed_callback, enable_http_endpoint, enable_mcp_tool_trigger = setup_mock.call_args[0] + _, _, passed_callback, enable_http_endpoint, _enable_mcp_tool_trigger = setup_mock.call_args[0] assert passed_callback is default_callback assert enable_http_endpoint is True @@ -119,7 +143,7 @@ def test_init_with_agents_uses_default_callback(self) -> None: AgentFunctionApp(agents=[mock_agent], default_callback=default_callback) setup_mock.assert_called_once() - _, _, passed_callback, enable_http_endpoint, enable_mcp_tool_trigger = setup_mock.call_args[0] + _, _, passed_callback, enable_http_endpoint, _enable_mcp_tool_trigger = setup_mock.call_args[0] assert passed_callback is default_callback assert enable_http_endpoint is True @@ -335,13 +359,12 @@ async def test_entity_run_agent_operation(self) -> None: 
return_value=AgentResponse(messages=[ChatMessage(role="assistant", text="Test response")]) ) - entity = AgentEntity(mock_agent) - mock_context = Mock() + entity = AgentEntity(mock_agent, state_provider=_InMemoryStateProvider(thread_id="test-conv-123")) - result = await entity.run( - mock_context, - {"message": "Test message", "thread_id": "test-conv-123", "correlationId": "corr-app-entity-1"}, - ) + result = await entity.run({ + "message": "Test message", + "correlationId": "corr-app-entity-1", + }) assert isinstance(result, AgentResponse) assert result.text == "Test response" @@ -354,22 +377,17 @@ async def test_entity_stores_conversation_history(self) -> None: return_value=AgentResponse(messages=[ChatMessage(role="assistant", text="Response 1")]) ) - entity = AgentEntity(mock_agent) - mock_context = Mock() + entity = AgentEntity(mock_agent, state_provider=_InMemoryStateProvider(thread_id="conv-1")) # Send first message - await entity.run( - mock_context, {"message": "Message 1", "thread_id": "conv-1", "correlationId": "corr-app-entity-2"} - ) + await entity.run({"message": "Message 1", "correlationId": "corr-app-entity-2"}) # Each conversation turn creates 2 entries: request and response history = entity.state.data.conversation_history[0].messages # Request entry assert len(history) == 1 # Just the user message # Send second message - await entity.run( - mock_context, {"message": "Message 2", "thread_id": "conv-2", "correlationId": "corr-app-entity-2b"} - ) + await entity.run({"message": "Message 2", "correlationId": "corr-app-entity-2b"}) # Now we have 4 entries total (2 requests + 2 responses) # Access the first request entry @@ -393,32 +411,26 @@ async def test_entity_increments_message_count(self) -> None: return_value=AgentResponse(messages=[ChatMessage(role="assistant", text="Response")]) ) - entity = AgentEntity(mock_agent) - mock_context = Mock() + entity = AgentEntity(mock_agent, state_provider=_InMemoryStateProvider(thread_id="conv-1")) assert len(entity.state.data.conversation_history) == 0 - await entity.run( - mock_context, {"message": "Message 1", "thread_id": "conv-1", "correlationId": "corr-app-entity-3a"} - ) + await entity.run({"message": "Message 1", "correlationId": "corr-app-entity-3a"}) assert len(entity.state.data.conversation_history) == 2 - await entity.run( - mock_context, {"message": "Message 2", "thread_id": "conv-1", "correlationId": "corr-app-entity-3b"} - ) + await entity.run({"message": "Message 2", "correlationId": "corr-app-entity-3b"}) assert len(entity.state.data.conversation_history) == 4 def test_entity_reset(self) -> None: """Test that entity reset clears state.""" mock_agent = Mock() - entity = AgentEntity(mock_agent) + entity = AgentEntity(mock_agent, state_provider=_InMemoryStateProvider()) # Set some state entity.state = DurableAgentState() # Reset - mock_context = Mock() - entity.reset(mock_context) + entity.reset() assert len(entity.state.data.conversation_history) == 0 @@ -447,7 +459,6 @@ def test_entity_function_handles_run_operation(self) -> None: mock_context.operation_name = "run" mock_context.get_input.return_value = { "message": "Test message", - "thread_id": "conv-123", "correlationId": "corr-app-factory-1", } mock_context.get_state.return_value = None @@ -475,7 +486,6 @@ def test_entity_function_handles_run_agent_operation(self) -> None: mock_context.operation_name = "run_agent" mock_context.get_input.return_value = { "message": "Test message", - "thread_id": "conv-123", "correlationId": "corr-app-factory-1", } 
mock_context.get_state.return_value = None @@ -595,7 +605,11 @@ def test_entity_function_restores_state(self) -> None: } mock_context = Mock() - mock_context.operation_name = "reset" + mock_context.operation_name = "run" + mock_context.get_input.return_value = { + "message": "Test message", + "correlationId": "corr-restore-1", + } mock_context.get_state.return_value = existing_state with patch.object(DurableAgentState, "from_dict", wraps=DurableAgentState.from_dict) as from_dict_mock: @@ -612,12 +626,12 @@ async def test_entity_handles_agent_error(self) -> None: mock_agent = Mock() mock_agent.run = AsyncMock(side_effect=Exception("Agent error")) - entity = AgentEntity(mock_agent) - mock_context = Mock() + entity = AgentEntity(mock_agent, state_provider=_InMemoryStateProvider(thread_id="conv-1")) - result = await entity.run( - mock_context, {"message": "Test message", "thread_id": "conv-1", "correlationId": "corr-app-error-1"} - ) + result = await entity.run({ + "message": "Test message", + "correlationId": "corr-app-error-1", + }) assert isinstance(result, AgentResponse) assert len(result.messages) == 1 @@ -710,7 +724,7 @@ def test_extract_thread_id_from_query_params(self) -> None: request = Mock() request.params = {"thread_id": "query-thread"} - req_body = {} + req_body: dict[str, Any] = {} thread_id = app._resolve_thread_id(request, req_body) @@ -777,7 +791,7 @@ async def test_http_run_accepts_plain_text(self) -> None: assert run_request["message"] == "Plain text via HTTP" assert run_request["role"] == "user" - assert "thread_id" in run_request + assert "thread_id" not in run_request async def test_http_run_accept_header_returns_json(self) -> None: """Test that Accept header requesting JSON results in JSON response.""" @@ -913,9 +927,9 @@ def test_setup_mcp_tool_trigger_registers_decorators(self) -> None: patch.object(app, "durable_client_input") as client_mock, ): # Setup mock decorator chain - func_name_mock.return_value = lambda f: f - mcp_trigger_mock.return_value = lambda f: f - client_mock.return_value = lambda f: f + func_name_mock.return_value = _identity_decorator + mcp_trigger_mock.return_value = _identity_decorator + client_mock.return_value = _identity_decorator app._setup_mcp_tool_trigger(mock_agent.name, mock_agent.description) @@ -938,11 +952,11 @@ def test_setup_mcp_tool_trigger_uses_default_description(self) -> None: app = AgentFunctionApp() with ( - patch.object(app, "function_name", return_value=lambda f: f), + patch.object(app, "function_name", return_value=_identity_decorator), patch.object(app, "mcp_tool_trigger") as mcp_trigger_mock, - patch.object(app, "durable_client_input", return_value=lambda f: f), + patch.object(app, "durable_client_input", return_value=_identity_decorator), ): - mcp_trigger_mock.return_value = lambda f: f + mcp_trigger_mock.return_value = _identity_decorator app._setup_mcp_tool_trigger(mock_agent.name, None) @@ -1128,10 +1142,10 @@ def test_health_check_includes_mcp_tool_enabled(self) -> None: app = AgentFunctionApp(agents=[mock_agent], enable_mcp_tool_trigger=True) # Capture the health check handler function - captured_handler = None + captured_handler: Callable[[func.HttpRequest], func.HttpResponse] | None = None - def capture_decorator(*args, **kwargs): - def decorator(func): + def capture_decorator(*args: Any, **kwargs: Any) -> Callable[[TFunc], TFunc]: + def decorator(func: TFunc) -> TFunc: nonlocal captured_handler captured_handler = func return func diff --git a/python/packages/azurefunctions/tests/test_entities.py 
b/python/packages/azurefunctions/tests/test_entities.py index 5d980f8610..555b588887 100644 --- a/python/packages/azurefunctions/tests/test_entities.py +++ b/python/packages/azurefunctions/tests/test_entities.py @@ -1,423 +1,32 @@ # Copyright (c) Microsoft. All rights reserved. -"""Unit tests for AgentEntity and entity operations. +"""Unit tests for create_agent_entity factory function. Run with: pytest tests/test_entities.py -v """ -import asyncio -from collections.abc import AsyncIterator, Callable -from datetime import datetime +from collections.abc import Callable from typing import Any, TypeVar -from unittest.mock import AsyncMock, Mock, patch +from unittest.mock import AsyncMock, Mock import pytest -from agent_framework import AgentResponse, AgentResponseUpdate, ChatMessage, Role -from pydantic import BaseModel +from agent_framework import AgentResponse, ChatMessage, Role -from agent_framework_azurefunctions._durable_agent_state import ( - DurableAgentState, - DurableAgentStateData, - DurableAgentStateMessage, - DurableAgentStateRequest, - DurableAgentStateTextContent, -) -from agent_framework_azurefunctions._entities import AgentEntity, create_agent_entity -from agent_framework_azurefunctions._models import RunRequest +from agent_framework_azurefunctions._entities import create_agent_entity TFunc = TypeVar("TFunc", bound=Callable[..., Any]) -def _role_value(chat_message: DurableAgentStateMessage) -> str: - """Helper to extract the string role from a ChatMessage.""" - role = getattr(chat_message, "role", None) - role_value = getattr(role, "value", role) - if role_value is None: - return "" - return str(role_value) - - def _agent_response(text: str | None) -> AgentResponse: """Create an AgentResponse with a single assistant message.""" message = ( - ChatMessage(role="assistant", text=text) if text is not None else ChatMessage(role="assistant", contents=[]) + ChatMessage(role=Role.ASSISTANT, text=text) + if text is not None + else ChatMessage(role=Role.ASSISTANT, contents=[]) ) return AgentResponse(messages=[message]) -class RecordingCallback: - """Callback implementation capturing streaming and final responses for assertions.""" - - def __init__(self): - self.stream_mock = AsyncMock() - self.response_mock = AsyncMock() - - async def on_streaming_response_update( - self, - update: AgentResponseUpdate, - context: Any, - ) -> None: - await self.stream_mock(update, context) - - async def on_agent_response(self, response: AgentResponse, context: Any) -> None: - await self.response_mock(response, context) - - -class EntityStructuredResponse(BaseModel): - answer: float - - -class TestAgentEntityInit: - """Test suite for AgentEntity initialization.""" - - def test_init_creates_entity(self) -> None: - """Test that AgentEntity initializes correctly.""" - mock_agent = Mock() - - entity = AgentEntity(mock_agent) - - assert entity.agent == mock_agent - assert len(entity.state.data.conversation_history) == 0 - assert entity.state.data.extension_data is None - assert entity.state.schema_version == DurableAgentState.SCHEMA_VERSION - - def test_init_stores_agent_reference(self) -> None: - """Test that the agent reference is stored correctly.""" - mock_agent = Mock() - mock_agent.name = "TestAgent" - - entity = AgentEntity(mock_agent) - - assert entity.agent.name == "TestAgent" - - def test_init_with_different_agent_types(self) -> None: - """Test initialization with different agent types.""" - agent1 = Mock() - agent1.__class__.__name__ = "AzureOpenAIAgent" - - agent2 = Mock() - 
agent2.__class__.__name__ = "CustomAgent" - - entity1 = AgentEntity(agent1) - entity2 = AgentEntity(agent2) - - assert entity1.agent.__class__.__name__ == "AzureOpenAIAgent" - assert entity2.agent.__class__.__name__ == "CustomAgent" - - -class TestAgentEntityRunAgent: - """Test suite for the run_agent operation.""" - - async def test_run_executes_agent(self) -> None: - """Test that run executes the agent.""" - mock_agent = Mock() - mock_response = _agent_response("Test response") - mock_agent.run = AsyncMock(return_value=mock_response) - - entity = AgentEntity(mock_agent) - mock_context = Mock() - - result = await entity.run( - mock_context, {"message": "Test message", "thread_id": "conv-123", "correlationId": "corr-entity-1"} - ) - - # Verify agent.run was called - mock_agent.run.assert_called_once() - _, kwargs = mock_agent.run.call_args - sent_messages: list[Any] = kwargs.get("messages") - assert len(sent_messages) == 1 - sent_message = sent_messages[0] - assert isinstance(sent_message, ChatMessage) - assert getattr(sent_message, "text", None) == "Test message" - assert getattr(sent_message.role, "value", sent_message.role) == "user" - - # Verify result - assert isinstance(result, AgentResponse) - assert result.text == "Test response" - - async def test_run_agent_executes_agent(self) -> None: - """Test that run_agent executes the agent.""" - mock_agent = Mock() - mock_response = _agent_response("Test response") - mock_agent.run = AsyncMock(return_value=mock_response) - - entity = AgentEntity(mock_agent) - mock_context = Mock() - - result = await entity.run_agent( - mock_context, {"message": "Test message", "thread_id": "conv-123", "correlationId": "corr-entity-1"} - ) - - # Verify agent.run was called - mock_agent.run.assert_called_once() - _, kwargs = mock_agent.run.call_args - sent_messages: list[Any] = kwargs.get("messages") - assert len(sent_messages) == 1 - sent_message = sent_messages[0] - assert isinstance(sent_message, ChatMessage) - assert getattr(sent_message, "text", None) == "Test message" - assert getattr(sent_message.role, "value", sent_message.role) == "user" - - # Verify result - assert isinstance(result, AgentResponse) - assert result.text == "Test response" - - async def test_run_agent_streaming_callbacks_invoked(self) -> None: - """Ensure streaming updates trigger callbacks and run() is not used.""" - - updates = [ - AgentResponseUpdate(text="Hello"), - AgentResponseUpdate(text=" world"), - ] - - async def update_generator() -> AsyncIterator[AgentResponseUpdate]: - for update in updates: - yield update - - mock_agent = Mock() - mock_agent.name = "StreamingAgent" - mock_agent.run_stream = Mock(return_value=update_generator()) - mock_agent.run = AsyncMock(side_effect=AssertionError("run() should not be called when streaming succeeds")) - - callback = RecordingCallback() - entity = AgentEntity(mock_agent, callback=callback) - mock_context = Mock() - - result = await entity.run( - mock_context, - { - "message": "Tell me something", - "thread_id": "session-1", - "correlationId": "corr-stream-1", - }, - ) - - assert isinstance(result, AgentResponse) - assert "Hello" in result.text - assert callback.stream_mock.await_count == len(updates) - assert callback.response_mock.await_count == 1 - mock_agent.run.assert_not_called() - - # Validate callback arguments - stream_calls = callback.stream_mock.await_args_list - for expected_update, recorded_call in zip(updates, stream_calls, strict=True): - assert recorded_call.args[0] is expected_update - context = recorded_call.args[1] - 
assert context.agent_name == "StreamingAgent" - assert context.correlation_id == "corr-stream-1" - assert context.thread_id == "session-1" - assert context.request_message == "Tell me something" - - final_call = callback.response_mock.await_args - assert final_call is not None - final_response, final_context = final_call.args - assert final_context.agent_name == "StreamingAgent" - assert final_context.correlation_id == "corr-stream-1" - assert final_context.thread_id == "session-1" - assert final_context.request_message == "Tell me something" - assert getattr(final_response, "text", "").strip() - - async def test_run_agent_final_callback_without_streaming(self) -> None: - """Ensure the final callback fires even when streaming is unavailable.""" - - mock_agent = Mock() - mock_agent.name = "NonStreamingAgent" - mock_agent.run_stream = None - agent_response = _agent_response("Final response") - mock_agent.run = AsyncMock(return_value=agent_response) - - callback = RecordingCallback() - entity = AgentEntity(mock_agent, callback=callback) - mock_context = Mock() - - result = await entity.run( - mock_context, - { - "message": "Hi", - "thread_id": "session-2", - "correlationId": "corr-final-1", - }, - ) - - assert isinstance(result, AgentResponse) - assert result.text == "Final response" - assert callback.stream_mock.await_count == 0 - assert callback.response_mock.await_count == 1 - - final_call = callback.response_mock.await_args - assert final_call is not None - assert final_call.args[0] is agent_response - final_context = final_call.args[1] - assert final_context.agent_name == "NonStreamingAgent" - assert final_context.correlation_id == "corr-final-1" - assert final_context.thread_id == "session-2" - assert final_context.request_message == "Hi" - - async def test_run_agent_updates_conversation_history(self) -> None: - """Test that run_agent updates the conversation history.""" - mock_agent = Mock() - mock_response = _agent_response("Agent response") - mock_agent.run = AsyncMock(return_value=mock_response) - - entity = AgentEntity(mock_agent) - mock_context = Mock() - - await entity.run( - mock_context, {"message": "User message", "thread_id": "conv-1", "correlationId": "corr-entity-2"} - ) - - # Should have 1 entry: user message + assistant response - user_history = entity.state.data.conversation_history[0].messages - assistant_history = entity.state.data.conversation_history[1].messages - - assert len(user_history) == 1 - - user_msg = user_history[0] - assert _role_value(user_msg) == "user" - assert user_msg.text == "User message" - - assistant_msg = assistant_history[0] - assert _role_value(assistant_msg) == "assistant" - assert assistant_msg.text == "Agent response" - - async def test_run_agent_increments_message_count(self) -> None: - """Test that run_agent increments the message count.""" - mock_agent = Mock() - mock_agent.run = AsyncMock(return_value=_agent_response("Response")) - - entity = AgentEntity(mock_agent) - mock_context = Mock() - - assert len(entity.state.data.conversation_history) == 0 - - await entity.run( - mock_context, {"message": "Message 1", "thread_id": "conv-1", "correlationId": "corr-entity-3a"} - ) - assert len(entity.state.data.conversation_history) == 2 - - await entity.run( - mock_context, {"message": "Message 2", "thread_id": "conv-1", "correlationId": "corr-entity-3b"} - ) - assert len(entity.state.data.conversation_history) == 4 - - await entity.run( - mock_context, {"message": "Message 3", "thread_id": "conv-1", "correlationId": "corr-entity-3c"} - ) - 
-        assert len(entity.state.data.conversation_history) == 6
-
-    async def test_run_agent_with_none_thread_id(self) -> None:
-        """Test run_agent with a None thread identifier."""
-        mock_agent = Mock()
-        mock_agent.run = AsyncMock(return_value=_agent_response("Response"))
-
-        entity = AgentEntity(mock_agent)
-        mock_context = Mock()
-
-        with pytest.raises(ValueError, match="thread_id"):
-            await entity.run(mock_context, {"message": "Message", "thread_id": None, "correlationId": "corr-entity-5"})
-
-    async def test_run_agent_multiple_conversations(self) -> None:
-        """Test that run_agent maintains history across multiple messages."""
-        mock_agent = Mock()
-        mock_agent.run = AsyncMock(return_value=_agent_response("Response"))
-
-        entity = AgentEntity(mock_agent)
-        mock_context = Mock()
-
-        # Send multiple messages
-        await entity.run(
-            mock_context, {"message": "Message 1", "thread_id": "conv-1", "correlationId": "corr-entity-8a"}
-        )
-        await entity.run(
-            mock_context, {"message": "Message 2", "thread_id": "conv-1", "correlationId": "corr-entity-8b"}
-        )
-        await entity.run(
-            mock_context, {"message": "Message 3", "thread_id": "conv-1", "correlationId": "corr-entity-8c"}
-        )
-
-        history = entity.state.data.conversation_history
-        assert len(history) == 6
-        assert entity.state.message_count == 6
-
-
-class TestAgentEntityReset:
-    """Test suite for the reset operation."""
-
-    def test_reset_clears_conversation_history(self) -> None:
-        """Test that reset clears the conversation history."""
-        mock_agent = Mock()
-        entity = AgentEntity(mock_agent)
-
-        # Add some history with proper DurableAgentStateEntry objects
-        entity.state.data.conversation_history = [
-            DurableAgentStateRequest(
-                correlation_id="test-1",
-                created_at=datetime.now(),
-                messages=[
-                    DurableAgentStateMessage(
-                        role="user",
-                        contents=[DurableAgentStateTextContent(text="msg1")],
-                    )
-                ],
-            ),
-        ]
-
-        mock_context = Mock()
-        entity.reset(mock_context)
-
-        assert entity.state.data.conversation_history == []
-
-    def test_reset_with_extension_data(self) -> None:
-        """Test that reset works when entity has extension data."""
-        mock_agent = Mock()
-        entity = AgentEntity(mock_agent)
-
-        # Set up some initial state with conversation history
-        entity.state.data = DurableAgentStateData(conversation_history=[], extension_data={"some_key": "some_value"})
-
-        mock_context = Mock()
-        entity.reset(mock_context)
-
-        assert len(entity.state.data.conversation_history) == 0
-
-    def test_reset_clears_message_count(self) -> None:
-        """Test that reset clears the message count."""
-        mock_agent = Mock()
-        entity = AgentEntity(mock_agent)
-
-        mock_context = Mock()
-        entity.reset(mock_context)
-
-        assert len(entity.state.data.conversation_history) == 0
-
-    async def test_reset_after_conversation(self) -> None:
-        """Test reset after a full conversation."""
-        mock_agent = Mock()
-        mock_agent.run = AsyncMock(return_value=_agent_response("Response"))
-
-        entity = AgentEntity(mock_agent)
-        mock_context = Mock()
-
-        # Have a conversation
-        await entity.run(
-            mock_context, {"message": "Message 1", "thread_id": "conv-1", "correlationId": "corr-entity-10a"}
-        )
-        await entity.run(
-            mock_context, {"message": "Message 2", "thread_id": "conv-1", "correlationId": "corr-entity-10b"}
-        )
-
-        # Verify state before reset
-        assert entity.state.message_count == 4
-        assert len(entity.state.data.conversation_history) == 4
-
-        # Reset
-        entity.reset(mock_context)
-
-        # Verify state after reset
-        assert entity.state.message_count == 0
-        assert len(entity.state.data.conversation_history) == 0
-
-
 class TestCreateAgentEntity:
     """Test suite for the create_agent_entity factory function."""
@@ -439,9 +48,9 @@ def test_entity_function_handles_run_agent(self) -> None:
         # Mock context
         mock_context = Mock()
         mock_context.operation_name = "run"
+        mock_context.entity_key = "conv-123"
         mock_context.get_input.return_value = {
             "message": "Test message",
-            "thread_id": "conv-123",
             "correlationId": "corr-entity-factory",
         }
         mock_context.get_state.return_value = None
@@ -535,7 +144,7 @@ def test_entity_function_creates_new_entity_on_first_call(self) -> None:
         assert state["data"] == {"conversationHistory": []}
 
     def test_entity_function_restores_existing_state(self) -> None:
-        """Test that the entity function restores existing state."""
+        """Test that the entity function can operate when existing state is present."""
         mock_agent = Mock()
         entity_function = create_agent_entity(mock_agent)
@@ -584,482 +193,14 @@ def test_entity_function_restores_existing_state(self) -> None:
         mock_context.operation_name = "reset"
         mock_context.get_state.return_value = existing_state
 
-        with patch.object(DurableAgentState, "from_dict", wraps=DurableAgentState.from_dict) as from_dict_mock:
-            entity_function(mock_context)
-
-            from_dict_mock.assert_called_once_with(existing_state)
-
-
-class TestErrorHandling:
-    """Test suite for error handling in entities."""
-
-    async def test_run_agent_handles_agent_exception(self) -> None:
-        """Test that run_agent handles agent exceptions."""
-        mock_agent = Mock()
-        mock_agent.run = AsyncMock(side_effect=Exception("Agent failed"))
-
-        entity = AgentEntity(mock_agent)
-        mock_context = Mock()
-
-        result = await entity.run(
-            mock_context, {"message": "Message", "thread_id": "conv-1", "correlationId": "corr-entity-error-1"}
-        )
-
-        assert isinstance(result, AgentResponse)
-        assert len(result.messages) == 1
-        content = result.messages[0].contents[0]
-        assert content.type == "error"
-        assert "Agent failed" in (content.message or "")
-        assert content.error_code == "Exception"
-
-    async def test_run_agent_handles_value_error(self) -> None:
-        """Test that run_agent handles ValueError instances."""
-        mock_agent = Mock()
-        mock_agent.run = AsyncMock(side_effect=ValueError("Invalid input"))
-
-        entity = AgentEntity(mock_agent)
-        mock_context = Mock()
-
-        result = await entity.run(
-            mock_context, {"message": "Message", "thread_id": "conv-1", "correlationId": "corr-entity-error-2"}
-        )
-
-        assert isinstance(result, AgentResponse)
-        assert len(result.messages) == 1
-        content = result.messages[0].contents[0]
-        assert content.type == "error"
-        assert content.error_code == "ValueError"
-        assert "Invalid input" in str(content.message)
-
-    async def test_run_agent_handles_timeout_error(self) -> None:
-        """Test that run_agent handles TimeoutError instances."""
-        mock_agent = Mock()
-        mock_agent.run = AsyncMock(side_effect=TimeoutError("Request timeout"))
-
-        entity = AgentEntity(mock_agent)
-        mock_context = Mock()
-
-        result = await entity.run(
-            mock_context, {"message": "Message", "thread_id": "conv-1", "correlationId": "corr-entity-error-3"}
-        )
-
-        assert isinstance(result, AgentResponse)
-        assert len(result.messages) == 1
-        content = result.messages[0].contents[0]
-        assert content.type == "error"
-        assert content.error_code == "TimeoutError"
-
-    def test_entity_function_handles_exception_in_operation(self) -> None:
-        """Test that the entity function handles exceptions gracefully."""
-        mock_agent = Mock()
-
-        entity_function = create_agent_entity(mock_agent)
-
-        mock_context = Mock()
-        mock_context.operation_name = "run"
-        mock_context.get_input.side_effect = Exception("Input error")
-        mock_context.get_state.return_value = None
-
-        # Execute - should not raise
         entity_function(mock_context)
 
-        # Verify error was set
         assert mock_context.set_result.called
-        result = mock_context.set_result.call_args[0][0]
-        assert "error" in result
-
-    async def test_run_agent_preserves_message_on_error(self) -> None:
-        """Test that run_agent preserves message information on error."""
-        mock_agent = Mock()
-        mock_agent.run = AsyncMock(side_effect=Exception("Error"))
-
-        entity = AgentEntity(mock_agent)
-        mock_context = Mock()
-
-        result = await entity.run(
-            mock_context,
-            {"message": "Test message", "thread_id": "conv-123", "correlationId": "corr-entity-error-4"},
-        )
-
-        # Even on error, message info should be preserved
-        assert isinstance(result, AgentResponse)
-        assert len(result.messages) == 1
-        content = result.messages[0].contents[0]
-        assert content.type == "error"
-
-
-class TestConversationHistory:
-    """Test suite for conversation history tracking."""
-
-    async def test_conversation_history_has_timestamps(self) -> None:
-        """Test that conversation history entries include timestamps."""
-        mock_agent = Mock()
-        mock_agent.run = AsyncMock(return_value=_agent_response("Response"))
-
-        entity = AgentEntity(mock_agent)
-        mock_context = Mock()
-
-        await entity.run(
-            mock_context, {"message": "Message", "thread_id": "conv-1", "correlationId": "corr-entity-history-1"}
-        )
-
-        # Check both user and assistant messages have timestamps
-        for entry in entity.state.data.conversation_history:
-            timestamp = entry.created_at
-            assert timestamp is not None
-            # Verify timestamp is in ISO format
-            datetime.fromisoformat(str(timestamp))
-
-    async def test_conversation_history_ordering(self) -> None:
-        """Test that conversation history maintains the correct order."""
-        mock_agent = Mock()
-
-        entity = AgentEntity(mock_agent)
-        mock_context = Mock()
-
-        # Send multiple messages with different responses
-        mock_agent.run = AsyncMock(return_value=_agent_response("Response 1"))
-        await entity.run(
-            mock_context,
-            {"message": "Message 1", "thread_id": "conv-1", "correlationId": "corr-entity-history-2a"},
-        )
-
-        mock_agent.run = AsyncMock(return_value=_agent_response("Response 2"))
-        await entity.run(
-            mock_context,
-            {"message": "Message 2", "thread_id": "conv-1", "correlationId": "corr-entity-history-2b"},
-        )
-
-        mock_agent.run = AsyncMock(return_value=_agent_response("Response 3"))
-        await entity.run(
-            mock_context,
-            {"message": "Message 3", "thread_id": "conv-1", "correlationId": "corr-entity-history-2c"},
-        )
-
-        # Verify order
-        history = entity.state.data.conversation_history
-        # Each conversation turn creates 2 entries: request and response
-        assert history[0].messages[0].text == "Message 1"  # Request 1
-        assert history[1].messages[0].text == "Response 1"  # Response 1
-        assert history[2].messages[0].text == "Message 2"  # Request 2
-        assert history[3].messages[0].text == "Response 2"  # Response 2
-        assert history[4].messages[0].text == "Message 3"  # Request 3
-        assert history[5].messages[0].text == "Response 3"  # Response 3
-
-    async def test_conversation_history_role_alternation(self) -> None:
-        """Test that conversation history alternates between user and assistant roles."""
-        mock_agent = Mock()
-        mock_agent.run = AsyncMock(return_value=_agent_response("Response"))
-
-        entity = AgentEntity(mock_agent)
-        mock_context = Mock()
-
-        await entity.run(
-            mock_context,
-            {"message": "Message 1", "thread_id": "conv-1", "correlationId": "corr-entity-history-3a"},
-        )
-        await entity.run(
-            mock_context,
-            {"message": "Message 2", "thread_id": "conv-1", "correlationId": "corr-entity-history-3b"},
-        )
-
-        # Check role alternation
-        history = entity.state.data.conversation_history
-        # Each conversation turn creates 2 entries: request and response
-        assert history[0].messages[0].role == "user"  # Request 1
-        assert history[1].messages[0].role == "assistant"  # Response 1
-        assert history[2].messages[0].role == "user"  # Request 2
-        assert history[3].messages[0].role == "assistant"  # Response 2
-
-
-class TestRunRequestSupport:
-    """Test suite for RunRequest support in entities."""
-
-    async def test_run_agent_with_run_request_object(self) -> None:
-        """Test run_agent with a RunRequest object."""
-        mock_agent = Mock()
-        mock_agent.run = AsyncMock(return_value=_agent_response("Response"))
-
-        entity = AgentEntity(mock_agent)
-        mock_context = Mock()
-
-        request = RunRequest(
-            message="Test message",
-            thread_id="conv-123",
-            role=Role.USER,
-            enable_tool_calls=True,
-            correlation_id="corr-runreq-1",
-        )
-
-        result = await entity.run(mock_context, request)
-
-        assert isinstance(result, AgentResponse)
-        assert result.text == "Response"
-
-    async def test_run_agent_with_dict_request(self) -> None:
-        """Test run_agent with a dictionary request."""
-        mock_agent = Mock()
-        mock_agent.run = AsyncMock(return_value=_agent_response("Response"))
-
-        entity = AgentEntity(mock_agent)
-        mock_context = Mock()
-
-        request_dict = {
-            "message": "Test message",
-            "thread_id": "conv-456",
-            "role": "system",
-            "enable_tool_calls": False,
-            "correlationId": "corr-runreq-2",
-        }
-
-        result = await entity.run(mock_context, request_dict)
-
-        assert isinstance(result, AgentResponse)
-        assert result.text == "Response"
-
-    async def test_run_agent_with_string_raises_without_correlation(self) -> None:
-        """Test that run_agent rejects legacy string input without correlation ID."""
-        mock_agent = Mock()
-        mock_agent.run = AsyncMock(return_value=_agent_response("Response"))
-
-        entity = AgentEntity(mock_agent)
-        mock_context = Mock()
-
-        with pytest.raises(ValueError):
-            await entity.run(mock_context, "Simple message")
-
-    async def test_run_agent_stores_role_in_history(self) -> None:
-        """Test that run_agent stores the role in conversation history."""
-        mock_agent = Mock()
-        mock_agent.run = AsyncMock(return_value=_agent_response("Response"))
-
-        entity = AgentEntity(mock_agent)
-        mock_context = Mock()
-
-        # Send as system role
-        request = RunRequest(
-            message="System message",
-            thread_id="conv-runreq-3",
-            role=Role.SYSTEM,
-            correlation_id="corr-runreq-3",
-        )
-
-        await entity.run(mock_context, request)
-
-        # Check that system role was stored
-        history = entity.state.data.conversation_history
-        assert history[0].messages[0].role == "system"
-        assert history[0].messages[0].text == "System message"
-
-    async def test_run_agent_with_response_format(self) -> None:
-        """Test run_agent with a JSON response format."""
-        mock_agent = Mock()
-        # Return JSON response
-        mock_agent.run = AsyncMock(return_value=_agent_response('{"answer": 42}'))
-
-        entity = AgentEntity(mock_agent)
-        mock_context = Mock()
-
-        request = RunRequest(
-            message="What is the answer?",
-            thread_id="conv-runreq-4",
-            response_format=EntityStructuredResponse,
-            correlation_id="corr-runreq-4",
-        )
-
-        result = await entity.run(mock_context, request)
-
-        assert isinstance(result, AgentResponse)
-        assert result.text == '{"answer": 42}'
-        assert result.value is None
-
-    async def test_run_agent_disable_tool_calls(self) -> None:
-        """Test run_agent with tool calls disabled."""
-        mock_agent = Mock()
-        mock_agent.run = AsyncMock(return_value=_agent_response("Response"))
-
-        entity = AgentEntity(mock_agent)
-        mock_context = Mock()
-
-        request = RunRequest(
-            message="Test", thread_id="conv-runreq-5", enable_tool_calls=False, correlation_id="corr-runreq-5"
-        )
-
-        result = await entity.run(mock_context, request)
-
-        assert isinstance(result, AgentResponse)
-        # Agent should have been called (tool disabling is framework-dependent)
-        mock_agent.run.assert_called_once()
-
-    async def test_entity_function_with_run_request_dict(self) -> None:
-        """Test that the entity function handles the RunRequest dict format."""
-        mock_agent = Mock()
-        mock_agent.run = AsyncMock(return_value=_agent_response("Response"))
-
-        entity_function = create_agent_entity(mock_agent)
-
-        mock_context = Mock()
-        mock_context.operation_name = "run"
-        mock_context.get_input.return_value = {
-            "message": "Test message",
-            "thread_id": "conv-789",
-            "role": "user",
-            "enable_tool_calls": True,
-            "correlationId": "corr-runreq-6",
-        }
-        mock_context.get_state.return_value = None
-
-        await asyncio.to_thread(entity_function, mock_context)
-
-        # Verify result was set
-        assert mock_context.set_result.called
-        result = mock_context.set_result.call_args[0][0]
-        assert isinstance(result, dict)
-
-        # Check if messages are present
-        assert "messages" in result
-        assert len(result["messages"]) > 0
-        message = result["messages"][0]
-
-        # Check for text in various possible locations
-        text_found = False
-        if "text" in message and message["text"] == "Response":
-            text_found = True
-        elif "contents" in message:
-            for content in message["contents"]:
-                if isinstance(content, dict) and content.get("text") == "Response":
-                    text_found = True
-                    break
-
-        assert text_found, f"Response text not found in message: {message}"
-
-
-class TestDurableAgentStateRequestOrchestrationId:
-    """Test suite for DurableAgentStateRequest orchestration_id field."""
-
-    def test_request_with_orchestration_id(self) -> None:
-        """Test creating a request with an orchestration_id."""
-        request = DurableAgentStateRequest(
-            correlation_id="corr-123",
-            created_at=datetime.now(),
-            messages=[
-                DurableAgentStateMessage(
-                    role="user",
-                    contents=[DurableAgentStateTextContent(text="test")],
-                )
-            ],
-            orchestration_id="orch-456",
-        )
-
-        assert request.orchestration_id == "orch-456"
-
-    def test_request_to_dict_includes_orchestration_id(self) -> None:
-        """Test that to_dict includes orchestrationId when set."""
-        request = DurableAgentStateRequest(
-            correlation_id="corr-123",
-            created_at=datetime.now(),
-            messages=[
-                DurableAgentStateMessage(
-                    role="user",
-                    contents=[DurableAgentStateTextContent(text="test")],
-                )
-            ],
-            orchestration_id="orch-789",
-        )
-
-        data = request.to_dict()
-
-        assert "orchestrationId" in data
-        assert data["orchestrationId"] == "orch-789"
-
-    def test_request_to_dict_excludes_orchestration_id_when_none(self) -> None:
-        """Test that to_dict excludes orchestrationId when not set."""
-        request = DurableAgentStateRequest(
-            correlation_id="corr-123",
-            created_at=datetime.now(),
-            messages=[
-                DurableAgentStateMessage(
-                    role="user",
-                    contents=[DurableAgentStateTextContent(text="test")],
-                )
-            ],
-        )
-
-        data = request.to_dict()
-
-        assert "orchestrationId" not in data
-
-    def test_request_from_dict_with_orchestration_id(self) -> None:
-        """Test from_dict correctly parses orchestrationId."""
-        data = {
-            "$type": "request",
-            "correlationId": "corr-123",
-            "createdAt": "2024-01-01T00:00:00Z",
-            "messages": [{"role": "user", "contents": [{"$type": "text", "text": "test"}]}],
-            "orchestrationId": "orch-from-dict",
-        }
-
-        request = DurableAgentStateRequest.from_dict(data)
-
-        assert request.orchestration_id == "orch-from-dict"
-
-    def test_request_from_run_request_with_orchestration_id(self) -> None:
-        """Test from_run_request correctly transfers orchestration_id."""
-        run_request = RunRequest(
-            message="test message",
-            correlation_id="corr-run",
-            orchestration_id="orch-from-run-request",
-        )
-
-        durable_request = DurableAgentStateRequest.from_run_request(run_request)
-
-        assert durable_request.orchestration_id == "orch-from-run-request"
-
-    def test_request_from_run_request_without_orchestration_id(self) -> None:
-        """Test from_run_request correctly handles missing orchestration_id."""
-        run_request = RunRequest(
-            message="test message",
-            correlation_id="corr-run",
-        )
-
-        durable_request = DurableAgentStateRequest.from_run_request(run_request)
-
-        assert durable_request.orchestration_id is None
-
-
-class TestDurableAgentStateMessageCreatedAt:
-    """Test suite for DurableAgentStateMessage created_at field handling."""
-
-    def test_message_from_run_request_without_created_at_preserves_none(self) -> None:
-        """Test from_run_request preserves None created_at instead of defaulting to current time.
-
-        When a RunRequest has no created_at value, the resulting DurableAgentStateMessage
-        should also have None for created_at, not default to current UTC time.
-        """
-        run_request = RunRequest(
-            message="test message",
-            correlation_id="corr-run",
-            created_at=None,  # Explicitly None
-        )
-
-        durable_message = DurableAgentStateMessage.from_run_request(run_request)
-
-        assert durable_message.created_at is None
-
-    def test_message_from_run_request_with_created_at_parses_correctly(self) -> None:
-        """Test from_run_request correctly parses a valid created_at timestamp."""
-        run_request = RunRequest(
-            message="test message",
-            correlation_id="corr-run",
-            created_at="2024-01-15T10:30:00Z",
-        )
-
-        durable_message = DurableAgentStateMessage.from_run_request(run_request)
-
-        assert durable_message.created_at is not None
-        assert durable_message.created_at.year == 2024
-        assert durable_message.created_at.month == 1
-        assert durable_message.created_at.day == 15
+        # Reset should clear history and persist via set_state
+        assert mock_context.set_state.called
+        persisted_state = mock_context.set_state.call_args[0][0]
+        assert persisted_state["data"]["conversationHistory"] == []
 
 
 if __name__ == "__main__":
diff --git a/python/packages/azurefunctions/tests/test_models.py b/python/packages/azurefunctions/tests/test_models.py
deleted file mode 100644
index be31f59800..0000000000
--- a/python/packages/azurefunctions/tests/test_models.py
+++ /dev/null
@@ -1,447 +0,0 @@
-# Copyright (c) Microsoft. All rights reserved.
- -"""Unit tests for data models (AgentSessionId, RunRequest, AgentResponse).""" - -import azure.durable_functions as df -import pytest -from agent_framework import Role -from pydantic import BaseModel - -from agent_framework_azurefunctions._models import AgentSessionId, RunRequest - - -class ModuleStructuredResponse(BaseModel): - value: int - - -class TestAgentSessionId: - """Test suite for AgentSessionId.""" - - def test_init_creates_session_id(self) -> None: - """Test that AgentSessionId initializes correctly.""" - session_id = AgentSessionId(name="AgentEntity", key="test-key-123") - - assert session_id.name == "AgentEntity" - assert session_id.key == "test-key-123" - - def test_with_random_key_generates_guid(self) -> None: - """Test that with_random_key generates a GUID.""" - session_id = AgentSessionId.with_random_key(name="AgentEntity") - - assert session_id.name == "AgentEntity" - assert len(session_id.key) == 32 # UUID hex is 32 chars - # Verify it's a valid hex string - int(session_id.key, 16) - - def test_with_random_key_unique_keys(self) -> None: - """Test that with_random_key generates unique keys.""" - session_id1 = AgentSessionId.with_random_key(name="AgentEntity") - session_id2 = AgentSessionId.with_random_key(name="AgentEntity") - - assert session_id1.key != session_id2.key - - def test_to_entity_id_conversion(self) -> None: - """Test conversion to EntityId.""" - session_id = AgentSessionId(name="AgentEntity", key="test-key") - entity_id = session_id.to_entity_id() - - assert isinstance(entity_id, df.EntityId) - assert entity_id.name == "dafx-AgentEntity" - assert entity_id.key == "test-key" - - def test_from_entity_id_conversion(self) -> None: - """Test creation from EntityId.""" - entity_id = df.EntityId(name="dafx-AgentEntity", key="test-key") - session_id = AgentSessionId.from_entity_id(entity_id) - - assert isinstance(session_id, AgentSessionId) - assert session_id.name == "AgentEntity" - assert session_id.key == "test-key" - - def test_round_trip_entity_id_conversion(self) -> None: - """Test round-trip conversion to and from EntityId.""" - original = AgentSessionId(name="AgentEntity", key="test-key") - entity_id = original.to_entity_id() - restored = AgentSessionId.from_entity_id(entity_id) - - assert restored.name == original.name - assert restored.key == original.key - - def test_str_representation(self) -> None: - """Test string representation.""" - session_id = AgentSessionId(name="AgentEntity", key="test-key-123") - str_repr = str(session_id) - - assert str_repr == "@AgentEntity@test-key-123" - - def test_repr_representation(self) -> None: - """Test repr representation.""" - session_id = AgentSessionId(name="AgentEntity", key="test-key") - repr_str = repr(session_id) - - assert "AgentSessionId" in repr_str - assert "AgentEntity" in repr_str - assert "test-key" in repr_str - - def test_parse_valid_session_id(self) -> None: - """Test parsing valid session ID string.""" - session_id = AgentSessionId.parse("@AgentEntity@test-key-123") - - assert session_id.name == "AgentEntity" - assert session_id.key == "test-key-123" - - def test_parse_invalid_format_no_prefix(self) -> None: - """Test parsing invalid format without @ prefix.""" - with pytest.raises(ValueError) as exc_info: - AgentSessionId.parse("AgentEntity@test-key") - - assert "Invalid agent session ID format" in str(exc_info.value) - - def test_parse_invalid_format_single_part(self) -> None: - """Test parsing invalid format with single part.""" - with pytest.raises(ValueError) as exc_info: - 
AgentSessionId.parse("@AgentEntity") - - assert "Invalid agent session ID format" in str(exc_info.value) - - def test_parse_with_multiple_at_signs_in_key(self) -> None: - """Test parsing with @ signs in the key.""" - session_id = AgentSessionId.parse("@AgentEntity@key-with@symbols") - - assert session_id.name == "AgentEntity" - assert session_id.key == "key-with@symbols" - - def test_parse_round_trip(self) -> None: - """Test round-trip parse and string conversion.""" - original = AgentSessionId(name="AgentEntity", key="test-key") - str_repr = str(original) - parsed = AgentSessionId.parse(str_repr) - - assert parsed.name == original.name - assert parsed.key == original.key - - def test_parse_with_agent_name_override(self) -> None: - """Test parsing @name@key format with agent_name parameter overrides the name.""" - session_id = AgentSessionId.parse("@OriginalAgent@test-key-123", agent_name="OverriddenAgent") - - assert session_id.name == "OverriddenAgent" - assert session_id.key == "test-key-123" - - def test_parse_without_agent_name_uses_parsed_name(self) -> None: - """Test parsing @name@key format without agent_name uses name from string.""" - session_id = AgentSessionId.parse("@ParsedAgent@test-key-123") - - assert session_id.name == "ParsedAgent" - assert session_id.key == "test-key-123" - - def test_parse_plain_string_with_agent_name(self) -> None: - """Test parsing plain string with agent_name uses entire string as key.""" - session_id = AgentSessionId.parse("simple-thread-123", agent_name="TestAgent") - - assert session_id.name == "TestAgent" - assert session_id.key == "simple-thread-123" - - def test_parse_plain_string_without_agent_name_raises(self) -> None: - """Test parsing plain string without agent_name raises ValueError.""" - with pytest.raises(ValueError) as exc_info: - AgentSessionId.parse("simple-thread-123") - - assert "Invalid agent session ID format" in str(exc_info.value) - - def test_to_entity_name_adds_prefix(self) -> None: - """Test that to_entity_name adds the dafx- prefix.""" - entity_name = AgentSessionId.to_entity_name("TestAgent") - assert entity_name == "dafx-TestAgent" - - def test_from_entity_id_strips_prefix(self) -> None: - """Test that from_entity_id strips the dafx- prefix.""" - entity_id = df.EntityId(name="dafx-TestAgent", key="key123") - session_id = AgentSessionId.from_entity_id(entity_id) - - assert session_id.name == "TestAgent" - assert session_id.key == "key123" - - def test_from_entity_id_raises_without_prefix(self) -> None: - """Test that from_entity_id raises ValueError when entity name lacks the prefix.""" - entity_id = df.EntityId(name="TestAgent", key="key123") - - with pytest.raises(ValueError) as exc_info: - AgentSessionId.from_entity_id(entity_id) - - assert "not a valid agent session ID" in str(exc_info.value) - assert "dafx-" in str(exc_info.value) - - -class TestRunRequest: - """Test suite for RunRequest.""" - - def test_init_with_defaults(self) -> None: - """Test RunRequest initialization with defaults.""" - request = RunRequest(message="Hello", thread_id="thread-default") - - assert request.message == "Hello" - assert request.role == Role.USER - assert request.response_format is None - assert request.enable_tool_calls is True - assert request.thread_id == "thread-default" - - def test_init_with_all_fields(self) -> None: - """Test RunRequest initialization with all fields.""" - schema = ModuleStructuredResponse - request = RunRequest( - message="Hello", - thread_id="thread-123", - role=Role.SYSTEM, - response_format=schema, - 
enable_tool_calls=False, - ) - - assert request.message == "Hello" - assert request.role == Role.SYSTEM - assert request.response_format is schema - assert request.enable_tool_calls is False - assert request.thread_id == "thread-123" - - def test_init_coerces_string_role(self) -> None: - """Ensure string role values are coerced into Role instances.""" - request = RunRequest(message="Hello", thread_id="thread-str-role", role="system") # type: ignore[arg-type] - - assert request.role == Role.SYSTEM - - def test_to_dict_with_defaults(self) -> None: - """Test to_dict with default values.""" - request = RunRequest(message="Test message", thread_id="thread-to-dict") - data = request.to_dict() - - assert data["message"] == "Test message" - assert data["enable_tool_calls"] is True - assert data["role"] == "user" - assert "response_format" not in data or data["response_format"] is None - assert data["thread_id"] == "thread-to-dict" - - def test_to_dict_with_all_fields(self) -> None: - """Test to_dict with all fields.""" - schema = ModuleStructuredResponse - request = RunRequest( - message="Hello", - thread_id="thread-456", - role=Role.ASSISTANT, - response_format=schema, - enable_tool_calls=False, - ) - data = request.to_dict() - - assert data["message"] == "Hello" - assert data["role"] == "assistant" - assert data["response_format"]["__response_schema_type__"] == "pydantic_model" - assert data["response_format"]["module"] == schema.__module__ - assert data["response_format"]["qualname"] == schema.__qualname__ - assert data["enable_tool_calls"] is False - assert data["thread_id"] == "thread-456" - - def test_from_dict_with_defaults(self) -> None: - """Test from_dict with minimal data.""" - data = {"message": "Hello", "thread_id": "thread-from-dict"} - request = RunRequest.from_dict(data) - - assert request.message == "Hello" - assert request.role == Role.USER - assert request.enable_tool_calls is True - assert request.thread_id == "thread-from-dict" - - def test_from_dict_with_all_fields(self) -> None: - """Test from_dict with all fields.""" - data = { - "message": "Test", - "role": "system", - "response_format": { - "__response_schema_type__": "pydantic_model", - "module": ModuleStructuredResponse.__module__, - "qualname": ModuleStructuredResponse.__qualname__, - }, - "enable_tool_calls": False, - "thread_id": "thread-789", - } - request = RunRequest.from_dict(data) - - assert request.message == "Test" - assert request.role == Role.SYSTEM - assert request.response_format is ModuleStructuredResponse - assert request.enable_tool_calls is False - assert request.thread_id == "thread-789" - - def test_from_dict_with_unknown_role_preserves_value(self) -> None: - """Test from_dict keeps custom roles intact.""" - data = {"message": "Test", "role": "reviewer", "thread_id": "thread-with-custom-role"} - request = RunRequest.from_dict(data) - - assert request.role.value == "reviewer" - assert request.role != Role.USER - - def test_from_dict_empty_message(self) -> None: - """Test from_dict with empty message.""" - data = {"thread_id": "thread-empty"} - request = RunRequest.from_dict(data) - - assert request.message == "" - assert request.role == Role.USER - assert request.thread_id == "thread-empty" - - def test_round_trip_dict_conversion(self) -> None: - """Test round-trip to_dict and from_dict.""" - original = RunRequest( - message="Test message", - thread_id="thread-123", - role=Role.SYSTEM, - response_format=ModuleStructuredResponse, - enable_tool_calls=False, - ) - - data = original.to_dict() - restored = 
RunRequest.from_dict(data) - - assert restored.message == original.message - assert restored.role == original.role - assert restored.response_format is ModuleStructuredResponse - assert restored.enable_tool_calls == original.enable_tool_calls - assert restored.thread_id == original.thread_id - - def test_round_trip_with_pydantic_response_format(self) -> None: - """Ensure Pydantic response formats serialize and deserialize properly.""" - original = RunRequest( - message="Structured", - thread_id="thread-pydantic", - response_format=ModuleStructuredResponse, - ) - - data = original.to_dict() - - assert data["response_format"]["__response_schema_type__"] == "pydantic_model" - assert data["response_format"]["module"] == ModuleStructuredResponse.__module__ - assert data["response_format"]["qualname"] == ModuleStructuredResponse.__qualname__ - - restored = RunRequest.from_dict(data) - assert restored.response_format is ModuleStructuredResponse - - def test_init_with_correlationId(self) -> None: - """Test RunRequest initialization with correlationId.""" - request = RunRequest(message="Test message", thread_id="thread-corr-init", correlation_id="corr-123") - - assert request.message == "Test message" - assert request.correlation_id == "corr-123" - - def test_to_dict_with_correlationId(self) -> None: - """Test to_dict includes correlationId.""" - request = RunRequest(message="Test", thread_id="thread-corr-to-dict", correlation_id="corr-456") - data = request.to_dict() - - assert data["message"] == "Test" - assert data["correlationId"] == "corr-456" - - def test_from_dict_with_correlationId(self) -> None: - """Test from_dict with correlationId.""" - data = {"message": "Test", "correlationId": "corr-789", "thread_id": "thread-corr-from-dict"} - request = RunRequest.from_dict(data) - - assert request.message == "Test" - assert request.correlation_id == "corr-789" - assert request.thread_id == "thread-corr-from-dict" - - def test_round_trip_with_correlationId(self) -> None: - """Test round-trip to_dict and from_dict with correlationId.""" - original = RunRequest( - message="Test message", - thread_id="thread-123", - role=Role.SYSTEM, - correlation_id="corr-123", - ) - - data = original.to_dict() - restored = RunRequest.from_dict(data) - - assert restored.message == original.message - assert restored.role == original.role - assert restored.correlation_id == original.correlation_id - assert restored.thread_id == original.thread_id - - def test_init_with_orchestration_id(self) -> None: - """Test RunRequest initialization with orchestration_id.""" - request = RunRequest( - message="Test message", - thread_id="thread-orch-init", - orchestration_id="orch-123", - ) - - assert request.message == "Test message" - assert request.orchestration_id == "orch-123" - - def test_to_dict_with_orchestration_id(self) -> None: - """Test to_dict includes orchestrationId.""" - request = RunRequest( - message="Test", - thread_id="thread-orch-to-dict", - orchestration_id="orch-456", - ) - data = request.to_dict() - - assert data["message"] == "Test" - assert data["orchestrationId"] == "orch-456" - - def test_to_dict_excludes_orchestration_id_when_none(self) -> None: - """Test to_dict excludes orchestrationId when not set.""" - request = RunRequest( - message="Test", - thread_id="thread-orch-none", - ) - data = request.to_dict() - - assert "orchestrationId" not in data - - def test_from_dict_with_orchestration_id(self) -> None: - """Test from_dict with orchestrationId.""" - data = { - "message": "Test", - "orchestrationId": 
"orch-789", - "thread_id": "thread-orch-from-dict", - } - request = RunRequest.from_dict(data) - - assert request.message == "Test" - assert request.orchestration_id == "orch-789" - assert request.thread_id == "thread-orch-from-dict" - - def test_round_trip_with_orchestration_id(self) -> None: - """Test round-trip to_dict and from_dict with orchestration_id.""" - original = RunRequest( - message="Test message", - thread_id="thread-123", - role=Role.SYSTEM, - correlation_id="corr-123", - orchestration_id="orch-123", - ) - - data = original.to_dict() - restored = RunRequest.from_dict(data) - - assert restored.message == original.message - assert restored.role == original.role - assert restored.correlation_id == original.correlation_id - assert restored.orchestration_id == original.orchestration_id - assert restored.thread_id == original.thread_id - - -class TestModelIntegration: - """Test suite for integration between models.""" - - def test_run_request_with_session_id(self) -> None: - """Test using RunRequest with AgentSessionId.""" - session_id = AgentSessionId.with_random_key("AgentEntity") - request = RunRequest(message="Test message", thread_id=str(session_id)) - - assert request.thread_id is not None - assert request.thread_id == str(session_id) - assert request.thread_id.startswith("@AgentEntity@") - - -if __name__ == "__main__": - pytest.main([__file__, "-v", "--tb=short"]) diff --git a/python/packages/azurefunctions/tests/test_orchestration.py b/python/packages/azurefunctions/tests/test_orchestration.py index 8a7473ee8b..2b9a4126d4 100644 --- a/python/packages/azurefunctions/tests/test_orchestration.py +++ b/python/packages/azurefunctions/tests/test_orchestration.py @@ -6,11 +6,11 @@ from unittest.mock import Mock import pytest -from agent_framework import AgentResponse, AgentThread, ChatMessage +from agent_framework import AgentResponse, ChatMessage, Role +from agent_framework_durabletask import DurableAIAgent from azure.durable_functions.models.Task import TaskBase, TaskState -from agent_framework_azurefunctions import AgentFunctionApp, DurableAIAgent -from agent_framework_azurefunctions._models import AgentSessionId, DurableAgentThread +from agent_framework_azurefunctions import AgentFunctionApp from agent_framework_azurefunctions._orchestration import AgentTask @@ -38,46 +38,96 @@ def _create_entity_task(task_id: int = 1) -> TaskBase: return _FakeTask(task_id) -class TestAgentResponseHelpers: - """Tests for helper utilities that prepare AgentResponse values.""" +@pytest.fixture +def mock_context(): + """Create a mock orchestration context with UUID support.""" + context = Mock() + context.instance_id = "test-instance" + context.current_utc_datetime = Mock() + return context - @staticmethod - def _create_agent_task() -> AgentTask: - entity_task = _create_entity_task() - return AgentTask(entity_task, None, "correlation-id") - def test_load_agent_response_from_instance(self) -> None: - task = self._create_agent_task() - response = AgentResponse(messages=[ChatMessage(role="assistant", text='{"foo": "bar"}')]) +@pytest.fixture +def mock_context_with_uuid() -> tuple[Mock, str]: + """Create a mock context with a single UUID.""" + from uuid import UUID + + context = Mock() + context.instance_id = "test-instance" + context.current_utc_datetime = Mock() + test_uuid = UUID("aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa") + context.new_uuid = Mock(return_value=test_uuid) + return context, test_uuid.hex + + +@pytest.fixture +def mock_context_with_multiple_uuids() -> tuple[Mock, list[str]]: + 
"""Create a mock context with multiple UUIDs via side_effect.""" + from uuid import UUID + + context = Mock() + context.instance_id = "test-instance" + context.current_utc_datetime = Mock() + uuids = [ + UUID("aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"), + UUID("bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb"), + UUID("cccccccc-cccc-cccc-cccc-cccccccccccc"), + ] + context.new_uuid = Mock(side_effect=uuids) + # Return the hex versions for assertion checking + hex_uuids = [uuid.hex for uuid in uuids] + return context, hex_uuids + - loaded = task._load_agent_response(response) +@pytest.fixture +def executor_with_uuid() -> tuple[Any, Mock, str]: + """Create an executor with a mocked generate_unique_id method.""" + from agent_framework_azurefunctions._orchestration import AzureFunctionsAgentExecutor - assert loaded is response - assert loaded.value is None + context = Mock() + context.instance_id = "test-instance" + context.current_utc_datetime = Mock() - def test_load_agent_response_from_serialized(self) -> None: - task = self._create_agent_task() - serialized = AgentResponse(messages=[ChatMessage(role="assistant", text="structured")]).to_dict() - serialized["value"] = {"answer": 42} + executor = AzureFunctionsAgentExecutor(context) + test_uuid_hex = "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa" + executor.generate_unique_id = Mock(return_value=test_uuid_hex) - loaded = task._load_agent_response(serialized) + return executor, context, test_uuid_hex - assert loaded is not None - assert loaded.value == {"answer": 42} - loaded_dict = loaded.to_dict() - assert loaded_dict["type"] == "agent_response" - def test_load_agent_response_rejects_none(self) -> None: - task = self._create_agent_task() +@pytest.fixture +def executor_with_multiple_uuids() -> tuple[Any, Mock, list[str]]: + """Create an executor with multiple mocked UUIDs.""" + from agent_framework_azurefunctions._orchestration import AzureFunctionsAgentExecutor - with pytest.raises(ValueError): - task._load_agent_response(None) + context = Mock() + context.instance_id = "test-instance" + context.current_utc_datetime = Mock() - def test_load_agent_response_rejects_unsupported_type(self) -> None: - task = self._create_agent_task() + executor = AzureFunctionsAgentExecutor(context) + uuid_hexes = [ + "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa", + "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb", + "cccccccc-cccc-cccc-cccc-cccccccccccc", + "dddddddd-dddd-dddd-dddd-dddddddddddd", + "eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee", + ] + executor.generate_unique_id = Mock(side_effect=uuid_hexes) - with pytest.raises(TypeError, match="Unsupported type"): - task._load_agent_response(["invalid", "list"]) # type: ignore[arg-type] + return executor, context, uuid_hexes + + +@pytest.fixture +def executor_with_context(mock_context_with_uuid: tuple[Mock, str]) -> tuple[Any, Mock]: + """Create an executor with a mocked context.""" + from agent_framework_azurefunctions._orchestration import AzureFunctionsAgentExecutor + + context, _ = mock_context_with_uuid + return AzureFunctionsAgentExecutor(context), context + + +class TestAgentResponseHelpers: + """Tests for response handling through public AgentTask API.""" def test_try_set_value_success(self) -> None: """Test try_set_value correctly processes successful task completion.""" @@ -142,354 +192,99 @@ class TestSchema(BaseModel): assert isinstance(task.result.value, TestSchema) assert task.result.value.answer == "42" - def test_ensure_response_format_parses_value(self) -> None: - """Test _ensure_response_format correctly parses response value.""" - from 
pydantic import BaseModel - - class SampleSchema(BaseModel): - name: str - - task = self._create_agent_task() - response = AgentResponse(messages=[ChatMessage(role="assistant", text='{"name": "test"}')]) - - # Value should be None initially - assert response.value is None - - # Parse the value - task._ensure_response_format(SampleSchema, "test-correlation", response) - - # Value should now be parsed - assert isinstance(response.value, SampleSchema) - assert response.value.name == "test" - - def test_ensure_response_format_skips_if_already_parsed(self) -> None: - """Test _ensure_response_format does not re-parse if value already matches format.""" - from pydantic import BaseModel - - class SampleSchema(BaseModel): - name: str - - task = self._create_agent_task() - existing_value = SampleSchema(name="existing") - response = AgentResponse( - messages=[ChatMessage(role="assistant", text='{"name": "new"}')], - value=existing_value, - ) - - # Call _ensure_response_format - task._ensure_response_format(SampleSchema, "test-correlation", response) - - # Value should remain unchanged (not re-parsed) - assert response.value is existing_value - assert response.value.name == "existing" - - -class TestDurableAIAgent: - """Test suite for DurableAIAgent wrapper.""" - def test_init(self) -> None: - """Test DurableAIAgent initialization.""" - mock_context = Mock() - mock_context.instance_id = "test-instance-123" - - agent = DurableAIAgent(mock_context, "TestAgent") - - assert agent.context == mock_context - assert agent.agent_name == "TestAgent" - - def test_implements_agent_protocol(self) -> None: - """Test that DurableAIAgent implements AgentProtocol.""" - from agent_framework import AgentProtocol - - mock_context = Mock() - agent = DurableAIAgent(mock_context, "TestAgent") +class TestAgentFunctionAppGetAgent: + """Test suite for AgentFunctionApp.get_agent.""" - # Check that agent satisfies AgentProtocol - assert isinstance(agent, AgentProtocol) + def test_get_agent_raises_for_unregistered_agent(self) -> None: + """Test get_agent raises ValueError when agent is not registered.""" + app = _app_with_registered_agents("KnownAgent") - def test_has_agent_protocol_properties(self) -> None: - """Test that DurableAIAgent has AgentProtocol properties.""" - mock_context = Mock() - agent = DurableAIAgent(mock_context, "TestAgent") + with pytest.raises(ValueError, match=r"Agent 'MissingAgent' is not registered with this app\."): + app.get_agent(Mock(), "MissingAgent") - # AgentProtocol properties - assert hasattr(agent, "id") - assert hasattr(agent, "name") - assert hasattr(agent, "description") - # Verify values - assert agent.name == "TestAgent" - assert agent.description == "Durable agent proxy for TestAgent" - assert agent.id is not None # Auto-generated UUID +class TestAzureFunctionsFireAndForget: + """Test fire-and-forget mode for AzureFunctionsAgentExecutor.""" - def test_get_new_thread(self) -> None: - """Test creating a new agent thread.""" - mock_context = Mock() - mock_context.instance_id = "test-instance-456" - mock_context.new_uuid = Mock(return_value="test-guid-456") + def test_fire_and_forget_calls_signal_entity(self, executor_with_uuid: tuple[Any, Mock, str]) -> None: + """Verify wait_for_response=False calls signal_entity instead of call_entity.""" + executor, context, _ = executor_with_uuid + context.signal_entity = Mock() + context.call_entity = Mock(return_value=_create_entity_task()) - agent = DurableAIAgent(mock_context, "WriterAgent") + agent = DurableAIAgent(executor, "TestAgent") thread = 
 
-        assert isinstance(thread, DurableAgentThread)
-        assert thread.session_id is not None
-        session_id = thread.session_id
-        assert isinstance(session_id, AgentSessionId)
-        assert session_id.name == "WriterAgent"
-        assert session_id.key == "test-guid-456"
-        mock_context.new_uuid.assert_called_once()
-
-    def test_get_new_thread_deterministic(self) -> None:
-        """Test that get_new_thread creates deterministic session IDs."""
-
-        mock_context = Mock()
-        mock_context.instance_id = "test-instance-789"
-        mock_context.new_uuid = Mock(side_effect=["session-guid-1", "session-guid-2"])
-
-        agent = DurableAIAgent(mock_context, "EditorAgent")
-
-        # Create multiple threads - they should have unique session IDs
-        thread1 = agent.get_new_thread()
-        thread2 = agent.get_new_thread()
-
-        assert isinstance(thread1, DurableAgentThread)
-        assert isinstance(thread2, DurableAgentThread)
-
-        session_id1 = thread1.session_id
-        session_id2 = thread2.session_id
-        assert session_id1 is not None and session_id2 is not None
-        assert isinstance(session_id1, AgentSessionId)
-        assert isinstance(session_id2, AgentSessionId)
-        assert session_id1.name == "EditorAgent"
-        assert session_id2.name == "EditorAgent"
-        assert session_id1.key == "session-guid-1"
-        assert session_id2.key == "session-guid-2"
-        assert mock_context.new_uuid.call_count == 2
-
-    def test_run_creates_entity_call(self) -> None:
-        """Test that run() creates proper entity call and returns a Task."""
-        mock_context = Mock()
-        mock_context.instance_id = "test-instance-001"
-        mock_context.new_uuid = Mock(side_effect=["thread-guid", "correlation-guid"])
-
-        entity_task = _create_entity_task()
-        mock_context.call_entity = Mock(return_value=entity_task)
-
-        agent = DurableAIAgent(mock_context, "TestAgent")
+        # Run with wait_for_response=False
+        result = agent.run("Test message", thread=thread, options={"wait_for_response": False})
 
-        # Create thread
-        thread = agent.get_new_thread()
+        # Verify signal_entity was called and call_entity was not
+        assert context.signal_entity.call_count == 1
+        assert context.call_entity.call_count == 0
 
-        # Call run() - returns AgentTask directly
-        task = agent.run(messages="Test message", thread=thread, enable_tool_calls=True)
-
-        assert isinstance(task, AgentTask)
-        assert task.children[0] == entity_task
-
-        # Verify call_entity was called with correct parameters
-        assert mock_context.call_entity.called
-        call_args = mock_context.call_entity.call_args
-        entity_id, operation, request = call_args[0]
-
-        assert operation == "run"
-        assert request["message"] == "Test message"
-        assert request["enable_tool_calls"] is True
-        assert "correlationId" in request
-        assert request["correlationId"] == "correlation-guid"
-        assert "thread_id" in request
-        assert request["thread_id"] == "thread-guid"
-        # Verify orchestration ID is set from context.instance_id
-        assert "orchestrationId" in request
-        assert request["orchestrationId"] == "test-instance-001"
-
-    def test_run_sets_orchestration_id(self) -> None:
-        """Test that run() sets the orchestration_id from context.instance_id."""
-        mock_context = Mock()
-        mock_context.instance_id = "my-orchestration-123"
-        mock_context.new_uuid = Mock(side_effect=["thread-guid", "correlation-guid"])
+        # Should still return an AgentTask
+        assert isinstance(result, AgentTask)
 
-        entity_task = _create_entity_task()
-        mock_context.call_entity = Mock(return_value=entity_task)
+    def test_fire_and_forget_returns_completed_task(self, executor_with_uuid: tuple[Any, Mock, str]) -> None:
+        """Verify wait_for_response=False returns pre-completed AgentTask."""
+        executor, context, _ = executor_with_uuid
+        context.signal_entity = Mock()
 
-        agent = DurableAIAgent(mock_context, "TestAgent")
+        agent = DurableAIAgent(executor, "TestAgent")
         thread = agent.get_new_thread()
 
-        agent.run(messages="Test", thread=thread)
+        result = agent.run("Test message", thread=thread, options={"wait_for_response": False})
 
-        call_args = mock_context.call_entity.call_args
-        request = call_args[0][2]
+        # Task should be immediately complete
+        assert isinstance(result, AgentTask)
+        assert result.is_completed
 
-        assert request["orchestrationId"] == "my-orchestration-123"
-
-    def test_run_without_thread(self) -> None:
-        """Test that run() works without explicit thread (creates unique session key)."""
-        mock_context = Mock()
-        mock_context.instance_id = "test-instance-002"
-        mock_context.new_uuid = Mock(side_effect=["auto-generated-guid", "correlation-guid"])
-
-        entity_task = _create_entity_task()
-        mock_context.call_entity = Mock(return_value=entity_task)
+    def test_fire_and_forget_returns_acceptance_response(self, executor_with_uuid: tuple[Any, Mock, str]) -> None:
+        """Verify wait_for_response=False returns acceptance response."""
+        executor, context, _ = executor_with_uuid
+        context.signal_entity = Mock()
 
-        agent = DurableAIAgent(mock_context, "TestAgent")
-
-        # Call without thread
-        task = agent.run(messages="Test message")
-
-        assert isinstance(task, AgentTask)
-        assert task.children[0] == entity_task
-
-        # Verify the entity ID uses the auto-generated GUID with dafx- prefix
-        call_args = mock_context.call_entity.call_args
-        entity_id = call_args[0][0]
-        assert entity_id.name == "dafx-TestAgent"
-        assert entity_id.key == "auto-generated-guid"
-        # Should be called twice: once for session_key, once for correlationId
-        assert mock_context.new_uuid.call_count == 2
-
-    def test_run_with_response_format(self) -> None:
-        """Test that run() passes response format correctly."""
-        mock_context = Mock()
-        mock_context.instance_id = "test-instance-003"
-
-        entity_task = _create_entity_task()
-        mock_context.call_entity = Mock(return_value=entity_task)
-
-        agent = DurableAIAgent(mock_context, "TestAgent")
-
-        from pydantic import BaseModel
-
-        class SampleSchema(BaseModel):
-            key: str
-
-        # Create thread and call
-        thread = agent.get_new_thread()
-
-        task = agent.run(messages="Test message", thread=thread, options={"response_format": SampleSchema})
-
-        assert isinstance(task, AgentTask)
-        assert task.children[0] == entity_task
-
-        # Verify schema was passed in the call_entity arguments
-        call_args = mock_context.call_entity.call_args
-        input_data = call_args[0][2]  # Third argument is input_data
-        assert "response_format" in input_data
-        assert input_data["response_format"]["__response_schema_type__"] == "pydantic_model"
-        assert input_data["response_format"]["module"] == SampleSchema.__module__
-        assert input_data["response_format"]["qualname"] == SampleSchema.__qualname__
-
-    def test_messages_to_string(self) -> None:
-        """Test converting ChatMessage list to string."""
-        from agent_framework import ChatMessage
-
-        mock_context = Mock()
-        agent = DurableAIAgent(mock_context, "TestAgent")
-
-        messages = [
-            ChatMessage(role="user", text="Hello"),
-            ChatMessage(role="assistant", text="Hi there"),
-            ChatMessage(role="user", text="How are you?"),
-        ]
-
-        result = agent._messages_to_string(messages)
-
-        assert result == "Hello\nHi there\nHow are you?"
-
-    def test_run_with_chat_message(self) -> None:
-        """Test that run() handles ChatMessage input."""
-        from agent_framework import ChatMessage
-
-        mock_context = Mock()
-        mock_context.new_uuid = Mock(side_effect=["thread-guid", "correlation-guid"])
-        entity_task = _create_entity_task()
-        mock_context.call_entity = Mock(return_value=entity_task)
-
-        agent = DurableAIAgent(mock_context, "TestAgent")
+        agent = DurableAIAgent(executor, "TestAgent")
         thread = agent.get_new_thread()
 
-        # Call with ChatMessage
-        msg = ChatMessage(role="user", text="Hello")
-        task = agent.run(messages=msg, thread=thread)
-
-        assert isinstance(task, AgentTask)
-        assert task.children[0] == entity_task
-
-        # Verify message was converted to string
-        call_args = mock_context.call_entity.call_args
-        request = call_args[0][2]
-        assert request["message"] == "Hello"
-
-    def test_run_stream_raises_not_implemented(self) -> None:
-        """Test that run_stream() method raises NotImplementedError."""
-        mock_context = Mock()
-        agent = DurableAIAgent(mock_context, "TestAgent")
-
-        with pytest.raises(NotImplementedError) as exc_info:
-            agent.run_stream("Test message")
-
-        error_msg = str(exc_info.value)
-        assert "Streaming is not supported" in error_msg
-
-    def test_entity_id_format(self) -> None:
-        """Test that EntityId is created with correct format (name, key)."""
-        from azure.durable_functions import EntityId
-
-        mock_context = Mock()
-        mock_context.new_uuid = Mock(return_value="test-guid-789")
-        mock_context.call_entity = Mock(return_value=_create_entity_task())
-
-        agent = DurableAIAgent(mock_context, "WriterAgent")
+        result = agent.run("Test message", thread=thread, options={"wait_for_response": False})
+
+        # Get the result
+        response = result.result
+        assert isinstance(response, AgentResponse)
+        assert len(response.messages) == 1
+        assert response.messages[0].role == Role.SYSTEM
+        # Check message contains key information
+        message_text = response.messages[0].text
+        assert "accepted" in message_text.lower()
+        assert "background" in message_text.lower()
+
+    def test_blocking_mode_still_works(self, executor_with_uuid: tuple[Any, Mock, str]) -> None:
+        """Verify wait_for_response=True uses call_entity as before."""
+        executor, context, _ = executor_with_uuid
+        context.signal_entity = Mock()
+        context.call_entity = Mock(return_value=_create_entity_task())
+
+        agent = DurableAIAgent(executor, "TestAgent")
         thread = agent.get_new_thread()
 
-        # Call run() to trigger entity ID creation
-        agent.run("Test", thread=thread)
+        result = agent.run("Test message", thread=thread, options={"wait_for_response": True})
 
-        # Verify call_entity was called with correct EntityId
-        call_args = mock_context.call_entity.call_args
-        entity_id = call_args[0][0]
+        # Verify call_entity was called and signal_entity was not
+        assert context.call_entity.call_count == 1
+        assert context.signal_entity.call_count == 0
 
-        # EntityId should be EntityId(name="dafx-WriterAgent", key="test-guid-789")
-        # Which formats as "@dafx-writeragent@test-guid-789"
-        assert isinstance(entity_id, EntityId)
-        assert entity_id.name == "dafx-WriterAgent"
-        assert entity_id.key == "test-guid-789"
-        assert str(entity_id) == "@dafx-writeragent@test-guid-789"
-
-
-class TestAgentFunctionAppGetAgent:
-    """Test suite for AgentFunctionApp.get_agent."""
-
-    def test_get_agent_method(self) -> None:
-        """Test get_agent method creates DurableAIAgent for registered agent."""
-        app = _app_with_registered_agents("MyAgent")
-        mock_context = Mock()
-        mock_context.instance_id = "test-instance-100"
-
-        agent = app.get_agent(mock_context, "MyAgent")
-
-        assert isinstance(agent, DurableAIAgent)
-        assert agent.agent_name == "MyAgent"
-        assert agent.context == mock_context
-
-    def test_get_agent_raises_for_unregistered_agent(self) -> None:
-        """Test get_agent raises ValueError when agent is not registered."""
-        app = _app_with_registered_agents("KnownAgent")
-
-        with pytest.raises(ValueError, match=r"Agent 'MissingAgent' is not registered with this app\."):
-            app.get_agent(Mock(), "MissingAgent")
+        # Should return an AgentTask
+        assert isinstance(result, AgentTask)
 
 
 class TestOrchestrationIntegration:
     """Integration tests for orchestration scenarios."""
 
-    def test_sequential_agent_calls_simulation(self) -> None:
+    def test_sequential_agent_calls_simulation(self, executor_with_multiple_uuids: tuple[Any, Mock, list[str]]) -> None:
        """Simulate sequential agent calls in an orchestration."""
-        mock_context = Mock()
-        mock_context.instance_id = "test-orchestration-001"
-        # new_uuid will be called 3 times:
-        # 1. thread creation
-        # 2. correlationId for first call
-        # 3. correlationId for second call
-        mock_context.new_uuid = Mock(side_effect=["deterministic-guid-001", "corr-1", "corr-2"])
+        executor, context, uuid_hexes = executor_with_multiple_uuids
 
         # Track entity calls
         entity_calls: list[dict[str, Any]] = []
@@ -498,10 +293,10 @@ def mock_call_entity_side_effect(entity_id: Any, operation: str, input_data: dic
             entity_calls.append({"entity_id": str(entity_id), "operation": operation, "input": input_data})
             return _create_entity_task()
 
-        mock_context.call_entity = Mock(side_effect=mock_call_entity_side_effect)
+        context.call_entity = Mock(side_effect=mock_call_entity_side_effect)
 
-        app = _app_with_registered_agents("WriterAgent")
-        agent = app.get_agent(mock_context, "WriterAgent")
+        # Create agent directly with executor (not via app.get_agent)
+        agent = DurableAIAgent(executor, "WriterAgent")
 
         # Create thread
         thread = agent.get_new_thread()
@@ -517,18 +312,15 @@ def mock_call_entity_side_effect(entity_id: Any, operation: str, input_data: dic
         # Verify both calls used the same entity (same session key)
         assert len(entity_calls) == 2
         assert entity_calls[0]["entity_id"] == entity_calls[1]["entity_id"]
-        # EntityId format is @dafx-writeragent@deterministic-guid-001
-        assert entity_calls[0]["entity_id"] == "@dafx-writeragent@deterministic-guid-001"
-        # new_uuid called 3 times: thread + 2 correlation IDs
-        assert mock_context.new_uuid.call_count == 3
+        # EntityId format is @dafx-writeragent@
+        expected_entity_id = f"@dafx-writeragent@{uuid_hexes[0]}"
+        assert entity_calls[0]["entity_id"] == expected_entity_id
+        # generate_unique_id called 3 times: thread + 2 correlation IDs
+        assert executor.generate_unique_id.call_count == 3
 
-    def test_multiple_agents_in_orchestration(self) -> None:
+    def test_multiple_agents_in_orchestration(self, executor_with_multiple_uuids: tuple[Any, Mock, list[str]]) -> None:
         """Test using multiple different agents in one orchestration."""
-        mock_context = Mock()
-        mock_context.instance_id = "test-orchestration-002"
-        # Mock new_uuid to return different GUIDs for each call
-        # Order: writer thread, editor thread, writer correlation, editor correlation
-        mock_context.new_uuid = Mock(side_effect=["writer-guid-001", "editor-guid-002", "writer-corr", "editor-corr"])
+        executor, context, uuid_hexes = executor_with_multiple_uuids
 
         entity_calls: list[str] = []
@@ -536,11 +328,11 @@ def mock_call_entity_side_effect(entity_id: Any, operation: str, input_data: dic
             entity_calls.append(str(entity_id))
             return _create_entity_task()
 
-        mock_context.call_entity = Mock(side_effect=mock_call_entity_side_effect)
+        context.call_entity = Mock(side_effect=mock_call_entity_side_effect)
 
-        app = _app_with_registered_agents("WriterAgent", "EditorAgent")
-        writer = app.get_agent(mock_context, "WriterAgent")
-        editor = app.get_agent(mock_context, "EditorAgent")
+        # Create agents directly with executor (not via app.get_agent)
+        writer = DurableAIAgent(executor, "WriterAgent")
+        editor = DurableAIAgent(executor, "EditorAgent")
 
         writer_thread = writer.get_new_thread()
         editor_thread = editor.get_new_thread()
@@ -554,62 +346,11 @@ def mock_call_entity_side_effect(entity_id: Any, operation: str, input_data: dic
         # Verify different entity IDs were used
         assert len(entity_calls) == 2
-        # EntityId format is @dafx-agentname@guid (lowercased agent name with dafx- prefix)
-        assert entity_calls[0] == "@dafx-writeragent@writer-guid-001"
-        assert entity_calls[1] == "@dafx-editoragent@editor-guid-002"
-
-
-class TestAgentThreadSerialization:
-    """Test that AgentThread can be serialized for orchestration state."""
-
-    async def test_agent_thread_serialize(self) -> None:
-        """Test that AgentThread can be serialized."""
-        thread = AgentThread()
-
-        # Serialize
-        serialized = await thread.serialize()
-
-        assert isinstance(serialized, dict)
-        assert "service_thread_id" in serialized
-
-    async def test_agent_thread_deserialize(self) -> None:
-        """Test that AgentThread can be deserialized."""
-        thread = AgentThread()
-        serialized = await thread.serialize()
-
-        # Deserialize
-        restored = await AgentThread.deserialize(serialized)
-
-        assert isinstance(restored, AgentThread)
-        assert restored.service_thread_id == thread.service_thread_id
-
-    async def test_durable_agent_thread_serialization(self) -> None:
-        """Test that DurableAgentThread persists session metadata during serialization."""
-        mock_context = Mock()
-        mock_context.instance_id = "test-instance-999"
-        mock_context.new_uuid = Mock(return_value="test-guid-999")
-
-        agent = DurableAIAgent(mock_context, "TestAgent")
-        thread = agent.get_new_thread()
-
-        assert isinstance(thread, DurableAgentThread)
-        # Verify custom attribute and property exist
-        assert thread.session_id is not None
-        session_id = thread.session_id
-        assert isinstance(session_id, AgentSessionId)
-        assert session_id.name == "TestAgent"
-        assert session_id.key == "test-guid-999"
-
-        # Standard serialization should still work
-        serialized = await thread.serialize()
-        assert isinstance(serialized, dict)
-        assert serialized.get("durable_session_id") == str(session_id)
-
-        # After deserialization, we'd need to restore the custom attribute
-        # This would be handled by the orchestration framework
-        restored = await DurableAgentThread.deserialize(serialized)
-        assert isinstance(restored, DurableAgentThread)
-        assert restored.session_id == session_id
+        # EntityId format is @dafx-agentname@uuid_hex (lowercased agent name with dafx- prefix)
+        expected_writer_id = f"@dafx-writeragent@{uuid_hexes[0]}"
+        expected_editor_id = f"@dafx-editoragent@{uuid_hexes[1]}"
+        assert entity_calls[0] == expected_writer_id
+        assert entity_calls[1] == expected_editor_id
 
 
 if __name__ == "__main__":
diff --git a/python/packages/core/agent_framework/azure/__init__.py b/python/packages/core/agent_framework/azure/__init__.py
index 21ca71d85b..93d7dc1e0d 100644
--- a/python/packages/core/agent_framework/azure/__init__.py
+++ b/python/packages/core/agent_framework/azure/__init__.py
@@ -4,9 +4,9 @@ from typing import Any
 
 _IMPORTS: dict[str, tuple[str, str]] = {
-    "AgentCallbackContext": ("agent_framework_azurefunctions", "agent-framework-azurefunctions"),
+    "AgentCallbackContext": ("agent_framework_durabletask", "agent-framework-durabletask"),
     "AgentFunctionApp": ("agent_framework_azurefunctions", "agent-framework-azurefunctions"),
-    "AgentResponseCallbackProtocol": ("agent_framework_azurefunctions", "agent-framework-azurefunctions"),
+    "AgentResponseCallbackProtocol": ("agent_framework_durabletask", "agent-framework-durabletask"),
     "AzureAIAgentClient": ("agent_framework_azure_ai", "agent-framework-azure-ai"),
     "AzureAIAgentOptions": ("agent_framework_azure_ai", "agent-framework-azure-ai"),
     "AzureAIProjectAgentOptions": ("agent_framework_azure_ai", "agent-framework-azure-ai"),
@@ -24,7 +24,10 @@
     "AzureOpenAIResponsesOptions": ("agent_framework.azure._responses_client", "agent-framework-core"),
     "AzureOpenAISettings": ("agent_framework.azure._shared", "agent-framework-core"),
     "AzureUserSecurityContext": ("agent_framework.azure._chat_client", "agent-framework-core"),
-    "DurableAIAgent": ("agent_framework_azurefunctions", "agent-framework-azurefunctions"),
+    "DurableAIAgent": ("agent_framework_durabletask", "agent-framework-durabletask"),
+    "DurableAIAgentClient": ("agent_framework_durabletask", "agent-framework-durabletask"),
+    "DurableAIAgentOrchestrationContext": ("agent_framework_durabletask", "agent-framework-durabletask"),
+    "DurableAIAgentWorker": ("agent_framework_durabletask", "agent-framework-durabletask"),
     "get_entra_auth_token": ("agent_framework.azure._entra_id_authentication", "agent-framework-core"),
 }
diff --git a/python/packages/core/agent_framework/azure/__init__.pyi b/python/packages/core/agent_framework/azure/__init__.pyi
index a7e311f315..a819019039 100644
--- a/python/packages/core/agent_framework/azure/__init__.pyi
+++ b/python/packages/core/agent_framework/azure/__init__.pyi
@@ -9,11 +9,14 @@ from agent_framework_azure_ai import (
     AzureAISettings,
 )
 from agent_framework_azure_ai_search import AzureAISearchContextProvider, AzureAISearchSettings
-from agent_framework_azurefunctions import (
+from agent_framework_azurefunctions import AgentFunctionApp
+from agent_framework_durabletask import (
     AgentCallbackContext,
-    AgentFunctionApp,
     AgentResponseCallbackProtocol,
     DurableAIAgent,
+    DurableAIAgentClient,
+    DurableAIAgentOrchestrationContext,
+    DurableAIAgentWorker,
 )
 
 from agent_framework.azure._assistants_client import AzureOpenAIAssistantsClient
@@ -39,5 +42,8 @@ __all__ = [
     "AzureOpenAIResponsesClient",
     "AzureOpenAISettings",
     "DurableAIAgent",
+    "DurableAIAgentClient",
+    "DurableAIAgentOrchestrationContext",
+    "DurableAIAgentWorker",
     "get_entra_auth_token",
 ]
diff --git a/python/packages/core/pyproject.toml b/python/packages/core/pyproject.toml
index 11376e7f69..74a9ffebd7 100644
--- a/python/packages/core/pyproject.toml
+++ b/python/packages/core/pyproject.toml
@@ -50,6 +50,7 @@ all = [
     "agent-framework-copilotstudio",
     "agent-framework-declarative",
     "agent-framework-devui",
+    "agent-framework-durabletask",
     "agent-framework-github-copilot",
     "agent-framework-lab",
     "agent-framework-mem0",
diff --git a/python/packages/durabletask/LICENSE b/python/packages/durabletask/LICENSE
new file mode 100644
index 0000000000..9e841e7a26
--- /dev/null
+++ b/python/packages/durabletask/LICENSE
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) Microsoft Corporation.
+ + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE diff --git a/python/packages/durabletask/README.md b/python/packages/durabletask/README.md new file mode 100644 index 0000000000..083de30871 --- /dev/null +++ b/python/packages/durabletask/README.md @@ -0,0 +1,31 @@ +# Get Started with Microsoft Agent Framework Durable Task + +[![PyPI](https://img.shields.io/pypi/v/agent-framework-durabletask)](https://pypi.org/project/agent-framework-durabletask/) + +Please install this package via pip: + +```bash +pip install agent-framework-durabletask --pre +``` + +## Durable Task Integration + +The durable task integration lets you host Microsoft Agent Framework agents using the [Durable Task](https://github.com/microsoft/durabletask-python) framework so they can persist state, replay conversation history, and recover from failures automatically. + +### Basic Usage Example + +```python +from durabletask import TaskHubGrpcWorker +from agent_framework.azure import DurableAIAgentWorker + +# Create the worker +with TaskHubGrpcWorker(...) as worker: + + # Register the agent worker wrapper + agent_worker = DurableAIAgentWorker(worker) + + # Register the agent + agent_worker.add_agent(my_agent) +``` + +For more details, review the Python [README](https://github.com/microsoft/agent-framework/tree/main/python/README.md) and the samples directory. diff --git a/python/packages/durabletask/agent_framework_durabletask/__init__.py b/python/packages/durabletask/agent_framework_durabletask/__init__.py new file mode 100644 index 0000000000..84a1361d9a --- /dev/null +++ b/python/packages/durabletask/agent_framework_durabletask/__init__.py @@ -0,0 +1,108 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +"""Durable Task integration for Microsoft Agent Framework.""" + +import importlib.metadata + +from ._callbacks import AgentCallbackContext, AgentResponseCallbackProtocol +from ._client import DurableAIAgentClient +from ._constants import ( + DEFAULT_MAX_POLL_RETRIES, + DEFAULT_POLL_INTERVAL_SECONDS, + MIMETYPE_APPLICATION_JSON, + MIMETYPE_TEXT_PLAIN, + REQUEST_RESPONSE_FORMAT_JSON, + REQUEST_RESPONSE_FORMAT_TEXT, + THREAD_ID_FIELD, + THREAD_ID_HEADER, + WAIT_FOR_RESPONSE_FIELD, + WAIT_FOR_RESPONSE_HEADER, + ApiResponseFields, + ContentTypes, + DurableStateFields, +) +from ._durable_agent_state import ( + DurableAgentState, + DurableAgentStateContent, + DurableAgentStateData, + DurableAgentStateDataContent, + DurableAgentStateEntry, + DurableAgentStateEntryJsonType, + DurableAgentStateErrorContent, + DurableAgentStateFunctionCallContent, + DurableAgentStateFunctionResultContent, + DurableAgentStateHostedFileContent, + DurableAgentStateHostedVectorStoreContent, + DurableAgentStateMessage, + DurableAgentStateRequest, + DurableAgentStateResponse, + DurableAgentStateTextContent, + DurableAgentStateTextReasoningContent, + DurableAgentStateUnknownContent, + DurableAgentStateUriContent, + DurableAgentStateUsage, + DurableAgentStateUsageContent, +) +from ._entities import AgentEntity, AgentEntityStateProviderMixin +from ._executors import DurableAgentExecutor +from ._models import AgentSessionId, DurableAgentThread, RunRequest +from ._orchestration_context import DurableAIAgentOrchestrationContext +from ._response_utils import ensure_response_format, load_agent_response +from ._shim import DurableAIAgent +from ._worker import DurableAIAgentWorker + +try: + __version__ = importlib.metadata.version(__name__) +except importlib.metadata.PackageNotFoundError: + __version__ = "0.0.0" # Fallback for development mode + +__all__ = [ + "DEFAULT_MAX_POLL_RETRIES", + "DEFAULT_POLL_INTERVAL_SECONDS", + "MIMETYPE_APPLICATION_JSON", + "MIMETYPE_TEXT_PLAIN", + "REQUEST_RESPONSE_FORMAT_JSON", + "REQUEST_RESPONSE_FORMAT_TEXT", + "THREAD_ID_FIELD", + "THREAD_ID_HEADER", + "WAIT_FOR_RESPONSE_FIELD", + "WAIT_FOR_RESPONSE_HEADER", + "AgentCallbackContext", + "AgentEntity", + "AgentEntityStateProviderMixin", + "AgentResponseCallbackProtocol", + "AgentSessionId", + "ApiResponseFields", + "ContentTypes", + "DurableAIAgent", + "DurableAIAgentClient", + "DurableAIAgentOrchestrationContext", + "DurableAIAgentWorker", + "DurableAgentExecutor", + "DurableAgentState", + "DurableAgentStateContent", + "DurableAgentStateData", + "DurableAgentStateDataContent", + "DurableAgentStateEntry", + "DurableAgentStateEntryJsonType", + "DurableAgentStateErrorContent", + "DurableAgentStateFunctionCallContent", + "DurableAgentStateFunctionResultContent", + "DurableAgentStateHostedFileContent", + "DurableAgentStateHostedVectorStoreContent", + "DurableAgentStateMessage", + "DurableAgentStateRequest", + "DurableAgentStateResponse", + "DurableAgentStateTextContent", + "DurableAgentStateTextReasoningContent", + "DurableAgentStateUnknownContent", + "DurableAgentStateUriContent", + "DurableAgentStateUsage", + "DurableAgentStateUsageContent", + "DurableAgentThread", + "DurableStateFields", + "RunRequest", + "__version__", + "ensure_response_format", + "load_agent_response", +] diff --git a/python/packages/azurefunctions/agent_framework_azurefunctions/_callbacks.py b/python/packages/durabletask/agent_framework_durabletask/_callbacks.py similarity index 100% rename from python/packages/azurefunctions/agent_framework_azurefunctions/_callbacks.py rename 
to python/packages/durabletask/agent_framework_durabletask/_callbacks.py diff --git a/python/packages/durabletask/agent_framework_durabletask/_client.py b/python/packages/durabletask/agent_framework_durabletask/_client.py new file mode 100644 index 0000000000..258b710c94 --- /dev/null +++ b/python/packages/durabletask/agent_framework_durabletask/_client.py @@ -0,0 +1,90 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Client wrapper for Durable Task Agent Framework. + +This module provides the DurableAIAgentClient class for external clients to interact +with durable agents via gRPC. +""" + +from __future__ import annotations + +from agent_framework import AgentResponse, get_logger +from durabletask.client import TaskHubGrpcClient + +from ._constants import DEFAULT_MAX_POLL_RETRIES, DEFAULT_POLL_INTERVAL_SECONDS +from ._executors import ClientAgentExecutor +from ._shim import DurableAgentProvider, DurableAIAgent + +logger = get_logger("agent_framework.durabletask.client") + + +class DurableAIAgentClient(DurableAgentProvider[AgentResponse]): + """Client wrapper for interacting with durable agents externally. + + This class wraps a durabletask TaskHubGrpcClient and provides a convenient + interface for retrieving and executing durable agents from external contexts. + + Example: + ```python + from durabletask import TaskHubGrpcClient + from agent_framework.azure import DurableAIAgentClient + + # Create the underlying client + client = TaskHubGrpcClient(host_address="localhost:4001") + + # Wrap it with the agent client + agent_client = DurableAIAgentClient(client) + + # Get an agent reference + agent = agent_client.get_agent("assistant") + + # Run the agent (synchronous call that waits for completion) + response = agent.run("Hello, how are you?") + print(response.text) + ``` + """ + + def __init__( + self, + client: TaskHubGrpcClient, + max_poll_retries: int = DEFAULT_MAX_POLL_RETRIES, + poll_interval_seconds: float = DEFAULT_POLL_INTERVAL_SECONDS, + ): + """Initialize the client wrapper. + + Args: + client: The durabletask client instance to wrap + max_poll_retries: Maximum polling attempts when waiting for responses + poll_interval_seconds: Delay in seconds between polling attempts + """ + self._client = client + + # Validate and set polling parameters + self.max_poll_retries = max(1, max_poll_retries) + self.poll_interval_seconds = ( + poll_interval_seconds if poll_interval_seconds > 0 else DEFAULT_POLL_INTERVAL_SECONDS + ) + + self._executor = ClientAgentExecutor(self._client, self.max_poll_retries, self.poll_interval_seconds) + logger.debug("[DurableAIAgentClient] Initialized with client type: %s", type(client).__name__) + + def get_agent(self, agent_name: str) -> DurableAIAgent[AgentResponse]: + """Retrieve a DurableAIAgent shim for the specified agent. + + This method returns a proxy object that can be used to execute the agent. + The actual agent must be registered on a worker with the same name. + + Args: + agent_name: Name of the agent to retrieve (without the dafx- prefix) + + Returns: + DurableAIAgent instance that can be used to run the agent + + Note: + This method does not validate that the agent exists. Validation + will occur when the agent is executed. If the entity doesn't exist, + the execution will fail with an appropriate error. 
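+
+        Example:
+            A minimal sketch (the agent name and message are illustrative; the
+            agent must already be registered on a worker under the same name):
+
+            ```python
+            agent = agent_client.get_agent("assistant")
+            thread = agent.get_new_thread()
+
+            # Fire-and-forget: signal the entity and return an immediate
+            # acceptance response instead of polling for completion.
+            ack = agent.run(
+                "Reindex the knowledge base.",
+                thread=thread,
+                options={"wait_for_response": False},
+            )
+            ```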
+ """ + logger.debug("[DurableAIAgentClient] Creating agent proxy for: %s", agent_name) + + return DurableAIAgent(self._executor, agent_name) diff --git a/python/packages/azurefunctions/agent_framework_azurefunctions/_constants.py b/python/packages/durabletask/agent_framework_durabletask/_constants.py similarity index 100% rename from python/packages/azurefunctions/agent_framework_azurefunctions/_constants.py rename to python/packages/durabletask/agent_framework_durabletask/_constants.py diff --git a/python/packages/azurefunctions/agent_framework_azurefunctions/_durable_agent_state.py b/python/packages/durabletask/agent_framework_durabletask/_durable_agent_state.py similarity index 89% rename from python/packages/azurefunctions/agent_framework_azurefunctions/_durable_agent_state.py rename to python/packages/durabletask/agent_framework_durabletask/_durable_agent_state.py index d33d9ea91c..a72f3fb07f 100644 --- a/python/packages/azurefunctions/agent_framework_azurefunctions/_durable_agent_state.py +++ b/python/packages/durabletask/agent_framework_durabletask/_durable_agent_state.py @@ -30,9 +30,10 @@ from __future__ import annotations import json +from collections.abc import MutableMapping from datetime import datetime, timezone from enum import Enum -from typing import Any, cast +from typing import Any, ClassVar, cast from agent_framework import ( AgentResponse, @@ -43,10 +44,10 @@ ) from dateutil import parser as date_parser -from ._constants import ApiResponseFields, ContentTypes, DurableStateFields +from ._constants import ContentTypes, DurableStateFields from ._models import RunRequest, serialize_response_format -logger = get_logger("agent_framework.azurefunctions.durable_agent_state") +logger = get_logger("agent_framework.durabletask.durable_agent_state") class DurableAgentStateEntryJsonType(str, Enum): @@ -72,7 +73,10 @@ def _parse_created_at(value: Any) -> datetime: except (ValueError, TypeError): pass - logger.warning("Invalid or missing created_at value in durable agent state; defaulting to current UTC time.") + logger.warning( + f"Invalid or missing created_at value in durable agent state; defaulting to current UTC time, {value}", + stack_info=True, + ) return datetime.now(tz=timezone.utc) @@ -258,7 +262,7 @@ def to_ai_content(self) -> Any: """Convert this durable state content back to an agent framework content object. Returns: - An agent framework content object (TextContent, FunctionCallContent, etc.) + An agent framework content object (Content of type `text`, `function_call`, etc.) Raises: NotImplementedError: Must be implemented by subclasses @@ -269,38 +273,42 @@ def to_ai_content(self) -> Any: def from_ai_content(content: Any) -> DurableAgentStateContent: """Create a durable state content object from an agent framework content object. - This factory method maps agent framework content types (TextContent, FunctionCallContent, - etc.) to their corresponding durable state representations. Unknown content types are - wrapped in DurableAgentStateUnknownContent. + This factory method maps agent framework content types to their corresponding durable state representations. + Unknown content types are wrapped in DurableAgentStateUnknownContent. Args: - content: An agent framework content object (TextContent, FunctionCallContent, etc.) + content: An agent framework content object (Content of type `text`, `function_call`, etc.) 
Returns: The corresponding DurableAgentStateContent subclass instance """ # Map AI content type to appropriate DurableAgentStateContent subclass - if isinstance(content, Content) and content.type == "data": - return DurableAgentStateDataContent.from_data_content(content) - if isinstance(content, Content) and content.type == "error": - return DurableAgentStateErrorContent.from_error_content(content) - if isinstance(content, Content) and content.type == "function_call": - return DurableAgentStateFunctionCallContent.from_function_call_content(content) - if isinstance(content, Content) and content.type == "function_result": - return DurableAgentStateFunctionResultContent.from_function_result_content(content) - if isinstance(content, Content) and content.type == "hosted_file": - return DurableAgentStateHostedFileContent.from_hosted_file_content(content) - if isinstance(content, Content) and content.type == "hosted_vector_store": - return DurableAgentStateHostedVectorStoreContent.from_hosted_vector_store_content(content) - if isinstance(content, Content) and content.type == "text": - return DurableAgentStateTextContent.from_text_content(content) - if isinstance(content, Content) and content.type == "text_reasoning": - return DurableAgentStateTextReasoningContent.from_text_reasoning_content(content) - if isinstance(content, Content) and content.type == "uri": - return DurableAgentStateUriContent.from_uri_content(content) - if isinstance(content, Content) and content.type == "usage": - return DurableAgentStateUsageContent.from_usage_content(content) - return DurableAgentStateUnknownContent.from_unknown_content(content) + if not isinstance(content, Content): + return DurableAgentStateUnknownContent.from_unknown_content(content) + + match content.type: + case "data": + return DurableAgentStateDataContent.from_data_content(content) + case "error": + return DurableAgentStateErrorContent.from_error_content(content) + case "function_call": + return DurableAgentStateFunctionCallContent.from_function_call_content(content) + case "function_result": + return DurableAgentStateFunctionResultContent.from_function_result_content(content) + case "hosted_file": + return DurableAgentStateHostedFileContent.from_hosted_file_content(content) + case "hosted_vector_store": + return DurableAgentStateHostedVectorStoreContent.from_hosted_vector_store_content(content) + case "text": + return DurableAgentStateTextContent.from_text_content(content) + case "reasoning": + return DurableAgentStateTextReasoningContent.from_text_reasoning_content(content) + case "uri": + return DurableAgentStateUriContent.from_uri_content(content) + case "usage": + return DurableAgentStateUsageContent.from_usage_content(content) + case _: + return DurableAgentStateUnknownContent.from_unknown_content(content) # Core state classes @@ -439,7 +447,7 @@ def message_count(self) -> int: """Get the count of conversation entries (requests + responses).""" return len(self.data.conversation_history) - def try_get_agent_response(self, correlation_id: str) -> dict[str, Any] | None: + def try_get_agent_response(self, correlation_id: str) -> AgentResponse | None: """Try to get an agent response by correlation ID. 
This method searches the conversation history for a response entry matching the given @@ -461,14 +469,8 @@ def try_get_agent_response(self, correlation_id: str) -> dict[str, Any] | None: for entry in self.data.conversation_history: if entry.correlation_id == correlation_id and isinstance(entry, DurableAgentStateResponse): # Found the entry, extract response data - # Get the text content from assistant messages only - content = "\n".join(message.text for message in entry.messages if message.text) + return DurableAgentStateResponse.to_run_response(entry) - return { - ApiResponseFields.CONTENT: content, - ApiResponseFields.MESSAGE_COUNT: self.message_count, - ApiResponseFields.CORRELATION_ID: correlation_id, - } return None @@ -689,7 +691,22 @@ def from_run_response(correlation_id: str, response: AgentResponse) -> DurableAg correlation_id=correlation_id, created_at=_parse_created_at(response.created_at), messages=[DurableAgentStateMessage.from_chat_message(m) for m in response.messages], - usage=DurableAgentStateUsage.from_usage(response.usage_details), # type: ignore[arg-type] + usage=DurableAgentStateUsage.from_usage(response.usage_details), + ) + + @staticmethod + def to_run_response( + response_entry: DurableAgentStateResponse, + ) -> AgentResponse: + """Converts a DurableAgentStateResponse back to an AgentResponse.""" + messages = [m.to_chat_message() for m in response_entry.messages] + + usage_details = response_entry.usage.to_usage_details() if response_entry.usage is not None else UsageDetails() + + return AgentResponse( + created_at=response_entry.created_at.isoformat(), + messages=messages, + usage_details=usage_details, ) @@ -859,7 +876,9 @@ def to_dict(self) -> dict[str, Any]: @staticmethod def from_data_content(content: Content) -> DurableAgentStateDataContent: - return DurableAgentStateDataContent(uri=content.uri, media_type=content.media_type) # type: ignore[arg-type] + if content.uri is None: + raise ValueError("uri is required for data content") + return DurableAgentStateDataContent(uri=content.uri, media_type=content.media_type) def to_ai_content(self) -> Content: return Content.from_uri(uri=self.uri, media_type=self.media_type) @@ -940,6 +959,10 @@ def to_dict(self) -> dict[str, Any]: @staticmethod def from_function_call_content(content: Content) -> DurableAgentStateFunctionCallContent: + if content.call_id is None: + raise ValueError("call_id is required for function call content") + if content.name is None: + raise ValueError("name is required for function call content") # Ensure arguments is a dict; parse string if needed arguments: dict[str, Any] = {} if content.arguments: @@ -952,7 +975,7 @@ def from_function_call_content(content: Content) -> DurableAgentStateFunctionCal except json.JSONDecodeError: arguments = {} - return DurableAgentStateFunctionCallContent(call_id=content.call_id, name=content.name, arguments=arguments) # type: ignore[arg-type] + return DurableAgentStateFunctionCallContent(call_id=content.call_id, name=content.name, arguments=arguments) def to_ai_content(self) -> Content: return Content.from_function_call(call_id=self.call_id, name=self.name, arguments=self.arguments) @@ -988,7 +1011,9 @@ def to_dict(self) -> dict[str, Any]: @staticmethod def from_function_result_content(content: Content) -> DurableAgentStateFunctionResultContent: - return DurableAgentStateFunctionResultContent(call_id=content.call_id, result=content.result) # type: ignore[arg-type] + if content.call_id is None: + raise ValueError("call_id is required for function result content") + 
return DurableAgentStateFunctionResultContent(call_id=content.call_id, result=content.result) def to_ai_content(self) -> Content: return Content.from_function_result(call_id=self.call_id, result=self.result) @@ -1016,7 +1041,9 @@ def to_dict(self) -> dict[str, Any]: @staticmethod def from_hosted_file_content(content: Content) -> DurableAgentStateHostedFileContent: - return DurableAgentStateHostedFileContent(file_id=content.file_id) # type: ignore[arg-type] + if content.file_id is None: + raise ValueError("file_id is required for hosted file content") + return DurableAgentStateHostedFileContent(file_id=content.file_id) def to_ai_content(self) -> Content: return Content.from_hosted_file(file_id=self.file_id) @@ -1050,7 +1077,9 @@ def to_dict(self) -> dict[str, Any]: def from_hosted_vector_store_content( content: Content, ) -> DurableAgentStateHostedVectorStoreContent: - return DurableAgentStateHostedVectorStoreContent(vector_store_id=content.vector_store_id) # type: ignore[arg-type] + if content.vector_store_id is None: + raise ValueError("vector_store_id is required for hosted vector store content") + return DurableAgentStateHostedVectorStoreContent(vector_store_id=content.vector_store_id) def to_ai_content(self) -> Content: return Content.from_hosted_vector_store(vector_store_id=self.vector_store_id) @@ -1137,7 +1166,11 @@ def to_dict(self) -> dict[str, Any]: @staticmethod def from_uri_content(content: Content) -> DurableAgentStateUriContent: - return DurableAgentStateUriContent(uri=content.uri, media_type=content.media_type) # type: ignore[arg-type] + if content.uri is None: + raise ValueError("uri is required for uri content") + if content.media_type is None: + raise ValueError("media_type is required for uri content") + return DurableAgentStateUriContent(uri=content.uri, media_type=content.media_type) def to_ai_content(self) -> Content: return Content.from_uri(uri=self.uri, media_type=self.media_type) @@ -1157,6 +1190,14 @@ class DurableAgentStateUsage: extensionData: Optional additional metadata """ + # UsageDetails field name constants (snake_case keys from agent_framework.UsageDetails) + _INPUT_TOKEN_COUNT = "input_token_count" # noqa: S105 # nosec B105 + _OUTPUT_TOKEN_COUNT = "output_token_count" # noqa: S105 # nosec B105 + _TOTAL_TOKEN_COUNT = "total_token_count" # noqa: S105 # nosec B105 + + # Standard fields in UsageDetails that are mapped to dedicated attributes + _STANDARD_USAGE_FIELDS: ClassVar[set[str]] = {_INPUT_TOKEN_COUNT, _OUTPUT_TOKEN_COUNT, _TOTAL_TOKEN_COUNT} + input_token_count: int | None = None output_token_count: int | None = None total_token_count: int | None = None @@ -1194,22 +1235,32 @@ def from_dict(cls, data: dict[str, Any]) -> DurableAgentStateUsage: ) @staticmethod - def from_usage(usage: UsageDetails | dict[str, int] | None) -> DurableAgentStateUsage | None: + def from_usage(usage: UsageDetails | MutableMapping[str, Any] | None) -> DurableAgentStateUsage | None: if usage is None: return None + + # Collect all non-standard fields into extension_data + extension_data: dict[str, Any] = { + k: v for k, v in usage.items() if k not in DurableAgentStateUsage._STANDARD_USAGE_FIELDS + } + return DurableAgentStateUsage( - input_token_count=usage.get("input_token_count"), - output_token_count=usage.get("output_token_count"), - total_token_count=usage.get("total_token_count"), + input_token_count=cast("int | None", usage.get(DurableAgentStateUsage._INPUT_TOKEN_COUNT)), + output_token_count=cast("int | None", usage.get(DurableAgentStateUsage._OUTPUT_TOKEN_COUNT)), + 
total_token_count=cast("int | None", usage.get(DurableAgentStateUsage._TOTAL_TOKEN_COUNT)), + extensionData=extension_data if extension_data else None, ) def to_usage_details(self) -> UsageDetails: # Convert back to AI SDK UsageDetails - return { - "input_token_count": self.input_token_count, - "output_token_count": self.output_token_count, - "total_token_count": self.total_token_count, - } + result = UsageDetails( + input_token_count=self.input_token_count, + output_token_count=self.output_token_count, + total_token_count=self.total_token_count, + ) + if self.extensionData: + result.update(self.extensionData) # type: ignore[typeddict-item] + return result class DurableAgentStateUsageContent(DurableAgentStateContent): diff --git a/python/packages/durabletask/agent_framework_durabletask/_entities.py b/python/packages/durabletask/agent_framework_durabletask/_entities.py new file mode 100644 index 0000000000..1f816b6b9d --- /dev/null +++ b/python/packages/durabletask/agent_framework_durabletask/_entities.py @@ -0,0 +1,347 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Durable Task entity implementations for Microsoft Agent Framework.""" + +from __future__ import annotations + +import inspect +from collections.abc import AsyncIterable +from typing import Any, cast + +from agent_framework import ( + AgentProtocol, + AgentResponse, + AgentResponseUpdate, + ChatMessage, + Content, + Role, + get_logger, +) +from durabletask.entities import DurableEntity + +from ._callbacks import AgentCallbackContext, AgentResponseCallbackProtocol +from ._durable_agent_state import ( + DurableAgentState, + DurableAgentStateEntry, + DurableAgentStateRequest, + DurableAgentStateResponse, +) +from ._models import RunRequest + +logger = get_logger("agent_framework.durabletask.entities") + + +class AgentEntityStateProviderMixin: + """Mixin implementing durable agent state caching + (de)serialization + persistence. + + Concrete classes must implement: + - _get_state_dict(): fetch raw persisted state dict (default should be {}) + - _set_state_dict(): persist raw state dict + - _get_thread_id_from_entity(): fetch the thread ID from the underlying context + """ + + _state_cache: DurableAgentState | None = None + + def _get_state_dict(self) -> dict[str, Any]: + raise NotImplementedError + + def _set_state_dict(self, state: dict[str, Any]) -> None: + raise NotImplementedError + + def _get_thread_id_from_entity(self) -> str: + raise NotImplementedError + + @property + def thread_id(self) -> str: + return self._get_thread_id_from_entity() + + @property + def state(self) -> DurableAgentState: + if self._state_cache is None: + raw_state = self._get_state_dict() + self._state_cache = DurableAgentState.from_dict(raw_state) if raw_state else DurableAgentState() + return self._state_cache + + @state.setter + def state(self, value: DurableAgentState) -> None: + self._state_cache = value + self.persist_state() + + def persist_state(self) -> None: + """Persist the current state to the underlying storage provider.""" + if self._state_cache is None: + self._state_cache = DurableAgentState() + self._set_state_dict(self._state_cache.to_dict()) + + def reset(self) -> None: + """Clear conversation history by resetting state to a fresh DurableAgentState.""" + self._state_cache = DurableAgentState() + self.persist_state() + logger.debug("[AgentEntityStateProviderMixin.reset] State reset complete") + + +class AgentEntity: + """Platform-agnostic agent execution logic. 
+ + This class encapsulates the core logic for executing an agent within a durable entity context. + """ + + agent: AgentProtocol + callback: AgentResponseCallbackProtocol | None + + def __init__( + self, + agent: AgentProtocol, + callback: AgentResponseCallbackProtocol | None = None, + *, + state_provider: AgentEntityStateProviderMixin, + ) -> None: + self.agent = agent + self.callback = callback + self._state_provider = state_provider + + logger.debug("[AgentEntity] Initialized with agent type: %s", type(agent).__name__) + + @property + def state(self) -> DurableAgentState: + return self._state_provider.state + + @state.setter + def state(self, value: DurableAgentState) -> None: + self._state_provider.state = value + + def persist_state(self) -> None: + self._state_provider.persist_state() + + def reset(self) -> None: + self._state_provider.reset() + + def _is_error_response(self, entry: DurableAgentStateEntry) -> bool: + """Check if a conversation history entry is an error response.""" + if isinstance(entry, DurableAgentStateResponse): + return entry.is_error + return False + + async def run( + self, + request: RunRequest | dict[str, Any] | str, + ) -> AgentResponse: + """Execute the agent with a message.""" + if isinstance(request, str): + run_request = RunRequest.from_json(request) + elif isinstance(request, dict): + run_request = RunRequest.from_dict(request) + else: + run_request = request + + message = run_request.message + thread_id = self._state_provider.thread_id + correlation_id = run_request.correlation_id + if not thread_id: + raise ValueError("Entity State Provider must provide a thread_id") + options: dict[str, Any] = dict(run_request.options) + options.setdefault("response_format", run_request.response_format) + if not run_request.enable_tool_calls: + options.setdefault("tools", None) + + logger.debug("[AgentEntity.run] Received ThreadId %s Message: %s", thread_id, run_request) + + state_request = DurableAgentStateRequest.from_run_request(run_request) + self.state.data.conversation_history.append(state_request) + + try: + chat_messages: list[ChatMessage] = [ + m.to_chat_message() + for entry in self.state.data.conversation_history + if not self._is_error_response(entry) + for m in entry.messages + ] + + run_kwargs: dict[str, Any] = {"messages": chat_messages, "options": options} + + agent_run_response: AgentResponse = await self._invoke_agent( + run_kwargs=run_kwargs, + correlation_id=correlation_id, + thread_id=thread_id, + request_message=message, + ) + + state_response = DurableAgentStateResponse.from_run_response(correlation_id, agent_run_response) + self.state.data.conversation_history.append(state_response) + self.persist_state() + + return agent_run_response + + except Exception as exc: + logger.exception("[AgentEntity.run] Agent execution failed.") + + error_message = ChatMessage( + role=Role.ASSISTANT, contents=[Content.from_error(message=str(exc), error_code=type(exc).__name__)] + ) + error_response = AgentResponse(messages=[error_message]) + + error_state_response = DurableAgentStateResponse.from_run_response(correlation_id, error_response) + error_state_response.is_error = True + self.state.data.conversation_history.append(error_state_response) + self.persist_state() + + return error_response + + async def _invoke_agent( + self, + run_kwargs: dict[str, Any], + correlation_id: str, + thread_id: str, + request_message: str, + ) -> AgentResponse: + """Execute the agent, preferring streaming when available.""" + callback_context: AgentCallbackContext | None = None 
+ if self.callback is not None: + callback_context = self._build_callback_context( + correlation_id=correlation_id, + thread_id=thread_id, + request_message=request_message, + ) + + run_stream_callable = getattr(self.agent, "run_stream", None) + if callable(run_stream_callable): + try: + stream_candidate = run_stream_callable(**run_kwargs) + if inspect.isawaitable(stream_candidate): + stream_candidate = await stream_candidate + + return await self._consume_stream( + stream=cast(AsyncIterable[AgentResponseUpdate], stream_candidate), + callback_context=callback_context, + ) + except TypeError as type_error: + if "__aiter__" not in str(type_error): + raise + logger.debug( + "run_stream returned a non-async result; falling back to run(): %s", + type_error, + ) + except Exception as stream_error: + logger.warning( + "run_stream failed; falling back to run(): %s", + stream_error, + exc_info=True, + ) + else: + logger.debug("Agent does not expose run_stream; falling back to run().") + + agent_run_response = await self._invoke_non_stream(run_kwargs) + await self._notify_final_response(agent_run_response, callback_context) + return agent_run_response + + async def _consume_stream( + self, + stream: AsyncIterable[AgentResponseUpdate], + callback_context: AgentCallbackContext | None = None, + ) -> AgentResponse: + """Consume streaming responses and build the final AgentResponse.""" + updates: list[AgentResponseUpdate] = [] + + async for update in stream: + updates.append(update) + await self._notify_stream_update(update, callback_context) + + if updates: + response = AgentResponse.from_agent_run_response_updates(updates) + else: + logger.debug("[AgentEntity] No streaming updates received; creating empty response") + response = AgentResponse(messages=[]) + + await self._notify_final_response(response, callback_context) + return response + + async def _invoke_non_stream(self, run_kwargs: dict[str, Any]) -> AgentResponse: + """Invoke the agent without streaming support.""" + run_callable = getattr(self.agent, "run", None) + if run_callable is None or not callable(run_callable): + raise AttributeError("Agent does not implement run() method") + + result = run_callable(**run_kwargs) + if inspect.isawaitable(result): + result = await result + + if not isinstance(result, AgentResponse): + raise TypeError(f"Agent run() must return an AgentResponse instance; received {type(result).__name__}") + + return result + + async def _notify_stream_update( + self, + update: AgentResponseUpdate, + context: AgentCallbackContext | None, + ) -> None: + """Invoke the streaming callback if one is registered.""" + if self.callback is None or context is None: + return + + try: + callback_result = self.callback.on_streaming_response_update(update, context) + if inspect.isawaitable(callback_result): + await callback_result + except Exception as exc: + logger.warning( + "[AgentEntity] Streaming callback raised an exception: %s", + exc, + exc_info=True, + ) + + async def _notify_final_response( + self, + response: AgentResponse, + context: AgentCallbackContext | None, + ) -> None: + """Invoke the final response callback if one is registered.""" + if self.callback is None or context is None: + return + + try: + callback_result = self.callback.on_agent_response(response, context) + if inspect.isawaitable(callback_result): + await callback_result + except Exception as exc: + logger.warning( + "[AgentEntity] Response callback raised an exception: %s", + exc, + exc_info=True, + ) + + def _build_callback_context( + self, + 
correlation_id: str, + thread_id: str, + request_message: str, + ) -> AgentCallbackContext: + """Create the callback context provided to consumers.""" + agent_name = getattr(self.agent, "name", None) or type(self.agent).__name__ + return AgentCallbackContext( + agent_name=agent_name, + correlation_id=correlation_id, + thread_id=thread_id, + request_message=request_message, + ) + + +class DurableTaskEntityStateProvider(DurableEntity, AgentEntityStateProviderMixin): + """DurableTask Durable Entity state provider for AgentEntity. + + This class utilizes the Durable Entity context from `durabletask` package + to get and set the state of the agent entity. + """ + + def __init__(self) -> None: + super().__init__() + + def _get_state_dict(self) -> dict[str, Any]: + raw = self.get_state(dict, default={}) + return cast(dict[str, Any], raw) + + def _set_state_dict(self, state: dict[str, Any]) -> None: + self.set_state(state) + + def _get_thread_id_from_entity(self) -> str: + return self.entity_context.entity_id.key diff --git a/python/packages/durabletask/agent_framework_durabletask/_executors.py b/python/packages/durabletask/agent_framework_durabletask/_executors.py new file mode 100644 index 0000000000..15bbb4ecb3 --- /dev/null +++ b/python/packages/durabletask/agent_framework_durabletask/_executors.py @@ -0,0 +1,516 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Provider strategies for Durable Agent execution. + +These classes are internal execution strategies used by the DurableAIAgent shim. +They are intentionally separate from the public client/orchestration APIs to keep +only `get_agent` exposed to consumers. Executors implement the execution contract +and are injected into the shim. +""" + +from __future__ import annotations + +import time +import uuid +from abc import ABC, abstractmethod +from datetime import datetime, timezone +from typing import Any, Generic, TypeVar + +from agent_framework import AgentResponse, AgentThread, ChatMessage, Content, Role, get_logger +from durabletask.client import TaskHubGrpcClient +from durabletask.entities import EntityInstanceId +from durabletask.task import CompletableTask, CompositeTask, OrchestrationContext, Task +from pydantic import BaseModel + +from ._constants import DEFAULT_MAX_POLL_RETRIES, DEFAULT_POLL_INTERVAL_SECONDS +from ._durable_agent_state import DurableAgentState +from ._models import AgentSessionId, DurableAgentThread, RunRequest +from ._response_utils import ensure_response_format, load_agent_response + +logger = get_logger("agent_framework.durabletask.executors") + +# TypeVar for the task type returned by executors +TaskT = TypeVar("TaskT") + + +class DurableAgentTask(CompositeTask[AgentResponse], CompletableTask[AgentResponse]): + """A custom Task that wraps entity calls and provides typed AgentResponse results. + + This task wraps the underlying entity call task and intercepts its completion + to convert the raw result into a typed AgentResponse object. + + When yielded in an orchestration, this task returns an AgentResponse: + response: AgentResponse = yield durable_agent_task + """ + + def __init__( + self, + entity_task: CompletableTask[Any], + response_format: type[BaseModel] | None, + correlation_id: str, + ): + """Initialize the DurableAgentTask. 
+
+        Args:
+            entity_task: The underlying entity call task
+            response_format: Optional Pydantic model for response parsing
+            correlation_id: Correlation ID for logging
+        """
+        self._response_format = response_format
+        self._correlation_id = correlation_id
+        super().__init__([entity_task])  # type: ignore
+
+    def on_child_completed(self, task: Task[Any]) -> None:
+        """Handle completion of the underlying entity task.
+
+        Args:
+            task: The entity call task that just completed
+        """
+        if self.is_complete:
+            return
+
+        if task.is_failed:
+            # Propagate the failure - pass the original exception directly
+            self.fail("call_entity Task failed", task.get_exception())
+            return
+
+        # Task succeeded - transform the raw result
+        raw_result = task.get_result()
+        logger.debug(
+            "[DurableAgentTask] Converting raw result for correlation_id %s",
+            self._correlation_id,
+        )
+
+        try:
+            response = load_agent_response(raw_result)
+
+            if self._response_format is not None:
+                ensure_response_format(
+                    self._response_format,
+                    self._correlation_id,
+                    response,
+                )
+
+            # Set the typed AgentResponse as this task's result
+            self.complete(response)
+
+        except Exception as ex:
+            err_msg = "[DurableAgentTask] Failed to convert result for correlation_id: " + self._correlation_id
+            logger.exception(err_msg)
+            self.fail(err_msg, ex)
+
+
+class DurableAgentExecutor(ABC, Generic[TaskT]):
+    """Abstract base class for durable agent execution strategies.
+
+    Type Parameters:
+        TaskT: The task type returned by this executor
+    """
+
+    @abstractmethod
+    def run_durable_agent(
+        self,
+        agent_name: str,
+        run_request: RunRequest,
+        thread: AgentThread | None = None,
+    ) -> TaskT:
+        """Execute the durable agent.
+
+        Returns:
+            TaskT: The task type specific to this executor implementation
+        """
+        raise NotImplementedError
+
+    def get_new_thread(self, agent_name: str, **kwargs: Any) -> DurableAgentThread:
+        """Create a new DurableAgentThread with a random session ID."""
+        session_id = self._create_session_id(agent_name)
+        return DurableAgentThread.from_session_id(session_id, **kwargs)
+
+    def _create_session_id(
+        self,
+        agent_name: str,
+        thread: AgentThread | None = None,
+    ) -> AgentSessionId:
+        """Create the AgentSessionId for the execution."""
+        if isinstance(thread, DurableAgentThread) and thread.session_id is not None:
+            return thread.session_id
+        # Create a new session ID - either no thread was provided or it's a regular AgentThread
+        key = self.generate_unique_id()
+        return AgentSessionId(name=agent_name, key=key)
+
+    def generate_unique_id(self) -> str:
+        """Generate a new unique ID."""
+        return uuid.uuid4().hex
+
+    def get_run_request(
+        self,
+        message: str,
+        *,
+        options: dict[str, Any] | None = None,
+    ) -> RunRequest:
+        """Create a RunRequest from a message and options."""
+        correlation_id = self.generate_unique_id()
+
+        # Create a copy to avoid modifying the caller's dict
+        opts = dict(options) if options else {}
+
+        # Extract and remove known keys from the options copy
+        response_format = opts.pop("response_format", None)
+        enable_tool_calls = opts.pop("enable_tool_calls", True)
+        wait_for_response = opts.pop("wait_for_response", True)
+
+        return RunRequest(
+            message=message,
+            response_format=response_format,
+            enable_tool_calls=enable_tool_calls,
+            wait_for_response=wait_for_response,
+            correlation_id=correlation_id,
+            options=opts,
+        )
+
+    def _create_acceptance_response(self, correlation_id: str) -> AgentResponse:
+        """Create an acceptance response for fire-and-forget mode.
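+
+        The response contains a single system message that records the
+        correlation ID, so callers can correlate the eventual result with
+        this request.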
+ + Args: + correlation_id: Correlation ID for tracking the request + + Returns: + AgentResponse: Acceptance response with correlation ID + """ + acceptance_message = ChatMessage( + role=Role.SYSTEM, + contents=[ + Content.from_text( + f"Request accepted for processing (correlation_id: {correlation_id}). " + f"Agent is executing in the background. " + f"Retrieve response via your configured streaming or callback mechanism." + ) + ], + ) + return AgentResponse( + messages=[acceptance_message], + created_at=datetime.now(timezone.utc).isoformat(), + ) + + +class ClientAgentExecutor(DurableAgentExecutor[AgentResponse]): + """Execution strategy for external clients. + + Note: Returns AgentResponse directly since the execution + is blocking until response is available via polling + as per the design of TaskHubGrpcClient. + """ + + def __init__( + self, + client: TaskHubGrpcClient, + max_poll_retries: int = DEFAULT_MAX_POLL_RETRIES, + poll_interval_seconds: float = DEFAULT_POLL_INTERVAL_SECONDS, + ): + self._client = client + self.max_poll_retries = max_poll_retries + self.poll_interval_seconds = poll_interval_seconds + + def run_durable_agent( + self, + agent_name: str, + run_request: RunRequest, + thread: AgentThread | None = None, + ) -> AgentResponse: + """Execute the agent via the durabletask client. + + Signals the agent entity with a message request, then polls the entity + state to retrieve the response once processing is complete. + + Note: This is a blocking/synchronous operation (in line with how + TaskHubGrpcClient works) that polls until a response is available or + timeout occurs. + + Args: + agent_name: Name of the agent to execute + run_request: The run request containing message and optional response format + thread: Optional conversation thread (creates new if not provided) + + Returns: + AgentResponse: The agent's response after execution completes, or an immediate + acknowledgement if wait_for_response is False + """ + # Signal the entity with the request + entity_id = self._signal_agent_entity(agent_name, run_request, thread) + + # If fire-and-forget mode, return immediately without polling + if not run_request.wait_for_response: + logger.info( + "[ClientAgentExecutor] Fire-and-forget mode: request signaled (correlation: %s)", + run_request.correlation_id, + ) + return self._create_acceptance_response(run_request.correlation_id) + + # Poll for the response + agent_response = self._poll_for_agent_response(entity_id, run_request.correlation_id) + + # Handle and return the result + return self._handle_agent_response(agent_response, run_request.response_format, run_request.correlation_id) + + def _signal_agent_entity( + self, + agent_name: str, + run_request: RunRequest, + thread: AgentThread | None, + ) -> EntityInstanceId: + """Signal the agent entity with a run request. 
+ + Args: + agent_name: Name of the agent to execute + run_request: The run request containing message and optional response format + thread: Optional conversation thread + + Returns: + entity_id + """ + # Get or create session ID + session_id = self._create_session_id(agent_name, thread) + + # Create the entity ID + entity_id = EntityInstanceId( + entity=session_id.entity_name, + key=session_id.key, + ) + + logger.debug( + "[ClientAgentExecutor] Signaling entity '%s' (session: %s, correlation: %s)", + agent_name, + session_id, + run_request.correlation_id, + ) + + self._client.signal_entity(entity_id, "run", run_request.to_dict()) + return entity_id + + def _poll_for_agent_response( + self, + entity_id: EntityInstanceId, + correlation_id: str, + ) -> AgentResponse | None: + """Poll the entity for a response with retries. + + Args: + entity_id: Entity instance identifier + correlation_id: Correlation ID to track the request + + Returns: + The agent response if found, None if timeout occurs + """ + agent_response = None + + for attempt in range(1, self.max_poll_retries + 1): + # Initial sleep is intentional - give the entity time to process before first poll + time.sleep(self.poll_interval_seconds) + + agent_response = self._poll_entity_for_response(entity_id, correlation_id) + if agent_response is not None: + logger.info( + "[ClientAgentExecutor] Found response (attempt %d/%d, correlation: %s)", + attempt, + self.max_poll_retries, + correlation_id, + ) + break + + logger.debug( + "[ClientAgentExecutor] Response not ready (attempt %d/%d)", + attempt, + self.max_poll_retries, + ) + + return agent_response + + def _handle_agent_response( + self, + agent_response: AgentResponse | None, + response_format: type[BaseModel] | None, + correlation_id: str, + ) -> AgentResponse: + """Handle the agent response or create an error response. + + Args: + agent_response: The response from polling, or None if timeout + response_format: Optional response format for validation + correlation_id: Correlation ID for logging + + Returns: + AgentResponse with either the agent's response or an error message + """ + if agent_response is not None: + try: + # Validate response format if specified + if response_format is not None: + ensure_response_format( + response_format, + correlation_id, + agent_response, + ) + + return agent_response + + except Exception as e: + logger.exception( + "[ClientAgentExecutor] Error converting response for correlation: %s", + correlation_id, + ) + error_message = ChatMessage( + role=Role.SYSTEM, + contents=[ + Content.from_error( + message=f"Error processing agent response: {e}", + error_code="response_processing_error", + ) + ], + ) + else: + logger.warning( + "[ClientAgentExecutor] Timeout after %d attempts (correlation: %s)", + self.max_poll_retries, + correlation_id, + ) + error_message = ChatMessage( + role=Role.SYSTEM, + contents=[ + Content.from_error( + message=f"Timeout waiting for agent response after {self.max_poll_retries} attempts", + error_code="response_timeout", + ) + ], + ) + + return AgentResponse( + messages=[error_message], + created_at=datetime.now(timezone.utc).isoformat(), + ) + + def _poll_entity_for_response( + self, + entity_id: EntityInstanceId, + correlation_id: str, + ) -> AgentResponse | None: + """Poll the entity state for a response matching the correlation ID. 
+
+        Args:
+            entity_id: Entity instance identifier
+            correlation_id: Correlation ID to search for
+
+        Returns:
+            The agent response if found, None otherwise
+        """
+        try:
+            entity_metadata = self._client.get_entity(entity_id, include_state=True)
+
+            if entity_metadata is None:
+                return None
+
+            state_json = entity_metadata.get_state()
+            if not state_json:
+                return None
+
+            state = DurableAgentState.from_json(state_json)
+
+            # Use the helper method to get response by correlation ID
+            return state.try_get_agent_response(correlation_id)
+
+        except Exception as e:
+            logger.warning(
+                "[ClientAgentExecutor] Error reading entity state: %s",
+                e,
+            )
+            return None
+
+
+class OrchestrationAgentExecutor(DurableAgentExecutor[DurableAgentTask]):
+    """Execution strategy for orchestrations (sync/yield)."""
+
+    def __init__(self, context: OrchestrationContext):
+        self._context = context
+        logger.debug("[OrchestrationAgentExecutor] Initialized")
+
+    def generate_unique_id(self) -> str:
+        """Create a new UUID that is safe for replay within an orchestration or operation."""
+        return self._context.new_uuid()
+
+    def get_run_request(
+        self,
+        message: str,
+        *,
+        options: dict[str, Any] | None = None,
+    ) -> RunRequest:
+        """Create the run request, stamping it with the orchestration instance ID.
+
+        Returns:
+            RunRequest: The run request for this orchestration
+        """
+        request = super().get_run_request(
+            message,
+            options=options,
+        )
+        request.orchestration_id = self._context.instance_id
+        return request
+
+    def run_durable_agent(
+        self,
+        agent_name: str,
+        run_request: RunRequest,
+        thread: AgentThread | None = None,
+    ) -> DurableAgentTask:
+        """Execute the agent via orchestration context.
+
+        Calls the agent entity and returns a DurableAgentTask that can be yielded
+        in orchestrations to wait for the entity's response.
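+
+        Example:
+            A minimal sketch of a generator-style orchestrator (names are
+            illustrative; registration with the worker is omitted):
+
+            ```python
+            def orchestrator(ctx, _input):
+                executor = OrchestrationAgentExecutor(ctx)
+                agent = DurableAIAgent(executor, "WriterAgent")
+                thread = agent.get_new_thread()
+                response = yield agent.run("Draft an outline.", thread=thread)
+                return response.text
+            ```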
+ + Args: + agent_name: Name of the agent to execute + run_request: The run request containing message and optional response format + thread: Optional conversation thread (creates new if not provided) + + Returns: + DurableAgentTask: A task wrapping the entity call that yields AgentResponse + """ + # Resolve session + session_id = self._create_session_id(agent_name, thread) + + # Create the entity ID + entity_id = EntityInstanceId( + entity=session_id.entity_name, + key=session_id.key, + ) + + logger.debug( + "[OrchestrationAgentExecutor] correlation_id: %s entity_id: %s session_id: %s", + run_request.correlation_id, + entity_id, + session_id, + ) + + # Branch based on wait_for_response + if not run_request.wait_for_response: + # Fire-and-forget mode: signal entity and return pre-completed task + logger.info( + "[OrchestrationAgentExecutor] Fire-and-forget mode: signaling entity (correlation: %s)", + run_request.correlation_id, + ) + self._context.signal_entity(entity_id, "run", run_request.to_dict()) + + # Create a pre-completed task with acceptance response + acceptance_response = self._create_acceptance_response(run_request.correlation_id) + entity_task: CompletableTask[AgentResponse] = CompletableTask() # type: ignore[no-untyped-call] + entity_task.complete(acceptance_response) + else: + # Blocking mode: call entity and wait for response + entity_task = self._context.call_entity(entity_id, "run", run_request.to_dict()) # type: ignore + + # Wrap in DurableAgentTask for response transformation + return DurableAgentTask( + entity_task=entity_task, + response_format=run_request.response_format, + correlation_id=run_request.correlation_id, + ) diff --git a/python/packages/azurefunctions/agent_framework_azurefunctions/_models.py b/python/packages/durabletask/agent_framework_durabletask/_models.py similarity index 70% rename from python/packages/azurefunctions/agent_framework_azurefunctions/_models.py rename to python/packages/durabletask/agent_framework_durabletask/_models.py index ffee3b77fe..971aad8e54 100644 --- a/python/packages/azurefunctions/agent_framework_azurefunctions/_models.py +++ b/python/packages/durabletask/agent_framework_durabletask/_models.py @@ -8,13 +8,14 @@ from __future__ import annotations import inspect +import json import uuid from collections.abc import MutableMapping -from dataclasses import dataclass +from dataclasses import dataclass, field +from datetime import datetime, timezone from importlib import import_module from typing import TYPE_CHECKING, Any, cast -import azure.durable_functions as df from agent_framework import AgentThread, Role from ._constants import REQUEST_RESPONSE_FORMAT_TEXT @@ -32,195 +33,6 @@ _PydanticBaseModel = _RuntimeBaseModel -@dataclass -class AgentSessionId: - """Represents an agent session ID, which is used to identify a long-running agent session. - - Attributes: - name: The name of the agent that owns the session (case-insensitive) - key: The unique key of the agent session (case-sensitive) - """ - - name: str - key: str - - ENTITY_NAME_PREFIX: str = "dafx-" - - @staticmethod - def to_entity_name(name: str) -> str: - """Converts an agent name to an entity name by adding the DAFx prefix. - - Args: - name: The agent name - - Returns: - The entity name with the dafx- prefix - """ - return f"{AgentSessionId.ENTITY_NAME_PREFIX}{name}" - - @staticmethod - def with_random_key(name: str) -> AgentSessionId: - """Creates a new AgentSessionId with the specified name and a randomly generated key. 
- - Args: - name: The name of the agent that owns the session - - Returns: - A new AgentSessionId with the specified name and a random GUID key - """ - return AgentSessionId(name=name, key=uuid.uuid4().hex) - - def to_entity_id(self) -> df.EntityId: - """Converts this AgentSessionId to a Durable Functions EntityId. - - Returns: - EntityId for use with Durable Functions APIs - """ - return df.EntityId(self.to_entity_name(self.name), self.key) - - @staticmethod - def from_entity_id(entity_id: df.EntityId) -> AgentSessionId: - """Creates an AgentSessionId from a Durable Functions EntityId. - - Args: - entity_id: The EntityId to convert - - Returns: - AgentSessionId instance - - Raises: - ValueError: If the entity ID does not have the expected prefix - """ - if not entity_id.name.startswith(AgentSessionId.ENTITY_NAME_PREFIX): - raise ValueError( - f"'{entity_id}' is not a valid agent session ID. " - f"Expected entity name to start with '{AgentSessionId.ENTITY_NAME_PREFIX}'" - ) - - agent_name = entity_id.name[len(AgentSessionId.ENTITY_NAME_PREFIX) :] - return AgentSessionId(name=agent_name, key=entity_id.key) - - def __str__(self) -> str: - """Returns a string representation in the form @name@key.""" - return f"@{self.name}@{self.key}" - - def __repr__(self) -> str: - """Returns a detailed string representation.""" - return f"AgentSessionId(name='{self.name}', key='{self.key}')" - - @staticmethod - def parse(session_id_string: str, agent_name: str | None = None) -> AgentSessionId: - """Parses a string representation of an agent session ID. - - Args: - session_id_string: A string in the form @name@key, or a plain key string - when agent_name is provided. - agent_name: Optional agent name to use instead of parsing from the string. - If provided, only the key portion is extracted from session_id_string - (for @name@key format) or the entire string is used as the key - (for plain strings). 
- - Returns: - AgentSessionId instance - - Raises: - ValueError: If the string format is invalid and agent_name is not provided - """ - # Check if string is in @name@key format - if session_id_string.startswith("@") and "@" in session_id_string[1:]: - parts = session_id_string[1:].split("@", 1) - name = agent_name if agent_name is not None else parts[0] - return AgentSessionId(name=name, key=parts[1]) - - # Plain string format - only valid when agent_name is provided - if agent_name is not None: - return AgentSessionId(name=agent_name, key=session_id_string) - - raise ValueError(f"Invalid agent session ID format: {session_id_string}") - - -class DurableAgentThread(AgentThread): - """Durable agent thread that tracks the owning :class:`AgentSessionId`.""" - - _SERIALIZED_SESSION_ID_KEY = "durable_session_id" - - def __init__( - self, - *, - session_id: AgentSessionId | None = None, - service_thread_id: str | None = None, - message_store: Any = None, - context_provider: Any = None, - ) -> None: - super().__init__( - service_thread_id=service_thread_id, - message_store=message_store, - context_provider=context_provider, - ) - self._session_id: AgentSessionId | None = session_id - - @property - def session_id(self) -> AgentSessionId | None: - """Returns the durable agent session identifier for this thread.""" - return self._session_id - - def attach_session(self, session_id: AgentSessionId) -> None: - """Associates the thread with the provided :class:`AgentSessionId`.""" - self._session_id = session_id - - @classmethod - def from_session_id( - cls, - session_id: AgentSessionId, - *, - service_thread_id: str | None = None, - message_store: Any = None, - context_provider: Any = None, - ) -> DurableAgentThread: - """Creates a durable thread pre-associated with the supplied session ID.""" - return cls( - session_id=session_id, - service_thread_id=service_thread_id, - message_store=message_store, - context_provider=context_provider, - ) - - async def serialize(self, **kwargs: Any) -> dict[str, Any]: - """Serializes thread state including the durable session identifier.""" - state = await super().serialize(**kwargs) - if self._session_id is not None: - state[self._SERIALIZED_SESSION_ID_KEY] = str(self._session_id) - return state - - @classmethod - async def deserialize( - cls, - serialized_thread_state: MutableMapping[str, Any], - *, - message_store: Any = None, - **kwargs: Any, - ) -> DurableAgentThread: - """Restores a durable thread, rehydrating the stored session identifier.""" - state_payload = dict(serialized_thread_state) - session_id_value = state_payload.pop(cls._SERIALIZED_SESSION_ID_KEY, None) - thread = await super().deserialize( - state_payload, - message_store=message_store, - **kwargs, - ) - if not isinstance(thread, DurableAgentThread): - raise TypeError("Deserialized thread is not a DurableAgentThread instance") - - if session_id_value is None: - return thread - - if not isinstance(session_id_value, str): - raise ValueError("durable_session_id must be a string when present in serialized state") - - thread.attach_session(AgentSessionId.parse(session_id_value)) - return thread - - def serialize_response_format(response_format: type[BaseModel] | None) -> Any: """Serialize response format for transport across durable function boundaries.""" if response_format is None: @@ -292,43 +104,48 @@ class RunRequest: role: The role of the message sender (user, system, or assistant) response_format: Optional Pydantic BaseModel type describing the structured response format enable_tool_calls: 
Whether to enable tool calls for this request - thread_id: Optional thread ID for tracking - correlation_id: Optional correlation ID for tracking the response to this specific request + wait_for_response: If True (default), caller will wait for agent response. If False, + returns immediately after signaling (fire-and-forget mode) + correlation_id: Correlation ID for tracking the response to this specific request created_at: Optional timestamp when the request was created orchestration_id: Optional ID of the orchestration that initiated this request + options: Optional options dictionary forwarded to the agent """ message: str request_response_format: str + correlation_id: str role: Role = Role.USER response_format: type[BaseModel] | None = None enable_tool_calls: bool = True - thread_id: str | None = None - correlation_id: str | None = None - created_at: str | None = None + wait_for_response: bool = True + created_at: datetime | None = None orchestration_id: str | None = None + options: dict[str, Any] = field(default_factory=lambda: {}) def __init__( self, message: str, + correlation_id: str, request_response_format: str = REQUEST_RESPONSE_FORMAT_TEXT, role: Role | str | None = Role.USER, response_format: type[BaseModel] | None = None, enable_tool_calls: bool = True, - thread_id: str | None = None, - correlation_id: str | None = None, - created_at: str | None = None, + wait_for_response: bool = True, + created_at: datetime | None = None, orchestration_id: str | None = None, + options: dict[str, Any] | None = None, ) -> None: self.message = message + self.correlation_id = correlation_id self.role = self.coerce_role(role) self.response_format = response_format self.request_response_format = request_response_format self.enable_tool_calls = enable_tool_calls - self.thread_id = thread_id - self.correlation_id = correlation_id - self.created_at = created_at + self.wait_for_response = wait_for_response + self.created_at = created_at if created_at is not None else datetime.now(tz=timezone.utc) self.orchestration_id = orchestration_id + self.options = options if options is not None else {} @staticmethod def coerce_role(value: Role | str | None) -> Role: @@ -347,33 +164,177 @@ def to_dict(self) -> dict[str, Any]: result = { "message": self.message, "enable_tool_calls": self.enable_tool_calls, + "wait_for_response": self.wait_for_response, "role": self.role.value, "request_response_format": self.request_response_format, + "correlationId": self.correlation_id, + "options": self.options, } if self.response_format: result["response_format"] = serialize_response_format(self.response_format) - if self.thread_id: - result["thread_id"] = self.thread_id - if self.correlation_id: - result["correlationId"] = self.correlation_id if self.created_at: - result["created_at"] = self.created_at + result["created_at"] = self.created_at.isoformat() if self.orchestration_id: result["orchestrationId"] = self.orchestration_id - return result + @classmethod + def from_json(cls, data: str) -> RunRequest: + """Create RunRequest from JSON string.""" + try: + dict_data = json.loads(data) + except json.JSONDecodeError as e: + raise ValueError("The durable agent state is not valid JSON.") from e + + return cls.from_dict(dict_data) + @classmethod def from_dict(cls, data: dict[str, Any]) -> RunRequest: """Create RunRequest from dictionary.""" + created_at = data.get("created_at") + if isinstance(created_at, str): + try: + created_at = datetime.fromisoformat(created_at) + except ValueError: + created_at = None + + correlation_id = 
data.get("correlationId") + if not correlation_id: + raise ValueError("correlationId is required in RunRequest data") + + options = data.get("options") + return cls( message=data.get("message", ""), + correlation_id=correlation_id, request_response_format=data.get("request_response_format", REQUEST_RESPONSE_FORMAT_TEXT), role=cls.coerce_role(data.get("role")), response_format=_deserialize_response_format(data.get("response_format")), + wait_for_response=data.get("wait_for_response", True), enable_tool_calls=data.get("enable_tool_calls", True), - thread_id=data.get("thread_id"), - correlation_id=data.get("correlationId"), - created_at=data.get("created_at"), + created_at=created_at, orchestration_id=data.get("orchestrationId"), + options=cast(dict[str, Any], options) if isinstance(options, dict) else {}, + ) + + +@dataclass +class AgentSessionId: + """Represents an agent session identifier (name + key).""" + + name: str + key: str + + ENTITY_NAME_PREFIX: str = "dafx-" + + @staticmethod + def to_entity_name(name: str) -> str: + return f"{AgentSessionId.ENTITY_NAME_PREFIX}{name}" + + @staticmethod + def with_random_key(name: str) -> AgentSessionId: + return AgentSessionId(name=name, key=uuid.uuid4().hex) + + @property + def entity_name(self) -> str: + return self.to_entity_name(self.name) + + def __str__(self) -> str: + return f"@{self.name}@{self.key}" + + def __repr__(self) -> str: + return f"AgentSessionId(name='{self.name}', key='{self.key}')" + + @staticmethod + def parse(session_id_string: str, agent_name: str | None = None) -> AgentSessionId: + """Parses a string representation of an agent session ID. + + Args: + session_id_string: A string in the form @name@key, or a plain key string + when agent_name is provided. + agent_name: Optional agent name to use instead of parsing from the string. + If provided, only the key portion is extracted from session_id_string + (for @name@key format) or the entire string is used as the key + (for plain strings). 
+ + Returns: + AgentSessionId instance + + Raises: + ValueError: If the string format is invalid and agent_name is not provided + """ + # Check if string is in @name@key format + if session_id_string.startswith("@") and "@" in session_id_string[1:]: + parts = session_id_string[1:].split("@", 1) + name = agent_name if agent_name is not None else parts[0] + return AgentSessionId(name=name, key=parts[1]) + + # Plain string format - only valid when agent_name is provided + if agent_name is not None: + return AgentSessionId(name=agent_name, key=session_id_string) + + raise ValueError(f"Invalid agent session ID format: {session_id_string}") + + +class DurableAgentThread(AgentThread): + """Durable agent thread that tracks the owning :class:`AgentSessionId`.""" + + _SERIALIZED_SESSION_ID_KEY = "durable_session_id" + + def __init__( + self, + *, + session_id: AgentSessionId | None = None, + **kwargs: Any, + ) -> None: + super().__init__(**kwargs) + self._session_id: AgentSessionId | None = session_id + + @property + def session_id(self) -> AgentSessionId | None: + return self._session_id + + @session_id.setter + def session_id(self, value: AgentSessionId | None) -> None: + self._session_id = value + + @classmethod + def from_session_id( + cls, + session_id: AgentSessionId, + **kwargs: Any, + ) -> DurableAgentThread: + return cls(session_id=session_id, **kwargs) + + async def serialize(self, **kwargs: Any) -> dict[str, Any]: + state = await super().serialize(**kwargs) + if self._session_id is not None: + state[self._SERIALIZED_SESSION_ID_KEY] = str(self._session_id) + return state + + @classmethod + async def deserialize( + cls, + serialized_thread_state: MutableMapping[str, Any], + *, + message_store: Any = None, + **kwargs: Any, + ) -> DurableAgentThread: + state_payload = dict(serialized_thread_state) + session_id_value = state_payload.pop(cls._SERIALIZED_SESSION_ID_KEY, None) + thread = await super().deserialize( + state_payload, + message_store=message_store, + **kwargs, ) + if not isinstance(thread, DurableAgentThread): + raise TypeError("Deserialized thread is not a DurableAgentThread instance") + + if session_id_value is None: + return thread + + if not isinstance(session_id_value, str): + raise ValueError("durable_session_id must be a string when present in serialized state") + + thread.session_id = AgentSessionId.parse(session_id_value) + return thread diff --git a/python/packages/durabletask/agent_framework_durabletask/_orchestration_context.py b/python/packages/durabletask/agent_framework_durabletask/_orchestration_context.py new file mode 100644 index 0000000000..2dd78efe1c --- /dev/null +++ b/python/packages/durabletask/agent_framework_durabletask/_orchestration_context.py @@ -0,0 +1,75 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Orchestration context wrapper for Durable Task Agent Framework. + +This module provides the DurableAIAgentOrchestrationContext class for use inside +orchestration functions to interact with durable agents. +""" + +from __future__ import annotations + +from agent_framework import get_logger +from durabletask.task import OrchestrationContext + +from ._executors import DurableAgentTask, OrchestrationAgentExecutor +from ._shim import DurableAgentProvider, DurableAIAgent + +logger = get_logger("agent_framework.durabletask.orchestration_context") + + +class DurableAIAgentOrchestrationContext(DurableAgentProvider[DurableAgentTask]): + """Orchestration context wrapper for interacting with durable agents internally. 
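+
+    It is the orchestration-side counterpart of the client wrapper; both expose
+    the same ``get_agent`` entry point through ``DurableAgentProvider``.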
+
+    This class wraps a durabletask OrchestrationContext and provides a convenient
+    interface for retrieving and executing durable agents from within orchestration
+    functions.
+
+    Example:
+        ```python
+        from durabletask.task import OrchestrationContext
+        from agent_framework.azure import DurableAIAgentOrchestrationContext
+
+
+        def my_orchestration(context: OrchestrationContext):
+            # Wrap the context
+            agent_context = DurableAIAgentOrchestrationContext(context)
+
+            # Get an agent reference
+            agent = agent_context.get_agent("assistant")
+
+            # Run the agent (returns a Task to be yielded)
+            result = yield agent.run("Hello, how are you?")
+
+            return result.text
+        ```
+    """
+
+    def __init__(self, context: OrchestrationContext):
+        """Initialize the orchestration context wrapper.
+
+        Args:
+            context: The durabletask orchestration context to wrap
+        """
+        self._context = context
+        self._executor = OrchestrationAgentExecutor(self._context)
+        logger.debug("[DurableAIAgentOrchestrationContext] Initialized")
+
+    def get_agent(self, agent_name: str) -> DurableAIAgent[DurableAgentTask]:
+        """Retrieve a DurableAIAgent shim for the specified agent.
+
+        This method returns a proxy object that can be used to execute the agent
+        within an orchestration. The agent's run() method will return a Task that
+        must be yielded.
+
+        Args:
+            agent_name: Name of the agent to retrieve (without the dafx- prefix)
+
+        Returns:
+            DurableAIAgent instance that can be used to run the agent
+
+        Note:
+            Validation is deferred to execution time. The entity must be registered
+            on a worker with the name f"dafx-{agent_name}".
+        """
+        logger.debug("[DurableAIAgentOrchestrationContext] Creating agent proxy for: %s", agent_name)
+        return DurableAIAgent(self._executor, agent_name)
diff --git a/python/packages/durabletask/agent_framework_durabletask/_response_utils.py b/python/packages/durabletask/agent_framework_durabletask/_response_utils.py
new file mode 100644
index 0000000000..fd622d9b35
--- /dev/null
+++ b/python/packages/durabletask/agent_framework_durabletask/_response_utils.py
@@ -0,0 +1,72 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+"""Shared utilities for handling AgentResponse parsing and validation."""
+
+from typing import Any
+
+from agent_framework import AgentResponse, get_logger
+from pydantic import BaseModel
+
+logger = get_logger("agent_framework.durabletask.response_utils")
+
+
+def load_agent_response(agent_response: AgentResponse | dict[str, Any] | None) -> AgentResponse:
+    """Convert raw payloads into an AgentResponse instance.
+
+    Args:
+        agent_response: The response to convert, can be an AgentResponse, dict, or None
+
+    Returns:
+        AgentResponse: The converted response object
+
+    Raises:
+        ValueError: If agent_response is None
+        TypeError: If agent_response is an unsupported type
+    """
+    if agent_response is None:
+        raise ValueError("agent_response cannot be None")
+
+    logger.debug("[load_agent_response] Loading agent response of type: %s", type(agent_response))
+
+    if isinstance(agent_response, AgentResponse):
+        return agent_response
+    if isinstance(agent_response, dict):
+        logger.debug("[load_agent_response] Converting dict payload using AgentResponse.from_dict")
+        return AgentResponse.from_dict(agent_response)
+
+    raise TypeError(f"Unsupported type for agent_response: {type(agent_response)}")
+
+
+def ensure_response_format(
+    response_format: type[BaseModel] | None,
+    correlation_id: str,
+    response: AgentResponse,
+) -> None:
+    """Ensure the AgentResponse value is parsed into the expected response_format.
+
+    This function modifies the response in-place by parsing its value attribute
+    into the specified Pydantic model format.
+
+    Args:
+        response_format: Optional Pydantic model class to parse the response value into
+        correlation_id: Correlation ID for logging purposes
+        response: The AgentResponse object to validate and parse
+
+    Raises:
+        ValueError: If response_format is specified but response.value cannot be parsed
+    """
+    if response_format is not None and not isinstance(response.value, response_format):
+        response.try_parse_value(response_format)
+
+        # Validate that parsing succeeded
+        if not isinstance(response.value, response_format):
+            raise ValueError(
+                f"Response value could not be parsed into required format {response_format.__name__} "
+                f"for correlation_id {correlation_id}"
+            )
+
+    logger.debug(
+        "[ensure_response_format] Loaded AgentResponse.value for correlation_id %s with type: %s",
+        correlation_id,
+        type(response.value).__name__,
+    )
diff --git a/python/packages/durabletask/agent_framework_durabletask/_shim.py b/python/packages/durabletask/agent_framework_durabletask/_shim.py
new file mode 100644
index 0000000000..a624cdc8b5
--- /dev/null
+++ b/python/packages/durabletask/agent_framework_durabletask/_shim.py
@@ -0,0 +1,177 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+"""Durable Agent Shim for Durable Task Framework.
+
+This module provides the DurableAIAgent shim that implements AgentProtocol
+and provides a consistent interface for both Client and Orchestration contexts.
+The actual execution is delegated to the context-specific providers.
+"""
+
+from __future__ import annotations
+
+from abc import ABC, abstractmethod
+from collections.abc import AsyncIterator
+from typing import Any, Generic, TypeVar
+
+from agent_framework import AgentProtocol, AgentResponseUpdate, AgentThread, ChatMessage
+
+from ._executors import DurableAgentExecutor
+from ._models import DurableAgentThread
+
+# TypeVar for the task type returned by executors
+# Covariant because TaskT only appears in return positions (output)
+TaskT = TypeVar("TaskT", covariant=True)
+
+
+class DurableAgentProvider(ABC, Generic[TaskT]):
+    """Abstract provider for constructing durable agent proxies.
+
+    Implemented by context-specific wrappers (client/orchestration) to return a
+    `DurableAIAgent` shim backed by their respective `DurableAgentExecutor`
+    implementation, ensuring a consistent `get_agent` entry point regardless of
+    execution context.
+    """
+
+    @abstractmethod
+    def get_agent(self, agent_name: str) -> DurableAIAgent[TaskT]:
+        """Retrieve a DurableAIAgent shim for the specified agent.
+
+        Args:
+            agent_name: Name of the agent to retrieve
+
+        Returns:
+            DurableAIAgent instance that can be used to run the agent
+
+        Raises:
+            NotImplementedError: Must be implemented by subclasses
+        """
+        raise NotImplementedError("Subclasses must implement get_agent()")
+
+
+class DurableAIAgent(AgentProtocol, Generic[TaskT]):
+    """A durable agent proxy that delegates execution to the provider.
+
+    This class implements AgentProtocol but with one critical difference:
+    - AgentProtocol.run() returns a Coroutine (async, must await)
+    - DurableAIAgent.run() returns TaskT (a synchronous task object that must be
+      yielded, or the AgentResponse itself in the case of TaskHubGrpcClient)
+
+    This represents fundamentally different execution models but maintains the same
+    interface contract for all other properties and methods.
+
+    The underlying provider determines how execution occurs (entity calls, HTTP requests, etc.)
+ and what type of Task object is returned. + + Type Parameters: + TaskT: The task type returned by this agent (e.g., AgentResponse, DurableAgentTask, AgentTask) + """ + + id: str + name: str + display_name: str + description: str | None + + def __init__(self, executor: DurableAgentExecutor[TaskT], name: str, *, agent_id: str | None = None): + """Initialize the shim with a provider and agent name. + + Args: + executor: The execution provider (Client or OrchestrationContext) + name: The name of the agent to execute + agent_id: Optional unique identifier for the agent (defaults to name) + """ + self._executor = executor + self.name = name # pyright: ignore[reportIncompatibleVariableOverride] + self.id = agent_id if agent_id is not None else name + self.display_name = name + self.description = f"Durable agent proxy for {name}" + + def run( # type: ignore[override] + self, + messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None, + *, + thread: AgentThread | None = None, + options: dict[str, Any] | None = None, + ) -> TaskT: + """Execute the agent via the injected provider. + + Args: + messages: The message(s) to send to the agent + thread: Optional agent thread for conversation context + options: Optional options dictionary. Supported keys include + ``response_format``, ``enable_tool_calls``, and ``wait_for_response``. + Additional keys are forwarded to the agent execution. + + Note: + This method overrides AgentProtocol.run() with a different return type: + - AgentProtocol.run() returns Coroutine[Any, Any, AgentResponse] (async) + - DurableAIAgent.run() returns TaskT (Task object for yielding) + + This is intentional to support orchestration contexts that use yield patterns + instead of async/await patterns. + + Returns: + TaskT: The task type specific to the executor + + Raises: + ValueError: If wait_for_response=False is used in an unsupported context + """ + message_str = self._normalize_messages(messages) + + run_request = self._executor.get_run_request( + message=message_str, + options=options, + ) + + return self._executor.run_durable_agent( + agent_name=self.name, + run_request=run_request, + thread=thread, + ) + + def run_stream( # type: ignore[override] + self, + messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None, + *, + thread: AgentThread | None = None, + **kwargs: Any, + ) -> AsyncIterator[AgentResponseUpdate]: + """Run the agent with streaming (not supported for durable agents). + + Args: + messages: The message(s) to send to the agent + thread: Optional agent thread for conversation context + **kwargs: Additional arguments + + Raises: + NotImplementedError: Streaming is not supported for durable agents + """ + raise NotImplementedError("Streaming is not supported for durable agents") + + def get_new_thread(self, **kwargs: Any) -> DurableAgentThread: + """Create a new agent thread via the provider.""" + return self._executor.get_new_thread(self.name, **kwargs) + + def _normalize_messages(self, messages: str | ChatMessage | list[str] | list[ChatMessage] | None) -> str: + """Convert supported message inputs to a single string. 
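+
+        Lists are joined with newlines; ``None`` and empty lists yield an
+        empty string.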
+
+        Args:
+            messages: The messages to normalize
+
+        Returns:
+            A single string representation of the messages
+        """
+        if messages is None:
+            return ""
+        if isinstance(messages, str):
+            return messages
+        if isinstance(messages, ChatMessage):
+            return messages.text or ""
+        if isinstance(messages, list):
+            if not messages:
+                return ""
+            first_item = messages[0]
+            if isinstance(first_item, str):
+                return "\n".join(messages)  # type: ignore[arg-type]
+            # List of ChatMessage
+            return "\n".join([msg.text or "" for msg in messages])  # type: ignore[union-attr]
+        return ""
diff --git a/python/packages/durabletask/agent_framework_durabletask/_worker.py b/python/packages/durabletask/agent_framework_durabletask/_worker.py
new file mode 100644
index 0000000000..f812a4a148
--- /dev/null
+++ b/python/packages/durabletask/agent_framework_durabletask/_worker.py
@@ -0,0 +1,200 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+"""Worker wrapper for Durable Task Agent Framework.
+
+This module provides the DurableAIAgentWorker class that wraps a durabletask worker
+and enables registration of agents as durable entities.
+"""
+
+from __future__ import annotations
+
+import asyncio
+from typing import Any
+
+from agent_framework import AgentProtocol, get_logger
+from durabletask.worker import TaskHubGrpcWorker
+
+from ._callbacks import AgentResponseCallbackProtocol
+from ._entities import AgentEntity, DurableTaskEntityStateProvider
+
+logger = get_logger("agent_framework.durabletask.worker")
+
+
+class DurableAIAgentWorker:
+    """Wrapper for durabletask worker that enables agent registration.
+
+    This class wraps an existing TaskHubGrpcWorker instance and provides
+    a convenient interface for registering agents as durable entities.
+
+    Example:
+        ```python
+        from durabletask.worker import TaskHubGrpcWorker
+        from agent_framework import ChatAgent
+        from agent_framework.azure import DurableAIAgentWorker
+
+        # Create the underlying worker
+        worker = TaskHubGrpcWorker(host_address="localhost:4001")
+
+        # Wrap it with the agent worker
+        agent_worker = DurableAIAgentWorker(worker)
+
+        # Register agents
+        my_agent = ChatAgent(chat_client=client, name="assistant")
+        agent_worker.add_agent(my_agent)
+
+        # Start the worker
+        worker.start()
+        ```
+    """
+
+    def __init__(
+        self,
+        worker: TaskHubGrpcWorker,
+        callback: AgentResponseCallbackProtocol | None = None,
+    ):
+        """Initialize the worker wrapper.
+
+        Args:
+            worker: The durabletask worker instance to wrap
+            callback: Optional callback for agent response notifications
+        """
+        self._worker = worker
+        self._callback = callback
+        self._registered_agents: dict[str, AgentProtocol] = {}
+        logger.debug("[DurableAIAgentWorker] Initialized with worker type: %s", type(worker).__name__)
+
+    def add_agent(
+        self,
+        agent: AgentProtocol,
+        callback: AgentResponseCallbackProtocol | None = None,
+    ) -> None:
+        """Register an agent with the worker.
+
+        This method creates a durable entity class for the agent and registers
+        it with the underlying durabletask worker. The entity will be accessible
+        by the name "dafx-{agent_name}".
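+
+        Internally the agent is wrapped in an ``AgentEntity`` whose state is
+        persisted through ``DurableTaskEntityStateProvider``.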
+ + Args: + agent: The agent to register (must have a name) + callback: Optional callback for this specific agent (overrides worker-level callback) + + Raises: + ValueError: If the agent doesn't have a name or is already registered + """ + agent_name = agent.name + if not agent_name: + raise ValueError("Agent must have a name to be registered") + + if agent_name in self._registered_agents: + raise ValueError(f"Agent '{agent_name}' is already registered") + + logger.info("[DurableAIAgentWorker] Registering agent: %s as entity: dafx-%s", agent_name, agent_name) + + # Store the agent reference + self._registered_agents[agent_name] = agent + + # Use agent-specific callback if provided, otherwise use worker-level callback + effective_callback = callback or self._callback + + # Create a configured entity class using the factory + entity_class = self.__create_agent_entity(agent, effective_callback) + + # Register the entity class with the worker + # The worker.add_entity method takes a class + entity_registered: str = self._worker.add_entity(entity_class) # pyright: ignore[reportUnknownMemberType] + + logger.debug( + "[DurableAIAgentWorker] Successfully registered entity class %s for agent: %s", + entity_registered, + agent_name, + ) + + def start(self) -> None: + """Start the worker to begin processing tasks. + + Note: + This method delegates to the underlying worker's start method. + The worker will block until stopped. + """ + logger.info("[DurableAIAgentWorker] Starting worker with %d registered agents", len(self._registered_agents)) + self._worker.start() # type: ignore[no-untyped-call] + + def stop(self) -> None: + """Stop the worker gracefully. + + Note: + This method delegates to the underlying worker's stop method. + """ + logger.info("[DurableAIAgentWorker] Stopping worker") + self._worker.stop() # type: ignore[no-untyped-call] + + @property + def registered_agent_names(self) -> list[str]: + """Get the names of all registered agents. + + Returns: + List of agent names (without the dafx- prefix) + """ + return list(self._registered_agents.keys()) + + def __create_agent_entity( + self, + agent: AgentProtocol, + callback: AgentResponseCallbackProtocol | None = None, + ) -> type[DurableTaskEntityStateProvider]: + """Factory function to create a DurableEntity class configured with an agent. + + This factory creates a new class that combines the entity state provider + with the agent execution logic. Each agent gets its own entity class. + + Args: + agent: The agent instance to wrap + callback: Optional callback for agent responses + + Returns: + A new DurableEntity subclass configured for this agent + """ + agent_name = agent.name or type(agent).__name__ + entity_name = f"dafx-{agent_name}" + + class ConfiguredAgentEntity(DurableTaskEntityStateProvider): + """Durable entity configured with a specific agent instance.""" + + def __init__(self) -> None: + super().__init__() + # Create the AgentEntity with this state provider + self._agent_entity = AgentEntity( + agent=agent, + callback=callback, + state_provider=self, + ) + logger.debug( + "[ConfiguredAgentEntity] Initialized entity for agent: %s (entity name: %s)", + agent_name, + entity_name, + ) + + def run(self, request: Any) -> Any: + """Handle run requests from clients or orchestrations. 
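+
+                The async agent entity is driven to completion with
+                ``asyncio.run``, so this method blocks until the agent finishes.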
+ + Args: + request: RunRequest as dict or string + + Returns: + AgentResponse as dict + """ + logger.debug("[ConfiguredAgentEntity.run] Executing agent: %s", agent_name) + response = asyncio.run(self._agent_entity.run(request)) + return response.to_dict() + + def reset(self) -> None: + """Reset the agent's conversation history.""" + logger.debug("[ConfiguredAgentEntity.reset] Resetting agent: %s", agent_name) + self._agent_entity.reset() + + # Set the entity name to match the prefixed agent name + # This is used by durabletask to register the entity + ConfiguredAgentEntity.__name__ = entity_name + ConfiguredAgentEntity.__qualname__ = entity_name + + return ConfiguredAgentEntity diff --git a/python/packages/durabletask/agent_framework_durabletask/py.typed b/python/packages/durabletask/agent_framework_durabletask/py.typed new file mode 100644 index 0000000000..e69de29bb2 diff --git a/python/packages/durabletask/pyproject.toml b/python/packages/durabletask/pyproject.toml new file mode 100644 index 0000000000..aa21b6f05d --- /dev/null +++ b/python/packages/durabletask/pyproject.toml @@ -0,0 +1,101 @@ +[project] +name = "agent-framework-durabletask" +description = "Durable Task integration for Microsoft Agent Framework." +authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] +readme = "README.md" +requires-python = ">=3.10" +version = "0.0.2b260126" +license-files = ["LICENSE"] +urls.homepage = "https://aka.ms/agent-framework" +urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" +urls.release_notes = "https://github.com/microsoft/agent-framework/releases?q=tag%3Apython-1&expanded=true" +urls.issues = "https://github.com/microsoft/agent-framework/issues" +classifiers = [ + "License :: OSI Approved :: MIT License", + "Development Status :: 4 - Beta", + "Intended Audience :: Developers", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", + "Typing :: Typed", +] +dependencies = [ + "agent-framework-core", + "durabletask>=1.3.0", + "durabletask-azuremanaged>=1.3.0" +] + +[dependency-groups] +dev = [ + "types-python-dateutil>=2.9.0", +] + +[tool.uv] +prerelease = "if-necessary-or-explicit" +environments = [ + "sys_platform == 'darwin'", + "sys_platform == 'linux'", + "sys_platform == 'win32'" +] + +[tool.uv-dynamic-versioning] +fallback-version = "0.0.0" +[tool.pytest.ini_options] +testpaths = 'tests' +addopts = "-ra -q -r fEX" +asyncio_mode = "auto" +asyncio_default_fixture_loop_scope = "function" +filterwarnings = [ + "ignore:Support for class-based `config` is deprecated:DeprecationWarning:pydantic.*" +] +timeout = 120 +markers = [ + "integration: marks tests as integration tests", + "integration_test: marks tests as integration tests (alternative marker)", + "sample: marks tests as sample tests", + "requires_azure_openai: marks tests that require Azure OpenAI", + "requires_dts: marks tests that require Durable Task Scheduler", + "requires_redis: marks tests that require Redis" +] + +[tool.ruff] +extend = "../../pyproject.toml" + +[tool.coverage.run] +omit = [ + "**/__init__.py" +] + +[tool.pyright] +extends = "../../pyproject.toml" + +[tool.mypy] +plugins = ['pydantic.mypy'] +strict = true +python_version = "3.10" +ignore_missing_imports = true +disallow_untyped_defs = true +no_implicit_optional = true +check_untyped_defs = true +warn_return_any = true +show_error_codes = true 
+warn_unused_ignores = false +disallow_incomplete_defs = true +disallow_untyped_decorators = true + +[tool.bandit] +targets = ["agent_framework_durabletask"] +exclude_dirs = ["tests"] + +[tool.poe] +executor.type = "uv" +include = "../../shared_tasks.toml" +[tool.poe.tasks] +mypy = "mypy --config-file $POE_ROOT/pyproject.toml agent_framework_durabletask" +test = "pytest --cov=agent_framework_durabletask --cov-report=term-missing:skip-covered tests" + +[build-system] +requires = ["flit-core >= 3.11,<4.0"] +build-backend = "flit_core.buildapi" \ No newline at end of file diff --git a/python/packages/durabletask/tests/integration_tests/.env.example b/python/packages/durabletask/tests/integration_tests/.env.example new file mode 100644 index 0000000000..a36cf771f8 --- /dev/null +++ b/python/packages/durabletask/tests/integration_tests/.env.example @@ -0,0 +1,17 @@ +# Azure OpenAI Configuration +AZURE_OPENAI_ENDPOINT=https://your-resource.openai.azure.com/ +AZURE_OPENAI_CHAT_DEPLOYMENT_NAME=your-deployment-name +# Optional: Use Azure CLI authentication if not provided +# AZURE_OPENAI_API_KEY=your-api-key + +# Durable Task Scheduler Configuration +ENDPOINT=http://localhost:8080 +TASKHUB=default + +# Redis Configuration (for streaming tests) +REDIS_CONNECTION_STRING=redis://localhost:6379 +REDIS_STREAM_TTL_MINUTES=10 + +# Integration Test Control +# Set to 'true' to enable integration tests +RUN_INTEGRATION_TESTS=true diff --git a/python/packages/durabletask/tests/integration_tests/README.md b/python/packages/durabletask/tests/integration_tests/README.md new file mode 100644 index 0000000000..59da266460 --- /dev/null +++ b/python/packages/durabletask/tests/integration_tests/README.md @@ -0,0 +1,111 @@ +# Sample Integration Tests + +Integration tests that validate the Durable Agent Framework samples by running them against a Durable Task Scheduler (DTS) instance. + +## Setup + +### 1. Create `.env` file + +Copy `.env.example` to `.env` and fill in your Azure credentials: + +```bash +cp .env.example .env +``` + +Required variables: +- `AZURE_OPENAI_ENDPOINT` +- `AZURE_OPENAI_CHAT_DEPLOYMENT_NAME` +- `AZURE_OPENAI_API_KEY` (optional if using Azure CLI authentication) +- `RUN_INTEGRATION_TESTS` (set to `true`) +- `ENDPOINT` (default: http://localhost:8080) +- `TASKHUB` (default: default) + +Optional variables (for streaming tests): +- `REDIS_CONNECTION_STRING` (default: redis://localhost:6379) +- `REDIS_STREAM_TTL_MINUTES` (default: 10) + +### 2. Start required services + +**Durable Task Scheduler:** +```bash +docker run -d --name dts-emulator -p 8080:8080 -p 8082:8082 mcr.microsoft.com/dts/dts-emulator:latest +``` +- Port 8080: gRPC endpoint (used by tests) +- Port 8082: Web dashboard (optional, for monitoring) + +**Redis (for streaming tests):** +```bash +docker run -d --name redis -p 6379:6379 redis:latest +``` +- Port 6379: Redis server endpoint + +## Running Tests + +The tests automatically start and stop worker processes for each sample. 
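+
+Each test then connects to that worker's task hub through a `DurableAIAgentClient`.
+A minimal sketch of the pattern the tests follow (the endpoint and task hub values
+are illustrative; `create_agent_client` lives in `dt_testutils.py`):
+
+```python
+from dt_testutils import create_agent_client
+
+# Bind a client to the same task hub the worker fixture started (example values)
+_, agent_client = create_agent_client("http://localhost:8080", "test-abc12345")
+
+agent = agent_client.get_agent("Joker")  # agent registered by the sample worker
+thread = agent.get_new_thread()
+response = agent.run("Tell me a short joke.", thread=thread)
+print(response.text)
+```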
+
+### Run all sample tests
+```bash
+uv run pytest packages/durabletask/tests/integration_tests -v
+```
+
+### Run specific sample
+```bash
+uv run pytest packages/durabletask/tests/integration_tests/test_01_dt_single_agent.py -v
+```
+
+### Run with verbose output
+```bash
+uv run pytest packages/durabletask/tests/integration_tests -sv
+```
+
+## How It Works
+
+Each test file uses pytest markers to automatically configure and start the worker process:
+
+```python
+pytestmark = [
+    pytest.mark.sample("03_single_agent_streaming"),
+    pytest.mark.integration_test,
+    pytest.mark.requires_azure_openai,
+    pytest.mark.requires_dts,
+    pytest.mark.requires_redis,
+]
+```
+
+## Troubleshooting
+
+**Tests are skipped:**
+Ensure `RUN_INTEGRATION_TESTS=true` is set in your `.env` file.
+
+**Redis connection failed:**
+Check that Redis is running: `docker ps | grep redis`
+
+**Missing environment variables:**
+Ensure your `.env` file contains all required variables from `.env.example`.
+
+**Tests timeout:**
+Check that Azure OpenAI credentials are valid and the service is accessible.
+
+### DTS Emulator Errors
+
+If you see "DTS emulator is not available":
+- Ensure the Docker container is running: `docker ps | grep dts-emulator`
+- Check that port 8080 is not in use by another process
+- Restart the container if needed
+
+### Azure OpenAI Errors
+
+If you see authentication or deployment errors:
+- Verify your `AZURE_OPENAI_ENDPOINT` is correct
+- Confirm `AZURE_OPENAI_CHAT_DEPLOYMENT_NAME` matches your deployment
+- If using API key, check `AZURE_OPENAI_API_KEY` is valid
+- If using Azure CLI, ensure you're logged in: `az login`
+
+## CI/CD
+
+For automated testing in CI/CD pipelines:
+
+1. Use Docker Compose to start the DTS emulator
+2. Set environment variables via CI/CD secrets
+3. Run tests with appropriate markers: `pytest -m integration_test`
diff --git a/python/packages/durabletask/tests/integration_tests/conftest.py b/python/packages/durabletask/tests/integration_tests/conftest.py
new file mode 100644
index 0000000000..2cd045f291
--- /dev/null
+++ b/python/packages/durabletask/tests/integration_tests/conftest.py
@@ -0,0 +1,234 @@
+# Copyright (c) Microsoft. All rights reserved.
+"""Pytest configuration and fixtures for durabletask integration tests.""" + +import asyncio +import logging +import os +import subprocess +import sys +import time +import uuid +from collections.abc import Generator +from pathlib import Path +from typing import Any, cast + +import pytest +import redis.asyncio as aioredis +from dotenv import load_dotenv +from durabletask.azuremanaged.client import DurableTaskSchedulerClient + +# Add the integration_tests directory to the path so testutils can be imported +sys.path.insert(0, str(Path(__file__).parent)) + +# Load environment variables from .env file +load_dotenv(Path(__file__).parent / ".env") + +# Configure logging to reduce noise during tests +logging.basicConfig(level=logging.WARNING) + + +def _get_dts_endpoint() -> str: + """Get the DTS endpoint from environment or use default.""" + return os.getenv("ENDPOINT", "http://localhost:8080") + + +def _check_dts_available(endpoint: str | None = None) -> bool: + """Check if DTS emulator is available at the given endpoint.""" + try: + resolved_endpoint: str = _get_dts_endpoint() if endpoint is None else endpoint + DurableTaskSchedulerClient( + host_address=resolved_endpoint, + secure_channel=False, + taskhub="test", + token_credential=None, + ) + return True + except Exception: + return False + + +def _check_redis_available() -> bool: + """Check if Redis is available at the default connection string.""" + try: + + async def test_connection() -> bool: + redis_url = os.getenv("REDIS_CONNECTION_STRING", "redis://localhost:6379") + try: + client = aioredis.from_url(redis_url, socket_timeout=2) # type: ignore[reportUnknownMemberType] + await client.ping() # type: ignore[reportUnknownMemberType] + await client.aclose() # type: ignore[reportUnknownMemberType] + return True + except Exception: + return False + + return asyncio.run(test_connection()) + except Exception: + return False + + +def pytest_configure(config: pytest.Config) -> None: + """Register custom markers.""" + config.addinivalue_line("markers", "integration_test: mark test as integration test") + config.addinivalue_line("markers", "requires_dts: mark test as requiring DTS emulator") + config.addinivalue_line("markers", "requires_azure_openai: mark test as requiring Azure OpenAI") + config.addinivalue_line("markers", "requires_redis: mark test as requiring Redis") + config.addinivalue_line( + "markers", + "sample(path): specify the sample directory name for the test (e.g., @pytest.mark.sample('01_single_agent'))", + ) + + +def pytest_collection_modifyitems(config: pytest.Config, items: list[pytest.Item]) -> None: + """Skip tests based on markers and environment availability.""" + run_integration = os.getenv("RUN_INTEGRATION_TESTS", "false").lower() == "true" + skip_integration = pytest.mark.skip(reason="RUN_INTEGRATION_TESTS not set to 'true'") + + # Check Azure OpenAI environment variables + azure_openai_vars = ["AZURE_OPENAI_ENDPOINT", "AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"] + azure_openai_available = all(os.getenv(var) for var in azure_openai_vars) + skip_azure_openai = pytest.mark.skip( + reason=f"Missing required environment variables: {', '.join(azure_openai_vars)}" + ) + + # Check DTS availability + dts_available = _check_dts_available() + skip_dts = pytest.mark.skip(reason=f"DTS emulator is not available at {_get_dts_endpoint()}") + + # Check Redis availability + redis_available = _check_redis_available() + skip_redis = pytest.mark.skip(reason="Redis is not available at redis://localhost:6379") + + for item in items: + if 
"integration_test" in item.keywords and not run_integration: + item.add_marker(skip_integration) + if "requires_azure_openai" in item.keywords and not azure_openai_available: + item.add_marker(skip_azure_openai) + if "requires_dts" in item.keywords and not dts_available: + item.add_marker(skip_dts) + if "requires_redis" in item.keywords and not redis_available: + item.add_marker(skip_redis) + + +@pytest.fixture(scope="session") +def dts_endpoint() -> str: + """Get the DTS endpoint from environment or use default.""" + return _get_dts_endpoint() + + +@pytest.fixture(scope="session") +def dts_available(dts_endpoint: str) -> bool: + """Check if DTS emulator is available and responding.""" + if _check_dts_available(dts_endpoint): + return True + pytest.skip(f"DTS emulator is not available at {dts_endpoint}") + return False + + +@pytest.fixture(scope="session") +def check_azure_openai_env() -> None: + """Verify Azure OpenAI environment variables are set.""" + required_vars = ["AZURE_OPENAI_ENDPOINT", "AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"] + missing = [var for var in required_vars if not os.getenv(var)] + + if missing: + pytest.skip(f"Missing required environment variables: {', '.join(missing)}") + + +@pytest.fixture(scope="module") +def unique_taskhub() -> str: + """Generate a unique task hub name for test isolation.""" + # Use a shorter UUID to avoid naming issues + return f"test-{uuid.uuid4().hex[:8]}" + + +@pytest.fixture(scope="module") +def worker_process( + dts_available: bool, + check_azure_openai_env: None, + dts_endpoint: str, + unique_taskhub: str, + request: pytest.FixtureRequest, +) -> Generator[dict[str, Any], None, None]: + """ + Start a worker process for the current test module by running the sample worker.py. + + This fixture: + 1. Determines which sample to run from @pytest.mark.sample() + 2. Starts the sample's worker.py as a subprocess + 3. Waits for the worker to be ready + 4. Tears down the worker after tests complete + + Usage: + @pytest.mark.sample("01_single_agent") + class TestSingleAgent: + ... 
+ """ + # Get sample path from marker + sample_marker = request.node.get_closest_marker("sample") # type: ignore[union-attr] + if not sample_marker: + pytest.fail("Test class must have @pytest.mark.sample() marker") + + sample_name: str = cast(str, sample_marker.args[0]) # type: ignore[union-attr] + sample_path: Path = Path(__file__).parents[4] / "samples" / "getting_started" / "durabletask" / sample_name + worker_file: Path = sample_path / "worker.py" + + if not worker_file.exists(): + pytest.fail(f"Sample worker not found: {worker_file}") + + # Set up environment for worker subprocess + env = os.environ.copy() + env["ENDPOINT"] = dts_endpoint + env["TASKHUB"] = unique_taskhub + + # Start worker subprocess + try: + # On Windows, use CREATE_NEW_PROCESS_GROUP to allow proper termination + # shell=True only on Windows to handle PATH resolution + if sys.platform == "win32": + process = subprocess.Popen( + [sys.executable, str(worker_file)], + cwd=str(sample_path), + creationflags=subprocess.CREATE_NEW_PROCESS_GROUP, + shell=True, + env=env, + text=True, + ) + # On Unix, don't use shell=True to avoid shell wrapper issues + else: + process = subprocess.Popen( + [sys.executable, str(worker_file)], + cwd=str(sample_path), + env=env, + text=True, + ) + except Exception as e: + pytest.fail(f"Failed to start worker subprocess: {e}") + + # Wait for worker to initialize + time.sleep(2) + + # Check if process is still running + if process.poll() is not None: + stderr_output = process.stderr.read() if process.stderr else "" + pytest.fail(f"Worker process exited prematurely. stderr: {stderr_output}") + + # Provide worker info to tests + worker_info = { + "process": process, + "endpoint": dts_endpoint, + "taskhub": unique_taskhub, + } + + try: + yield worker_info + finally: + # Cleanup: terminate worker subprocess + try: + process.terminate() + try: + process.wait(timeout=5) + except subprocess.TimeoutExpired: + process.kill() + process.wait() + except Exception as e: + logging.warning(f"Error during worker process cleanup: {e}") diff --git a/python/packages/durabletask/tests/integration_tests/dt_testutils.py b/python/packages/durabletask/tests/integration_tests/dt_testutils.py new file mode 100644 index 0000000000..34696b42ff --- /dev/null +++ b/python/packages/durabletask/tests/integration_tests/dt_testutils.py @@ -0,0 +1,205 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Test utilities for durabletask integration tests.""" + +import json +import time +from typing import Any + +from durabletask.azuremanaged.client import DurableTaskSchedulerClient +from durabletask.client import OrchestrationStatus + +from agent_framework_durabletask import DurableAIAgentClient + + +def create_dts_client(endpoint: str, taskhub: str) -> DurableTaskSchedulerClient: + """ + Create a DurableTaskSchedulerClient with common configuration. + + Args: + endpoint: The DTS endpoint address + taskhub: The task hub name + + Returns: + A configured DurableTaskSchedulerClient instance + """ + return DurableTaskSchedulerClient( + host_address=endpoint, + secure_channel=False, + taskhub=taskhub, + token_credential=None, + ) + + +def create_agent_client( + endpoint: str, + taskhub: str, + max_poll_retries: int = 90, +) -> tuple[DurableTaskSchedulerClient, DurableAIAgentClient]: + """ + Create a DurableAIAgentClient with the underlying DTS client. 
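+
+    Both clients target the same endpoint and task hub, so tests can run agents
+    through the agent client and inspect orchestrations through the DTS client.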
+ + Args: + endpoint: The DTS endpoint address + taskhub: The task hub name + max_poll_retries: Max poll retries for the agent client + + Returns: + A tuple of (DurableTaskSchedulerClient, DurableAIAgentClient) + """ + dts_client = create_dts_client(endpoint, taskhub) + agent_client = DurableAIAgentClient(dts_client, max_poll_retries=max_poll_retries) + return dts_client, agent_client + + +class OrchestrationHelper: + """Helper class for orchestration-related test operations.""" + + def __init__(self, dts_client: DurableTaskSchedulerClient): + """ + Initialize the orchestration helper. + + Args: + dts_client: The DurableTaskSchedulerClient instance to use + """ + self.client = dts_client + + def wait_for_orchestration( + self, + instance_id: str, + timeout: float = 60.0, + ) -> Any: + """ + Wait for an orchestration to complete. + + Args: + instance_id: The orchestration instance ID + timeout: Maximum time to wait in seconds + + Returns: + The final OrchestrationMetadata + + Raises: + TimeoutError: If the orchestration doesn't complete within timeout + RuntimeError: If the orchestration fails + """ + # Use the built-in wait_for_orchestration_completion method + metadata = self.client.wait_for_orchestration_completion( + instance_id=instance_id, + timeout=int(timeout), + ) + + if metadata is None: + raise TimeoutError(f"Orchestration {instance_id} did not complete within {timeout} seconds") + + # Check if failed or terminated + if metadata.runtime_status == OrchestrationStatus.FAILED: + raise RuntimeError(f"Orchestration {instance_id} failed: {metadata.serialized_custom_status}") + if metadata.runtime_status == OrchestrationStatus.TERMINATED: + raise RuntimeError(f"Orchestration {instance_id} was terminated") + + return metadata + + def wait_for_orchestration_with_output( + self, + instance_id: str, + timeout: float = 60.0, + ) -> tuple[Any, Any]: + """ + Wait for an orchestration to complete and return its output. + + Args: + instance_id: The orchestration instance ID + timeout: Maximum time to wait in seconds + + Returns: + A tuple of (OrchestrationMetadata, output) + + Raises: + TimeoutError: If the orchestration doesn't complete within timeout + RuntimeError: If the orchestration fails + """ + metadata = self.wait_for_orchestration(instance_id, timeout) + + # The output should be available in the metadata + return metadata, metadata.serialized_output + + def get_orchestration_status(self, instance_id: str) -> Any | None: + """ + Get the current status of an orchestration. + + Args: + instance_id: The orchestration instance ID + + Returns: + The OrchestrationMetadata or None if not found + """ + try: + # Try to wait with a short timeout to get current status + return self.client.wait_for_orchestration_completion( + instance_id=instance_id, + timeout=1, # Very short timeout, just checking status + ) + except Exception: + return None + + def raise_event( + self, + instance_id: str, + event_name: str, + event_data: Any = None, + ) -> None: + """ + Raise an external event to an orchestration. + + Args: + instance_id: The orchestration instance ID + event_name: The name of the event + event_data: The event data payload + """ + self.client.raise_orchestration_event(instance_id, event_name, data=event_data) + + def wait_for_notification(self, instance_id: str, timeout_seconds: int = 30) -> bool: + """Wait for the orchestration to reach a notification point. + + Polls the orchestration status until it appears to be waiting for approval. 
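+
+        A notification point is detected when the orchestration's custom status
+        starts with "requesting human feedback".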
+
+        Args:
+            instance_id: The orchestration instance ID
+            timeout_seconds: Maximum time to wait
+
+        Returns:
+            True if notification detected, False if timeout
+        """
+        start_time = time.time()
+        while time.time() - start_time < timeout_seconds:
+            try:
+                metadata = self.client.get_orchestration_state(
+                    instance_id=instance_id,
+                )
+
+                if metadata:
+                    # Check if we're waiting for approval by examining custom status
+                    if metadata.serialized_custom_status:
+                        try:
+                            custom_status = json.loads(metadata.serialized_custom_status)
+                            # Handle both string and dict custom status
+                            status_str = custom_status if isinstance(custom_status, str) else str(custom_status)
+                            if status_str.lower().startswith("requesting human feedback"):
+                                return True
+                        except (json.JSONDecodeError, AttributeError):
+                            # If it's not JSON, treat as plain string
+                            if metadata.serialized_custom_status.lower().startswith("requesting human feedback"):
+                                return True
+
+                    # Check for terminal states
+                    if metadata.runtime_status.name == "COMPLETED" or metadata.runtime_status.name == "FAILED":
+                        return False
+            except Exception:
+                # Silently ignore transient errors during polling (e.g., network issues, service unavailable).
+                # The loop will retry until timeout, allowing the service to recover.
+                pass
+
+            time.sleep(1)
+
+        return False
diff --git a/python/packages/durabletask/tests/integration_tests/test_01_dt_single_agent.py b/python/packages/durabletask/tests/integration_tests/test_01_dt_single_agent.py
new file mode 100644
index 0000000000..38ca54050c
--- /dev/null
+++ b/python/packages/durabletask/tests/integration_tests/test_01_dt_single_agent.py
@@ -0,0 +1,89 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+"""Integration tests for single agent functionality.
+
+Tests basic agent operations including:
+- Agent registration and retrieval
+- Single agent interactions
+- Conversation continuity across multiple messages
+- Multi-threaded agent usage
+- Thread context isolation between sessions
+"""
+
+from typing import Any
+
+import pytest
+from dt_testutils import create_agent_client
+
+# Module-level markers - applied to all tests in this module
+pytestmark = [
+    pytest.mark.sample("01_single_agent"),
+    pytest.mark.integration_test,
+    pytest.mark.requires_azure_openai,
+    pytest.mark.requires_dts,
+]
+
+
+class TestSingleAgent:
+    """Test suite for single agent functionality."""
+
+    @pytest.fixture(autouse=True)
+    def setup(self, worker_process: dict[str, Any], dts_endpoint: str) -> None:
+        """Setup test fixtures."""
+        self.endpoint: str = dts_endpoint
+        self.taskhub: str = str(worker_process["taskhub"])
+
+        # Create agent client
+        _, self.agent_client = create_agent_client(self.endpoint, self.taskhub)
+
+    def test_agent_registration(self) -> None:
+        """Test that the Joker agent is registered and accessible."""
+        agent = self.agent_client.get_agent("Joker")
+        assert agent is not None
+        assert agent.name == "Joker"
+
+    def test_single_interaction(self):
+        """Test a single interaction with the agent."""
+        agent = self.agent_client.get_agent("Joker")
+        thread = agent.get_new_thread()
+
+        response = agent.run("Tell me a short joke about programming.", thread=thread)
+
+        assert response is not None
+        assert response.text is not None
+        assert len(response.text) > 0
+
+    def test_conversation_continuity(self):
+        """Test that conversation context is maintained across turns."""
+        agent = self.agent_client.get_agent("Joker")
+        thread = agent.get_new_thread()
+
+        # First turn: Ask for a joke about a specific topic
+        response1 = agent.run("Tell me a joke about cats.", thread=thread)
+        assert response1 is not None
+        assert len(response1.text) > 0
+
+        # Second turn: Ask a follow-up that requires context
+        response2 = agent.run("Can you make it funnier?", thread=thread)
+        assert response2 is not None
+        assert len(response2.text) > 0
+
+        # The agent should understand "it" refers to the previous joke
+
+    def test_multiple_threads(self):
+        """Test that different threads maintain separate contexts."""
+        agent = self.agent_client.get_agent("Joker")
+
+        # Create two separate threads
+        thread1 = agent.get_new_thread()
+        thread2 = agent.get_new_thread()
+
+        assert thread1.session_id != thread2.session_id
+
+        # Send different messages to each thread
+        response1 = agent.run("Tell me a joke about dogs.", thread=thread1)
+        response2 = agent.run("Tell me a joke about birds.", thread=thread2)
+
+        assert response1 is not None
+        assert response2 is not None
+        assert response1.text != response2.text
diff --git a/python/packages/durabletask/tests/integration_tests/test_02_dt_multi_agent.py b/python/packages/durabletask/tests/integration_tests/test_02_dt_multi_agent.py
new file mode 100644
index 0000000000..da5f12abe4
--- /dev/null
+++ b/python/packages/durabletask/tests/integration_tests/test_02_dt_multi_agent.py
@@ -0,0 +1,103 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+"""Integration tests for multi-agent functionality.
+
+Tests operations with multiple specialized agents:
+- Multiple agent registration
+- Agent-specific tool usage
+- Independent thread management per agent
+- Sequential multi-turn calls to a single agent
+- Agent isolation and tool routing
+"""
+
+from typing import Any
+
+import pytest
+from dt_testutils import create_agent_client
+
+# Agent names from the 02_multi_agent sample
+WEATHER_AGENT_NAME: str = "WeatherAgent"
+MATH_AGENT_NAME: str = "MathAgent"
+
+# Module-level markers - applied to all tests in this module
+pytestmark = [
+    pytest.mark.sample("02_multi_agent"),
+    pytest.mark.integration_test,
+    pytest.mark.requires_azure_openai,
+    pytest.mark.requires_dts,
+]
+
+
+class TestMultiAgent:
+    """Test suite for multi-agent functionality."""
+
+    @pytest.fixture(autouse=True)
+    def setup(self, worker_process: dict[str, Any], dts_endpoint: str) -> None:
+        """Setup test fixtures."""
+        self.endpoint: str = dts_endpoint
+        self.taskhub: str = str(worker_process["taskhub"])
+
+        # Create agent client
+        _, self.agent_client = create_agent_client(self.endpoint, self.taskhub)
+
+    def test_multiple_agents_registered(self) -> None:
+        """Test that both agents are registered and accessible."""
+        weather_agent = self.agent_client.get_agent(WEATHER_AGENT_NAME)
+        math_agent = self.agent_client.get_agent(MATH_AGENT_NAME)
+
+        assert weather_agent is not None
+        assert weather_agent.name == WEATHER_AGENT_NAME
+        assert math_agent is not None
+        assert math_agent.name == MATH_AGENT_NAME
+
+    def test_weather_agent_with_tool(self):
+        """Test weather agent with weather tool execution."""
+        agent = self.agent_client.get_agent(WEATHER_AGENT_NAME)
+        thread = agent.get_new_thread()
+
+        response = agent.run("What's the weather in Seattle?", thread=thread)
+
+        assert response is not None
+        assert response.text is not None
+        # Should contain weather information from the tool
+        assert len(response.text) > 0
+
+        # Verify that the get_weather tool was actually invoked
+        tool_calls = [
+            content for msg in response.messages for content in msg.contents if content.type == "function_call"
+        ]
+        assert len(tool_calls) > 0, "Expected at least one tool call"
+        assert any(call.name ==
"get_weather" for call in tool_calls), "Expected get_weather tool to be called" + + def test_math_agent_with_tool(self): + """Test math agent with calculation tool execution.""" + agent = self.agent_client.get_agent(MATH_AGENT_NAME) + thread = agent.get_new_thread() + + response = agent.run("Calculate a 20% tip on a $50 bill.", thread=thread) + + assert response is not None + assert response.text is not None + # Should contain calculation results from the tool + assert len(response.text) > 0 + + # Verify that the calculate_tip tool was actually invoked + tool_calls = [ + content for msg in response.messages for content in msg.contents if content.type == "function_call" + ] + assert len(tool_calls) > 0, "Expected at least one tool call" + assert any(call.name == "calculate_tip" for call in tool_calls), "Expected calculate_tip tool to be called" + + def test_multiple_calls_to_same_agent(self): + """Test multiple sequential calls to the same agent.""" + agent = self.agent_client.get_agent(WEATHER_AGENT_NAME) + thread = agent.get_new_thread() + + # Multiple weather queries + response1 = agent.run("What's the weather in Chicago?", thread=thread) + response2 = agent.run("And what about Los Angeles?", thread=thread) + + assert response1 is not None + assert response2 is not None + assert len(response1.text) > 0 + assert len(response2.text) > 0 diff --git a/python/packages/durabletask/tests/integration_tests/test_03_dt_single_agent_streaming.py b/python/packages/durabletask/tests/integration_tests/test_03_dt_single_agent_streaming.py new file mode 100644 index 0000000000..d127a87356 --- /dev/null +++ b/python/packages/durabletask/tests/integration_tests/test_03_dt_single_agent_streaming.py @@ -0,0 +1,226 @@ +# Copyright (c) Microsoft. All rights reserved. + +""" +Integration Tests for Reliable Streaming Sample + +Tests the reliable streaming sample using Redis Streams for persistent message delivery. + +The worker process is automatically started by the test fixture. 
+
+Prerequisites:
+- Azure OpenAI credentials configured (see packages/durabletask/tests/integration_tests/.env.example)
+- DTS emulator running (docker run -d -p 8080:8080 mcr.microsoft.com/durabletask/emulator:latest)
+- Redis running (docker run -d --name redis -p 6379:6379 redis:latest)
+
+Usage:
+    uv run pytest packages/durabletask/tests/integration_tests/test_03_dt_single_agent_streaming.py -v
+"""
+
+import asyncio
+import os
+import sys
+import time
+from datetime import timedelta
+from pathlib import Path
+from typing import Any
+
+import pytest
+import redis.asyncio as aioredis
+from dt_testutils import OrchestrationHelper, create_agent_client
+
+# Add sample directory to path to import RedisStreamResponseHandler
+SAMPLE_DIR = Path(__file__).parents[4] / "samples" / "getting_started" / "durabletask" / "03_single_agent_streaming"
+sys.path.insert(0, str(SAMPLE_DIR))
+
+from redis_stream_response_handler import RedisStreamResponseHandler  # type: ignore[reportMissingImports] # noqa: E402
+
+# Module-level markers - applied to all tests in this file
+pytestmark = [
+    pytest.mark.sample("03_single_agent_streaming"),
+    pytest.mark.integration_test,
+    pytest.mark.requires_azure_openai,
+    pytest.mark.requires_dts,
+    pytest.mark.requires_redis,
+]
+
+
+class TestSampleReliableStreaming:
+    """Tests for 03_single_agent_streaming sample."""
+
+    @pytest.fixture(autouse=True)
+    def setup(self, worker_process: dict[str, Any], dts_endpoint: str) -> None:
+        """Setup test fixtures."""
+        self.endpoint: str = dts_endpoint
+        self.taskhub: str = str(worker_process["taskhub"])
+
+        # Create agent client
+        dts_client, self.agent_client = create_agent_client(self.endpoint, self.taskhub)
+        self.helper = OrchestrationHelper(dts_client)
+
+        # Redis configuration
+        self.redis_connection_string = os.environ.get("REDIS_CONNECTION_STRING", "redis://localhost:6379")
+        self.redis_stream_ttl_minutes = int(os.environ.get("REDIS_STREAM_TTL_MINUTES", "10"))
+
+    async def _get_stream_handler(self) -> RedisStreamResponseHandler:  # type: ignore[reportMissingTypeStubs]
+        """Create a new Redis stream handler for each request."""
+        redis_client = aioredis.from_url(  # type: ignore[reportUnknownMemberType]
+            self.redis_connection_string,
+            encoding="utf-8",
+            decode_responses=False,
+        )
+        return RedisStreamResponseHandler(  # type: ignore[reportUnknownMemberType]
+            redis_client=redis_client,
+            stream_ttl=timedelta(minutes=self.redis_stream_ttl_minutes),
+        )
+
+    async def _stream_from_redis(
+        self,
+        thread_id: str,
+        cursor: str | None = None,
+        timeout: float = 30.0,
+    ) -> tuple[str, bool, str]:
+        """
+        Stream responses from Redis using the sample's RedisStreamResponseHandler. 
+ + Args: + thread_id: The conversation/thread ID to stream from + cursor: Optional cursor to resume from + timeout: Maximum time to wait for stream completion + + Returns: + Tuple of (accumulated text, completion status, last entry_id) + """ + accumulated_text = "" + is_complete = False + last_entry_id = cursor if cursor else "0-0" + start_time = time.time() + + async with await self._get_stream_handler() as stream_handler: # type: ignore[reportUnknownMemberType] + try: + async for chunk in stream_handler.read_stream(thread_id, cursor): # type: ignore[reportUnknownMemberType] + if time.time() - start_time > timeout: + break + + last_entry_id = chunk.entry_id # type: ignore[reportUnknownMemberType] + + if chunk.error: # type: ignore[reportUnknownMemberType] + # Stream not found or timeout - this is expected if agent hasn't written yet + # Don't raise an error, just return what we have + break + + if chunk.is_done: # type: ignore[reportUnknownMemberType] + is_complete = True + break + + if chunk.text: # type: ignore[reportUnknownMemberType] + accumulated_text += chunk.text # type: ignore[reportUnknownMemberType] + + except Exception as ex: + # For test purposes, we catch exceptions and return what we have + if "timed out" not in str(ex).lower(): + raise + + return accumulated_text, is_complete, last_entry_id # type: ignore[reportReturnType] + + def test_agent_run_and_stream(self) -> None: + """Test agent execution with Redis streaming.""" + # Get the TravelPlanner agent + travel_planner = self.agent_client.get_agent("TravelPlanner") + assert travel_planner is not None + assert travel_planner.name == "TravelPlanner" + + # Create a new thread + thread = travel_planner.get_new_thread() + assert thread.session_id is not None + assert thread.session_id.key is not None + thread_id = str(thread.session_id.key) + + # Start agent run with wait_for_response=False for non-blocking execution + travel_planner.run( + "Plan a 1-day trip to Seattle in 1 sentence", thread=thread, options={"wait_for_response": False} + ) + + # Poll Redis stream with retries to handle race conditions + # The agent may take a few seconds to process and start writing to Redis + # We use cursor-based resumption to continue reading from where we left off + max_retries = 20 + retry_count = 0 + accumulated_text = "" + is_complete = False + cursor: str | None = None + + while retry_count < max_retries and not is_complete: + text, is_complete, last_cursor = asyncio.run( + self._stream_from_redis(thread_id, cursor=cursor, timeout=10.0) + ) + accumulated_text += text + cursor = last_cursor # Resume from last position on next read + + if is_complete: + # Stream completed successfully + break + + if len(accumulated_text) > 0: + # Got content but not completion marker yet - keep reading without delay + # The agent may still be streaming or about to write completion marker + continue + + # No content yet - wait before retrying + time.sleep(2) + retry_count += 1 + + # Verify we got content + assert len(accumulated_text) > 0, ( + f"Expected text content but got empty string for thread_id: {thread_id} after {retry_count} retries" + ) + assert "seattle" in accumulated_text.lower(), f"Expected 'seattle' in response but got: {accumulated_text}" + assert is_complete, "Expected stream to be complete" + + def test_stream_with_cursor_resumption(self) -> None: + """Test streaming with cursor-based resumption.""" + # Get the TravelPlanner agent + travel_planner = self.agent_client.get_agent("TravelPlanner") + thread = travel_planner.get_new_thread() + 
assert thread.session_id is not None + assert thread.session_id.key is not None + thread_id = str(thread.session_id.key) + + # Start agent run + travel_planner.run("What's the weather like?", thread=thread, options={"wait_for_response": False}) + + # Wait for agent to start writing + time.sleep(3) + + # Read partial stream to get a cursor + async def get_partial_stream() -> tuple[str, str]: + async with await self._get_stream_handler() as stream_handler: # type: ignore[reportUnknownMemberType] + accumulated_text = "" + last_entry_id = "0-0" + chunk_count = 0 + + # Read just first 2 chunks + async for chunk in stream_handler.read_stream(thread_id): # type: ignore[reportUnknownMemberType] + last_entry_id = chunk.entry_id # type: ignore[reportUnknownMemberType] + if chunk.text: # type: ignore[reportUnknownMemberType] + accumulated_text += chunk.text # type: ignore[reportUnknownMemberType] + chunk_count += 1 + if chunk_count >= 2: + break + + return accumulated_text, last_entry_id # type: ignore[reportReturnType] + + partial_text, cursor = asyncio.run(get_partial_stream()) + + # Resume from cursor + remaining_text, _, _ = asyncio.run(self._stream_from_redis(thread_id, cursor=cursor)) + + # Verify we got some initial content + assert len(partial_text) > 0 + + # Combined text should be coherent + full_text = partial_text + remaining_text + assert len(full_text) > 0 + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) diff --git a/python/packages/durabletask/tests/integration_tests/test_04_dt_single_agent_orchestration_chaining.py b/python/packages/durabletask/tests/integration_tests/test_04_dt_single_agent_orchestration_chaining.py new file mode 100644 index 0000000000..85cdde270e --- /dev/null +++ b/python/packages/durabletask/tests/integration_tests/test_04_dt_single_agent_orchestration_chaining.py @@ -0,0 +1,105 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Integration tests for single agent orchestration with chaining. 
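+
+Each test schedules the sample's orchestrator by name through the DTS client and
+then waits on it, roughly as follows (an informal sketch using the helper names
+from this file):
+
+    instance_id = dts_client.schedule_new_orchestration(
+        orchestrator="single_agent_chaining_orchestration", input="")
+    metadata, output = orch_helper.wait_for_orchestration_with_output(
+        instance_id=instance_id, timeout=120.0)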
+ +Tests orchestration patterns with sequential agent calls: +- Orchestration registration and execution +- Sequential agent calls on same thread +- Conversation continuity in orchestrations +- Thread context preservation +""" + +import json +import logging +from typing import Any + +import pytest +from dt_testutils import OrchestrationHelper, create_agent_client +from durabletask.client import OrchestrationStatus + +# Agent name from the 04_single_agent_orchestration_chaining sample +WRITER_AGENT_NAME: str = "WriterAgent" + +# Configure logging +logging.basicConfig(level=logging.WARNING) + +# Module-level markers - applied to all tests in this module +pytestmark = [ + pytest.mark.sample("04_single_agent_orchestration_chaining"), + pytest.mark.integration_test, + pytest.mark.requires_azure_openai, + pytest.mark.requires_dts, +] + + +class TestSingleAgentOrchestrationChaining: + """Test suite for single agent orchestration with chaining.""" + + @pytest.fixture(autouse=True) + def setup(self, worker_process: dict[str, Any], dts_endpoint: str) -> None: + """Setup test fixtures.""" + self.endpoint: str = dts_endpoint + self.taskhub: str = str(worker_process["taskhub"]) + + # Create agent client and DTS client + self.dts_client, self.agent_client = create_agent_client(self.endpoint, self.taskhub) + + # Create orchestration helper + self.orch_helper = OrchestrationHelper(self.dts_client) + + def test_agent_registered(self): + """Test that the Writer agent is registered.""" + agent = self.agent_client.get_agent(WRITER_AGENT_NAME) + assert agent is not None + assert agent.name == WRITER_AGENT_NAME + + def test_chaining_context_preserved(self): + """Test that context is preserved across agent runs in orchestration.""" + # Start the orchestration + instance_id = self.dts_client.schedule_new_orchestration( + orchestrator="single_agent_chaining_orchestration", + input="", + ) + + # Wait for completion with output + metadata, output = self.orch_helper.wait_for_orchestration_with_output( + instance_id=instance_id, + timeout=120.0, + ) + + assert metadata is not None + assert output is not None + + # The final output should be a refined sentence + final_text = json.loads(output) + + # Should be a meaningful sentence (not empty or error message) + assert len(final_text) > 10 + assert not final_text.startswith("Error") + + def test_multiple_orchestration_instances(self): + """Test that multiple orchestration instances can run independently.""" + # Start two orchestrations + instance_id_1 = self.dts_client.schedule_new_orchestration( + orchestrator="single_agent_chaining_orchestration", + input="", + ) + instance_id_2 = self.dts_client.schedule_new_orchestration( + orchestrator="single_agent_chaining_orchestration", + input="", + ) + + assert instance_id_1 != instance_id_2 + + # Both should complete + metadata_1 = self.orch_helper.wait_for_orchestration( + instance_id=instance_id_1, + timeout=120.0, + ) + metadata_2 = self.orch_helper.wait_for_orchestration( + instance_id=instance_id_2, + timeout=120.0, + ) + + assert metadata_1.runtime_status == OrchestrationStatus.COMPLETED + assert metadata_2.runtime_status == OrchestrationStatus.COMPLETED diff --git a/python/packages/durabletask/tests/integration_tests/test_05_dt_multi_agent_orchestration_concurrency.py b/python/packages/durabletask/tests/integration_tests/test_05_dt_multi_agent_orchestration_concurrency.py new file mode 100644 index 0000000000..367100ef0c --- /dev/null +++ 
b/python/packages/durabletask/tests/integration_tests/test_05_dt_multi_agent_orchestration_concurrency.py @@ -0,0 +1,81 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Integration tests for multi-agent orchestration with concurrency. + +Tests concurrent execution patterns: +- Parallel agent execution +- Concurrent orchestration tasks +- Independent thread management in parallel +- Result aggregation from concurrent calls +""" + +import json +import logging +from typing import Any + +import pytest +from dt_testutils import OrchestrationHelper, create_agent_client +from durabletask.client import OrchestrationStatus + +# Agent names from the 05_multi_agent_orchestration_concurrency sample +PHYSICIST_AGENT_NAME: str = "PhysicistAgent" +CHEMIST_AGENT_NAME: str = "ChemistAgent" + +# Configure logging +logging.basicConfig(level=logging.WARNING) + +# Module-level markers +pytestmark = [ + pytest.mark.sample("05_multi_agent_orchestration_concurrency"), + pytest.mark.integration_test, + pytest.mark.requires_dts, +] + + +class TestMultiAgentOrchestrationConcurrency: + """Test suite for multi-agent orchestration with concurrency.""" + + @pytest.fixture(autouse=True) + def setup(self, worker_process: dict[str, Any], dts_endpoint: str) -> None: + """Setup test fixtures.""" + self.endpoint = dts_endpoint + self.taskhub = worker_process["taskhub"] + + # Create agent client and DTS client + self.dts_client, self.agent_client = create_agent_client(self.endpoint, self.taskhub) + + # Create orchestration helper + self.orch_helper = OrchestrationHelper(self.dts_client) + + def test_agents_registered(self): + """Test that both agents are registered.""" + physicist = self.agent_client.get_agent(PHYSICIST_AGENT_NAME) + chemist = self.agent_client.get_agent(CHEMIST_AGENT_NAME) + + assert physicist is not None + assert physicist.name == PHYSICIST_AGENT_NAME + assert chemist is not None + assert chemist.name == CHEMIST_AGENT_NAME + + def test_different_prompts(self): + """Test concurrent orchestration with different prompts.""" + prompts = [ + "What is temperature?", + "Explain molecules.", + ] + + for prompt in prompts: + instance_id = self.dts_client.schedule_new_orchestration( + orchestrator="multi_agent_concurrent_orchestration", + input=prompt, + ) + + metadata, output = self.orch_helper.wait_for_orchestration_with_output( + instance_id=instance_id, + timeout=120.0, + ) + + assert metadata.runtime_status == OrchestrationStatus.COMPLETED + result = json.loads(output) + assert "physicist" in result + assert "chemist" in result diff --git a/python/packages/durabletask/tests/integration_tests/test_06_dt_multi_agent_orchestration_conditionals.py b/python/packages/durabletask/tests/integration_tests/test_06_dt_multi_agent_orchestration_conditionals.py new file mode 100644 index 0000000000..9642cd3672 --- /dev/null +++ b/python/packages/durabletask/tests/integration_tests/test_06_dt_multi_agent_orchestration_conditionals.py @@ -0,0 +1,95 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Integration tests for multi-agent orchestration with conditionals. 
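+
+The orchestration under test routes each email based on the spam agent's
+verdict; the tests below exercise both branches with payloads of the form:
+
+    {"email_id": "spam-001",
+     "email_content": "Buy cheap medications online! No prescription needed!"}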
+ +Tests conditional orchestration patterns: +- Conditional branching in orchestrations +- Agent-based decision making +- Activity function execution +- Structured output handling +- Conditional routing based on agent responses +""" + +import logging +from typing import Any + +import pytest +from dt_testutils import OrchestrationHelper, create_agent_client +from durabletask.client import OrchestrationStatus + +# Agent names from the 06_multi_agent_orchestration_conditionals sample +SPAM_AGENT_NAME: str = "SpamDetectionAgent" +EMAIL_AGENT_NAME: str = "EmailAssistantAgent" + +# Configure logging +logging.basicConfig(level=logging.WARNING) + +# Module-level markers +pytestmark = [ + pytest.mark.sample("06_multi_agent_orchestration_conditionals"), + pytest.mark.integration_test, + pytest.mark.requires_dts, +] + + +class TestMultiAgentOrchestrationConditionals: + """Test suite for multi-agent orchestration with conditionals.""" + + @pytest.fixture(autouse=True) + def setup(self, worker_process: dict[str, Any], dts_endpoint: str) -> None: + """Setup test fixtures.""" + self.endpoint: str = dts_endpoint + self.taskhub: str = str(worker_process["taskhub"]) + + # Create agent client and DTS client + self.dts_client, self.agent_client = create_agent_client(self.endpoint, self.taskhub) + + # Create orchestration helper + self.orch_helper = OrchestrationHelper(self.dts_client) + + def test_agents_registered(self): + """Test that both agents are registered.""" + spam_agent = self.agent_client.get_agent(SPAM_AGENT_NAME) + email_agent = self.agent_client.get_agent(EMAIL_AGENT_NAME) + + assert spam_agent is not None + assert spam_agent.name == SPAM_AGENT_NAME + assert email_agent is not None + assert email_agent.name == EMAIL_AGENT_NAME + + def test_conditional_branching(self): + """Test that conditional branching works correctly.""" + # Test with obvious spam + spam_payload = { + "email_id": "spam-001", + "email_content": "Buy cheap medications online! No prescription needed! Limited time offer!", + } + + spam_instance_id = self.dts_client.schedule_new_orchestration( + orchestrator="spam_detection_orchestration", + input=spam_payload, + ) + + # Test with legitimate email + legit_payload = { + "email_id": "legit-001", + "email_content": "Hi team, please review the attached document before our meeting tomorrow.", + } + + legit_instance_id = self.dts_client.schedule_new_orchestration( + orchestrator="spam_detection_orchestration", + input=legit_payload, + ) + + # Both should complete successfully (different branches) + spam_metadata = self.orch_helper.wait_for_orchestration( + instance_id=spam_instance_id, + timeout=120.0, + ) + legit_metadata = self.orch_helper.wait_for_orchestration( + instance_id=legit_instance_id, + timeout=120.0, + ) + + assert spam_metadata.runtime_status == OrchestrationStatus.COMPLETED + assert legit_metadata.runtime_status == OrchestrationStatus.COMPLETED diff --git a/python/packages/durabletask/tests/integration_tests/test_07_dt_single_agent_orchestration_hitl.py b/python/packages/durabletask/tests/integration_tests/test_07_dt_single_agent_orchestration_hitl.py new file mode 100644 index 0000000000..2a668e9ede --- /dev/null +++ b/python/packages/durabletask/tests/integration_tests/test_07_dt_single_agent_orchestration_hitl.py @@ -0,0 +1,170 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Integration tests for single agent orchestration with human-in-the-loop. 
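+
+Approval decisions reach the waiting orchestration as external events; the tests
+below deliver them roughly like this (an informal sketch using the helper and
+constant defined in this file):
+
+    orch_helper.raise_event(
+        instance_id=instance_id,
+        event_name=HUMAN_APPROVAL_EVENT,
+        event_data={"approved": True, "feedback": ""},
+    )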
+ +Tests human-in-the-loop (HITL) patterns: +- External event waiting and handling +- Timeout handling in orchestrations +- Iterative refinement with human feedback +- Activity function integration +- Approval workflow patterns +""" + +import logging +from typing import Any + +import pytest +from dt_testutils import OrchestrationHelper, create_agent_client +from durabletask.client import OrchestrationStatus + +# Constants from the 07_single_agent_orchestration_hitl sample +WRITER_AGENT_NAME: str = "WriterAgent" +HUMAN_APPROVAL_EVENT: str = "HumanApproval" + +# Configure logging +logging.basicConfig(level=logging.WARNING) + +# Module-level markers +pytestmark = [ + pytest.mark.sample("07_single_agent_orchestration_hitl"), + pytest.mark.integration_test, + pytest.mark.requires_dts, +] + + +class TestSingleAgentOrchestrationHITL: + """Test suite for single agent orchestration with human-in-the-loop.""" + + @pytest.fixture(autouse=True) + def setup(self, worker_process: dict[str, Any], dts_endpoint: str) -> None: + """Setup test fixtures.""" + self.endpoint: str = str(worker_process["endpoint"]) + self.taskhub: str = str(worker_process["taskhub"]) + + logging.info(f"Using taskhub: {self.taskhub} at endpoint: {self.endpoint}") + + # Create agent client and DTS client + self.dts_client, self.agent_client = create_agent_client(self.endpoint, self.taskhub) + + # Create orchestration helper + self.orch_helper = OrchestrationHelper(self.dts_client) + + def test_agent_registered(self): + """Test that the Writer agent is registered.""" + agent = self.agent_client.get_agent(WRITER_AGENT_NAME) + assert agent is not None + assert agent.name == WRITER_AGENT_NAME + + def test_hitl_orchestration_with_approval(self): + """Test HITL orchestration with immediate approval.""" + payload = { + "topic": "The benefits of continuous learning", + "max_review_attempts": 3, + "approval_timeout_seconds": 60, + } + + # Start the orchestration + instance_id = self.dts_client.schedule_new_orchestration( + orchestrator="content_generation_hitl_orchestration", + input=payload, + ) + + assert instance_id is not None + + # Wait for orchestration to reach notification point + notification_received = self.orch_helper.wait_for_notification(instance_id, timeout_seconds=90) + assert notification_received, "Failed to receive notification from orchestration" + + # Send approval event + approval_data = {"approved": True, "feedback": ""} + self.orch_helper.raise_event( + instance_id=instance_id, + event_name=HUMAN_APPROVAL_EVENT, + event_data=approval_data, + ) + + # Wait for completion + metadata = self.orch_helper.wait_for_orchestration( + instance_id=instance_id, + timeout=90.0, + ) + + assert metadata is not None + assert metadata.runtime_status == OrchestrationStatus.COMPLETED + + def test_hitl_orchestration_with_rejection_and_feedback(self): + """Test HITL orchestration with rejection and iterative refinement.""" + payload = { + "topic": "Artificial Intelligence in healthcare", + "max_review_attempts": 3, + "approval_timeout_seconds": 60, + } + + # Start the orchestration + instance_id = self.dts_client.schedule_new_orchestration( + orchestrator="content_generation_hitl_orchestration", + input=payload, + ) + + # Wait for orchestration to reach notification point + notification_received = self.orch_helper.wait_for_notification(instance_id, timeout_seconds=90) + assert notification_received, "Failed to receive notification from orchestration" + + # First rejection with feedback + rejection_data = { + "approved": False, + 
"feedback": "Please make it more concise and add specific examples.", + } + self.orch_helper.raise_event( + instance_id=instance_id, + event_name=HUMAN_APPROVAL_EVENT, + event_data=rejection_data, + ) + + # Wait for orchestration to refine and reach notification point again + notification_received = self.orch_helper.wait_for_notification(instance_id, timeout_seconds=90) + assert notification_received, "Failed to receive notification after refinement" + + # Second approval + approval_data = {"approved": True, "feedback": ""} + self.orch_helper.raise_event( + instance_id=instance_id, + event_name=HUMAN_APPROVAL_EVENT, + event_data=approval_data, + ) + + # Wait for completion + metadata = self.orch_helper.wait_for_orchestration( + instance_id=instance_id, + timeout=90.0, + ) + + assert metadata is not None + assert metadata.runtime_status == OrchestrationStatus.COMPLETED + + def test_hitl_orchestration_timeout(self): + """Test HITL orchestration timeout behavior.""" + payload = { + "topic": "Cloud computing fundamentals", + "max_review_attempts": 1, + "approval_timeout_seconds": 0.1, # Short timeout for testing + } + + # Start the orchestration + instance_id = self.dts_client.schedule_new_orchestration( + orchestrator="content_generation_hitl_orchestration", + input=payload, + ) + + # Don't send any approval - let it timeout + # The orchestration should fail due to timeout + try: + metadata = self.orch_helper.wait_for_orchestration( + instance_id=instance_id, + timeout=90.0, + ) + # If it completes, it should be failed status due to timeout + assert metadata.runtime_status == OrchestrationStatus.FAILED + except (RuntimeError, TimeoutError): + # Expected - orchestration should timeout and fail + pass diff --git a/python/packages/durabletask/tests/test_agent_session_id.py b/python/packages/durabletask/tests/test_agent_session_id.py new file mode 100644 index 0000000000..5481e0109d --- /dev/null +++ b/python/packages/durabletask/tests/test_agent_session_id.py @@ -0,0 +1,300 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +"""Unit tests for AgentSessionId and DurableAgentThread.""" + +import pytest +from agent_framework import AgentThread + +from agent_framework_durabletask._models import AgentSessionId, DurableAgentThread + + +class TestAgentSessionId: + """Test suite for AgentSessionId.""" + + def test_init_creates_session_id(self) -> None: + """Test that AgentSessionId initializes correctly.""" + session_id = AgentSessionId(name="AgentEntity", key="test-key-123") + + assert session_id.name == "AgentEntity" + assert session_id.key == "test-key-123" + + def test_with_random_key_generates_guid(self) -> None: + """Test that with_random_key generates a GUID.""" + session_id = AgentSessionId.with_random_key(name="AgentEntity") + + assert session_id.name == "AgentEntity" + assert len(session_id.key) == 32 # UUID hex is 32 chars + # Verify it's a valid hex string + int(session_id.key, 16) + + def test_with_random_key_unique_keys(self) -> None: + """Test that with_random_key generates unique keys.""" + session_id1 = AgentSessionId.with_random_key(name="AgentEntity") + session_id2 = AgentSessionId.with_random_key(name="AgentEntity") + + assert session_id1.key != session_id2.key + + def test_str_representation(self) -> None: + """Test string representation.""" + session_id = AgentSessionId(name="AgentEntity", key="test-key-123") + str_repr = str(session_id) + + assert str_repr == "@AgentEntity@test-key-123" + + def test_repr_representation(self) -> None: + """Test repr representation.""" + session_id = AgentSessionId(name="AgentEntity", key="test-key") + repr_str = repr(session_id) + + assert "AgentSessionId" in repr_str + assert "AgentEntity" in repr_str + assert "test-key" in repr_str + + def test_parse_valid_session_id(self) -> None: + """Test parsing valid session ID string.""" + session_id = AgentSessionId.parse("@AgentEntity@test-key-123") + + assert session_id.name == "AgentEntity" + assert session_id.key == "test-key-123" + + def test_parse_invalid_format_no_prefix(self) -> None: + """Test parsing invalid format without @ prefix.""" + with pytest.raises(ValueError) as exc_info: + AgentSessionId.parse("AgentEntity@test-key") + + assert "Invalid agent session ID format" in str(exc_info.value) + + def test_parse_invalid_format_single_part(self) -> None: + """Test parsing invalid format with single part.""" + with pytest.raises(ValueError) as exc_info: + AgentSessionId.parse("@AgentEntity") + + assert "Invalid agent session ID format" in str(exc_info.value) + + def test_parse_with_multiple_at_signs_in_key(self) -> None: + """Test parsing with @ signs in the key.""" + session_id = AgentSessionId.parse("@AgentEntity@key-with@symbols") + + assert session_id.name == "AgentEntity" + assert session_id.key == "key-with@symbols" + + def test_parse_round_trip(self) -> None: + """Test round-trip parse and string conversion.""" + original = AgentSessionId(name="AgentEntity", key="test-key") + str_repr = str(original) + parsed = AgentSessionId.parse(str_repr) + + assert parsed.name == original.name + assert parsed.key == original.key + + def test_to_entity_name_adds_prefix(self) -> None: + """Test that to_entity_name adds the dafx- prefix.""" + entity_name = AgentSessionId.to_entity_name("TestAgent") + assert entity_name == "dafx-TestAgent" + + def test_parse_with_agent_name_override(self) -> None: + """Test parsing @name@key format with agent_name parameter overrides the name.""" + session_id = AgentSessionId.parse("@OriginalAgent@test-key-123", agent_name="OverriddenAgent") + + assert session_id.name == 
"OverriddenAgent" + assert session_id.key == "test-key-123" + + def test_parse_without_agent_name_uses_parsed_name(self) -> None: + """Test parsing @name@key format without agent_name uses name from string.""" + session_id = AgentSessionId.parse("@ParsedAgent@test-key-123") + + assert session_id.name == "ParsedAgent" + assert session_id.key == "test-key-123" + + def test_parse_plain_string_with_agent_name(self) -> None: + """Test parsing plain string with agent_name uses entire string as key.""" + session_id = AgentSessionId.parse("simple-thread-123", agent_name="TestAgent") + + assert session_id.name == "TestAgent" + assert session_id.key == "simple-thread-123" + + def test_parse_plain_string_without_agent_name_raises(self) -> None: + """Test parsing plain string without agent_name raises ValueError.""" + with pytest.raises(ValueError) as exc_info: + AgentSessionId.parse("simple-thread-123") + + assert "Invalid agent session ID format" in str(exc_info.value) + + +class TestDurableAgentThread: + """Test suite for DurableAgentThread.""" + + def test_init_with_session_id(self) -> None: + """Test DurableAgentThread initialization with session ID.""" + session_id = AgentSessionId(name="TestAgent", key="test-key") + thread = DurableAgentThread(session_id=session_id) + + assert thread.session_id is not None + assert thread.session_id == session_id + + def test_init_without_session_id(self) -> None: + """Test DurableAgentThread initialization without session ID.""" + thread = DurableAgentThread() + + assert thread.session_id is None + + def test_session_id_setter(self) -> None: + """Test setting a session ID to an existing thread.""" + thread = DurableAgentThread() + assert thread.session_id is None + + session_id = AgentSessionId(name="TestAgent", key="test-key") + thread.session_id = session_id + + assert thread.session_id is not None + assert thread.session_id == session_id + assert thread.session_id.name == "TestAgent" + + def test_from_session_id(self) -> None: + """Test creating DurableAgentThread from session ID.""" + session_id = AgentSessionId(name="TestAgent", key="test-key") + thread = DurableAgentThread.from_session_id(session_id) + + assert isinstance(thread, DurableAgentThread) + assert thread.session_id is not None + assert thread.session_id == session_id + assert thread.session_id.name == "TestAgent" + assert thread.session_id.key == "test-key" + + def test_from_session_id_with_service_thread_id(self) -> None: + """Test creating DurableAgentThread with service thread ID.""" + session_id = AgentSessionId(name="TestAgent", key="test-key") + thread = DurableAgentThread.from_session_id(session_id, service_thread_id="service-123") + + assert thread.session_id is not None + assert thread.session_id == session_id + assert thread.service_thread_id == "service-123" + + async def test_serialize_with_session_id(self) -> None: + """Test serialization includes session ID.""" + session_id = AgentSessionId(name="TestAgent", key="test-key") + thread = DurableAgentThread(session_id=session_id) + + serialized = await thread.serialize() + + assert isinstance(serialized, dict) + assert "durable_session_id" in serialized + assert serialized["durable_session_id"] == "@TestAgent@test-key" + + async def test_serialize_without_session_id(self) -> None: + """Test serialization without session ID.""" + thread = DurableAgentThread() + + serialized = await thread.serialize() + + assert isinstance(serialized, dict) + assert "durable_session_id" not in serialized + + async def 
test_deserialize_with_session_id(self) -> None: + """Test deserialization restores session ID.""" + serialized = { + "service_thread_id": "thread-123", + "durable_session_id": "@TestAgent@test-key", + } + + thread = await DurableAgentThread.deserialize(serialized) + + assert isinstance(thread, DurableAgentThread) + assert thread.session_id is not None + assert thread.session_id.name == "TestAgent" + assert thread.session_id.key == "test-key" + assert thread.service_thread_id == "thread-123" + + async def test_deserialize_without_session_id(self) -> None: + """Test deserialization without session ID.""" + serialized = { + "service_thread_id": "thread-456", + } + + thread = await DurableAgentThread.deserialize(serialized) + + assert isinstance(thread, DurableAgentThread) + assert thread.session_id is None + assert thread.service_thread_id == "thread-456" + + async def test_round_trip_serialization(self) -> None: + """Test round-trip serialization preserves session ID.""" + session_id = AgentSessionId(name="TestAgent", key="test-key-789") + original = DurableAgentThread(session_id=session_id) + + serialized = await original.serialize() + restored = await DurableAgentThread.deserialize(serialized) + + assert isinstance(restored, DurableAgentThread) + assert restored.session_id is not None + assert restored.session_id.name == session_id.name + assert restored.session_id.key == session_id.key + + async def test_deserialize_invalid_session_id_type(self) -> None: + """Test deserialization with invalid session ID type raises error.""" + serialized = { + "service_thread_id": "thread-123", + "durable_session_id": 12345, # Invalid type + } + + with pytest.raises(ValueError, match="durable_session_id must be a string"): + await DurableAgentThread.deserialize(serialized) + + +class TestAgentThreadCompatibility: + """Test suite for compatibility between AgentThread and DurableAgentThread.""" + + async def test_agent_thread_serialize(self) -> None: + """Test that base AgentThread can be serialized.""" + thread = AgentThread() + + serialized = await thread.serialize() + + assert isinstance(serialized, dict) + assert "service_thread_id" in serialized + + async def test_agent_thread_deserialize(self) -> None: + """Test that base AgentThread can be deserialized.""" + thread = AgentThread() + serialized = await thread.serialize() + + restored = await AgentThread.deserialize(serialized) + + assert isinstance(restored, AgentThread) + assert restored.service_thread_id == thread.service_thread_id + + async def test_durable_thread_is_agent_thread(self) -> None: + """Test that DurableAgentThread is an AgentThread.""" + thread = DurableAgentThread() + + assert isinstance(thread, AgentThread) + assert isinstance(thread, DurableAgentThread) + + +class TestModelIntegration: + """Test suite for integration between models.""" + + def test_session_id_string_format(self) -> None: + """Test that AgentSessionId string format is consistent.""" + session_id = AgentSessionId.with_random_key("AgentEntity") + session_id_str = str(session_id) + + assert session_id_str.startswith("@AgentEntity@") + + async def test_thread_with_session_preserves_on_serialization(self) -> None: + """Test that thread with session ID preserves it through serialization.""" + session_id = AgentSessionId(name="TestAgent", key="preserved-key") + thread = DurableAgentThread.from_session_id(session_id) + + # Serialize and deserialize + serialized = await thread.serialize() + restored = await DurableAgentThread.deserialize(serialized) + + # Session ID should 
be preserved + assert restored.session_id is not None + assert restored.session_id.name == "TestAgent" + assert restored.session_id.key == "preserved-key" + + +if __name__ == "__main__": + pytest.main([__file__, "-v", "--tb=short"]) diff --git a/python/packages/durabletask/tests/test_client.py b/python/packages/durabletask/tests/test_client.py new file mode 100644 index 0000000000..cf2ccfe1af --- /dev/null +++ b/python/packages/durabletask/tests/test_client.py @@ -0,0 +1,142 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Unit tests for DurableAIAgentClient. + +Focuses on critical client workflows: agent retrieval, protocol compliance, and integration. +Run with: pytest tests/test_client.py -v +""" + +from unittest.mock import Mock + +import pytest +from agent_framework import AgentProtocol + +from agent_framework_durabletask import DurableAgentThread, DurableAIAgentClient +from agent_framework_durabletask._constants import DEFAULT_MAX_POLL_RETRIES, DEFAULT_POLL_INTERVAL_SECONDS +from agent_framework_durabletask._shim import DurableAIAgent + + +@pytest.fixture +def mock_grpc_client() -> Mock: + """Create a mock TaskHubGrpcClient for testing.""" + return Mock() + + +@pytest.fixture +def agent_client(mock_grpc_client: Mock) -> DurableAIAgentClient: + """Create a DurableAIAgentClient with mock gRPC client.""" + return DurableAIAgentClient(mock_grpc_client) + + +@pytest.fixture +def agent_client_with_custom_polling(mock_grpc_client: Mock) -> DurableAIAgentClient: + """Create a DurableAIAgentClient with custom polling parameters.""" + return DurableAIAgentClient( + mock_grpc_client, + max_poll_retries=15, + poll_interval_seconds=0.5, + ) + + +class TestDurableAIAgentClientGetAgent: + """Test core workflow: retrieving agents from the client.""" + + def test_get_agent_returns_durable_agent_shim(self, agent_client: DurableAIAgentClient) -> None: + """Verify get_agent returns a DurableAIAgent instance.""" + agent = agent_client.get_agent("assistant") + + assert isinstance(agent, DurableAIAgent) + assert isinstance(agent, AgentProtocol) + + def test_get_agent_shim_has_correct_name(self, agent_client: DurableAIAgentClient) -> None: + """Verify retrieved agent has the correct name.""" + agent = agent_client.get_agent("my_agent") + + assert agent.name == "my_agent" + + def test_get_agent_multiple_times_returns_new_instances(self, agent_client: DurableAIAgentClient) -> None: + """Verify multiple get_agent calls return independent instances.""" + agent1 = agent_client.get_agent("assistant") + agent2 = agent_client.get_agent("assistant") + + assert agent1 is not agent2 # Different object instances + + def test_get_agent_different_agents(self, agent_client: DurableAIAgentClient) -> None: + """Verify client can retrieve multiple different agents.""" + agent1 = agent_client.get_agent("agent1") + agent2 = agent_client.get_agent("agent2") + + assert agent1.name == "agent1" + assert agent2.name == "agent2" + + +class TestDurableAIAgentClientIntegration: + """Test integration scenarios between client and agent shim.""" + + def test_client_agent_has_working_run_method(self, agent_client: DurableAIAgentClient) -> None: + """Verify agent from client has callable run method (even if not yet implemented).""" + agent = agent_client.get_agent("assistant") + + assert hasattr(agent, "run") + assert callable(agent.run) + + def test_client_agent_can_create_threads(self, agent_client: DurableAIAgentClient) -> None: + """Verify agent from client can create DurableAgentThread instances.""" + agent = 
agent_client.get_agent("assistant") + + thread = agent.get_new_thread() + + assert isinstance(thread, DurableAgentThread) + + def test_client_agent_thread_with_parameters(self, agent_client: DurableAIAgentClient) -> None: + """Verify agent can create threads with custom parameters.""" + agent = agent_client.get_agent("assistant") + + thread = agent.get_new_thread(service_thread_id="client-session-123") + + assert isinstance(thread, DurableAgentThread) + assert thread.service_thread_id == "client-session-123" + + +class TestDurableAIAgentClientPollingConfiguration: + """Test polling configuration parameters for DurableAIAgentClient.""" + + def test_client_uses_default_polling_parameters(self, agent_client: DurableAIAgentClient) -> None: + """Verify client initializes with default polling parameters.""" + assert agent_client.max_poll_retries == DEFAULT_MAX_POLL_RETRIES + assert agent_client.poll_interval_seconds == DEFAULT_POLL_INTERVAL_SECONDS + + def test_client_accepts_custom_polling_parameters( + self, agent_client_with_custom_polling: DurableAIAgentClient + ) -> None: + """Verify client accepts and stores custom polling parameters.""" + assert agent_client_with_custom_polling.max_poll_retries == 15 + assert agent_client_with_custom_polling.poll_interval_seconds == 0.5 + + def test_client_validates_max_poll_retries(self, mock_grpc_client: Mock) -> None: + """Verify client validates and normalizes max_poll_retries.""" + # Test with zero - should enforce minimum of 1 + client = DurableAIAgentClient(mock_grpc_client, max_poll_retries=0) + assert client.max_poll_retries == 1 + + # Test with negative - should enforce minimum of 1 + client = DurableAIAgentClient(mock_grpc_client, max_poll_retries=-5) + assert client.max_poll_retries == 1 + + def test_client_validates_poll_interval_seconds(self, mock_grpc_client: Mock) -> None: + """Verify client validates and normalizes poll_interval_seconds.""" + # Test with zero - should use default + client = DurableAIAgentClient(mock_grpc_client, poll_interval_seconds=0) + assert client.poll_interval_seconds == DEFAULT_POLL_INTERVAL_SECONDS + + # Test with negative - should use default + client = DurableAIAgentClient(mock_grpc_client, poll_interval_seconds=-0.5) + assert client.poll_interval_seconds == DEFAULT_POLL_INTERVAL_SECONDS + + # Test with valid float + client = DurableAIAgentClient(mock_grpc_client, poll_interval_seconds=2.5) + assert client.poll_interval_seconds == 2.5 + + +if __name__ == "__main__": + pytest.main([__file__, "-v", "--tb=short"]) diff --git a/python/packages/durabletask/tests/test_durable_agent_state.py b/python/packages/durabletask/tests/test_durable_agent_state.py new file mode 100644 index 0000000000..24b31a747e --- /dev/null +++ b/python/packages/durabletask/tests/test_durable_agent_state.py @@ -0,0 +1,377 @@ +# Copyright (c) Microsoft. All rights reserved. 
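+
+# Shape of the serialized state exercised below (an informal sketch taken from
+# the fixtures in this file):
+#   {"schemaVersion": "1.1.0",
+#    "data": {"conversationHistory": [
+#        {"$type": "request", "correlationId": "...", "createdAt": "...",
+#         "messages": [{"role": "user",
+#                       "contents": [{"$type": "text", "text": "..."}]}]}]}}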
+
+"""Unit tests for DurableAgentState and related classes."""
+
+from datetime import datetime
+
+import pytest
+from agent_framework import UsageDetails
+
+from agent_framework_durabletask._durable_agent_state import (
+    DurableAgentState,
+    DurableAgentStateMessage,
+    DurableAgentStateRequest,
+    DurableAgentStateTextContent,
+    DurableAgentStateUsage,
+)
+from agent_framework_durabletask._models import RunRequest
+
+
+class TestDurableAgentStateRequestOrchestrationId:
+    """Test suite for DurableAgentStateRequest orchestration_id field."""
+
+    def test_request_with_orchestration_id(self) -> None:
+        """Test creating a request with an orchestration_id."""
+        request = DurableAgentStateRequest(
+            correlation_id="corr-123",
+            created_at=datetime.now(),
+            messages=[
+                DurableAgentStateMessage(
+                    role="user",
+                    contents=[DurableAgentStateTextContent(text="test")],
+                )
+            ],
+            orchestration_id="orch-456",
+        )
+
+        assert request.orchestration_id == "orch-456"
+
+    def test_request_to_dict_includes_orchestration_id(self) -> None:
+        """Test that to_dict includes orchestrationId when set."""
+        request = DurableAgentStateRequest(
+            correlation_id="corr-123",
+            created_at=datetime.now(),
+            messages=[
+                DurableAgentStateMessage(
+                    role="user",
+                    contents=[DurableAgentStateTextContent(text="test")],
+                )
+            ],
+            orchestration_id="orch-789",
+        )
+
+        data = request.to_dict()
+
+        assert "orchestrationId" in data
+        assert data["orchestrationId"] == "orch-789"
+
+    def test_request_to_dict_excludes_orchestration_id_when_none(self) -> None:
+        """Test that to_dict excludes orchestrationId when not set."""
+        request = DurableAgentStateRequest(
+            correlation_id="corr-123",
+            created_at=datetime.now(),
+            messages=[
+                DurableAgentStateMessage(
+                    role="user",
+                    contents=[DurableAgentStateTextContent(text="test")],
+                )
+            ],
+        )
+
+        data = request.to_dict()
+
+        assert "orchestrationId" not in data
+
+    def test_request_from_dict_with_orchestration_id(self) -> None:
+        """Test from_dict correctly parses orchestrationId."""
+        data = {
+            "$type": "request",
+            "correlationId": "corr-123",
+            "createdAt": "2024-01-01T00:00:00Z",
+            "messages": [{"role": "user", "contents": [{"$type": "text", "text": "test"}]}],
+            "orchestrationId": "orch-from-dict",
+        }
+
+        request = DurableAgentStateRequest.from_dict(data)
+
+        assert request.orchestration_id == "orch-from-dict"
+
+    def test_request_from_run_request_with_orchestration_id(self) -> None:
+        """Test from_run_request correctly transfers orchestration_id."""
+        run_request = RunRequest(
+            message="test message",
+            correlation_id="corr-run",
+            orchestration_id="orch-from-run-request",
+        )
+
+        durable_request = DurableAgentStateRequest.from_run_request(run_request)
+
+        assert durable_request.orchestration_id == "orch-from-run-request"
+
+    def test_request_from_run_request_without_orchestration_id(self) -> None:
+        """Test from_run_request correctly handles missing orchestration_id."""
+        run_request = RunRequest(
+            message="test message",
+            correlation_id="corr-run",
+        )
+
+        durable_request = DurableAgentStateRequest.from_run_request(run_request)
+
+        assert durable_request.orchestration_id is None
+
+
+class TestDurableAgentStateMessageCreatedAt:
+    """Test suite for DurableAgentStateMessage created_at field handling."""
+
+    def test_message_from_run_request_without_created_at_uses_default_timestamp(self) -> None:
+        """Test from_run_request handles auto-populated created_at from RunRequest.
+
+        When a RunRequest is created with None for created_at, RunRequest defaults it to
+        current UTC time. 
The resulting DurableAgentStateMessage should have this timestamp. + """ + run_request = RunRequest( + message="test message", + correlation_id="corr-run", + created_at=None, # RunRequest will default this to current time + ) + + durable_message = DurableAgentStateMessage.from_run_request(run_request) + + # RunRequest auto-populates created_at, so it should not be None + assert durable_message.created_at is not None + + def test_message_from_run_request_with_created_at_parses_correctly(self) -> None: + """Test from_run_request correctly parses a valid created_at timestamp.""" + run_request = RunRequest( + message="test message", + correlation_id="corr-run", + created_at=datetime(2024, 1, 15, 10, 30, 0), + ) + + durable_message = DurableAgentStateMessage.from_run_request(run_request) + + assert durable_message.created_at is not None + assert durable_message.created_at.year == 2024 + assert durable_message.created_at.month == 1 + assert durable_message.created_at.day == 15 + + +class TestDurableAgentState: + """Test suite for DurableAgentState.""" + + def test_schema_version(self) -> None: + """Test that schema version is set correctly.""" + state = DurableAgentState() + assert state.schema_version == "1.1.0" + + def test_to_dict_serialization(self) -> None: + """Test that to_dict produces correct structure.""" + state = DurableAgentState() + data = state.to_dict() + + assert "schemaVersion" in data + assert "data" in data + assert data["schemaVersion"] == "1.1.0" + assert "conversationHistory" in data["data"] + + def test_from_dict_deserialization(self) -> None: + """Test that from_dict restores state correctly.""" + original_data = { + "schemaVersion": "1.1.0", + "data": { + "conversationHistory": [ + { + "$type": "request", + "correlationId": "test-123", + "createdAt": "2024-01-01T00:00:00Z", + "messages": [ + { + "role": "user", + "contents": [{"$type": "text", "text": "Hello"}], + } + ], + } + ] + }, + } + + state = DurableAgentState.from_dict(original_data) + + assert state.schema_version == "1.1.0" + assert len(state.data.conversation_history) == 1 + assert isinstance(state.data.conversation_history[0], DurableAgentStateRequest) + + def test_round_trip_serialization(self) -> None: + """Test that round-trip serialization preserves data.""" + state = DurableAgentState() + state.data.conversation_history.append( + DurableAgentStateRequest( + correlation_id="test-456", + created_at=datetime.now(), + messages=[ + DurableAgentStateMessage( + role="user", + contents=[DurableAgentStateTextContent(text="Test message")], + ) + ], + ) + ) + + data = state.to_dict() + restored = DurableAgentState.from_dict(data) + + assert restored.schema_version == state.schema_version + assert len(restored.data.conversation_history) == len(state.data.conversation_history) + assert restored.data.conversation_history[0].correlation_id == "test-456" + + +class TestDurableAgentStateUsage: + """Test suite for DurableAgentStateUsage.""" + + def test_usage_init_with_defaults(self) -> None: + """Test creating usage with default values.""" + usage = DurableAgentStateUsage() + + assert usage.input_token_count is None + assert usage.output_token_count is None + assert usage.total_token_count is None + assert usage.extensionData is None + + def test_usage_init_with_values(self) -> None: + """Test creating usage with specific values.""" + usage = DurableAgentStateUsage( + input_token_count=100, + output_token_count=200, + total_token_count=300, + extensionData={"custom_field": "value"}, + ) + + assert 
usage.input_token_count == 100 + assert usage.output_token_count == 200 + assert usage.total_token_count == 300 + assert usage.extensionData == {"custom_field": "value"} + + def test_usage_to_dict(self) -> None: + """Test that to_dict produces correct structure.""" + usage = DurableAgentStateUsage( + input_token_count=50, + output_token_count=75, + total_token_count=125, + ) + + data = usage.to_dict() + + assert data["inputTokenCount"] == 50 + assert data["outputTokenCount"] == 75 + assert data["totalTokenCount"] == 125 + + def test_usage_to_dict_with_extension_data(self) -> None: + """Test that to_dict includes extensionData when present.""" + usage = DurableAgentStateUsage( + input_token_count=10, + output_token_count=20, + total_token_count=30, + extensionData={"provider_specific": 123}, + ) + + data = usage.to_dict() + + assert "extensionData" in data + assert data["extensionData"] == {"provider_specific": 123} + + def test_usage_from_dict(self) -> None: + """Test that from_dict restores usage correctly.""" + data = { + "inputTokenCount": 100, + "outputTokenCount": 200, + "totalTokenCount": 300, + "extensionData": {"extra": "data"}, + } + + usage = DurableAgentStateUsage.from_dict(data) + + assert usage.input_token_count == 100 + assert usage.output_token_count == 200 + assert usage.total_token_count == 300 + assert usage.extensionData == {"extra": "data"} + + def test_usage_from_usage_details(self) -> None: + """Test creating DurableAgentStateUsage from UsageDetails.""" + usage_details: UsageDetails = { + "input_token_count": 150, + "output_token_count": 250, + "total_token_count": 400, + } + + usage = DurableAgentStateUsage.from_usage(usage_details) + + assert usage is not None + assert usage.input_token_count == 150 + assert usage.output_token_count == 250 + assert usage.total_token_count == 400 + + def test_usage_from_usage_details_with_extension_fields(self) -> None: + """Test that non-standard fields are captured in extensionData.""" + usage_details: UsageDetails = { + "input_token_count": 100, + "output_token_count": 200, + "total_token_count": 300, + } + # Add provider-specific fields (UsageDetails is a TypedDict but allows extra keys) + usage_details["prompt_tokens"] = 100 # type: ignore[typeddict-unknown-key] + usage_details["completion_tokens"] = 200 # type: ignore[typeddict-unknown-key] + + usage = DurableAgentStateUsage.from_usage(usage_details) + + assert usage is not None + assert usage.extensionData is not None + assert usage.extensionData["prompt_tokens"] == 100 + assert usage.extensionData["completion_tokens"] == 200 + + def test_usage_from_usage_none(self) -> None: + """Test that from_usage returns None for None input.""" + usage = DurableAgentStateUsage.from_usage(None) + + assert usage is None + + def test_usage_to_usage_details(self) -> None: + """Test converting back to UsageDetails.""" + usage = DurableAgentStateUsage( + input_token_count=100, + output_token_count=200, + total_token_count=300, + ) + + details = usage.to_usage_details() + + assert details.get("input_token_count") == 100 + assert details.get("output_token_count") == 200 + assert details.get("total_token_count") == 300 + + def test_usage_to_usage_details_with_extension_data(self) -> None: + """Test that extensionData is merged into UsageDetails.""" + usage = DurableAgentStateUsage( + input_token_count=50, + output_token_count=75, + total_token_count=125, + extensionData={"prompt_tokens": 50, "completion_tokens": 75}, + ) + + details = usage.to_usage_details() + + assert 
details.get("input_token_count") == 50 + assert details.get("output_token_count") == 75 + assert details.get("total_token_count") == 125 + # Extension data should be merged into the result + assert details.get("prompt_tokens") == 50 + assert details.get("completion_tokens") == 75 + + def test_usage_round_trip(self) -> None: + """Test round-trip conversion from UsageDetails to DurableAgentStateUsage and back.""" + original: UsageDetails = { + "input_token_count": 100, + "output_token_count": 200, + "total_token_count": 300, + } + + usage = DurableAgentStateUsage.from_usage(original) + assert usage is not None + restored = usage.to_usage_details() + + assert restored.get("input_token_count") == original.get("input_token_count") + assert restored.get("output_token_count") == original.get("output_token_count") + assert restored.get("total_token_count") == original.get("total_token_count") + + +if __name__ == "__main__": + pytest.main([__file__, "-v", "--tb=short"]) diff --git a/python/packages/durabletask/tests/test_durable_entities.py b/python/packages/durabletask/tests/test_durable_entities.py new file mode 100644 index 0000000000..35babc44c0 --- /dev/null +++ b/python/packages/durabletask/tests/test_durable_entities.py @@ -0,0 +1,695 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Unit tests for AgentEntity. + +Run with: pytest tests/test_entities.py -v +""" + +from collections.abc import AsyncIterator +from datetime import datetime +from typing import Any, TypeVar +from unittest.mock import AsyncMock, Mock + +import pytest +from agent_framework import AgentResponse, AgentResponseUpdate, ChatMessage, Content, Role +from pydantic import BaseModel + +from agent_framework_durabletask import ( + AgentEntity, + AgentEntityStateProviderMixin, + DurableAgentState, + DurableAgentStateData, + DurableAgentStateMessage, + DurableAgentStateRequest, + DurableAgentStateTextContent, + RunRequest, +) +from agent_framework_durabletask._entities import DurableTaskEntityStateProvider + +TState = TypeVar("TState") + + +class MockEntityContext: + """Minimal durabletask EntityContext shim for tests.""" + + def __init__(self, initial_state: Any = None) -> None: + self._state = initial_state + + def get_state( + self, + intended_type: type[TState] | None = None, + default: TState | None = None, + ) -> Any: + del intended_type + if self._state is None: + return default + return self._state + + def set_state(self, new_state: Any) -> None: + self._state = new_state + + +class _InMemoryStateProvider(AgentEntityStateProviderMixin): + """Test-only state provider for AgentEntity.""" + + def __init__(self, *, thread_id: str, initial_state: dict[str, Any] | None = None) -> None: + self._thread_id = thread_id + self._state_dict: dict[str, Any] = initial_state or {} + + def _get_state_dict(self) -> dict[str, Any]: + return self._state_dict + + def _set_state_dict(self, state: dict[str, Any]) -> None: + self._state_dict = state + + def _get_thread_id_from_entity(self) -> str: + return self._thread_id + + +def _make_entity(agent: Any, callback: Any = None, *, thread_id: str = "test-thread") -> AgentEntity: + return AgentEntity(agent, callback=callback, state_provider=_InMemoryStateProvider(thread_id=thread_id)) + + +def _role_value(chat_message: DurableAgentStateMessage) -> str: + """Helper to extract the string role from a ChatMessage.""" + role = getattr(chat_message, "role", None) + role_value = getattr(role, "value", role) + if role_value is None: + return "" + return str(role_value) + + +def 
_agent_response(text: str | None) -> AgentResponse: + """Create an AgentResponse with a single assistant message.""" + message = ( + ChatMessage(role="assistant", text=text) if text is not None else ChatMessage(role="assistant", contents=[]) + ) + return AgentResponse(messages=[message]) + + +class RecordingCallback: + """Callback implementation capturing streaming and final responses for assertions.""" + + def __init__(self) -> None: + self.stream_mock = AsyncMock() + self.response_mock = AsyncMock() + + async def on_streaming_response_update( + self, + update: AgentResponseUpdate, + context: Any, + ) -> None: + await self.stream_mock(update, context) + + async def on_agent_response(self, response: AgentResponse, context: Any) -> None: + await self.response_mock(response, context) + + +class EntityStructuredResponse(BaseModel): + answer: float + + +class TestAgentEntityInit: + """Test suite for AgentEntity initialization.""" + + def test_init_creates_entity(self) -> None: + """Test that AgentEntity initializes correctly.""" + mock_agent = Mock() + + entity = _make_entity(mock_agent) + + assert entity.agent == mock_agent + assert len(entity.state.data.conversation_history) == 0 + assert entity.state.data.extension_data is None + assert entity.state.schema_version == DurableAgentState.SCHEMA_VERSION + + def test_init_stores_agent_reference(self) -> None: + """Test that the agent reference is stored correctly.""" + mock_agent = Mock() + mock_agent.name = "TestAgent" + + entity = _make_entity(mock_agent) + + assert entity.agent.name == "TestAgent" + + def test_init_with_different_agent_types(self) -> None: + """Test initialization with different agent types.""" + agent1 = Mock() + agent1.__class__.__name__ = "AzureOpenAIAgent" + + agent2 = Mock() + agent2.__class__.__name__ = "CustomAgent" + + entity1 = _make_entity(agent1) + entity2 = _make_entity(agent2) + + assert entity1.agent.__class__.__name__ == "AzureOpenAIAgent" + assert entity2.agent.__class__.__name__ == "CustomAgent" + + +class TestDurableTaskEntityStateProvider: + """Tests for DurableTaskEntityStateProvider wrapper behavior and persistence wiring.""" + + def _make_durabletask_entity_provider( + self, + *, + initial_state: dict[str, Any] | None = None, + ) -> tuple[DurableTaskEntityStateProvider, MockEntityContext]: + """Create a DurableTaskEntityStateProvider wired to an in-memory durabletask context.""" + entity = DurableTaskEntityStateProvider() + ctx = MockEntityContext(initial_state) + # DurableEntity provides this hook; required for get_state/set_state to work in unit tests. 
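+ # NOTE: _initialize_entity_context is an internal seam (hence the type: ignore below); if durabletask renames it, this helper is the single place to update.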
+ entity._initialize_entity_context(ctx)  # type: ignore[attr-defined] + return entity, ctx + + def test_reset_persists_cleared_state(self) -> None: + existing_state = { + "schemaVersion": "1.0.0", + "data": { + "conversationHistory": [ + { + "$type": "request", + "correlationId": "corr-existing-1", + "createdAt": "2024-01-01T00:00:00Z", + "messages": [{"role": "user", "contents": [{"$type": "text", "text": "msg1"}]}], + } + ] + }, + } + + entity, ctx = self._make_durabletask_entity_provider(initial_state=existing_state) + + entity.reset() + + persisted = ctx.get_state(dict, default={}) + assert isinstance(persisted, dict) + assert persisted["data"]["conversationHistory"] == [] + + +class TestAgentEntityRunAgent: + """Test suite for the run_agent operation.""" + + async def test_run_executes_agent(self) -> None: + """Test that run executes the agent.""" + mock_agent = Mock() + mock_response = _agent_response("Test response") + mock_agent.run = AsyncMock(return_value=mock_response) + + entity = _make_entity(mock_agent) + + result = await entity.run({ + "message": "Test message", + "correlationId": "corr-entity-1", + }) + + # Verify agent.run was called + mock_agent.run.assert_called_once() + _, kwargs = mock_agent.run.call_args + sent_messages: list[Any] = kwargs.get("messages") + assert len(sent_messages) == 1 + sent_message = sent_messages[0] + assert isinstance(sent_message, ChatMessage) + assert getattr(sent_message, "text", None) == "Test message" + assert getattr(sent_message.role, "value", sent_message.role) == "user" + + # Verify result + assert isinstance(result, AgentResponse) + assert result.text == "Test response" + + async def test_run_agent_streaming_callbacks_invoked(self) -> None: + """Ensure streaming updates trigger callbacks and run() is not used.""" + updates = [ + AgentResponseUpdate(text="Hello"), + AgentResponseUpdate(text=" world"), + ] + + async def update_generator() -> AsyncIterator[AgentResponseUpdate]: + for update in updates: + yield update + + mock_agent = Mock() + mock_agent.name = "StreamingAgent" + mock_agent.run_stream = Mock(return_value=update_generator()) + mock_agent.run = AsyncMock(side_effect=AssertionError("run() should not be called when streaming succeeds")) + + callback = RecordingCallback() + entity = _make_entity(mock_agent, callback=callback, thread_id="session-1") + + result = await entity.run( + { + "message": "Tell me something", + "correlationId": "corr-stream-1", + }, + ) + + assert isinstance(result, AgentResponse) + assert "Hello" in result.text + assert callback.stream_mock.await_count == len(updates) + assert callback.response_mock.await_count == 1 + mock_agent.run.assert_not_called() + + # Validate callback arguments + stream_calls = callback.stream_mock.await_args_list + for expected_update, recorded_call in zip(updates, stream_calls, strict=True): + assert recorded_call.args[0] is expected_update + context = recorded_call.args[1] + assert context.agent_name == "StreamingAgent" + assert context.correlation_id == "corr-stream-1" + assert context.thread_id == "session-1" + assert context.request_message == "Tell me something" + + final_call = callback.response_mock.await_args + assert final_call is not None + final_response, final_context = final_call.args + assert final_context.agent_name == "StreamingAgent" + assert final_context.correlation_id == "corr-stream-1" + assert final_context.thread_id == "session-1" + assert final_context.request_message == "Tell me something" + assert 
getattr(final_response, "text", "").strip() + + async def test_run_agent_final_callback_without_streaming(self) -> None: + """Ensure the final callback fires even when streaming is unavailable.""" + mock_agent = Mock() + mock_agent.name = "NonStreamingAgent" + mock_agent.run_stream = None + agent_response = _agent_response("Final response") + mock_agent.run = AsyncMock(return_value=agent_response) + + callback = RecordingCallback() + entity = _make_entity(mock_agent, callback=callback, thread_id="session-2") + + result = await entity.run( + { + "message": "Hi", + "correlationId": "corr-final-1", + }, + ) + + assert isinstance(result, AgentResponse) + assert result.text == "Final response" + assert callback.stream_mock.await_count == 0 + assert callback.response_mock.await_count == 1 + + final_call = callback.response_mock.await_args + assert final_call is not None + assert final_call.args[0] is agent_response + final_context = final_call.args[1] + assert final_context.agent_name == "NonStreamingAgent" + assert final_context.correlation_id == "corr-final-1" + assert final_context.thread_id == "session-2" + assert final_context.request_message == "Hi" + + async def test_run_agent_updates_conversation_history(self) -> None: + """Test that run_agent updates the conversation history.""" + mock_agent = Mock() + mock_response = _agent_response("Agent response") + mock_agent.run = AsyncMock(return_value=mock_response) + + entity = _make_entity(mock_agent) + + await entity.run({"message": "User message", "correlationId": "corr-entity-2"}) + + # Should have 2 entries: user message + assistant response + user_history = entity.state.data.conversation_history[0].messages + assistant_history = entity.state.data.conversation_history[1].messages + + assert len(user_history) == 1 + + user_msg = user_history[0] + assert _role_value(user_msg) == "user" + assert user_msg.text == "User message" + + assistant_msg = assistant_history[0] + assert _role_value(assistant_msg) == "assistant" + assert assistant_msg.text == "Agent response" + + async def test_run_agent_increments_message_count(self) -> None: + """Test that run_agent increments the message count.""" + mock_agent = Mock() + mock_agent.run = AsyncMock(return_value=_agent_response("Response")) + + entity = _make_entity(mock_agent) + + assert len(entity.state.data.conversation_history) == 0 + + await entity.run({"message": "Message 1", "correlationId": "corr-entity-3a"}) + assert len(entity.state.data.conversation_history) == 2 + + await entity.run({"message": "Message 2", "correlationId": "corr-entity-3b"}) + assert len(entity.state.data.conversation_history) == 4 + + await entity.run({"message": "Message 3", "correlationId": "corr-entity-3c"}) + assert len(entity.state.data.conversation_history) == 6 + + async def test_run_requires_entity_thread_id(self) -> None: + """Test that AgentEntity.run rejects missing entity thread identifiers.""" + mock_agent = Mock() + mock_agent.run = AsyncMock(return_value=_agent_response("Response")) + + entity = _make_entity(mock_agent, thread_id="") + + with pytest.raises(ValueError, match="thread_id"): + await entity.run({"message": "Message", "correlationId": "corr-entity-5"}) + + async def test_run_agent_multiple_conversations(self) -> None: + """Test that run_agent maintains history across multiple messages.""" + mock_agent = Mock() + mock_agent.run = AsyncMock(return_value=_agent_response("Response")) + + entity = _make_entity(mock_agent) + + # Send multiple messages + await entity.run({"message": "Message 1", 
"correlationId": "corr-entity-8a"}) + await entity.run({"message": "Message 2", "correlationId": "corr-entity-8b"}) + await entity.run({"message": "Message 3", "correlationId": "corr-entity-8c"}) + + history = entity.state.data.conversation_history + assert len(history) == 6 + assert entity.state.message_count == 6 + + +class TestAgentEntityReset: + """Test suite for the reset operation.""" + + def test_reset_clears_conversation_history(self) -> None: + """Test that reset clears the conversation history.""" + mock_agent = Mock() + entity = _make_entity(mock_agent) + + # Add some history with proper DurableAgentStateEntry objects + entity.state.data.conversation_history = [ + DurableAgentStateRequest( + correlation_id="test-1", + created_at=datetime.now(), + messages=[ + DurableAgentStateMessage( + role="user", + contents=[DurableAgentStateTextContent(text="msg1")], + ) + ], + ), + ] + + entity.reset() + + assert entity.state.data.conversation_history == [] + + def test_reset_with_extension_data(self) -> None: + """Test that reset works when entity has extension data.""" + mock_agent = Mock() + entity = _make_entity(mock_agent) + + # Set up some initial state with conversation history + entity.state.data = DurableAgentStateData(conversation_history=[], extension_data={"some_key": "some_value"}) + + entity.reset() + + assert len(entity.state.data.conversation_history) == 0 + + def test_reset_clears_message_count(self) -> None: + """Test that reset clears the message count.""" + mock_agent = Mock() + entity = _make_entity(mock_agent) + + entity.reset() + + assert len(entity.state.data.conversation_history) == 0 + + async def test_reset_after_conversation(self) -> None: + """Test reset after a full conversation.""" + mock_agent = Mock() + mock_agent.run = AsyncMock(return_value=_agent_response("Response")) + + entity = _make_entity(mock_agent) + + # Have a conversation + await entity.run({"message": "Message 1", "correlationId": "corr-entity-10a"}) + await entity.run({"message": "Message 2", "correlationId": "corr-entity-10b"}) + + # Verify state before reset + assert entity.state.message_count == 4 + assert len(entity.state.data.conversation_history) == 4 + + # Reset + entity.reset() + + # Verify state after reset + assert entity.state.message_count == 0 + assert len(entity.state.data.conversation_history) == 0 + + +class TestErrorHandling: + """Test suite for error handling in entities.""" + + async def test_run_agent_handles_agent_exception(self) -> None: + """Test that run_agent handles agent exceptions.""" + mock_agent = Mock() + mock_agent.run = AsyncMock(side_effect=Exception("Agent failed")) + + entity = _make_entity(mock_agent) + + result = await entity.run({"message": "Message", "correlationId": "corr-entity-error-1"}) + + assert isinstance(result, AgentResponse) + assert len(result.messages) == 1 + content = result.messages[0].contents[0] + assert isinstance(content, Content) + assert "Agent failed" in (content.message or "") + assert content.error_code == "Exception" + + async def test_run_agent_handles_value_error(self) -> None: + """Test that run_agent handles ValueError instances.""" + mock_agent = Mock() + mock_agent.run = AsyncMock(side_effect=ValueError("Invalid input")) + + entity = _make_entity(mock_agent) + + result = await entity.run({"message": "Message", "correlationId": "corr-entity-error-2"}) + + assert isinstance(result, AgentResponse) + assert len(result.messages) == 1 + content = result.messages[0].contents[0] + assert isinstance(content, Content) + assert 
content.error_code == "ValueError" + assert "Invalid input" in str(content.message) + + async def test_run_agent_handles_timeout_error(self) -> None: + """Test that run_agent handles TimeoutError instances.""" + mock_agent = Mock() + mock_agent.run = AsyncMock(side_effect=TimeoutError("Request timeout")) + + entity = _make_entity(mock_agent) + + result = await entity.run({"message": "Message", "correlationId": "corr-entity-error-3"}) + + assert isinstance(result, AgentResponse) + assert len(result.messages) == 1 + content = result.messages[0].contents[0] + assert isinstance(content, Content) + assert content.error_code == "TimeoutError" + + async def test_run_agent_preserves_message_on_error(self) -> None: + """Test that run_agent preserves message information on error.""" + mock_agent = Mock() + mock_agent.run = AsyncMock(side_effect=Exception("Error")) + + entity = _make_entity(mock_agent) + + result = await entity.run( + {"message": "Test message", "correlationId": "corr-entity-error-4"}, + ) + + # Even on error, message info should be preserved + assert isinstance(result, AgentResponse) + assert len(result.messages) == 1 + content = result.messages[0].contents[0] + assert isinstance(content, Content) + + +class TestConversationHistory: + """Test suite for conversation history tracking.""" + + async def test_conversation_history_has_timestamps(self) -> None: + """Test that conversation history entries include timestamps.""" + mock_agent = Mock() + mock_agent.run = AsyncMock(return_value=_agent_response("Response")) + + entity = _make_entity(mock_agent) + + await entity.run({"message": "Message", "correlationId": "corr-entity-history-1"}) + + # Check both user and assistant messages have timestamps + for entry in entity.state.data.conversation_history: + timestamp = entry.created_at + assert timestamp is not None + # Verify timestamp is in ISO format + datetime.fromisoformat(str(timestamp)) + + async def test_conversation_history_ordering(self) -> None: + """Test that conversation history maintains the correct order.""" + mock_agent = Mock() + + entity = _make_entity(mock_agent) + + # Send multiple messages with different responses + mock_agent.run = AsyncMock(return_value=_agent_response("Response 1")) + await entity.run( + {"message": "Message 1", "correlationId": "corr-entity-history-2a"}, + ) + + mock_agent.run = AsyncMock(return_value=_agent_response("Response 2")) + await entity.run( + {"message": "Message 2", "correlationId": "corr-entity-history-2b"}, + ) + + mock_agent.run = AsyncMock(return_value=_agent_response("Response 3")) + await entity.run( + {"message": "Message 3", "correlationId": "corr-entity-history-2c"}, + ) + + # Verify order + history = entity.state.data.conversation_history + # Each conversation turn creates 2 entries: request and response + assert history[0].messages[0].text == "Message 1" # Request 1 + assert history[1].messages[0].text == "Response 1" # Response 1 + assert history[2].messages[0].text == "Message 2" # Request 2 + assert history[3].messages[0].text == "Response 2" # Response 2 + assert history[4].messages[0].text == "Message 3" # Request 3 + assert history[5].messages[0].text == "Response 3" # Response 3 + + async def test_conversation_history_role_alternation(self) -> None: + """Test that conversation history alternates between user and assistant roles.""" + mock_agent = Mock() + mock_agent.run = AsyncMock(return_value=_agent_response("Response")) + + entity = _make_entity(mock_agent) + + await entity.run( + {"message": "Message 1", 
"correlationId": "corr-entity-history-3a"}, + ) + await entity.run( + {"message": "Message 2", "correlationId": "corr-entity-history-3b"}, + ) + + # Check role alternation + history = entity.state.data.conversation_history + # Each conversation turn creates 2 entries: request and response + assert history[0].messages[0].role == "user" # Request 1 + assert history[1].messages[0].role == "assistant" # Response 1 + assert history[2].messages[0].role == "user" # Request 2 + assert history[3].messages[0].role == "assistant" # Response 2 + + +class TestRunRequestSupport: + """Test suite for RunRequest support in entities.""" + + async def test_run_agent_with_run_request_object(self) -> None: + """Test run_agent with a RunRequest object.""" + mock_agent = Mock() + mock_agent.run = AsyncMock(return_value=_agent_response("Response")) + + entity = _make_entity(mock_agent) + + request = RunRequest( + message="Test message", + role=Role.USER, + enable_tool_calls=True, + correlation_id="corr-runreq-1", + ) + + result = await entity.run(request) + + assert isinstance(result, AgentResponse) + assert result.text == "Response" + + async def test_run_agent_with_dict_request(self) -> None: + """Test run_agent with a dictionary request.""" + mock_agent = Mock() + mock_agent.run = AsyncMock(return_value=_agent_response("Response")) + + entity = _make_entity(mock_agent) + + request_dict = { + "message": "Test message", + "role": "system", + "enable_tool_calls": False, + "correlationId": "corr-runreq-2", + } + + result = await entity.run(request_dict) + + assert isinstance(result, AgentResponse) + assert result.text == "Response" + + async def test_run_agent_with_string_raises_without_correlation(self) -> None: + """Test that run_agent rejects legacy string input without correlation ID.""" + mock_agent = Mock() + mock_agent.run = AsyncMock(return_value=_agent_response("Response")) + + entity = _make_entity(mock_agent) + + with pytest.raises(ValueError): + await entity.run("Simple message") + + async def test_run_agent_stores_role_in_history(self) -> None: + """Test that run_agent stores the role in conversation history.""" + mock_agent = Mock() + mock_agent.run = AsyncMock(return_value=_agent_response("Response")) + + entity = _make_entity(mock_agent) + + # Send as system role + request = RunRequest( + message="System message", + role=Role.SYSTEM, + correlation_id="corr-runreq-3", + ) + + await entity.run(request) + + # Check that system role was stored + history = entity.state.data.conversation_history + assert history[0].messages[0].role == "system" + assert history[0].messages[0].text == "System message" + + async def test_run_agent_with_response_format(self) -> None: + """Test run_agent with a JSON response format.""" + mock_agent = Mock() + # Return JSON response + mock_agent.run = AsyncMock(return_value=_agent_response('{"answer": 42}')) + + entity = _make_entity(mock_agent) + + request = RunRequest( + message="What is the answer?", + response_format=EntityStructuredResponse, + correlation_id="corr-runreq-4", + ) + + result = await entity.run(request) + + assert isinstance(result, AgentResponse) + assert result.text == '{"answer": 42}' + assert result.value is None + + async def test_run_agent_disable_tool_calls(self) -> None: + """Test run_agent with tool calls disabled.""" + mock_agent = Mock() + mock_agent.run = AsyncMock(return_value=_agent_response("Response")) + + entity = _make_entity(mock_agent) + + request = RunRequest(message="Test", enable_tool_calls=False, correlation_id="corr-runreq-5") + + 
result = await entity.run(request) + + assert isinstance(result, AgentResponse) + # Agent should have been called (tool disabling is framework-dependent) + mock_agent.run.assert_called_once() + + +if __name__ == "__main__": + pytest.main([__file__, "-v", "--tb=short"]) diff --git a/python/packages/durabletask/tests/test_executors.py b/python/packages/durabletask/tests/test_executors.py new file mode 100644 index 0000000000..46fe8bbdbc --- /dev/null +++ b/python/packages/durabletask/tests/test_executors.py @@ -0,0 +1,571 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Unit tests for DurableAgentExecutor implementations. + +Focuses on critical behavioral flows for executor strategies. +Run with: pytest tests/test_executors.py -v +""" + +import time +from typing import Any +from unittest.mock import Mock + +import pytest +from agent_framework import AgentResponse, Role +from durabletask.entities import EntityInstanceId +from durabletask.task import Task +from pydantic import BaseModel + +from agent_framework_durabletask import DurableAgentThread +from agent_framework_durabletask._constants import DEFAULT_MAX_POLL_RETRIES, DEFAULT_POLL_INTERVAL_SECONDS +from agent_framework_durabletask._executors import ( + ClientAgentExecutor, + DurableAgentTask, + OrchestrationAgentExecutor, +) +from agent_framework_durabletask._models import AgentSessionId, RunRequest + + +# Fixtures +@pytest.fixture +def mock_client() -> Mock: + """Provide a mock client for ClientAgentExecutor tests.""" + client = Mock() + client.signal_entity = Mock() + client.get_entity = Mock(return_value=None) + return client + + +@pytest.fixture +def mock_entity_task() -> Mock: + """Provide a mock entity task.""" + task = Mock(spec=Task) + task.is_complete = False + task.is_failed = False + return task + + +@pytest.fixture +def mock_orchestration_context(mock_entity_task: Mock) -> Mock: + """Provide a mock orchestration context with call_entity configured.""" + context = Mock() + context.call_entity = Mock(return_value=mock_entity_task) + return context + + +@pytest.fixture +def sample_run_request() -> RunRequest: + """Provide a sample RunRequest for tests.""" + return RunRequest(message="test message", correlation_id="test-123") + + +@pytest.fixture +def client_executor(mock_client: Mock) -> ClientAgentExecutor: + """Provide a ClientAgentExecutor with minimal polling for fast tests.""" + return ClientAgentExecutor(mock_client, max_poll_retries=1, poll_interval_seconds=0.01) + + +@pytest.fixture +def orchestration_executor(mock_orchestration_context: Mock) -> OrchestrationAgentExecutor: + """Provide an OrchestrationAgentExecutor.""" + return OrchestrationAgentExecutor(mock_orchestration_context) + + +@pytest.fixture +def successful_agent_response() -> dict[str, Any]: + """Provide a successful agent response dictionary.""" + return { + "messages": [{"role": "assistant", "contents": [{"type": "text", "text": "Hello!"}]}], + "created_at": "2025-12-30T10:00:00Z", + } + + +@pytest.fixture +def configure_successful_entity_task(mock_entity_task: Mock) -> Any: + """Provide a helper to configure mock_entity_task with a successful response.""" + + def _configure(response: dict[str, Any]) -> Mock: + mock_entity_task.is_failed = False + mock_entity_task.is_complete = False + mock_entity_task.get_result = Mock(return_value=response) + return mock_entity_task + + return _configure + + +@pytest.fixture +def configure_failed_entity_task(mock_entity_task: Mock) -> Any: + """Provide a helper to configure mock_entity_task with a failure.""" + 
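+ # The closure mutates the shared mock_entity_task in place and returns it, so a test can trigger the failure path and assert against the same mock.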
+ def _configure(exception: Exception) -> Mock: + mock_entity_task.is_failed = True + mock_entity_task.is_complete = True + mock_entity_task.get_exception = Mock(return_value=exception) + return mock_entity_task + + return _configure + + +class TestExecutorThreadCreation: + """Test that executors properly create DurableAgentThread with parameters.""" + + def test_client_executor_creates_durable_thread(self, mock_client: Mock) -> None: + """Verify ClientAgentExecutor creates DurableAgentThread instances.""" + executor = ClientAgentExecutor(mock_client) + + thread = executor.get_new_thread("test_agent") + + assert isinstance(thread, DurableAgentThread) + + def test_client_executor_forwards_kwargs_to_thread(self, mock_client: Mock) -> None: + """Verify ClientAgentExecutor forwards kwargs to DurableAgentThread creation.""" + executor = ClientAgentExecutor(mock_client) + + thread = executor.get_new_thread("test_agent", service_thread_id="client-123") + + assert isinstance(thread, DurableAgentThread) + assert thread.service_thread_id == "client-123" + + def test_orchestration_executor_creates_durable_thread( + self, orchestration_executor: OrchestrationAgentExecutor + ) -> None: + """Verify OrchestrationAgentExecutor creates DurableAgentThread instances.""" + thread = orchestration_executor.get_new_thread("test_agent") + + assert isinstance(thread, DurableAgentThread) + + def test_orchestration_executor_forwards_kwargs_to_thread( + self, orchestration_executor: OrchestrationAgentExecutor + ) -> None: + """Verify OrchestrationAgentExecutor forwards kwargs to DurableAgentThread creation.""" + thread = orchestration_executor.get_new_thread("test_agent", service_thread_id="orch-456") + + assert isinstance(thread, DurableAgentThread) + assert thread.service_thread_id == "orch-456" + + +class TestClientAgentExecutorRun: + """Test that ClientAgentExecutor.run_durable_agent works as implemented.""" + + def test_client_executor_run_returns_response( + self, client_executor: ClientAgentExecutor, sample_run_request: RunRequest + ) -> None: + """Verify ClientAgentExecutor.run_durable_agent returns AgentResponse (synchronous).""" + result = client_executor.run_durable_agent("test_agent", sample_run_request) + + # Verify it returns an AgentResponse (synchronous, not a coroutine) + assert isinstance(result, AgentResponse) + assert result is not None + + +class TestClientAgentExecutorPollingConfiguration: + """Test polling configuration parameters for ClientAgentExecutor.""" + + def test_executor_uses_default_polling_parameters(self, mock_client: Mock) -> None: + """Verify executor initializes with default polling parameters.""" + executor = ClientAgentExecutor(mock_client) + + assert executor.max_poll_retries == DEFAULT_MAX_POLL_RETRIES + assert executor.poll_interval_seconds == DEFAULT_POLL_INTERVAL_SECONDS + + def test_executor_accepts_custom_polling_parameters(self, mock_client: Mock) -> None: + """Verify executor accepts and stores custom polling parameters.""" + executor = ClientAgentExecutor(mock_client, max_poll_retries=20, poll_interval_seconds=0.5) + + assert executor.max_poll_retries == 20 + assert executor.poll_interval_seconds == 0.5 + + def test_executor_respects_custom_max_poll_retries(self, mock_client: Mock, sample_run_request: RunRequest) -> None: + """Verify executor respects custom max_poll_retries during polling.""" + # Create executor with only 2 retries + executor = ClientAgentExecutor(mock_client, max_poll_retries=2, poll_interval_seconds=0.01) + + # Run the agent + result = 
executor.run_durable_agent("test_agent", sample_run_request) + + # Verify it returns AgentResponse (should timeout after 2 attempts) + assert isinstance(result, AgentResponse) + + # Verify get_entity was called 2 times (max_poll_retries) + assert mock_client.get_entity.call_count == 2 + + def test_executor_respects_custom_poll_interval(self, mock_client: Mock, sample_run_request: RunRequest) -> None: + """Verify executor respects custom poll_interval_seconds during polling.""" + # Create executor with very short interval + executor = ClientAgentExecutor(mock_client, max_poll_retries=3, poll_interval_seconds=0.01) + + # Measure time taken + start = time.time() + result = executor.run_durable_agent("test_agent", sample_run_request) + elapsed = time.time() - start + + # Should take roughly 3 * 0.01 = 0.03 seconds (plus overhead) + # Be generous with timing to avoid flakiness + assert elapsed < 0.2 # Should be quick with 0.01 interval + assert isinstance(result, AgentResponse) + + +class TestClientAgentExecutorFireAndForget: + """Test fire-and-forget mode (wait_for_response=False) for ClientAgentExecutor.""" + + def test_fire_and_forget_returns_immediately(self, mock_client: Mock) -> None: + """Verify wait_for_response=False returns immediately without polling.""" + executor = ClientAgentExecutor(mock_client, max_poll_retries=10, poll_interval_seconds=0.1) + + # Create a request with wait_for_response=False + request = RunRequest(message="test message", correlation_id="test-123", wait_for_response=False) + + # Measure time taken + start = time.time() + result = executor.run_durable_agent("test_agent", request) + elapsed = time.time() - start + + # Should return immediately without polling (elapsed time should be very small) + assert elapsed < 0.1 # Much faster than any polling would take + + # Should return an AgentResponse + assert isinstance(result, AgentResponse) + + # Should have signaled the entity but not polled + assert mock_client.signal_entity.call_count == 1 + assert mock_client.get_entity.call_count == 0 # No polling occurred + + def test_fire_and_forget_returns_empty_response(self, mock_client: Mock) -> None: + """Verify wait_for_response=False returns an acceptance message with correlation ID.""" + executor = ClientAgentExecutor(mock_client) + + request = RunRequest(message="test message", correlation_id="test-456", wait_for_response=False) + + result = executor.run_durable_agent("test_agent", request) + + # Verify it contains an acceptance message + assert isinstance(result, AgentResponse) + assert len(result.messages) == 1 + assert result.messages[0].role == Role.SYSTEM + # Check message contains key information + message_text = result.messages[0].text + assert "accepted" in message_text.lower() + assert "test-456" in message_text # Contains correlation ID + assert "background" in message_text.lower() + + +class TestOrchestrationAgentExecutorFireAndForget: + """Test fire-and-forget mode for OrchestrationAgentExecutor.""" + + def test_orchestration_fire_and_forget_calls_signal_entity(self, mock_orchestration_context: Mock) -> None: + """Verify wait_for_response=False calls signal_entity instead of call_entity.""" + executor = OrchestrationAgentExecutor(mock_orchestration_context) + mock_orchestration_context.signal_entity = Mock() + + request = RunRequest(message="test", correlation_id="test-123", wait_for_response=False) + + result = executor.run_durable_agent("test_agent", request) + + # Verify signal_entity was called and call_entity was not + assert 
mock_orchestration_context.signal_entity.call_count == 1 + assert mock_orchestration_context.call_entity.call_count == 0 + + # Should still return a DurableAgentTask + assert isinstance(result, DurableAgentTask) + + def test_orchestration_fire_and_forget_returns_completed_task(self, mock_orchestration_context: Mock) -> None: + """Verify wait_for_response=False returns pre-completed DurableAgentTask.""" + executor = OrchestrationAgentExecutor(mock_orchestration_context) + mock_orchestration_context.signal_entity = Mock() + + request = RunRequest(message="test", correlation_id="test-456", wait_for_response=False) + + result = executor.run_durable_agent("test_agent", request) + + # Task should be immediately complete + assert isinstance(result, DurableAgentTask) + assert result.is_complete + + def test_orchestration_fire_and_forget_returns_acceptance_response(self, mock_orchestration_context: Mock) -> None: + """Verify wait_for_response=False returns acceptance response.""" + executor = OrchestrationAgentExecutor(mock_orchestration_context) + mock_orchestration_context.signal_entity = Mock() + + request = RunRequest(message="test", correlation_id="test-789", wait_for_response=False) + + result = executor.run_durable_agent("test_agent", request) + + # Get the result + response = result.get_result() + assert isinstance(response, AgentResponse) + assert len(response.messages) == 1 + assert response.messages[0].role == Role.SYSTEM + assert "test-789" in response.messages[0].text + + def test_orchestration_blocking_mode_calls_call_entity(self, mock_orchestration_context: Mock) -> None: + """Verify wait_for_response=True uses call_entity as before.""" + executor = OrchestrationAgentExecutor(mock_orchestration_context) + mock_orchestration_context.signal_entity = Mock() + + request = RunRequest(message="test", correlation_id="test-abc", wait_for_response=True) + + result = executor.run_durable_agent("test_agent", request) + + # Verify call_entity was called and signal_entity was not + assert mock_orchestration_context.call_entity.call_count == 1 + assert mock_orchestration_context.signal_entity.call_count == 0 + + # Should return a DurableAgentTask + assert isinstance(result, DurableAgentTask) + + +class TestOrchestrationAgentExecutorRun: + """Test OrchestrationAgentExecutor.run_durable_agent implementation.""" + + def test_orchestration_executor_run_returns_durable_agent_task( + self, orchestration_executor: OrchestrationAgentExecutor, sample_run_request: RunRequest + ) -> None: + """Verify OrchestrationAgentExecutor.run_durable_agent returns DurableAgentTask.""" + result = orchestration_executor.run_durable_agent("test_agent", sample_run_request) + + assert isinstance(result, DurableAgentTask) + + def test_orchestration_executor_calls_entity_with_correct_parameters( + self, + mock_orchestration_context: Mock, + orchestration_executor: OrchestrationAgentExecutor, + sample_run_request: RunRequest, + ) -> None: + """Verify call_entity is invoked with correct entity ID and request.""" + orchestration_executor.run_durable_agent("test_agent", sample_run_request) + + # Verify call_entity was called once + assert mock_orchestration_context.call_entity.call_count == 1 + + # Get the call arguments + call_args = mock_orchestration_context.call_entity.call_args + entity_id_arg = call_args[0][0] + operation_arg = call_args[0][1] + request_dict_arg = call_args[0][2] + + # Verify entity ID + assert isinstance(entity_id_arg, EntityInstanceId) + assert entity_id_arg.entity == "dafx-test_agent" + + # Verify 
operation name + assert operation_arg == "run" + + # Verify request dict + assert request_dict_arg == sample_run_request.to_dict() + + def test_orchestration_executor_uses_thread_session_id( + self, + mock_orchestration_context: Mock, + orchestration_executor: OrchestrationAgentExecutor, + sample_run_request: RunRequest, + ) -> None: + """Verify executor uses thread's session ID when provided.""" + # Create thread with specific session ID + session_id = AgentSessionId(name="test_agent", key="specific-key-123") + thread = DurableAgentThread.from_session_id(session_id) + + result = orchestration_executor.run_durable_agent("test_agent", sample_run_request, thread=thread) + + # Verify call_entity was called with the specific key + call_args = mock_orchestration_context.call_entity.call_args + entity_id_arg = call_args[0][0] + + assert entity_id_arg.key == "specific-key-123" + assert isinstance(result, DurableAgentTask) + + +class TestDurableAgentTask: + """Test DurableAgentTask completion and response transformation.""" + + def test_durable_agent_task_transforms_successful_result( + self, configure_successful_entity_task: Any, successful_agent_response: dict[str, Any] + ) -> None: + """Verify DurableAgentTask converts successful entity result to AgentResponse.""" + mock_entity_task = configure_successful_entity_task(successful_agent_response) + + task = DurableAgentTask(entity_task=mock_entity_task, response_format=None, correlation_id="test-123") + + # Simulate child task completion + task.on_child_completed(mock_entity_task) + + assert task.is_complete + result = task.get_result() + assert isinstance(result, AgentResponse) + assert len(result.messages) == 1 + assert result.messages[0].role == Role.ASSISTANT + + def test_durable_agent_task_propagates_failure(self, configure_failed_entity_task: Any) -> None: + """Verify DurableAgentTask propagates task failures.""" + mock_entity_task = configure_failed_entity_task(ValueError("Entity error")) + + task = DurableAgentTask(entity_task=mock_entity_task, response_format=None, correlation_id="test-123") + + # Simulate child task completion with failure + task.on_child_completed(mock_entity_task) + + assert task.is_complete + assert task.is_failed + # The exception is wrapped in TaskFailedError by the durabletask library + exception = task.get_exception() + assert exception is not None + + def test_durable_agent_task_validates_response_format(self, configure_successful_entity_task: Any) -> None: + """Verify DurableAgentTask validates response format when provided.""" + response = { + "messages": [{"role": "assistant", "contents": [{"type": "text", "text": '{"answer": "42"}'}]}], + "created_at": "2025-12-30T10:00:00Z", + } + mock_entity_task = configure_successful_entity_task(response) + + class TestResponse(BaseModel): + answer: str + + task = DurableAgentTask(entity_task=mock_entity_task, response_format=TestResponse, correlation_id="test-123") + + # Simulate child task completion + task.on_child_completed(mock_entity_task) + + assert task.is_complete + result = task.get_result() + assert isinstance(result, AgentResponse) + + def test_durable_agent_task_ignores_duplicate_completion( + self, configure_successful_entity_task: Any, successful_agent_response: dict[str, Any] + ) -> None: + """Verify DurableAgentTask ignores duplicate completion calls.""" + mock_entity_task = configure_successful_entity_task(successful_agent_response) + + task = DurableAgentTask(entity_task=mock_entity_task, response_format=None, correlation_id="test-123") + + # Simulate 
child task completion twice + task.on_child_completed(mock_entity_task) + first_result = task.get_result() + + task.on_child_completed(mock_entity_task) + second_result = task.get_result() + + # Should be the same result, get_result should only be called once + assert first_result is second_result + assert mock_entity_task.get_result.call_count == 1 + + def test_durable_agent_task_fails_on_malformed_response(self, configure_successful_entity_task: Any) -> None: + """Verify DurableAgentTask fails when entity returns malformed response data.""" + # Use data that will cause AgentResponse.from_dict to fail + # Using a list instead of dict, or other invalid structure + mock_entity_task = configure_successful_entity_task("invalid string response") + + task = DurableAgentTask(entity_task=mock_entity_task, response_format=None, correlation_id="test-123") + + # Simulate child task completion with malformed data + task.on_child_completed(mock_entity_task) + + assert task.is_complete + assert task.is_failed + + def test_durable_agent_task_fails_on_invalid_response_format(self, configure_successful_entity_task: Any) -> None: + """Verify DurableAgentTask fails when response doesn't match required format.""" + response = { + "messages": [{"role": "assistant", "contents": [{"type": "text", "text": '{"wrong": "field"}'}]}], + "created_at": "2025-12-30T10:00:00Z", + } + mock_entity_task = configure_successful_entity_task(response) + + class StrictResponse(BaseModel): + required_field: str + + task = DurableAgentTask(entity_task=mock_entity_task, response_format=StrictResponse, correlation_id="test-123") + + # Simulate child task completion with wrong format + task.on_child_completed(mock_entity_task) + + assert task.is_complete + assert task.is_failed + + def test_durable_agent_task_handles_empty_response(self, configure_successful_entity_task: Any) -> None: + """Verify DurableAgentTask handles response with empty messages list.""" + response: dict[str, str | list[Any]] = { + "messages": [], + "created_at": "2025-12-30T10:00:00Z", + } + mock_entity_task = configure_successful_entity_task(response) + + task = DurableAgentTask(entity_task=mock_entity_task, response_format=None, correlation_id="test-123") + + # Simulate child task completion + task.on_child_completed(mock_entity_task) + + assert task.is_complete + result = task.get_result() + assert isinstance(result, AgentResponse) + assert len(result.messages) == 0 + + def test_durable_agent_task_handles_multiple_messages(self, configure_successful_entity_task: Any) -> None: + """Verify DurableAgentTask correctly processes response with multiple messages.""" + response = { + "messages": [ + {"role": "assistant", "contents": [{"type": "text", "text": "First message"}]}, + {"role": "assistant", "contents": [{"type": "text", "text": "Second message"}]}, + ], + "created_at": "2025-12-30T10:00:00Z", + } + mock_entity_task = configure_successful_entity_task(response) + + task = DurableAgentTask(entity_task=mock_entity_task, response_format=None, correlation_id="test-123") + + # Simulate child task completion + task.on_child_completed(mock_entity_task) + + assert task.is_complete + result = task.get_result() + assert isinstance(result, AgentResponse) + assert len(result.messages) == 2 + assert result.messages[0].role == Role.ASSISTANT + assert result.messages[1].role == Role.ASSISTANT + + def test_durable_agent_task_is_not_complete_initially(self, mock_entity_task: Mock) -> None: + """Verify DurableAgentTask is not complete when first created.""" + task = 
DurableAgentTask(entity_task=mock_entity_task, response_format=None, correlation_id="test-123") + + assert not task.is_complete + assert not task.is_failed + + def test_durable_agent_task_completes_with_complex_response_format( + self, configure_successful_entity_task: Any + ) -> None: + """Verify DurableAgentTask validates complex nested response formats correctly.""" + response = { + "messages": [ + { + "role": "assistant", + "contents": [ + { + "type": "text", + "text": '{"name": "test", "count": 42, "items": ["a", "b", "c"]}', + } + ], + } + ], + "created_at": "2025-12-30T10:00:00Z", + } + mock_entity_task = configure_successful_entity_task(response) + + class ComplexResponse(BaseModel): + name: str + count: int + items: list[str] + + task = DurableAgentTask( + entity_task=mock_entity_task, response_format=ComplexResponse, correlation_id="test-123" + ) + + # Simulate child task completion + task.on_child_completed(mock_entity_task) + + assert task.is_complete + assert not task.is_failed + result = task.get_result() + assert isinstance(result, AgentResponse) + + +if __name__ == "__main__": + pytest.main([__file__, "-v", "--tb=short"]) diff --git a/python/packages/durabletask/tests/test_models.py b/python/packages/durabletask/tests/test_models.py new file mode 100644 index 0000000000..0f6a24293d --- /dev/null +++ b/python/packages/durabletask/tests/test_models.py @@ -0,0 +1,310 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Unit tests for data models (RunRequest).""" + +import pytest +from agent_framework import Role +from pydantic import BaseModel + +from agent_framework_durabletask._models import RunRequest + + +class ModuleStructuredResponse(BaseModel): + value: int + + +class TestRunRequest: + """Test suite for RunRequest.""" + + def test_init_with_defaults(self) -> None: + """Test RunRequest initialization with defaults.""" + request = RunRequest(message="Hello", correlation_id="corr-001") + + assert request.message == "Hello" + assert request.correlation_id == "corr-001" + assert request.role == Role.USER + assert request.response_format is None + assert request.enable_tool_calls is True + assert request.wait_for_response is True + + def test_init_with_all_fields(self) -> None: + """Test RunRequest initialization with all fields.""" + schema = ModuleStructuredResponse + request = RunRequest( + message="Hello", + correlation_id="corr-002", + role=Role.SYSTEM, + response_format=schema, + enable_tool_calls=False, + wait_for_response=False, + ) + + assert request.message == "Hello" + assert request.correlation_id == "corr-002" + assert request.role == Role.SYSTEM + assert request.response_format is schema + assert request.enable_tool_calls is False + assert request.wait_for_response is False + + def test_init_coerces_string_role(self) -> None: + """Ensure string role values are coerced into Role instances.""" + request = RunRequest(message="Hello", correlation_id="corr-003", role="system") # type: ignore[arg-type] + + assert request.role == Role.SYSTEM + + def test_to_dict_with_defaults(self) -> None: + """Test to_dict with default values.""" + request = RunRequest(message="Test message", correlation_id="corr-004") + data = request.to_dict() + + assert data["message"] == "Test message" + assert data["enable_tool_calls"] is True + assert data["wait_for_response"] is True + assert data["role"] == "user" + assert data["correlationId"] == "corr-004" + assert "response_format" not in data or data["response_format"] is None + assert "thread_id" not in data + + def 
test_to_dict_with_all_fields(self) -> None: + """Test to_dict with all fields.""" + schema = ModuleStructuredResponse + request = RunRequest( + message="Hello", + correlation_id="corr-005", + role=Role.ASSISTANT, + response_format=schema, + enable_tool_calls=False, + wait_for_response=False, + ) + data = request.to_dict() + + assert data["message"] == "Hello" + assert data["correlationId"] == "corr-005" + assert data["role"] == "assistant" + assert data["response_format"]["__response_schema_type__"] == "pydantic_model" + assert data["response_format"]["module"] == schema.__module__ + assert data["response_format"]["qualname"] == schema.__qualname__ + assert data["enable_tool_calls"] is False + assert data["wait_for_response"] is False + assert "thread_id" not in data + + def test_from_dict_with_defaults(self) -> None: + """Test from_dict with minimal data.""" + data = {"message": "Hello", "correlationId": "corr-006"} + request = RunRequest.from_dict(data) + + assert request.message == "Hello" + assert request.correlation_id == "corr-006" + assert request.role == Role.USER + assert request.enable_tool_calls is True + assert request.wait_for_response is True + + def test_from_dict_ignores_thread_id_field(self) -> None: + """Ensure legacy thread_id input does not break RunRequest parsing.""" + request = RunRequest.from_dict({"message": "Hello", "correlationId": "corr-007", "thread_id": "ignored"}) + + assert request.message == "Hello" + + def test_from_dict_with_all_fields(self) -> None: + """Test from_dict with all fields.""" + data = { + "message": "Test", + "correlationId": "corr-008", + "role": "system", + "response_format": { + "__response_schema_type__": "pydantic_model", + "module": ModuleStructuredResponse.__module__, + "qualname": ModuleStructuredResponse.__qualname__, + }, + "enable_tool_calls": False, + } + request = RunRequest.from_dict(data) + + assert request.message == "Test" + assert request.correlation_id == "corr-008" + assert request.role == Role.SYSTEM + assert request.response_format is ModuleStructuredResponse + assert request.enable_tool_calls is False + + def test_from_dict_unknown_role_preserves_value(self) -> None: + """Test from_dict keeps custom roles intact.""" + data = {"message": "Test", "correlationId": "corr-009", "role": "reviewer"} + request = RunRequest.from_dict(data) + + assert request.role.value == "reviewer" + assert request.role != Role.USER + + def test_from_dict_empty_message(self) -> None: + """Test from_dict with empty message.""" + request = RunRequest.from_dict({"correlationId": "corr-010"}) + + assert request.message == "" + assert request.correlation_id == "corr-010" + assert request.role == Role.USER + + def test_from_dict_missing_correlation_id_raises(self) -> None: + """Test from_dict raises when correlationId is missing.""" + with pytest.raises(ValueError, match="correlationId is required"): + RunRequest.from_dict({"message": "Test"}) + + def test_round_trip_dict_conversion(self) -> None: + """Test round-trip to_dict and from_dict.""" + original = RunRequest( + message="Test message", + correlation_id="corr-011", + role=Role.SYSTEM, + response_format=ModuleStructuredResponse, + enable_tool_calls=False, + ) + + data = original.to_dict() + restored = RunRequest.from_dict(data) + + assert restored.message == original.message + assert restored.correlation_id == original.correlation_id + assert restored.role == original.role + assert restored.response_format is ModuleStructuredResponse + assert restored.enable_tool_calls == 
original.enable_tool_calls + + def test_round_trip_with_pydantic_response_format(self) -> None: + """Ensure Pydantic response formats serialize and deserialize properly.""" + original = RunRequest( + message="Structured", + correlation_id="corr-012", + response_format=ModuleStructuredResponse, + ) + + data = original.to_dict() + + assert data["response_format"]["__response_schema_type__"] == "pydantic_model" + assert data["response_format"]["module"] == ModuleStructuredResponse.__module__ + assert data["response_format"]["qualname"] == ModuleStructuredResponse.__qualname__ + + restored = RunRequest.from_dict(data) + assert restored.response_format is ModuleStructuredResponse + + def test_round_trip_with_options(self) -> None: + """Ensure options are preserved and response_format is deserialized.""" + original = RunRequest( + message="Test", + correlation_id="corr-opts-1", + response_format=ModuleStructuredResponse, + enable_tool_calls=False, + options={ + "response_format": ModuleStructuredResponse, + "enable_tool_calls": False, + "custom": "value", + }, + ) + + data = original.to_dict() + assert data["options"]["custom"] == "value" + + restored = RunRequest.from_dict(data) + assert restored.options is not None + assert restored.options["custom"] == "value" + assert restored.options["response_format"] is ModuleStructuredResponse + + def test_init_with_correlationId(self) -> None: + """Test RunRequest initialization with correlationId.""" + request = RunRequest(message="Test message", correlation_id="corr-123") + + assert request.message == "Test message" + assert request.correlation_id == "corr-123" + + def test_to_dict_with_correlationId(self) -> None: + """Test to_dict includes correlationId.""" + request = RunRequest(message="Test", correlation_id="corr-456") + data = request.to_dict() + + assert data["message"] == "Test" + assert data["correlationId"] == "corr-456" + + def test_from_dict_with_correlationId(self) -> None: + """Test from_dict with correlationId.""" + data = {"message": "Test", "correlationId": "corr-789"} + request = RunRequest.from_dict(data) + + assert request.message == "Test" + assert request.correlation_id == "corr-789" + + def test_round_trip_with_correlationId(self) -> None: + """Test round-trip to_dict and from_dict with correlationId.""" + original = RunRequest( + message="Test message", + role=Role.SYSTEM, + correlation_id="corr-124", + ) + + data = original.to_dict() + restored = RunRequest.from_dict(data) + + assert restored.message == original.message + assert restored.role == original.role + assert restored.correlation_id == original.correlation_id + + def test_init_with_orchestration_id(self) -> None: + """Test RunRequest initialization with orchestration_id.""" + request = RunRequest( + message="Test message", + correlation_id="corr-125", + orchestration_id="orch-123", + ) + + assert request.message == "Test message" + assert request.orchestration_id == "orch-123" + + def test_to_dict_with_orchestration_id(self) -> None: + """Test to_dict includes orchestrationId.""" + request = RunRequest( + message="Test", + correlation_id="corr-126", + orchestration_id="orch-456", + ) + data = request.to_dict() + + assert data["message"] == "Test" + assert data["orchestrationId"] == "orch-456" + + def test_to_dict_excludes_orchestration_id_when_none(self) -> None: + """Test to_dict excludes orchestrationId when not set.""" + request = RunRequest( + message="Test", + correlation_id="corr-127", + ) + data = request.to_dict() + + assert "orchestrationId" not in data + + 
def test_from_dict_with_orchestration_id(self) -> None: + """Test from_dict with orchestrationId.""" + data = { + "message": "Test", + "correlationId": "corr-128", + "orchestrationId": "orch-789", + } + request = RunRequest.from_dict(data) + + assert request.message == "Test" + assert request.orchestration_id == "orch-789" + + def test_round_trip_with_orchestration_id(self) -> None: + """Test round-trip to_dict and from_dict with orchestration_id.""" + original = RunRequest( + message="Test message", + role=Role.SYSTEM, + correlation_id="corr-129", + orchestration_id="orch-123", + ) + + data = original.to_dict() + restored = RunRequest.from_dict(data) + + assert restored.message == original.message + assert restored.role == original.role + assert restored.correlation_id == original.correlation_id + assert restored.orchestration_id == original.orchestration_id + + +if __name__ == "__main__": + pytest.main([__file__, "-v", "--tb=short"]) diff --git a/python/packages/durabletask/tests/test_orchestration_context.py b/python/packages/durabletask/tests/test_orchestration_context.py new file mode 100644 index 0000000000..f6a7755335 --- /dev/null +++ b/python/packages/durabletask/tests/test_orchestration_context.py @@ -0,0 +1,98 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Unit tests for DurableAIAgentOrchestrationContext. + +Focuses on critical orchestration workflows: agent retrieval and integration. +Run with: pytest tests/test_orchestration_context.py -v +""" + +from unittest.mock import Mock + +import pytest +from agent_framework import AgentProtocol + +from agent_framework_durabletask import DurableAgentThread +from agent_framework_durabletask._orchestration_context import DurableAIAgentOrchestrationContext +from agent_framework_durabletask._shim import DurableAIAgent + + +@pytest.fixture +def mock_orchestration_context() -> Mock: + """Create a mock OrchestrationContext for testing.""" + return Mock() + + +@pytest.fixture +def agent_context(mock_orchestration_context: Mock) -> DurableAIAgentOrchestrationContext: + """Create a DurableAIAgentOrchestrationContext with mock context.""" + return DurableAIAgentOrchestrationContext(mock_orchestration_context) + + +class TestDurableAIAgentOrchestrationContextGetAgent: + """Test core workflow: retrieving agents from orchestration context.""" + + def test_get_agent_returns_durable_agent_shim(self, agent_context: DurableAIAgentOrchestrationContext) -> None: + """Verify get_agent returns a DurableAIAgent instance.""" + agent = agent_context.get_agent("assistant") + + assert isinstance(agent, DurableAIAgent) + assert isinstance(agent, AgentProtocol) + + def test_get_agent_shim_has_correct_name(self, agent_context: DurableAIAgentOrchestrationContext) -> None: + """Verify retrieved agent has the correct name.""" + agent = agent_context.get_agent("my_agent") + + assert agent.name == "my_agent" + + def test_get_agent_multiple_times_returns_new_instances( + self, agent_context: DurableAIAgentOrchestrationContext + ) -> None: + """Verify multiple get_agent calls return independent instances.""" + agent1 = agent_context.get_agent("assistant") + agent2 = agent_context.get_agent("assistant") + + assert agent1 is not agent2 # Different object instances + + def test_get_agent_different_agents(self, agent_context: DurableAIAgentOrchestrationContext) -> None: + """Verify context can retrieve multiple different agents.""" + agent1 = agent_context.get_agent("agent1") + agent2 = agent_context.get_agent("agent2") + + assert agent1.name == "agent1" + assert 
agent2.name == "agent2" + + +class TestDurableAIAgentOrchestrationContextIntegration: + """Test integration scenarios between orchestration context and agent shim.""" + + def test_orchestration_agent_has_working_run_method( + self, agent_context: DurableAIAgentOrchestrationContext + ) -> None: + """Verify agent from context has callable run method (even if not yet implemented).""" + agent = agent_context.get_agent("assistant") + + assert hasattr(agent, "run") + assert callable(agent.run) + + def test_orchestration_agent_can_create_threads(self, agent_context: DurableAIAgentOrchestrationContext) -> None: + """Verify agent from context can create DurableAgentThread instances.""" + agent = agent_context.get_agent("assistant") + + thread = agent.get_new_thread() + + assert isinstance(thread, DurableAgentThread) + + def test_orchestration_agent_thread_with_parameters( + self, agent_context: DurableAIAgentOrchestrationContext + ) -> None: + """Verify agent can create threads with custom parameters.""" + agent = agent_context.get_agent("assistant") + + thread = agent.get_new_thread(service_thread_id="orch-session-456") + + assert isinstance(thread, DurableAgentThread) + assert thread.service_thread_id == "orch-session-456" + + +if __name__ == "__main__": + pytest.main([__file__, "-v", "--tb=short"]) diff --git a/python/packages/durabletask/tests/test_shim.py b/python/packages/durabletask/tests/test_shim.py new file mode 100644 index 0000000000..26988edca4 --- /dev/null +++ b/python/packages/durabletask/tests/test_shim.py @@ -0,0 +1,213 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Unit tests for DurableAIAgent shim and DurableAgentProvider. + +Focuses on critical message normalization, delegation, and protocol compliance. +Run with: pytest tests/test_shim.py -v +""" + +from typing import Any +from unittest.mock import Mock + +import pytest +from agent_framework import AgentProtocol, ChatMessage +from pydantic import BaseModel + +from agent_framework_durabletask import DurableAgentThread +from agent_framework_durabletask._executors import DurableAgentExecutor +from agent_framework_durabletask._models import RunRequest +from agent_framework_durabletask._shim import DurableAgentProvider, DurableAIAgent + + +class ResponseFormatModel(BaseModel): + """Test Pydantic model for response format testing.""" + + result: str + + +@pytest.fixture +def mock_executor() -> Mock: + """Create a mock executor for testing.""" + mock = Mock(spec=DurableAgentExecutor) + mock.run_durable_agent = Mock(return_value=None) + mock.get_new_thread = Mock(return_value=DurableAgentThread()) + + # Mock get_run_request to create actual RunRequest objects + def create_run_request( + message: str, + options: dict[str, Any] | None = None, + ) -> RunRequest: + import uuid + + opts = dict(options) if options else {} + response_format = opts.pop("response_format", None) + enable_tool_calls = opts.pop("enable_tool_calls", True) + wait_for_response = opts.pop("wait_for_response", True) + return RunRequest( + message=message, + correlation_id=str(uuid.uuid4()), + response_format=response_format, + enable_tool_calls=enable_tool_calls, + wait_for_response=wait_for_response, + options=opts, + ) + + mock.get_run_request = Mock(side_effect=create_run_request) + return mock + + +@pytest.fixture +def test_agent(mock_executor: Mock) -> DurableAIAgent[Any]: + """Create a test agent with mock executor.""" + return DurableAIAgent(mock_executor, "test_agent") + + +class TestDurableAIAgentMessageNormalization: + """Test that DurableAIAgent 
properly normalizes various message input types.""" + + def test_run_accepts_string_message(self, test_agent: DurableAIAgent[Any], mock_executor: Mock) -> None: + """Verify run accepts and normalizes string messages.""" + test_agent.run("Hello, world!") + + mock_executor.run_durable_agent.assert_called_once() + # Verify agent_name and run_request were passed correctly as kwargs + _, kwargs = mock_executor.run_durable_agent.call_args + assert kwargs["agent_name"] == "test_agent" + assert kwargs["run_request"].message == "Hello, world!" + + def test_run_accepts_chat_message(self, test_agent: DurableAIAgent[Any], mock_executor: Mock) -> None: + """Verify run accepts and normalizes ChatMessage objects.""" + chat_msg = ChatMessage(role="user", text="Test message") + test_agent.run(chat_msg) + + mock_executor.run_durable_agent.assert_called_once() + _, kwargs = mock_executor.run_durable_agent.call_args + assert kwargs["run_request"].message == "Test message" + + def test_run_accepts_list_of_strings(self, test_agent: DurableAIAgent[Any], mock_executor: Mock) -> None: + """Verify run accepts and joins list of strings.""" + test_agent.run(["First message", "Second message"]) + + mock_executor.run_durable_agent.assert_called_once() + _, kwargs = mock_executor.run_durable_agent.call_args + assert kwargs["run_request"].message == "First message\nSecond message" + + def test_run_accepts_list_of_chat_messages(self, test_agent: DurableAIAgent[Any], mock_executor: Mock) -> None: + """Verify run accepts and joins list of ChatMessage objects.""" + messages = [ + ChatMessage(role="user", text="Message 1"), + ChatMessage(role="assistant", text="Message 2"), + ] + test_agent.run(messages) + + mock_executor.run_durable_agent.assert_called_once() + _, kwargs = mock_executor.run_durable_agent.call_args + assert kwargs["run_request"].message == "Message 1\nMessage 2" + + def test_run_handles_none_message(self, test_agent: DurableAIAgent[Any], mock_executor: Mock) -> None: + """Verify run handles None message gracefully.""" + test_agent.run(None) + + mock_executor.run_durable_agent.assert_called_once() + _, kwargs = mock_executor.run_durable_agent.call_args + assert kwargs["run_request"].message == "" + + def test_run_handles_empty_list(self, test_agent: DurableAIAgent[Any], mock_executor: Mock) -> None: + """Verify run handles empty list gracefully.""" + test_agent.run([]) + + mock_executor.run_durable_agent.assert_called_once() + _, kwargs = mock_executor.run_durable_agent.call_args + assert kwargs["run_request"].message == "" + + +class TestDurableAIAgentParameterFlow: + """Test that parameters flow correctly through the shim to executor.""" + + def test_run_forwards_thread_parameter(self, test_agent: DurableAIAgent[Any], mock_executor: Mock) -> None: + """Verify run forwards thread parameter to executor.""" + thread = DurableAgentThread(service_thread_id="test-thread") + test_agent.run("message", thread=thread) + + mock_executor.run_durable_agent.assert_called_once() + _, kwargs = mock_executor.run_durable_agent.call_args + assert kwargs["thread"] == thread + + def test_run_forwards_response_format(self, test_agent: DurableAIAgent[Any], mock_executor: Mock) -> None: + """Verify run forwards response_format parameter to executor.""" + test_agent.run("message", options={"response_format": ResponseFormatModel}) + + mock_executor.run_durable_agent.assert_called_once() + _, kwargs = mock_executor.run_durable_agent.call_args + assert kwargs["run_request"].response_format == ResponseFormatModel + + +class 
TestDurableAIAgentProtocolCompliance: + """Test that DurableAIAgent implements AgentProtocol correctly.""" + + def test_agent_implements_protocol(self, test_agent: DurableAIAgent[Any]) -> None: + """Verify DurableAIAgent implements AgentProtocol.""" + assert isinstance(test_agent, AgentProtocol) + + def test_agent_has_required_properties(self, test_agent: DurableAIAgent[Any]) -> None: + """Verify DurableAIAgent has all required AgentProtocol properties.""" + assert hasattr(test_agent, "id") + assert hasattr(test_agent, "name") + assert hasattr(test_agent, "display_name") + assert hasattr(test_agent, "description") + + def test_agent_id_defaults_to_name(self, mock_executor: Mock) -> None: + """Verify agent id defaults to name when not provided.""" + agent: DurableAIAgent[Any] = DurableAIAgent(mock_executor, "my_agent") + + assert agent.id == "my_agent" + assert agent.name == "my_agent" + + def test_agent_id_can_be_customized(self, mock_executor: Mock) -> None: + """Verify agent id can be set independently from name.""" + agent: DurableAIAgent[Any] = DurableAIAgent(mock_executor, "my_agent", agent_id="custom-id") + + assert agent.id == "custom-id" + assert agent.name == "my_agent" + + +class TestDurableAIAgentThreadManagement: + """Test thread creation and management.""" + + def test_get_new_thread_delegates_to_executor(self, test_agent: DurableAIAgent[Any], mock_executor: Mock) -> None: + """Verify get_new_thread delegates to executor.""" + mock_thread = DurableAgentThread() + mock_executor.get_new_thread.return_value = mock_thread + + thread = test_agent.get_new_thread() + + mock_executor.get_new_thread.assert_called_once_with("test_agent") + assert thread == mock_thread + + def test_get_new_thread_forwards_kwargs(self, test_agent: DurableAIAgent[Any], mock_executor: Mock) -> None: + """Verify get_new_thread forwards kwargs to executor.""" + mock_thread = DurableAgentThread(service_thread_id="thread-123") + mock_executor.get_new_thread.return_value = mock_thread + + test_agent.get_new_thread(service_thread_id="thread-123") + + mock_executor.get_new_thread.assert_called_once() + _, kwargs = mock_executor.get_new_thread.call_args + assert kwargs["service_thread_id"] == "thread-123" + + +class TestDurableAgentProviderInterface: + """Test that DurableAgentProvider defines the correct interface.""" + + def test_provider_cannot_be_instantiated(self) -> None: + """Verify DurableAgentProvider is abstract and cannot be instantiated.""" + with pytest.raises(TypeError): + DurableAgentProvider() # type: ignore[abstract] + + def test_provider_defines_get_agent_method(self) -> None: + """Verify DurableAgentProvider defines get_agent abstract method.""" + assert hasattr(DurableAgentProvider, "get_agent") + + +if __name__ == "__main__": + pytest.main([__file__, "-v", "--tb=short"]) diff --git a/python/packages/durabletask/tests/test_worker.py b/python/packages/durabletask/tests/test_worker.py new file mode 100644 index 0000000000..e6dabcdfdf --- /dev/null +++ b/python/packages/durabletask/tests/test_worker.py @@ -0,0 +1,168 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Unit tests for DurableAIAgentWorker. + +Focuses on critical worker flows: agent registration, validation, callbacks, and lifecycle. 
+""" + +from unittest.mock import Mock + +import pytest + +from agent_framework_durabletask import DurableAIAgentWorker + + +@pytest.fixture +def mock_grpc_worker() -> Mock: + """Create a mock TaskHubGrpcWorker for testing.""" + mock = Mock() + mock.add_entity = Mock(return_value="dafx-test_agent") + mock.start = Mock() + mock.stop = Mock() + return mock + + +@pytest.fixture +def mock_agent() -> Mock: + """Create a mock agent for testing.""" + agent = Mock() + agent.name = "test_agent" + return agent + + +@pytest.fixture +def agent_worker(mock_grpc_worker: Mock) -> DurableAIAgentWorker: + """Create a DurableAIAgentWorker with mock worker.""" + return DurableAIAgentWorker(mock_grpc_worker) + + +class TestDurableAIAgentWorkerRegistration: + """Test agent registration behavior.""" + + def test_add_agent_accepts_agent_with_name( + self, agent_worker: DurableAIAgentWorker, mock_agent: Mock, mock_grpc_worker: Mock + ) -> None: + """Verify that agents with names can be registered.""" + agent_worker.add_agent(mock_agent) + + # Verify entity was registered with underlying worker + mock_grpc_worker.add_entity.assert_called_once() + # Verify agent name is tracked + assert "test_agent" in agent_worker.registered_agent_names + + def test_add_agent_rejects_agent_without_name(self, agent_worker: DurableAIAgentWorker) -> None: + """Verify that agents without names are rejected.""" + agent_no_name = Mock() + agent_no_name.name = None + + with pytest.raises(ValueError, match="Agent must have a name"): + agent_worker.add_agent(agent_no_name) + + def test_add_agent_rejects_empty_name(self, agent_worker: DurableAIAgentWorker) -> None: + """Verify that agents with empty names are rejected.""" + agent_empty_name = Mock() + agent_empty_name.name = "" + + with pytest.raises(ValueError, match="Agent must have a name"): + agent_worker.add_agent(agent_empty_name) + + def test_add_agent_rejects_duplicate_names(self, agent_worker: DurableAIAgentWorker, mock_agent: Mock) -> None: + """Verify duplicate agent names are not allowed.""" + agent_worker.add_agent(mock_agent) + + # Try to register another agent with the same name + duplicate_agent = Mock() + duplicate_agent.name = "test_agent" + + with pytest.raises(ValueError, match="already registered"): + agent_worker.add_agent(duplicate_agent) + + def test_registered_agent_names_tracks_multiple_agents(self, agent_worker: DurableAIAgentWorker) -> None: + """Verify registered_agent_names tracks all registered agents.""" + agent1 = Mock() + agent1.name = "agent1" + agent2 = Mock() + agent2.name = "agent2" + agent3 = Mock() + agent3.name = "agent3" + + agent_worker.add_agent(agent1) + agent_worker.add_agent(agent2) + agent_worker.add_agent(agent3) + + registered = agent_worker.registered_agent_names + assert "agent1" in registered + assert "agent2" in registered + assert "agent3" in registered + assert len(registered) == 3 + + +class TestDurableAIAgentWorkerCallbacks: + """Test callback configuration behavior.""" + + def test_worker_level_callback_accepted(self, mock_grpc_worker: Mock) -> None: + """Verify worker-level callback can be set.""" + mock_callback = Mock() + agent_worker = DurableAIAgentWorker(mock_grpc_worker, callback=mock_callback) + + assert agent_worker is not None + + def test_agent_level_callback_accepted(self, agent_worker: DurableAIAgentWorker, mock_agent: Mock) -> None: + """Verify agent-level callback can be set during registration.""" + mock_callback = Mock() + + # Should not raise exception + agent_worker.add_agent(mock_agent, callback=mock_callback) + 
+ assert "test_agent" in agent_worker.registered_agent_names + + def test_none_callback_accepted(self, mock_grpc_worker: Mock, mock_agent: Mock) -> None: + """Verify None callback is valid (no callbacks required).""" + agent_worker = DurableAIAgentWorker(mock_grpc_worker, callback=None) + agent_worker.add_agent(mock_agent, callback=None) + + assert "test_agent" in agent_worker.registered_agent_names + + +class TestDurableAIAgentWorkerLifecycle: + """Test worker lifecycle behavior.""" + + def test_start_delegates_to_underlying_worker( + self, agent_worker: DurableAIAgentWorker, mock_grpc_worker: Mock + ) -> None: + """Verify start() delegates to wrapped worker.""" + agent_worker.start() + + mock_grpc_worker.start.assert_called_once() + + def test_stop_delegates_to_underlying_worker( + self, agent_worker: DurableAIAgentWorker, mock_grpc_worker: Mock + ) -> None: + """Verify stop() delegates to wrapped worker.""" + agent_worker.stop() + + mock_grpc_worker.stop.assert_called_once() + + def test_start_works_with_no_agents(self, agent_worker: DurableAIAgentWorker, mock_grpc_worker: Mock) -> None: + """Verify worker can start even with no agents registered.""" + agent_worker.start() + + mock_grpc_worker.start.assert_called_once() + + def test_start_works_with_multiple_agents(self, agent_worker: DurableAIAgentWorker, mock_grpc_worker: Mock) -> None: + """Verify worker can start with multiple agents registered.""" + agent1 = Mock() + agent1.name = "agent1" + agent2 = Mock() + agent2.name = "agent2" + + agent_worker.add_agent(agent1) + agent_worker.add_agent(agent2) + agent_worker.start() + + mock_grpc_worker.start.assert_called_once() + assert len(agent_worker.registered_agent_names) == 2 + + +if __name__ == "__main__": + pytest.main([__file__, "-v", "--tb=short"]) diff --git a/python/pyproject.toml b/python/pyproject.toml index 895ef7fdd4..2301447f19 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -95,6 +95,7 @@ agent-framework-chatkit = { workspace = true } agent-framework-copilotstudio = { workspace = true } agent-framework-declarative = { workspace = true } agent-framework-devui = { workspace = true } +agent-framework-durabletask = { workspace = true } agent-framework-foundry-local = { workspace = true } agent-framework-lab = { workspace = true } agent-framework-mem0 = { workspace = true } diff --git a/python/samples/README.md b/python/samples/README.md index b877be0f2b..169ebbc001 100644 --- a/python/samples/README.md +++ b/python/samples/README.md @@ -253,6 +253,21 @@ The recommended way to use Ollama is via the native `OllamaChatClient` from the | [`getting_started/azure_functions/05_multi_agent_orchestration_concurrency/`](./getting_started/azure_functions/05_multi_agent_orchestration_concurrency/) | Run two agents concurrently within a durable orchestration and combine their domain-specific outputs. | | [`getting_started/azure_functions/06_multi_agent_orchestration_conditionals/`](./getting_started/azure_functions/06_multi_agent_orchestration_conditionals/) | Route orchestration logic based on structured agent responses for spam detection and reply drafting. | | [`getting_started/azure_functions/07_single_agent_orchestration_hitl/`](./getting_started/azure_functions/07_single_agent_orchestration_hitl/) | Implement a human-in-the-loop approval loop that iterates on agent output inside a durable orchestration. 
| +| [`getting_started/azure_functions/08_mcp_server/`](./getting_started/azure_functions/08_mcp_server/) | Configure agents as both HTTP endpoints and MCP tools for flexible integration patterns. | + +## Durable Task + +These samples demonstrate durable agent hosting using the Durable Task Scheduler with a worker-client architecture pattern, enabling distributed agent execution with persistent conversation state. + +| Sample | Description | +|--------|-------------| +| [`getting_started/durabletask/01_single_agent/`](./getting_started/durabletask/01_single_agent/) | Host a single conversational agent with worker-client architecture and agent state management. | +| [`getting_started/durabletask/02_multi_agent/`](./getting_started/durabletask/02_multi_agent/) | Host multiple domain-specific agents and route requests based on question topic. | +| [`getting_started/durabletask/03_single_agent_streaming/`](./getting_started/durabletask/03_single_agent_streaming/) | Implement reliable streaming using Redis Streams with cursor-based resumption for durable agents. | +| [`getting_started/durabletask/04_single_agent_orchestration_chaining/`](./getting_started/durabletask/04_single_agent_orchestration_chaining/) | Chain multiple agent invocations using durable orchestration while preserving conversation context. | +| [`getting_started/durabletask/05_multi_agent_orchestration_concurrency/`](./getting_started/durabletask/05_multi_agent_orchestration_concurrency/) | Run multiple agents concurrently within an orchestration and aggregate their responses. | +| [`getting_started/durabletask/06_multi_agent_orchestration_conditionals/`](./getting_started/durabletask/06_multi_agent_orchestration_conditionals/) | Implement conditional branching with spam detection using structured outputs and activity functions. | +| [`getting_started/durabletask/07_single_agent_orchestration_hitl/`](./getting_started/durabletask/07_single_agent_orchestration_hitl/) | Human-in-the-loop pattern with external event handling, timeouts, and iterative refinement. | ## Observability diff --git a/python/samples/getting_started/azure_functions/04_single_agent_orchestration_chaining/function_app.py b/python/samples/getting_started/azure_functions/04_single_agent_orchestration_chaining/function_app.py index b04fb0d34b..52b3612cda 100644 --- a/python/samples/getting_started/azure_functions/04_single_agent_orchestration_chaining/function_app.py +++ b/python/samples/getting_started/azure_functions/04_single_agent_orchestration_chaining/function_app.py @@ -10,6 +10,7 @@ import json import logging +from collections.abc import Generator from typing import Any import azure.functions as func @@ -44,7 +45,7 @@ def _create_writer_agent() -> Any: # 4. Orchestration that runs the agent sequentially on a shared thread for chaining behaviour. 
@app.orchestration_trigger(context_name="context") -def single_agent_orchestration(context: DurableOrchestrationContext): +def single_agent_orchestration(context: DurableOrchestrationContext) -> Generator[Any, Any, str]: """Run the writer agent twice on the same thread to mirror chaining behaviour.""" writer = app.get_agent(context, WRITER_AGENT_NAME) @@ -116,12 +117,6 @@ async def get_orchestration_status( ) status = await client.get_status(instance_id) - if status is None: - return func.HttpResponse( - body=json.dumps({"error": "Instance not found"}), - status_code=404, - mimetype="application/json", - ) response_data: dict[str, Any] = { "instanceId": status.instance_id, diff --git a/python/samples/getting_started/azure_functions/05_multi_agent_orchestration_concurrency/function_app.py b/python/samples/getting_started/azure_functions/05_multi_agent_orchestration_concurrency/function_app.py index 4ba86d4455..f1772280f8 100644 --- a/python/samples/getting_started/azure_functions/05_multi_agent_orchestration_concurrency/function_app.py +++ b/python/samples/getting_started/azure_functions/05_multi_agent_orchestration_concurrency/function_app.py @@ -10,6 +10,7 @@ import json import logging +from collections.abc import Generator from typing import Any, cast import azure.functions as func @@ -51,7 +52,7 @@ def _create_agents() -> list[Any]: # 4. Durable Functions orchestration that runs both agents in parallel. @app.orchestration_trigger(context_name="context") -def multi_agent_concurrent_orchestration(context: DurableOrchestrationContext): +def multi_agent_concurrent_orchestration(context: DurableOrchestrationContext) -> Generator[Any, Any, dict[str, str]]: """Fan out to two domain-specific agents and aggregate their responses.""" prompt = context.get_input() @@ -137,12 +138,6 @@ async def get_orchestration_status( ) status = await client.get_status(instance_id) - if status is None: - return func.HttpResponse( - body=json.dumps({"error": "Instance not found"}), - status_code=404, - mimetype="application/json", - ) response_data: dict[str, Any] = { "instanceId": status.instance_id, diff --git a/python/samples/getting_started/azure_functions/06_multi_agent_orchestration_conditionals/function_app.py b/python/samples/getting_started/azure_functions/06_multi_agent_orchestration_conditionals/function_app.py index 2779c5ee65..ea373e588a 100644 --- a/python/samples/getting_started/azure_functions/06_multi_agent_orchestration_conditionals/function_app.py +++ b/python/samples/getting_started/azure_functions/06_multi_agent_orchestration_conditionals/function_app.py @@ -11,7 +11,7 @@ import json import logging -from collections.abc import Mapping +from collections.abc import Generator, Mapping from typing import Any import azure.functions as func @@ -74,7 +74,7 @@ def send_email(message: str) -> str: # 4. Orchestration validates input, runs agents, and branches on spam results. 
@app.orchestration_trigger(context_name="context") -def spam_detection_orchestration(context: DurableOrchestrationContext): +def spam_detection_orchestration(context: DurableOrchestrationContext) -> Generator[Any, Any, str]: payload_raw = context.get_input() if not isinstance(payload_raw, Mapping): raise ValueError("Email data is required") @@ -107,7 +107,7 @@ def spam_detection_orchestration(context: DurableOrchestrationContext): raise ValueError("Failed to parse spam detection result") if spam_result.is_spam: - result = yield context.call_activity("handle_spam_email", spam_result.reason) + result = yield context.call_activity("handle_spam_email", spam_result.reason) # type: ignore[misc] return result email_thread = email_agent.get_new_thread() @@ -129,7 +129,7 @@ def spam_detection_orchestration(context: DurableOrchestrationContext): if email_result is None: raise ValueError("Failed to parse email response") - result = yield context.call_activity("send_email", email_result.response) + result = yield context.call_activity("send_email", email_result.response) # type: ignore[misc] return result @@ -200,12 +200,6 @@ async def get_orchestration_status( ) status = await client.get_status(instance_id) - if status is None: - return func.HttpResponse( - body=json.dumps({"error": "Instance not found"}), - status_code=404, - mimetype="application/json", - ) response_data: dict[str, Any] = { "instanceId": status.instance_id, diff --git a/python/samples/getting_started/azure_functions/07_single_agent_orchestration_hitl/function_app.py b/python/samples/getting_started/azure_functions/07_single_agent_orchestration_hitl/function_app.py index b9665e3d15..1b55620233 100644 --- a/python/samples/getting_started/azure_functions/07_single_agent_orchestration_hitl/function_app.py +++ b/python/samples/getting_started/azure_functions/07_single_agent_orchestration_hitl/function_app.py @@ -10,7 +10,7 @@ import json import logging -from collections.abc import Mapping +from collections.abc import Generator, Mapping from datetime import timedelta from typing import Any @@ -62,7 +62,7 @@ def _create_writer_agent() -> Any: # 3. Activities encapsulate external work for review notifications and publishing. @app.activity_trigger(input_name="content") -def notify_user_for_approval(content: dict) -> None: +def notify_user_for_approval(content: dict[str, str]) -> None: model = GeneratedContent.model_validate(content) logger.info("NOTIFICATION: Please review the following content for approval:") logger.info("Title: %s", model.title or "(untitled)") @@ -71,7 +71,7 @@ def notify_user_for_approval(content: dict) -> None: @app.activity_trigger(input_name="content") -def publish_content(content: dict) -> None: +def publish_content(content: dict[str, str]) -> None: model = GeneratedContent.model_validate(content) logger.info("PUBLISHING: Content has been published successfully:") logger.info("Title: %s", model.title or "(untitled)") @@ -80,7 +80,7 @@ def publish_content(content: dict) -> None: # 4. Orchestration loops until the human approves, times out, or attempts are exhausted. 
@app.orchestration_trigger(context_name="context") -def content_generation_hitl_orchestration(context: DurableOrchestrationContext): +def content_generation_hitl_orchestration(context: DurableOrchestrationContext) -> Generator[Any, Any, dict[str, str]]: payload_raw = context.get_input() if not isinstance(payload_raw, Mapping): raise ValueError("Content generation input is required") @@ -101,8 +101,7 @@ def content_generation_hitl_orchestration(context: DurableOrchestrationContext): options={"response_format": GeneratedContent}, ) - content = initial_raw.try_parse_value(GeneratedContent) - logger.info("Type of content after extraction: %s", type(content)) + content = initial_raw.value if content is None: raise ValueError("Agent returned no content after extraction.") @@ -114,7 +113,7 @@ def content_generation_hitl_orchestration(context: DurableOrchestrationContext): f"Requesting human feedback. Iteration #{attempt}. Timeout: {payload.approval_timeout_hours} hour(s)." ) - yield context.call_activity("notify_user_for_approval", content.model_dump()) + yield context.call_activity("notify_user_for_approval", content.model_dump()) # type: ignore[misc] approval_task = context.wait_for_external_event(HUMAN_APPROVAL_EVENT) timeout_task = context.create_timer( @@ -129,13 +128,20 @@ def content_generation_hitl_orchestration(context: DurableOrchestrationContext): if approval_payload.approved: context.set_custom_status("Content approved by human reviewer. Publishing content...") - yield context.call_activity("publish_content", content.model_dump()) + yield context.call_activity("publish_content", content.model_dump()) # type: ignore[misc] context.set_custom_status( f"Content published successfully at {context.current_utc_datetime:%Y-%m-%dT%H:%M:%S}" ) return {"content": content.content} - context.set_custom_status("Content rejected by human reviewer. Incorporating feedback and regenerating...") + context.set_custom_status( + "Content rejected by human reviewer. Incorporating feedback and regenerating..." + ) + + # Check if we've exhausted attempts + if attempt >= payload.max_review_attempts: + break + rewrite_prompt = ( "The content was rejected by a human reviewer. Please rewrite the article incorporating their feedback.\n\n" f"Human Feedback: {approval_payload.feedback or 'No feedback provided.'}" @@ -153,9 +159,15 @@ def content_generation_hitl_orchestration(context: DurableOrchestrationContext): context.set_custom_status( f"Human approval timed out after {payload.approval_timeout_hours} hour(s). Treating as rejection." ) - raise TimeoutError(f"Human approval timed out after {payload.approval_timeout_hours} hour(s).") - - raise RuntimeError(f"Content could not be approved after {payload.max_review_attempts} iteration(s).") + raise TimeoutError( + f"Human approval timed out after {payload.approval_timeout_hours} hour(s)." + ) + + # If we exit the loop without returning, max attempts were exhausted + context.set_custom_status("Max review attempts exhausted.") + raise RuntimeError( + f"Content could not be approved after {payload.max_review_attempts} iteration(s)." + ) # 5. HTTP endpoint that starts the human-in-the-loop orchestration. 
@@ -281,7 +293,7 @@ async def get_orchestration_status( ) # Check if status is None or if the instance doesn't exist (runtime_status is None) - if status is None or getattr(status, "runtime_status", None) is None: + if getattr(status, "runtime_status", None) is None: return func.HttpResponse( body=json.dumps({"error": "Instance not found."}), status_code=404, diff --git a/python/samples/getting_started/durabletask/01_single_agent/README.md b/python/samples/getting_started/durabletask/01_single_agent/README.md new file mode 100644 index 0000000000..ffe3b1484a --- /dev/null +++ b/python/samples/getting_started/durabletask/01_single_agent/README.md @@ -0,0 +1,73 @@ +# Single Agent + +This sample demonstrates how to create a worker-client setup that hosts a single AI agent and provides interactive conversation via the Durable Task Scheduler. + +## Key Concepts Demonstrated + +- Using the Microsoft Agent Framework to define a simple AI agent with a name and instructions. +- Registering durable agents with the worker and interacting with them via a client. +- Conversation management (via threads) for isolated interactions. +- Worker-client architecture for distributed agent execution. + +## Environment Setup + +See the [README.md](../README.md) file in the parent directory for more information on how to configure the environment, including how to install and run common sample dependencies. + +## Running the Sample + +With the environment setup, you can run the sample using the combined approach or separate worker and client processes: + +**Option 1: Combined (Recommended for Testing)** + +```bash +cd samples/getting_started/durabletask/01_single_agent +python sample.py +``` + +**Option 2: Separate Processes** + +Start the worker in one terminal: + +```bash +python worker.py +``` + +In a new terminal, run the client: + +```bash +python client.py +``` + +The client will interact with the Joker agent: + +``` +Starting Durable Task Agent Client... +Using taskhub: default +Using endpoint: http://localhost:8080 + +Getting reference to Joker agent... +Created conversation thread: a1b2c3d4-e5f6-7890-abcd-ef1234567890 + +User: Tell me a short joke about cloud computing. + +Joker: Why did the cloud break up with the server? +Because it found someone more "uplifting"! + +User: Now tell me one about Python programming. + +Joker: Why do Python programmers prefer dark mode? +Because light attracts bugs! +``` + +## Viewing Agent State + +You can view the state of the agent in the Durable Task Scheduler dashboard: + +1. Open your browser and navigate to `http://localhost:8082` +2. In the dashboard, you can view: + - The state of the Joker agent entity (dafx-Joker) + - Conversation history and current state + - How the durable agents extension manages conversation context + + + diff --git a/python/samples/getting_started/durabletask/01_single_agent/client.py b/python/samples/getting_started/durabletask/01_single_agent/client.py new file mode 100644 index 0000000000..7c8b27d80c --- /dev/null +++ b/python/samples/getting_started/durabletask/01_single_agent/client.py @@ -0,0 +1,119 @@ +"""Client application for interacting with a Durable Task hosted agent. + +This client connects to the Durable Task Scheduler and sends requests to +registered agents, demonstrating how to interact with agents from external processes. 
+ +Prerequisites: +- The worker must be running with the agent registered +- Set AZURE_OPENAI_ENDPOINT and AZURE_OPENAI_CHAT_DEPLOYMENT_NAME + (plus AZURE_OPENAI_API_KEY or Azure CLI authentication) +- Durable Task Scheduler must be running +""" + +import asyncio +import logging +import os + +from agent_framework.azure import DurableAIAgentClient +from azure.identity import DefaultAzureCredential +from durabletask.azuremanaged.client import DurableTaskSchedulerClient + +# Configure logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + + +def get_client( + taskhub: str | None = None, + endpoint: str | None = None, + log_handler: logging.Handler | None = None +) -> DurableAIAgentClient: + """Create a configured DurableAIAgentClient. + + Args: + taskhub: Task hub name (defaults to TASKHUB env var or "default") + endpoint: Scheduler endpoint (defaults to ENDPOINT env var or "http://localhost:8080") + log_handler: Optional logging handler for client logging + + Returns: + Configured DurableAIAgentClient instance + """ + taskhub_name = taskhub or os.getenv("TASKHUB", "default") + endpoint_url = endpoint or os.getenv("ENDPOINT", "http://localhost:8080") + + logger.debug(f"Using taskhub: {taskhub_name}") + logger.debug(f"Using endpoint: {endpoint_url}") + + credential = None if endpoint_url == "http://localhost:8080" else DefaultAzureCredential() + + dts_client = DurableTaskSchedulerClient( + host_address=endpoint_url, + secure_channel=endpoint_url != "http://localhost:8080", + taskhub=taskhub_name, + token_credential=credential, + log_handler=log_handler + ) + + return DurableAIAgentClient(dts_client) + + +def run_client(agent_client: DurableAIAgentClient) -> None: + """Run client interactions with the Joker agent. + + Args: + agent_client: The DurableAIAgentClient instance + """ + # Get a reference to the Joker agent + logger.debug("Getting reference to Joker agent...") + joker = agent_client.get_agent("Joker") + + # Create a new thread for the conversation + thread = joker.get_new_thread() + logger.debug(f"Thread ID: {thread.session_id}") + logger.info("Start chatting with the Joker agent! 
(Type 'exit' to quit)") + + # Interactive conversation loop + while True: + # Get user input + try: + user_message = input("You: ").strip() + except (EOFError, KeyboardInterrupt): + logger.info("\nExiting...") + break + + # Check for exit command + if user_message.lower() == "exit": + logger.info("Goodbye!") + break + + # Skip empty messages + if not user_message: + continue + + # Send message to agent and get response + try: + response = joker.run(user_message, thread=thread) + logger.info(f"Joker: {response.text} \n") + except Exception as e: + logger.error(f"Error getting response: {e}") + + logger.info("Conversation completed.") + + +async def main() -> None: + """Main entry point for the client application.""" + logger.debug("Starting Durable Task Agent Client...") + + # Create client using helper function + agent_client = get_client() + + try: + run_client(agent_client) + except Exception as e: + logger.exception(f"Error during agent interaction: {e}") + finally: + logger.debug("Client shutting down") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/getting_started/durabletask/01_single_agent/requirements.txt b/python/samples/getting_started/durabletask/01_single_agent/requirements.txt new file mode 100644 index 0000000000..371b9e3b79 --- /dev/null +++ b/python/samples/getting_started/durabletask/01_single_agent/requirements.txt @@ -0,0 +1,6 @@ +# Agent Framework packages (installing from local package until a package is published) +-e ../../../../ +-e ../../../../packages/durabletask + +# Azure authentication +azure-identity diff --git a/python/samples/getting_started/durabletask/01_single_agent/sample.py b/python/samples/getting_started/durabletask/01_single_agent/sample.py new file mode 100644 index 0000000000..b8c39974c0 --- /dev/null +++ b/python/samples/getting_started/durabletask/01_single_agent/sample.py @@ -0,0 +1,57 @@ +"""Single Agent Sample - Durable Task Integration (Combined Worker + Client) + +This sample demonstrates running both the worker and client in a single process. +The worker is started first to register the agent, then client operations are +performed against the running worker. 
+ +Prerequisites: +- Set AZURE_OPENAI_ENDPOINT and AZURE_OPENAI_CHAT_DEPLOYMENT_NAME + (plus AZURE_OPENAI_API_KEY or Azure CLI authentication) +- Durable Task Scheduler must be running (e.g., using Docker) + +To run this sample: + python sample.py +""" + +import logging + +from dotenv import load_dotenv + +# Import helper functions from worker and client modules +from client import get_client, run_client +from worker import get_worker, setup_worker + +# Configure logging (must be after imports to override their basicConfig) +logging.basicConfig(level=logging.INFO, force=True) +logger = logging.getLogger(__name__) + +def main(): + """Main entry point - runs both worker and client in single process.""" + logger.debug("Starting Durable Task Agent Sample (Combined Worker + Client)...") + + silent_handler = logging.NullHandler() + + # Create and start the worker using helper function and context manager + with get_worker(log_handler=silent_handler) as dts_worker: + # Register agents using helper function + setup_worker(dts_worker) + + # Start the worker + dts_worker.start() + logger.debug("Worker started and listening for requests...") + + # Create the client using helper function + agent_client = get_client(log_handler=silent_handler) + + try: + # Run client interactions using helper function + run_client(agent_client) + except Exception as e: + logger.exception(f"Error during agent interaction: {e}") + + logger.debug("Sample completed. Worker shutting down...") + + +if __name__ == "__main__": + load_dotenv() + main() diff --git a/python/samples/getting_started/durabletask/01_single_agent/worker.py b/python/samples/getting_started/durabletask/01_single_agent/worker.py new file mode 100644 index 0000000000..4b837a8a8e --- /dev/null +++ b/python/samples/getting_started/durabletask/01_single_agent/worker.py @@ -0,0 +1,121 @@ +"""Worker process for hosting a single Azure OpenAI-powered agent using Durable Task. + +This worker registers agents as durable entities and continuously listens for requests. +The worker should run as a background service, processing incoming agent requests. + +Prerequisites: +- Set AZURE_OPENAI_ENDPOINT and AZURE_OPENAI_CHAT_DEPLOYMENT_NAME + (plus AZURE_OPENAI_API_KEY or Azure CLI authentication) +- Start a Durable Task Scheduler (e.g., using Docker) +""" + +import asyncio +import logging +import os + +from agent_framework import ChatAgent +from agent_framework.azure import AzureOpenAIChatClient, DurableAIAgentWorker +from azure.identity import AzureCliCredential, DefaultAzureCredential +from durabletask.azuremanaged.worker import DurableTaskSchedulerWorker + +# Configure logging +logging.basicConfig(level=logging.WARNING) +logger = logging.getLogger(__name__) + + +def create_joker_agent() -> ChatAgent: + """Create the Joker agent using Azure OpenAI. + + Returns: + ChatAgent: The configured Joker agent + """ + return AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( + name="Joker", + instructions="You are good at telling jokes.", + ) + + +def get_worker( + taskhub: str | None = None, + endpoint: str | None = None, + log_handler: logging.Handler | None = None +) -> DurableTaskSchedulerWorker: + """Create a configured DurableTaskSchedulerWorker. 
+
+    Args:
+        taskhub: Task hub name (defaults to TASKHUB env var or "default")
+        endpoint: Scheduler endpoint (defaults to ENDPOINT env var or "http://localhost:8080")
+        log_handler: Optional logging handler for worker logging
+
+    Returns:
+        Configured DurableTaskSchedulerWorker instance
+    """
+    taskhub_name = taskhub or os.getenv("TASKHUB", "default")
+    endpoint_url = endpoint or os.getenv("ENDPOINT", "http://localhost:8080")
+
+    logger.debug(f"Using taskhub: {taskhub_name}")
+    logger.debug(f"Using endpoint: {endpoint_url}")
+
+    credential = None if endpoint_url == "http://localhost:8080" else DefaultAzureCredential()
+
+    return DurableTaskSchedulerWorker(
+        host_address=endpoint_url,
+        secure_channel=endpoint_url != "http://localhost:8080",
+        taskhub=taskhub_name,
+        token_credential=credential,
+        log_handler=log_handler
+    )
+
+
+def setup_worker(worker: DurableTaskSchedulerWorker) -> DurableAIAgentWorker:
+    """Set up the worker with agents registered.
+
+    Args:
+        worker: The DurableTaskSchedulerWorker instance
+
+    Returns:
+        DurableAIAgentWorker with agents registered
+    """
+    # Wrap it with the agent worker
+    agent_worker = DurableAIAgentWorker(worker)
+
+    # Create and register the Joker agent
+    logger.debug("Creating and registering Joker agent...")
+    joker_agent = create_joker_agent()
+    agent_worker.add_agent(joker_agent)
+
+    logger.debug(f"✓ Registered agent: {joker_agent.name}")
+    logger.debug(f"  Entity name: dafx-{joker_agent.name}")
+
+    return agent_worker
+
+
+async def main():
+    """Main entry point for the worker process."""
+    logger.debug("Starting Durable Task Agent Worker...")
+
+    # Create a worker using the helper function
+    worker = get_worker()
+
+    # Set up the worker with agents
+    setup_worker(worker)
+
+    logger.info("Worker is ready and listening for requests...")
+    logger.info("Press Ctrl+C to stop.")
+    logger.info("")
+
+    try:
+        # Start the worker (non-blocking; the worker listens on a background thread)
+        worker.start()
+
+        # Keep the process alive while the worker handles requests
+        while True:
+            await asyncio.sleep(1)
+    except KeyboardInterrupt:
+        logger.debug("Worker shutdown initiated")
+
+    logger.debug("Worker stopped")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/python/samples/getting_started/durabletask/02_multi_agent/README.md b/python/samples/getting_started/durabletask/02_multi_agent/README.md
new file mode 100644
index 0000000000..e9b2a36e19
--- /dev/null
+++ b/python/samples/getting_started/durabletask/02_multi_agent/README.md
@@ -0,0 +1,80 @@
+# Multi-Agent
+
+This sample demonstrates how to host multiple AI agents with different tools in a single worker-client setup using the Durable Task Scheduler.
+
+## Key Concepts Demonstrated
+
+- Hosting multiple agents (WeatherAgent and MathAgent) in a single worker process.
+- Each agent with its own specialized tools and instructions.
+- Interacting with different agents using separate conversation threads.
+- Worker-client architecture for multi-agent systems.
+
+## Environment Setup
+
+See the [README.md](../README.md) file in the parent directory for more information on how to configure the environment, including how to install and run common sample dependencies.
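+
+As a quick sketch of the interaction pattern this sample implements (a minimal example, assuming the worker is already running and using the `get_client` helper from this sample's `client.py`):
+
+```python
+from client import get_client
+
+agent_client = get_client()
+
+# Agents are addressed by the names they were registered with on the worker.
+weather = agent_client.get_agent("WeatherAgent")
+math_agent = agent_client.get_agent("MathAgent")
+
+# Separate threads keep the two conversations isolated from each other.
+weather_thread = weather.get_new_thread()
+math_thread = math_agent.get_new_thread()
+
+print(weather.run("What is the weather in Seattle?", thread=weather_thread).text)
+print(math_agent.run("Calculate a 20% tip on a $50 bill", thread=math_thread).text)
+```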
+ +## Running the Sample + +With the environment setup, you can run the sample using the combined approach or separate worker and client processes: + +**Option 1: Combined (Recommended for Testing)** + +```bash +cd samples/getting_started/durabletask/02_multi_agent +python sample.py +``` + +**Option 2: Separate Processes** + +Start the worker in one terminal: + +```bash +python worker.py +``` + +In a new terminal, run the client: + +```bash +python client.py +``` + +The client will interact with both agents: + +``` +Starting Durable Task Multi-Agent Client... +Using taskhub: default +Using endpoint: http://localhost:8080 + +================================================================================ +Testing WeatherAgent +================================================================================ + +Created weather conversation thread: +User: What is the weather in Seattle? + +🔧 [TOOL CALLED] get_weather(location=Seattle) +✓ [TOOL RESULT] {'location': 'Seattle', 'temperature': 72, 'conditions': 'Sunny', 'humidity': 45} + +WeatherAgent: The current weather in Seattle is sunny with a temperature of 72°F and 45% humidity. + +================================================================================ +Testing MathAgent +================================================================================ + +Created math conversation thread: +User: Calculate a 20% tip on a $50 bill + +🔧 [TOOL CALLED] calculate_tip(bill_amount=50.0, tip_percentage=20.0) +✓ [TOOL RESULT] {'bill_amount': 50.0, 'tip_percentage': 20.0, 'tip_amount': 10.0, 'total': 60.0} + +MathAgent: For a $50 bill with a 20% tip, the tip amount is $10.00 and the total is $60.00. +``` + +## Viewing Agent State + +You can view the state of both agents in the Durable Task Scheduler dashboard: + +1. Open your browser and navigate to `http://localhost:8082` +2. In the dashboard, you can view: + - The state of both WeatherAgent and MathAgent entities (dafx-WeatherAgent, dafx-MathAgent) + - Each agent's conversation state across multiple interactions diff --git a/python/samples/getting_started/durabletask/02_multi_agent/client.py b/python/samples/getting_started/durabletask/02_multi_agent/client.py new file mode 100644 index 0000000000..d7cecabd99 --- /dev/null +++ b/python/samples/getting_started/durabletask/02_multi_agent/client.py @@ -0,0 +1,116 @@ +"""Client application for interacting with multiple hosted agents. + +This client connects to the Durable Task Scheduler and interacts with two different +agents (WeatherAgent and MathAgent), demonstrating how to work with multiple agents +each with their own specialized capabilities and tools. + +Prerequisites: +- The worker must be running with both agents registered +- Set AZURE_OPENAI_ENDPOINT and AZURE_OPENAI_CHAT_DEPLOYMENT_NAME + (plus AZURE_OPENAI_API_KEY or Azure CLI authentication) +- Durable Task Scheduler must be running +""" + +import asyncio +import logging +import os + +from agent_framework.azure import DurableAIAgentClient +from azure.identity import DefaultAzureCredential +from durabletask.azuremanaged.client import DurableTaskSchedulerClient + +# Configure logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + + +def get_client( + taskhub: str | None = None, + endpoint: str | None = None, + log_handler: logging.Handler | None = None +) -> DurableAIAgentClient: + """Create a configured DurableAIAgentClient. 
+ + Args: + taskhub: Task hub name (defaults to TASKHUB env var or "default") + endpoint: Scheduler endpoint (defaults to ENDPOINT env var or "http://localhost:8080") + log_handler: Optional logging handler for client logging + + Returns: + Configured DurableAIAgentClient instance + """ + taskhub_name = taskhub or os.getenv("TASKHUB", "default") + endpoint_url = endpoint or os.getenv("ENDPOINT", "http://localhost:8080") + + logger.debug(f"Using taskhub: {taskhub_name}") + logger.debug(f"Using endpoint: {endpoint_url}") + + credential = None if endpoint_url == "http://localhost:8080" else DefaultAzureCredential() + + dts_client = DurableTaskSchedulerClient( + host_address=endpoint_url, + secure_channel=endpoint_url != "http://localhost:8080", + taskhub=taskhub_name, + token_credential=credential, + log_handler=log_handler + ) + + return DurableAIAgentClient(dts_client) + + +def run_client(agent_client: DurableAIAgentClient) -> None: + """Run client interactions with both WeatherAgent and MathAgent. + + Args: + agent_client: The DurableAIAgentClient instance + """ + logger.debug("Testing WeatherAgent") + + # Get reference to WeatherAgent + weather_agent = agent_client.get_agent("WeatherAgent") + weather_thread = weather_agent.get_new_thread() + + logger.debug(f"Created weather conversation thread: {weather_thread.session_id}") + + # Test WeatherAgent + weather_message = "What is the weather in Seattle?" + logger.info(f"User: {weather_message}") + + weather_response = weather_agent.run(weather_message, thread=weather_thread) + logger.info(f"WeatherAgent: {weather_response.text} \n") + + logger.debug("Testing MathAgent") + + # Get reference to MathAgent + math_agent = agent_client.get_agent("MathAgent") + math_thread = math_agent.get_new_thread() + + logger.debug(f"Created math conversation thread: {math_thread.session_id}") + + # Test MathAgent + math_message = "Calculate a 20% tip on a $50 bill" + logger.info(f"User: {math_message}") + + math_response = math_agent.run(math_message, thread=math_thread) + logger.info(f"MathAgent: {math_response.text} \n") + + logger.debug("Both agents completed successfully!") + + +async def main() -> None: + """Main entry point for the client application.""" + logger.debug("Starting Durable Task Multi-Agent Client...") + + # Create client using helper function + agent_client = get_client() + + try: + run_client(agent_client) + except Exception as e: + logger.exception(f"Error during agent interaction: {e}") + finally: + logger.debug("Client shutting down") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/getting_started/durabletask/02_multi_agent/requirements.txt b/python/samples/getting_started/durabletask/02_multi_agent/requirements.txt new file mode 100644 index 0000000000..371b9e3b79 --- /dev/null +++ b/python/samples/getting_started/durabletask/02_multi_agent/requirements.txt @@ -0,0 +1,6 @@ +# Agent Framework packages (installing from local package until a package is published) +-e ../../../../ +-e ../../../../packages/durabletask + +# Azure authentication +azure-identity diff --git a/python/samples/getting_started/durabletask/02_multi_agent/sample.py b/python/samples/getting_started/durabletask/02_multi_agent/sample.py new file mode 100644 index 0000000000..9945601c20 --- /dev/null +++ b/python/samples/getting_started/durabletask/02_multi_agent/sample.py @@ -0,0 +1,57 @@ +"""Multi-Agent Sample - Durable Task Integration (Combined Worker + Client) + +This sample demonstrates running both the worker and client in a single 
process +for multiple agents with different tools. The worker registers two agents +(WeatherAgent and MathAgent), each with their own specialized capabilities. + +Prerequisites: +- Set AZURE_OPENAI_ENDPOINT and AZURE_OPENAI_CHAT_DEPLOYMENT_NAME + (plus AZURE_OPENAI_API_KEY or Azure CLI authentication) +- Durable Task Scheduler must be running (e.g., using Docker) + +To run this sample: + python sample.py +""" + +import logging + +from dotenv import load_dotenv + +# Import helper functions from worker and client modules +from client import get_client, run_client +from worker import get_worker, setup_worker + +# Configure logging +logging.basicConfig(level=logging.INFO, force=True) +logger = logging.getLogger(__name__) + + +def main(): + """Main entry point - runs both worker and client in single process.""" + logger.debug("Starting Durable Task Multi-Agent Sample (Combined Worker + Client)...") + + silent_handler = logging.NullHandler() + # Create and start the worker using helper function and context manager + with get_worker(log_handler=silent_handler) as dts_worker: + # Register agents using helper function + setup_worker(dts_worker) + + # Start the worker + dts_worker.start() + logger.debug("Worker started and listening for requests...") + + # Create the client using helper function + agent_client = get_client(log_handler=silent_handler) + + try: + # Run client interactions using helper function + run_client(agent_client) + except Exception as e: + logger.exception(f"Error during agent interaction: {e}") + + logger.debug("Sample completed. Worker shutting down...") + + +if __name__ == "__main__": + load_dotenv() + main() diff --git a/python/samples/getting_started/durabletask/02_multi_agent/worker.py b/python/samples/getting_started/durabletask/02_multi_agent/worker.py new file mode 100644 index 0000000000..b0e51541b9 --- /dev/null +++ b/python/samples/getting_started/durabletask/02_multi_agent/worker.py @@ -0,0 +1,171 @@ +"""Worker process for hosting multiple agents with different tools using Durable Task. + +This worker registers two agents - a weather assistant and a math assistant - each +with their own specialized tools. This demonstrates how to host multiple agents +with different capabilities in a single worker process. 
+ +Prerequisites: +- Set AZURE_OPENAI_ENDPOINT and AZURE_OPENAI_CHAT_DEPLOYMENT_NAME + (plus AZURE_OPENAI_API_KEY or Azure CLI authentication) +- Start a Durable Task Scheduler (e.g., using Docker) +""" + +import asyncio +import logging +import os +from typing import Any + +from agent_framework.azure import AzureOpenAIChatClient, DurableAIAgentWorker +from azure.identity import AzureCliCredential, DefaultAzureCredential +from durabletask.azuremanaged.worker import DurableTaskSchedulerWorker + +# Configure logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +# Agent names +WEATHER_AGENT_NAME = "WeatherAgent" +MATH_AGENT_NAME = "MathAgent" + + +def get_weather(location: str) -> dict[str, Any]: + """Get current weather for a location.""" + logger.info(f"🔧 [TOOL CALLED] get_weather(location={location})") + result = { + "location": location, + "temperature": 72, + "conditions": "Sunny", + "humidity": 45, + } + logger.info(f"✓ [TOOL RESULT] {result}") + return result + + +def calculate_tip(bill_amount: float, tip_percentage: float = 15.0) -> dict[str, Any]: + """Calculate tip amount and total bill.""" + logger.info( + f"🔧 [TOOL CALLED] calculate_tip(bill_amount={bill_amount}, tip_percentage={tip_percentage})" + ) + tip = bill_amount * (tip_percentage / 100) + total = bill_amount + tip + result = { + "bill_amount": bill_amount, + "tip_percentage": tip_percentage, + "tip_amount": round(tip, 2), + "total": round(total, 2), + } + logger.info(f"✓ [TOOL RESULT] {result}") + return result + + +def create_weather_agent(): + """Create the Weather agent using Azure OpenAI. + + Returns: + ChatAgent: The configured Weather agent with weather tool + """ + return AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( + name=WEATHER_AGENT_NAME, + instructions="You are a helpful weather assistant. Provide current weather information.", + tools=[get_weather], + ) + + +def create_math_agent(): + """Create the Math agent using Azure OpenAI. + + Returns: + ChatAgent: The configured Math agent with calculation tools + """ + return AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( + name=MATH_AGENT_NAME, + instructions="You are a helpful math assistant. Help users with calculations like tip calculations.", + tools=[calculate_tip], + ) + + +def get_worker( + taskhub: str | None = None, + endpoint: str | None = None, + log_handler: logging.Handler | None = None +) -> DurableTaskSchedulerWorker: + """Create a configured DurableTaskSchedulerWorker. + + Args: + taskhub: Task hub name (defaults to TASKHUB env var or "default") + endpoint: Scheduler endpoint (defaults to ENDPOINT env var or "http://localhost:8080") + log_handler: Optional logging handler for worker logging + + Returns: + Configured DurableTaskSchedulerWorker instance + """ + taskhub_name = taskhub or os.getenv("TASKHUB", "default") + endpoint_url = endpoint or os.getenv("ENDPOINT", "http://localhost:8080") + + logger.debug(f"Using taskhub: {taskhub_name}") + logger.debug(f"Using endpoint: {endpoint_url}") + + credential = None if endpoint_url == "http://localhost:8080" else DefaultAzureCredential() + + return DurableTaskSchedulerWorker( + host_address=endpoint_url, + secure_channel=endpoint_url != "http://localhost:8080", + taskhub=taskhub_name, + token_credential=credential, + log_handler=log_handler + ) + + +def setup_worker(worker: DurableTaskSchedulerWorker) -> DurableAIAgentWorker: + """Set up the worker with multiple agents registered. 
+
+    Args:
+        worker: The DurableTaskSchedulerWorker instance
+
+    Returns:
+        DurableAIAgentWorker with agents registered
+    """
+    # Wrap it with the agent worker
+    agent_worker = DurableAIAgentWorker(worker)
+
+    # Create and register both agents
+    logger.debug("Creating and registering agents...")
+    weather_agent = create_weather_agent()
+    math_agent = create_math_agent()
+
+    agent_worker.add_agent(weather_agent)
+    agent_worker.add_agent(math_agent)
+
+    logger.debug(f"✓ Registered agents: {weather_agent.name}, {math_agent.name}")
+
+    return agent_worker
+
+
+async def main():
+    """Main entry point for the worker process."""
+    logger.debug("Starting Durable Task Multi-Agent Worker...")
+
+    # Create a worker using the helper function
+    worker = get_worker()
+
+    # Set up the worker with agents
+    setup_worker(worker)
+
+    logger.info("Worker is ready and listening for requests...")
+    logger.info("Press Ctrl+C to stop. \n")
+
+    try:
+        # Start the worker (non-blocking; the worker listens on a background thread)
+        worker.start()
+
+        # Keep the process alive while the worker handles requests
+        while True:
+            await asyncio.sleep(1)
+    except KeyboardInterrupt:
+        logger.debug("Worker shutdown initiated")
+
+    logger.info("Worker stopped")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/python/samples/getting_started/durabletask/03_single_agent_streaming/README.md b/python/samples/getting_started/durabletask/03_single_agent_streaming/README.md
new file mode 100644
index 0000000000..6e9f1428bf
--- /dev/null
+++ b/python/samples/getting_started/durabletask/03_single_agent_streaming/README.md
@@ -0,0 +1,150 @@
+# Single Agent with Reliable Streaming
+
+This sample demonstrates how to use Redis Streams with agent response callbacks to enable reliable, resumable streaming for durable agents. Streaming responses are persisted to Redis, allowing clients to disconnect and reconnect without losing messages.
+
+## Key Concepts Demonstrated
+
+- Using `AgentResponseCallbackProtocol` to capture streaming agent responses.
+- Persisting streaming chunks to Redis Streams for reliable delivery.
+- Non-blocking agent execution with `options={"wait_for_response": False}` (fire-and-forget mode).
+- Cursor-based resumption for disconnected clients.
+- Decoupling agent execution from response streaming.
+
+## Prerequisites
+
+In addition to the common setup in the parent [README.md](../README.md), this sample requires Redis:
+
+```bash
+docker run -d --name redis -p 6379:6379 redis:latest
+```
+
+## Environment Setup
+
+See the [README.md](../README.md) file in the parent directory for more information on how to configure the environment, including how to install and run common sample dependencies.
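+
+Before running, it can help to confirm that the Redis container from the prerequisites is reachable. A minimal connectivity probe (assuming redis-py 5.x, the same `redis` client library this sample uses):
+
+```python
+import asyncio
+import os
+
+import redis.asyncio as aioredis
+
+
+async def check_redis() -> None:
+    # Defaults match the docker command above and the sample's configuration.
+    url = os.getenv("REDIS_CONNECTION_STRING", "redis://localhost:6379")
+    client = aioredis.from_url(url)
+    print("Redis PING ->", await client.ping())  # True means the stream backend is ready
+    await client.aclose()
+
+
+asyncio.run(check_redis())
+```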
+ +Additional environment variables for this sample: + +```bash +# Optional: Redis Configuration +REDIS_CONNECTION_STRING=redis://localhost:6379 +REDIS_STREAM_TTL_MINUTES=10 +``` + +## Running the Sample + +With the environment setup, you can run the sample using the combined approach or separate worker and client processes: + +**Option 1: Combined (Recommended for Testing)** + +```bash +cd samples/getting_started/durabletask/03_single_agent_streaming +python sample.py +``` + +**Option 2: Separate Processes** + +Start the worker in one terminal: + +```bash +python worker.py +``` + +In a new terminal, run the client: + +```bash +python client.py +``` + +The client will send a travel planning request to the TravelPlanner agent and stream the response from Redis in real-time: + +``` +================================================================================ +TravelPlanner Agent - Redis Streaming Demo +================================================================================ + +You: Plan a 3-day trip to Tokyo with emphasis on culture and food + +TravelPlanner (streaming from Redis): +-------------------------------------------------------------------------------- +# Your Amazing 3-Day Tokyo Adventure! 🗾 + +Let me create the perfect cultural and culinary journey through Tokyo... + +## Day 1: Traditional Tokyo & First Impressions +... +(continues streaming) +... + +✓ Response complete! +``` + + +## How It Works + +### Redis Streaming Callback + +The `RedisStreamCallback` class implements `AgentResponseCallbackProtocol` to capture streaming updates and persist them to Redis: + +```python +class RedisStreamCallback(AgentResponseCallbackProtocol): + async def on_streaming_response_update(self, update, context): + # Write chunk to Redis Stream + async with await get_stream_handler() as handler: + await handler.write_chunk(thread_id, update.text, sequence) + + async def on_agent_response(self, response, context): + # Write end-of-stream marker + async with await get_stream_handler() as handler: + await handler.write_completion(thread_id, sequence) +``` + +### Worker Registration + +The worker registers the agent with the Redis streaming callback: + +```python +redis_callback = RedisStreamCallback() +agent_worker = DurableAIAgentWorker(worker, callback=redis_callback) +agent_worker.add_agent(create_travel_agent()) +``` + +### Client Streaming + +The client uses fire-and-forget mode to start the agent and streams from Redis: + +```python +# Start agent run with wait_for_response=False for non-blocking execution +travel_planner.run(user_message, thread=thread, options={"wait_for_response": False}) + +# Stream response from Redis while the agent is processing +async with await get_stream_handler() as stream_handler: + async for chunk in stream_handler.read_stream(thread_id): + if chunk.text: + print(chunk.text, end="", flush=True) + elif chunk.is_done: + break +``` + +**Fire-and-Forget Mode**: Use `options={"wait_for_response": False}` to enable non-blocking execution. The `run()` method signals the agent and returns immediately, allowing the client to stream from Redis without blocking. 
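+
+Under the hood, this maps directly onto Redis Streams primitives: each chunk is appended with `XADD`, and clients read with `XREAD`, where the last entry id seen doubles as the resumption cursor described in the next section. Below is a standalone sketch using raw `redis.asyncio` calls; the `agent-stream:{thread_id}` key format is taken from this sample's client, while the `text`/`done` field names are illustrative rather than the handler's actual schema:
+
+```python
+import asyncio
+
+import redis.asyncio as aioredis
+
+
+async def demo(thread_id: str) -> None:
+    r = aioredis.from_url("redis://localhost:6379", decode_responses=True)
+    key = f"agent-stream:{thread_id}"
+
+    # Producer side: append chunks, then an end-of-stream marker (illustrative fields).
+    await r.xadd(key, {"text": "Hello, "})
+    await r.xadd(key, {"text": "world!"})
+    await r.xadd(key, {"done": "1"})
+
+    # Consumer side: XREAD from a cursor; "0" means read from the beginning.
+    cursor = "0"
+    done = False
+    while not done:
+        batches = await r.xread({key: cursor}, count=10, block=1000)
+        if not batches:
+            continue  # timed out waiting for new entries; keep polling
+        for _stream, entries in batches:
+            for entry_id, fields in entries:
+                cursor = entry_id  # persisting this id is what enables resumption
+                if "done" in fields:
+                    done = True
+                    break
+                print(fields.get("text", ""), end="", flush=True)
+    await r.aclose()
+
+
+asyncio.run(demo("demo-thread"))
+```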
+ +### Cursor-Based Resumption + +Clients can resume streaming from any point after disconnection: + +```python +cursor = "1734649123456-0" # Entry ID from previous stream +async with await get_stream_handler() as stream_handler: + async for chunk in stream_handler.read_stream(thread_id, cursor=cursor): + # Process chunk +``` + +## Viewing Agent State + +You can view the state of the TravelPlanner agent in the Durable Task Scheduler dashboard: + +1. Open your browser and navigate to `http://localhost:8082` +2. In the dashboard, you can view: + - The state of the TravelPlanner agent entity (dafx-TravelPlanner) + - Conversation history and current state + - How the durable agents extension manages conversation context with streaming + diff --git a/python/samples/getting_started/durabletask/03_single_agent_streaming/client.py b/python/samples/getting_started/durabletask/03_single_agent_streaming/client.py new file mode 100644 index 0000000000..92c941d766 --- /dev/null +++ b/python/samples/getting_started/durabletask/03_single_agent_streaming/client.py @@ -0,0 +1,186 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Client application for interacting with the TravelPlanner agent and streaming from Redis. + +This client demonstrates: +1. Sending a travel planning request to the durable agent +2. Streaming the response from Redis in real-time +3. Handling reconnection and cursor-based resumption + +Prerequisites: +- The worker must be running with the TravelPlanner agent registered +- Set AZURE_OPENAI_ENDPOINT and AZURE_OPENAI_CHAT_DEPLOYMENT_NAME +- Redis must be running +- Durable Task Scheduler must be running +""" + +import asyncio +import logging +import os +from datetime import timedelta + +import redis.asyncio as aioredis +from agent_framework.azure import DurableAIAgentClient +from azure.identity import DefaultAzureCredential +from durabletask.azuremanaged.client import DurableTaskSchedulerClient + +from redis_stream_response_handler import RedisStreamResponseHandler + +# Configure logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +# Configuration +REDIS_CONNECTION_STRING = os.environ.get("REDIS_CONNECTION_STRING", "redis://localhost:6379") +REDIS_STREAM_TTL_MINUTES = int(os.environ.get("REDIS_STREAM_TTL_MINUTES", "10")) + + +async def get_stream_handler() -> RedisStreamResponseHandler: + """Create a new Redis stream handler for each request. + + This avoids event loop conflicts by creating a fresh Redis client + in the current event loop context. + """ + # Create a new Redis client in the current event loop + redis_client = aioredis.from_url( # type: ignore[reportUnknownMemberType] + REDIS_CONNECTION_STRING, + encoding="utf-8", + decode_responses=False, + ) + + return RedisStreamResponseHandler( + redis_client=redis_client, + stream_ttl=timedelta(minutes=REDIS_STREAM_TTL_MINUTES), + ) + + +def get_client( + taskhub: str | None = None, + endpoint: str | None = None, + log_handler: logging.Handler | None = None +) -> DurableAIAgentClient: + """Create a configured DurableAIAgentClient. 
+
+    Args:
+        taskhub: Task hub name (defaults to TASKHUB env var or "default")
+        endpoint: Scheduler endpoint (defaults to ENDPOINT env var or "http://localhost:8080")
+        log_handler: Optional log handler for client logging
+
+    Returns:
+        Configured DurableAIAgentClient instance
+    """
+    taskhub_name = taskhub or os.getenv("TASKHUB", "default")
+    endpoint_url = endpoint or os.getenv("ENDPOINT", "http://localhost:8080")
+
+    logger.debug(f"Using taskhub: {taskhub_name}")
+    logger.debug(f"Using endpoint: {endpoint_url}")
+
+    credential = None if endpoint_url == "http://localhost:8080" else DefaultAzureCredential()
+
+    dts_client = DurableTaskSchedulerClient(
+        host_address=endpoint_url,
+        secure_channel=endpoint_url != "http://localhost:8080",
+        taskhub=taskhub_name,
+        token_credential=credential,
+        log_handler=log_handler
+    )
+
+    return DurableAIAgentClient(dts_client)
+
+
+async def stream_from_redis(thread_id: str, cursor: str | None = None) -> None:
+    """Stream agent responses from Redis.
+
+    Args:
+        thread_id: The conversation/thread ID to stream from
+        cursor: Optional cursor to resume from. If None, starts from beginning.
+    """
+    stream_key = f"agent-stream:{thread_id}"
+    logger.info(f"Streaming response from Redis (thread: {thread_id[:8]}...)")
+    logger.debug(f"To manually check Redis, run: redis-cli XLEN {stream_key}")
+    if cursor:
+        logger.info(f"Resuming from cursor: {cursor}")
+
+    async with await get_stream_handler() as stream_handler:
+        logger.info("Stream handler created, starting to read...")
+        try:
+            chunk_count = 0
+            async for chunk in stream_handler.read_stream(thread_id, cursor):
+                chunk_count += 1
+                logger.debug(
+                    f"Received chunk #{chunk_count}: error={chunk.error}, "
+                    f"is_done={chunk.is_done}, text_len={len(chunk.text) if chunk.text else 0}"
+                )
+
+                if chunk.error:
+                    logger.error(f"Stream error: {chunk.error}")
+                    break
+
+                if chunk.is_done:
+                    print("\n✓ Response complete!", flush=True)
+                    logger.info(f"Stream completed after {chunk_count} chunks")
+                    break
+
+                if chunk.text:
+                    # Print directly to console with flush for immediate display
+                    print(chunk.text, end="", flush=True)
+
+            if chunk_count == 0:
+                logger.warning("No chunks received from Redis stream!")
+                logger.warning(f"Check Redis manually: redis-cli XLEN {stream_key}")
+                logger.warning(f"View stream contents: redis-cli XREAD STREAMS {stream_key} 0")
+
+        except Exception as ex:
+            logger.error(f"Error reading from Redis: {ex}", exc_info=True)
+
+
+def run_client(agent_client: DurableAIAgentClient) -> None:
+    """Run client interactions with the TravelPlanner agent.
+
+    Args:
+        agent_client: The DurableAIAgentClient instance
+    """
+    # Get a reference to the TravelPlanner agent
+    logger.debug("Getting reference to TravelPlanner agent...")
+    travel_planner = agent_client.get_agent("TravelPlanner")
+
+    # Create a new thread for the conversation
+    thread = travel_planner.get_new_thread()
+    if not thread.session_id:
+        logger.error("Failed to create a new thread with session ID!")
+        return
+
+    key = thread.session_id.key
+    logger.info(f"Thread ID: {key}")
+
+    # Get user input
+    print("\nEnter your travel planning request:")
+    user_message = input("> ").strip()
+
+    if not user_message:
+        logger.warning("No input provided. 
Using default message.") + user_message = "Plan a 3-day trip to Tokyo with emphasis on culture and food" + + logger.info(f"\nYou: {user_message}\n") + logger.info("TravelPlanner (streaming from Redis):") + logger.info("-" * 80) + + # Start the agent run with wait_for_response=False for non-blocking execution + # This signals the agent to start processing without waiting for completion + # The agent will execute in the background and write chunks to Redis + travel_planner.run(user_message, thread=thread, options={"wait_for_response": False}) + + # Stream the response from Redis + # This demonstrates that the client can stream from Redis while + # the agent is still processing (or after it completes) + asyncio.run(stream_from_redis(str(key))) + + logger.info("\nDemo completed!") + + +if __name__ == "__main__": + from dotenv import load_dotenv + load_dotenv() + + # Create the client + client = get_client() + + # Run the demo + run_client(client) diff --git a/python/samples/getting_started/durabletask/03_single_agent_streaming/redis_stream_response_handler.py b/python/samples/getting_started/durabletask/03_single_agent_streaming/redis_stream_response_handler.py new file mode 100644 index 0000000000..981393cf00 --- /dev/null +++ b/python/samples/getting_started/durabletask/03_single_agent_streaming/redis_stream_response_handler.py @@ -0,0 +1,200 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Redis-based streaming response handler for durable agents. + +This module provides reliable, resumable streaming of agent responses using Redis Streams +as a message broker. It enables clients to disconnect and reconnect without losing messages. +""" + +import asyncio +import time +from dataclasses import dataclass +from datetime import timedelta +from collections.abc import AsyncIterator + +import redis.asyncio as aioredis + + +@dataclass +class StreamChunk: + """Represents a chunk of streamed data from Redis. + + Attributes: + entry_id: The Redis stream entry ID (used as cursor for resumption). + text: The text content of the chunk, if any. + is_done: Whether this is the final chunk in the stream. + error: Error message if an error occurred, otherwise None. + """ + entry_id: str + text: str | None = None + is_done: bool = False + error: str | None = None + + +class RedisStreamResponseHandler: + """Handles agent responses by persisting them to Redis Streams. + + This handler writes agent response updates to Redis Streams, enabling reliable, + resumable streaming delivery to clients. Clients can disconnect and reconnect + at any point using cursor-based pagination. + + Attributes: + MAX_EMPTY_READS: Maximum number of empty reads before timing out. + POLL_INTERVAL_MS: Interval in milliseconds between polling attempts. + """ + + MAX_EMPTY_READS = 300 + POLL_INTERVAL_MS = 1000 + + def __init__(self, redis_client: aioredis.Redis, stream_ttl: timedelta): + """Initialize the Redis stream response handler. + + Args: + redis_client: The async Redis client instance. + stream_ttl: Time-to-live for stream entries in Redis. 
+ """ + self._redis = redis_client + self._stream_ttl = stream_ttl + + async def __aenter__(self): + """Enter async context manager.""" + return self + + async def __aexit__(self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: object) -> None: + """Exit async context manager and close Redis connection.""" + await self._redis.aclose() + + async def write_chunk( + self, + conversation_id: str, + text: str, + sequence: int, + ) -> None: + """Write a single text chunk to the Redis Stream. + + Args: + conversation_id: The conversation ID for this agent run. + text: The text content to write. + sequence: The sequence number for ordering. + """ + stream_key = self._get_stream_key(conversation_id) + await self._redis.xadd( + stream_key, + { + "text": text, + "sequence": str(sequence), + "timestamp": str(int(time.time() * 1000)), + } + ) + await self._redis.expire(stream_key, self._stream_ttl) + + async def write_completion( + self, + conversation_id: str, + sequence: int, + ) -> None: + """Write an end-of-stream marker to the Redis Stream. + + Args: + conversation_id: The conversation ID for this agent run. + sequence: The final sequence number. + """ + stream_key = self._get_stream_key(conversation_id) + await self._redis.xadd( + stream_key, + { + "text": "", + "sequence": str(sequence), + "timestamp": str(int(time.time() * 1000)), + "done": "true", + } + ) + await self._redis.expire(stream_key, self._stream_ttl) + + async def read_stream( + self, + conversation_id: str, + cursor: str | None = None, + ) -> AsyncIterator[StreamChunk]: + """Read entries from a Redis Stream with cursor-based pagination. + + This method polls the Redis Stream for new entries, yielding chunks as they + become available. Clients can resume from any point using the entry_id from + a previous chunk. + + Args: + conversation_id: The conversation ID to read from. + cursor: Optional cursor to resume from. If None, starts from beginning. + + Yields: + StreamChunk instances containing text content or status markers. 
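+
+        Example:
+            A minimal consumption loop, assuming ``handler`` is an open
+            ``RedisStreamResponseHandler`` and ``"conv-123"`` is a sample
+            conversation ID::
+
+                async for chunk in handler.read_stream("conv-123"):
+                    if chunk.error or chunk.is_done:
+                        break
+                    print(chunk.text or "", end="")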
+ """ + stream_key = self._get_stream_key(conversation_id) + start_id = cursor if cursor else "0-0" + + empty_read_count = 0 + has_seen_data = False + + while True: + try: + # Read up to 100 entries from the stream + entries = await self._redis.xread( + {stream_key: start_id}, + count=100, + block=None, + ) + + if not entries: + # No entries found + if not has_seen_data: + empty_read_count += 1 + if empty_read_count >= self.MAX_EMPTY_READS: + timeout_seconds = self.MAX_EMPTY_READS * self.POLL_INTERVAL_MS / 1000 + yield StreamChunk( + entry_id=start_id, + error=f"Stream not found or timed out after {timeout_seconds} seconds" + ) + return + + # Wait before polling again + await asyncio.sleep(self.POLL_INTERVAL_MS / 1000) + continue + + has_seen_data = True + + # Process entries from the stream + for _stream_name, stream_entries in entries: + for entry_id, entry_data in stream_entries: + start_id = entry_id.decode() if isinstance(entry_id, bytes) else entry_id + + # Decode entry data + text = entry_data.get(b"text", b"").decode() if b"text" in entry_data else None + done = entry_data.get(b"done", b"").decode() if b"done" in entry_data else None + error = entry_data.get(b"error", b"").decode() if b"error" in entry_data else None + + if error: + yield StreamChunk(entry_id=start_id, error=error) + return + + if done == "true": + yield StreamChunk(entry_id=start_id, is_done=True) + return + + if text: + yield StreamChunk(entry_id=start_id, text=text) + + except Exception as ex: + yield StreamChunk(entry_id=start_id, error=str(ex)) + return + + @staticmethod + def _get_stream_key(conversation_id: str) -> str: + """Generate the Redis key for a conversation's stream. + + Args: + conversation_id: The conversation ID. + + Returns: + The Redis stream key. + """ + return f"agent-stream:{conversation_id}" diff --git a/python/samples/getting_started/durabletask/03_single_agent_streaming/requirements.txt b/python/samples/getting_started/durabletask/03_single_agent_streaming/requirements.txt new file mode 100644 index 0000000000..047a5d36f1 --- /dev/null +++ b/python/samples/getting_started/durabletask/03_single_agent_streaming/requirements.txt @@ -0,0 +1,9 @@ +# Agent Framework packages (installing from local package until a package is published) +-e ../../../../ +-e ../../../../packages/durabletask + +# Azure authentication +azure-identity + +# Redis client +redis diff --git a/python/samples/getting_started/durabletask/03_single_agent_streaming/sample.py b/python/samples/getting_started/durabletask/03_single_agent_streaming/sample.py new file mode 100644 index 0000000000..14de97caf8 --- /dev/null +++ b/python/samples/getting_started/durabletask/03_single_agent_streaming/sample.py @@ -0,0 +1,62 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Single Agent Streaming Sample - Durable Task Integration (Combined Worker + Client) + +This sample demonstrates running both the worker and client in a single process +with reliable Redis-based streaming for agent responses. + +The worker is started first to register the TravelPlanner agent with Redis streaming +callback, then client operations are performed against the running worker. 
+ +Prerequisites: +- Set AZURE_OPENAI_ENDPOINT and AZURE_OPENAI_CHAT_DEPLOYMENT_NAME + (plus AZURE_OPENAI_API_KEY or Azure CLI authentication) +- Durable Task Scheduler must be running (e.g., using Docker) +- Redis must be running (e.g., docker run -d --name redis -p 6379:6379 redis:latest) + +To run this sample: + python sample.py +""" + +import logging + +from dotenv import load_dotenv + +# Import helper functions from worker and client modules +from client import get_client, run_client +from worker import get_worker, setup_worker + +# Configure logging (must be after imports to override their basicConfig) +logging.basicConfig(level=logging.INFO, force=True) +logger = logging.getLogger(__name__) + +def main(): + """Main entry point - runs both worker and client in single process.""" + logger.debug("Starting Durable Task Agent Sample with Redis Streaming...") + + silent_handler = logging.NullHandler() + + # Create and start the worker using helper function and context manager + with get_worker(log_handler=silent_handler) as dts_worker: + # Register agents and callbacks using helper function + setup_worker(dts_worker) + + # Start the worker + dts_worker.start() + logger.debug("Worker started and listening for requests...") + + # Create the client using helper function + agent_client = get_client(log_handler=silent_handler) + + try: + # Run client interactions using helper function + run_client(agent_client) + except Exception as e: + logger.exception(f"Error during agent interaction: {e}") + + logger.debug("Sample completed. Worker shutting down...") + + +if __name__ == "__main__": + load_dotenv() + main() diff --git a/python/samples/getting_started/durabletask/03_single_agent_streaming/tools.py b/python/samples/getting_started/durabletask/03_single_agent_streaming/tools.py new file mode 100644 index 0000000000..6a71fdfa03 --- /dev/null +++ b/python/samples/getting_started/durabletask/03_single_agent_streaming/tools.py @@ -0,0 +1,165 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Mock travel tools for demonstration purposes. + +In a real application, these would call actual weather and events APIs. +""" + +from typing import Annotated + + +def get_weather_forecast( + destination: Annotated[str, "The destination city or location"], + date: Annotated[str, 'The date for the forecast (e.g., "2025-01-15" or "next Monday")'], +) -> str: + """Get the weather forecast for a destination on a specific date. + + Use this to provide weather-aware recommendations in the itinerary. + + Args: + destination: The destination city or location. + date: The date for the forecast. + + Returns: + A weather forecast summary. 
+ """ + # Mock weather data based on destination for realistic responses + weather_by_region = { + "Tokyo": ("Partly cloudy with a chance of light rain", 58, 45), + "Paris": ("Overcast with occasional drizzle", 52, 41), + "New York": ("Clear and cold", 42, 28), + "London": ("Foggy morning, clearing in afternoon", 48, 38), + "Sydney": ("Sunny and warm", 82, 68), + "Rome": ("Sunny with light breeze", 62, 48), + "Barcelona": ("Partly sunny", 59, 47), + "Amsterdam": ("Cloudy with light rain", 46, 38), + "Dubai": ("Sunny and hot", 85, 72), + "Singapore": ("Tropical thunderstorms in afternoon", 88, 77), + "Bangkok": ("Hot and humid, afternoon showers", 91, 78), + "Los Angeles": ("Sunny and pleasant", 72, 55), + "San Francisco": ("Morning fog, afternoon sun", 62, 52), + "Seattle": ("Rainy with breaks", 48, 40), + "Miami": ("Warm and sunny", 78, 65), + "Honolulu": ("Tropical paradise weather", 82, 72), + } + + # Find a matching destination or use a default + forecast = ("Partly cloudy", 65, 50) + for city, weather in weather_by_region.items(): + if city.lower() in destination.lower(): + forecast = weather + break + + condition, high_f, low_f = forecast + high_c = (high_f - 32) * 5 // 9 + low_c = (low_f - 32) * 5 // 9 + + recommendation = _get_weather_recommendation(condition) + + return f"""Weather forecast for {destination} on {date}: +Conditions: {condition} +High: {high_f}°F ({high_c}°C) +Low: {low_f}°F ({low_c}°C) + +Recommendation: {recommendation}""" + + +def get_local_events( + destination: Annotated[str, "The destination city or location"], + date: Annotated[str, 'The date to search for events (e.g., "2025-01-15" or "next week")'], +) -> str: + """Get local events and activities happening at a destination around a specific date. + + Use this to suggest timely activities and experiences. + + Args: + destination: The destination city or location. + date: The date to search for events. + + Returns: + A list of local events and activities. 
+ """ + # Mock events data based on destination + events_by_city = { + "Tokyo": [ + "🎭 Kabuki Theater Performance at Kabukiza Theatre - Traditional Japanese drama", + "🌸 Winter Illuminations at Yoyogi Park - Spectacular light displays", + "🍜 Ramen Festival at Tokyo Station - Sample ramen from across Japan", + "🎮 Gaming Expo at Tokyo Big Sight - Latest video games and technology", + ], + "Paris": [ + "🎨 Impressionist Exhibition at Musée d'Orsay - Extended evening hours", + "🍷 Wine Tasting Tour in Le Marais - Local sommelier guided", + "🎵 Jazz Night at Le Caveau de la Huchette - Historic jazz club", + "🥐 French Pastry Workshop - Learn from master pâtissiers", + ], + "New York": [ + "🎭 Broadway Show: Hamilton - Limited engagement performances", + "🏀 Knicks vs Lakers at Madison Square Garden", + "🎨 Modern Art Exhibit at MoMA - New installations", + "🍕 Pizza Walking Tour of Brooklyn - Artisan pizzerias", + ], + "London": [ + "👑 Royal Collection Exhibition at Buckingham Palace", + "🎭 West End Musical: The Phantom of the Opera", + "🍺 Craft Beer Festival at Brick Lane", + "🎪 Winter Wonderland at Hyde Park - Rides and markets", + ], + "Sydney": [ + "🏄 Pro Surfing Competition at Bondi Beach", + "🎵 Opera at Sydney Opera House - La Bohème", + "🦘 Wildlife Night Safari at Taronga Zoo", + "🍽️ Harbor Dinner Cruise with fireworks", + ], + "Rome": [ + "🏛️ After-Hours Vatican Tour - Skip the crowds", + "🍝 Pasta Making Class in Trastevere", + "🎵 Classical Concert at Borghese Gallery", + "🍷 Wine Tasting in Roman Cellars", + ], + } + + # Find events for the destination or use generic events + events = [ + "🎭 Local theater performance", + "🍽️ Food and wine festival", + "🎨 Art gallery opening", + "🎵 Live music at local venues", + ] + + for city, city_events in events_by_city.items(): + if city.lower() in destination.lower(): + events = city_events + break + + event_list = "\n• ".join(events) + return f"""Local events in {destination} around {date}: + +• {event_list} + +💡 Tip: Book popular events in advance as they may sell out quickly!""" + + +def _get_weather_recommendation(condition: str) -> str: + """Get a recommendation based on weather conditions. + + Args: + condition: The weather condition description. + + Returns: + A recommendation string. + """ + condition_lower = condition.lower() + + if "rain" in condition_lower or "drizzle" in condition_lower: + return "Bring an umbrella and waterproof jacket. Consider indoor activities for backup." + elif "fog" in condition_lower: + return "Morning visibility may be limited. Plan outdoor sightseeing for afternoon." + elif "cold" in condition_lower: + return "Layer up with warm clothing. Hot drinks and cozy cafés recommended." + elif "hot" in condition_lower or "warm" in condition_lower: + return "Stay hydrated and use sunscreen. Plan strenuous activities for cooler morning hours." + elif "thunder" in condition_lower or "storm" in condition_lower: + return "Keep an eye on weather updates. Have indoor alternatives ready." + else: + return "Pleasant conditions expected. Great day for outdoor exploration!" diff --git a/python/samples/getting_started/durabletask/03_single_agent_streaming/worker.py b/python/samples/getting_started/durabletask/03_single_agent_streaming/worker.py new file mode 100644 index 0000000000..1ca37ff607 --- /dev/null +++ b/python/samples/getting_started/durabletask/03_single_agent_streaming/worker.py @@ -0,0 +1,255 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +"""Worker process for hosting a TravelPlanner agent with reliable Redis streaming. + +This worker registers the TravelPlanner agent with the Durable Task Scheduler +and uses RedisStreamCallback to persist streaming responses to Redis for reliable delivery. + +Prerequisites: +- Set AZURE_OPENAI_ENDPOINT and AZURE_OPENAI_CHAT_DEPLOYMENT_NAME + (plus AZURE_OPENAI_API_KEY or Azure CLI authentication) +- Start a Durable Task Scheduler (e.g., using Docker) +- Start Redis (e.g., docker run -d --name redis -p 6379:6379 redis:latest) +""" + +import asyncio +import logging +import os +from datetime import timedelta + +import redis.asyncio as aioredis +from agent_framework import AgentResponseUpdate, ChatAgent +from agent_framework.azure import ( + AgentCallbackContext, + AgentResponseCallbackProtocol, + AzureOpenAIChatClient, + DurableAIAgentWorker, +) +from azure.identity import AzureCliCredential, DefaultAzureCredential +from durabletask.azuremanaged.worker import DurableTaskSchedulerWorker + +from redis_stream_response_handler import RedisStreamResponseHandler +from tools import get_local_events, get_weather_forecast + +# Configure logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +# Configuration +REDIS_CONNECTION_STRING = os.environ.get("REDIS_CONNECTION_STRING", "redis://localhost:6379") +REDIS_STREAM_TTL_MINUTES = int(os.environ.get("REDIS_STREAM_TTL_MINUTES", "10")) + + +async def get_stream_handler() -> RedisStreamResponseHandler: + """Create a new Redis stream handler for each request. + + This avoids event loop conflicts by creating a fresh Redis client + in the current event loop context. + """ + # Create a new Redis client in the current event loop + redis_client = aioredis.from_url( # type: ignore[reportUnknownMemberType] + REDIS_CONNECTION_STRING, + encoding="utf-8", + decode_responses=False, + ) + + return RedisStreamResponseHandler( + redis_client=redis_client, + stream_ttl=timedelta(minutes=REDIS_STREAM_TTL_MINUTES), + ) + + +class RedisStreamCallback(AgentResponseCallbackProtocol): + """Callback that writes streaming updates to Redis Streams for reliable delivery. + + This enables clients to disconnect and reconnect without losing messages. + """ + + def __init__(self) -> None: + self._sequence_numbers: dict[str, int] = {} # Track sequence per thread + + async def on_streaming_response_update( + self, + update: AgentResponseUpdate, + context: AgentCallbackContext, + ) -> None: + """Write streaming update to Redis Stream. + + Args: + update: The streaming response update chunk. + context: The callback context with thread_id, agent_name, etc. 
+ """ + thread_id = context.thread_id + if not thread_id: + logger.warning("No thread_id available for streaming update") + return + + if not update.text: + return + + text = update.text + + # Get or initialize sequence number for this thread + if thread_id not in self._sequence_numbers: + self._sequence_numbers[thread_id] = 0 + + sequence = self._sequence_numbers[thread_id] + + try: + # Use context manager to ensure Redis client is properly closed + async with await get_stream_handler() as stream_handler: + # Write chunk to Redis Stream using public API + await stream_handler.write_chunk(thread_id, text, sequence) + + self._sequence_numbers[thread_id] += 1 + + logger.debug( + "[%s][%s] Wrote chunk to Redis: seq=%d, text=%s", + context.agent_name, + thread_id[:8], + sequence, + text, + ) + except Exception as ex: + logger.error(f"Error writing to Redis stream: {ex}", exc_info=True) + + async def on_agent_response(self, response: object, context: AgentCallbackContext) -> None: + """Write end-of-stream marker when agent completes. + + Args: + response: The final agent response. + context: The callback context. + """ + thread_id = context.thread_id + if not thread_id: + return + + sequence = self._sequence_numbers.get(thread_id, 0) + + try: + # Use context manager to ensure Redis client is properly closed + async with await get_stream_handler() as stream_handler: + # Write end-of-stream marker using public API + await stream_handler.write_completion(thread_id, sequence) + + logger.info( + "[%s][%s] Agent completed, wrote end-of-stream marker", + context.agent_name, + thread_id[:8], + ) + + # Clean up sequence tracker + self._sequence_numbers.pop(thread_id, None) + except Exception as ex: + logger.error(f"Error writing end-of-stream marker: {ex}", exc_info=True) + + +def create_travel_agent() -> "ChatAgent": + """Create the TravelPlanner agent using Azure OpenAI. + + Returns: + ChatAgent: The configured TravelPlanner agent with travel planning tools. + """ + return AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( + name="TravelPlanner", + instructions="""You are an expert travel planner who creates detailed, personalized travel itineraries. +When asked to plan a trip, you should: +1. Create a comprehensive day-by-day itinerary +2. Include specific recommendations for activities, restaurants, and attractions +3. Provide practical tips for each destination +4. Consider weather and local events when making recommendations +5. Include estimated times and logistics between activities + +Always use the available tools to get current weather forecasts and local events +for the destination to make your recommendations more relevant and timely. + +Format your response with clear headings for each day and include emoji icons +to make the itinerary easy to scan and visually appealing.""", + tools=[get_weather_forecast, get_local_events], + ) + + +def get_worker( + taskhub: str | None = None, + endpoint: str | None = None, + log_handler: logging.Handler | None = None +) -> DurableTaskSchedulerWorker: + """Create a configured DurableTaskSchedulerWorker. 
+ + Args: + taskhub: Task hub name (defaults to TASKHUB env var or "default") + endpoint: Scheduler endpoint (defaults to ENDPOINT env var or "http://localhost:8080") + log_handler: Optional log handler for worker logging + + Returns: + Configured DurableTaskSchedulerWorker instance + """ + taskhub_name = taskhub or os.getenv("TASKHUB", "default") + endpoint_url = endpoint or os.getenv("ENDPOINT", "http://localhost:8080") + + logger.debug(f"Using taskhub: {taskhub_name}") + logger.debug(f"Using endpoint: {endpoint_url}") + + credential = None if endpoint_url == "http://localhost:8080" else DefaultAzureCredential() + + return DurableTaskSchedulerWorker( + host_address=endpoint_url, + secure_channel=endpoint_url != "http://localhost:8080", + taskhub=taskhub_name, + token_credential=credential, + log_handler=log_handler + ) + + +def setup_worker(worker: DurableTaskSchedulerWorker) -> DurableAIAgentWorker: + """Set up the worker with the TravelPlanner agent and Redis streaming callback. + + Args: + worker: The DurableTaskSchedulerWorker instance + + Returns: + DurableAIAgentWorker with agent and callback registered + """ + # Create the Redis streaming callback + redis_callback = RedisStreamCallback() + + # Wrap it with the agent worker + agent_worker = DurableAIAgentWorker(worker, callback=redis_callback) + + # Create and register the TravelPlanner agent + logger.debug("Creating and registering TravelPlanner agent...") + travel_agent = create_travel_agent() + agent_worker.add_agent(travel_agent) + + logger.debug(f"✓ Registered agent: {travel_agent.name}") + + return agent_worker + + +async def main(): + """Main entry point for the worker process.""" + logger.debug("Starting Durable Task Agent Worker with Redis Streaming...") + + # Create a worker using the helper function + worker = get_worker() + + # Setup worker with agent and callback + setup_worker(worker) + + # Start the worker + logger.debug("Worker started and listening for requests...") + worker.start() + + try: + # Keep the worker running + while True: + await asyncio.sleep(1) + except KeyboardInterrupt: + logger.debug("Worker shutting down...") + finally: + worker.stop() + logger.debug("Worker stopped") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/getting_started/durabletask/04_single_agent_orchestration_chaining/README.md b/python/samples/getting_started/durabletask/04_single_agent_orchestration_chaining/README.md new file mode 100644 index 0000000000..3a5605b3dd --- /dev/null +++ b/python/samples/getting_started/durabletask/04_single_agent_orchestration_chaining/README.md @@ -0,0 +1,68 @@ +# Single Agent Orchestration Chaining + +This sample demonstrates how to chain multiple invocations of the same agent using a durable orchestration while preserving conversation state between runs. + +## Key Concepts Demonstrated + +- Using durable orchestrations to coordinate sequential agent invocations. +- Chaining agent calls where the output of one run becomes input to the next. +- Maintaining conversation context across sequential runs using a shared thread. +- Using `DurableAIAgentOrchestrationContext` to access agents within orchestrations. + +## Environment Setup + +See the [README.md](../README.md) file in the parent directory for more information on how to configure the environment, including how to install and run common sample dependencies. 
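+
+## How It Works
+
+The orchestration wraps the Durable Task context in a `DurableAIAgentOrchestrationContext`, obtains the writer agent, and creates a single thread that both runs share, so the second invocation sees the first response as conversation context. The sketch below condenses the orchestration that the worker registers (see `worker.py` for the full version):
+
+```python
+def single_agent_chaining_orchestration(context, _):
+    agent_context = DurableAIAgentOrchestrationContext(context)
+    writer = agent_context.get_agent("WriterAgent")
+    writer_thread = writer.get_new_thread()  # shared across both runs
+
+    initial = yield writer.run(
+        messages="Write a concise inspirational sentence about learning.",
+        thread=writer_thread,
+    )
+    refined = yield writer.run(
+        messages=f"Improve this further while keeping it under 25 words: {initial.text}",
+        thread=writer_thread,
+    )
+    return refined.text
+```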
+ +## Running the Sample + +With the environment setup, you can run the sample using the combined approach or separate worker and client processes: + +**Option 1: Combined (Recommended for Testing)** + +```bash +cd samples/getting_started/durabletask/04_single_agent_orchestration_chaining +python sample.py +``` + +**Option 2: Separate Processes** + +Start the worker in one terminal: + +```bash +python worker.py +``` + +In a new terminal, run the client: + +```bash +python client.py +``` + +The orchestration will execute the writer agent twice sequentially: + +``` +[Orchestration] Starting single agent chaining... +[Orchestration] Created thread: abc-123 +[Orchestration] First agent run: Generating initial sentence... +[Orchestration] Initial response: Every small step forward is progress toward mastery. +[Orchestration] Second agent run: Refining the sentence... +[Orchestration] Refined response: Each small step forward brings you closer to mastery and growth. +[Orchestration] Chaining complete + +================================================================================ +Orchestration Result +================================================================================ +Each small step forward brings you closer to mastery and growth. +``` + +## Viewing Orchestration State + +You can view the state of the orchestration in the Durable Task Scheduler dashboard: + +1. Open your browser and navigate to `http://localhost:8082` +2. In the dashboard, you can view: + - The sequential execution of both agent runs + - The conversation thread shared between runs + - Input and output at each step + - Overall orchestration state and history + diff --git a/python/samples/getting_started/durabletask/04_single_agent_orchestration_chaining/client.py b/python/samples/getting_started/durabletask/04_single_agent_orchestration_chaining/client.py new file mode 100644 index 0000000000..23ac266b36 --- /dev/null +++ b/python/samples/getting_started/durabletask/04_single_agent_orchestration_chaining/client.py @@ -0,0 +1,117 @@ +"""Client application for starting a single agent chaining orchestration. + +This client connects to the Durable Task Scheduler and starts an orchestration +that runs a writer agent twice sequentially on the same thread, demonstrating +how conversation context is maintained across multiple agent invocations. + +Prerequisites: +- The worker must be running with the writer agent and orchestration registered +- Set AZURE_OPENAI_ENDPOINT and AZURE_OPENAI_CHAT_DEPLOYMENT_NAME + (plus AZURE_OPENAI_API_KEY or Azure CLI authentication) +- Durable Task Scheduler must be running +""" + +import asyncio +import json +import logging +import os + +from azure.identity import DefaultAzureCredential +from durabletask.azuremanaged.client import DurableTaskSchedulerClient + +# Configure logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + + +def get_client( + taskhub: str | None = None, + endpoint: str | None = None, + log_handler: logging.Handler | None = None +) -> DurableTaskSchedulerClient: + """Create a configured DurableTaskSchedulerClient. 
+ + Args: + taskhub: Task hub name (defaults to TASKHUB env var or "default") + endpoint: Scheduler endpoint (defaults to ENDPOINT env var or "http://localhost:8080") + log_handler: Optional logging handler for client logging + + Returns: + Configured DurableTaskSchedulerClient instance + """ + taskhub_name = taskhub or os.getenv("TASKHUB", "default") + endpoint_url = endpoint or os.getenv("ENDPOINT", "http://localhost:8080") + + logger.debug(f"Using taskhub: {taskhub_name}") + logger.debug(f"Using endpoint: {endpoint_url}") + + credential = None if endpoint_url == "http://localhost:8080" else DefaultAzureCredential() + + return DurableTaskSchedulerClient( + host_address=endpoint_url, + secure_channel=endpoint_url != "http://localhost:8080", + taskhub=taskhub_name, + token_credential=credential, + log_handler=log_handler + ) + + +def run_client(client: DurableTaskSchedulerClient) -> None: + """Run client to start and monitor the orchestration. + + Args: + client: The DurableTaskSchedulerClient instance + """ + logger.debug("Starting single agent chaining orchestration...") + + # Start the orchestration + instance_id = client.schedule_new_orchestration( # type: ignore + orchestrator="single_agent_chaining_orchestration", + input="", + ) + + logger.info(f"Orchestration started with instance ID: {instance_id}") + logger.debug("Waiting for orchestration to complete...") + + # Retrieve the final state + metadata = client.wait_for_orchestration_completion( + instance_id=instance_id, + timeout=300 + ) + + if metadata and metadata.runtime_status.name == "COMPLETED": + result = metadata.serialized_output + + logger.debug("Orchestration completed successfully!") + + # Parse and display the result + if result: + final_text = json.loads(result) + logger.info("Final refined sentence: %s \n", final_text) + + elif metadata: + logger.error(f"Orchestration ended with status: {metadata.runtime_status.name}") + if metadata.serialized_output: + logger.error(f"Output: {metadata.serialized_output}") + else: + logger.error("Orchestration did not complete within the timeout period") + + +async def main() -> None: + """Main entry point for the client application.""" + logger.debug("Starting Durable Task Single Agent Chaining Orchestration Client...") + + # Create client using helper function + client = get_client() + + try: + run_client(client) + except Exception as e: + logger.exception(f"Error during orchestration: {e}") + finally: + logger.debug("") + logger.debug("Client shutting down") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/getting_started/durabletask/04_single_agent_orchestration_chaining/requirements.txt b/python/samples/getting_started/durabletask/04_single_agent_orchestration_chaining/requirements.txt new file mode 100644 index 0000000000..371b9e3b79 --- /dev/null +++ b/python/samples/getting_started/durabletask/04_single_agent_orchestration_chaining/requirements.txt @@ -0,0 +1,6 @@ +# Agent Framework packages (installing from local package until a package is published) +-e ../../../../ +-e ../../../../packages/durabletask + +# Azure authentication +azure-identity diff --git a/python/samples/getting_started/durabletask/04_single_agent_orchestration_chaining/sample.py b/python/samples/getting_started/durabletask/04_single_agent_orchestration_chaining/sample.py new file mode 100644 index 0000000000..208c223f5e --- /dev/null +++ b/python/samples/getting_started/durabletask/04_single_agent_orchestration_chaining/sample.py @@ -0,0 +1,70 @@ +"""Single Agent 
Orchestration Chaining Sample - Durable Task Integration + +This sample demonstrates chaining two invocations of the same agent inside a Durable Task +orchestration while preserving the conversation state between runs. The orchestration +runs the writer agent sequentially on a shared thread to refine text iteratively. + +Components used: +- AzureOpenAIChatClient to construct the writer agent +- DurableTaskSchedulerWorker and DurableAIAgentWorker for agent hosting +- DurableTaskSchedulerClient and orchestration for sequential agent invocations +- Thread management to maintain conversation context across invocations + +Prerequisites: +- Set AZURE_OPENAI_ENDPOINT and AZURE_OPENAI_CHAT_DEPLOYMENT_NAME + (plus AZURE_OPENAI_API_KEY or Azure CLI authentication) +- Durable Task Scheduler must be running (e.g., using Docker emulator) + +To run this sample: + python sample.py +""" + +import logging + +from dotenv import load_dotenv + +# Import helper functions from worker and client modules +from client import get_client, run_client +from worker import get_worker, setup_worker + +# Configure logging +logging.basicConfig(level=logging.INFO, force=True) +logger = logging.getLogger(__name__) + + +def main(): + """Main entry point - runs both worker and client in single process.""" + logger.debug("Starting Single Agent Orchestration Chaining Sample...") + + silent_handler = logging.NullHandler() + # Create and start the worker using helper function and context manager + with get_worker(log_handler=silent_handler) as dts_worker: + # Register agents and orchestrations using helper function + setup_worker(dts_worker) + + # Start the worker + dts_worker.start() + logger.debug("Worker started and listening for requests...") + + # Create the client using helper function + client = get_client(log_handler=silent_handler) + + logger.debug("CLIENT: Starting orchestration...") + + # Run the client in the same process + try: + run_client(client) + except KeyboardInterrupt: + logger.debug("Sample interrupted by user") + except Exception as e: + logger.exception(f"Error during orchestration: {e}") + finally: + logger.debug("Worker stopping...") + + logger.debug("") + logger.debug("Sample completed") + + +if __name__ == "__main__": + load_dotenv() + main() diff --git a/python/samples/getting_started/durabletask/04_single_agent_orchestration_chaining/worker.py b/python/samples/getting_started/durabletask/04_single_agent_orchestration_chaining/worker.py new file mode 100644 index 0000000000..321a5f1149 --- /dev/null +++ b/python/samples/getting_started/durabletask/04_single_agent_orchestration_chaining/worker.py @@ -0,0 +1,206 @@ +"""Worker process for hosting a single agent with chaining orchestration using Durable Task. + +This worker registers a writer agent and an orchestration function that demonstrates +chaining behavior by running the agent twice sequentially on the same thread, +preserving conversation context between invocations. 
+
+Prerequisites:
+- Set AZURE_OPENAI_ENDPOINT and AZURE_OPENAI_CHAT_DEPLOYMENT_NAME
+  (plus AZURE_OPENAI_API_KEY or Azure CLI authentication)
+- Start a Durable Task Scheduler (e.g., using Docker)
+"""
+
+import asyncio
+from collections.abc import Generator
+import logging
+import os
+
+from agent_framework import AgentResponse, ChatAgent
+from agent_framework.azure import AzureOpenAIChatClient, DurableAIAgentOrchestrationContext, DurableAIAgentWorker
+from azure.identity import AzureCliCredential, DefaultAzureCredential
+from durabletask.task import OrchestrationContext, Task
+from durabletask.azuremanaged.worker import DurableTaskSchedulerWorker
+
+# Configure logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+# Agent name
+WRITER_AGENT_NAME = "WriterAgent"
+
+
+def create_writer_agent() -> "ChatAgent":
+    """Create the Writer agent using Azure OpenAI.
+
+    This agent refines short pieces of text, enhancing initial sentences
+    and polishing improved versions further.
+
+    Returns:
+        ChatAgent: The configured Writer agent
+    """
+    instructions = (
+        "You refine short pieces of text. When given an initial sentence you enhance it;\n"
+        "when given an improved sentence you polish it further."
+    )
+
+    return AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent(
+        name=WRITER_AGENT_NAME,
+        instructions=instructions,
+    )
+
+
+def get_orchestration():
+    """Get the orchestration function for this sample.
+
+    Returns:
+        The orchestration function to register with the worker
+    """
+    return single_agent_chaining_orchestration
+
+
+def single_agent_chaining_orchestration(
+    context: OrchestrationContext, _: str
+) -> Generator[Task[AgentResponse], AgentResponse, str]:
+    """Orchestration that runs the writer agent twice on the same thread.
+
+    This demonstrates chaining behavior where the output of the first agent run
+    becomes part of the input for the second run, all while maintaining the
+    conversation context through a shared thread.
+
+    Args:
+        context: The orchestration context
+        _: Input parameter (unused)
+
+    Yields:
+        Task[AgentResponse]: Tasks that resolve to AgentResponse
+
+    Returns:
+        str: The final refined text from the second agent run
+    """
+    logger.debug("[Orchestration] Starting single agent chaining...")
+
+    # Wrap the orchestration context to access agents
+    agent_context = DurableAIAgentOrchestrationContext(context)
+
+    # Get the writer agent using the agent context
+    writer = agent_context.get_agent(WRITER_AGENT_NAME)
+
+    # Create a new thread for the conversation - this will be shared across both runs
+    writer_thread = writer.get_new_thread()
+
+    logger.debug(f"[Orchestration] Created thread: {writer_thread.session_id}")
+
+    prompt = "Write a concise inspirational sentence about learning."
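+
+    # Each `yield writer.run(...)` hands a durable task to the runtime: the
+    # orchestrator suspends until the agent completes, and on replay the
+    # recorded result is served from history instead of re-invoking the agent.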
+ # First run: Generate an initial inspirational sentence + logger.info("[Orchestration] First agent run: Generating initial sentence about: %s", prompt) + initial_response = yield writer.run( + messages=prompt, + thread=writer_thread, + ) + logger.info(f"[Orchestration] Initial response: {initial_response.text}") + + # Second run: Refine the initial response on the same thread + improved_prompt = ( + f"Improve this further while keeping it under 25 words: " + f"{initial_response.text}" + ) + + logger.info("[Orchestration] Second agent run: Refining the sentence: %s", improved_prompt) + refined_response = yield writer.run( + messages=improved_prompt, + thread=writer_thread, + ) + + logger.info(f"[Orchestration] Refined response: {refined_response.text}") + + logger.debug("[Orchestration] Chaining complete") + return refined_response.text + + +def get_worker( + taskhub: str | None = None, + endpoint: str | None = None, + log_handler: logging.Handler | None = None +) -> DurableTaskSchedulerWorker: + """Create a configured DurableTaskSchedulerWorker. + + Args: + taskhub: Task hub name (defaults to TASKHUB env var or "default") + endpoint: Scheduler endpoint (defaults to ENDPOINT env var or "http://localhost:8080") + log_handler: Optional logging handler for worker logging + + Returns: + Configured DurableTaskSchedulerWorker instance + """ + taskhub_name = taskhub or os.getenv("TASKHUB", "default") + endpoint_url = endpoint or os.getenv("ENDPOINT", "http://localhost:8080") + + logger.debug(f"Using taskhub: {taskhub_name}") + logger.debug(f"Using endpoint: {endpoint_url}") + + credential = None if endpoint_url == "http://localhost:8080" else DefaultAzureCredential() + + return DurableTaskSchedulerWorker( + host_address=endpoint_url, + secure_channel=endpoint_url != "http://localhost:8080", + taskhub=taskhub_name, + token_credential=credential, + log_handler=log_handler + ) + + +def setup_worker(worker: DurableTaskSchedulerWorker) -> DurableAIAgentWorker: + """Set up the worker with agents and orchestrations registered. 
+
+    Args:
+        worker: The DurableTaskSchedulerWorker instance
+
+    Returns:
+        DurableAIAgentWorker with agents and orchestrations registered
+    """
+    # Wrap it with the agent worker
+    agent_worker = DurableAIAgentWorker(worker)
+
+    # Create and register the Writer agent
+    logger.debug("Creating and registering Writer agent...")
+    writer_agent = create_writer_agent()
+    agent_worker.add_agent(writer_agent)
+
+    logger.debug(f"✓ Registered agent: {writer_agent.name}")
+
+    # Register the orchestration function
+    logger.debug("Registering orchestration function...")
+    worker.add_orchestrator(single_agent_chaining_orchestration)  # type: ignore
+    logger.debug(f"✓ Registered orchestration: {single_agent_chaining_orchestration.__name__}")
+
+    return agent_worker
+
+
+async def main():
+    """Main entry point for the worker process."""
+    logger.debug("Starting Durable Task Single Agent Chaining Worker with Orchestration...")
+
+    # Create a worker using the helper function
+    worker = get_worker()
+
+    # Set up the worker with agents and orchestrations
+    setup_worker(worker)
+
+    logger.debug("Worker is ready and listening for requests...")
+    logger.debug("Press Ctrl+C to stop.")
+
+    try:
+        # Start the worker (it runs in the background)
+        worker.start()
+
+        # Keep the worker running until interrupted
+        while True:
+            await asyncio.sleep(1)
+    except KeyboardInterrupt:
+        logger.debug("Worker shutdown initiated")
+    finally:
+        worker.stop()
+
+    logger.debug("Worker stopped")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/python/samples/getting_started/durabletask/05_multi_agent_orchestration_concurrency/README.md b/python/samples/getting_started/durabletask/05_multi_agent_orchestration_concurrency/README.md
new file mode 100644
index 0000000000..0edf244d78
--- /dev/null
+++ b/python/samples/getting_started/durabletask/05_multi_agent_orchestration_concurrency/README.md
@@ -0,0 +1,71 @@
+# Multi-Agent Orchestration with Concurrency
+
+This sample demonstrates how to host multiple agents and run them concurrently using a durable orchestration, aggregating their responses into a single result.
+
+## Key Concepts Demonstrated
+
+- Running multiple specialized agents in parallel within an orchestration.
+- Using `OrchestrationAgentExecutor` to get `DurableAgentTask` objects for concurrent execution.
+- Aggregating results from multiple agents using `task.when_all()`.
+- Creating separate conversation threads for independent agent contexts.
+
+## Environment Setup
+
+See the [README.md](../README.md) file in the parent directory for more information on how to configure the environment, including how to install and run common sample dependencies.
+
+## Running the Sample
+
+With the environment set up, you can run the sample using the combined approach or separate worker and client processes:
+
+**Option 1: Combined (Recommended for Testing)**
+
+```bash
+cd samples/getting_started/durabletask/05_multi_agent_orchestration_concurrency
+python sample.py
+```
+
+**Option 2: Separate Processes**
+
+Start the worker in one terminal:
+
+```bash
+python worker.py
+```
+
+In a new terminal, run the client:
+
+```bash
+python client.py
+```
+
+The orchestration will execute both agents concurrently:
+
+```
+Prompt: What is temperature?
+
+Starting multi-agent concurrent orchestration...
+Orchestration started with instance ID: abc123...
+⚡ Running PhysicistAgent and ChemistAgent in parallel...
+Orchestration status: COMPLETED
+
+Results:
+
+Physicist's response:
+  Temperature measures the average kinetic energy of particles in a system...
+ +Chemist's response: + Temperature reflects how molecular motion influences reaction rates... +``` + +## Viewing Orchestration State + +You can view the state of the orchestration in the Durable Task Scheduler dashboard: + +1. Open your browser and navigate to `http://localhost:8082` +2. In the dashboard, you can view: + - The concurrent execution of both agents (PhysicistAgent and ChemistAgent) + - Separate conversation threads for each agent + - Parallel task execution and completion timing + - Aggregated results from both agents + + diff --git a/python/samples/getting_started/durabletask/05_multi_agent_orchestration_concurrency/client.py b/python/samples/getting_started/durabletask/05_multi_agent_orchestration_concurrency/client.py new file mode 100644 index 0000000000..f3e92c9fb9 --- /dev/null +++ b/python/samples/getting_started/durabletask/05_multi_agent_orchestration_concurrency/client.py @@ -0,0 +1,114 @@ +"""Client application for starting a multi-agent concurrent orchestration. + +This client connects to the Durable Task Scheduler and starts an orchestration +that runs two agents (physicist and chemist) concurrently, then retrieves and +displays the aggregated results. + +Prerequisites: +- The worker must be running with both agents and orchestration registered +- Set AZURE_OPENAI_ENDPOINT and AZURE_OPENAI_CHAT_DEPLOYMENT_NAME + (plus AZURE_OPENAI_API_KEY or Azure CLI authentication) +- Durable Task Scheduler must be running +""" + +import asyncio +import json +import logging +import os + +from azure.identity import DefaultAzureCredential +from durabletask.azuremanaged.client import DurableTaskSchedulerClient + +# Configure logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + + +def get_client( + taskhub: str | None = None, + endpoint: str | None = None, + log_handler: logging.Handler | None = None +) -> DurableTaskSchedulerClient: + """Create a configured DurableTaskSchedulerClient. + + Args: + taskhub: Task hub name (defaults to TASKHUB env var or "default") + endpoint: Scheduler endpoint (defaults to ENDPOINT env var or "http://localhost:8080") + log_handler: Optional logging handler for client logging + + Returns: + Configured DurableTaskSchedulerClient instance + """ + taskhub_name = taskhub or os.getenv("TASKHUB", "default") + endpoint_url = endpoint or os.getenv("ENDPOINT", "http://localhost:8080") + + logger.debug(f"Using taskhub: {taskhub_name}") + logger.debug(f"Using endpoint: {endpoint_url}") + + credential = None if endpoint_url == "http://localhost:8080" else DefaultAzureCredential() + + return DurableTaskSchedulerClient( + host_address=endpoint_url, + secure_channel=endpoint_url != "http://localhost:8080", + taskhub=taskhub_name, + token_credential=credential, + log_handler=log_handler + ) + + +def run_client(client: DurableTaskSchedulerClient, prompt: str = "What is temperature?") -> None: + """Run client to start and monitor the orchestration. 
+ + Args: + client: The DurableTaskSchedulerClient instance + prompt: The prompt to send to both agents + """ + # Start the orchestration with the prompt as input + instance_id = client.schedule_new_orchestration( # type: ignore + orchestrator="multi_agent_concurrent_orchestration", + input=prompt, + ) + + logger.info(f"Orchestration started with instance ID: {instance_id}") + logger.debug("Waiting for orchestration to complete...") + + # Retrieve the final state + metadata = client.wait_for_orchestration_completion( + instance_id=instance_id, + ) + + if metadata and metadata.runtime_status.name == "COMPLETED": + result = metadata.serialized_output + + logger.debug("Orchestration completed successfully!") + + # Parse and display the result + if result: + result_json = json.loads(result) if isinstance(result, str) else result + logger.info("Orchestration Results:\n%s", json.dumps(result_json, indent=2)) + + elif metadata: + logger.error(f"Orchestration ended with status: {metadata.runtime_status.name}") + if metadata.serialized_output: + logger.error(f"Output: {metadata.serialized_output}") + else: + logger.error("Orchestration did not complete within the timeout period") + + +async def main() -> None: + """Main entry point for the client application.""" + logger.debug("Starting Durable Task Multi-Agent Orchestration Client...") + + # Create client using helper function + client = get_client() + + try: + run_client(client) + except Exception as e: + logger.exception(f"Error during orchestration: {e}") + finally: + logger.debug("Client shutting down") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/getting_started/durabletask/05_multi_agent_orchestration_concurrency/requirements.txt b/python/samples/getting_started/durabletask/05_multi_agent_orchestration_concurrency/requirements.txt new file mode 100644 index 0000000000..371b9e3b79 --- /dev/null +++ b/python/samples/getting_started/durabletask/05_multi_agent_orchestration_concurrency/requirements.txt @@ -0,0 +1,6 @@ +# Agent Framework packages (installing from local package until a package is published) +-e ../../../../ +-e ../../../../packages/durabletask + +# Azure authentication +azure-identity diff --git a/python/samples/getting_started/durabletask/05_multi_agent_orchestration_concurrency/sample.py b/python/samples/getting_started/durabletask/05_multi_agent_orchestration_concurrency/sample.py new file mode 100644 index 0000000000..ca80aa043e --- /dev/null +++ b/python/samples/getting_started/durabletask/05_multi_agent_orchestration_concurrency/sample.py @@ -0,0 +1,64 @@ +"""Multi-Agent Orchestration Sample - Durable Task Integration (Combined Worker + Client) + +This sample demonstrates running both the worker and client in a single process for +concurrent multi-agent orchestration. The worker registers two domain-specific agents +(physicist and chemist) and an orchestration function that runs them in parallel. + +The orchestration uses OrchestrationAgentExecutor to execute agents concurrently +and aggregate their responses. 
+ +Prerequisites: +- Set AZURE_OPENAI_ENDPOINT and AZURE_OPENAI_CHAT_DEPLOYMENT_NAME + (plus AZURE_OPENAI_API_KEY or Azure CLI authentication) +- Durable Task Scheduler must be running (e.g., using Docker) + +To run this sample: + python sample.py +""" + +import logging + +from dotenv import load_dotenv + +# Import helper functions from worker and client modules +from client import get_client, run_client +from worker import get_worker, setup_worker + +# Configure logging +logging.basicConfig(level=logging.INFO, force=True) +logger = logging.getLogger(__name__) + + +def main(): + """Main entry point - runs both worker and client in single process.""" + logger.debug("Starting Durable Task Multi-Agent Orchestration Sample (Combined Worker + Client)...") + + silent_handler = logging.NullHandler() + # Create and start the worker using helper function and context manager + with get_worker(log_handler=silent_handler) as dts_worker: + # Register agents and orchestrations using helper function + setup_worker(dts_worker) + + # Start the worker + dts_worker.start() + logger.debug("Worker started and listening for requests...") + + # Create the client using helper function + client = get_client(log_handler=silent_handler) + + # Define the prompt + prompt = "What is temperature?" + logger.debug("CLIENT: Starting orchestration...") + + try: + # Run the client to start the orchestration + run_client(client, prompt) + except Exception as e: + logger.exception(f"Error during sample execution: {e}") + + logger.debug("Sample completed. Worker shutting down...") + + +if __name__ == "__main__": + load_dotenv() + main() diff --git a/python/samples/getting_started/durabletask/05_multi_agent_orchestration_concurrency/worker.py b/python/samples/getting_started/durabletask/05_multi_agent_orchestration_concurrency/worker.py new file mode 100644 index 0000000000..41b4bd8dda --- /dev/null +++ b/python/samples/getting_started/durabletask/05_multi_agent_orchestration_concurrency/worker.py @@ -0,0 +1,201 @@ +"""Worker process for hosting multiple agents with orchestration using Durable Task. + +This worker registers two domain-specific agents (physicist and chemist) and an orchestration +function that runs them concurrently. The orchestration uses OrchestrationAgentExecutor +to execute agents in parallel and aggregate their responses. + +Prerequisites: +- Set AZURE_OPENAI_ENDPOINT and AZURE_OPENAI_CHAT_DEPLOYMENT_NAME + (plus AZURE_OPENAI_API_KEY or Azure CLI authentication) +- Start a Durable Task Scheduler (e.g., using Docker) +""" + +import asyncio +from collections.abc import Generator +import logging +import os +from typing import Any + +from agent_framework import AgentResponse, ChatAgent +from agent_framework.azure import AzureOpenAIChatClient, DurableAIAgentOrchestrationContext, DurableAIAgentWorker +from azure.identity import AzureCliCredential, DefaultAzureCredential +from durabletask.task import OrchestrationContext, when_all, Task +from durabletask.azuremanaged.worker import DurableTaskSchedulerWorker + +# Configure logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +# Agent names +PHYSICIST_AGENT_NAME = "PhysicistAgent" +CHEMIST_AGENT_NAME = "ChemistAgent" + + +def create_physicist_agent() -> "ChatAgent": + """Create the Physicist agent using Azure OpenAI. + + Returns: + ChatAgent: The configured Physicist agent + """ + return AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( + name=PHYSICIST_AGENT_NAME, + instructions="You are an expert in physics. 
You answer questions from a physics perspective.", + ) + + +def create_chemist_agent() -> "ChatAgent": + """Create the Chemist agent using Azure OpenAI. + + Returns: + ChatAgent: The configured Chemist agent + """ + return AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( + name=CHEMIST_AGENT_NAME, + instructions="You are an expert in chemistry. You answer questions from a chemistry perspective.", + ) + + +def multi_agent_concurrent_orchestration(context: OrchestrationContext, prompt: str) -> Generator[Task[Any], Any, dict[str, str]]: + """Orchestration that runs both agents in parallel and aggregates results. + + Uses DurableAIAgentOrchestrationContext to wrap the orchestration context and + access agents via the OrchestrationAgentExecutor. + + Args: + context: The orchestration context + prompt: The prompt to send to both agents + + Returns: + dict: Dictionary with 'physicist' and 'chemist' response texts + """ + + logger.info(f"[Orchestration] Starting concurrent execution for prompt: {prompt}") + + # Wrap the orchestration context to access agents + agent_context = DurableAIAgentOrchestrationContext(context) + + # Get agents using the agent context (returns DurableAIAgent proxies) + physicist = agent_context.get_agent(PHYSICIST_AGENT_NAME) + chemist = agent_context.get_agent(CHEMIST_AGENT_NAME) + + # Create separate threads for each agent + physicist_thread = physicist.get_new_thread() + chemist_thread = chemist.get_new_thread() + + logger.debug(f"[Orchestration] Created threads - Physicist: {physicist_thread.session_id}, Chemist: {chemist_thread.session_id}") + + # Create tasks from agent.run() calls - these return DurableAgentTask instances + physicist_task = physicist.run(messages=str(prompt), thread=physicist_thread) + chemist_task = chemist.run(messages=str(prompt), thread=chemist_thread) + + logger.debug("[Orchestration] Created agent tasks, executing concurrently...") + + # Execute both tasks concurrently using when_all + # The DurableAgentTask instances wrap the underlying entity calls + task_results = yield when_all([physicist_task, chemist_task]) + + logger.debug("[Orchestration] Both agents completed") + + # Extract results from the tasks - DurableAgentTask yields AgentResponse + physicist_result: AgentResponse = task_results[0] + chemist_result: AgentResponse = task_results[1] + + result = { + "physicist": physicist_result.text, + "chemist": chemist_result.text, + } + + logger.debug(f"[Orchestration] Aggregated results ready") + return result + + +def get_worker( + taskhub: str | None = None, + endpoint: str | None = None, + log_handler: logging.Handler | None = None +) -> DurableTaskSchedulerWorker: + """Create a configured DurableTaskSchedulerWorker. 
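+
+    Example (as sample.py uses it, with the worker as a context manager):
+
+        with get_worker(log_handler=logging.NullHandler()) as dts_worker:
+            setup_worker(dts_worker)
+            dts_worker.start()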
+ + Args: + taskhub: Task hub name (defaults to TASKHUB env var or "default") + endpoint: Scheduler endpoint (defaults to ENDPOINT env var or "http://localhost:8080") + log_handler: Optional logging handler for worker logging + + Returns: + Configured DurableTaskSchedulerWorker instance + """ + taskhub_name = taskhub or os.getenv("TASKHUB", "default") + endpoint_url = endpoint or os.getenv("ENDPOINT", "http://localhost:8080") + + logger.debug(f"Using taskhub: {taskhub_name}") + logger.debug(f"Using endpoint: {endpoint_url}") + + credential = None if endpoint_url == "http://localhost:8080" else DefaultAzureCredential() + + return DurableTaskSchedulerWorker( + host_address=endpoint_url, + secure_channel=endpoint_url != "http://localhost:8080", + taskhub=taskhub_name, + token_credential=credential, + log_handler=log_handler + ) + + +def setup_worker(worker: DurableTaskSchedulerWorker) -> DurableAIAgentWorker: + """Set up the worker with agents and orchestrations registered. + + Args: + worker: The DurableTaskSchedulerWorker instance + + Returns: + DurableAIAgentWorker with agents and orchestrations registered + """ + # Wrap it with the agent worker + agent_worker = DurableAIAgentWorker(worker) + + # Create and register both agents + logger.debug("Creating and registering agents...") + physicist_agent = create_physicist_agent() + chemist_agent = create_chemist_agent() + + agent_worker.add_agent(physicist_agent) + agent_worker.add_agent(chemist_agent) + + logger.debug(f"✓ Registered agents: {physicist_agent.name}, {chemist_agent.name}") + + # Register the orchestration function + logger.debug("Registering orchestration function...") + worker.add_orchestrator(multi_agent_concurrent_orchestration) # type: ignore + logger.debug(f"✓ Registered orchestration: {multi_agent_concurrent_orchestration.__name__}") + + return agent_worker + + +async def main(): + """Main entry point for the worker process.""" + logger.debug("Starting Durable Task Multi-Agent Worker with Orchestration...") + + # Create a worker using the helper function + worker = get_worker() + + # Setup worker with agents and orchestrations + setup_worker(worker) + + logger.debug("Worker is ready and listening for requests...") + logger.debug("Press Ctrl+C to stop.") + + try: + # Start the worker (this blocks until stopped) + worker.start() + + # Keep the worker running + while True: + await asyncio.sleep(1) + except KeyboardInterrupt: + logger.debug("Worker shutdown initiated") + + logger.debug("Worker stopped") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/getting_started/durabletask/06_multi_agent_orchestration_conditionals/README.md b/python/samples/getting_started/durabletask/06_multi_agent_orchestration_conditionals/README.md new file mode 100644 index 0000000000..f6a40c087b --- /dev/null +++ b/python/samples/getting_started/durabletask/06_multi_agent_orchestration_conditionals/README.md @@ -0,0 +1,84 @@ +# Multi-Agent Orchestration with Conditionals + +This sample demonstrates conditional orchestration logic with two agents that analyze incoming emails and route execution based on spam detection results. + +## Key Concepts Demonstrated + +- Multi-agent orchestration with two specialized agents (SpamDetectionAgent and EmailAssistantAgent). +- Conditional branching with different execution paths based on spam detection results. +- Structured outputs using Pydantic models with `options={"response_format": ...}` for type-safe agent responses. 
+- Activity functions for side effects (spam handling and email sending). +- Decision-based routing where orchestration logic branches on agent output. + +## Environment Setup + +See the [README.md](../README.md) file in the parent directory for more information on how to configure the environment, including how to install and run common sample dependencies. + +## Running the Sample + +With the environment setup, you can run the sample using the combined approach or separate worker and client processes: + +**Option 1: Combined (Recommended for Testing)** + +```bash +cd samples/getting_started/durabletask/06_multi_agent_orchestration_conditionals +python sample.py +``` + +**Option 2: Separate Processes** + +Start the worker in one terminal: + +```bash +python worker.py +``` + +In a new terminal, run the client: + +```bash +python client.py +``` + +The sample runs two test cases: + +**Test 1: Legitimate Email** +``` +Email ID: email-001 +Email Content: Hello! I wanted to reach out about our upcoming project meeting... + +🔍 SpamDetectionAgent: Analyzing email... +✓ Not spam - routing to EmailAssistantAgent + +📧 EmailAssistantAgent: Drafting response... +✓ Email sent: [Professional response drafted by EmailAssistantAgent] +``` + +**Test 2: Spam Email** +``` +Email ID: email-002 +Email Content: URGENT! You've won $1,000,000! Click here now... + +🔍 SpamDetectionAgent: Analyzing email... +⚠️ Spam detected: [Reason from SpamDetectionAgent] +✓ Email marked as spam and handled +``` + +## How It Works + +1. **Input Validation**: Orchestration validates email payload using Pydantic models. +2. **Spam Detection**: SpamDetectionAgent analyzes email content. +3. **Conditional Routing**: + - If spam: Calls `handle_spam_email` activity + - If legitimate: Runs EmailAssistantAgent and calls `send_email` activity +4. **Result**: Returns confirmation message from the appropriate activity. + +## Viewing Agent State + +You can view the state of both agents and orchestration in the Durable Task Scheduler dashboard: + +1. Open your browser and navigate to `http://localhost:8082` +2. In the dashboard, you can view: + - Orchestration instance status and history + - SpamDetectionAgent and EmailAssistantAgent entity states + - Activity execution logs + - Decision branch paths taken diff --git a/python/samples/getting_started/durabletask/06_multi_agent_orchestration_conditionals/client.py b/python/samples/getting_started/durabletask/06_multi_agent_orchestration_conditionals/client.py new file mode 100644 index 0000000000..58d4ecc1e8 --- /dev/null +++ b/python/samples/getting_started/durabletask/06_multi_agent_orchestration_conditionals/client.py @@ -0,0 +1,145 @@ +"""Client application for starting a spam detection orchestration. + +This client connects to the Durable Task Scheduler and starts an orchestration +that uses conditional logic to either handle spam emails or draft professional responses. 
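+
+The core flow (see run_client below) schedules the orchestration by name and
+then blocks until it finishes:
+
+    instance_id = client.schedule_new_orchestration(
+        orchestrator="spam_detection_orchestration",
+        input={"email_id": "email-001", "email_content": "..."},
+    )
+    metadata = client.wait_for_orchestration_completion(instance_id=instance_id, timeout=300)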
+ +Prerequisites: +- The worker must be running with both agents, orchestration, and activities registered +- Set AZURE_OPENAI_ENDPOINT and AZURE_OPENAI_CHAT_DEPLOYMENT_NAME + (plus AZURE_OPENAI_API_KEY or Azure CLI authentication) +- Durable Task Scheduler must be running +""" + +import asyncio +import logging +import os + +from azure.identity import DefaultAzureCredential +from durabletask.azuremanaged.client import DurableTaskSchedulerClient + +# Configure logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + + +def get_client( + taskhub: str | None = None, + endpoint: str | None = None, + log_handler: logging.Handler | None = None +) -> DurableTaskSchedulerClient: + """Create a configured DurableTaskSchedulerClient. + + Args: + taskhub: Task hub name (defaults to TASKHUB env var or "default") + endpoint: Scheduler endpoint (defaults to ENDPOINT env var or "http://localhost:8080") + log_handler: Optional logging handler for client logging + + Returns: + Configured DurableTaskSchedulerClient instance + """ + taskhub_name = taskhub or os.getenv("TASKHUB", "default") + endpoint_url = endpoint or os.getenv("ENDPOINT", "http://localhost:8080") + + logger.debug(f"Using taskhub: {taskhub_name}") + logger.debug(f"Using endpoint: {endpoint_url}") + + credential = None if endpoint_url == "http://localhost:8080" else DefaultAzureCredential() + + return DurableTaskSchedulerClient( + host_address=endpoint_url, + secure_channel=endpoint_url != "http://localhost:8080", + taskhub=taskhub_name, + token_credential=credential, + log_handler=log_handler + ) + + +def run_client( + client: DurableTaskSchedulerClient, + email_id: str = "email-001", + email_content: str = "Hello! I wanted to reach out about our upcoming project meeting." +) -> None: + """Run client to start and monitor the spam detection orchestration. 
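+
+    The arguments are assembled into the payload that the orchestration
+    validates with its EmailPayload model:
+
+        {"email_id": email_id, "email_content": email_content}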
+ + Args: + client: The DurableTaskSchedulerClient instance + email_id: The email ID + email_content: The email content to analyze + """ + payload = { + "email_id": email_id, + "email_content": email_content, + } + + logger.debug("Starting spam detection orchestration...") + + # Start the orchestration with the email payload + instance_id = client.schedule_new_orchestration( # type: ignore + orchestrator="spam_detection_orchestration", + input=payload, + ) + + logger.debug(f"Orchestration started with instance ID: {instance_id}") + logger.debug("Waiting for orchestration to complete...") + + # Retrieve the final state + metadata = client.wait_for_orchestration_completion( + instance_id=instance_id, + timeout=300 + ) + + if metadata and metadata.runtime_status.name == "COMPLETED": + result = metadata.serialized_output + + logger.debug("Orchestration completed successfully!") + + # Parse and display the result + if result: + # Remove quotes if present + if result.startswith('"') and result.endswith('"'): + result = result[1:-1] + logger.info(f"Result: {result}") + + elif metadata: + logger.error(f"Orchestration ended with status: {metadata.runtime_status.name}") + if metadata.serialized_output: + logger.error(f"Output: {metadata.serialized_output}") + else: + logger.error("Orchestration did not complete within the timeout period") + + +async def main() -> None: + """Main entry point for the client application.""" + logger.debug("Starting Durable Task Spam Detection Orchestration Client...") + + # Create client using helper function + client = get_client() + + try: + # Test with a legitimate email + logger.info("TEST 1: Legitimate Email") + + run_client( + client, + email_id="email-001", + email_content="Hello! I wanted to reach out about our upcoming project meeting scheduled for next week." + ) + + # Test with a spam email + logger.info("TEST 2: Spam Email") + + run_client( + client, + email_id="email-002", + email_content="URGENT! You've won $1,000,000! Click here now to claim your prize! Limited time offer! Don't miss out!" 
+        )
+
+    except Exception as e:
+        logger.exception(f"Error during orchestration: {e}")
+    finally:
+        logger.debug("Client shutting down")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/python/samples/getting_started/durabletask/06_multi_agent_orchestration_conditionals/requirements.txt b/python/samples/getting_started/durabletask/06_multi_agent_orchestration_conditionals/requirements.txt
new file mode 100644
index 0000000000..371b9e3b79
--- /dev/null
+++ b/python/samples/getting_started/durabletask/06_multi_agent_orchestration_conditionals/requirements.txt
@@ -0,0 +1,6 @@
+# Agent Framework packages (installing from local package until a package is published)
+-e ../../../../
+-e ../../../../packages/durabletask
+
+# Azure authentication
+azure-identity
diff --git a/python/samples/getting_started/durabletask/06_multi_agent_orchestration_conditionals/sample.py b/python/samples/getting_started/durabletask/06_multi_agent_orchestration_conditionals/sample.py
new file mode 100644
index 0000000000..d8e9d0a4b3
--- /dev/null
+++ b/python/samples/getting_started/durabletask/06_multi_agent_orchestration_conditionals/sample.py
@@ -0,0 +1,79 @@
+"""Multi-Agent Orchestration with Conditionals Sample - Durable Task Integration
+
+This sample demonstrates conditional orchestration logic with two agents:
+- SpamDetectionAgent: Analyzes emails for spam content
+- EmailAssistantAgent: Drafts professional responses to legitimate emails
+
+The orchestration branches based on spam detection results, calling different
+activity functions to handle spam or send legitimate email responses.
+
+Prerequisites:
+- Set AZURE_OPENAI_ENDPOINT and AZURE_OPENAI_CHAT_DEPLOYMENT_NAME
+  (plus AZURE_OPENAI_API_KEY or Azure CLI authentication)
+- Durable Task Scheduler must be running (e.g., using Docker)
+
+To run this sample:
+    python sample.py
+"""
+
+import logging
+
+from dotenv import load_dotenv
+
+# Import helper functions from worker and client modules
+from client import get_client, run_client
+from worker import get_worker, setup_worker
+
+logging.basicConfig(
+    level=logging.INFO,
+    force=True
+)
+logger = logging.getLogger()
+
+
+def main():
+    """Main entry point - runs both worker and client in single process."""
+    logger.debug("Starting Durable Task Spam Detection Orchestration Sample (Combined Worker + Client)...")
+
+    silent_handler = logging.NullHandler()
+    # Create and start the worker using helper function and context manager
+    with get_worker(log_handler=silent_handler) as dts_worker:
+        # Register agents, orchestrations, and activities using helper function
+        setup_worker(dts_worker)
+
+        # Start the worker
+        dts_worker.start()
+        logger.debug("Worker started and listening for requests...")
+
+        # Create the client using helper function
+        client = get_client(log_handler=silent_handler)
+        logger.debug("CLIENT: Starting orchestration tests...")
+
+        try:
+            # Test 1: Legitimate email
+            logger.info("TEST 1: Legitimate Email")
+
+            run_client(
+                client,
+                email_id="email-001",
+                email_content="Hello! I wanted to reach out about our upcoming project meeting scheduled for next week."
+            )
+
+            # Test 2: Spam email
+            logger.info("TEST 2: Spam Email")
+
+            run_client(
+                client,
+                email_id="email-002",
+                email_content="URGENT! You've won $1,000,000! Click here now to claim your prize! Limited time offer! Don't miss out!"
+            )
+
+        except Exception as e:
+            logger.exception(f"Error during sample execution: {e}")
+
+        logger.debug("Sample completed. 
Worker shutting down...") + + +if __name__ == "__main__": + load_dotenv() + main() diff --git a/python/samples/getting_started/durabletask/06_multi_agent_orchestration_conditionals/worker.py b/python/samples/getting_started/durabletask/06_multi_agent_orchestration_conditionals/worker.py new file mode 100644 index 0000000000..78ac71ce8a --- /dev/null +++ b/python/samples/getting_started/durabletask/06_multi_agent_orchestration_conditionals/worker.py @@ -0,0 +1,291 @@ +"""Worker process for hosting spam detection and email assistant agents with conditional orchestration. + +This worker registers two domain-specific agents (spam detector and email assistant) and an +orchestration function that routes execution based on spam detection results. Activity functions +handle side effects (spam handling and email sending). + +Prerequisites: +- Set AZURE_OPENAI_ENDPOINT and AZURE_OPENAI_CHAT_DEPLOYMENT_NAME + (plus AZURE_OPENAI_API_KEY or Azure CLI authentication) +- Start a Durable Task Scheduler (e.g., using Docker) +""" + +import asyncio +from collections.abc import Generator +import logging +import os +from typing import Any, cast + +from agent_framework import AgentResponse, ChatAgent +from agent_framework.azure import AzureOpenAIChatClient, DurableAIAgentOrchestrationContext, DurableAIAgentWorker +from azure.identity import AzureCliCredential, DefaultAzureCredential +from durabletask.task import ActivityContext, OrchestrationContext, Task +from durabletask.azuremanaged.worker import DurableTaskSchedulerWorker +from pydantic import BaseModel, ValidationError + +# Configure logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +# Agent names +SPAM_AGENT_NAME = "SpamDetectionAgent" +EMAIL_AGENT_NAME = "EmailAssistantAgent" + + +class SpamDetectionResult(BaseModel): + """Result from spam detection agent.""" + is_spam: bool + reason: str + + +class EmailResponse(BaseModel): + """Result from email assistant agent.""" + response: str + + +class EmailPayload(BaseModel): + """Input payload for the orchestration.""" + email_id: str + email_content: str + + +def create_spam_agent() -> "ChatAgent": + """Create the Spam Detection agent using Azure OpenAI. + + Returns: + ChatAgent: The configured Spam Detection agent + """ + return AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( + name=SPAM_AGENT_NAME, + instructions="You are a spam detection assistant that identifies spam emails.", + ) + + +def create_email_agent() -> "ChatAgent": + """Create the Email Assistant agent using Azure OpenAI. + + Returns: + ChatAgent: The configured Email Assistant agent + """ + return AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( + name=EMAIL_AGENT_NAME, + instructions="You are an email assistant that helps users draft responses to emails with professionalism.", + ) + + +def handle_spam_email(context: ActivityContext, reason: str) -> str: + """Activity function to handle spam emails. + + Args: + context: The activity context + reason: The reason why the email was marked as spam + + Returns: + str: Confirmation message + """ + logger.debug(f"[Activity] Handling spam email: {reason}") + return f"Email marked as spam: {reason}" + + +def send_email(context: ActivityContext, message: str) -> str: + """Activity function to send emails. 
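+
+    Invoked from the orchestration as a durable activity, e.g.:
+
+        result = yield context.call_activity("send_email", input=email_result.response)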
+ + Args: + context: The activity context + message: The email message to send + + Returns: + str: Confirmation message + """ + logger.debug(f"[Activity] Sending email: {message[:50]}...") + return f"Email sent: {message}" + + +def spam_detection_orchestration(context: OrchestrationContext, payload_raw: Any) -> Generator[Task[Any], Any, str]: + """Orchestration that detects spam and conditionally drafts email responses. + + This orchestration: + 1. Validates the input payload + 2. Runs the spam detection agent + 3. If spam: calls handle_spam_email activity + 4. If legitimate: runs email assistant agent and calls send_email activity + + Args: + context: The orchestration context + payload_raw: The input payload dictionary + + Returns: + str: Result message from activity functions + """ + logger.debug("[Orchestration] Starting spam detection orchestration") + + # Validate input + if not isinstance(payload_raw, dict): + raise ValueError("Email data is required") + + try: + payload = EmailPayload.model_validate(payload_raw) + except ValidationError as exc: + raise ValueError(f"Invalid email payload: {exc}") from exc + + logger.debug(f"[Orchestration] Processing email ID: {payload.email_id}") + + # Wrap the orchestration context to access agents + agent_context = DurableAIAgentOrchestrationContext(context) + + # Get spam detection agent + spam_agent = agent_context.get_agent(SPAM_AGENT_NAME) + + # Run spam detection + spam_prompt = ( + "Analyze this email for spam content and return a JSON response with 'is_spam' (boolean) " + "and 'reason' (string) fields:\n" + f"Email ID: {payload.email_id}\n" + f"Content: {payload.email_content}" + ) + + logger.info("[Orchestration] Running spam detection agent: %s", spam_prompt) + spam_result_task = spam_agent.run( + messages=spam_prompt, + options={"response_format": SpamDetectionResult}, + ) + + spam_result_raw: AgentResponse = yield spam_result_task + spam_result = cast(SpamDetectionResult, spam_result_raw.value) + + logger.info("[Orchestration] Spam detection result: is_spam=%s", spam_result.is_spam) + + # Branch based on spam detection result + if spam_result.is_spam: + logger.debug("[Orchestration] Email is spam, handling...") + result_task: Task[str] = context.call_activity("handle_spam_email", input=spam_result.reason) + result: str = yield result_task + return result + + # Email is legitimate - draft a response + logger.debug("[Orchestration] Email is legitimate, drafting response...") + + email_agent = agent_context.get_agent(EMAIL_AGENT_NAME) + + email_prompt = ( + "Draft a professional response to this email. Return a JSON response with a 'response' field " + "containing the reply:\n\n" + f"Email ID: {payload.email_id}\n" + f"Content: {payload.email_content}" + ) + + logger.info("[Orchestration] Running email assistant agent: %s", email_prompt) + email_result_task = email_agent.run( + messages=email_prompt, + options={"response_format": EmailResponse}, + ) + + email_result_raw: AgentResponse = yield email_result_task + email_result = cast(EmailResponse, email_result_raw.value) + + logger.debug("[Orchestration] Email response drafted, sending...") + result_task: Task[str] = context.call_activity("send_email", input=email_result.response) + result: str = yield result_task + + logger.info("Sent Email: %s", result) + + return result + + +def get_worker( + taskhub: str | None = None, + endpoint: str | None = None, + log_handler: logging.Handler | None = None +) -> DurableTaskSchedulerWorker: + """Create a configured DurableTaskSchedulerWorker. 
+ + Args: + taskhub: Task hub name (defaults to TASKHUB env var or "default") + endpoint: Scheduler endpoint (defaults to ENDPOINT env var or "http://localhost:8080") + log_handler: Optional logging handler for worker logging + + Returns: + Configured DurableTaskSchedulerWorker instance + """ + taskhub_name = taskhub or os.getenv("TASKHUB", "default") + endpoint_url = endpoint or os.getenv("ENDPOINT", "http://localhost:8080") + + logger.debug(f"Using taskhub: {taskhub_name}") + logger.debug(f"Using endpoint: {endpoint_url}") + + credential = None if endpoint_url == "http://localhost:8080" else DefaultAzureCredential() + + return DurableTaskSchedulerWorker( + host_address=endpoint_url, + secure_channel=endpoint_url != "http://localhost:8080", + taskhub=taskhub_name, + token_credential=credential, + log_handler=log_handler + ) + + +def setup_worker(worker: DurableTaskSchedulerWorker) -> DurableAIAgentWorker: + """Set up the worker with agents, orchestrations, and activities registered. + + Args: + worker: The DurableTaskSchedulerWorker instance + + Returns: + DurableAIAgentWorker with agents, orchestrations, and activities registered + """ + # Wrap it with the agent worker + agent_worker = DurableAIAgentWorker(worker) + + # Create and register both agents + logger.debug("Creating and registering agents...") + spam_agent = create_spam_agent() + email_agent = create_email_agent() + + agent_worker.add_agent(spam_agent) + agent_worker.add_agent(email_agent) + + logger.debug(f"✓ Registered agents: {spam_agent.name}, {email_agent.name}") + + # Register activity functions + logger.debug("Registering activity functions...") + worker.add_activity(handle_spam_email) # type: ignore[arg-type] + worker.add_activity(send_email) # type: ignore[arg-type] + logger.debug(f"✓ Registered activity: handle_spam_email") + logger.debug(f"✓ Registered activity: send_email") + + # Register the orchestration function + logger.debug("Registering orchestration function...") + worker.add_orchestrator(spam_detection_orchestration) # type: ignore[arg-type] + logger.debug(f"✓ Registered orchestration: {spam_detection_orchestration.__name__}") + + return agent_worker + + +async def main(): + """Main entry point for the worker process.""" + logger.debug("Starting Durable Task Spam Detection Worker with Orchestration...") + + # Create a worker using the helper function + worker = get_worker() + + # Setup worker with agents, orchestrations, and activities + setup_worker(worker) + + logger.debug("Worker is ready and listening for requests...") + logger.debug("Press Ctrl+C to stop.") + + try: + # Start the worker (this blocks until stopped) + worker.start() + + # Keep the worker running + while True: + await asyncio.sleep(1) + except KeyboardInterrupt: + logger.debug("Worker shutdown initiated") + + logger.debug("Worker stopped") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/getting_started/durabletask/07_single_agent_orchestration_hitl/README.md b/python/samples/getting_started/durabletask/07_single_agent_orchestration_hitl/README.md new file mode 100644 index 0000000000..fbfe905d59 --- /dev/null +++ b/python/samples/getting_started/durabletask/07_single_agent_orchestration_hitl/README.md @@ -0,0 +1,87 @@ +# Single-Agent Orchestration with Human-in-the-Loop (HITL) + +This sample demonstrates the human-in-the-loop pattern where a WriterAgent generates content and waits for human approval before publishing. 
The orchestration handles external events, timeouts, and iterative refinement based on feedback. + +## Key Concepts Demonstrated + +- Human-in-the-loop workflow with orchestration pausing for external approval/rejection events. +- External event handling using `wait_for_external_event()` to receive human input. +- Timeout management with `when_any()` to race between approval event and timeout. +- Iterative refinement where agent regenerates content based on reviewer feedback. +- Structured outputs using Pydantic models with `options={"response_format": ...}` for type-safe agent responses. +- Activity functions for notifications and publishing as separate side effects. +- Long-running orchestrations maintaining state across multiple interactions. + +## Environment Setup + +See the [README.md](../README.md) file in the parent directory for more information on how to configure the environment, including how to install and run common sample dependencies. + +## Running the Sample + +With the environment setup, you can run the sample using the combined approach or separate worker and client processes: + +**Option 1: Combined (Recommended for Testing)** + +```bash +cd samples/getting_started/durabletask/07_single_agent_orchestration_hitl +python sample.py +``` + +**Option 2: Separate Processes** + +Start the worker in one terminal: + +```bash +python worker.py +``` + +In a new terminal, run the client: + +```bash +python client.py +``` + +The sample runs two test scenarios: + +**Test 1: Immediate Approval** +``` +Topic: The benefits of cloud computing +[WriterAgent generates content] +[Notification sent: Please review the content] +[Client sends approval] +✓ Content published successfully +``` + +**Test 2: Rejection with Feedback, Then Approval** +``` +Topic: The future of artificial intelligence +[WriterAgent generates initial content] +[Notification sent: Please review the content] +[Client sends rejection with feedback: "Make it more technical..."] +[WriterAgent regenerates content with feedback] +[Notification sent: Please review the revised content] +[Client sends approval] +✓ Revised content published successfully +``` + +## How It Works + +1. **Initial Generation**: WriterAgent creates content based on the topic. +2. **Review Loop** (up to max_review_attempts): + - Activity notifies user for approval + - Orchestration waits for approval event OR timeout + - **If approved**: Publishes content and returns + - **If rejected**: Incorporates feedback and regenerates + - **If timeout**: Raises TimeoutError +3. **Completion**: Returns published content or error. + +## Viewing Agent State + +You can view the state of the WriterAgent and orchestration in the Durable Task Scheduler dashboard: + +1. Open your browser and navigate to `http://localhost:8082` +2. In the dashboard, you can view: + - Orchestration instance status and pending events + - WriterAgent entity state and conversation threads + - Activity execution logs + - External event history diff --git a/python/samples/getting_started/durabletask/07_single_agent_orchestration_hitl/client.py b/python/samples/getting_started/durabletask/07_single_agent_orchestration_hitl/client.py new file mode 100644 index 0000000000..446ab1b347 --- /dev/null +++ b/python/samples/getting_started/durabletask/07_single_agent_orchestration_hitl/client.py @@ -0,0 +1,308 @@ +"""Client application for starting a human-in-the-loop content generation orchestration. 
+ +This client connects to the Durable Task Scheduler and demonstrates the HITL pattern +by starting an orchestration, sending approval/rejection events, and monitoring progress. + +Prerequisites: +- The worker must be running with the agent, orchestration, and activities registered +- Set AZURE_OPENAI_ENDPOINT and AZURE_OPENAI_CHAT_DEPLOYMENT_NAME + (plus AZURE_OPENAI_API_KEY or Azure CLI authentication) +- Durable Task Scheduler must be running +""" + +import asyncio +import json +import logging +import os +import time + +from azure.identity import DefaultAzureCredential +from durabletask.azuremanaged.client import DurableTaskSchedulerClient +from durabletask.client import OrchestrationState + +# Configure logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +# Constants +HUMAN_APPROVAL_EVENT = "HumanApproval" + + +def get_client( + taskhub: str | None = None, + endpoint: str | None = None, + log_handler: logging.Handler | None = None +) -> DurableTaskSchedulerClient: + """Create a configured DurableTaskSchedulerClient. + + Args: + taskhub: Task hub name (defaults to TASKHUB env var or "default") + endpoint: Scheduler endpoint (defaults to ENDPOINT env var or "http://localhost:8080") + log_handler: Optional logging handler for client logging + + Returns: + Configured DurableTaskSchedulerClient instance + """ + taskhub_name = taskhub or os.getenv("TASKHUB", "default") + endpoint_url = endpoint or os.getenv("ENDPOINT", "http://localhost:8080") + + logger.debug(f"Using taskhub: {taskhub_name}") + logger.debug(f"Using endpoint: {endpoint_url}") + + credential = None if endpoint_url == "http://localhost:8080" else DefaultAzureCredential() + + return DurableTaskSchedulerClient( + host_address=endpoint_url, + secure_channel=endpoint_url != "http://localhost:8080", + taskhub=taskhub_name, + token_credential=credential, + log_handler=log_handler + ) + + +def _log_completion_result( + metadata: OrchestrationState | None, +) -> None: + """Log the orchestration completion result. + + Args: + metadata: The orchestration metadata + """ + if metadata and metadata.runtime_status.name == "COMPLETED": + result = metadata.serialized_output + + logger.debug(f"Orchestration completed successfully!") + + if result: + try: + result_dict = json.loads(result) + logger.info("Final Result: %s", json.dumps(result_dict, indent=2)) + except json.JSONDecodeError: + logger.debug(f"Result: {result}") + + elif metadata: + logger.error(f"Orchestration ended with status: {metadata.runtime_status.name}") + if metadata.serialized_output: + logger.error(f"Output: {metadata.serialized_output}") + else: + logger.error("Orchestration did not complete within the timeout period") + + +def _wait_and_log_completion( + client: DurableTaskSchedulerClient, + instance_id: str, + timeout: int = 60 +) -> None: + """Wait for orchestration completion and log the result. + + Args: + client: The DurableTaskSchedulerClient instance + instance_id: The orchestration instance ID + timeout: Maximum time to wait for completion in seconds + """ + logger.debug("Waiting for orchestration to complete...") + metadata = client.wait_for_orchestration_completion( + instance_id=instance_id, + timeout=timeout + ) + + _log_completion_result(metadata) + + +def send_approval( + client: DurableTaskSchedulerClient, + instance_id: str, + approved: bool, + feedback: str = "" +) -> None: + """Send approval or rejection event to the orchestration. 
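+
+    The raised event carries the payload that the orchestration parses into
+    its HumanApproval model, for example:
+
+        {"approved": False, "feedback": "Make it more technical."}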
+ + Args: + client: The DurableTaskSchedulerClient instance + instance_id: The orchestration instance ID + approved: Whether to approve or reject + feedback: Optional feedback message (used when rejected) + """ + approval_data = { + "approved": approved, + "feedback": feedback + } + + logger.debug(f"Sending {'APPROVAL' if approved else 'REJECTION'} to instance {instance_id}") + if feedback: + logger.debug(f"Feedback: {feedback}") + + # Raise the external event + client.raise_orchestration_event( + instance_id=instance_id, + event_name=HUMAN_APPROVAL_EVENT, + data=approval_data + ) + + logger.debug("Event sent successfully") + + +def wait_for_notification( + client: DurableTaskSchedulerClient, + instance_id: str, + timeout_seconds: int = 10 +) -> bool: + """Wait for the orchestration to reach a notification point. + + Polls the orchestration status until it appears to be waiting for approval. + + Args: + client: The DurableTaskSchedulerClient instance + instance_id: The orchestration instance ID + timeout_seconds: Maximum time to wait + + Returns: + True if notification detected, False if timeout + """ + logger.debug("Waiting for orchestration to reach notification point...") + + start_time = time.time() + while time.time() - start_time < timeout_seconds: + try: + metadata = client.get_orchestration_state( + instance_id=instance_id, + ) + + if metadata: + # Check if we're waiting for approval by examining custom status + if metadata.serialized_custom_status: + try: + custom_status = json.loads(metadata.serialized_custom_status) + # Handle both string and dict custom status + status_str = custom_status if isinstance(custom_status, str) else str(custom_status) + if status_str.lower().startswith("requesting human feedback"): + logger.debug("Orchestration is requesting human feedback") + return True + except (json.JSONDecodeError, AttributeError): + # If it's not JSON, treat as plain string + if metadata.serialized_custom_status.lower().startswith("requesting human feedback"): + logger.debug("Orchestration is requesting human feedback") + return True + + # Check for terminal states + if metadata.runtime_status.name == "COMPLETED": + logger.debug("Orchestration already completed") + return False + elif metadata.runtime_status.name == "FAILED": + logger.error("Orchestration failed") + return False + except Exception as e: + logger.debug(f"Status check: {e}") + + time.sleep(1) + + logger.warning("Timeout waiting for notification") + return False + + +def run_interactive_client(client: DurableTaskSchedulerClient) -> None: + """Run an interactive client that prompts for user input and handles approval workflow. 
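+
+    The orchestration is started with a payload of the form:
+
+        {
+            "topic": "The benefits of cloud computing",
+            "max_review_attempts": 3,
+            "approval_timeout_seconds": 18000,
+        }
+
+    and each review round is driven by wait_for_notification() followed by
+    send_approval().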
+ + Args: + client: The DurableTaskSchedulerClient instance + """ + # Get user inputs + logger.debug("Content Generation - Human-in-the-Loop") + + topic = input("Enter the topic for content generation: ").strip() + if not topic: + topic = "The benefits of cloud computing" + logger.info(f"Using default topic: {topic}") + + max_attempts_str = input("Enter max review attempts (default: 3): ").strip() + max_review_attempts = int(max_attempts_str) if max_attempts_str else 3 + + timeout_hours_str = input("Enter approval timeout in hours (default: 5): ").strip() + timeout_hours = float(timeout_hours_str) if timeout_hours_str else 5.0 + approval_timeout_seconds = int(timeout_hours * 3600) + + payload = { + "topic": topic, + "max_review_attempts": max_review_attempts, + "approval_timeout_seconds": approval_timeout_seconds + } + + logger.debug(f"Configuration: Topic={topic}, Max attempts={max_review_attempts}, Timeout={timeout_hours}h") + + # Start the orchestration + logger.debug("Starting content generation orchestration...") + instance_id = client.schedule_new_orchestration( # type: ignore + orchestrator="content_generation_hitl_orchestration", + input=payload, + ) + + logger.info(f"Orchestration started with instance ID: {instance_id}") + + # Review loop + attempt = 1 + while attempt <= max_review_attempts: + logger.info(f"Review Attempt {attempt}/{max_review_attempts}") + + # Wait for orchestration to reach notification point + logger.debug("Waiting for content generation...") + if not wait_for_notification(client, instance_id, timeout_seconds=120): + logger.error("Failed to receive notification. Orchestration may have completed or failed.") + break + + logger.info("Content is ready for review! Please review the content in the worker logs.") + + # Get user decision + while True: + decision = input("Do you approve this content? (yes/no): ").strip().lower() + if decision in ['yes', 'y', 'no', 'n']: + break + logger.info("Please enter 'yes' or 'no'") + + approved = decision in ['yes', 'y'] + + if approved: + logger.debug("Sending approval...") + send_approval(client, instance_id, approved=True) + logger.info("Approval sent. Waiting for orchestration to complete...") + _wait_and_log_completion(client, instance_id, timeout=60) + break + else: + feedback = input("Enter feedback for improvement: ").strip() + if not feedback: + feedback = "Please revise the content." + + logger.debug("Sending rejection with feedback...") + send_approval(client, instance_id, approved=False, feedback=feedback) + logger.info("Rejection sent. 
Content will be regenerated...") + + attempt += 1 + + if attempt > max_review_attempts: + logger.info(f"Maximum review attempts ({max_review_attempts}) reached.") + _wait_and_log_completion(client, instance_id, timeout=30) + break + + # Small pause before next iteration + time.sleep(2) + + +async def main() -> None: + """Main entry point for the client application.""" + logger.debug("Starting Durable Task HITL Content Generation Client") + + # Create client using helper function + client = get_client() + + try: + run_interactive_client(client) + + except KeyboardInterrupt: + logger.info("Interrupted by user") + except Exception as e: + logger.exception(f"Error during orchestration: {e}") + finally: + logger.debug("Client shutting down") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/getting_started/durabletask/07_single_agent_orchestration_hitl/requirements.txt b/python/samples/getting_started/durabletask/07_single_agent_orchestration_hitl/requirements.txt new file mode 100644 index 0000000000..371b9e3b79 --- /dev/null +++ b/python/samples/getting_started/durabletask/07_single_agent_orchestration_hitl/requirements.txt @@ -0,0 +1,6 @@ +# Agent Framework packages (installing from local package until a package is published) +-e ../../../../ +-e ../../../../packages/durabletask + +# Azure authentication +azure-identity diff --git a/python/samples/getting_started/durabletask/07_single_agent_orchestration_hitl/sample.py b/python/samples/getting_started/durabletask/07_single_agent_orchestration_hitl/sample.py new file mode 100644 index 0000000000..5468a70dd3 --- /dev/null +++ b/python/samples/getting_started/durabletask/07_single_agent_orchestration_hitl/sample.py @@ -0,0 +1,64 @@ +"""Human-in-the-Loop Orchestration Sample - Durable Task Integration + +This sample demonstrates the HITL pattern with a WriterAgent that generates content +and waits for human approval. 
The orchestration handles: +- External event waiting (approval/rejection) +- Timeout handling +- Iterative refinement based on feedback +- Activity functions for notifications and publishing + +Prerequisites: +- Set AZURE_OPENAI_ENDPOINT and AZURE_OPENAI_CHAT_DEPLOYMENT_NAME + (plus AZURE_OPENAI_API_KEY or Azure CLI authentication) +- Durable Task Scheduler must be running (e.g., using Docker) + +To run this sample: + python sample.py +""" + +import logging + +from dotenv import load_dotenv + +# Import helper functions from worker and client modules +from client import get_client, run_interactive_client +from worker import get_worker, setup_worker + +logging.basicConfig( + level=logging.INFO, + force=True +) +logger = logging.getLogger() + + +def main(): + """Main entry point - runs both worker and client in single process.""" + logger.debug("Starting Durable Task HITL Content Generation Sample (Combined Worker + Client)...") + + silent_handler = logging.NullHandler() + # Create and start the worker using helper function and context manager + with get_worker(log_handler=silent_handler) as dts_worker: + # Register agent, orchestration, and activities using helper function + setup_worker(dts_worker) + + # Start the worker + dts_worker.start() + logger.debug("Worker started and listening for requests...") + + # Create the client using helper function + client = get_client(log_handler=silent_handler) + + try: + logger.debug("CLIENT: Starting orchestration tests...") + + run_interactive_client(client) + + except Exception as e: + logger.exception(f"Error during sample execution: {e}") + + logger.debug("Sample completed. Worker shutting down...") + + +if __name__ == "__main__": + load_dotenv() + main() diff --git a/python/samples/getting_started/durabletask/07_single_agent_orchestration_hitl/worker.py b/python/samples/getting_started/durabletask/07_single_agent_orchestration_hitl/worker.py new file mode 100644 index 0000000000..db32aecf14 --- /dev/null +++ b/python/samples/getting_started/durabletask/07_single_agent_orchestration_hitl/worker.py @@ -0,0 +1,374 @@ +"""Worker process for hosting a writer agent with human-in-the-loop orchestration. + +This worker registers a WriterAgent and an orchestration function that implements +a human-in-the-loop review workflow. The orchestration pauses for external events +(human approval/rejection) with timeout handling, and iterates based on feedback. 
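+
+The approval/timeout race at the heart of the orchestration looks like this
+(condensed from content_generation_hitl_orchestration below):
+
+    approval_task = context.wait_for_external_event(HUMAN_APPROVAL_EVENT)
+    timeout_task = context.create_timer(
+        context.current_utc_datetime + timedelta(seconds=payload.approval_timeout_seconds)
+    )
+    winner = yield when_any([approval_task, timeout_task])
+    if winner == approval_task:
+        ...  # parse HumanApproval, then publish or regenerate with feedback
+    else:
+        raise TimeoutError("Human approval timed out")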
+
+Prerequisites:
+- Set AZURE_OPENAI_ENDPOINT and AZURE_OPENAI_CHAT_DEPLOYMENT_NAME
+  (plus AZURE_OPENAI_API_KEY or Azure CLI authentication)
+- Start a Durable Task Scheduler (e.g., using Docker)
+"""
+
+import asyncio
+import logging
+import os
+from collections.abc import Generator
+from datetime import timedelta
+from typing import Any, cast
+
+from agent_framework import AgentResponse, ChatAgent
+from agent_framework.azure import AzureOpenAIChatClient, DurableAIAgentOrchestrationContext, DurableAIAgentWorker
+from azure.identity import AzureCliCredential, DefaultAzureCredential
+from durabletask.azuremanaged.worker import DurableTaskSchedulerWorker
+from durabletask.task import ActivityContext, OrchestrationContext, Task, when_any  # type: ignore
+from pydantic import BaseModel, ValidationError
+
+# Configure logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+# Constants
+WRITER_AGENT_NAME = "WriterAgent"
+HUMAN_APPROVAL_EVENT = "HumanApproval"
+
+
+class ContentGenerationInput(BaseModel):
+    """Input for content generation orchestration."""
+    topic: str
+    max_review_attempts: int = 3
+    approval_timeout_seconds: float = 300  # 5 minutes for demo (72 hours in production)
+
+
+class GeneratedContent(BaseModel):
+    """Structured output from writer agent."""
+    title: str
+    content: str
+
+
+class HumanApproval(BaseModel):
+    """Human approval decision."""
+    approved: bool
+    feedback: str = ""
+
+
+def create_writer_agent() -> "ChatAgent":
+    """Create the Writer agent using Azure OpenAI.
+
+    Returns:
+        ChatAgent: The configured Writer agent
+    """
+    instructions = (
+        "You are a professional content writer who creates high-quality articles on various topics. "
+        "You write engaging, informative, and well-structured content that follows best practices "
+        "for readability and accuracy. "
+        "Return your response as JSON with 'title' and 'content' fields. "
+        "Limit the response to 300 words or less."
+    )
+
+    return AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent(
+        name=WRITER_AGENT_NAME,
+        instructions=instructions,
+    )
+
+
+def notify_user_for_approval(context: ActivityContext, content: dict[str, str]) -> str:
+    """Activity function to notify user for approval.
+
+    Args:
+        context: The activity context
+        content: The generated content dictionary
+
+    Returns:
+        str: Confirmation message
+    """
+    model = GeneratedContent.model_validate(content)
+    logger.info("NOTIFICATION: Please review the following content for approval:")
+    logger.info(f"Title: {model.title or '(untitled)'}")
+    logger.info(f"Content: {model.content}")
+    logger.info("Use the client to send approval or rejection.")
+    return "Notification sent to user for approval."
+
+
+def publish_content(context: ActivityContext, content: dict[str, str]) -> str:
+    """Activity function to publish approved content.
+
+    Args:
+        context: The activity context
+        content: The generated content dictionary
+
+    Returns:
+        str: Confirmation message
+    """
+    model = GeneratedContent.model_validate(content)
+    logger.info("PUBLISHING: Content has been published successfully:")
+    logger.info(f"Title: {model.title or '(untitled)'}")
+    logger.info(f"Content: {model.content}")
+    return "Published content successfully."
+
+
+def content_generation_hitl_orchestration(
+    context: OrchestrationContext,
+    payload_raw: Any
+) -> Generator[Task[Any], Any, dict[str, str]]:
+    """Human-in-the-loop orchestration for content generation with approval workflow.
+
+    This orchestration:
+    1. Generates initial content using WriterAgent
+    2. Loops up to max_review_attempts times:
+        a. Notifies user for approval
+        b. 
Waits for approval event or timeout + c. If approved: publishes and returns + d. If rejected: incorporates feedback and regenerates + e. If timeout: raises TimeoutError + 3. Raises RuntimeError if max attempts exhausted + + Args: + context: The orchestration context + payload_raw: The input payload + + Returns: + dict: Result with published content + + Raises: + ValueError: If input is invalid or agent returns no content + TimeoutError: If human approval times out + RuntimeError: If max review attempts exhausted + """ + logger.debug("[Orchestration] Starting HITL content generation orchestration") + + # Validate input + if not isinstance(payload_raw, dict): + raise ValueError("Content generation input is required") + + try: + payload = ContentGenerationInput.model_validate(payload_raw) + except ValidationError as exc: + raise ValueError(f"Invalid content generation input: {exc}") from exc + + logger.debug(f"[Orchestration] Topic: {payload.topic}") + logger.debug(f"[Orchestration] Max attempts: {payload.max_review_attempts}") + logger.debug(f"[Orchestration] Approval timeout: {payload.approval_timeout_seconds}s") + + # Wrap the orchestration context to access agents + agent_context = DurableAIAgentOrchestrationContext(context) + + # Get the writer agent + writer = agent_context.get_agent(WRITER_AGENT_NAME) + writer_thread = writer.get_new_thread() + + logger.info(f"ThreadID: {writer_thread.session_id}") + + # Generate initial content + logger.info("[Orchestration] Generating initial content...") + + initial_response: AgentResponse = yield writer.run( + messages=f"Write a short article about '{payload.topic}'.", + thread=writer_thread, + options={"response_format": GeneratedContent}, + ) + content = cast(GeneratedContent, initial_response.value) + + if not isinstance(content, GeneratedContent): + raise ValueError("Agent returned no content after extraction.") + + logger.debug(f"[Orchestration] Initial content generated: {content.title}") + + # Review loop + attempt = 0 + while attempt < payload.max_review_attempts: + attempt += 1 + logger.debug(f"[Orchestration] Review iteration #{attempt}/{payload.max_review_attempts}") + + context.set_custom_status(f"Requesting human feedback (Attempt {attempt}, timeout {payload.approval_timeout_seconds}s)") + + # Notify user for approval + yield context.call_activity( + "notify_user_for_approval", + input=content.model_dump() + ) + + logger.debug("[Orchestration] Waiting for human approval or timeout...") + + # Wait for approval event or timeout + approval_task: Task[Any] = context.wait_for_external_event(HUMAN_APPROVAL_EVENT) # type: ignore + timeout_task: Task[Any] = context.create_timer( # type: ignore + context.current_utc_datetime + timedelta(seconds=payload.approval_timeout_seconds) + ) + + # Race between approval and timeout + winner_task = yield when_any([approval_task, timeout_task]) # type: ignore + + if winner_task == approval_task: + # Approval received before timeout + logger.debug("[Orchestration] Received human approval event") + + context.set_custom_status("Content reviewed by human reviewer.") + + # Parse approval + approval_data: Any = approval_task.get_result() # type: ignore + logger.debug(f"[Orchestration] Approval data: {approval_data}") + + # Handle different formats of approval_data + if isinstance(approval_data, dict): + approval = HumanApproval.model_validate(approval_data) + elif isinstance(approval_data, str): + # Try to parse as boolean-like string + lower_data = approval_data.lower().strip() + if lower_data in {"true", "yes", 
"approved", "y", "1"}: + approval = HumanApproval(approved=True, feedback="") + elif lower_data in {"false", "no", "rejected", "n", "0"}: + approval = HumanApproval(approved=False, feedback="") + else: + approval = HumanApproval(approved=False, feedback=approval_data) + else: + approval = HumanApproval(approved=False, feedback=str(approval_data)) # type: ignore + + if approval.approved: + # Content approved - publish and return + logger.debug("[Orchestration] Content approved! Publishing...") + context.set_custom_status("Content approved by human reviewer. Publishing...") + publish_task: Task[Any] = context.call_activity( + "publish_content", + input=content.model_dump() + ) + yield publish_task + + logger.debug("[Orchestration] Content published successfully") + return {"content": content.content, "title": content.title} + + # Content rejected - incorporate feedback and regenerate + logger.debug(f"[Orchestration] Content rejected. Feedback: {approval.feedback}") + + # Check if we've exhausted attempts + if attempt >= payload.max_review_attempts: + context.set_custom_status("Max review attempts exhausted.") + # Max attempts exhausted + logger.error(f"[Orchestration] Max attempts ({payload.max_review_attempts}) exhausted") + break + + context.set_custom_status(f"Content rejected by human reviewer. Regenerating...") + + rewrite_prompt = ( + "The content was rejected by a human reviewer. Please rewrite the article incorporating their feedback.\n\n" + f"Human Feedback: {approval.feedback or 'No specific feedback provided.'}" + ) + + logger.debug("[Orchestration] Regenerating content with feedback...") + + logger.warning(f"Regenerating with ThreadID: {writer_thread.session_id}") + + rewrite_response: AgentResponse = yield writer.run( + messages=rewrite_prompt, + thread=writer_thread, + options={"response_format": GeneratedContent}, + ) + rewritten_content = cast(GeneratedContent, rewrite_response.value) + + if not isinstance(rewritten_content, GeneratedContent): + raise ValueError("Agent returned no content after rewrite.") + + content = rewritten_content + logger.debug(f"[Orchestration] Content regenerated: {content.title}") + + else: + # Timeout occurred + logger.error(f"[Orchestration] Approval timeout after {payload.approval_timeout_seconds}s") + + raise TimeoutError( + f"Human approval timed out after {payload.approval_timeout_seconds} second(s)." + ) + + # If we exit the loop without returning, max attempts were exhausted + context.set_custom_status("Max review attempts exhausted.") + raise RuntimeError( + f"Content could not be approved after {payload.max_review_attempts} iteration(s)." + ) + + +def get_worker( + taskhub: str | None = None, + endpoint: str | None = None, + log_handler: logging.Handler | None = None +) -> DurableTaskSchedulerWorker: + """Create a configured DurableTaskSchedulerWorker. 
+ + Args: + taskhub: Task hub name (defaults to TASKHUB env var or "default") + endpoint: Scheduler endpoint (defaults to ENDPOINT env var or "http://localhost:8080") + log_handler: Optional logging handler for worker logging + + Returns: + Configured DurableTaskSchedulerWorker instance + """ + taskhub_name = taskhub or os.getenv("TASKHUB", "default") + endpoint_url = endpoint or os.getenv("ENDPOINT", "http://localhost:8080") + + logger.debug(f"Using taskhub: {taskhub_name}") + logger.debug(f"Using endpoint: {endpoint_url}") + + credential = None if endpoint_url == "http://localhost:8080" else DefaultAzureCredential() + + return DurableTaskSchedulerWorker( + host_address=endpoint_url, + secure_channel=endpoint_url != "http://localhost:8080", + taskhub=taskhub_name, + token_credential=credential, + log_handler=log_handler + ) + + +def setup_worker(worker: DurableTaskSchedulerWorker) -> DurableAIAgentWorker: + """Set up the worker with agents, orchestrations, and activities registered. + + Args: + worker: The DurableTaskSchedulerWorker instance + + Returns: + DurableAIAgentWorker with agents, orchestrations, and activities registered + """ + # Wrap it with the agent worker + agent_worker = DurableAIAgentWorker(worker) + + # Create and register the writer agent + logger.debug("Creating and registering Writer agent...") + writer_agent = create_writer_agent() + agent_worker.add_agent(writer_agent) + + logger.debug(f"✓ Registered agent: {writer_agent.name}") + + # Register activity functions + logger.debug("Registering activity functions...") + worker.add_activity(notify_user_for_approval) # type: ignore + worker.add_activity(publish_content) # type: ignore + logger.debug(f"✓ Registered activity: notify_user_for_approval") + logger.debug(f"✓ Registered activity: publish_content") + + # Register the orchestration function + logger.debug("Registering orchestration function...") + worker.add_orchestrator(content_generation_hitl_orchestration) # type: ignore + logger.debug(f"✓ Registered orchestration: {content_generation_hitl_orchestration.__name__}") + + return agent_worker + + +async def main(): + """Main entry point for the worker process.""" + logger.debug("Starting Durable Task HITL Content Generation Worker...") + + # Create a worker using the helper function + worker = get_worker() + + # Setup worker with agents, orchestrations, and activities + setup_worker(worker) + + logger.debug("Worker is ready and listening for requests...") + logger.debug("Press Ctrl+C to stop.") + + try: + # Start the worker (this blocks until stopped) + worker.start() + + # Keep the worker running + while True: + await asyncio.sleep(1) + except KeyboardInterrupt: + logger.debug("Worker shutdown initiated") + + logger.debug("Worker stopped") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/getting_started/durabletask/README.md b/python/samples/getting_started/durabletask/README.md new file mode 100644 index 0000000000..8700380a14 --- /dev/null +++ b/python/samples/getting_started/durabletask/README.md @@ -0,0 +1,148 @@ +# Durable Task Samples + +This directory contains samples for durable agent hosting using the Durable Task Scheduler. These samples demonstrate the worker-client architecture pattern, enabling distributed agent execution with persistent conversation state. + +## Sample Catalog + +### Basic Patterns +- **[01_single_agent](01_single_agent/)**: Host a single conversational agent and interact with it via a client. 
Demonstrates basic worker-client architecture and agent state management.
+- **[02_multi_agent](02_multi_agent/)**: Host multiple domain-specific agents (physicist and chemist) and route requests to the appropriate agent based on the question topic.
+- **[03_single_agent_streaming](03_single_agent_streaming/)**: Enable reliable, resumable streaming using Redis Streams with agent response callbacks. Demonstrates non-blocking agent execution and cursor-based resumption for disconnected clients.
+
+### Orchestration Patterns
+- **[04_single_agent_orchestration_chaining](04_single_agent_orchestration_chaining/)**: Chain multiple invocations of the same agent using durable orchestration, preserving conversation context across sequential runs.
+- **[05_multi_agent_orchestration_concurrency](05_multi_agent_orchestration_concurrency/)**: Run multiple agents concurrently within an orchestration, aggregating their responses in parallel.
+- **[06_multi_agent_orchestration_conditionals](06_multi_agent_orchestration_conditionals/)**: Implement conditional branching in orchestrations with spam detection and email assistant agents. Demonstrates structured outputs with Pydantic models and activity functions for side effects.
+- **[07_single_agent_orchestration_hitl](07_single_agent_orchestration_hitl/)**: Human-in-the-loop pattern with external event handling, timeouts, and iterative refinement based on human feedback. Shows long-running workflows with external interactions.
+
+## Running the Samples
+
+These samples are designed to be run locally in a cloned repository.
+
+### Prerequisites
+
+The following prerequisites are required to run the samples:
+
+- [Python 3.10 or later](https://www.python.org/downloads/) (the samples use `X | Y` union syntax in annotations)
+- [Azure CLI](https://learn.microsoft.com/cli/azure/install-azure-cli) installed and authenticated (`az login`), or an API key for the Azure OpenAI service
+- [Azure OpenAI Service](https://learn.microsoft.com/azure/ai-services/openai/how-to/create-resource) with a deployed model (gpt-4o-mini or better is recommended)
+- [Durable Task Scheduler](https://learn.microsoft.com/azure/azure-functions/durable/durable-task-scheduler/develop-with-durable-task-scheduler) (local emulator or Azure-hosted)
+- [Docker](https://docs.docker.com/get-docker/) installed if running the Durable Task Scheduler emulator locally
+
+### Configuring RBAC Permissions for Azure OpenAI
+
+These samples are configured to access the Azure OpenAI model using Microsoft Entra ID (RBAC) authentication, so the identity running the Python app needs an appropriate role assignment on the Azure OpenAI resource.
+
+Below is an example of how to grant the current user access to the model; replace the `<...>` placeholders with your subscription, resource group, and Azure OpenAI resource names.
+
+Bash (Linux/macOS/WSL):
+
+```bash
+az role assignment create \
+  --assignee "yourname@contoso.com" \
+  --role "Cognitive Services OpenAI User" \
+  --scope /subscriptions/<subscription-id>/resourceGroups/<resource-group>/providers/Microsoft.CognitiveServices/accounts/<azure-openai-resource>
+```
+
+PowerShell:
+
+```powershell
+az role assignment create `
+  --assignee "yourname@contoso.com" `
+  --role "Cognitive Services OpenAI User" `
+  --scope /subscriptions/<subscription-id>/resourceGroups/<resource-group>/providers/Microsoft.CognitiveServices/accounts/<azure-openai-resource>
+```
+
+More information on how to configure RBAC permissions for Azure OpenAI can be found in the [Azure OpenAI documentation](https://learn.microsoft.com/azure/ai-services/openai/how-to/create-resource?pivots=cli).
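+
+If you're unsure which account name to pass to `--assignee`, you can look up the signed-in user with:
+
+```bash
+az ad signed-in-user show --query userPrincipalName -o tsv
+```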
+ +### Setting an API key for the Azure OpenAI service + +As an alternative to configuring Azure RBAC permissions, you can set an API key for the Azure OpenAI service by setting the `AZURE_OPENAI_API_KEY` environment variable. + +Bash (Linux/macOS/WSL): + +```bash +export AZURE_OPENAI_API_KEY="your-api-key" +``` + +PowerShell: + +```powershell +$env:AZURE_OPENAI_API_KEY="your-api-key" +``` + +### Start Durable Task Scheduler + +Most samples use the Durable Task Scheduler (DTS) to support hosted agents and durable orchestrations. DTS also allows you to view the status of orchestrations and their inputs and outputs from a web UI. + +To run the Durable Task Scheduler locally, you can use the following `docker` command: + +```bash +docker run -d --name dts-emulator -p 8080:8080 -p 8082:8082 mcr.microsoft.com/dts/dts-emulator:latest +``` + +The DTS dashboard will be available at `http://localhost:8082`. + +### Environment Configuration + +Each sample reads configuration from environment variables. You'll need to set the following environment variables: + +Bash (Linux/macOS/WSL): + +```bash +export AZURE_OPENAI_ENDPOINT="https://your-resource.openai.azure.com/" +export AZURE_OPENAI_CHAT_DEPLOYMENT_NAME="your-deployment-name" +``` + +PowerShell: + +```powershell +$env:AZURE_OPENAI_ENDPOINT="https://your-resource.openai.azure.com/" +$env:AZURE_OPENAI_CHAT_DEPLOYMENT_NAME="your-deployment-name" +``` + +### Installing Dependencies + +Navigate to the sample directory and install dependencies. For example: + +```bash +cd samples/getting_started/durabletask/01_single_agent +pip install -r requirements.txt +``` + +If you're using `uv` for package management: + +```bash +uv pip install -r requirements.txt +``` + +### Running the Samples + +Each sample follows a worker-client architecture. Most samples provide separate `worker.py` and `client.py` files, though some include a combined `sample.py` for convenience. + +**Running with separate worker and client:** + +In one terminal, start the worker: + +```bash +python worker.py +``` + +In another terminal, run the client: + +```bash +python client.py +``` + +**Running with combined sample:** + +```bash +python sample.py +``` + +### Viewing the Sample Output + +The sample output is displayed directly in the terminal where you ran the Python script. Agent responses are printed to stdout with log formatting for better readability. + +You can also see the state of agents and orchestrations in the Durable Task Scheduler dashboard at `http://localhost:8082`. 
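+
+For reference, the client side of each sample follows the same connection pattern as the worker. The sketch below is illustrative rather than code from the samples: it assumes `durabletask-azuremanaged` exposes a `DurableTaskSchedulerClient` whose constructor mirrors the worker's, and `"your_orchestration_name"` stands in for an orchestration registered by a running worker.
+
+```python
+# Illustrative sketch of the client-side connection pattern (not the samples' code).
+import os
+
+from azure.identity import DefaultAzureCredential
+from durabletask.azuremanaged.client import DurableTaskSchedulerClient
+
+endpoint = os.getenv("ENDPOINT", "http://localhost:8080")
+is_local = endpoint == "http://localhost:8080"
+
+# Same taskhub/endpoint/credential conventions as the worker
+client = DurableTaskSchedulerClient(
+    host_address=endpoint,
+    secure_channel=not is_local,
+    taskhub=os.getenv("TASKHUB", "default"),
+    token_credential=None if is_local else DefaultAzureCredential(),
+)
+
+# Schedule an orchestration registered by the worker and wait for the result
+instance_id = client.schedule_new_orchestration("your_orchestration_name", input="hello")
+state = client.wait_for_orchestration_completion(instance_id, timeout=120)
+print(state.serialized_output if state else "no result")
+```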
+ diff --git a/python/uv.lock b/python/uv.lock index 082d1a49f0..f6c5852446 100644 --- a/python/uv.lock +++ b/python/uv.lock @@ -39,6 +39,7 @@ members = [ "agent-framework-core", "agent-framework-declarative", "agent-framework-devui", + "agent-framework-durabletask", "agent-framework-foundry-local", "agent-framework-github-copilot", "agent-framework-lab", @@ -258,6 +259,7 @@ version = "1.0.0b260127" source = { editable = "packages/azurefunctions" } dependencies = [ { name = "agent-framework-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "agent-framework-durabletask", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "azure-functions", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "azure-functions-durable", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] @@ -270,6 +272,7 @@ dev = [ [package.metadata] requires-dist = [ { name = "agent-framework-core", editable = "packages/core" }, + { name = "agent-framework-durabletask", editable = "packages/durabletask" }, { name = "azure-functions" }, { name = "azure-functions-durable" }, ] @@ -353,6 +356,7 @@ all = [ { name = "agent-framework-copilotstudio", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "agent-framework-declarative", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "agent-framework-devui", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "agent-framework-durabletask", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "agent-framework-github-copilot", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "agent-framework-lab", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "agent-framework-mem0", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -373,6 +377,7 @@ requires-dist = [ { name = "agent-framework-copilotstudio", marker = "extra == 'all'", editable = "packages/copilotstudio" }, { name = "agent-framework-declarative", marker = "extra == 'all'", editable = "packages/declarative" }, { name = "agent-framework-devui", marker = "extra == 'all'", editable = "packages/devui" }, + { name = "agent-framework-durabletask", marker = "extra == 'all'", editable = "packages/durabletask" }, { name = "agent-framework-github-copilot", marker = "extra == 'all'", editable = "packages/github_copilot" }, { name = "agent-framework-lab", marker = "extra == 'all'", editable = "packages/lab" }, { name = "agent-framework-mem0", marker = "extra == 'all'", editable = "packages/mem0" }, @@ -451,6 +456,31 @@ requires-dist = [ ] provides-extras = ["dev", "all"] +[[package]] +name = "agent-framework-durabletask" +version = "0.0.2b260126" +source = { editable = "packages/durabletask" } +dependencies = [ + { name = "agent-framework-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "durabletask", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "durabletask-azuremanaged", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, +] + 
+[package.dev-dependencies] +dev = [ + { name = "types-python-dateutil", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, +] + +[package.metadata] +requires-dist = [ + { name = "agent-framework-core", editable = "packages/core" }, + { name = "durabletask", specifier = ">=1.3.0" }, + { name = "durabletask-azuremanaged", specifier = ">=1.3.0" }, +] + +[package.metadata.requires-dev] +dev = [{ name = "types-python-dateutil", specifier = ">=2.9.0" }] + [[package]] name = "agent-framework-foundry-local" version = "1.0.0b260127" @@ -932,6 +962,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/fe/ba/e2081de779ca30d473f21f5b30e0e737c438205440784c7dfc81efc2b029/async_timeout-5.0.1-py3-none-any.whl", hash = "sha256:39e3809566ff85354557ec2398b55e096c8364bacac9405a7a1fa429e77fe76c", size = 6233, upload-time = "2024-11-06T16:41:37.9Z" }, ] +[[package]] +name = "asyncio" +version = "4.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/71/ea/26c489a11f7ca862d5705db67683a7361ce11c23a7b98fc6c2deaeccede2/asyncio-4.0.0.tar.gz", hash = "sha256:570cd9e50db83bc1629152d4d0b7558d6451bb1bfd5dfc2e935d96fc2f40329b", size = 5371, upload-time = "2025-08-05T02:51:46.605Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/57/64/eff2564783bd650ca25e15938d1c5b459cda997574a510f7de69688cb0b4/asyncio-4.0.0-py3-none-any.whl", hash = "sha256:c1eddb0659231837046809e68103969b2bef8b0400d59cfa6363f6b5ed8cc88b", size = 5555, upload-time = "2025-08-05T02:51:45.767Z" }, +] + [[package]] name = "attrs" version = "25.4.0" @@ -1387,7 +1426,7 @@ name = "clr-loader" version = "0.2.10" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "cffi", marker = "(python_full_version < '3.14' and sys_platform == 'darwin') or (python_full_version < '3.14' and sys_platform == 'linux') or (python_full_version < '3.14' and sys_platform == 'win32')" }, + { name = "cffi", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/18/24/c12faf3f61614b3131b5c98d3bf0d376b49c7feaa73edca559aeb2aee080/clr_loader-0.2.10.tar.gz", hash = "sha256:81f114afbc5005bafc5efe5af1341d400e22137e275b042a8979f3feb9fc9446", size = 83605, upload-time = "2026-01-03T23:13:06.984Z" } wheels = [ @@ -1573,101 +1612,101 @@ wheels = [ [[package]] name = "coverage" -version = "7.13.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/23/f9/e92df5e07f3fc8d4c7f9a0f146ef75446bf870351cd37b788cf5897f8079/coverage-7.13.1.tar.gz", hash = "sha256:b7593fe7eb5feaa3fbb461ac79aac9f9fc0387a5ca8080b0c6fe2ca27b091afd", size = 825862, upload-time = "2025-12-28T15:42:56.969Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/2d/9a/3742e58fd04b233df95c012ee9f3dfe04708a5e1d32613bd2d47d4e1be0d/coverage-7.13.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e1fa280b3ad78eea5be86f94f461c04943d942697e0dac889fa18fff8f5f9147", size = 218633, upload-time = "2025-12-28T15:40:10.165Z" }, - { url = "https://files.pythonhosted.org/packages/7e/45/7e6bdc94d89cd7c8017ce735cf50478ddfe765d4fbf0c24d71d30ea33d7a/coverage-7.13.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c3d8c679607220979434f494b139dfb00131ebf70bb406553d69c1ff01a5c33d", size = 219147, upload-time = "2025-12-28T15:40:12.069Z" }, - { url = 
"https://files.pythonhosted.org/packages/f7/38/0d6a258625fd7f10773fe94097dc16937a5f0e3e0cdf3adef67d3ac6baef/coverage-7.13.1-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:339dc63b3eba969067b00f41f15ad161bf2946613156fb131266d8debc8e44d0", size = 245894, upload-time = "2025-12-28T15:40:13.556Z" }, - { url = "https://files.pythonhosted.org/packages/27/58/409d15ea487986994cbd4d06376e9860e9b157cfbfd402b1236770ab8dd2/coverage-7.13.1-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:db622b999ffe49cb891f2fff3b340cdc2f9797d01a0a202a0973ba2562501d90", size = 247721, upload-time = "2025-12-28T15:40:15.37Z" }, - { url = "https://files.pythonhosted.org/packages/da/bf/6e8056a83fd7a96c93341f1ffe10df636dd89f26d5e7b9ca511ce3bcf0df/coverage-7.13.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d1443ba9acbb593fa7c1c29e011d7c9761545fe35e7652e85ce7f51a16f7e08d", size = 249585, upload-time = "2025-12-28T15:40:17.226Z" }, - { url = "https://files.pythonhosted.org/packages/f4/15/e1daff723f9f5959acb63cbe35b11203a9df77ee4b95b45fffd38b318390/coverage-7.13.1-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:c832ec92c4499ac463186af72f9ed4d8daec15499b16f0a879b0d1c8e5cf4a3b", size = 246597, upload-time = "2025-12-28T15:40:19.028Z" }, - { url = "https://files.pythonhosted.org/packages/74/a6/1efd31c5433743a6ddbc9d37ac30c196bb07c7eab3d74fbb99b924c93174/coverage-7.13.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:562ec27dfa3f311e0db1ba243ec6e5f6ab96b1edfcfc6cf86f28038bc4961ce6", size = 247626, upload-time = "2025-12-28T15:40:20.846Z" }, - { url = "https://files.pythonhosted.org/packages/6d/9f/1609267dd3e749f57fdd66ca6752567d1c13b58a20a809dc409b263d0b5f/coverage-7.13.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:4de84e71173d4dada2897e5a0e1b7877e5eefbfe0d6a44edee6ce31d9b8ec09e", size = 245629, upload-time = "2025-12-28T15:40:22.397Z" }, - { url = "https://files.pythonhosted.org/packages/e2/f6/6815a220d5ec2466383d7cc36131b9fa6ecbe95c50ec52a631ba733f306a/coverage-7.13.1-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:a5a68357f686f8c4d527a2dc04f52e669c2fc1cbde38f6f7eb6a0e58cbd17cae", size = 245901, upload-time = "2025-12-28T15:40:23.836Z" }, - { url = "https://files.pythonhosted.org/packages/ac/58/40576554cd12e0872faf6d2c0eb3bc85f71d78427946ddd19ad65201e2c0/coverage-7.13.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:77cc258aeb29a3417062758975521eae60af6f79e930d6993555eeac6a8eac29", size = 246505, upload-time = "2025-12-28T15:40:25.421Z" }, - { url = "https://files.pythonhosted.org/packages/3b/77/9233a90253fba576b0eee81707b5781d0e21d97478e5377b226c5b096c0f/coverage-7.13.1-cp310-cp310-win32.whl", hash = "sha256:bb4f8c3c9a9f34423dba193f241f617b08ffc63e27f67159f60ae6baf2dcfe0f", size = 221257, upload-time = "2025-12-28T15:40:27.217Z" }, - { url = "https://files.pythonhosted.org/packages/e0/43/e842ff30c1a0a623ec80db89befb84a3a7aad7bfe44a6ea77d5a3e61fedd/coverage-7.13.1-cp310-cp310-win_amd64.whl", hash = "sha256:c8e2706ceb622bc63bac98ebb10ef5da80ed70fbd8a7999a5076de3afaef0fb1", size = 222191, upload-time = "2025-12-28T15:40:28.916Z" }, - { url = "https://files.pythonhosted.org/packages/b4/9b/77baf488516e9ced25fc215a6f75d803493fc3f6a1a1227ac35697910c2a/coverage-7.13.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1a55d509a1dc5a5b708b5dad3b5334e07a16ad4c2185e27b40e4dba796ab7f88", size = 218755, upload-time = 
"2025-12-28T15:40:30.812Z" }, - { url = "https://files.pythonhosted.org/packages/d7/cd/7ab01154e6eb79ee2fab76bf4d89e94c6648116557307ee4ebbb85e5c1bf/coverage-7.13.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4d010d080c4888371033baab27e47c9df7d6fb28d0b7b7adf85a4a49be9298b3", size = 219257, upload-time = "2025-12-28T15:40:32.333Z" }, - { url = "https://files.pythonhosted.org/packages/01/d5/b11ef7863ffbbdb509da0023fad1e9eda1c0eaea61a6d2ea5b17d4ac706e/coverage-7.13.1-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:d938b4a840fb1523b9dfbbb454f652967f18e197569c32266d4d13f37244c3d9", size = 249657, upload-time = "2025-12-28T15:40:34.1Z" }, - { url = "https://files.pythonhosted.org/packages/f7/7c/347280982982383621d29b8c544cf497ae07ac41e44b1ca4903024131f55/coverage-7.13.1-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:bf100a3288f9bb7f919b87eb84f87101e197535b9bd0e2c2b5b3179633324fee", size = 251581, upload-time = "2025-12-28T15:40:36.131Z" }, - { url = "https://files.pythonhosted.org/packages/82/f6/ebcfed11036ade4c0d75fa4453a6282bdd225bc073862766eec184a4c643/coverage-7.13.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ef6688db9bf91ba111ae734ba6ef1a063304a881749726e0d3575f5c10a9facf", size = 253691, upload-time = "2025-12-28T15:40:37.626Z" }, - { url = "https://files.pythonhosted.org/packages/02/92/af8f5582787f5d1a8b130b2dcba785fa5e9a7a8e121a0bb2220a6fdbdb8a/coverage-7.13.1-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:0b609fc9cdbd1f02e51f67f51e5aee60a841ef58a68d00d5ee2c0faf357481a3", size = 249799, upload-time = "2025-12-28T15:40:39.47Z" }, - { url = "https://files.pythonhosted.org/packages/24/aa/0e39a2a3b16eebf7f193863323edbff38b6daba711abaaf807d4290cf61a/coverage-7.13.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c43257717611ff5e9a1d79dce8e47566235ebda63328718d9b65dd640bc832ef", size = 251389, upload-time = "2025-12-28T15:40:40.954Z" }, - { url = "https://files.pythonhosted.org/packages/73/46/7f0c13111154dc5b978900c0ccee2e2ca239b910890e674a77f1363d483e/coverage-7.13.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e09fbecc007f7b6afdfb3b07ce5bd9f8494b6856dd4f577d26c66c391b829851", size = 249450, upload-time = "2025-12-28T15:40:42.489Z" }, - { url = "https://files.pythonhosted.org/packages/ac/ca/e80da6769e8b669ec3695598c58eef7ad98b0e26e66333996aee6316db23/coverage-7.13.1-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:a03a4f3a19a189919c7055098790285cc5c5b0b3976f8d227aea39dbf9f8bfdb", size = 249170, upload-time = "2025-12-28T15:40:44.279Z" }, - { url = "https://files.pythonhosted.org/packages/af/18/9e29baabdec1a8644157f572541079b4658199cfd372a578f84228e860de/coverage-7.13.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3820778ea1387c2b6a818caec01c63adc5b3750211af6447e8dcfb9b6f08dbba", size = 250081, upload-time = "2025-12-28T15:40:45.748Z" }, - { url = "https://files.pythonhosted.org/packages/00/f8/c3021625a71c3b2f516464d322e41636aea381018319050a8114105872ee/coverage-7.13.1-cp311-cp311-win32.whl", hash = "sha256:ff10896fa55167371960c5908150b434b71c876dfab97b69478f22c8b445ea19", size = 221281, upload-time = "2025-12-28T15:40:47.232Z" }, - { url = "https://files.pythonhosted.org/packages/27/56/c216625f453df6e0559ed666d246fcbaaa93f3aa99eaa5080cea1229aa3d/coverage-7.13.1-cp311-cp311-win_amd64.whl", hash = "sha256:a998cc0aeeea4c6d5622a3754da5a493055d2d95186bad877b0a34ea6e6dbe0a", size = 222215, 
upload-time = "2025-12-28T15:40:49.19Z" }, - { url = "https://files.pythonhosted.org/packages/5c/9a/be342e76f6e531cae6406dc46af0d350586f24d9b67fdfa6daee02df71af/coverage-7.13.1-cp311-cp311-win_arm64.whl", hash = "sha256:fea07c1a39a22614acb762e3fbbb4011f65eedafcb2948feeef641ac78b4ee5c", size = 220886, upload-time = "2025-12-28T15:40:51.067Z" }, - { url = "https://files.pythonhosted.org/packages/ce/8a/87af46cccdfa78f53db747b09f5f9a21d5fc38d796834adac09b30a8ce74/coverage-7.13.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6f34591000f06e62085b1865c9bc5f7858df748834662a51edadfd2c3bfe0dd3", size = 218927, upload-time = "2025-12-28T15:40:52.814Z" }, - { url = "https://files.pythonhosted.org/packages/82/a8/6e22fdc67242a4a5a153f9438d05944553121c8f4ba70cb072af4c41362e/coverage-7.13.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b67e47c5595b9224599016e333f5ec25392597a89d5744658f837d204e16c63e", size = 219288, upload-time = "2025-12-28T15:40:54.262Z" }, - { url = "https://files.pythonhosted.org/packages/d0/0a/853a76e03b0f7c4375e2ca025df45c918beb367f3e20a0a8e91967f6e96c/coverage-7.13.1-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:3e7b8bd70c48ffb28461ebe092c2345536fb18bbbf19d287c8913699735f505c", size = 250786, upload-time = "2025-12-28T15:40:56.059Z" }, - { url = "https://files.pythonhosted.org/packages/ea/b4/694159c15c52b9f7ec7adf49d50e5f8ee71d3e9ef38adb4445d13dd56c20/coverage-7.13.1-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:c223d078112e90dc0e5c4e35b98b9584164bea9fbbd221c0b21c5241f6d51b62", size = 253543, upload-time = "2025-12-28T15:40:57.585Z" }, - { url = "https://files.pythonhosted.org/packages/96/b2/7f1f0437a5c855f87e17cf5d0dc35920b6440ff2b58b1ba9788c059c26c8/coverage-7.13.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:794f7c05af0763b1bbd1b9e6eff0e52ad068be3b12cd96c87de037b01390c968", size = 254635, upload-time = "2025-12-28T15:40:59.443Z" }, - { url = "https://files.pythonhosted.org/packages/e9/d1/73c3fdb8d7d3bddd9473c9c6a2e0682f09fc3dfbcb9c3f36412a7368bcab/coverage-7.13.1-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:0642eae483cc8c2902e4af7298bf886d605e80f26382124cddc3967c2a3df09e", size = 251202, upload-time = "2025-12-28T15:41:01.328Z" }, - { url = "https://files.pythonhosted.org/packages/66/3c/f0edf75dcc152f145d5598329e864bbbe04ab78660fe3e8e395f9fff010f/coverage-7.13.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9f5e772ed5fef25b3de9f2008fe67b92d46831bd2bc5bdc5dd6bfd06b83b316f", size = 252566, upload-time = "2025-12-28T15:41:03.319Z" }, - { url = "https://files.pythonhosted.org/packages/17/b3/e64206d3c5f7dcbceafd14941345a754d3dbc78a823a6ed526e23b9cdaab/coverage-7.13.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:45980ea19277dc0a579e432aef6a504fe098ef3a9032ead15e446eb0f1191aee", size = 250711, upload-time = "2025-12-28T15:41:06.411Z" }, - { url = "https://files.pythonhosted.org/packages/dc/ad/28a3eb970a8ef5b479ee7f0c484a19c34e277479a5b70269dc652b730733/coverage-7.13.1-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:e4f18eca6028ffa62adbd185a8f1e1dd242f2e68164dba5c2b74a5204850b4cf", size = 250278, upload-time = "2025-12-28T15:41:08.285Z" }, - { url = "https://files.pythonhosted.org/packages/54/e3/c8f0f1a93133e3e1291ca76cbb63565bd4b5c5df63b141f539d747fff348/coverage-7.13.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:f8dca5590fec7a89ed6826fce625595279e586ead52e9e958d3237821fbc750c", size = 252154, upload-time = "2025-12-28T15:41:09.969Z" }, - { url = "https://files.pythonhosted.org/packages/d0/bf/9939c5d6859c380e405b19e736321f1c7d402728792f4c752ad1adcce005/coverage-7.13.1-cp312-cp312-win32.whl", hash = "sha256:ff86d4e85188bba72cfb876df3e11fa243439882c55957184af44a35bd5880b7", size = 221487, upload-time = "2025-12-28T15:41:11.468Z" }, - { url = "https://files.pythonhosted.org/packages/fa/dc/7282856a407c621c2aad74021680a01b23010bb8ebf427cf5eacda2e876f/coverage-7.13.1-cp312-cp312-win_amd64.whl", hash = "sha256:16cc1da46c04fb0fb128b4dc430b78fa2aba8a6c0c9f8eb391fd5103409a6ac6", size = 222299, upload-time = "2025-12-28T15:41:13.386Z" }, - { url = "https://files.pythonhosted.org/packages/10/79/176a11203412c350b3e9578620013af35bcdb79b651eb976f4a4b32044fa/coverage-7.13.1-cp312-cp312-win_arm64.whl", hash = "sha256:8d9bc218650022a768f3775dd7fdac1886437325d8d295d923ebcfef4892ad5c", size = 220941, upload-time = "2025-12-28T15:41:14.975Z" }, - { url = "https://files.pythonhosted.org/packages/a3/a4/e98e689347a1ff1a7f67932ab535cef82eb5e78f32a9e4132e114bbb3a0a/coverage-7.13.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:cb237bfd0ef4d5eb6a19e29f9e528ac67ac3be932ea6b44fb6cc09b9f3ecff78", size = 218951, upload-time = "2025-12-28T15:41:16.653Z" }, - { url = "https://files.pythonhosted.org/packages/32/33/7cbfe2bdc6e2f03d6b240d23dc45fdaf3fd270aaf2d640be77b7f16989ab/coverage-7.13.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1dcb645d7e34dcbcc96cd7c132b1fc55c39263ca62eb961c064eb3928997363b", size = 219325, upload-time = "2025-12-28T15:41:18.609Z" }, - { url = "https://files.pythonhosted.org/packages/59/f6/efdabdb4929487baeb7cb2a9f7dac457d9356f6ad1b255be283d58b16316/coverage-7.13.1-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:3d42df8201e00384736f0df9be2ced39324c3907607d17d50d50116c989d84cd", size = 250309, upload-time = "2025-12-28T15:41:20.629Z" }, - { url = "https://files.pythonhosted.org/packages/12/da/91a52516e9d5aea87d32d1523f9cdcf7a35a3b298e6be05d6509ba3cfab2/coverage-7.13.1-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:fa3edde1aa8807de1d05934982416cb3ec46d1d4d91e280bcce7cca01c507992", size = 252907, upload-time = "2025-12-28T15:41:22.257Z" }, - { url = "https://files.pythonhosted.org/packages/75/38/f1ea837e3dc1231e086db1638947e00d264e7e8c41aa8ecacf6e1e0c05f4/coverage-7.13.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9edd0e01a343766add6817bc448408858ba6b489039eaaa2018474e4001651a4", size = 254148, upload-time = "2025-12-28T15:41:23.87Z" }, - { url = "https://files.pythonhosted.org/packages/7f/43/f4f16b881aaa34954ba446318dea6b9ed5405dd725dd8daac2358eda869a/coverage-7.13.1-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:985b7836931d033570b94c94713c6dba5f9d3ff26045f72c3e5dbc5fe3361e5a", size = 250515, upload-time = "2025-12-28T15:41:25.437Z" }, - { url = "https://files.pythonhosted.org/packages/84/34/8cba7f00078bd468ea914134e0144263194ce849ec3baad187ffb6203d1c/coverage-7.13.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ffed1e4980889765c84a5d1a566159e363b71d6b6fbaf0bebc9d3c30bc016766", size = 252292, upload-time = "2025-12-28T15:41:28.459Z" }, - { url = "https://files.pythonhosted.org/packages/8c/a4/cffac66c7652d84ee4ac52d3ccb94c015687d3b513f9db04bfcac2ac800d/coverage-7.13.1-cp313-cp313-musllinux_1_2_i686.whl", hash 
= "sha256:8842af7f175078456b8b17f1b73a0d16a65dcbdc653ecefeb00a56b3c8c298c4", size = 250242, upload-time = "2025-12-28T15:41:30.02Z" }, - { url = "https://files.pythonhosted.org/packages/f4/78/9a64d462263dde416f3c0067efade7b52b52796f489b1037a95b0dc389c9/coverage-7.13.1-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:ccd7a6fca48ca9c131d9b0a2972a581e28b13416fc313fb98b6d24a03ce9a398", size = 250068, upload-time = "2025-12-28T15:41:32.007Z" }, - { url = "https://files.pythonhosted.org/packages/69/c8/a8994f5fece06db7c4a97c8fc1973684e178599b42e66280dded0524ef00/coverage-7.13.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0403f647055de2609be776965108447deb8e384fe4a553c119e3ff6bfbab4784", size = 251846, upload-time = "2025-12-28T15:41:33.946Z" }, - { url = "https://files.pythonhosted.org/packages/cc/f7/91fa73c4b80305c86598a2d4e54ba22df6bf7d0d97500944af7ef155d9f7/coverage-7.13.1-cp313-cp313-win32.whl", hash = "sha256:549d195116a1ba1e1ae2f5ca143f9777800f6636eab917d4f02b5310d6d73461", size = 221512, upload-time = "2025-12-28T15:41:35.519Z" }, - { url = "https://files.pythonhosted.org/packages/45/0b/0768b4231d5a044da8f75e097a8714ae1041246bb765d6b5563bab456735/coverage-7.13.1-cp313-cp313-win_amd64.whl", hash = "sha256:5899d28b5276f536fcf840b18b61a9fce23cc3aec1d114c44c07fe94ebeaa500", size = 222321, upload-time = "2025-12-28T15:41:37.371Z" }, - { url = "https://files.pythonhosted.org/packages/9b/b8/bdcb7253b7e85157282450262008f1366aa04663f3e3e4c30436f596c3e2/coverage-7.13.1-cp313-cp313-win_arm64.whl", hash = "sha256:868a2fae76dfb06e87291bcbd4dcbcc778a8500510b618d50496e520bd94d9b9", size = 220949, upload-time = "2025-12-28T15:41:39.553Z" }, - { url = "https://files.pythonhosted.org/packages/70/52/f2be52cc445ff75ea8397948c96c1b4ee14f7f9086ea62fc929c5ae7b717/coverage-7.13.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:67170979de0dacac3f3097d02b0ad188d8edcea44ccc44aaa0550af49150c7dc", size = 219643, upload-time = "2025-12-28T15:41:41.567Z" }, - { url = "https://files.pythonhosted.org/packages/47/79/c85e378eaa239e2edec0c5523f71542c7793fe3340954eafb0bc3904d32d/coverage-7.13.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:f80e2bb21bfab56ed7405c2d79d34b5dc0bc96c2c1d2a067b643a09fb756c43a", size = 219997, upload-time = "2025-12-28T15:41:43.418Z" }, - { url = "https://files.pythonhosted.org/packages/fe/9b/b1ade8bfb653c0bbce2d6d6e90cc6c254cbb99b7248531cc76253cb4da6d/coverage-7.13.1-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:f83351e0f7dcdb14d7326c3d8d8c4e915fa685cbfdc6281f9470d97a04e9dfe4", size = 261296, upload-time = "2025-12-28T15:41:45.207Z" }, - { url = "https://files.pythonhosted.org/packages/1f/af/ebf91e3e1a2473d523e87e87fd8581e0aa08741b96265730e2d79ce78d8d/coverage-7.13.1-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:bb3f6562e89bad0110afbe64e485aac2462efdce6232cdec7862a095dc3412f6", size = 263363, upload-time = "2025-12-28T15:41:47.163Z" }, - { url = "https://files.pythonhosted.org/packages/c4/8b/fb2423526d446596624ac7fde12ea4262e66f86f5120114c3cfd0bb2befa/coverage-7.13.1-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:77545b5dcda13b70f872c3b5974ac64c21d05e65b1590b441c8560115dc3a0d1", size = 265783, upload-time = "2025-12-28T15:41:49.03Z" }, - { url = 
"https://files.pythonhosted.org/packages/9b/26/ef2adb1e22674913b89f0fe7490ecadcef4a71fa96f5ced90c60ec358789/coverage-7.13.1-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:a4d240d260a1aed814790bbe1f10a5ff31ce6c21bc78f0da4a1e8268d6c80dbd", size = 260508, upload-time = "2025-12-28T15:41:51.035Z" }, - { url = "https://files.pythonhosted.org/packages/ce/7d/f0f59b3404caf662e7b5346247883887687c074ce67ba453ea08c612b1d5/coverage-7.13.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:d2287ac9360dec3837bfdad969963a5d073a09a85d898bd86bea82aa8876ef3c", size = 263357, upload-time = "2025-12-28T15:41:52.631Z" }, - { url = "https://files.pythonhosted.org/packages/1a/b1/29896492b0b1a047604d35d6fa804f12818fa30cdad660763a5f3159e158/coverage-7.13.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:0d2c11f3ea4db66b5cbded23b20185c35066892c67d80ec4be4bab257b9ad1e0", size = 260978, upload-time = "2025-12-28T15:41:54.589Z" }, - { url = "https://files.pythonhosted.org/packages/48/f2/971de1238a62e6f0a4128d37adadc8bb882ee96afbe03ff1570291754629/coverage-7.13.1-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:3fc6a169517ca0d7ca6846c3c5392ef2b9e38896f61d615cb75b9e7134d4ee1e", size = 259877, upload-time = "2025-12-28T15:41:56.263Z" }, - { url = "https://files.pythonhosted.org/packages/6a/fc/0474efcbb590ff8628830e9aaec5f1831594874360e3251f1fdec31d07a3/coverage-7.13.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:d10a2ed46386e850bb3de503a54f9fe8192e5917fcbb143bfef653a9355e9a53", size = 262069, upload-time = "2025-12-28T15:41:58.093Z" }, - { url = "https://files.pythonhosted.org/packages/88/4f/3c159b7953db37a7b44c0eab8a95c37d1aa4257c47b4602c04022d5cb975/coverage-7.13.1-cp313-cp313t-win32.whl", hash = "sha256:75a6f4aa904301dab8022397a22c0039edc1f51e90b83dbd4464b8a38dc87842", size = 222184, upload-time = "2025-12-28T15:41:59.763Z" }, - { url = "https://files.pythonhosted.org/packages/58/a5/6b57d28f81417f9335774f20679d9d13b9a8fb90cd6160957aa3b54a2379/coverage-7.13.1-cp313-cp313t-win_amd64.whl", hash = "sha256:309ef5706e95e62578cda256b97f5e097916a2c26247c287bbe74794e7150df2", size = 223250, upload-time = "2025-12-28T15:42:01.52Z" }, - { url = "https://files.pythonhosted.org/packages/81/7c/160796f3b035acfbb58be80e02e484548595aa67e16a6345e7910ace0a38/coverage-7.13.1-cp313-cp313t-win_arm64.whl", hash = "sha256:92f980729e79b5d16d221038dbf2e8f9a9136afa072f9d5d6ed4cb984b126a09", size = 221521, upload-time = "2025-12-28T15:42:03.275Z" }, - { url = "https://files.pythonhosted.org/packages/aa/8e/ba0e597560c6563fc0adb902fda6526df5d4aa73bb10adf0574d03bd2206/coverage-7.13.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:97ab3647280d458a1f9adb85244e81587505a43c0c7cff851f5116cd2814b894", size = 218996, upload-time = "2025-12-28T15:42:04.978Z" }, - { url = "https://files.pythonhosted.org/packages/6b/8e/764c6e116f4221dc7aa26c4061181ff92edb9c799adae6433d18eeba7a14/coverage-7.13.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:8f572d989142e0908e6acf57ad1b9b86989ff057c006d13b76c146ec6a20216a", size = 219326, upload-time = "2025-12-28T15:42:06.691Z" }, - { url = "https://files.pythonhosted.org/packages/4f/a6/6130dc6d8da28cdcbb0f2bf8865aeca9b157622f7c0031e48c6cf9a0e591/coverage-7.13.1-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:d72140ccf8a147e94274024ff6fd8fb7811354cf7ef88b1f0a988ebaa5bc774f", size = 250374, upload-time = "2025-12-28T15:42:08.786Z" }, - { url = 
"https://files.pythonhosted.org/packages/82/2b/783ded568f7cd6b677762f780ad338bf4b4750205860c17c25f7c708995e/coverage-7.13.1-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:d3c9f051b028810f5a87c88e5d6e9af3c0ff32ef62763bf15d29f740453ca909", size = 252882, upload-time = "2025-12-28T15:42:10.515Z" }, - { url = "https://files.pythonhosted.org/packages/cd/b2/9808766d082e6a4d59eb0cc881a57fc1600eb2c5882813eefff8254f71b5/coverage-7.13.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f398ba4df52d30b1763f62eed9de5620dcde96e6f491f4c62686736b155aa6e4", size = 254218, upload-time = "2025-12-28T15:42:12.208Z" }, - { url = "https://files.pythonhosted.org/packages/44/ea/52a985bb447c871cb4d2e376e401116520991b597c85afdde1ea9ef54f2c/coverage-7.13.1-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:132718176cc723026d201e347f800cd1a9e4b62ccd3f82476950834dad501c75", size = 250391, upload-time = "2025-12-28T15:42:14.21Z" }, - { url = "https://files.pythonhosted.org/packages/7f/1d/125b36cc12310718873cfc8209ecfbc1008f14f4f5fa0662aa608e579353/coverage-7.13.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:9e549d642426e3579b3f4b92d0431543b012dcb6e825c91619d4e93b7363c3f9", size = 252239, upload-time = "2025-12-28T15:42:16.292Z" }, - { url = "https://files.pythonhosted.org/packages/6a/16/10c1c164950cade470107f9f14bbac8485f8fb8515f515fca53d337e4a7f/coverage-7.13.1-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:90480b2134999301eea795b3a9dbf606c6fbab1b489150c501da84a959442465", size = 250196, upload-time = "2025-12-28T15:42:18.54Z" }, - { url = "https://files.pythonhosted.org/packages/2a/c6/cd860fac08780c6fd659732f6ced1b40b79c35977c1356344e44d72ba6c4/coverage-7.13.1-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:e825dbb7f84dfa24663dd75835e7257f8882629fc11f03ecf77d84a75134b864", size = 250008, upload-time = "2025-12-28T15:42:20.365Z" }, - { url = "https://files.pythonhosted.org/packages/f0/3a/a8c58d3d38f82a5711e1e0a67268362af48e1a03df27c03072ac30feefcf/coverage-7.13.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:623dcc6d7a7ba450bbdbeedbaa0c42b329bdae16491af2282f12a7e809be7eb9", size = 251671, upload-time = "2025-12-28T15:42:22.114Z" }, - { url = "https://files.pythonhosted.org/packages/f0/bc/fd4c1da651d037a1e3d53e8cb3f8182f4b53271ffa9a95a2e211bacc0349/coverage-7.13.1-cp314-cp314-win32.whl", hash = "sha256:6e73ebb44dca5f708dc871fe0b90cf4cff1a13f9956f747cc87b535a840386f5", size = 221777, upload-time = "2025-12-28T15:42:23.919Z" }, - { url = "https://files.pythonhosted.org/packages/4b/50/71acabdc8948464c17e90b5ffd92358579bd0910732c2a1c9537d7536aa6/coverage-7.13.1-cp314-cp314-win_amd64.whl", hash = "sha256:be753b225d159feb397bd0bf91ae86f689bad0da09d3b301478cd39b878ab31a", size = 222592, upload-time = "2025-12-28T15:42:25.619Z" }, - { url = "https://files.pythonhosted.org/packages/f7/c8/a6fb943081bb0cc926499c7907731a6dc9efc2cbdc76d738c0ab752f1a32/coverage-7.13.1-cp314-cp314-win_arm64.whl", hash = "sha256:228b90f613b25ba0019361e4ab81520b343b622fc657daf7e501c4ed6a2366c0", size = 221169, upload-time = "2025-12-28T15:42:27.629Z" }, - { url = "https://files.pythonhosted.org/packages/16/61/d5b7a0a0e0e40d62e59bc8c7aa1afbd86280d82728ba97f0673b746b78e2/coverage-7.13.1-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:60cfb538fe9ef86e5b2ab0ca8fc8d62524777f6c611dcaf76dc16fbe9b8e698a", size = 219730, upload-time = "2025-12-28T15:42:29.306Z" }, - { url = 
"https://files.pythonhosted.org/packages/a3/2c/8881326445fd071bb49514d1ce97d18a46a980712b51fee84f9ab42845b4/coverage-7.13.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:57dfc8048c72ba48a8c45e188d811e5efd7e49b387effc8fb17e97936dde5bf6", size = 220001, upload-time = "2025-12-28T15:42:31.319Z" }, - { url = "https://files.pythonhosted.org/packages/b5/d7/50de63af51dfa3a7f91cc37ad8fcc1e244b734232fbc8b9ab0f3c834a5cd/coverage-7.13.1-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:3f2f725aa3e909b3c5fdb8192490bdd8e1495e85906af74fe6e34a2a77ba0673", size = 261370, upload-time = "2025-12-28T15:42:32.992Z" }, - { url = "https://files.pythonhosted.org/packages/e1/2c/d31722f0ec918fd7453b2758312729f645978d212b410cd0f7c2aed88a94/coverage-7.13.1-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:9ee68b21909686eeb21dfcba2c3b81fee70dcf38b140dcd5aa70680995fa3aa5", size = 263485, upload-time = "2025-12-28T15:42:34.759Z" }, - { url = "https://files.pythonhosted.org/packages/fa/7a/2c114fa5c5fc08ba0777e4aec4c97e0b4a1afcb69c75f1f54cff78b073ab/coverage-7.13.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:724b1b270cb13ea2e6503476e34541a0b1f62280bc997eab443f87790202033d", size = 265890, upload-time = "2025-12-28T15:42:36.517Z" }, - { url = "https://files.pythonhosted.org/packages/65/d9/f0794aa1c74ceabc780fe17f6c338456bbc4e96bd950f2e969f48ac6fb20/coverage-7.13.1-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:916abf1ac5cf7eb16bc540a5bf75c71c43a676f5c52fcb9fe75a2bd75fb944e8", size = 260445, upload-time = "2025-12-28T15:42:38.646Z" }, - { url = "https://files.pythonhosted.org/packages/49/23/184b22a00d9bb97488863ced9454068c79e413cb23f472da6cbddc6cfc52/coverage-7.13.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:776483fd35b58d8afe3acbd9988d5de592ab6da2d2a865edfdbc9fdb43e7c486", size = 263357, upload-time = "2025-12-28T15:42:40.788Z" }, - { url = "https://files.pythonhosted.org/packages/7d/bd/58af54c0c9199ea4190284f389005779d7daf7bf3ce40dcd2d2b2f96da69/coverage-7.13.1-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:b6f3b96617e9852703f5b633ea01315ca45c77e879584f283c44127f0f1ec564", size = 260959, upload-time = "2025-12-28T15:42:42.808Z" }, - { url = "https://files.pythonhosted.org/packages/4b/2a/6839294e8f78a4891bf1df79d69c536880ba2f970d0ff09e7513d6e352e9/coverage-7.13.1-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:bd63e7b74661fed317212fab774e2a648bc4bb09b35f25474f8e3325d2945cd7", size = 259792, upload-time = "2025-12-28T15:42:44.818Z" }, - { url = "https://files.pythonhosted.org/packages/ba/c3/528674d4623283310ad676c5af7414b9850ab6d55c2300e8aa4b945ec554/coverage-7.13.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:933082f161bbb3e9f90d00990dc956120f608cdbcaeea15c4d897f56ef4fe416", size = 262123, upload-time = "2025-12-28T15:42:47.108Z" }, - { url = "https://files.pythonhosted.org/packages/06/c5/8c0515692fb4c73ac379d8dc09b18eaf0214ecb76ea6e62467ba7a1556ff/coverage-7.13.1-cp314-cp314t-win32.whl", hash = "sha256:18be793c4c87de2965e1c0f060f03d9e5aff66cfeae8e1dbe6e5b88056ec153f", size = 222562, upload-time = "2025-12-28T15:42:49.144Z" }, - { url = "https://files.pythonhosted.org/packages/05/0e/c0a0c4678cb30dac735811db529b321d7e1c9120b79bd728d4f4d6b010e9/coverage-7.13.1-cp314-cp314t-win_amd64.whl", hash = "sha256:0e42e0ec0cd3e0d851cb3c91f770c9301f48647cb2877cb78f74bdaa07639a79", size = 223670, upload-time = 
"2025-12-28T15:42:51.218Z" }, - { url = "https://files.pythonhosted.org/packages/f5/5f/b177aa0011f354abf03a8f30a85032686d290fdeed4222b27d36b4372a50/coverage-7.13.1-cp314-cp314t-win_arm64.whl", hash = "sha256:eaecf47ef10c72ece9a2a92118257da87e460e113b83cc0d2905cbbe931792b4", size = 221707, upload-time = "2025-12-28T15:42:53.034Z" }, - { url = "https://files.pythonhosted.org/packages/cc/48/d9f421cb8da5afaa1a64570d9989e00fb7955e6acddc5a12979f7666ef60/coverage-7.13.1-py3-none-any.whl", hash = "sha256:2016745cb3ba554469d02819d78958b571792bb68e31302610e898f80dd3a573", size = 210722, upload-time = "2025-12-28T15:42:54.901Z" }, +version = "7.13.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ad/49/349848445b0e53660e258acbcc9b0d014895b6739237920886672240f84b/coverage-7.13.2.tar.gz", hash = "sha256:044c6951ec37146b72a50cc81ef02217d27d4c3640efd2640311393cbbf143d3", size = 826523, upload-time = "2026-01-25T13:00:04.889Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a4/2d/63e37369c8e81a643afe54f76073b020f7b97ddbe698c5c944b51b0a2bc5/coverage-7.13.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f4af3b01763909f477ea17c962e2cca8f39b350a4e46e3a30838b2c12e31b81b", size = 218842, upload-time = "2026-01-25T12:57:15.3Z" }, + { url = "https://files.pythonhosted.org/packages/57/06/86ce882a8d58cbcb3030e298788988e618da35420d16a8c66dac34f138d0/coverage-7.13.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:36393bd2841fa0b59498f75466ee9bdec4f770d3254f031f23e8fd8e140ffdd2", size = 219360, upload-time = "2026-01-25T12:57:17.572Z" }, + { url = "https://files.pythonhosted.org/packages/cd/84/70b0eb1ee19ca4ef559c559054c59e5b2ae4ec9af61398670189e5d276e9/coverage-7.13.2-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:9cc7573518b7e2186bd229b1a0fe24a807273798832c27032c4510f47ffdb896", size = 246123, upload-time = "2026-01-25T12:57:19.087Z" }, + { url = "https://files.pythonhosted.org/packages/35/fb/05b9830c2e8275ebc031e0019387cda99113e62bb500ab328bb72578183b/coverage-7.13.2-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:ca9566769b69a5e216a4e176d54b9df88f29d750c5b78dbb899e379b4e14b30c", size = 247930, upload-time = "2026-01-25T12:57:20.929Z" }, + { url = "https://files.pythonhosted.org/packages/81/aa/3f37858ca2eed4f09b10ca3c6ddc9041be0a475626cd7fd2712f4a2d526f/coverage-7.13.2-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9c9bdea644e94fd66d75a6f7e9a97bb822371e1fe7eadae2cacd50fcbc28e4dc", size = 249804, upload-time = "2026-01-25T12:57:22.904Z" }, + { url = "https://files.pythonhosted.org/packages/b6/b3/c904f40c56e60a2d9678a5ee8df3d906d297d15fb8bec5756c3b0a67e2df/coverage-7.13.2-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:5bd447332ec4f45838c1ad42268ce21ca87c40deb86eabd59888859b66be22a5", size = 246815, upload-time = "2026-01-25T12:57:24.314Z" }, + { url = "https://files.pythonhosted.org/packages/41/91/ddc1c5394ca7fd086342486440bfdd6b9e9bda512bf774599c7c7a0081e0/coverage-7.13.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:7c79ad5c28a16a1277e1187cf83ea8dafdcc689a784228a7d390f19776db7c31", size = 247843, upload-time = "2026-01-25T12:57:26.544Z" }, + { url = "https://files.pythonhosted.org/packages/87/d2/cdff8f4cd33697883c224ea8e003e9c77c0f1a837dc41d95a94dd26aad67/coverage-7.13.2-cp310-cp310-musllinux_1_2_i686.whl", hash = 
"sha256:76e06ccacd1fb6ada5d076ed98a8c6f66e2e6acd3df02819e2ee29fd637b76ad", size = 245850, upload-time = "2026-01-25T12:57:28.507Z" }, + { url = "https://files.pythonhosted.org/packages/f5/42/e837febb7866bf2553ab53dd62ed52f9bb36d60c7e017c55376ad21fbb05/coverage-7.13.2-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:49d49e9a5e9f4dc3d3dac95278a020afa6d6bdd41f63608a76fa05a719d5b66f", size = 246116, upload-time = "2026-01-25T12:57:30.16Z" }, + { url = "https://files.pythonhosted.org/packages/09/b1/4a3f935d7df154df02ff4f71af8d61298d713a7ba305d050ae475bfbdde2/coverage-7.13.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ed2bce0e7bfa53f7b0b01c722da289ef6ad4c18ebd52b1f93704c21f116360c8", size = 246720, upload-time = "2026-01-25T12:57:32.165Z" }, + { url = "https://files.pythonhosted.org/packages/e1/fe/538a6fd44c515f1c5197a3f078094cbaf2ce9f945df5b44e29d95c864bff/coverage-7.13.2-cp310-cp310-win32.whl", hash = "sha256:1574983178b35b9af4db4a9f7328a18a14a0a0ce76ffaa1c1bacb4cc82089a7c", size = 221465, upload-time = "2026-01-25T12:57:33.511Z" }, + { url = "https://files.pythonhosted.org/packages/5e/09/4b63a024295f326ec1a40ec8def27799300ce8775b1cbf0d33b1790605c4/coverage-7.13.2-cp310-cp310-win_amd64.whl", hash = "sha256:a360a8baeb038928ceb996f5623a4cd508728f8f13e08d4e96ce161702f3dd99", size = 222397, upload-time = "2026-01-25T12:57:34.927Z" }, + { url = "https://files.pythonhosted.org/packages/6c/01/abca50583a8975bb6e1c59eff67ed8e48bb127c07dad5c28d9e96ccc09ec/coverage-7.13.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:060ebf6f2c51aff5ba38e1f43a2095e087389b1c69d559fde6049a4b0001320e", size = 218971, upload-time = "2026-01-25T12:57:36.953Z" }, + { url = "https://files.pythonhosted.org/packages/eb/0e/b6489f344d99cd1e5b4d5e1be52dfd3f8a3dc5112aa6c33948da8cabad4e/coverage-7.13.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c1ea8ca9db5e7469cd364552985e15911548ea5b69c48a17291f0cac70484b2e", size = 219473, upload-time = "2026-01-25T12:57:38.934Z" }, + { url = "https://files.pythonhosted.org/packages/17/11/db2f414915a8e4ec53f60b17956c27f21fb68fcf20f8a455ce7c2ccec638/coverage-7.13.2-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:b780090d15fd58f07cf2011943e25a5f0c1c894384b13a216b6c86c8a8a7c508", size = 249896, upload-time = "2026-01-25T12:57:40.365Z" }, + { url = "https://files.pythonhosted.org/packages/80/06/0823fe93913663c017e508e8810c998c8ebd3ec2a5a85d2c3754297bdede/coverage-7.13.2-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:88a800258d83acb803c38175b4495d293656d5fac48659c953c18e5f539a274b", size = 251810, upload-time = "2026-01-25T12:57:42.045Z" }, + { url = "https://files.pythonhosted.org/packages/61/dc/b151c3cc41b28cdf7f0166c5fa1271cbc305a8ec0124cce4b04f74791a18/coverage-7.13.2-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6326e18e9a553e674d948536a04a80d850a5eeefe2aae2e6d7cf05d54046c01b", size = 253920, upload-time = "2026-01-25T12:57:44.026Z" }, + { url = "https://files.pythonhosted.org/packages/2d/35/e83de0556e54a4729a2b94ea816f74ce08732e81945024adee46851c2264/coverage-7.13.2-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:59562de3f797979e1ff07c587e2ac36ba60ca59d16c211eceaa579c266c5022f", size = 250025, upload-time = "2026-01-25T12:57:45.624Z" }, + { url = 
"https://files.pythonhosted.org/packages/39/67/af2eb9c3926ce3ea0d58a0d2516fcbdacf7a9fc9559fe63076beaf3f2596/coverage-7.13.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:27ba1ed6f66b0e2d61bfa78874dffd4f8c3a12f8e2b5410e515ab345ba7bc9c3", size = 251612, upload-time = "2026-01-25T12:57:47.713Z" }, + { url = "https://files.pythonhosted.org/packages/26/62/5be2e25f3d6c711d23b71296f8b44c978d4c8b4e5b26871abfc164297502/coverage-7.13.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8be48da4d47cc68754ce643ea50b3234557cbefe47c2f120495e7bd0a2756f2b", size = 249670, upload-time = "2026-01-25T12:57:49.378Z" }, + { url = "https://files.pythonhosted.org/packages/b3/51/400d1b09a8344199f9b6a6fc1868005d766b7ea95e7882e494fa862ca69c/coverage-7.13.2-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:2a47a4223d3361b91176aedd9d4e05844ca67d7188456227b6bf5e436630c9a1", size = 249395, upload-time = "2026-01-25T12:57:50.86Z" }, + { url = "https://files.pythonhosted.org/packages/e0/36/f02234bc6e5230e2f0a63fd125d0a2093c73ef20fdf681c7af62a140e4e7/coverage-7.13.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c6f141b468740197d6bd38f2b26ade124363228cc3f9858bd9924ab059e00059", size = 250298, upload-time = "2026-01-25T12:57:52.287Z" }, + { url = "https://files.pythonhosted.org/packages/b0/06/713110d3dd3151b93611c9cbfc65c15b4156b44f927fced49ac0b20b32a4/coverage-7.13.2-cp311-cp311-win32.whl", hash = "sha256:89567798404af067604246e01a49ef907d112edf2b75ef814b1364d5ce267031", size = 221485, upload-time = "2026-01-25T12:57:53.876Z" }, + { url = "https://files.pythonhosted.org/packages/16/0c/3ae6255fa1ebcb7dec19c9a59e85ef5f34566d1265c70af5b2fc981da834/coverage-7.13.2-cp311-cp311-win_amd64.whl", hash = "sha256:21dd57941804ae2ac7e921771a5e21bbf9aabec317a041d164853ad0a96ce31e", size = 222421, upload-time = "2026-01-25T12:57:55.433Z" }, + { url = "https://files.pythonhosted.org/packages/b5/37/fabc3179af4d61d89ea47bd04333fec735cd5e8b59baad44fed9fc4170d7/coverage-7.13.2-cp311-cp311-win_arm64.whl", hash = "sha256:10758e0586c134a0bafa28f2d37dd2cdb5e4a90de25c0fc0c77dabbad46eca28", size = 221088, upload-time = "2026-01-25T12:57:57.41Z" }, + { url = "https://files.pythonhosted.org/packages/46/39/e92a35f7800222d3f7b2cbb7bbc3b65672ae8d501cb31801b2d2bd7acdf1/coverage-7.13.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f106b2af193f965d0d3234f3f83fc35278c7fb935dfbde56ae2da3dd2c03b84d", size = 219142, upload-time = "2026-01-25T12:58:00.448Z" }, + { url = "https://files.pythonhosted.org/packages/45/7a/8bf9e9309c4c996e65c52a7c5a112707ecdd9fbaf49e10b5a705a402bbb4/coverage-7.13.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:78f45d21dc4d5d6bd29323f0320089ef7eae16e4bef712dff79d184fa7330af3", size = 219503, upload-time = "2026-01-25T12:58:02.451Z" }, + { url = "https://files.pythonhosted.org/packages/87/93/17661e06b7b37580923f3f12406ac91d78aeed293fb6da0b69cc7957582f/coverage-7.13.2-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:fae91dfecd816444c74531a9c3d6ded17a504767e97aa674d44f638107265b99", size = 251006, upload-time = "2026-01-25T12:58:04.059Z" }, + { url = "https://files.pythonhosted.org/packages/12/f0/f9e59fb8c310171497f379e25db060abef9fa605e09d63157eebec102676/coverage-7.13.2-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:264657171406c114787b441484de620e03d8f7202f113d62fcd3d9688baa3e6f", size = 253750, upload-time = "2026-01-25T12:58:05.574Z" }, + { url = 
"https://files.pythonhosted.org/packages/e5/b1/1935e31add2232663cf7edd8269548b122a7d100047ff93475dbaaae673e/coverage-7.13.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ae47d8dcd3ded0155afbb59c62bd8ab07ea0fd4902e1c40567439e6db9dcaf2f", size = 254862, upload-time = "2026-01-25T12:58:07.647Z" }, + { url = "https://files.pythonhosted.org/packages/af/59/b5e97071ec13df5f45da2b3391b6cdbec78ba20757bc92580a5b3d5fa53c/coverage-7.13.2-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:8a0b33e9fd838220b007ce8f299114d406c1e8edb21336af4c97a26ecfd185aa", size = 251420, upload-time = "2026-01-25T12:58:09.309Z" }, + { url = "https://files.pythonhosted.org/packages/3f/75/9495932f87469d013dc515fb0ce1aac5fa97766f38f6b1a1deb1ee7b7f3a/coverage-7.13.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:b3becbea7f3ce9a2d4d430f223ec15888e4deb31395840a79e916368d6004cce", size = 252786, upload-time = "2026-01-25T12:58:10.909Z" }, + { url = "https://files.pythonhosted.org/packages/6a/59/af550721f0eb62f46f7b8cb7e6f1860592189267b1c411a4e3a057caacee/coverage-7.13.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:f819c727a6e6eeb8711e4ce63d78c620f69630a2e9d53bc95ca5379f57b6ba94", size = 250928, upload-time = "2026-01-25T12:58:12.449Z" }, + { url = "https://files.pythonhosted.org/packages/9b/b1/21b4445709aae500be4ab43bbcfb4e53dc0811c3396dcb11bf9f23fd0226/coverage-7.13.2-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:4f7b71757a3ab19f7ba286e04c181004c1d61be921795ee8ba6970fd0ec91da5", size = 250496, upload-time = "2026-01-25T12:58:14.047Z" }, + { url = "https://files.pythonhosted.org/packages/ba/b1/0f5d89dfe0392990e4f3980adbde3eb34885bc1effb2dc369e0bf385e389/coverage-7.13.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b7fc50d2afd2e6b4f6f2f403b70103d280a8e0cb35320cbbe6debcda02a1030b", size = 252373, upload-time = "2026-01-25T12:58:15.976Z" }, + { url = "https://files.pythonhosted.org/packages/01/c9/0cf1a6a57a9968cc049a6b896693faa523c638a5314b1fc374eb2b2ac904/coverage-7.13.2-cp312-cp312-win32.whl", hash = "sha256:292250282cf9bcf206b543d7608bda17ca6fc151f4cbae949fc7e115112fbd41", size = 221696, upload-time = "2026-01-25T12:58:17.517Z" }, + { url = "https://files.pythonhosted.org/packages/4d/05/d7540bf983f09d32803911afed135524570f8c47bb394bf6206c1dc3a786/coverage-7.13.2-cp312-cp312-win_amd64.whl", hash = "sha256:eeea10169fac01549a7921d27a3e517194ae254b542102267bef7a93ed38c40e", size = 222504, upload-time = "2026-01-25T12:58:19.115Z" }, + { url = "https://files.pythonhosted.org/packages/15/8b/1a9f037a736ced0a12aacf6330cdaad5008081142a7070bc58b0f7930cbc/coverage-7.13.2-cp312-cp312-win_arm64.whl", hash = "sha256:2a5b567f0b635b592c917f96b9a9cb3dbd4c320d03f4bf94e9084e494f2e8894", size = 221120, upload-time = "2026-01-25T12:58:21.334Z" }, + { url = "https://files.pythonhosted.org/packages/a7/f0/3d3eac7568ab6096ff23791a526b0048a1ff3f49d0e236b2af6fb6558e88/coverage-7.13.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ed75de7d1217cf3b99365d110975f83af0528c849ef5180a12fd91b5064df9d6", size = 219168, upload-time = "2026-01-25T12:58:23.376Z" }, + { url = "https://files.pythonhosted.org/packages/a3/a6/f8b5cfeddbab95fdef4dcd682d82e5dcff7a112ced57a959f89537ee9995/coverage-7.13.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:97e596de8fa9bada4d88fde64a3f4d37f1b6131e4faa32bad7808abc79887ddc", size = 219537, upload-time = "2026-01-25T12:58:24.932Z" }, + { url = 
"https://files.pythonhosted.org/packages/7b/e6/8d8e6e0c516c838229d1e41cadcec91745f4b1031d4db17ce0043a0423b4/coverage-7.13.2-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:68c86173562ed4413345410c9480a8d64864ac5e54a5cda236748031e094229f", size = 250528, upload-time = "2026-01-25T12:58:26.567Z" }, + { url = "https://files.pythonhosted.org/packages/8e/78/befa6640f74092b86961f957f26504c8fba3d7da57cc2ab7407391870495/coverage-7.13.2-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:7be4d613638d678b2b3773b8f687537b284d7074695a43fe2fbbfc0e31ceaed1", size = 253132, upload-time = "2026-01-25T12:58:28.251Z" }, + { url = "https://files.pythonhosted.org/packages/9d/10/1630db1edd8ce675124a2ee0f7becc603d2bb7b345c2387b4b95c6907094/coverage-7.13.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d7f63ce526a96acd0e16c4af8b50b64334239550402fb1607ce6a584a6d62ce9", size = 254374, upload-time = "2026-01-25T12:58:30.294Z" }, + { url = "https://files.pythonhosted.org/packages/ed/1d/0d9381647b1e8e6d310ac4140be9c428a0277330991e0c35bdd751e338a4/coverage-7.13.2-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:406821f37f864f968e29ac14c3fccae0fec9fdeba48327f0341decf4daf92d7c", size = 250762, upload-time = "2026-01-25T12:58:32.036Z" }, + { url = "https://files.pythonhosted.org/packages/43/e4/5636dfc9a7c871ee8776af83ee33b4c26bc508ad6cee1e89b6419a366582/coverage-7.13.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ee68e5a4e3e5443623406b905db447dceddffee0dceb39f4e0cd9ec2a35004b5", size = 252502, upload-time = "2026-01-25T12:58:33.961Z" }, + { url = "https://files.pythonhosted.org/packages/02/2a/7ff2884d79d420cbb2d12fed6fff727b6d0ef27253140d3cdbbd03187ee0/coverage-7.13.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2ee0e58cca0c17dd9c6c1cdde02bb705c7b3fbfa5f3b0b5afeda20d4ebff8ef4", size = 250463, upload-time = "2026-01-25T12:58:35.529Z" }, + { url = "https://files.pythonhosted.org/packages/91/c0/ba51087db645b6c7261570400fc62c89a16278763f36ba618dc8657a187b/coverage-7.13.2-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:6e5bbb5018bf76a56aabdb64246b5288d5ae1b7d0dd4d0534fe86df2c2992d1c", size = 250288, upload-time = "2026-01-25T12:58:37.226Z" }, + { url = "https://files.pythonhosted.org/packages/03/07/44e6f428551c4d9faf63ebcefe49b30e5c89d1be96f6a3abd86a52da9d15/coverage-7.13.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a55516c68ef3e08e134e818d5e308ffa6b1337cc8b092b69b24287bf07d38e31", size = 252063, upload-time = "2026-01-25T12:58:38.821Z" }, + { url = "https://files.pythonhosted.org/packages/c2/67/35b730ad7e1859dd57e834d1bc06080d22d2f87457d53f692fce3f24a5a9/coverage-7.13.2-cp313-cp313-win32.whl", hash = "sha256:5b20211c47a8abf4abc3319d8ce2464864fa9f30c5fcaf958a3eed92f4f1fef8", size = 221716, upload-time = "2026-01-25T12:58:40.484Z" }, + { url = "https://files.pythonhosted.org/packages/0d/82/e5fcf5a97c72f45fc14829237a6550bf49d0ab882ac90e04b12a69db76b4/coverage-7.13.2-cp313-cp313-win_amd64.whl", hash = "sha256:14f500232e521201cf031549fb1ebdfc0a40f401cf519157f76c397e586c3beb", size = 222522, upload-time = "2026-01-25T12:58:43.247Z" }, + { url = "https://files.pythonhosted.org/packages/b1/f1/25d7b2f946d239dd2d6644ca2cc060d24f97551e2af13b6c24c722ae5f97/coverage-7.13.2-cp313-cp313-win_arm64.whl", hash = "sha256:9779310cb5a9778a60c899f075a8514c89fa6d10131445c2207fc893e0b14557", size = 221145, upload-time = "2026-01-25T12:58:45Z" }, + { 
url = "https://files.pythonhosted.org/packages/9e/f7/080376c029c8f76fadfe43911d0daffa0cbdc9f9418a0eead70c56fb7f4b/coverage-7.13.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:e64fa5a1e41ce5df6b547cbc3d3699381c9e2c2c369c67837e716ed0f549d48e", size = 219861, upload-time = "2026-01-25T12:58:46.586Z" }, + { url = "https://files.pythonhosted.org/packages/42/11/0b5e315af5ab35f4c4a70e64d3314e4eec25eefc6dec13be3a7d5ffe8ac5/coverage-7.13.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:b01899e82a04085b6561eb233fd688474f57455e8ad35cd82286463ba06332b7", size = 220207, upload-time = "2026-01-25T12:58:48.277Z" }, + { url = "https://files.pythonhosted.org/packages/b2/0c/0874d0318fb1062117acbef06a09cf8b63f3060c22265adaad24b36306b7/coverage-7.13.2-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:838943bea48be0e2768b0cf7819544cdedc1bbb2f28427eabb6eb8c9eb2285d3", size = 261504, upload-time = "2026-01-25T12:58:49.904Z" }, + { url = "https://files.pythonhosted.org/packages/83/5e/1cd72c22ecb30751e43a72f40ba50fcef1b7e93e3ea823bd9feda8e51f9a/coverage-7.13.2-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:93d1d25ec2b27e90bcfef7012992d1f5121b51161b8bffcda756a816cf13c2c3", size = 263582, upload-time = "2026-01-25T12:58:51.582Z" }, + { url = "https://files.pythonhosted.org/packages/9b/da/8acf356707c7a42df4d0657020308e23e5a07397e81492640c186268497c/coverage-7.13.2-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:93b57142f9621b0d12349c43fc7741fe578e4bc914c1e5a54142856cfc0bf421", size = 266008, upload-time = "2026-01-25T12:58:53.234Z" }, + { url = "https://files.pythonhosted.org/packages/41/41/ea1730af99960309423c6ea8d6a4f1fa5564b2d97bd1d29dda4b42611f04/coverage-7.13.2-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:f06799ae1bdfff7ccb8665d75f8291c69110ba9585253de254688aa8a1ccc6c5", size = 260762, upload-time = "2026-01-25T12:58:55.372Z" }, + { url = "https://files.pythonhosted.org/packages/22/fa/02884d2080ba71db64fdc127b311db60e01fe6ba797d9c8363725e39f4d5/coverage-7.13.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:7f9405ab4f81d490811b1d91c7a20361135a2df4c170e7f0b747a794da5b7f23", size = 263571, upload-time = "2026-01-25T12:58:57.52Z" }, + { url = "https://files.pythonhosted.org/packages/d2/6b/4083aaaeba9b3112f55ac57c2ce7001dc4d8fa3fcc228a39f09cc84ede27/coverage-7.13.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:f9ab1d5b86f8fbc97a5b3cd6280a3fd85fef3b028689d8a2c00918f0d82c728c", size = 261200, upload-time = "2026-01-25T12:58:59.255Z" }, + { url = "https://files.pythonhosted.org/packages/e9/d2/aea92fa36d61955e8c416ede9cf9bf142aa196f3aea214bb67f85235a050/coverage-7.13.2-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:f674f59712d67e841525b99e5e2b595250e39b529c3bda14764e4f625a3fa01f", size = 260095, upload-time = "2026-01-25T12:59:01.066Z" }, + { url = "https://files.pythonhosted.org/packages/0d/ae/04ffe96a80f107ea21b22b2367175c621da920063260a1c22f9452fd7866/coverage-7.13.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:c6cadac7b8ace1ba9144feb1ae3cb787a6065ba6d23ffc59a934b16406c26573", size = 262284, upload-time = "2026-01-25T12:59:02.802Z" }, + { url = "https://files.pythonhosted.org/packages/1c/7a/6f354dcd7dfc41297791d6fb4e0d618acb55810bde2c1fd14b3939e05c2b/coverage-7.13.2-cp313-cp313t-win32.whl", hash = "sha256:14ae4146465f8e6e6253eba0cccd57423e598a4cb925958b240c805300918343", size = 222389, upload-time 
= "2026-01-25T12:59:04.563Z" }, + { url = "https://files.pythonhosted.org/packages/8d/d5/080ad292a4a3d3daf411574be0a1f56d6dee2c4fdf6b005342be9fac807f/coverage-7.13.2-cp313-cp313t-win_amd64.whl", hash = "sha256:9074896edd705a05769e3de0eac0a8388484b503b68863dd06d5e473f874fd47", size = 223450, upload-time = "2026-01-25T12:59:06.677Z" }, + { url = "https://files.pythonhosted.org/packages/88/96/df576fbacc522e9fb8d1c4b7a7fc62eb734be56e2cba1d88d2eabe08ea3f/coverage-7.13.2-cp313-cp313t-win_arm64.whl", hash = "sha256:69e526e14f3f854eda573d3cf40cffd29a1a91c684743d904c33dbdcd0e0f3e7", size = 221707, upload-time = "2026-01-25T12:59:08.363Z" }, + { url = "https://files.pythonhosted.org/packages/55/53/1da9e51a0775634b04fcc11eb25c002fc58ee4f92ce2e8512f94ac5fc5bf/coverage-7.13.2-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:387a825f43d680e7310e6f325b2167dd093bc8ffd933b83e9aa0983cf6e0a2ef", size = 219213, upload-time = "2026-01-25T12:59:11.909Z" }, + { url = "https://files.pythonhosted.org/packages/46/35/b3caac3ebbd10230fea5a33012b27d19e999a17c9285c4228b4b2e35b7da/coverage-7.13.2-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:f0d7fea9d8e5d778cd5a9e8fc38308ad688f02040e883cdc13311ef2748cb40f", size = 219549, upload-time = "2026-01-25T12:59:13.638Z" }, + { url = "https://files.pythonhosted.org/packages/76/9c/e1cf7def1bdc72c1907e60703983a588f9558434a2ff94615747bd73c192/coverage-7.13.2-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:e080afb413be106c95c4ee96b4fffdc9e2fa56a8bbf90b5c0918e5c4449412f5", size = 250586, upload-time = "2026-01-25T12:59:15.808Z" }, + { url = "https://files.pythonhosted.org/packages/ba/49/f54ec02ed12be66c8d8897270505759e057b0c68564a65c429ccdd1f139e/coverage-7.13.2-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:a7fc042ba3c7ce25b8a9f097eb0f32a5ce1ccdb639d9eec114e26def98e1f8a4", size = 253093, upload-time = "2026-01-25T12:59:17.491Z" }, + { url = "https://files.pythonhosted.org/packages/fb/5e/aaf86be3e181d907e23c0f61fccaeb38de8e6f6b47aed92bf57d8fc9c034/coverage-7.13.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d0ba505e021557f7f8173ee8cd6b926373d8653e5ff7581ae2efce1b11ef4c27", size = 254446, upload-time = "2026-01-25T12:59:19.752Z" }, + { url = "https://files.pythonhosted.org/packages/28/c8/a5fa01460e2d75b0c853b392080d6829d3ca8b5ab31e158fa0501bc7c708/coverage-7.13.2-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:7de326f80e3451bd5cc7239ab46c73ddb658fe0b7649476bc7413572d36cd548", size = 250615, upload-time = "2026-01-25T12:59:21.928Z" }, + { url = "https://files.pythonhosted.org/packages/86/0b/6d56315a55f7062bb66410732c24879ccb2ec527ab6630246de5fe45a1df/coverage-7.13.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:abaea04f1e7e34841d4a7b343904a3f59481f62f9df39e2cd399d69a187a9660", size = 252452, upload-time = "2026-01-25T12:59:23.592Z" }, + { url = "https://files.pythonhosted.org/packages/30/19/9bc550363ebc6b0ea121977ee44d05ecd1e8bf79018b8444f1028701c563/coverage-7.13.2-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:9f93959ee0c604bccd8e0697be21de0887b1f73efcc3aa73a3ec0fd13feace92", size = 250418, upload-time = "2026-01-25T12:59:25.392Z" }, + { url = "https://files.pythonhosted.org/packages/1f/53/580530a31ca2f0cc6f07a8f2ab5460785b02bb11bdf815d4c4d37a4c5169/coverage-7.13.2-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:13fe81ead04e34e105bf1b3c9f9cdf32ce31736ee5d90a8d2de02b9d3e1bcb82", size 
= 250231, upload-time = "2026-01-25T12:59:27.888Z" }, + { url = "https://files.pythonhosted.org/packages/e2/42/dd9093f919dc3088cb472893651884bd675e3df3d38a43f9053656dca9a2/coverage-7.13.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d6d16b0f71120e365741bca2cb473ca6fe38930bc5431c5e850ba949f708f892", size = 251888, upload-time = "2026-01-25T12:59:29.636Z" }, + { url = "https://files.pythonhosted.org/packages/fa/a6/0af4053e6e819774626e133c3d6f70fae4d44884bfc4b126cb647baee8d3/coverage-7.13.2-cp314-cp314-win32.whl", hash = "sha256:9b2f4714bb7d99ba3790ee095b3b4ac94767e1347fe424278a0b10acb3ff04fe", size = 221968, upload-time = "2026-01-25T12:59:31.424Z" }, + { url = "https://files.pythonhosted.org/packages/c4/cc/5aff1e1f80d55862442855517bb8ad8ad3a68639441ff6287dde6a58558b/coverage-7.13.2-cp314-cp314-win_amd64.whl", hash = "sha256:e4121a90823a063d717a96e0a0529c727fb31ea889369a0ee3ec00ed99bf6859", size = 222783, upload-time = "2026-01-25T12:59:33.118Z" }, + { url = "https://files.pythonhosted.org/packages/de/20/09abafb24f84b3292cc658728803416c15b79f9ee5e68d25238a895b07d9/coverage-7.13.2-cp314-cp314-win_arm64.whl", hash = "sha256:6873f0271b4a15a33e7590f338d823f6f66f91ed147a03938d7ce26efd04eee6", size = 221348, upload-time = "2026-01-25T12:59:34.939Z" }, + { url = "https://files.pythonhosted.org/packages/b6/60/a3820c7232db63be060e4019017cd3426751c2699dab3c62819cdbcea387/coverage-7.13.2-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:f61d349f5b7cd95c34017f1927ee379bfbe9884300d74e07cf630ccf7a610c1b", size = 219950, upload-time = "2026-01-25T12:59:36.624Z" }, + { url = "https://files.pythonhosted.org/packages/fd/37/e4ef5975fdeb86b1e56db9a82f41b032e3d93a840ebaf4064f39e770d5c5/coverage-7.13.2-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:a43d34ce714f4ca674c0d90beb760eb05aad906f2c47580ccee9da8fe8bfb417", size = 220209, upload-time = "2026-01-25T12:59:38.339Z" }, + { url = "https://files.pythonhosted.org/packages/54/df/d40e091d00c51adca1e251d3b60a8b464112efa3004949e96a74d7c19a64/coverage-7.13.2-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:bff1b04cb9d4900ce5c56c4942f047dc7efe57e2608cb7c3c8936e9970ccdbee", size = 261576, upload-time = "2026-01-25T12:59:40.446Z" }, + { url = "https://files.pythonhosted.org/packages/c5/44/5259c4bed54e3392e5c176121af9f71919d96dde853386e7730e705f3520/coverage-7.13.2-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:6ae99e4560963ad8e163e819e5d77d413d331fd00566c1e0856aa252303552c1", size = 263704, upload-time = "2026-01-25T12:59:42.346Z" }, + { url = "https://files.pythonhosted.org/packages/16/bd/ae9f005827abcbe2c70157459ae86053971c9fa14617b63903abbdce26d9/coverage-7.13.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e79a8c7d461820257d9aa43716c4efc55366d7b292e46b5b37165be1d377405d", size = 266109, upload-time = "2026-01-25T12:59:44.073Z" }, + { url = "https://files.pythonhosted.org/packages/a2/c0/8e279c1c0f5b1eaa3ad9b0fb7a5637fc0379ea7d85a781c0fe0bb3cfc2ab/coverage-7.13.2-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:060ee84f6a769d40c492711911a76811b4befb6fba50abb450371abb720f5bd6", size = 260686, upload-time = "2026-01-25T12:59:45.804Z" }, + { url = "https://files.pythonhosted.org/packages/b2/47/3a8112627e9d863e7cddd72894171c929e94491a597811725befdcd76bce/coverage-7.13.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = 
"sha256:3bca209d001fd03ea2d978f8a4985093240a355c93078aee3f799852c23f561a", size = 263568, upload-time = "2026-01-25T12:59:47.929Z" }, + { url = "https://files.pythonhosted.org/packages/92/bc/7ea367d84afa3120afc3ce6de294fd2dcd33b51e2e7fbe4bbfd200f2cb8c/coverage-7.13.2-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:6b8092aa38d72f091db61ef83cb66076f18f02da3e1a75039a4f218629600e04", size = 261174, upload-time = "2026-01-25T12:59:49.717Z" }, + { url = "https://files.pythonhosted.org/packages/33/b7/f1092dcecb6637e31cc2db099581ee5c61a17647849bae6b8261a2b78430/coverage-7.13.2-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:4a3158dc2dcce5200d91ec28cd315c999eebff355437d2765840555d765a6e5f", size = 260017, upload-time = "2026-01-25T12:59:51.463Z" }, + { url = "https://files.pythonhosted.org/packages/2b/cd/f3d07d4b95fbe1a2ef0958c15da614f7e4f557720132de34d2dc3aa7e911/coverage-7.13.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:3973f353b2d70bd9796cc12f532a05945232ccae966456c8ed7034cb96bbfd6f", size = 262337, upload-time = "2026-01-25T12:59:53.407Z" }, + { url = "https://files.pythonhosted.org/packages/e0/db/b0d5b2873a07cb1e06a55d998697c0a5a540dcefbf353774c99eb3874513/coverage-7.13.2-cp314-cp314t-win32.whl", hash = "sha256:79f6506a678a59d4ded048dc72f1859ebede8ec2b9a2d509ebe161f01c2879d3", size = 222749, upload-time = "2026-01-25T12:59:56.316Z" }, + { url = "https://files.pythonhosted.org/packages/e5/2f/838a5394c082ac57d85f57f6aba53093b30d9089781df72412126505716f/coverage-7.13.2-cp314-cp314t-win_amd64.whl", hash = "sha256:196bfeabdccc5a020a57d5a368c681e3a6ceb0447d153aeccc1ab4d70a5032ba", size = 223857, upload-time = "2026-01-25T12:59:58.201Z" }, + { url = "https://files.pythonhosted.org/packages/44/d4/b608243e76ead3a4298824b50922b89ef793e50069ce30316a65c1b4d7ef/coverage-7.13.2-cp314-cp314t-win_arm64.whl", hash = "sha256:69269ab58783e090bfbf5b916ab3d188126e22d6070bbfc93098fdd474ef937c", size = 221881, upload-time = "2026-01-25T13:00:00.449Z" }, + { url = "https://files.pythonhosted.org/packages/d2/db/d291e30fdf7ea617a335531e72294e0c723356d7fdde8fba00610a76bda9/coverage-7.13.2-py3-none-any.whl", hash = "sha256:40ce1ea1e25125556d8e76bd0b61500839a07944cc287ac21d5626f3e620cad5", size = 210943, upload-time = "2026-01-25T13:00:02.388Z" }, ] [package.optional-dependencies] @@ -1848,6 +1887,35 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/93/69/e391bd51bc08ed9141ecd899a0ddb61ab6465309f1eb470905c0c8868081/docutils-0.19-py3-none-any.whl", hash = "sha256:5e1de4d849fee02c63b040a4a3fd567f4ab104defd8a5511fbbc24a8a017efbc", size = 570472, upload-time = "2022-07-05T20:17:26.388Z" }, ] +[[package]] +name = "durabletask" +version = "1.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "asyncio", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "grpcio", version = "1.67.1", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.14' and sys_platform == 'darwin') or (python_full_version < '3.14' and sys_platform == 'linux') or (python_full_version < '3.14' and sys_platform == 'win32')" }, + { name = "grpcio", version = "1.76.0", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.14' and sys_platform == 'darwin') or (python_full_version >= '3.14' and sys_platform == 'linux') or (python_full_version >= '3.14' and sys_platform == 'win32')" }, + { name = "packaging", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or 
sys_platform == 'win32'" }, + { name = "protobuf", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c9/27/3d021e6b36fc1aab6099fafc56dfc8059b4e8968615a26c1a0418601e50a/durabletask-1.3.0.tar.gz", hash = "sha256:11e38dda6df4737fadca0c71fc0a0f769955877c8a8bdb25ccbf90cf45afbf63", size = 57830, upload-time = "2026-01-12T21:54:30.465Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/44/87/31ea460dbfaf50d9877f143e2ce9829cac2fb106747d9900cc353356ea77/durabletask-1.3.0-py3-none-any.whl", hash = "sha256:411f23e13391b8845edca010873dd7a87ee7cfc1fe05753ab28a7cd7c3c1bd77", size = 64112, upload-time = "2026-01-12T21:54:29.471Z" }, +] + +[[package]] +name = "durabletask-azuremanaged" +version = "1.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "azure-identity", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "durabletask", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/29/29/6bb0b5fe51aa92e117adcdc93efe97cf5476d86c1496e5c5ab35d99a8d07/durabletask_azuremanaged-1.3.0.tar.gz", hash = "sha256:55172588e075afa80d46dcc2e5ddbd84be0a20cc78c74f687040c3720677d34c", size = 4343, upload-time = "2026-01-12T21:58:23.95Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/08/11/4d34fec302c4813e626080f1532d189767eb31d6d80e8f3698c230512f14/durabletask_azuremanaged-1.3.0-py3-none-any.whl", hash = "sha256:9da914f569da1597c858d494a95eda37e4372726c0ee65f30080dcafab262d60", size = 6366, upload-time = "2026-01-12T21:58:23.28Z" }, +] + [[package]] name = "email-validator" version = "2.3.0" @@ -1866,7 +1934,7 @@ name = "exceptiongroup" version = "1.3.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "typing-extensions", marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" }, + { name = "typing-extensions", marker = "(python_full_version < '3.13' and sys_platform == 'darwin') or (python_full_version < '3.13' and sys_platform == 'linux') or (python_full_version < '3.13' and sys_platform == 'win32')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/50/79/66800aadf48771f6b62f7eb014e352e5d06856655206165d775e675a02c9/exceptiongroup-1.3.1.tar.gz", hash = "sha256:8b412432c6055b0b7d14c310000ae93352ed6754f70fa8f7c34141f91c4e3219", size = 30371, upload-time = "2025-11-21T23:01:54.787Z" } wheels = [ @@ -2255,16 +2323,16 @@ wheels = [ [[package]] name = "github-copilot-sdk" -version = "0.1.15" +version = "0.1.18" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "pydantic", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "python-dateutil", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "typing-extensions", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/0b/f4/3e8f7fde88c5491ce5d29818d850b9508868660cad5359a9352bb804364a/github_copilot_sdk-0.1.15.tar.gz", hash = "sha256:6f713fc80b282844344bc4aaa495f58f346d63cdc93d25df3bf7e1e8d0d4f20a", size = 89915, upload-time = "2026-01-22T04:25:20.18Z" } +sdist 
= { url = "https://files.pythonhosted.org/packages/bf/00/be64b9b33015d5e79fb5e5e95d871484e79a907b3792935b855ab40308ce/github_copilot_sdk-0.1.18.tar.gz", hash = "sha256:b2d56d40c0f48e81f2899d32fb4a8d2b8df22620913547da93fddf9b2f368e9e", size = 81318, upload-time = "2026-01-24T18:09:57.617Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/80/be/1f1166914c2a769d43761b7da6a2137711e1cb2c8de77bf9c2da5c171ec7/github_copilot_sdk-0.1.15-py3-none-any.whl", hash = "sha256:64a45e4c63b4ae6a4863470f74d6cf6bcf2a94b8a6efa7dfbdc8bcea316c5e78", size = 31972, upload-time = "2026-01-22T04:25:18.652Z" }, + { url = "https://files.pythonhosted.org/packages/ae/0f/f832b32bca9d89a26a2b810c69fdc37ac925e34855ee93a11bb3d90ca2b7/github_copilot_sdk-0.1.18-py3-none-any.whl", hash = "sha256:99cfdf4d4d0da6d92d5bf36a952546157785df83d6b0783b3f7a8e93a2762171", size = 33740, upload-time = "2026-01-24T18:09:55.696Z" }, ] [[package]] @@ -2285,15 +2353,16 @@ wheels = [ [[package]] name = "google-auth" -version = "2.47.0" +version = "2.48.0" source = { registry = "https://pypi.org/simple" } dependencies = [ + { name = "cryptography", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "pyasn1-modules", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "rsa", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/60/3c/ec64b9a275ca22fa1cd3b6e77fefcf837b0732c890aa32d2bd21313d9b33/google_auth-2.47.0.tar.gz", hash = "sha256:833229070a9dfee1a353ae9877dcd2dec069a8281a4e72e72f77d4a70ff945da", size = 323719, upload-time = "2026-01-06T21:55:31.045Z" } +sdist = { url = "https://files.pythonhosted.org/packages/0c/41/242044323fbd746615884b1c16639749e73665b718209946ebad7ba8a813/google_auth-2.48.0.tar.gz", hash = "sha256:4f7e706b0cd3208a3d940a19a822c37a476ddba5450156c3e6624a71f7c841ce", size = 326522, upload-time = "2026-01-26T19:22:47.157Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/db/18/79e9008530b79527e0d5f79e7eef08d3b179b7f851cfd3a2f27822fbdfa9/google_auth-2.47.0-py3-none-any.whl", hash = "sha256:c516d68336bfde7cf0da26aab674a36fedcf04b37ac4edd59c597178760c3498", size = 234867, upload-time = "2026-01-06T21:55:28.6Z" }, + { url = "https://files.pythonhosted.org/packages/83/1d/d6466de3a5249d35e832a52834115ca9d1d0de6abc22065f049707516d47/google_auth-2.48.0-py3-none-any.whl", hash = "sha256:2e2a537873d449434252a9632c28bfc268b0adb1e53f9fb62afc5333a975903f", size = 236499, upload-time = "2026-01-26T19:22:45.099Z" }, ] [[package]] @@ -2319,57 +2388,56 @@ wheels = [ [[package]] name = "greenlet" -version = "3.3.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/c7/e5/40dbda2736893e3e53d25838e0f19a2b417dfc122b9989c91918db30b5d3/greenlet-3.3.0.tar.gz", hash = "sha256:a82bb225a4e9e4d653dd2fb7b8b2d36e4fb25bc0165422a11e48b88e9e6f78fb", size = 190651, upload-time = "2025-12-04T14:49:44.05Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/32/6a/33d1702184d94106d3cdd7bfb788e19723206fce152e303473ca3b946c7b/greenlet-3.3.0-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:6f8496d434d5cb2dce025773ba5597f71f5410ae499d5dd9533e0653258cdb3d", size = 273658, upload-time = "2025-12-04T14:23:37.494Z" }, - { url = 
"https://files.pythonhosted.org/packages/d6/b7/2b5805bbf1907c26e434f4e448cd8b696a0b71725204fa21a211ff0c04a7/greenlet-3.3.0-cp310-cp310-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b96dc7eef78fd404e022e165ec55327f935b9b52ff355b067eb4a0267fc1cffb", size = 574810, upload-time = "2025-12-04T14:50:04.154Z" }, - { url = "https://files.pythonhosted.org/packages/94/38/343242ec12eddf3d8458c73f555c084359883d4ddc674240d9e61ec51fd6/greenlet-3.3.0-cp310-cp310-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:73631cd5cccbcfe63e3f9492aaa664d278fda0ce5c3d43aeda8e77317e38efbd", size = 586248, upload-time = "2025-12-04T14:57:39.35Z" }, - { url = "https://files.pythonhosted.org/packages/f0/d0/0ae86792fb212e4384041e0ef8e7bc66f59a54912ce407d26a966ed2914d/greenlet-3.3.0-cp310-cp310-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b299a0cb979f5d7197442dccc3aee67fce53500cd88951b7e6c35575701c980b", size = 597403, upload-time = "2025-12-04T15:07:10.831Z" }, - { url = "https://files.pythonhosted.org/packages/b6/a8/15d0aa26c0036a15d2659175af00954aaaa5d0d66ba538345bd88013b4d7/greenlet-3.3.0-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7dee147740789a4632cace364816046e43310b59ff8fb79833ab043aefa72fd5", size = 586910, upload-time = "2025-12-04T14:25:59.705Z" }, - { url = "https://files.pythonhosted.org/packages/e1/9b/68d5e3b7ccaba3907e5532cf8b9bf16f9ef5056a008f195a367db0ff32db/greenlet-3.3.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:39b28e339fc3c348427560494e28d8a6f3561c8d2bcf7d706e1c624ed8d822b9", size = 1547206, upload-time = "2025-12-04T15:04:21.027Z" }, - { url = "https://files.pythonhosted.org/packages/66/bd/e3086ccedc61e49f91e2cfb5ffad9d8d62e5dc85e512a6200f096875b60c/greenlet-3.3.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b3c374782c2935cc63b2a27ba8708471de4ad1abaa862ffdb1ef45a643ddbb7d", size = 1613359, upload-time = "2025-12-04T14:27:26.548Z" }, - { url = "https://files.pythonhosted.org/packages/f4/6b/d4e73f5dfa888364bbf02efa85616c6714ae7c631c201349782e5b428925/greenlet-3.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:b49e7ed51876b459bd645d83db257f0180e345d3f768a35a85437a24d5a49082", size = 300740, upload-time = "2025-12-04T14:47:52.773Z" }, - { url = "https://files.pythonhosted.org/packages/1f/cb/48e964c452ca2b92175a9b2dca037a553036cb053ba69e284650ce755f13/greenlet-3.3.0-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:e29f3018580e8412d6aaf5641bb7745d38c85228dacf51a73bd4e26ddf2a6a8e", size = 274908, upload-time = "2025-12-04T14:23:26.435Z" }, - { url = "https://files.pythonhosted.org/packages/28/da/38d7bff4d0277b594ec557f479d65272a893f1f2a716cad91efeb8680953/greenlet-3.3.0-cp311-cp311-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a687205fb22794e838f947e2194c0566d3812966b41c78709554aa883183fb62", size = 577113, upload-time = "2025-12-04T14:50:05.493Z" }, - { url = "https://files.pythonhosted.org/packages/3c/f2/89c5eb0faddc3ff014f1c04467d67dee0d1d334ab81fadbf3744847f8a8a/greenlet-3.3.0-cp311-cp311-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:4243050a88ba61842186cb9e63c7dfa677ec146160b0efd73b855a3d9c7fcf32", size = 590338, upload-time = "2025-12-04T14:57:41.136Z" }, - { url = "https://files.pythonhosted.org/packages/80/d7/db0a5085035d05134f8c089643da2b44cc9b80647c39e93129c5ef170d8f/greenlet-3.3.0-cp311-cp311-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:670d0f94cd302d81796e37299bcd04b95d62403883b24225c6b5271466612f45", size = 601098, 
upload-time = "2025-12-04T15:07:11.898Z" }, - { url = "https://files.pythonhosted.org/packages/dc/a6/e959a127b630a58e23529972dbc868c107f9d583b5a9f878fb858c46bc1a/greenlet-3.3.0-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6cb3a8ec3db4a3b0eb8a3c25436c2d49e3505821802074969db017b87bc6a948", size = 590206, upload-time = "2025-12-04T14:26:01.254Z" }, - { url = "https://files.pythonhosted.org/packages/48/60/29035719feb91798693023608447283b266b12efc576ed013dd9442364bb/greenlet-3.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2de5a0b09eab81fc6a382791b995b1ccf2b172a9fec934747a7a23d2ff291794", size = 1550668, upload-time = "2025-12-04T15:04:22.439Z" }, - { url = "https://files.pythonhosted.org/packages/0a/5f/783a23754b691bfa86bd72c3033aa107490deac9b2ef190837b860996c9f/greenlet-3.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:4449a736606bd30f27f8e1ff4678ee193bc47f6ca810d705981cfffd6ce0d8c5", size = 1615483, upload-time = "2025-12-04T14:27:28.083Z" }, - { url = "https://files.pythonhosted.org/packages/1d/d5/c339b3b4bc8198b7caa4f2bd9fd685ac9f29795816d8db112da3d04175bb/greenlet-3.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:7652ee180d16d447a683c04e4c5f6441bae7ba7b17ffd9f6b3aff4605e9e6f71", size = 301164, upload-time = "2025-12-04T14:42:51.577Z" }, - { url = "https://files.pythonhosted.org/packages/f8/0a/a3871375c7b9727edaeeea994bfff7c63ff7804c9829c19309ba2e058807/greenlet-3.3.0-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:b01548f6e0b9e9784a2c99c5651e5dc89ffcbe870bc5fb2e5ef864e9cc6b5dcb", size = 276379, upload-time = "2025-12-04T14:23:30.498Z" }, - { url = "https://files.pythonhosted.org/packages/43/ab/7ebfe34dce8b87be0d11dae91acbf76f7b8246bf9d6b319c741f99fa59c6/greenlet-3.3.0-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:349345b770dc88f81506c6861d22a6ccd422207829d2c854ae2af8025af303e3", size = 597294, upload-time = "2025-12-04T14:50:06.847Z" }, - { url = "https://files.pythonhosted.org/packages/a4/39/f1c8da50024feecd0793dbd5e08f526809b8ab5609224a2da40aad3a7641/greenlet-3.3.0-cp312-cp312-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:e8e18ed6995e9e2c0b4ed264d2cf89260ab3ac7e13555b8032b25a74c6d18655", size = 607742, upload-time = "2025-12-04T14:57:42.349Z" }, - { url = "https://files.pythonhosted.org/packages/77/cb/43692bcd5f7a0da6ec0ec6d58ee7cddb606d055ce94a62ac9b1aa481e969/greenlet-3.3.0-cp312-cp312-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:c024b1e5696626890038e34f76140ed1daf858e37496d33f2af57f06189e70d7", size = 622297, upload-time = "2025-12-04T15:07:13.552Z" }, - { url = "https://files.pythonhosted.org/packages/75/b0/6bde0b1011a60782108c01de5913c588cf51a839174538d266de15e4bf4d/greenlet-3.3.0-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:047ab3df20ede6a57c35c14bf5200fcf04039d50f908270d3f9a7a82064f543b", size = 609885, upload-time = "2025-12-04T14:26:02.368Z" }, - { url = "https://files.pythonhosted.org/packages/49/0e/49b46ac39f931f59f987b7cd9f34bfec8ef81d2a1e6e00682f55be5de9f4/greenlet-3.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2d9ad37fc657b1102ec880e637cccf20191581f75c64087a549e66c57e1ceb53", size = 1567424, upload-time = "2025-12-04T15:04:23.757Z" }, - { url = "https://files.pythonhosted.org/packages/05/f5/49a9ac2dff7f10091935def9165c90236d8f175afb27cbed38fb1d61ab6b/greenlet-3.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:83cd0e36932e0e7f36a64b732a6f60c2fc2df28c351bae79fbaf4f8092fe7614", size = 
1636017, upload-time = "2025-12-04T14:27:29.688Z" }, - { url = "https://files.pythonhosted.org/packages/6c/79/3912a94cf27ec503e51ba493692d6db1e3cd8ac7ac52b0b47c8e33d7f4f9/greenlet-3.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:a7a34b13d43a6b78abf828a6d0e87d3385680eaf830cd60d20d52f249faabf39", size = 301964, upload-time = "2025-12-04T14:36:58.316Z" }, - { url = "https://files.pythonhosted.org/packages/02/2f/28592176381b9ab2cafa12829ba7b472d177f3acc35d8fbcf3673d966fff/greenlet-3.3.0-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:a1e41a81c7e2825822f4e068c48cb2196002362619e2d70b148f20a831c00739", size = 275140, upload-time = "2025-12-04T14:23:01.282Z" }, - { url = "https://files.pythonhosted.org/packages/2c/80/fbe937bf81e9fca98c981fe499e59a3f45df2a04da0baa5c2be0dca0d329/greenlet-3.3.0-cp313-cp313-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9f515a47d02da4d30caaa85b69474cec77b7929b2e936ff7fb853d42f4bf8808", size = 599219, upload-time = "2025-12-04T14:50:08.309Z" }, - { url = "https://files.pythonhosted.org/packages/c2/ff/7c985128f0514271b8268476af89aee6866df5eec04ac17dcfbc676213df/greenlet-3.3.0-cp313-cp313-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:7d2d9fd66bfadf230b385fdc90426fcd6eb64db54b40c495b72ac0feb5766c54", size = 610211, upload-time = "2025-12-04T14:57:43.968Z" }, - { url = "https://files.pythonhosted.org/packages/79/07/c47a82d881319ec18a4510bb30463ed6891f2ad2c1901ed5ec23d3de351f/greenlet-3.3.0-cp313-cp313-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:30a6e28487a790417d036088b3bcb3f3ac7d8babaa7d0139edbaddebf3af9492", size = 624311, upload-time = "2025-12-04T15:07:14.697Z" }, - { url = "https://files.pythonhosted.org/packages/fd/8e/424b8c6e78bd9837d14ff7df01a9829fc883ba2ab4ea787d4f848435f23f/greenlet-3.3.0-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:087ea5e004437321508a8d6f20efc4cfec5e3c30118e1417ea96ed1d93950527", size = 612833, upload-time = "2025-12-04T14:26:03.669Z" }, - { url = "https://files.pythonhosted.org/packages/b5/ba/56699ff9b7c76ca12f1cdc27a886d0f81f2189c3455ff9f65246780f713d/greenlet-3.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ab97cf74045343f6c60a39913fa59710e4bd26a536ce7ab2397adf8b27e67c39", size = 1567256, upload-time = "2025-12-04T15:04:25.276Z" }, - { url = "https://files.pythonhosted.org/packages/1e/37/f31136132967982d698c71a281a8901daf1a8fbab935dce7c0cf15f942cc/greenlet-3.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5375d2e23184629112ca1ea89a53389dddbffcf417dad40125713d88eb5f96e8", size = 1636483, upload-time = "2025-12-04T14:27:30.804Z" }, - { url = "https://files.pythonhosted.org/packages/7e/71/ba21c3fb8c5dce83b8c01f458a42e99ffdb1963aeec08fff5a18588d8fd7/greenlet-3.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:9ee1942ea19550094033c35d25d20726e4f1c40d59545815e1128ac58d416d38", size = 301833, upload-time = "2025-12-04T14:32:23.929Z" }, - { url = "https://files.pythonhosted.org/packages/d7/7c/f0a6d0ede2c7bf092d00bc83ad5bafb7e6ec9b4aab2fbdfa6f134dc73327/greenlet-3.3.0-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:60c2ef0f578afb3c8d92ea07ad327f9a062547137afe91f38408f08aacab667f", size = 275671, upload-time = "2025-12-04T14:23:05.267Z" }, - { url = "https://files.pythonhosted.org/packages/44/06/dac639ae1a50f5969d82d2e3dd9767d30d6dbdbab0e1a54010c8fe90263c/greenlet-3.3.0-cp314-cp314-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0a5d554d0712ba1de0a6c94c640f7aeba3f85b3a6e1f2899c11c2c0428da9365", size = 
646360, upload-time = "2025-12-04T14:50:10.026Z" }, - { url = "https://files.pythonhosted.org/packages/e0/94/0fb76fe6c5369fba9bf98529ada6f4c3a1adf19e406a47332245ef0eb357/greenlet-3.3.0-cp314-cp314-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:3a898b1e9c5f7307ebbde4102908e6cbfcb9ea16284a3abe15cab996bee8b9b3", size = 658160, upload-time = "2025-12-04T14:57:45.41Z" }, - { url = "https://files.pythonhosted.org/packages/93/79/d2c70cae6e823fac36c3bbc9077962105052b7ef81db2f01ec3b9bf17e2b/greenlet-3.3.0-cp314-cp314-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:dcd2bdbd444ff340e8d6bdf54d2f206ccddbb3ccfdcd3c25bf4afaa7b8f0cf45", size = 671388, upload-time = "2025-12-04T15:07:15.789Z" }, - { url = "https://files.pythonhosted.org/packages/b8/14/bab308fc2c1b5228c3224ec2bf928ce2e4d21d8046c161e44a2012b5203e/greenlet-3.3.0-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5773edda4dc00e173820722711d043799d3adb4f01731f40619e07ea2750b955", size = 660166, upload-time = "2025-12-04T14:26:05.099Z" }, - { url = "https://files.pythonhosted.org/packages/4b/d2/91465d39164eaa0085177f61983d80ffe746c5a1860f009811d498e7259c/greenlet-3.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:ac0549373982b36d5fd5d30beb8a7a33ee541ff98d2b502714a09f1169f31b55", size = 1615193, upload-time = "2025-12-04T15:04:27.041Z" }, - { url = "https://files.pythonhosted.org/packages/42/1b/83d110a37044b92423084d52d5d5a3b3a73cafb51b547e6d7366ff62eff1/greenlet-3.3.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d198d2d977460358c3b3a4dc844f875d1adb33817f0613f663a656f463764ccc", size = 1683653, upload-time = "2025-12-04T14:27:32.366Z" }, - { url = "https://files.pythonhosted.org/packages/7c/9a/9030e6f9aa8fd7808e9c31ba4c38f87c4f8ec324ee67431d181fe396d705/greenlet-3.3.0-cp314-cp314-win_amd64.whl", hash = "sha256:73f51dd0e0bdb596fb0417e475fa3c5e32d4c83638296e560086b8d7da7c4170", size = 305387, upload-time = "2025-12-04T14:26:51.063Z" }, - { url = "https://files.pythonhosted.org/packages/a0/66/bd6317bc5932accf351fc19f177ffba53712a202f9df10587da8df257c7e/greenlet-3.3.0-cp314-cp314t-macosx_11_0_universal2.whl", hash = "sha256:d6ed6f85fae6cdfdb9ce04c9bf7a08d666cfcfb914e7d006f44f840b46741931", size = 282638, upload-time = "2025-12-04T14:25:20.941Z" }, - { url = "https://files.pythonhosted.org/packages/30/cf/cc81cb030b40e738d6e69502ccbd0dd1bced0588e958f9e757945de24404/greenlet-3.3.0-cp314-cp314t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d9125050fcf24554e69c4cacb086b87b3b55dc395a8b3ebe6487b045b2614388", size = 651145, upload-time = "2025-12-04T14:50:11.039Z" }, - { url = "https://files.pythonhosted.org/packages/9c/ea/1020037b5ecfe95ca7df8d8549959baceb8186031da83d5ecceff8b08cd2/greenlet-3.3.0-cp314-cp314t-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:87e63ccfa13c0a0f6234ed0add552af24cc67dd886731f2261e46e241608bee3", size = 654236, upload-time = "2025-12-04T14:57:47.007Z" }, - { url = "https://files.pythonhosted.org/packages/69/cc/1e4bae2e45ca2fa55299f4e85854606a78ecc37fead20d69322f96000504/greenlet-3.3.0-cp314-cp314t-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2662433acbca297c9153a4023fe2161c8dcfdcc91f10433171cf7e7d94ba2221", size = 662506, upload-time = "2025-12-04T15:07:16.906Z" }, - { url = "https://files.pythonhosted.org/packages/57/b9/f8025d71a6085c441a7eaff0fd928bbb275a6633773667023d19179fe815/greenlet-3.3.0-cp314-cp314t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:3c6e9b9c1527a78520357de498b0e709fb9e2f49c3a513afd5a249007261911b", size = 653783, upload-time = "2025-12-04T14:26:06.225Z" }, - { url = "https://files.pythonhosted.org/packages/f6/c7/876a8c7a7485d5d6b5c6821201d542ef28be645aa024cfe1145b35c120c1/greenlet-3.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:286d093f95ec98fdd92fcb955003b8a3d054b4e2cab3e2707a5039e7b50520fd", size = 1614857, upload-time = "2025-12-04T15:04:28.484Z" }, - { url = "https://files.pythonhosted.org/packages/4f/dc/041be1dff9f23dac5f48a43323cd0789cb798342011c19a248d9c9335536/greenlet-3.3.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:6c10513330af5b8ae16f023e8ddbfb486ab355d04467c4679c5cfe4659975dd9", size = 1676034, upload-time = "2025-12-04T14:27:33.531Z" }, +version = "3.3.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8a/99/1cd3411c56a410994669062bd73dd58270c00cc074cac15f385a1fd91f8a/greenlet-3.3.1.tar.gz", hash = "sha256:41848f3230b58c08bb43dee542e74a2a2e34d3c59dc3076cec9151aeeedcae98", size = 184690, upload-time = "2026-01-23T15:31:02.076Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fe/65/5b235b40581ad75ab97dcd8b4218022ae8e3ab77c13c919f1a1dfe9171fd/greenlet-3.3.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:04bee4775f40ecefcdaa9d115ab44736cd4b9c5fba733575bfe9379419582e13", size = 273723, upload-time = "2026-01-23T15:30:37.521Z" }, + { url = "https://files.pythonhosted.org/packages/ce/ad/eb4729b85cba2d29499e0a04ca6fbdd8f540afd7be142fd571eea43d712f/greenlet-3.3.1-cp310-cp310-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:50e1457f4fed12a50e427988a07f0f9df53cf0ee8da23fab16e6732c2ec909d4", size = 574874, upload-time = "2026-01-23T16:00:54.551Z" }, + { url = "https://files.pythonhosted.org/packages/87/32/57cad7fe4c8b82fdaa098c89498ef85ad92dfbb09d5eb713adedfc2ae1f5/greenlet-3.3.1-cp310-cp310-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:070472cd156f0656f86f92e954591644e158fd65aa415ffbe2d44ca77656a8f5", size = 586309, upload-time = "2026-01-23T16:05:25.18Z" }, + { url = "https://files.pythonhosted.org/packages/87/eb/8a1ec2da4d55824f160594a75a9d8354a5fe0a300fb1c48e7944265217e1/greenlet-3.3.1-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3a300354f27dd86bae5fbf7002e6dd2b3255cd372e9242c933faf5e859b703fe", size = 586985, upload-time = "2026-01-23T15:32:47.968Z" }, + { url = "https://files.pythonhosted.org/packages/15/1c/0621dd4321dd8c351372ee8f9308136acb628600658a49be1b7504208738/greenlet-3.3.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e84b51cbebf9ae573b5fbd15df88887815e3253fc000a7d0ff95170e8f7e9729", size = 1547271, upload-time = "2026-01-23T16:04:18.977Z" }, + { url = "https://files.pythonhosted.org/packages/9d/53/24047f8924c83bea7a59c8678d9571209c6bfe5f4c17c94a78c06024e9f2/greenlet-3.3.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e0093bd1a06d899892427217f0ff2a3c8f306182b8c754336d32e2d587c131b4", size = 1613427, upload-time = "2026-01-23T15:33:44.428Z" }, + { url = "https://files.pythonhosted.org/packages/ff/07/ac9bf1ec008916d1a3373cae212884c1dcff4a4ba0d41127ce81a8deb4e9/greenlet-3.3.1-cp310-cp310-win_amd64.whl", hash = "sha256:7932f5f57609b6a3b82cc11877709aa7a98e3308983ed93552a1c377069b20c8", size = 226100, upload-time = "2026-01-23T15:30:56.957Z" }, + { url = 
"https://files.pythonhosted.org/packages/ec/e8/2e1462c8fdbe0f210feb5ac7ad2d9029af8be3bf45bd9fa39765f821642f/greenlet-3.3.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:5fd23b9bc6d37b563211c6abbb1b3cab27db385a4449af5c32e932f93017080c", size = 274974, upload-time = "2026-01-23T15:31:02.891Z" }, + { url = "https://files.pythonhosted.org/packages/7e/a8/530a401419a6b302af59f67aaf0b9ba1015855ea7e56c036b5928793c5bd/greenlet-3.3.1-cp311-cp311-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:09f51496a0bfbaa9d74d36a52d2580d1ef5ed4fdfcff0a73730abfbbbe1403dd", size = 577175, upload-time = "2026-01-23T16:00:56.213Z" }, + { url = "https://files.pythonhosted.org/packages/8e/89/7e812bb9c05e1aaef9b597ac1d0962b9021d2c6269354966451e885c4e6b/greenlet-3.3.1-cp311-cp311-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:cb0feb07fe6e6a74615ee62a880007d976cf739b6669cce95daa7373d4fc69c5", size = 590401, upload-time = "2026-01-23T16:05:26.365Z" }, + { url = "https://files.pythonhosted.org/packages/5c/ae/8d472e1f5ac5efe55c563f3eabb38c98a44b832602e12910750a7c025802/greenlet-3.3.1-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:39eda9ba259cc9801da05351eaa8576e9aa83eb9411e8f0c299e05d712a210f2", size = 590272, upload-time = "2026-01-23T15:32:49.411Z" }, + { url = "https://files.pythonhosted.org/packages/a8/51/0fde34bebfcadc833550717eade64e35ec8738e6b097d5d248274a01258b/greenlet-3.3.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:e2e7e882f83149f0a71ac822ebf156d902e7a5d22c9045e3e0d1daf59cee2cc9", size = 1550729, upload-time = "2026-01-23T16:04:20.867Z" }, + { url = "https://files.pythonhosted.org/packages/16/c9/2fb47bee83b25b119d5a35d580807bb8b92480a54b68fef009a02945629f/greenlet-3.3.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:80aa4d79eb5564f2e0a6144fcc744b5a37c56c4a92d60920720e99210d88db0f", size = 1615552, upload-time = "2026-01-23T15:33:45.743Z" }, + { url = "https://files.pythonhosted.org/packages/1f/54/dcf9f737b96606f82f8dd05becfb8d238db0633dd7397d542a296fe9cad3/greenlet-3.3.1-cp311-cp311-win_amd64.whl", hash = "sha256:32e4ca9777c5addcbf42ff3915d99030d8e00173a56f80001fb3875998fe410b", size = 226462, upload-time = "2026-01-23T15:36:50.422Z" }, + { url = "https://files.pythonhosted.org/packages/91/37/61e1015cf944ddd2337447d8e97fb423ac9bc21f9963fb5f206b53d65649/greenlet-3.3.1-cp311-cp311-win_arm64.whl", hash = "sha256:da19609432f353fed186cc1b85e9440db93d489f198b4bdf42ae19cc9d9ac9b4", size = 225715, upload-time = "2026-01-23T15:33:17.298Z" }, + { url = "https://files.pythonhosted.org/packages/f9/c8/9d76a66421d1ae24340dfae7e79c313957f6e3195c144d2c73333b5bfe34/greenlet-3.3.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:7e806ca53acf6d15a888405880766ec84721aa4181261cd11a457dfe9a7a4975", size = 276443, upload-time = "2026-01-23T15:30:10.066Z" }, + { url = "https://files.pythonhosted.org/packages/81/99/401ff34bb3c032d1f10477d199724f5e5f6fbfb59816ad1455c79c1eb8e7/greenlet-3.3.1-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d842c94b9155f1c9b3058036c24ffb8ff78b428414a19792b2380be9cecf4f36", size = 597359, upload-time = "2026-01-23T16:00:57.394Z" }, + { url = "https://files.pythonhosted.org/packages/2b/bc/4dcc0871ed557792d304f50be0f7487a14e017952ec689effe2180a6ff35/greenlet-3.3.1-cp312-cp312-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:20fedaadd422fa02695f82093f9a98bad3dab5fcda793c658b945fcde2ab27ba", size = 607805, upload-time = "2026-01-23T16:05:28.068Z" }, + { url = 
"https://files.pythonhosted.org/packages/cf/05/821587cf19e2ce1f2b24945d890b164401e5085f9d09cbd969b0c193cd20/greenlet-3.3.1-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:14194f5f4305800ff329cbf02c5fcc88f01886cadd29941b807668a45f0d2336", size = 609947, upload-time = "2026-01-23T15:32:51.004Z" }, + { url = "https://files.pythonhosted.org/packages/a4/52/ee8c46ed9f8babaa93a19e577f26e3d28a519feac6350ed6f25f1afee7e9/greenlet-3.3.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:7b2fe4150a0cf59f847a67db8c155ac36aed89080a6a639e9f16df5d6c6096f1", size = 1567487, upload-time = "2026-01-23T16:04:22.125Z" }, + { url = "https://files.pythonhosted.org/packages/8f/7c/456a74f07029597626f3a6db71b273a3632aecb9afafeeca452cfa633197/greenlet-3.3.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:49f4ad195d45f4a66a0eb9c1ba4832bb380570d361912fa3554746830d332149", size = 1636087, upload-time = "2026-01-23T15:33:47.486Z" }, + { url = "https://files.pythonhosted.org/packages/34/2f/5e0e41f33c69655300a5e54aeb637cf8ff57f1786a3aba374eacc0228c1d/greenlet-3.3.1-cp312-cp312-win_amd64.whl", hash = "sha256:cc98b9c4e4870fa983436afa999d4eb16b12872fab7071423d5262fa7120d57a", size = 227156, upload-time = "2026-01-23T15:34:34.808Z" }, + { url = "https://files.pythonhosted.org/packages/c8/ab/717c58343cf02c5265b531384b248787e04d8160b8afe53d9eec053d7b44/greenlet-3.3.1-cp312-cp312-win_arm64.whl", hash = "sha256:bfb2d1763d777de5ee495c85309460f6fd8146e50ec9d0ae0183dbf6f0a829d1", size = 226403, upload-time = "2026-01-23T15:31:39.372Z" }, + { url = "https://files.pythonhosted.org/packages/ec/ab/d26750f2b7242c2b90ea2ad71de70cfcd73a948a49513188a0fc0d6fc15a/greenlet-3.3.1-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:7ab327905cabb0622adca5971e488064e35115430cec2c35a50fd36e72a315b3", size = 275205, upload-time = "2026-01-23T15:30:24.556Z" }, + { url = "https://files.pythonhosted.org/packages/10/d3/be7d19e8fad7c5a78eeefb2d896a08cd4643e1e90c605c4be3b46264998f/greenlet-3.3.1-cp313-cp313-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:65be2f026ca6a176f88fb935ee23c18333ccea97048076aef4db1ef5bc0713ac", size = 599284, upload-time = "2026-01-23T16:00:58.584Z" }, + { url = "https://files.pythonhosted.org/packages/ae/21/fe703aaa056fdb0f17e5afd4b5c80195bbdab701208918938bd15b00d39b/greenlet-3.3.1-cp313-cp313-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:7a3ae05b3d225b4155bda56b072ceb09d05e974bc74be6c3fc15463cf69f33fd", size = 610274, upload-time = "2026-01-23T16:05:29.312Z" }, + { url = "https://files.pythonhosted.org/packages/cb/86/5c6ab23bb3c28c21ed6bebad006515cfe08b04613eb105ca0041fecca852/greenlet-3.3.1-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6423481193bbbe871313de5fd06a082f2649e7ce6e08015d2a76c1e9186ca5b3", size = 612904, upload-time = "2026-01-23T15:32:52.317Z" }, + { url = "https://files.pythonhosted.org/packages/c2/f3/7949994264e22639e40718c2daf6f6df5169bf48fb038c008a489ec53a50/greenlet-3.3.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:33a956fe78bbbda82bfc95e128d61129b32d66bcf0a20a1f0c08aa4839ffa951", size = 1567316, upload-time = "2026-01-23T16:04:23.316Z" }, + { url = "https://files.pythonhosted.org/packages/8d/6e/d73c94d13b6465e9f7cd6231c68abde838bb22408596c05d9059830b7872/greenlet-3.3.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4b065d3284be43728dd280f6f9a13990b56470b81be20375a207cdc814a983f2", size = 1636549, upload-time = "2026-01-23T15:33:48.643Z" }, + { url = 
"https://files.pythonhosted.org/packages/5e/b3/c9c23a6478b3bcc91f979ce4ca50879e4d0b2bd7b9a53d8ecded719b92e2/greenlet-3.3.1-cp313-cp313-win_amd64.whl", hash = "sha256:27289986f4e5b0edec7b5a91063c109f0276abb09a7e9bdab08437525977c946", size = 227042, upload-time = "2026-01-23T15:33:58.216Z" }, + { url = "https://files.pythonhosted.org/packages/90/e7/824beda656097edee36ab15809fd063447b200cc03a7f6a24c34d520bc88/greenlet-3.3.1-cp313-cp313-win_arm64.whl", hash = "sha256:2f080e028001c5273e0b42690eaf359aeef9cb1389da0f171ea51a5dc3c7608d", size = 226294, upload-time = "2026-01-23T15:30:52.73Z" }, + { url = "https://files.pythonhosted.org/packages/ae/fb/011c7c717213182caf78084a9bea51c8590b0afda98001f69d9f853a495b/greenlet-3.3.1-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:bd59acd8529b372775cd0fcbc5f420ae20681c5b045ce25bd453ed8455ab99b5", size = 275737, upload-time = "2026-01-23T15:32:16.889Z" }, + { url = "https://files.pythonhosted.org/packages/41/2e/a3a417d620363fdbb08a48b1dd582956a46a61bf8fd27ee8164f9dfe87c2/greenlet-3.3.1-cp314-cp314-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b31c05dd84ef6871dd47120386aed35323c944d86c3d91a17c4b8d23df62f15b", size = 646422, upload-time = "2026-01-23T16:01:00.354Z" }, + { url = "https://files.pythonhosted.org/packages/b4/09/c6c4a0db47defafd2d6bab8ddfe47ad19963b4e30f5bed84d75328059f8c/greenlet-3.3.1-cp314-cp314-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:02925a0bfffc41e542c70aa14c7eda3593e4d7e274bfcccca1827e6c0875902e", size = 658219, upload-time = "2026-01-23T16:05:30.956Z" }, + { url = "https://files.pythonhosted.org/packages/80/38/9d42d60dffb04b45f03dbab9430898352dba277758640751dc5cc316c521/greenlet-3.3.1-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:34a729e2e4e4ffe9ae2408d5ecaf12f944853f40ad724929b7585bca808a9d6f", size = 660237, upload-time = "2026-01-23T15:32:53.967Z" }, + { url = "https://files.pythonhosted.org/packages/96/61/373c30b7197f9e756e4c81ae90a8d55dc3598c17673f91f4d31c3c689c3f/greenlet-3.3.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:aec9ab04e82918e623415947921dea15851b152b822661cce3f8e4393c3df683", size = 1615261, upload-time = "2026-01-23T16:04:25.066Z" }, + { url = "https://files.pythonhosted.org/packages/fd/d3/ca534310343f5945316f9451e953dcd89b36fe7a19de652a1dc5a0eeef3f/greenlet-3.3.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:71c767cf281a80d02b6c1bdc41c9468e1f5a494fb11bc8688c360524e273d7b1", size = 1683719, upload-time = "2026-01-23T15:33:50.61Z" }, + { url = "https://files.pythonhosted.org/packages/52/cb/c21a3fd5d2c9c8b622e7bede6d6d00e00551a5ee474ea6d831b5f567a8b4/greenlet-3.3.1-cp314-cp314-win_amd64.whl", hash = "sha256:96aff77af063b607f2489473484e39a0bbae730f2ea90c9e5606c9b73c44174a", size = 228125, upload-time = "2026-01-23T15:32:45.265Z" }, + { url = "https://files.pythonhosted.org/packages/6a/8e/8a2db6d11491837af1de64b8aff23707c6e85241be13c60ed399a72e2ef8/greenlet-3.3.1-cp314-cp314-win_arm64.whl", hash = "sha256:b066e8b50e28b503f604fa538adc764a638b38cf8e81e025011d26e8a627fa79", size = 227519, upload-time = "2026-01-23T15:31:47.284Z" }, + { url = "https://files.pythonhosted.org/packages/28/24/cbbec49bacdcc9ec652a81d3efef7b59f326697e7edf6ed775a5e08e54c2/greenlet-3.3.1-cp314-cp314t-macosx_11_0_universal2.whl", hash = "sha256:3e63252943c921b90abb035ebe9de832c436401d9c45f262d80e2d06cc659242", size = 282706, upload-time = "2026-01-23T15:33:05.525Z" }, + { url = 
"https://files.pythonhosted.org/packages/86/2e/4f2b9323c144c4fe8842a4e0d92121465485c3c2c5b9e9b30a52e80f523f/greenlet-3.3.1-cp314-cp314t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:76e39058e68eb125de10c92524573924e827927df5d3891fbc97bd55764a8774", size = 651209, upload-time = "2026-01-23T16:01:01.517Z" }, + { url = "https://files.pythonhosted.org/packages/d9/87/50ca60e515f5bb55a2fbc5f0c9b5b156de7d2fc51a0a69abc9d23914a237/greenlet-3.3.1-cp314-cp314t-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c9f9d5e7a9310b7a2f416dd13d2e3fd8b42d803968ea580b7c0f322ccb389b97", size = 654300, upload-time = "2026-01-23T16:05:32.199Z" }, + { url = "https://files.pythonhosted.org/packages/1d/94/74310866dfa2b73dd08659a3d18762f83985ad3281901ba0ee9a815194fb/greenlet-3.3.1-cp314-cp314t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:92497c78adf3ac703b57f1e3813c2d874f27f71a178f9ea5887855da413cd6d2", size = 653842, upload-time = "2026-01-23T15:32:55.671Z" }, + { url = "https://files.pythonhosted.org/packages/97/43/8bf0ffa3d498eeee4c58c212a3905dd6146c01c8dc0b0a046481ca29b18c/greenlet-3.3.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:ed6b402bc74d6557a705e197d47f9063733091ed6357b3de33619d8a8d93ac53", size = 1614917, upload-time = "2026-01-23T16:04:26.276Z" }, + { url = "https://files.pythonhosted.org/packages/89/90/a3be7a5f378fc6e84abe4dcfb2ba32b07786861172e502388b4c90000d1b/greenlet-3.3.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:59913f1e5ada20fde795ba906916aea25d442abcc0593fba7e26c92b7ad76249", size = 1676092, upload-time = "2026-01-23T15:33:52.176Z" }, + { url = "https://files.pythonhosted.org/packages/e1/2b/98c7f93e6db9977aaee07eb1e51ca63bd5f779b900d362791d3252e60558/greenlet-3.3.1-cp314-cp314t-win_amd64.whl", hash = "sha256:301860987846c24cb8964bdec0e31a96ad4a2a801b41b4ef40963c1b44f33451", size = 233181, upload-time = "2026-01-23T15:33:00.29Z" }, ] [[package]] @@ -2633,7 +2701,7 @@ wheels = [ [[package]] name = "huggingface-hub" -version = "1.3.3" +version = "1.3.4" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "filelock", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -2647,9 +2715,9 @@ dependencies = [ { name = "typer-slim", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "typing-extensions", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/02/c3/544cd4cdd4b3c6de8591b56bb69efc3682e9ac81e36135c02e909dd98c5b/huggingface_hub-1.3.3.tar.gz", hash = "sha256:f8be6f468da4470db48351e8c77d6d8115dff9b3daeb30276e568767b1ff7574", size = 627649, upload-time = "2026-01-22T13:59:46.931Z" } +sdist = { url = "https://files.pythonhosted.org/packages/af/25/74af9d16cd59ae15b12467a79a84aa0fe24be4aba68fc4da0c1864d49c17/huggingface_hub-1.3.4.tar.gz", hash = "sha256:c20d5484a611b7b7891d272e8fc9f77d5de025b0480bdacfa858efb3780b455f", size = 627683, upload-time = "2026-01-26T14:05:10.656Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/48/e8/0d032698916b9773b710c46e3b8e0154fc34cd017b151cc316c84c6c34fe/huggingface_hub-1.3.3-py3-none-any.whl", hash = "sha256:44af7b62380efc87c1c3bde7e1bf0661899b5bdfca1fc60975c61ee68410e10e", size = 536604, upload-time = "2026-01-22T13:59:45.391Z" }, + { url = 
"https://files.pythonhosted.org/packages/55/07/3d0c34c345043c6a398a5882e196b2220dc5861adfa18322448b90908f26/huggingface_hub-1.3.4-py3-none-any.whl", hash = "sha256:a0c526e76eb316e96a91e8a1a7a93cf66b0dd210be1a17bd5fc5ae53cba76bfd", size = 536611, upload-time = "2026-01-26T14:05:08.549Z" }, ] [[package]] @@ -3106,14 +3174,12 @@ wheels = [ [[package]] name = "litellm" -version = "1.81.1" +version = "1.81.3" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "aiohttp", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "click", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "fastuuid", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, - { name = "grpcio", version = "1.67.1", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.14' and sys_platform == 'darwin') or (python_full_version < '3.14' and sys_platform == 'linux') or (python_full_version < '3.14' and sys_platform == 'win32')" }, - { name = "grpcio", version = "1.76.0", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.14' and sys_platform == 'darwin') or (python_full_version >= '3.14' and sys_platform == 'linux') or (python_full_version >= '3.14' and sys_platform == 'win32')" }, { name = "httpx", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "importlib-metadata", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "jinja2", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -3124,9 +3190,9 @@ dependencies = [ { name = "tiktoken", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "tokenizers", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/c5/2b/299d54f95e02e9ed551c186e881a4ac0eaa5a948a6be93ecaa26a748f1be/litellm-1.81.1.tar.gz", hash = "sha256:9c758db8abff04a2f1f43582d042080e36f245fe34cfbafe2f8b7ca8f1de29b6", size = 13487469, upload-time = "2026-01-21T12:55:58.271Z" } +sdist = { url = "https://files.pythonhosted.org/packages/ce/dd/d70835d5b231617761717cd5ba60342b677693093a71d5ce13ae9d254aee/litellm-1.81.3.tar.gz", hash = "sha256:a7688b429a88abfdd02f2a8c3158ebb5385689cfb7f9d4ac1473d018b2047e1b", size = 13612652, upload-time = "2026-01-25T02:45:58.888Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/59/73/8d100c4e48935f6a381df60f894ca9c063ea412ce354fbe7a17770ad4092/litellm-1.81.1-py3-none-any.whl", hash = "sha256:503512a8a7f3cddf9d8fed6182c14f1e77c5655635fe67b09efb09c75234bb87", size = 11795146, upload-time = "2026-01-21T12:55:55.613Z" }, + { url = "https://files.pythonhosted.org/packages/83/62/d3f53c665261fdd5bb2401246e005a4ea8194ad1c4d8c663318ae3d638bf/litellm-1.81.3-py3-none-any.whl", hash = "sha256:3f60fd8b727587952ad3dd18b68f5fed538d6f43d15bb0356f4c3a11bccb2b92", size = 11946995, upload-time = "2026-01-25T02:45:55.887Z" }, ] [package.optional-dependencies] @@ -3168,11 +3234,11 @@ wheels = [ [[package]] name = "litellm-proxy-extras" -version = "0.4.25" +version = "0.4.27" source = { registry = "https://pypi.org/simple" } -sdist = { url = 
"https://files.pythonhosted.org/packages/0d/97/48222feea258b987b43c79f0d2c66a311c886e958220308bc0fa7f2f7251/litellm_proxy_extras-0.4.25.tar.gz", hash = "sha256:a03790e574ec6b8098c74d49836313651c0a0e72354a716c76c50ed16b087815", size = 22424, upload-time = "2026-01-20T23:22:33.786Z" } +sdist = { url = "https://files.pythonhosted.org/packages/01/af/9fdc22e7e3dcaa44c0f206a3f12065286c32d7e453f87e14dac1e69cf49a/litellm_proxy_extras-0.4.27.tar.gz", hash = "sha256:81059120016cfc03c82aa9664424912bdcffad103f66a5f925fef6b26f2cc151", size = 23269, upload-time = "2026-01-24T22:03:26.97Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/4b/50/6a59c33eb5fdcdd4f8c121af576dac8d1c0f337c0d222bedd835e66d4b98/litellm_proxy_extras-0.4.25-py3-none-any.whl", hash = "sha256:da79e1a7a999020a82ec33c45d8fd35eb390ff3d0bc3d7686542b3529aff2cda", size = 48767, upload-time = "2026-01-20T23:22:31.912Z" }, + { url = "https://files.pythonhosted.org/packages/50/c8/508b5a277e5d56e71ef51c5fe8111c7ec045ffd98f126089af803171ccc6/litellm_proxy_extras-0.4.27-py3-none-any.whl", hash = "sha256:752c1faabc86ce3d2b1fa451495d34de82323798e37b9cb5c0fea93deae1c5c8", size = 50073, upload-time = "2026-01-24T22:03:25.757Z" }, ] [[package]] @@ -3362,7 +3428,7 @@ wheels = [ [[package]] name = "mcp" -version = "1.25.0" +version = "1.26.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, @@ -3380,9 +3446,9 @@ dependencies = [ { name = "typing-inspection", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "uvicorn", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/d5/2d/649d80a0ecf6a1f82632ca44bec21c0461a9d9fc8934d38cb5b319f2db5e/mcp-1.25.0.tar.gz", hash = "sha256:56310361ebf0364e2d438e5b45f7668cbb124e158bb358333cd06e49e83a6802", size = 605387, upload-time = "2025-12-19T10:19:56.985Z" } +sdist = { url = "https://files.pythonhosted.org/packages/fc/6d/62e76bbb8144d6ed86e202b5edd8a4cb631e7c8130f3f4893c3f90262b10/mcp-1.26.0.tar.gz", hash = "sha256:db6e2ef491eecc1a0d93711a76f28dec2e05999f93afd48795da1c1137142c66", size = 608005, upload-time = "2026-01-24T19:40:32.468Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/e2/fc/6dc7659c2ae5ddf280477011f4213a74f806862856b796ef08f028e664bf/mcp-1.25.0-py3-none-any.whl", hash = "sha256:b37c38144a666add0862614cc79ec276e97d72aa8ca26d622818d4e278b9721a", size = 233076, upload-time = "2025-12-19T10:19:55.416Z" }, + { url = "https://files.pythonhosted.org/packages/fd/d9/eaa1f80170d2b7c5ba23f3b59f766f3a0bb41155fbc32a69adfa1adaaef9/mcp-1.26.0-py3-none-any.whl", hash = "sha256:904a21c33c25aa98ddbeb47273033c435e595bbacfdb177f4bd87f6dceebe1ca", size = 233615, upload-time = "2026-01-24T19:40:30.652Z" }, ] [package.optional-dependencies] @@ -3540,140 +3606,140 @@ wheels = [ [[package]] name = "multidict" -version = "6.7.0" +version = "6.7.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "typing-extensions", marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/80/1e/5492c365f222f907de1039b91f922b93fa4f764c713ee858d235495d8f50/multidict-6.7.0.tar.gz", hash = 
"sha256:c6e99d9a65ca282e578dfea819cfa9c0a62b2499d8677392e09feaf305e9e6f5", size = 101834, upload-time = "2025-10-06T14:52:30.657Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/a9/63/7bdd4adc330abcca54c85728db2327130e49e52e8c3ce685cec44e0f2e9f/multidict-6.7.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:9f474ad5acda359c8758c8accc22032c6abe6dc87a8be2440d097785e27a9349", size = 77153, upload-time = "2025-10-06T14:48:26.409Z" }, - { url = "https://files.pythonhosted.org/packages/3f/bb/b6c35ff175ed1a3142222b78455ee31be71a8396ed3ab5280fbe3ebe4e85/multidict-6.7.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4b7a9db5a870f780220e931d0002bbfd88fb53aceb6293251e2c839415c1b20e", size = 44993, upload-time = "2025-10-06T14:48:28.4Z" }, - { url = "https://files.pythonhosted.org/packages/e0/1f/064c77877c5fa6df6d346e68075c0f6998547afe952d6471b4c5f6a7345d/multidict-6.7.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:03ca744319864e92721195fa28c7a3b2bc7b686246b35e4078c1e4d0eb5466d3", size = 44607, upload-time = "2025-10-06T14:48:29.581Z" }, - { url = "https://files.pythonhosted.org/packages/04/7a/bf6aa92065dd47f287690000b3d7d332edfccb2277634cadf6a810463c6a/multidict-6.7.0-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:f0e77e3c0008bc9316e662624535b88d360c3a5d3f81e15cf12c139a75250046", size = 241847, upload-time = "2025-10-06T14:48:32.107Z" }, - { url = "https://files.pythonhosted.org/packages/94/39/297a8de920f76eda343e4ce05f3b489f0ab3f9504f2576dfb37b7c08ca08/multidict-6.7.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:08325c9e5367aa379a3496aa9a022fe8837ff22e00b94db256d3a1378c76ab32", size = 242616, upload-time = "2025-10-06T14:48:34.054Z" }, - { url = "https://files.pythonhosted.org/packages/39/3a/d0eee2898cfd9d654aea6cb8c4addc2f9756e9a7e09391cfe55541f917f7/multidict-6.7.0-cp310-cp310-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e2862408c99f84aa571ab462d25236ef9cb12a602ea959ba9c9009a54902fc73", size = 222333, upload-time = "2025-10-06T14:48:35.9Z" }, - { url = "https://files.pythonhosted.org/packages/05/48/3b328851193c7a4240815b71eea165b49248867bbb6153a0aee227a0bb47/multidict-6.7.0-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:4d72a9a2d885f5c208b0cb91ff2ed43636bb7e345ec839ff64708e04f69a13cc", size = 253239, upload-time = "2025-10-06T14:48:37.302Z" }, - { url = "https://files.pythonhosted.org/packages/b1/ca/0706a98c8d126a89245413225ca4a3fefc8435014de309cf8b30acb68841/multidict-6.7.0-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:478cc36476687bac1514d651cbbaa94b86b0732fb6855c60c673794c7dd2da62", size = 251618, upload-time = "2025-10-06T14:48:38.963Z" }, - { url = "https://files.pythonhosted.org/packages/5e/4f/9c7992f245554d8b173f6f0a048ad24b3e645d883f096857ec2c0822b8bd/multidict-6.7.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6843b28b0364dc605f21481c90fadb5f60d9123b442eb8a726bb74feef588a84", size = 241655, upload-time = "2025-10-06T14:48:40.312Z" }, - { url = "https://files.pythonhosted.org/packages/31/79/26a85991ae67efd1c0b1fc2e0c275b8a6aceeb155a68861f63f87a798f16/multidict-6.7.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:23bfeee5316266e5ee2d625df2d2c602b829435fc3a235c2ba2131495706e4a0", size = 239245, upload-time = "2025-10-06T14:48:41.848Z" }, - { url = 
"https://files.pythonhosted.org/packages/14/1e/75fa96394478930b79d0302eaf9a6c69f34005a1a5251ac8b9c336486ec9/multidict-6.7.0-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:680878b9f3d45c31e1f730eef731f9b0bc1da456155688c6745ee84eb818e90e", size = 233523, upload-time = "2025-10-06T14:48:43.749Z" }, - { url = "https://files.pythonhosted.org/packages/b2/5e/085544cb9f9c4ad2b5d97467c15f856df8d9bac410cffd5c43991a5d878b/multidict-6.7.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:eb866162ef2f45063acc7a53a88ef6fe8bf121d45c30ea3c9cd87ce7e191a8d4", size = 243129, upload-time = "2025-10-06T14:48:45.225Z" }, - { url = "https://files.pythonhosted.org/packages/b9/c3/e9d9e2f20c9474e7a8fcef28f863c5cbd29bb5adce6b70cebe8bdad0039d/multidict-6.7.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:df0e3bf7993bdbeca5ac25aa859cf40d39019e015c9c91809ba7093967f7a648", size = 248999, upload-time = "2025-10-06T14:48:46.703Z" }, - { url = "https://files.pythonhosted.org/packages/b5/3f/df171b6efa3239ae33b97b887e42671cd1d94d460614bfb2c30ffdab3b95/multidict-6.7.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:661709cdcd919a2ece2234f9bae7174e5220c80b034585d7d8a755632d3e2111", size = 243711, upload-time = "2025-10-06T14:48:48.146Z" }, - { url = "https://files.pythonhosted.org/packages/3c/2f/9b5564888c4e14b9af64c54acf149263721a283aaf4aa0ae89b091d5d8c1/multidict-6.7.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:096f52730c3fb8ed419db2d44391932b63891b2c5ed14850a7e215c0ba9ade36", size = 237504, upload-time = "2025-10-06T14:48:49.447Z" }, - { url = "https://files.pythonhosted.org/packages/6c/3a/0bd6ca0f7d96d790542d591c8c3354c1e1b6bfd2024d4d92dc3d87485ec7/multidict-6.7.0-cp310-cp310-win32.whl", hash = "sha256:afa8a2978ec65d2336305550535c9c4ff50ee527914328c8677b3973ade52b85", size = 41422, upload-time = "2025-10-06T14:48:50.789Z" }, - { url = "https://files.pythonhosted.org/packages/00/35/f6a637ea2c75f0d3b7c7d41b1189189acff0d9deeb8b8f35536bb30f5e33/multidict-6.7.0-cp310-cp310-win_amd64.whl", hash = "sha256:b15b3afff74f707b9275d5ba6a91ae8f6429c3ffb29bbfd216b0b375a56f13d7", size = 46050, upload-time = "2025-10-06T14:48:51.938Z" }, - { url = "https://files.pythonhosted.org/packages/e7/b8/f7bf8329b39893d02d9d95cf610c75885d12fc0f402b1c894e1c8e01c916/multidict-6.7.0-cp310-cp310-win_arm64.whl", hash = "sha256:4b73189894398d59131a66ff157837b1fafea9974be486d036bb3d32331fdbf0", size = 43153, upload-time = "2025-10-06T14:48:53.146Z" }, - { url = "https://files.pythonhosted.org/packages/34/9e/5c727587644d67b2ed479041e4b1c58e30afc011e3d45d25bbe35781217c/multidict-6.7.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:4d409aa42a94c0b3fa617708ef5276dfe81012ba6753a0370fcc9d0195d0a1fc", size = 76604, upload-time = "2025-10-06T14:48:54.277Z" }, - { url = "https://files.pythonhosted.org/packages/17/e4/67b5c27bd17c085a5ea8f1ec05b8a3e5cba0ca734bfcad5560fb129e70ca/multidict-6.7.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:14c9e076eede3b54c636f8ce1c9c252b5f057c62131211f0ceeec273810c9721", size = 44715, upload-time = "2025-10-06T14:48:55.445Z" }, - { url = "https://files.pythonhosted.org/packages/4d/e1/866a5d77be6ea435711bef2a4291eed11032679b6b28b56b4776ab06ba3e/multidict-6.7.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4c09703000a9d0fa3c3404b27041e574cc7f4df4c6563873246d0e11812a94b6", size = 44332, upload-time = "2025-10-06T14:48:56.706Z" }, - { url = 
"https://files.pythonhosted.org/packages/31/61/0c2d50241ada71ff61a79518db85ada85fdabfcf395d5968dae1cbda04e5/multidict-6.7.0-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:a265acbb7bb33a3a2d626afbe756371dce0279e7b17f4f4eda406459c2b5ff1c", size = 245212, upload-time = "2025-10-06T14:48:58.042Z" }, - { url = "https://files.pythonhosted.org/packages/ac/e0/919666a4e4b57fff1b57f279be1c9316e6cdc5de8a8b525d76f6598fefc7/multidict-6.7.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:51cb455de290ae462593e5b1cb1118c5c22ea7f0d3620d9940bf695cea5a4bd7", size = 246671, upload-time = "2025-10-06T14:49:00.004Z" }, - { url = "https://files.pythonhosted.org/packages/a1/cc/d027d9c5a520f3321b65adea289b965e7bcbd2c34402663f482648c716ce/multidict-6.7.0-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:db99677b4457c7a5c5a949353e125ba72d62b35f74e26da141530fbb012218a7", size = 225491, upload-time = "2025-10-06T14:49:01.393Z" }, - { url = "https://files.pythonhosted.org/packages/75/c4/bbd633980ce6155a28ff04e6a6492dd3335858394d7bb752d8b108708558/multidict-6.7.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f470f68adc395e0183b92a2f4689264d1ea4b40504a24d9882c27375e6662bb9", size = 257322, upload-time = "2025-10-06T14:49:02.745Z" }, - { url = "https://files.pythonhosted.org/packages/4c/6d/d622322d344f1f053eae47e033b0b3f965af01212de21b10bcf91be991fb/multidict-6.7.0-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0db4956f82723cc1c270de9c6e799b4c341d327762ec78ef82bb962f79cc07d8", size = 254694, upload-time = "2025-10-06T14:49:04.15Z" }, - { url = "https://files.pythonhosted.org/packages/a8/9f/78f8761c2705d4c6d7516faed63c0ebdac569f6db1bef95e0d5218fdc146/multidict-6.7.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3e56d780c238f9e1ae66a22d2adf8d16f485381878250db8d496623cd38b22bd", size = 246715, upload-time = "2025-10-06T14:49:05.967Z" }, - { url = "https://files.pythonhosted.org/packages/78/59/950818e04f91b9c2b95aab3d923d9eabd01689d0dcd889563988e9ea0fd8/multidict-6.7.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9d14baca2ee12c1a64740d4531356ba50b82543017f3ad6de0deb943c5979abb", size = 243189, upload-time = "2025-10-06T14:49:07.37Z" }, - { url = "https://files.pythonhosted.org/packages/7a/3d/77c79e1934cad2ee74991840f8a0110966d9599b3af95964c0cd79bb905b/multidict-6.7.0-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:295a92a76188917c7f99cda95858c822f9e4aae5824246bba9b6b44004ddd0a6", size = 237845, upload-time = "2025-10-06T14:49:08.759Z" }, - { url = "https://files.pythonhosted.org/packages/63/1b/834ce32a0a97a3b70f86437f685f880136677ac00d8bce0027e9fd9c2db7/multidict-6.7.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:39f1719f57adbb767ef592a50ae5ebb794220d1188f9ca93de471336401c34d2", size = 246374, upload-time = "2025-10-06T14:49:10.574Z" }, - { url = "https://files.pythonhosted.org/packages/23/ef/43d1c3ba205b5dec93dc97f3fba179dfa47910fc73aaaea4f7ceb41cec2a/multidict-6.7.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:0a13fb8e748dfc94749f622de065dd5c1def7e0d2216dba72b1d8069a389c6ff", size = 253345, upload-time = "2025-10-06T14:49:12.331Z" }, - { url = 
"https://files.pythonhosted.org/packages/6b/03/eaf95bcc2d19ead522001f6a650ef32811aa9e3624ff0ad37c445c7a588c/multidict-6.7.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:e3aa16de190d29a0ea1b48253c57d99a68492c8dd8948638073ab9e74dc9410b", size = 246940, upload-time = "2025-10-06T14:49:13.821Z" }, - { url = "https://files.pythonhosted.org/packages/e8/df/ec8a5fd66ea6cd6f525b1fcbb23511b033c3e9bc42b81384834ffa484a62/multidict-6.7.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a048ce45dcdaaf1defb76b2e684f997fb5abf74437b6cb7b22ddad934a964e34", size = 242229, upload-time = "2025-10-06T14:49:15.603Z" }, - { url = "https://files.pythonhosted.org/packages/8a/a2/59b405d59fd39ec86d1142630e9049243015a5f5291ba49cadf3c090c541/multidict-6.7.0-cp311-cp311-win32.whl", hash = "sha256:a90af66facec4cebe4181b9e62a68be65e45ac9b52b67de9eec118701856e7ff", size = 41308, upload-time = "2025-10-06T14:49:16.871Z" }, - { url = "https://files.pythonhosted.org/packages/32/0f/13228f26f8b882c34da36efa776c3b7348455ec383bab4a66390e42963ae/multidict-6.7.0-cp311-cp311-win_amd64.whl", hash = "sha256:95b5ffa4349df2887518bb839409bcf22caa72d82beec453216802f475b23c81", size = 46037, upload-time = "2025-10-06T14:49:18.457Z" }, - { url = "https://files.pythonhosted.org/packages/84/1f/68588e31b000535a3207fd3c909ebeec4fb36b52c442107499c18a896a2a/multidict-6.7.0-cp311-cp311-win_arm64.whl", hash = "sha256:329aa225b085b6f004a4955271a7ba9f1087e39dcb7e65f6284a988264a63912", size = 43023, upload-time = "2025-10-06T14:49:19.648Z" }, - { url = "https://files.pythonhosted.org/packages/c2/9e/9f61ac18d9c8b475889f32ccfa91c9f59363480613fc807b6e3023d6f60b/multidict-6.7.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:8a3862568a36d26e650a19bb5cbbba14b71789032aebc0423f8cc5f150730184", size = 76877, upload-time = "2025-10-06T14:49:20.884Z" }, - { url = "https://files.pythonhosted.org/packages/38/6f/614f09a04e6184f8824268fce4bc925e9849edfa654ddd59f0b64508c595/multidict-6.7.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:960c60b5849b9b4f9dcc9bea6e3626143c252c74113df2c1540aebce70209b45", size = 45467, upload-time = "2025-10-06T14:49:22.054Z" }, - { url = "https://files.pythonhosted.org/packages/b3/93/c4f67a436dd026f2e780c433277fff72be79152894d9fc36f44569cab1a6/multidict-6.7.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2049be98fb57a31b4ccf870bf377af2504d4ae35646a19037ec271e4c07998aa", size = 43834, upload-time = "2025-10-06T14:49:23.566Z" }, - { url = "https://files.pythonhosted.org/packages/7f/f5/013798161ca665e4a422afbc5e2d9e4070142a9ff8905e482139cd09e4d0/multidict-6.7.0-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:0934f3843a1860dd465d38895c17fce1f1cb37295149ab05cd1b9a03afacb2a7", size = 250545, upload-time = "2025-10-06T14:49:24.882Z" }, - { url = "https://files.pythonhosted.org/packages/71/2f/91dbac13e0ba94669ea5119ba267c9a832f0cb65419aca75549fcf09a3dc/multidict-6.7.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b3e34f3a1b8131ba06f1a73adab24f30934d148afcd5f5de9a73565a4404384e", size = 258305, upload-time = "2025-10-06T14:49:26.778Z" }, - { url = "https://files.pythonhosted.org/packages/ef/b0/754038b26f6e04488b48ac621f779c341338d78503fb45403755af2df477/multidict-6.7.0-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:efbb54e98446892590dc2458c19c10344ee9a883a79b5cec4bc34d6656e8d546", size = 242363, upload-time = "2025-10-06T14:49:28.562Z" }, - { url = 
"https://files.pythonhosted.org/packages/87/15/9da40b9336a7c9fa606c4cf2ed80a649dffeb42b905d4f63a1d7eb17d746/multidict-6.7.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a35c5fc61d4f51eb045061e7967cfe3123d622cd500e8868e7c0c592a09fedc4", size = 268375, upload-time = "2025-10-06T14:49:29.96Z" }, - { url = "https://files.pythonhosted.org/packages/82/72/c53fcade0cc94dfaad583105fd92b3a783af2091eddcb41a6d5a52474000/multidict-6.7.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:29fe6740ebccba4175af1b9b87bf553e9c15cd5868ee967e010efcf94e4fd0f1", size = 269346, upload-time = "2025-10-06T14:49:31.404Z" }, - { url = "https://files.pythonhosted.org/packages/0d/e2/9baffdae21a76f77ef8447f1a05a96ec4bc0a24dae08767abc0a2fe680b8/multidict-6.7.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:123e2a72e20537add2f33a79e605f6191fba2afda4cbb876e35c1a7074298a7d", size = 256107, upload-time = "2025-10-06T14:49:32.974Z" }, - { url = "https://files.pythonhosted.org/packages/3c/06/3f06f611087dc60d65ef775f1fb5aca7c6d61c6db4990e7cda0cef9b1651/multidict-6.7.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:b284e319754366c1aee2267a2036248b24eeb17ecd5dc16022095e747f2f4304", size = 253592, upload-time = "2025-10-06T14:49:34.52Z" }, - { url = "https://files.pythonhosted.org/packages/20/24/54e804ec7945b6023b340c412ce9c3f81e91b3bf5fa5ce65558740141bee/multidict-6.7.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:803d685de7be4303b5a657b76e2f6d1240e7e0a8aa2968ad5811fa2285553a12", size = 251024, upload-time = "2025-10-06T14:49:35.956Z" }, - { url = "https://files.pythonhosted.org/packages/14/48/011cba467ea0b17ceb938315d219391d3e421dfd35928e5dbdc3f4ae76ef/multidict-6.7.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:c04a328260dfd5db8c39538f999f02779012268f54614902d0afc775d44e0a62", size = 251484, upload-time = "2025-10-06T14:49:37.631Z" }, - { url = "https://files.pythonhosted.org/packages/0d/2f/919258b43bb35b99fa127435cfb2d91798eb3a943396631ef43e3720dcf4/multidict-6.7.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8a19cdb57cd3df4cd865849d93ee14920fb97224300c88501f16ecfa2604b4e0", size = 263579, upload-time = "2025-10-06T14:49:39.502Z" }, - { url = "https://files.pythonhosted.org/packages/31/22/a0e884d86b5242b5a74cf08e876bdf299e413016b66e55511f7a804a366e/multidict-6.7.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:9b2fd74c52accced7e75de26023b7dccee62511a600e62311b918ec5c168fc2a", size = 259654, upload-time = "2025-10-06T14:49:41.32Z" }, - { url = "https://files.pythonhosted.org/packages/b2/e5/17e10e1b5c5f5a40f2fcbb45953c9b215f8a4098003915e46a93f5fcaa8f/multidict-6.7.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3e8bfdd0e487acf992407a140d2589fe598238eaeffa3da8448d63a63cd363f8", size = 251511, upload-time = "2025-10-06T14:49:46.021Z" }, - { url = "https://files.pythonhosted.org/packages/e3/9a/201bb1e17e7af53139597069c375e7b0dcbd47594604f65c2d5359508566/multidict-6.7.0-cp312-cp312-win32.whl", hash = "sha256:dd32a49400a2c3d52088e120ee00c1e3576cbff7e10b98467962c74fdb762ed4", size = 41895, upload-time = "2025-10-06T14:49:48.718Z" }, - { url = "https://files.pythonhosted.org/packages/46/e2/348cd32faad84eaf1d20cce80e2bb0ef8d312c55bca1f7fa9865e7770aaf/multidict-6.7.0-cp312-cp312-win_amd64.whl", hash = "sha256:92abb658ef2d7ef22ac9f8bb88e8b6c3e571671534e029359b6d9e845923eb1b", size = 46073, upload-time = "2025-10-06T14:49:50.28Z" }, - { url = 
"https://files.pythonhosted.org/packages/25/ec/aad2613c1910dce907480e0c3aa306905830f25df2e54ccc9dea450cb5aa/multidict-6.7.0-cp312-cp312-win_arm64.whl", hash = "sha256:490dab541a6a642ce1a9d61a4781656b346a55c13038f0b1244653828e3a83ec", size = 43226, upload-time = "2025-10-06T14:49:52.304Z" }, - { url = "https://files.pythonhosted.org/packages/d2/86/33272a544eeb36d66e4d9a920602d1a2f57d4ebea4ef3cdfe5a912574c95/multidict-6.7.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:bee7c0588aa0076ce77c0ea5d19a68d76ad81fcd9fe8501003b9a24f9d4000f6", size = 76135, upload-time = "2025-10-06T14:49:54.26Z" }, - { url = "https://files.pythonhosted.org/packages/91/1c/eb97db117a1ebe46d457a3d235a7b9d2e6dcab174f42d1b67663dd9e5371/multidict-6.7.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:7ef6b61cad77091056ce0e7ce69814ef72afacb150b7ac6a3e9470def2198159", size = 45117, upload-time = "2025-10-06T14:49:55.82Z" }, - { url = "https://files.pythonhosted.org/packages/f1/d8/6c3442322e41fb1dd4de8bd67bfd11cd72352ac131f6368315617de752f1/multidict-6.7.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9c0359b1ec12b1d6849c59f9d319610b7f20ef990a6d454ab151aa0e3b9f78ca", size = 43472, upload-time = "2025-10-06T14:49:57.048Z" }, - { url = "https://files.pythonhosted.org/packages/75/3f/e2639e80325af0b6c6febdf8e57cc07043ff15f57fa1ef808f4ccb5ac4cd/multidict-6.7.0-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:cd240939f71c64bd658f186330603aac1a9a81bf6273f523fca63673cb7378a8", size = 249342, upload-time = "2025-10-06T14:49:58.368Z" }, - { url = "https://files.pythonhosted.org/packages/5d/cc/84e0585f805cbeaa9cbdaa95f9a3d6aed745b9d25700623ac89a6ecff400/multidict-6.7.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a60a4d75718a5efa473ebd5ab685786ba0c67b8381f781d1be14da49f1a2dc60", size = 257082, upload-time = "2025-10-06T14:49:59.89Z" }, - { url = "https://files.pythonhosted.org/packages/b0/9c/ac851c107c92289acbbf5cfb485694084690c1b17e555f44952c26ddc5bd/multidict-6.7.0-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:53a42d364f323275126aff81fb67c5ca1b7a04fda0546245730a55c8c5f24bc4", size = 240704, upload-time = "2025-10-06T14:50:01.485Z" }, - { url = "https://files.pythonhosted.org/packages/50/cc/5f93e99427248c09da95b62d64b25748a5f5c98c7c2ab09825a1d6af0e15/multidict-6.7.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:3b29b980d0ddbecb736735ee5bef69bb2ddca56eff603c86f3f29a1128299b4f", size = 266355, upload-time = "2025-10-06T14:50:02.955Z" }, - { url = "https://files.pythonhosted.org/packages/ec/0c/2ec1d883ceb79c6f7f6d7ad90c919c898f5d1c6ea96d322751420211e072/multidict-6.7.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f8a93b1c0ed2d04b97a5e9336fd2d33371b9a6e29ab7dd6503d63407c20ffbaf", size = 267259, upload-time = "2025-10-06T14:50:04.446Z" }, - { url = "https://files.pythonhosted.org/packages/c6/2d/f0b184fa88d6630aa267680bdb8623fb69cb0d024b8c6f0d23f9a0f406d3/multidict-6.7.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9ff96e8815eecacc6645da76c413eb3b3d34cfca256c70b16b286a687d013c32", size = 254903, upload-time = "2025-10-06T14:50:05.98Z" }, - { url = "https://files.pythonhosted.org/packages/06/c9/11ea263ad0df7dfabcad404feb3c0dd40b131bc7f232d5537f2fb1356951/multidict-6.7.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:7516c579652f6a6be0e266aec0acd0db80829ca305c3d771ed898538804c2036", size = 252365, upload-time = "2025-10-06T14:50:07.511Z" }, - { url = "https://files.pythonhosted.org/packages/41/88/d714b86ee2c17d6e09850c70c9d310abac3d808ab49dfa16b43aba9d53fd/multidict-6.7.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:040f393368e63fb0f3330e70c26bfd336656bed925e5cbe17c9da839a6ab13ec", size = 250062, upload-time = "2025-10-06T14:50:09.074Z" }, - { url = "https://files.pythonhosted.org/packages/15/fe/ad407bb9e818c2b31383f6131ca19ea7e35ce93cf1310fce69f12e89de75/multidict-6.7.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b3bc26a951007b1057a1c543af845f1c7e3e71cc240ed1ace7bf4484aa99196e", size = 249683, upload-time = "2025-10-06T14:50:10.714Z" }, - { url = "https://files.pythonhosted.org/packages/8c/a4/a89abdb0229e533fb925e7c6e5c40201c2873efebc9abaf14046a4536ee6/multidict-6.7.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:7b022717c748dd1992a83e219587aabe45980d88969f01b316e78683e6285f64", size = 261254, upload-time = "2025-10-06T14:50:12.28Z" }, - { url = "https://files.pythonhosted.org/packages/8d/aa/0e2b27bd88b40a4fb8dc53dd74eecac70edaa4c1dd0707eb2164da3675b3/multidict-6.7.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:9600082733859f00d79dee64effc7aef1beb26adb297416a4ad2116fd61374bd", size = 257967, upload-time = "2025-10-06T14:50:14.16Z" }, - { url = "https://files.pythonhosted.org/packages/d0/8e/0c67b7120d5d5f6d874ed85a085f9dc770a7f9d8813e80f44a9fec820bb7/multidict-6.7.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:94218fcec4d72bc61df51c198d098ce2b378e0ccbac41ddbed5ef44092913288", size = 250085, upload-time = "2025-10-06T14:50:15.639Z" }, - { url = "https://files.pythonhosted.org/packages/ba/55/b73e1d624ea4b8fd4dd07a3bb70f6e4c7c6c5d9d640a41c6ffe5cdbd2a55/multidict-6.7.0-cp313-cp313-win32.whl", hash = "sha256:a37bd74c3fa9d00be2d7b8eca074dc56bd8077ddd2917a839bd989612671ed17", size = 41713, upload-time = "2025-10-06T14:50:17.066Z" }, - { url = "https://files.pythonhosted.org/packages/32/31/75c59e7d3b4205075b4c183fa4ca398a2daf2303ddf616b04ae6ef55cffe/multidict-6.7.0-cp313-cp313-win_amd64.whl", hash = "sha256:30d193c6cc6d559db42b6bcec8a5d395d34d60c9877a0b71ecd7c204fcf15390", size = 45915, upload-time = "2025-10-06T14:50:18.264Z" }, - { url = "https://files.pythonhosted.org/packages/31/2a/8987831e811f1184c22bc2e45844934385363ee61c0a2dcfa8f71b87e608/multidict-6.7.0-cp313-cp313-win_arm64.whl", hash = "sha256:ea3334cabe4d41b7ccd01e4d349828678794edbc2d3ae97fc162a3312095092e", size = 43077, upload-time = "2025-10-06T14:50:19.853Z" }, - { url = "https://files.pythonhosted.org/packages/e8/68/7b3a5170a382a340147337b300b9eb25a9ddb573bcdfff19c0fa3f31ffba/multidict-6.7.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:ad9ce259f50abd98a1ca0aa6e490b58c316a0fce0617f609723e40804add2c00", size = 83114, upload-time = "2025-10-06T14:50:21.223Z" }, - { url = "https://files.pythonhosted.org/packages/55/5c/3fa2d07c84df4e302060f555bbf539310980362236ad49f50eeb0a1c1eb9/multidict-6.7.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:07f5594ac6d084cbb5de2df218d78baf55ef150b91f0ff8a21cc7a2e3a5a58eb", size = 48442, upload-time = "2025-10-06T14:50:22.871Z" }, - { url = "https://files.pythonhosted.org/packages/fc/56/67212d33239797f9bd91962bb899d72bb0f4c35a8652dcdb8ed049bef878/multidict-6.7.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:0591b48acf279821a579282444814a2d8d0af624ae0bc600aa4d1b920b6e924b", size = 46885, upload-time = "2025-10-06T14:50:24.258Z" }, - { url = 
"https://files.pythonhosted.org/packages/46/d1/908f896224290350721597a61a69cd19b89ad8ee0ae1f38b3f5cd12ea2ac/multidict-6.7.0-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:749a72584761531d2b9467cfbdfd29487ee21124c304c4b6cb760d8777b27f9c", size = 242588, upload-time = "2025-10-06T14:50:25.716Z" }, - { url = "https://files.pythonhosted.org/packages/ab/67/8604288bbd68680eee0ab568fdcb56171d8b23a01bcd5cb0c8fedf6e5d99/multidict-6.7.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b4c3d199f953acd5b446bf7c0de1fe25d94e09e79086f8dc2f48a11a129cdf1", size = 249966, upload-time = "2025-10-06T14:50:28.192Z" }, - { url = "https://files.pythonhosted.org/packages/20/33/9228d76339f1ba51e3efef7da3ebd91964d3006217aae13211653193c3ff/multidict-6.7.0-cp313-cp313t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:9fb0211dfc3b51efea2f349ec92c114d7754dd62c01f81c3e32b765b70c45c9b", size = 228618, upload-time = "2025-10-06T14:50:29.82Z" }, - { url = "https://files.pythonhosted.org/packages/f8/2d/25d9b566d10cab1c42b3b9e5b11ef79c9111eaf4463b8c257a3bd89e0ead/multidict-6.7.0-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a027ec240fe73a8d6281872690b988eed307cd7d91b23998ff35ff577ca688b5", size = 257539, upload-time = "2025-10-06T14:50:31.731Z" }, - { url = "https://files.pythonhosted.org/packages/b6/b1/8d1a965e6637fc33de3c0d8f414485c2b7e4af00f42cab3d84e7b955c222/multidict-6.7.0-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d1d964afecdf3a8288789df2f5751dc0a8261138c3768d9af117ed384e538fad", size = 256345, upload-time = "2025-10-06T14:50:33.26Z" }, - { url = "https://files.pythonhosted.org/packages/ba/0c/06b5a8adbdeedada6f4fb8d8f193d44a347223b11939b42953eeb6530b6b/multidict-6.7.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:caf53b15b1b7df9fbd0709aa01409000a2b4dd03a5f6f5cc548183c7c8f8b63c", size = 247934, upload-time = "2025-10-06T14:50:34.808Z" }, - { url = "https://files.pythonhosted.org/packages/8f/31/b2491b5fe167ca044c6eb4b8f2c9f3b8a00b24c432c365358eadac5d7625/multidict-6.7.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:654030da3197d927f05a536a66186070e98765aa5142794c9904555d3a9d8fb5", size = 245243, upload-time = "2025-10-06T14:50:36.436Z" }, - { url = "https://files.pythonhosted.org/packages/61/1a/982913957cb90406c8c94f53001abd9eafc271cb3e70ff6371590bec478e/multidict-6.7.0-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:2090d3718829d1e484706a2f525e50c892237b2bf9b17a79b059cb98cddc2f10", size = 235878, upload-time = "2025-10-06T14:50:37.953Z" }, - { url = "https://files.pythonhosted.org/packages/be/c0/21435d804c1a1cf7a2608593f4d19bca5bcbd7a81a70b253fdd1c12af9c0/multidict-6.7.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:2d2cfeec3f6f45651b3d408c4acec0ebf3daa9bc8a112a084206f5db5d05b754", size = 243452, upload-time = "2025-10-06T14:50:39.574Z" }, - { url = "https://files.pythonhosted.org/packages/54/0a/4349d540d4a883863191be6eb9a928846d4ec0ea007d3dcd36323bb058ac/multidict-6.7.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:4ef089f985b8c194d341eb2c24ae6e7408c9a0e2e5658699c92f497437d88c3c", size = 252312, upload-time = "2025-10-06T14:50:41.612Z" }, - { url = 
"https://files.pythonhosted.org/packages/26/64/d5416038dbda1488daf16b676e4dbfd9674dde10a0cc8f4fc2b502d8125d/multidict-6.7.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:e93a0617cd16998784bf4414c7e40f17a35d2350e5c6f0bd900d3a8e02bd3762", size = 246935, upload-time = "2025-10-06T14:50:43.972Z" }, - { url = "https://files.pythonhosted.org/packages/9f/8c/8290c50d14e49f35e0bd4abc25e1bc7711149ca9588ab7d04f886cdf03d9/multidict-6.7.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:f0feece2ef8ebc42ed9e2e8c78fc4aa3cf455733b507c09ef7406364c94376c6", size = 243385, upload-time = "2025-10-06T14:50:45.648Z" }, - { url = "https://files.pythonhosted.org/packages/ef/a0/f83ae75e42d694b3fbad3e047670e511c138be747bc713cf1b10d5096416/multidict-6.7.0-cp313-cp313t-win32.whl", hash = "sha256:19a1d55338ec1be74ef62440ca9e04a2f001a04d0cc49a4983dc320ff0f3212d", size = 47777, upload-time = "2025-10-06T14:50:47.154Z" }, - { url = "https://files.pythonhosted.org/packages/dc/80/9b174a92814a3830b7357307a792300f42c9e94664b01dee8e457551fa66/multidict-6.7.0-cp313-cp313t-win_amd64.whl", hash = "sha256:3da4fb467498df97e986af166b12d01f05d2e04f978a9c1c680ea1988e0bc4b6", size = 53104, upload-time = "2025-10-06T14:50:48.851Z" }, - { url = "https://files.pythonhosted.org/packages/cc/28/04baeaf0428d95bb7a7bea0e691ba2f31394338ba424fb0679a9ed0f4c09/multidict-6.7.0-cp313-cp313t-win_arm64.whl", hash = "sha256:b4121773c49a0776461f4a904cdf6264c88e42218aaa8407e803ca8025872792", size = 45503, upload-time = "2025-10-06T14:50:50.16Z" }, - { url = "https://files.pythonhosted.org/packages/e2/b1/3da6934455dd4b261d4c72f897e3a5728eba81db59959f3a639245891baa/multidict-6.7.0-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:3bab1e4aff7adaa34410f93b1f8e57c4b36b9af0426a76003f441ee1d3c7e842", size = 75128, upload-time = "2025-10-06T14:50:51.92Z" }, - { url = "https://files.pythonhosted.org/packages/14/2c/f069cab5b51d175a1a2cb4ccdf7a2c2dabd58aa5bd933fa036a8d15e2404/multidict-6.7.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:b8512bac933afc3e45fb2b18da8e59b78d4f408399a960339598374d4ae3b56b", size = 44410, upload-time = "2025-10-06T14:50:53.275Z" }, - { url = "https://files.pythonhosted.org/packages/42/e2/64bb41266427af6642b6b128e8774ed84c11b80a90702c13ac0a86bb10cc/multidict-6.7.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:79dcf9e477bc65414ebfea98ffd013cb39552b5ecd62908752e0e413d6d06e38", size = 43205, upload-time = "2025-10-06T14:50:54.911Z" }, - { url = "https://files.pythonhosted.org/packages/02/68/6b086fef8a3f1a8541b9236c594f0c9245617c29841f2e0395d979485cde/multidict-6.7.0-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:31bae522710064b5cbeddaf2e9f32b1abab70ac6ac91d42572502299e9953128", size = 245084, upload-time = "2025-10-06T14:50:56.369Z" }, - { url = "https://files.pythonhosted.org/packages/15/ee/f524093232007cd7a75c1d132df70f235cfd590a7c9eaccd7ff422ef4ae8/multidict-6.7.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4a0df7ff02397bb63e2fd22af2c87dfa39e8c7f12947bc524dbdc528282c7e34", size = 252667, upload-time = "2025-10-06T14:50:57.991Z" }, - { url = "https://files.pythonhosted.org/packages/02/a5/eeb3f43ab45878f1895118c3ef157a480db58ede3f248e29b5354139c2c9/multidict-6.7.0-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:7a0222514e8e4c514660e182d5156a415c13ef0aabbd71682fc714e327b95e99", size = 233590, upload-time = "2025-10-06T14:50:59.589Z" }, - { url = 
"https://files.pythonhosted.org/packages/6a/1e/76d02f8270b97269d7e3dbd45644b1785bda457b474315f8cf999525a193/multidict-6.7.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2397ab4daaf2698eb51a76721e98db21ce4f52339e535725de03ea962b5a3202", size = 264112, upload-time = "2025-10-06T14:51:01.183Z" }, - { url = "https://files.pythonhosted.org/packages/76/0b/c28a70ecb58963847c2a8efe334904cd254812b10e535aefb3bcce513918/multidict-6.7.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:8891681594162635948a636c9fe0ff21746aeb3dd5463f6e25d9bea3a8a39ca1", size = 261194, upload-time = "2025-10-06T14:51:02.794Z" }, - { url = "https://files.pythonhosted.org/packages/b4/63/2ab26e4209773223159b83aa32721b4021ffb08102f8ac7d689c943fded1/multidict-6.7.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:18706cc31dbf402a7945916dd5cddf160251b6dab8a2c5f3d6d5a55949f676b3", size = 248510, upload-time = "2025-10-06T14:51:04.724Z" }, - { url = "https://files.pythonhosted.org/packages/93/cd/06c1fa8282af1d1c46fd55c10a7930af652afdce43999501d4d68664170c/multidict-6.7.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:f844a1bbf1d207dd311a56f383f7eda2d0e134921d45751842d8235e7778965d", size = 248395, upload-time = "2025-10-06T14:51:06.306Z" }, - { url = "https://files.pythonhosted.org/packages/99/ac/82cb419dd6b04ccf9e7e61befc00c77614fc8134362488b553402ecd55ce/multidict-6.7.0-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:d4393e3581e84e5645506923816b9cc81f5609a778c7e7534054091acc64d1c6", size = 239520, upload-time = "2025-10-06T14:51:08.091Z" }, - { url = "https://files.pythonhosted.org/packages/fa/f3/a0f9bf09493421bd8716a362e0cd1d244f5a6550f5beffdd6b47e885b331/multidict-6.7.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:fbd18dc82d7bf274b37aa48d664534330af744e03bccf696d6f4c6042e7d19e7", size = 245479, upload-time = "2025-10-06T14:51:10.365Z" }, - { url = "https://files.pythonhosted.org/packages/8d/01/476d38fc73a212843f43c852b0eee266b6971f0e28329c2184a8df90c376/multidict-6.7.0-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:b6234e14f9314731ec45c42fc4554b88133ad53a09092cc48a88e771c125dadb", size = 258903, upload-time = "2025-10-06T14:51:12.466Z" }, - { url = "https://files.pythonhosted.org/packages/49/6d/23faeb0868adba613b817d0e69c5f15531b24d462af8012c4f6de4fa8dc3/multidict-6.7.0-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:08d4379f9744d8f78d98c8673c06e202ffa88296f009c71bbafe8a6bf847d01f", size = 252333, upload-time = "2025-10-06T14:51:14.48Z" }, - { url = "https://files.pythonhosted.org/packages/1e/cc/48d02ac22b30fa247f7dad82866e4b1015431092f4ba6ebc7e77596e0b18/multidict-6.7.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:9fe04da3f79387f450fd0061d4dd2e45a72749d31bf634aecc9e27f24fdc4b3f", size = 243411, upload-time = "2025-10-06T14:51:16.072Z" }, - { url = "https://files.pythonhosted.org/packages/4a/03/29a8bf5a18abf1fe34535c88adbdfa88c9fb869b5a3b120692c64abe8284/multidict-6.7.0-cp314-cp314-win32.whl", hash = "sha256:fbafe31d191dfa7c4c51f7a6149c9fb7e914dcf9ffead27dcfd9f1ae382b3885", size = 40940, upload-time = "2025-10-06T14:51:17.544Z" }, - { url = "https://files.pythonhosted.org/packages/82/16/7ed27b680791b939de138f906d5cf2b4657b0d45ca6f5dd6236fdddafb1a/multidict-6.7.0-cp314-cp314-win_amd64.whl", hash = "sha256:2f67396ec0310764b9222a1728ced1ab638f61aadc6226f17a71dd9324f9a99c", size = 45087, upload-time = "2025-10-06T14:51:18.875Z" }, - { url 
= "https://files.pythonhosted.org/packages/cd/3c/e3e62eb35a1950292fe39315d3c89941e30a9d07d5d2df42965ab041da43/multidict-6.7.0-cp314-cp314-win_arm64.whl", hash = "sha256:ba672b26069957ee369cfa7fc180dde1fc6f176eaf1e6beaf61fbebbd3d9c000", size = 42368, upload-time = "2025-10-06T14:51:20.225Z" }, - { url = "https://files.pythonhosted.org/packages/8b/40/cd499bd0dbc5f1136726db3153042a735fffd0d77268e2ee20d5f33c010f/multidict-6.7.0-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:c1dcc7524066fa918c6a27d61444d4ee7900ec635779058571f70d042d86ed63", size = 82326, upload-time = "2025-10-06T14:51:21.588Z" }, - { url = "https://files.pythonhosted.org/packages/13/8a/18e031eca251c8df76daf0288e6790561806e439f5ce99a170b4af30676b/multidict-6.7.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:27e0b36c2d388dc7b6ced3406671b401e84ad7eb0656b8f3a2f46ed0ce483718", size = 48065, upload-time = "2025-10-06T14:51:22.93Z" }, - { url = "https://files.pythonhosted.org/packages/40/71/5e6701277470a87d234e433fb0a3a7deaf3bcd92566e421e7ae9776319de/multidict-6.7.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:2a7baa46a22e77f0988e3b23d4ede5513ebec1929e34ee9495be535662c0dfe2", size = 46475, upload-time = "2025-10-06T14:51:24.352Z" }, - { url = "https://files.pythonhosted.org/packages/fe/6a/bab00cbab6d9cfb57afe1663318f72ec28289ea03fd4e8236bb78429893a/multidict-6.7.0-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:7bf77f54997a9166a2f5675d1201520586439424c2511723a7312bdb4bcc034e", size = 239324, upload-time = "2025-10-06T14:51:25.822Z" }, - { url = "https://files.pythonhosted.org/packages/2a/5f/8de95f629fc22a7769ade8b41028e3e5a822c1f8904f618d175945a81ad3/multidict-6.7.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e011555abada53f1578d63389610ac8a5400fc70ce71156b0aa30d326f1a5064", size = 246877, upload-time = "2025-10-06T14:51:27.604Z" }, - { url = "https://files.pythonhosted.org/packages/23/b4/38881a960458f25b89e9f4a4fdcb02ac101cfa710190db6e5528841e67de/multidict-6.7.0-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:28b37063541b897fd6a318007373930a75ca6d6ac7c940dbe14731ffdd8d498e", size = 225824, upload-time = "2025-10-06T14:51:29.664Z" }, - { url = "https://files.pythonhosted.org/packages/1e/39/6566210c83f8a261575f18e7144736059f0c460b362e96e9cf797a24b8e7/multidict-6.7.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:05047ada7a2fde2631a0ed706f1fd68b169a681dfe5e4cf0f8e4cb6618bbc2cd", size = 253558, upload-time = "2025-10-06T14:51:31.684Z" }, - { url = "https://files.pythonhosted.org/packages/00/a3/67f18315100f64c269f46e6c0319fa87ba68f0f64f2b8e7fd7c72b913a0b/multidict-6.7.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:716133f7d1d946a4e1b91b1756b23c088881e70ff180c24e864c26192ad7534a", size = 252339, upload-time = "2025-10-06T14:51:33.699Z" }, - { url = "https://files.pythonhosted.org/packages/c8/2a/1cb77266afee2458d82f50da41beba02159b1d6b1f7973afc9a1cad1499b/multidict-6.7.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d1bed1b467ef657f2a0ae62844a607909ef1c6889562de5e1d505f74457d0b96", size = 244895, upload-time = "2025-10-06T14:51:36.189Z" }, - { url = 
"https://files.pythonhosted.org/packages/dd/72/09fa7dd487f119b2eb9524946ddd36e2067c08510576d43ff68469563b3b/multidict-6.7.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:ca43bdfa5d37bd6aee89d85e1d0831fb86e25541be7e9d376ead1b28974f8e5e", size = 241862, upload-time = "2025-10-06T14:51:41.291Z" }, - { url = "https://files.pythonhosted.org/packages/65/92/bc1f8bd0853d8669300f732c801974dfc3702c3eeadae2f60cef54dc69d7/multidict-6.7.0-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:44b546bd3eb645fd26fb949e43c02a25a2e632e2ca21a35e2e132c8105dc8599", size = 232376, upload-time = "2025-10-06T14:51:43.55Z" }, - { url = "https://files.pythonhosted.org/packages/09/86/ac39399e5cb9d0c2ac8ef6e10a768e4d3bc933ac808d49c41f9dc23337eb/multidict-6.7.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:a6ef16328011d3f468e7ebc326f24c1445f001ca1dec335b2f8e66bed3006394", size = 240272, upload-time = "2025-10-06T14:51:45.265Z" }, - { url = "https://files.pythonhosted.org/packages/3d/b6/fed5ac6b8563ec72df6cb1ea8dac6d17f0a4a1f65045f66b6d3bf1497c02/multidict-6.7.0-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:5aa873cbc8e593d361ae65c68f85faadd755c3295ea2c12040ee146802f23b38", size = 248774, upload-time = "2025-10-06T14:51:46.836Z" }, - { url = "https://files.pythonhosted.org/packages/6b/8d/b954d8c0dc132b68f760aefd45870978deec6818897389dace00fcde32ff/multidict-6.7.0-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:3d7b6ccce016e29df4b7ca819659f516f0bc7a4b3efa3bb2012ba06431b044f9", size = 242731, upload-time = "2025-10-06T14:51:48.541Z" }, - { url = "https://files.pythonhosted.org/packages/16/9d/a2dac7009125d3540c2f54e194829ea18ac53716c61b655d8ed300120b0f/multidict-6.7.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:171b73bd4ee683d307599b66793ac80981b06f069b62eea1c9e29c9241aa66b0", size = 240193, upload-time = "2025-10-06T14:51:50.355Z" }, - { url = "https://files.pythonhosted.org/packages/39/ca/c05f144128ea232ae2178b008d5011d4e2cea86e4ee8c85c2631b1b94802/multidict-6.7.0-cp314-cp314t-win32.whl", hash = "sha256:b2d7f80c4e1fd010b07cb26820aae86b7e73b681ee4889684fb8d2d4537aab13", size = 48023, upload-time = "2025-10-06T14:51:51.883Z" }, - { url = "https://files.pythonhosted.org/packages/ba/8f/0a60e501584145588be1af5cc829265701ba3c35a64aec8e07cbb71d39bb/multidict-6.7.0-cp314-cp314t-win_amd64.whl", hash = "sha256:09929cab6fcb68122776d575e03c6cc64ee0b8fca48d17e135474b042ce515cd", size = 53507, upload-time = "2025-10-06T14:51:53.672Z" }, - { url = "https://files.pythonhosted.org/packages/7f/ae/3148b988a9c6239903e786eac19c889fab607c31d6efa7fb2147e5680f23/multidict-6.7.0-cp314-cp314t-win_arm64.whl", hash = "sha256:cc41db090ed742f32bd2d2c721861725e6109681eddf835d0a82bd3a5c382827", size = 44804, upload-time = "2025-10-06T14:51:55.415Z" }, - { url = "https://files.pythonhosted.org/packages/b7/da/7d22601b625e241d4f23ef1ebff8acfc60da633c9e7e7922e24d10f592b3/multidict-6.7.0-py3-none-any.whl", hash = "sha256:394fc5c42a333c9ffc3e421a4c85e08580d990e08b99f6bf35b4132114c5dcb3", size = 12317, upload-time = "2025-10-06T14:52:29.272Z" }, +sdist = { url = "https://files.pythonhosted.org/packages/1a/c2/c2d94cbe6ac1753f3fc980da97b3d930efe1da3af3c9f5125354436c073d/multidict-6.7.1.tar.gz", hash = "sha256:ec6652a1bee61c53a3e5776b6049172c53b6aaba34f18c9ad04f82712bac623d", size = 102010, upload-time = "2026-01-26T02:46:45.979Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/84/0b/19348d4c98980c4851d2f943f8ebafdece2ae7ef737adcfa5994ce8e5f10/multidict-6.7.1-cp310-cp310-macosx_10_9_universal2.whl", 
hash = "sha256:c93c3db7ea657dd4637d57e74ab73de31bccefe144d3d4ce370052035bc85fb5", size = 77176, upload-time = "2026-01-26T02:42:59.784Z" }, + { url = "https://files.pythonhosted.org/packages/ef/04/9de3f8077852e3d438215c81e9b691244532d2e05b4270e89ce67b7d103c/multidict-6.7.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:974e72a2474600827abaeda71af0c53d9ebbc3c2eb7da37b37d7829ae31232d8", size = 44996, upload-time = "2026-01-26T02:43:01.674Z" }, + { url = "https://files.pythonhosted.org/packages/31/5c/08c7f7fe311f32e83f7621cd3f99d805f45519cd06fafb247628b861da7d/multidict-6.7.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cdea2e7b2456cfb6694fb113066fd0ec7ea4d67e3a35e1f4cbeea0b448bf5872", size = 44631, upload-time = "2026-01-26T02:43:03.169Z" }, + { url = "https://files.pythonhosted.org/packages/b7/7f/0e3b1390ae772f27501199996b94b52ceeb64fe6f9120a32c6c3f6b781be/multidict-6.7.1-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:17207077e29342fdc2c9a82e4b306f1127bf1ea91f8b71e02d4798a70bb99991", size = 242561, upload-time = "2026-01-26T02:43:04.733Z" }, + { url = "https://files.pythonhosted.org/packages/dd/f4/8719f4f167586af317b69dd3e90f913416c91ca610cac79a45c53f590312/multidict-6.7.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d4f49cb5661344764e4c7c7973e92a47a59b8fc19b6523649ec9dc4960e58a03", size = 242223, upload-time = "2026-01-26T02:43:06.695Z" }, + { url = "https://files.pythonhosted.org/packages/47/ab/7c36164cce64a6ad19c6d9a85377b7178ecf3b89f8fd589c73381a5eedfd/multidict-6.7.1-cp310-cp310-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:a9fc4caa29e2e6ae408d1c450ac8bf19892c5fca83ee634ecd88a53332c59981", size = 222322, upload-time = "2026-01-26T02:43:08.472Z" }, + { url = "https://files.pythonhosted.org/packages/f5/79/a25add6fb38035b5337bc5734f296d9afc99163403bbcf56d4170f97eb62/multidict-6.7.1-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c5f0c21549ab432b57dcc82130f388d84ad8179824cc3f223d5e7cfbfd4143f6", size = 254005, upload-time = "2026-01-26T02:43:10.127Z" }, + { url = "https://files.pythonhosted.org/packages/4a/7b/64a87cf98e12f756fc8bd444b001232ffff2be37288f018ad0d3f0aae931/multidict-6.7.1-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:7dfb78d966b2c906ae1d28ccf6e6712a3cd04407ee5088cd276fe8cb42186190", size = 251173, upload-time = "2026-01-26T02:43:11.731Z" }, + { url = "https://files.pythonhosted.org/packages/4b/ac/b605473de2bb404e742f2cc3583d12aedb2352a70e49ae8fce455b50c5aa/multidict-6.7.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9b0d9b91d1aa44db9c1f1ecd0d9d2ae610b2f4f856448664e01a3b35899f3f92", size = 243273, upload-time = "2026-01-26T02:43:13.063Z" }, + { url = "https://files.pythonhosted.org/packages/03/65/11492d6a0e259783720f3bc1d9ea55579a76f1407e31ed44045c99542004/multidict-6.7.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:dd96c01a9dcd4889dcfcf9eb5544ca0c77603f239e3ffab0524ec17aea9a93ee", size = 238956, upload-time = "2026-01-26T02:43:14.843Z" }, + { url = "https://files.pythonhosted.org/packages/5f/a7/7ee591302af64e7c196fb63fe856c788993c1372df765102bd0448e7e165/multidict-6.7.1-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:067343c68cd6612d375710f895337b3a98a033c94f14b9a99eff902f205424e2", size = 233477, upload-time = "2026-01-26T02:43:16.025Z" }, + { url = 
"https://files.pythonhosted.org/packages/9c/99/c109962d58756c35fd9992fed7f2355303846ea2ff054bb5f5e9d6b888de/multidict-6.7.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:5884a04f4ff56c6120f6ccf703bdeb8b5079d808ba604d4d53aec0d55dc33568", size = 243615, upload-time = "2026-01-26T02:43:17.84Z" }, + { url = "https://files.pythonhosted.org/packages/d5/5f/1973e7c771c86e93dcfe1c9cc55a5481b610f6614acfc28c0d326fe6bfad/multidict-6.7.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:8affcf1c98b82bc901702eb73b6947a1bfa170823c153fe8a47b5f5f02e48e40", size = 249930, upload-time = "2026-01-26T02:43:19.06Z" }, + { url = "https://files.pythonhosted.org/packages/5d/a5/f170fc2268c3243853580203378cd522446b2df632061e0a5409817854c7/multidict-6.7.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:0d17522c37d03e85c8098ec8431636309b2682cf12e58f4dbc76121fb50e4962", size = 243807, upload-time = "2026-01-26T02:43:20.286Z" }, + { url = "https://files.pythonhosted.org/packages/de/01/73856fab6d125e5bc652c3986b90e8699a95e84b48d72f39ade6c0e74a8c/multidict-6.7.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:24c0cf81544ca5e17cfcb6e482e7a82cd475925242b308b890c9452a074d4505", size = 239103, upload-time = "2026-01-26T02:43:21.508Z" }, + { url = "https://files.pythonhosted.org/packages/e7/46/f1220bd9944d8aa40d8ccff100eeeee19b505b857b6f603d6078cb5315b0/multidict-6.7.1-cp310-cp310-win32.whl", hash = "sha256:d82dd730a95e6643802f4454b8fdecdf08667881a9c5670db85bc5a56693f122", size = 41416, upload-time = "2026-01-26T02:43:22.703Z" }, + { url = "https://files.pythonhosted.org/packages/68/00/9b38e272a770303692fc406c36e1a4c740f401522d5787691eb38a8925a8/multidict-6.7.1-cp310-cp310-win_amd64.whl", hash = "sha256:cf37cbe5ced48d417ba045aca1b21bafca67489452debcde94778a576666a1df", size = 46022, upload-time = "2026-01-26T02:43:23.77Z" }, + { url = "https://files.pythonhosted.org/packages/64/65/d8d42490c02ee07b6bbe00f7190d70bb4738b3cce7629aaf9f213ef730dd/multidict-6.7.1-cp310-cp310-win_arm64.whl", hash = "sha256:59bc83d3f66b41dac1e7460aac1d196edc70c9ba3094965c467715a70ecb46db", size = 43238, upload-time = "2026-01-26T02:43:24.882Z" }, + { url = "https://files.pythonhosted.org/packages/ce/f1/a90635c4f88fb913fbf4ce660b83b7445b7a02615bda034b2f8eb38fd597/multidict-6.7.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:7ff981b266af91d7b4b3793ca3382e53229088d193a85dfad6f5f4c27fc73e5d", size = 76626, upload-time = "2026-01-26T02:43:26.485Z" }, + { url = "https://files.pythonhosted.org/packages/a6/9b/267e64eaf6fc637a15b35f5de31a566634a2740f97d8d094a69d34f524a4/multidict-6.7.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:844c5bca0b5444adb44a623fb0a1310c2f4cd41f402126bb269cd44c9b3f3e1e", size = 44706, upload-time = "2026-01-26T02:43:27.607Z" }, + { url = "https://files.pythonhosted.org/packages/dd/a4/d45caf2b97b035c57267791ecfaafbd59c68212004b3842830954bb4b02e/multidict-6.7.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f2a0a924d4c2e9afcd7ec64f9de35fcd96915149b2216e1cb2c10a56df483855", size = 44356, upload-time = "2026-01-26T02:43:28.661Z" }, + { url = "https://files.pythonhosted.org/packages/fd/d2/0a36c8473f0cbaeadd5db6c8b72d15bbceeec275807772bfcd059bef487d/multidict-6.7.1-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:8be1802715a8e892c784c0197c2ace276ea52702a0ede98b6310c8f255a5afb3", size = 244355, upload-time = "2026-01-26T02:43:31.165Z" }, + { url = 
"https://files.pythonhosted.org/packages/5d/16/8c65be997fd7dd311b7d39c7b6e71a0cb449bad093761481eccbbe4b42a2/multidict-6.7.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2e2d2ed645ea29f31c4c7ea1552fcfd7cb7ba656e1eafd4134a6620c9f5fdd9e", size = 246433, upload-time = "2026-01-26T02:43:32.581Z" }, + { url = "https://files.pythonhosted.org/packages/01/fb/4dbd7e848d2799c6a026ec88ad39cf2b8416aa167fcc903baa55ecaa045c/multidict-6.7.1-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:95922cee9a778659e91db6497596435777bd25ed116701a4c034f8e46544955a", size = 225376, upload-time = "2026-01-26T02:43:34.417Z" }, + { url = "https://files.pythonhosted.org/packages/b6/8a/4a3a6341eac3830f6053062f8fbc9a9e54407c80755b3f05bc427295c2d0/multidict-6.7.1-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:6b83cabdc375ffaaa15edd97eb7c0c672ad788e2687004990074d7d6c9b140c8", size = 257365, upload-time = "2026-01-26T02:43:35.741Z" }, + { url = "https://files.pythonhosted.org/packages/f7/a2/dd575a69c1aa206e12d27d0770cdf9b92434b48a9ef0cd0d1afdecaa93c4/multidict-6.7.1-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:38fb49540705369bab8484db0689d86c0a33a0a9f2c1b197f506b71b4b6c19b0", size = 254747, upload-time = "2026-01-26T02:43:36.976Z" }, + { url = "https://files.pythonhosted.org/packages/5a/56/21b27c560c13822ed93133f08aa6372c53a8e067f11fbed37b4adcdac922/multidict-6.7.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:439cbebd499f92e9aa6793016a8acaa161dfa749ae86d20960189f5398a19144", size = 246293, upload-time = "2026-01-26T02:43:38.258Z" }, + { url = "https://files.pythonhosted.org/packages/5a/a4/23466059dc3854763423d0ad6c0f3683a379d97673b1b89ec33826e46728/multidict-6.7.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6d3bc717b6fe763b8be3f2bee2701d3c8eb1b2a8ae9f60910f1b2860c82b6c49", size = 242962, upload-time = "2026-01-26T02:43:40.034Z" }, + { url = "https://files.pythonhosted.org/packages/1f/67/51dd754a3524d685958001e8fa20a0f5f90a6a856e0a9dcabff69be3dbb7/multidict-6.7.1-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:619e5a1ac57986dbfec9f0b301d865dddf763696435e2962f6d9cf2fdff2bb71", size = 237360, upload-time = "2026-01-26T02:43:41.752Z" }, + { url = "https://files.pythonhosted.org/packages/64/3f/036dfc8c174934d4b55d86ff4f978e558b0e585cef70cfc1ad01adc6bf18/multidict-6.7.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:0b38ebffd9be37c1170d33bc0f36f4f262e0a09bc1aac1c34c7aa51a7293f0b3", size = 245940, upload-time = "2026-01-26T02:43:43.042Z" }, + { url = "https://files.pythonhosted.org/packages/3d/20/6214d3c105928ebc353a1c644a6ef1408bc5794fcb4f170bb524a3c16311/multidict-6.7.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:10ae39c9cfe6adedcdb764f5e8411d4a92b055e35573a2eaa88d3323289ef93c", size = 253502, upload-time = "2026-01-26T02:43:44.371Z" }, + { url = "https://files.pythonhosted.org/packages/b1/e2/c653bc4ae1be70a0f836b82172d643fcf1dade042ba2676ab08ec08bff0f/multidict-6.7.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:25167cc263257660290fba06b9318d2026e3c910be240a146e1f66dd114af2b0", size = 247065, upload-time = "2026-01-26T02:43:45.745Z" }, + { url = "https://files.pythonhosted.org/packages/c8/11/a854b4154cd3bd8b1fd375e8a8ca9d73be37610c361543d56f764109509b/multidict-6.7.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = 
"sha256:128441d052254f42989ef98b7b6a6ecb1e6f708aa962c7984235316db59f50fa", size = 241870, upload-time = "2026-01-26T02:43:47.054Z" }, + { url = "https://files.pythonhosted.org/packages/13/bf/9676c0392309b5fdae322333d22a829715b570edb9baa8016a517b55b558/multidict-6.7.1-cp311-cp311-win32.whl", hash = "sha256:d62b7f64ffde3b99d06b707a280db04fb3855b55f5a06df387236051d0668f4a", size = 41302, upload-time = "2026-01-26T02:43:48.753Z" }, + { url = "https://files.pythonhosted.org/packages/c9/68/f16a3a8ba6f7b6dc92a1f19669c0810bd2c43fc5a02da13b1cbf8e253845/multidict-6.7.1-cp311-cp311-win_amd64.whl", hash = "sha256:bdbf9f3b332abd0cdb306e7c2113818ab1e922dc84b8f8fd06ec89ed2a19ab8b", size = 45981, upload-time = "2026-01-26T02:43:49.921Z" }, + { url = "https://files.pythonhosted.org/packages/ac/ad/9dd5305253fa00cd3c7555dbef69d5bf4133debc53b87ab8d6a44d411665/multidict-6.7.1-cp311-cp311-win_arm64.whl", hash = "sha256:b8c990b037d2fff2f4e33d3f21b9b531c5745b33a49a7d6dbe7a177266af44f6", size = 43159, upload-time = "2026-01-26T02:43:51.635Z" }, + { url = "https://files.pythonhosted.org/packages/8d/9c/f20e0e2cf80e4b2e4b1c365bf5fe104ee633c751a724246262db8f1a0b13/multidict-6.7.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:a90f75c956e32891a4eda3639ce6dd86e87105271f43d43442a3aedf3cddf172", size = 76893, upload-time = "2026-01-26T02:43:52.754Z" }, + { url = "https://files.pythonhosted.org/packages/fe/cf/18ef143a81610136d3da8193da9d80bfe1cb548a1e2d1c775f26b23d024a/multidict-6.7.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:3fccb473e87eaa1382689053e4a4618e7ba7b9b9b8d6adf2027ee474597128cd", size = 45456, upload-time = "2026-01-26T02:43:53.893Z" }, + { url = "https://files.pythonhosted.org/packages/a9/65/1caac9d4cd32e8433908683446eebc953e82d22b03d10d41a5f0fefe991b/multidict-6.7.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b0fa96985700739c4c7853a43c0b3e169360d6855780021bfc6d0f1ce7c123e7", size = 43872, upload-time = "2026-01-26T02:43:55.041Z" }, + { url = "https://files.pythonhosted.org/packages/cf/3b/d6bd75dc4f3ff7c73766e04e705b00ed6dbbaccf670d9e05a12b006f5a21/multidict-6.7.1-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:cb2a55f408c3043e42b40cc8eecd575afa27b7e0b956dfb190de0f8499a57a53", size = 251018, upload-time = "2026-01-26T02:43:56.198Z" }, + { url = "https://files.pythonhosted.org/packages/fd/80/c959c5933adedb9ac15152e4067c702a808ea183a8b64cf8f31af8ad3155/multidict-6.7.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:eb0ce7b2a32d09892b3dd6cc44877a0d02a33241fafca5f25c8b6b62374f8b75", size = 258883, upload-time = "2026-01-26T02:43:57.499Z" }, + { url = "https://files.pythonhosted.org/packages/86/85/7ed40adafea3d4f1c8b916e3b5cc3a8e07dfcdcb9cd72800f4ed3ca1b387/multidict-6.7.1-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:c3a32d23520ee37bf327d1e1a656fec76a2edd5c038bf43eddfa0572ec49c60b", size = 242413, upload-time = "2026-01-26T02:43:58.755Z" }, + { url = "https://files.pythonhosted.org/packages/d2/57/b8565ff533e48595503c785f8361ff9a4fde4d67de25c207cd0ba3befd03/multidict-6.7.1-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:9c90fed18bffc0189ba814749fdcc102b536e83a9f738a9003e569acd540a733", size = 268404, upload-time = "2026-01-26T02:44:00.216Z" }, + { url = 
"https://files.pythonhosted.org/packages/e0/50/9810c5c29350f7258180dfdcb2e52783a0632862eb334c4896ac717cebcb/multidict-6.7.1-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:da62917e6076f512daccfbbde27f46fed1c98fee202f0559adec8ee0de67f71a", size = 269456, upload-time = "2026-01-26T02:44:02.202Z" }, + { url = "https://files.pythonhosted.org/packages/f3/8d/5e5be3ced1d12966fefb5c4ea3b2a5b480afcea36406559442c6e31d4a48/multidict-6.7.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bfde23ef6ed9db7eaee6c37dcec08524cb43903c60b285b172b6c094711b3961", size = 256322, upload-time = "2026-01-26T02:44:03.56Z" }, + { url = "https://files.pythonhosted.org/packages/31/6e/d8a26d81ac166a5592782d208dd90dfdc0a7a218adaa52b45a672b46c122/multidict-6.7.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3758692429e4e32f1ba0df23219cd0b4fc0a52f476726fff9337d1a57676a582", size = 253955, upload-time = "2026-01-26T02:44:04.845Z" }, + { url = "https://files.pythonhosted.org/packages/59/4c/7c672c8aad41534ba619bcd4ade7a0dc87ed6b8b5c06149b85d3dd03f0cd/multidict-6.7.1-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:398c1478926eca669f2fd6a5856b6de9c0acf23a2cb59a14c0ba5844fa38077e", size = 251254, upload-time = "2026-01-26T02:44:06.133Z" }, + { url = "https://files.pythonhosted.org/packages/7b/bd/84c24de512cbafbdbc39439f74e967f19570ce7924e3007174a29c348916/multidict-6.7.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:c102791b1c4f3ab36ce4101154549105a53dc828f016356b3e3bcae2e3a039d3", size = 252059, upload-time = "2026-01-26T02:44:07.518Z" }, + { url = "https://files.pythonhosted.org/packages/fa/ba/f5449385510825b73d01c2d4087bf6d2fccc20a2d42ac34df93191d3dd03/multidict-6.7.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:a088b62bd733e2ad12c50dad01b7d0166c30287c166e137433d3b410add807a6", size = 263588, upload-time = "2026-01-26T02:44:09.382Z" }, + { url = "https://files.pythonhosted.org/packages/d7/11/afc7c677f68f75c84a69fe37184f0f82fce13ce4b92f49f3db280b7e92b3/multidict-6.7.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:3d51ff4785d58d3f6c91bdbffcb5e1f7ddfda557727043aa20d20ec4f65e324a", size = 259642, upload-time = "2026-01-26T02:44:10.73Z" }, + { url = "https://files.pythonhosted.org/packages/2b/17/ebb9644da78c4ab36403739e0e6e0e30ebb135b9caf3440825001a0bddcb/multidict-6.7.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fc5907494fccf3e7d3f94f95c91d6336b092b5fc83811720fae5e2765890dfba", size = 251377, upload-time = "2026-01-26T02:44:12.042Z" }, + { url = "https://files.pythonhosted.org/packages/ca/a4/840f5b97339e27846c46307f2530a2805d9d537d8b8bd416af031cad7fa0/multidict-6.7.1-cp312-cp312-win32.whl", hash = "sha256:28ca5ce2fd9716631133d0e9a9b9a745ad7f60bac2bccafb56aa380fc0b6c511", size = 41887, upload-time = "2026-01-26T02:44:14.245Z" }, + { url = "https://files.pythonhosted.org/packages/80/31/0b2517913687895f5904325c2069d6a3b78f66cc641a86a2baf75a05dcbb/multidict-6.7.1-cp312-cp312-win_amd64.whl", hash = "sha256:fcee94dfbd638784645b066074b338bc9cc155d4b4bffa4adce1615c5a426c19", size = 46053, upload-time = "2026-01-26T02:44:15.371Z" }, + { url = "https://files.pythonhosted.org/packages/0c/5b/aba28e4ee4006ae4c7df8d327d31025d760ffa992ea23812a601d226e682/multidict-6.7.1-cp312-cp312-win_arm64.whl", hash = "sha256:ba0a9fb644d0c1a2194cf7ffb043bd852cea63a57f66fbd33959f7dae18517bf", size = 43307, upload-time = "2026-01-26T02:44:16.852Z" }, + { url = 
"https://files.pythonhosted.org/packages/f2/22/929c141d6c0dba87d3e1d38fbdf1ba8baba86b7776469f2bc2d3227a1e67/multidict-6.7.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:2b41f5fed0ed563624f1c17630cb9941cf2309d4df00e494b551b5f3e3d67a23", size = 76174, upload-time = "2026-01-26T02:44:18.509Z" }, + { url = "https://files.pythonhosted.org/packages/c7/75/bc704ae15fee974f8fccd871305e254754167dce5f9e42d88a2def741a1d/multidict-6.7.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:84e61e3af5463c19b67ced91f6c634effb89ef8bfc5ca0267f954451ed4bb6a2", size = 45116, upload-time = "2026-01-26T02:44:19.745Z" }, + { url = "https://files.pythonhosted.org/packages/79/76/55cd7186f498ed080a18440c9013011eb548f77ae1b297206d030eb1180a/multidict-6.7.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:935434b9853c7c112eee7ac891bc4cb86455aa631269ae35442cb316790c1445", size = 43524, upload-time = "2026-01-26T02:44:21.571Z" }, + { url = "https://files.pythonhosted.org/packages/e9/3c/414842ef8d5a1628d68edee29ba0e5bcf235dbfb3ccd3ea303a7fe8c72ff/multidict-6.7.1-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:432feb25a1cb67fe82a9680b4d65fb542e4635cb3166cd9c01560651ad60f177", size = 249368, upload-time = "2026-01-26T02:44:22.803Z" }, + { url = "https://files.pythonhosted.org/packages/f6/32/befed7f74c458b4a525e60519fe8d87eef72bb1e99924fa2b0f9d97a221e/multidict-6.7.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e82d14e3c948952a1a85503817e038cba5905a3352de76b9a465075d072fba23", size = 256952, upload-time = "2026-01-26T02:44:24.306Z" }, + { url = "https://files.pythonhosted.org/packages/03/d6/c878a44ba877f366630c860fdf74bfb203c33778f12b6ac274936853c451/multidict-6.7.1-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:4cfb48c6ea66c83bcaaf7e4dfa7ec1b6bbcf751b7db85a328902796dfde4c060", size = 240317, upload-time = "2026-01-26T02:44:25.772Z" }, + { url = "https://files.pythonhosted.org/packages/68/49/57421b4d7ad2e9e60e25922b08ceb37e077b90444bde6ead629095327a6f/multidict-6.7.1-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:1d540e51b7e8e170174555edecddbd5538105443754539193e3e1061864d444d", size = 267132, upload-time = "2026-01-26T02:44:27.648Z" }, + { url = "https://files.pythonhosted.org/packages/b7/fe/ec0edd52ddbcea2a2e89e174f0206444a61440b40f39704e64dc807a70bd/multidict-6.7.1-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:273d23f4b40f3dce4d6c8a821c741a86dec62cded82e1175ba3d99be128147ed", size = 268140, upload-time = "2026-01-26T02:44:29.588Z" }, + { url = "https://files.pythonhosted.org/packages/b0/73/6e1b01cbeb458807aa0831742232dbdd1fa92bfa33f52a3f176b4ff3dc11/multidict-6.7.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9d624335fd4fa1c08a53f8b4be7676ebde19cd092b3895c421045ca87895b429", size = 254277, upload-time = "2026-01-26T02:44:30.902Z" }, + { url = "https://files.pythonhosted.org/packages/6a/b2/5fb8c124d7561a4974c342bc8c778b471ebbeb3cc17df696f034a7e9afe7/multidict-6.7.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:12fad252f8b267cc75b66e8fc51b3079604e8d43a75428ffe193cd9e2195dfd6", size = 252291, upload-time = "2026-01-26T02:44:32.31Z" }, + { url = 
"https://files.pythonhosted.org/packages/5a/96/51d4e4e06bcce92577fcd488e22600bd38e4fd59c20cb49434d054903bd2/multidict-6.7.1-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:03ede2a6ffbe8ef936b92cb4529f27f42be7f56afcdab5ab739cd5f27fb1cbf9", size = 250156, upload-time = "2026-01-26T02:44:33.734Z" }, + { url = "https://files.pythonhosted.org/packages/db/6b/420e173eec5fba721a50e2a9f89eda89d9c98fded1124f8d5c675f7a0c0f/multidict-6.7.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:90efbcf47dbe33dcf643a1e400d67d59abeac5db07dc3f27d6bdeae497a2198c", size = 249742, upload-time = "2026-01-26T02:44:35.222Z" }, + { url = "https://files.pythonhosted.org/packages/44/a3/ec5b5bd98f306bc2aa297b8c6f11a46714a56b1e6ef5ebda50a4f5d7c5fb/multidict-6.7.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:5c4b9bfc148f5a91be9244d6264c53035c8a0dcd2f51f1c3c6e30e30ebaa1c84", size = 262221, upload-time = "2026-01-26T02:44:36.604Z" }, + { url = "https://files.pythonhosted.org/packages/cd/f7/e8c0d0da0cd1e28d10e624604e1a36bcc3353aaebdfdc3a43c72bc683a12/multidict-6.7.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:401c5a650f3add2472d1d288c26deebc540f99e2fb83e9525007a74cd2116f1d", size = 258664, upload-time = "2026-01-26T02:44:38.008Z" }, + { url = "https://files.pythonhosted.org/packages/52/da/151a44e8016dd33feed44f730bd856a66257c1ee7aed4f44b649fb7edeb3/multidict-6.7.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:97891f3b1b3ffbded884e2916cacf3c6fc87b66bb0dde46f7357404750559f33", size = 249490, upload-time = "2026-01-26T02:44:39.386Z" }, + { url = "https://files.pythonhosted.org/packages/87/af/a3b86bf9630b732897f6fc3f4c4714b90aa4361983ccbdcd6c0339b21b0c/multidict-6.7.1-cp313-cp313-win32.whl", hash = "sha256:e1c5988359516095535c4301af38d8a8838534158f649c05dd1050222321bcb3", size = 41695, upload-time = "2026-01-26T02:44:41.318Z" }, + { url = "https://files.pythonhosted.org/packages/b2/35/e994121b0e90e46134673422dd564623f93304614f5d11886b1b3e06f503/multidict-6.7.1-cp313-cp313-win_amd64.whl", hash = "sha256:960c83bf01a95b12b08fd54324a4eb1d5b52c88932b5cba5d6e712bb3ed12eb5", size = 45884, upload-time = "2026-01-26T02:44:42.488Z" }, + { url = "https://files.pythonhosted.org/packages/ca/61/42d3e5dbf661242a69c97ea363f2d7b46c567da8eadef8890022be6e2ab0/multidict-6.7.1-cp313-cp313-win_arm64.whl", hash = "sha256:563fe25c678aaba333d5399408f5ec3c383ca5b663e7f774dd179a520b8144df", size = 43122, upload-time = "2026-01-26T02:44:43.664Z" }, + { url = "https://files.pythonhosted.org/packages/6d/b3/e6b21c6c4f314bb956016b0b3ef2162590a529b84cb831c257519e7fde44/multidict-6.7.1-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:c76c4bec1538375dad9d452d246ca5368ad6e1c9039dadcf007ae59c70619ea1", size = 83175, upload-time = "2026-01-26T02:44:44.894Z" }, + { url = "https://files.pythonhosted.org/packages/fb/76/23ecd2abfe0957b234f6c960f4ade497f55f2c16aeb684d4ecdbf1c95791/multidict-6.7.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:57b46b24b5d5ebcc978da4ec23a819a9402b4228b8a90d9c656422b4bdd8a963", size = 48460, upload-time = "2026-01-26T02:44:46.106Z" }, + { url = "https://files.pythonhosted.org/packages/c4/57/a0ed92b23f3a042c36bc4227b72b97eca803f5f1801c1ab77c8a212d455e/multidict-6.7.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:e954b24433c768ce78ab7929e84ccf3422e46deb45a4dc9f93438f8217fa2d34", size = 46930, upload-time = "2026-01-26T02:44:47.278Z" }, + { url = 
"https://files.pythonhosted.org/packages/b5/66/02ec7ace29162e447f6382c495dc95826bf931d3818799bbef11e8f7df1a/multidict-6.7.1-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:3bd231490fa7217cc832528e1cd8752a96f0125ddd2b5749390f7c3ec8721b65", size = 242582, upload-time = "2026-01-26T02:44:48.604Z" }, + { url = "https://files.pythonhosted.org/packages/58/18/64f5a795e7677670e872673aca234162514696274597b3708b2c0d276cce/multidict-6.7.1-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:253282d70d67885a15c8a7716f3a73edf2d635793ceda8173b9ecc21f2fb8292", size = 250031, upload-time = "2026-01-26T02:44:50.544Z" }, + { url = "https://files.pythonhosted.org/packages/c8/ed/e192291dbbe51a8290c5686f482084d31bcd9d09af24f63358c3d42fd284/multidict-6.7.1-cp313-cp313t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:0b4c48648d7649c9335cf1927a8b87fa692de3dcb15faa676c6a6f1f1aabda43", size = 228596, upload-time = "2026-01-26T02:44:51.951Z" }, + { url = "https://files.pythonhosted.org/packages/1e/7e/3562a15a60cf747397e7f2180b0a11dc0c38d9175a650e75fa1b4d325e15/multidict-6.7.1-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:98bc624954ec4d2c7cb074b8eefc2b5d0ce7d482e410df446414355d158fe4ca", size = 257492, upload-time = "2026-01-26T02:44:53.902Z" }, + { url = "https://files.pythonhosted.org/packages/24/02/7d0f9eae92b5249bb50ac1595b295f10e263dd0078ebb55115c31e0eaccd/multidict-6.7.1-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:1b99af4d9eec0b49927b4402bcbb58dea89d3e0db8806a4086117019939ad3dd", size = 255899, upload-time = "2026-01-26T02:44:55.316Z" }, + { url = "https://files.pythonhosted.org/packages/00/e3/9b60ed9e23e64c73a5cde95269ef1330678e9c6e34dd4eb6b431b85b5a10/multidict-6.7.1-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6aac4f16b472d5b7dc6f66a0d49dd57b0e0902090be16594dc9ebfd3d17c47e7", size = 247970, upload-time = "2026-01-26T02:44:56.783Z" }, + { url = "https://files.pythonhosted.org/packages/3e/06/538e58a63ed5cfb0bd4517e346b91da32fde409d839720f664e9a4ae4f9d/multidict-6.7.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:21f830fe223215dffd51f538e78c172ed7c7f60c9b96a2bf05c4848ad49921c3", size = 245060, upload-time = "2026-01-26T02:44:58.195Z" }, + { url = "https://files.pythonhosted.org/packages/b2/2f/d743a3045a97c895d401e9bd29aaa09b94f5cbdf1bd561609e5a6c431c70/multidict-6.7.1-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:f5dd81c45b05518b9aa4da4aa74e1c93d715efa234fd3e8a179df611cc85e5f4", size = 235888, upload-time = "2026-01-26T02:44:59.57Z" }, + { url = "https://files.pythonhosted.org/packages/38/83/5a325cac191ab28b63c52f14f1131f3b0a55ba3b9aa65a6d0bf2a9b921a0/multidict-6.7.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:eb304767bca2bb92fb9c5bd33cedc95baee5bb5f6c88e63706533a1c06ad08c8", size = 243554, upload-time = "2026-01-26T02:45:01.054Z" }, + { url = "https://files.pythonhosted.org/packages/20/1f/9d2327086bd15da2725ef6aae624208e2ef828ed99892b17f60c344e57ed/multidict-6.7.1-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:c9035dde0f916702850ef66460bc4239d89d08df4d02023a5926e7446724212c", size = 252341, upload-time = "2026-01-26T02:45:02.484Z" }, + { url = 
"https://files.pythonhosted.org/packages/e8/2c/2a1aa0280cf579d0f6eed8ee5211c4f1730bd7e06c636ba2ee6aafda302e/multidict-6.7.1-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:af959b9beeb66c822380f222f0e0a1889331597e81f1ded7f374f3ecb0fd6c52", size = 246391, upload-time = "2026-01-26T02:45:03.862Z" }, + { url = "https://files.pythonhosted.org/packages/e5/03/7ca022ffc36c5a3f6e03b179a5ceb829be9da5783e6fe395f347c0794680/multidict-6.7.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:41f2952231456154ee479651491e94118229844dd7226541788be783be2b5108", size = 243422, upload-time = "2026-01-26T02:45:05.296Z" }, + { url = "https://files.pythonhosted.org/packages/dc/1d/b31650eab6c5778aceed46ba735bd97f7c7d2f54b319fa916c0f96e7805b/multidict-6.7.1-cp313-cp313t-win32.whl", hash = "sha256:df9f19c28adcb40b6aae30bbaa1478c389efd50c28d541d76760199fc1037c32", size = 47770, upload-time = "2026-01-26T02:45:06.754Z" }, + { url = "https://files.pythonhosted.org/packages/ac/5b/2d2d1d522e51285bd61b1e20df8f47ae1a9d80839db0b24ea783b3832832/multidict-6.7.1-cp313-cp313t-win_amd64.whl", hash = "sha256:d54ecf9f301853f2c5e802da559604b3e95bb7a3b01a9c295c6ee591b9882de8", size = 53109, upload-time = "2026-01-26T02:45:08.044Z" }, + { url = "https://files.pythonhosted.org/packages/3d/a3/cc409ba012c83ca024a308516703cf339bdc4b696195644a7215a5164a24/multidict-6.7.1-cp313-cp313t-win_arm64.whl", hash = "sha256:5a37ca18e360377cfda1d62f5f382ff41f2b8c4ccb329ed974cc2e1643440118", size = 45573, upload-time = "2026-01-26T02:45:09.349Z" }, + { url = "https://files.pythonhosted.org/packages/91/cc/db74228a8be41884a567e88a62fd589a913708fcf180d029898c17a9a371/multidict-6.7.1-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:8f333ec9c5eb1b7105e3b84b53141e66ca05a19a605368c55450b6ba208cb9ee", size = 75190, upload-time = "2026-01-26T02:45:10.651Z" }, + { url = "https://files.pythonhosted.org/packages/d5/22/492f2246bb5b534abd44804292e81eeaf835388901f0c574bac4eeec73c5/multidict-6.7.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:a407f13c188f804c759fc6a9f88286a565c242a76b27626594c133b82883b5c2", size = 44486, upload-time = "2026-01-26T02:45:11.938Z" }, + { url = "https://files.pythonhosted.org/packages/f1/4f/733c48f270565d78b4544f2baddc2fb2a245e5a8640254b12c36ac7ac68e/multidict-6.7.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:0e161ddf326db5577c3a4cc2d8648f81456e8a20d40415541587a71620d7a7d1", size = 43219, upload-time = "2026-01-26T02:45:14.346Z" }, + { url = "https://files.pythonhosted.org/packages/24/bb/2c0c2287963f4259c85e8bcbba9182ced8d7fca65c780c38e99e61629d11/multidict-6.7.1-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:1e3a8bb24342a8201d178c3b4984c26ba81a577c80d4d525727427460a50c22d", size = 245132, upload-time = "2026-01-26T02:45:15.712Z" }, + { url = "https://files.pythonhosted.org/packages/a7/f9/44d4b3064c65079d2467888794dea218d1601898ac50222ab8a9a8094460/multidict-6.7.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:97231140a50f5d447d3164f994b86a0bed7cd016e2682f8650d6a9158e14fd31", size = 252420, upload-time = "2026-01-26T02:45:17.293Z" }, + { url = "https://files.pythonhosted.org/packages/8b/13/78f7275e73fa17b24c9a51b0bd9d73ba64bb32d0ed51b02a746eb876abe7/multidict-6.7.1-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:6b10359683bd8806a200fd2909e7c8ca3a7b24ec1d8132e483d58e791d881048", size = 233510, upload-time = "2026-01-26T02:45:19.356Z" }, + { url = 
"https://files.pythonhosted.org/packages/4b/25/8167187f62ae3cbd52da7893f58cb036b47ea3fb67138787c76800158982/multidict-6.7.1-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:283ddac99f7ac25a4acadbf004cb5ae34480bbeb063520f70ce397b281859362", size = 264094, upload-time = "2026-01-26T02:45:20.834Z" }, + { url = "https://files.pythonhosted.org/packages/a1/e7/69a3a83b7b030cf283fb06ce074a05a02322359783424d7edf0f15fe5022/multidict-6.7.1-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:538cec1e18c067d0e6103aa9a74f9e832904c957adc260e61cd9d8cf0c3b3d37", size = 260786, upload-time = "2026-01-26T02:45:22.818Z" }, + { url = "https://files.pythonhosted.org/packages/fe/3b/8ec5074bcfc450fe84273713b4b0a0dd47c0249358f5d82eb8104ffe2520/multidict-6.7.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7eee46ccb30ff48a1e35bb818cc90846c6be2b68240e42a78599166722cea709", size = 248483, upload-time = "2026-01-26T02:45:24.368Z" }, + { url = "https://files.pythonhosted.org/packages/48/5a/d5a99e3acbca0e29c5d9cba8f92ceb15dce78bab963b308ae692981e3a5d/multidict-6.7.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:fa263a02f4f2dd2d11a7b1bb4362aa7cb1049f84a9235d31adf63f30143469a0", size = 248403, upload-time = "2026-01-26T02:45:25.982Z" }, + { url = "https://files.pythonhosted.org/packages/35/48/e58cd31f6c7d5102f2a4bf89f96b9cf7e00b6c6f3d04ecc44417c00a5a3c/multidict-6.7.1-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:2e1425e2f99ec5bd36c15a01b690a1a2456209c5deed58f95469ffb46039ccbb", size = 240315, upload-time = "2026-01-26T02:45:27.487Z" }, + { url = "https://files.pythonhosted.org/packages/94/33/1cd210229559cb90b6786c30676bb0c58249ff42f942765f88793b41fdce/multidict-6.7.1-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:497394b3239fc6f0e13a78a3e1b61296e72bf1c5f94b4c4eb80b265c37a131cd", size = 245528, upload-time = "2026-01-26T02:45:28.991Z" }, + { url = "https://files.pythonhosted.org/packages/64/f2/6e1107d226278c876c783056b7db43d800bb64c6131cec9c8dfb6903698e/multidict-6.7.1-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:233b398c29d3f1b9676b4b6f75c518a06fcb2ea0b925119fb2c1bc35c05e1601", size = 258784, upload-time = "2026-01-26T02:45:30.503Z" }, + { url = "https://files.pythonhosted.org/packages/4d/c1/11f664f14d525e4a1b5327a82d4de61a1db604ab34c6603bb3c2cc63ad34/multidict-6.7.1-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:93b1818e4a6e0930454f0f2af7dfce69307ca03cdcfb3739bf4d91241967b6c1", size = 251980, upload-time = "2026-01-26T02:45:32.603Z" }, + { url = "https://files.pythonhosted.org/packages/e1/9f/75a9ac888121d0c5bbd4ecf4eead45668b1766f6baabfb3b7f66a410e231/multidict-6.7.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:f33dc2a3abe9249ea5d8360f969ec7f4142e7ac45ee7014d8f8d5acddf178b7b", size = 243602, upload-time = "2026-01-26T02:45:34.043Z" }, + { url = "https://files.pythonhosted.org/packages/9a/e7/50bf7b004cc8525d80dbbbedfdc7aed3e4c323810890be4413e589074032/multidict-6.7.1-cp314-cp314-win32.whl", hash = "sha256:3ab8b9d8b75aef9df299595d5388b14530839f6422333357af1339443cff777d", size = 40930, upload-time = "2026-01-26T02:45:36.278Z" }, + { url = "https://files.pythonhosted.org/packages/e0/bf/52f25716bbe93745595800f36fb17b73711f14da59ed0bb2eba141bc9f0f/multidict-6.7.1-cp314-cp314-win_amd64.whl", hash = "sha256:5e01429a929600e7dab7b166062d9bb54a5eed752384c7384c968c2afab8f50f", size = 45074, upload-time = "2026-01-26T02:45:37.546Z" }, + { url 
= "https://files.pythonhosted.org/packages/97/ab/22803b03285fa3a525f48217963da3a65ae40f6a1b6f6cf2768879e208f9/multidict-6.7.1-cp314-cp314-win_arm64.whl", hash = "sha256:4885cb0e817aef5d00a2e8451d4665c1808378dc27c2705f1bf4ef8505c0d2e5", size = 42471, upload-time = "2026-01-26T02:45:38.889Z" }, + { url = "https://files.pythonhosted.org/packages/e0/6d/f9293baa6146ba9507e360ea0292b6422b016907c393e2f63fc40ab7b7b5/multidict-6.7.1-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:0458c978acd8e6ea53c81eefaddbbee9c6c5e591f41b3f5e8e194780fe026581", size = 82401, upload-time = "2026-01-26T02:45:40.254Z" }, + { url = "https://files.pythonhosted.org/packages/7a/68/53b5494738d83558d87c3c71a486504d8373421c3e0dbb6d0db48ad42ee0/multidict-6.7.1-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:c0abd12629b0af3cf590982c0b413b1e7395cd4ec026f30986818ab95bfaa94a", size = 48143, upload-time = "2026-01-26T02:45:41.635Z" }, + { url = "https://files.pythonhosted.org/packages/37/e8/5284c53310dcdc99ce5d66563f6e5773531a9b9fe9ec7a615e9bc306b05f/multidict-6.7.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:14525a5f61d7d0c94b368a42cff4c9a4e7ba2d52e2672a7b23d84dc86fb02b0c", size = 46507, upload-time = "2026-01-26T02:45:42.99Z" }, + { url = "https://files.pythonhosted.org/packages/e4/fc/6800d0e5b3875568b4083ecf5f310dcf91d86d52573160834fb4bfcf5e4f/multidict-6.7.1-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:17307b22c217b4cf05033dabefe68255a534d637c6c9b0cc8382718f87be4262", size = 239358, upload-time = "2026-01-26T02:45:44.376Z" }, + { url = "https://files.pythonhosted.org/packages/41/75/4ad0973179361cdf3a113905e6e088173198349131be2b390f9fa4da5fc6/multidict-6.7.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7a7e590ff876a3eaf1c02a4dfe0724b6e69a9e9de6d8f556816f29c496046e59", size = 246884, upload-time = "2026-01-26T02:45:47.167Z" }, + { url = "https://files.pythonhosted.org/packages/c3/9c/095bb28b5da139bd41fb9a5d5caff412584f377914bd8787c2aa98717130/multidict-6.7.1-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:5fa6a95dfee63893d80a34758cd0e0c118a30b8dcb46372bf75106c591b77889", size = 225878, upload-time = "2026-01-26T02:45:48.698Z" }, + { url = "https://files.pythonhosted.org/packages/07/d0/c0a72000243756e8f5a277b6b514fa005f2c73d481b7d9e47cd4568aa2e4/multidict-6.7.1-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a0543217a6a017692aa6ae5cc39adb75e587af0f3a82288b1492eb73dd6cc2a4", size = 253542, upload-time = "2026-01-26T02:45:50.164Z" }, + { url = "https://files.pythonhosted.org/packages/c0/6b/f69da15289e384ecf2a68837ec8b5ad8c33e973aa18b266f50fe55f24b8c/multidict-6.7.1-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f99fe611c312b3c1c0ace793f92464d8cd263cc3b26b5721950d977b006b6c4d", size = 252403, upload-time = "2026-01-26T02:45:51.779Z" }, + { url = "https://files.pythonhosted.org/packages/a2/76/b9669547afa5a1a25cd93eaca91c0da1c095b06b6d2d8ec25b713588d3a1/multidict-6.7.1-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9004d8386d133b7e6135679424c91b0b854d2d164af6ea3f289f8f2761064609", size = 244889, upload-time = "2026-01-26T02:45:53.27Z" }, + { url = "https://files.pythonhosted.org/packages/7e/a9/a50d2669e506dad33cfc45b5d574a205587b7b8a5f426f2fbb2e90882588/multidict-6.7.1-cp314-cp314t-musllinux_1_2_aarch64.whl", 
hash = "sha256:e628ef0e6859ffd8273c69412a2465c4be4a9517d07261b33334b5ec6f3c7489", size = 241982, upload-time = "2026-01-26T02:45:54.919Z" }, + { url = "https://files.pythonhosted.org/packages/c5/bb/1609558ad8b456b4827d3c5a5b775c93b87878fd3117ed3db3423dfbce1b/multidict-6.7.1-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:841189848ba629c3552035a6a7f5bf3b02eb304e9fea7492ca220a8eda6b0e5c", size = 232415, upload-time = "2026-01-26T02:45:56.981Z" }, + { url = "https://files.pythonhosted.org/packages/d8/59/6f61039d2aa9261871e03ab9dc058a550d240f25859b05b67fd70f80d4b3/multidict-6.7.1-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:ce1bbd7d780bb5a0da032e095c951f7014d6b0a205f8318308140f1a6aba159e", size = 240337, upload-time = "2026-01-26T02:45:58.698Z" }, + { url = "https://files.pythonhosted.org/packages/a1/29/fdc6a43c203890dc2ae9249971ecd0c41deaedfe00d25cb6564b2edd99eb/multidict-6.7.1-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:b26684587228afed0d50cf804cc71062cc9c1cdf55051c4c6345d372947b268c", size = 248788, upload-time = "2026-01-26T02:46:00.862Z" }, + { url = "https://files.pythonhosted.org/packages/a9/14/a153a06101323e4cf086ecee3faadba52ff71633d471f9685c42e3736163/multidict-6.7.1-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:9f9af11306994335398293f9958071019e3ab95e9a707dc1383a35613f6abcb9", size = 242842, upload-time = "2026-01-26T02:46:02.824Z" }, + { url = "https://files.pythonhosted.org/packages/41/5f/604ae839e64a4a6efc80db94465348d3b328ee955e37acb24badbcd24d83/multidict-6.7.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:b4938326284c4f1224178a560987b6cf8b4d38458b113d9b8c1db1a836e640a2", size = 240237, upload-time = "2026-01-26T02:46:05.898Z" }, + { url = "https://files.pythonhosted.org/packages/5f/60/c3a5187bf66f6fb546ff4ab8fb5a077cbdd832d7b1908d4365c7f74a1917/multidict-6.7.1-cp314-cp314t-win32.whl", hash = "sha256:98655c737850c064a65e006a3df7c997cd3b220be4ec8fe26215760b9697d4d7", size = 48008, upload-time = "2026-01-26T02:46:07.468Z" }, + { url = "https://files.pythonhosted.org/packages/0c/f7/addf1087b860ac60e6f382240f64fb99f8bfb532bb06f7c542b83c29ca61/multidict-6.7.1-cp314-cp314t-win_amd64.whl", hash = "sha256:497bde6223c212ba11d462853cfa4f0ae6ef97465033e7dc9940cdb3ab5b48e5", size = 53542, upload-time = "2026-01-26T02:46:08.809Z" }, + { url = "https://files.pythonhosted.org/packages/4c/81/4629d0aa32302ef7b2ec65c75a728cc5ff4fa410c50096174c1632e70b3e/multidict-6.7.1-cp314-cp314t-win_arm64.whl", hash = "sha256:2bbd113e0d4af5db41d5ebfe9ccaff89de2120578164f86a5d17d5a576d1e5b2", size = 44719, upload-time = "2026-01-26T02:46:11.146Z" }, + { url = "https://files.pythonhosted.org/packages/81/08/7036c080d7117f28a4af526d794aab6a84463126db031b007717c1a6676e/multidict-6.7.1-py3-none-any.whl", hash = "sha256:55d97cc6dae627efa6a6e548885712d4864b81110ac76fa4e534c03819fa4a56", size = 12319, upload-time = "2026-01-26T02:46:44.004Z" }, ] [[package]] @@ -4623,8 +4689,8 @@ name = "powerfx" version = "0.0.34" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "cffi", marker = "(python_full_version < '3.14' and sys_platform == 'darwin') or (python_full_version < '3.14' and sys_platform == 'linux') or (python_full_version < '3.14' and sys_platform == 'win32')" }, - { name = "pythonnet", marker = "(python_full_version < '3.14' and sys_platform == 'darwin') or (python_full_version < '3.14' and sys_platform == 'linux') or (python_full_version < '3.14' and sys_platform == 'win32')" }, + { name = "cffi", marker = "sys_platform == 'darwin' or 
sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "pythonnet", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/9f/fb/6c4bf87e0c74ca1c563921ce89ca1c5785b7576bca932f7255cdf81082a7/powerfx-0.0.34.tar.gz", hash = "sha256:956992e7afd272657ed16d80f4cad24ec95d9e4a79fb9dfa4a068a09e136af32", size = 3237555, upload-time = "2025-12-22T15:50:59.682Z" } wheels = [ @@ -5291,7 +5357,7 @@ name = "pythonnet" version = "3.0.5" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "clr-loader", marker = "(python_full_version < '3.14' and sys_platform == 'darwin') or (python_full_version < '3.14' and sys_platform == 'linux') or (python_full_version < '3.14' and sys_platform == 'win32')" }, + { name = "clr-loader", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/9a/d6/1afd75edd932306ae9bd2c2d961d603dc2b52fcec51b04afea464f1f6646/pythonnet-3.0.5.tar.gz", hash = "sha256:48e43ca463941b3608b32b4e236db92d8d40db4c58a75ace902985f76dac21cf", size = 239212, upload-time = "2024-12-13T08:30:44.393Z" } wheels = [ @@ -6155,11 +6221,11 @@ wheels = [ [[package]] name = "setuptools" -version = "80.10.1" +version = "80.10.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/86/ff/f75651350db3cf2ef767371307eb163f3cc1ac03e16fdf3ac347607f7edb/setuptools-80.10.1.tar.gz", hash = "sha256:bf2e513eb8144c3298a3bd28ab1a5edb739131ec5c22e045ff93cd7f5319703a", size = 1229650, upload-time = "2026-01-21T09:42:03.061Z" } +sdist = { url = "https://files.pythonhosted.org/packages/76/95/faf61eb8363f26aa7e1d762267a8d602a1b26d4f3a1e758e92cb3cb8b054/setuptools-80.10.2.tar.gz", hash = "sha256:8b0e9d10c784bf7d262c4e5ec5d4ec94127ce206e8738f29a437945fbc219b70", size = 1200343, upload-time = "2026-01-25T22:38:17.252Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/e0/76/f963c61683a39084aa575f98089253e1e852a4417cb8a3a8a422923a5246/setuptools-80.10.1-py3-none-any.whl", hash = "sha256:fc30c51cbcb8199a219c12cc9c281b5925a4978d212f84229c909636d9f6984e", size = 1099859, upload-time = "2026-01-21T09:42:00.688Z" }, + { url = "https://files.pythonhosted.org/packages/94/b8/f1f62a5e3c0ad2ff1d189590bfa4c46b4f3b6e49cef6f26c6ee4e575394d/setuptools-80.10.2-py3-none-any.whl", hash = "sha256:95b30ddfb717250edb492926c92b5221f7ef3fbcc2b07579bcd4a27da21d0173", size = 1064234, upload-time = "2026-01-25T22:38:15.216Z" }, ] [[package]] @@ -6651,11 +6717,11 @@ wheels = [ [[package]] name = "types-python-dateutil" -version = "2.9.0.20251115" +version = "2.9.0.20260124" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/6a/36/06d01fb52c0d57e9ad0c237654990920fa41195e4b3d640830dabf9eeb2f/types_python_dateutil-2.9.0.20251115.tar.gz", hash = "sha256:8a47f2c3920f52a994056b8786309b43143faa5a64d4cbb2722d6addabdf1a58", size = 16363, upload-time = "2025-11-15T03:00:13.717Z" } +sdist = { url = "https://files.pythonhosted.org/packages/fe/41/4f8eb1ce08688a9e3e23709ed07089ccdeaf95b93745bfb768c6da71197d/types_python_dateutil-2.9.0.20260124.tar.gz", hash = "sha256:7d2db9f860820c30e5b8152bfe78dbdf795f7d1c6176057424e8b3fdd1f581af", size = 16596, upload-time = "2026-01-24T03:18:42.975Z" } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/43/0b/56961d3ba517ed0df9b3a27bfda6514f3d01b28d499d1bce9068cfe4edd1/types_python_dateutil-2.9.0.20251115-py3-none-any.whl", hash = "sha256:9cf9c1c582019753b8639a081deefd7e044b9fa36bd8217f565c6c4e36ee0624", size = 18251, upload-time = "2025-11-15T03:00:12.317Z" }, + { url = "https://files.pythonhosted.org/packages/5a/c2/aa5e3f4103cc8b1dcf92432415dde75d70021d634ecfd95b2e913cf43e17/types_python_dateutil-2.9.0.20260124-py3-none-any.whl", hash = "sha256:f802977ae08bf2260142e7ca1ab9d4403772a254409f7bbdf652229997124951", size = 18266, upload-time = "2026-01-24T03:18:42.155Z" }, ] [[package]]