diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/__init__.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/__init__.py
index 4e9392d37b0a..a8ae093b8661 100644
--- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/__init__.py
+++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/__init__.py
@@ -23,12 +23,14 @@
from ._base import AgentServerHost
from ._config import AgentConfig
from ._errors import create_error_response
+from ._server_version import build_server_version
from ._tracing import end_span, flush_spans, record_error, trace_stream
from ._version import VERSION
__all__ = [
"AgentConfig",
"AgentServerHost",
+ "build_server_version",
"create_error_response",
"end_span",
"flush_spans",
diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_base.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_base.py
index 163af602a3c4..f97b319c474e 100644
--- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_base.py
+++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_base.py
@@ -18,6 +18,7 @@
from starlette.routing import Route
from . import _config, _tracing
+from ._server_version import build_server_version
from ._version import VERSION as _CORE_VERSION
logger = logging.getLogger("azure.ai.agentserver")
@@ -25,23 +26,25 @@
# Pre-built health-check response to avoid per-request allocation.
_HEALTHY_BODY = b'{"status":"healthy"}'
-# Server identity header per spec: {sdk}/{version} (python/{runtime})
-_PLATFORM_SERVER_VALUE = (
- f"azure-ai-agentserver-core/{_CORE_VERSION} "
- f"(python/{sys.version_info.major}.{sys.version_info.minor})"
-)
-
# Sentinel attribute name set on the console handler to prevent adding duplicates
# across multiple AgentServerHost instantiations.
_CONSOLE_HANDLER_ATTR = "_agentserver_console"
class _PlatformHeaderMiddleware(BaseHTTPMiddleware):
- """Middleware that adds x-platform-server identity header to all responses."""
+ """Middleware that adds ``x-platform-server`` version header to all responses.
+
+ Takes a callable that returns the current header value so protocol
+ hosts can register additional segments after construction.
+ """
+
+ def __init__(self, app: Any, *, get_server_version: Callable[[], str]) -> None: # type: ignore[override]
+ super().__init__(app)
+ self._get_server_version = get_server_version
async def dispatch(self, request: Request, call_next): # type: ignore[no-untyped-def, override]
response = await call_next(request)
- response.headers["x-platform-server"] = _PLATFORM_SERVER_VALUE
+ response.headers["x-platform-server"] = self._get_server_version()
return response
@@ -103,6 +106,14 @@ def __init__(
# Shutdown handler slot (server-level lifecycle) -------------------
self._shutdown_fn: Optional[Callable[[], Awaitable[None]]] = None
+ # Server version segments for the x-platform-server header.
+ # Protocol packages call register_server_version() to add their
+ # own portion; the middleware joins them at response time.
+ self._server_version_segments: list[str] = []
+ self.register_server_version(
+ build_server_version("azure-ai-agentserver-core", _CORE_VERSION)
+ )
+
# Logging ----------------------------------------------------------
resolved_level = _config.resolve_log_level(log_level)
logger.setLevel(resolved_level)
@@ -171,10 +182,46 @@ async def _lifespan(_app: Starlette) -> AsyncGenerator[None, None]: # noqa: RUF
super().__init__(
routes=all_routes,
lifespan=_lifespan,
- middleware=[Middleware(_PlatformHeaderMiddleware)],
+ middleware=[
+ Middleware(_PlatformHeaderMiddleware, get_server_version=self._build_server_version),
+ ],
**kwargs,
)
+ # ------------------------------------------------------------------
+ # Server version (x-platform-server header)
+ # ------------------------------------------------------------------
+
+ def register_server_version(self, version_segment: str) -> None:
+ """Register a version segment for the ``x-platform-server`` header.
+
+ Protocol packages (e.g. responses, invocations) call this in their
+ ``__init__`` to add their own portion. Handler developers can also
+ call it to append a custom version string. Duplicates are ignored.
+
+ Use :func:`~azure.ai.agentserver.core.build_server_version` to
+ build a standard segment::
+
+ from azure.ai.agentserver.core import build_server_version
+
+ app.register_server_version(
+ build_server_version("my-library", "2.0.0")
+ )
+
+ :param version_segment: The version string to register.
+ :type version_segment: str
+ :raises ValueError: If *version_segment* is empty or whitespace-only.
+ """
+ if not version_segment or not version_segment.strip():
+ raise ValueError("Version segment must not be empty.")
+ normalized = version_segment.strip()
+ if normalized not in self._server_version_segments:
+ self._server_version_segments.append(normalized)
+
+ def _build_server_version(self) -> str:
+ """Join all registered segments into the header value."""
+ return " ".join(self._server_version_segments)
+
# ------------------------------------------------------------------
# Tracing (for protocol subclasses)
# ------------------------------------------------------------------
diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_config.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_config.py
index 9646a8e8e9f3..efb99a2509b7 100644
--- a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_config.py
+++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_config.py
@@ -32,7 +32,7 @@
_ENV_SSE_KEEPALIVE_INTERVAL = "SSE_KEEPALIVE_INTERVAL"
_DEFAULT_PORT = 8088
-_DEFAULT_SSE_KEEPALIVE_INTERVAL = 15
+_DEFAULT_SSE_KEEPALIVE_INTERVAL = 0
# ======================================================================
@@ -292,7 +292,7 @@ def resolve_sse_keepalive_interval(interval: Optional[int] = None) -> int:
"""Resolve the SSE keep-alive interval from argument, env var, or default.
Resolution order: explicit *interval* → ``SSE_KEEPALIVE_INTERVAL`` env var
- → ``15`` (seconds). A value of ``0`` disables keep-alive.
+ → ``0`` (seconds). A value of ``0`` disables keep-alive.
:param interval: Explicitly requested interval in seconds, or None.
:type interval: Optional[int]
diff --git a/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_server_version.py b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_server_version.py
new file mode 100644
index 000000000000..a20c1b6fc60b
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-core/azure/ai/agentserver/core/_server_version.py
@@ -0,0 +1,33 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+"""Utility for building ``x-platform-server`` version strings."""
+
+from __future__ import annotations
+
+import sys
+
+
+def build_server_version(sdk_name: str, version: str) -> str:
+ """Build a standard version segment for the ``x-platform-server`` header.
+
+ Format: ``{sdk_name}/{version} (python/{major}.{minor})``
+
+ Protocol packages call this during host initialisation and pass the
+ result to :meth:`AgentServerHost.register_server_version`.
+
+ :param sdk_name: The SDK identifier
+ (e.g., ``"azure-ai-agentserver-responses"``).
+ :type sdk_name: str
+ :param version: The package version string (e.g., ``"1.0.0b1"``).
+ :type version: str
+ :returns: A formatted version string.
+ :rtype: str
+ :raises ValueError: If *sdk_name* or *version* is empty.
+ """
+ if not sdk_name or not sdk_name.strip():
+ raise ValueError("sdk_name must not be empty.")
+ if not version or not version.strip():
+ raise ValueError("version must not be empty.")
+ runtime = f"python/{sys.version_info.major}.{sys.version_info.minor}"
+ return f"{sdk_name}/{version} ({runtime})"
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/CHANGELOG.md b/sdk/agentserver/azure-ai-agentserver-responses/CHANGELOG.md
new file mode 100644
index 000000000000..5646e312f178
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/CHANGELOG.md
@@ -0,0 +1,19 @@
+# Release History
+
+## 1.0.0b1 (Unreleased)
+
+### Features Added
+
+- Initial release of `azure-ai-agentserver-responses`.
+- `ResponsesAgentServerHost` — Starlette-based host with Responses protocol endpoints (`POST /responses`, `GET /responses/{id}`, `POST /responses/{id}/cancel`, `DELETE /responses/{id}`, `GET /responses/{id}/input_items`).
+- `TextResponse` — high-level convenience for text-only responses with automatic SSE lifecycle (`create_text` and `create_text_stream` modes).
+- `ResponseEventStream` — low-level builder API for emitting SSE events with full control over output items (message, function call, reasoning, file search, web search, code interpreter, image gen, MCP, custom tool).
+- Convenience generators (`output_item_message()`, `output_item_function_call()`, `output_item_reasoning_item()`) and async streaming variants (`aoutput_item_message()`, etc.) for common patterns.
+- `ResponseContext` providing `response_id`, conversation history loading, input item access via `get_input_items()` (returns `Item` subtypes), `get_input_text()` convenience for extracting text content, isolation context, and client headers.
+- `ResponsesServerOptions` for configuring default model, SSE keep-alive, shutdown grace period, and other runtime options.
+- Support for all execution modes: default (synchronous), streaming (SSE), background, and streaming + background.
+- Automatic SSE event replay for previously streamed responses via `?stream=true`.
+- Cooperative cancellation via `asyncio.Event` and graceful shutdown integration.
+- `InMemoryResponseProvider` as the default in-process state store.
+- `ResponseProviderProtocol` and `ResponseStreamProviderProtocol` for custom storage implementations.
+- Built-in distributed tracing with OpenTelemetry integration.
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/LICENSE b/sdk/agentserver/azure-ai-agentserver-responses/LICENSE
new file mode 100644
index 000000000000..63447fd8bbbf
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/LICENSE
@@ -0,0 +1,21 @@
+Copyright (c) Microsoft Corporation.
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/MANIFEST.in b/sdk/agentserver/azure-ai-agentserver-responses/MANIFEST.in
new file mode 100644
index 000000000000..59f874c668d6
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/MANIFEST.in
@@ -0,0 +1,9 @@
+include *.md
+include LICENSE
+recursive-include tests *.py
+recursive-include samples *.py *.md
+recursive-include doc *.rst *.md
+include azure/__init__.py
+include azure/ai/__init__.py
+include azure/ai/agentserver/__init__.py
+include azure/ai/agentserver/responses/py.typed
\ No newline at end of file
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/Makefile b/sdk/agentserver/azure-ai-agentserver-responses/Makefile
new file mode 100644
index 000000000000..2805f475736f
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/Makefile
@@ -0,0 +1,197 @@
+# Python TypeSpec Code Generation Tooling
+# Targets: generate-models, generate-validators, generate-openapi,
+# generate-contracts, clean, install-typespec-deps
+#
+# Dependencies are resolved from eng/emitter-package.json (via tsp-client sync)
+# which creates TempTypeSpecFiles/package.json with pinned versions.
+# Do NOT override versions with npm install --no-save.
+
+OUTPUT_DIR ?= azure/ai/agentserver/responses/models/_generated
+TYPESPEC_DIR ?= type_spec
+TEMP_TSP_DIR := $(TYPESPEC_DIR)/TempTypeSpecFiles
+OPENAPI_SPEC ?= $(TEMP_TSP_DIR)/Foundry/openapi.virtual-public-preview.yaml
+VALIDATORS_OUTPUT ?= $(OUTPUT_DIR)/_validators.py
+ROOT_SCHEMAS ?= CreateResponse
+OVERLAY ?= scripts/validation-overlay.yaml
+TEMP_OUTPUT_DIR := $(OUTPUT_DIR)/.tmp_codegen
+MODEL_PACKAGE_DIR := $(TEMP_OUTPUT_DIR)/azure/ai/agentserver/responses/models
+MODEL_SHIMS_DIR := scripts/generated_shims
+CONTRACTS_DIR := $(TEMP_TSP_DIR)/sdk-service-agentserver-contracts
+MODEL_BASE := $(OUTPUT_DIR)/sdk/models/_utils/model_base.py
+
+.PHONY: generate-models generate-validators generate-openapi generate-contracts clean install-typespec-deps
+
+ifeq ($(OS),Windows_NT)
+SHELL := cmd
+.SHELLFLAGS := /c
+endif
+
+# --------------------------------------------------------------------------
+# generate-validators: Generate JSON payload validators from OpenAPI
+# --------------------------------------------------------------------------
+ifeq ($(OS),Windows_NT)
+generate-validators:
+ @where python >NUL 2>NUL || (echo Error: python is required and was not found on PATH. 1>&2 && exit /b 1)
+ @if not exist "$(OPENAPI_SPEC)" (echo Error: OpenAPI spec not found at $(OPENAPI_SPEC). Run 'make generate-openapi' first. 1>&2 && exit /b 1)
+ @echo Generating payload validators from $(OPENAPI_SPEC)...
+ python scripts/generate_validators.py --input "$(OPENAPI_SPEC)" --output "$(VALIDATORS_OUTPUT)" --root-schemas "$(ROOT_SCHEMAS)" --overlay "$(OVERLAY)"
+ @echo Generated validators at $(VALIDATORS_OUTPUT)
+else
+generate-validators:
+ @command -v python >/dev/null 2>&1 || { \
+ echo "Error: python is required and was not found on PATH." >&2; \
+ exit 1; \
+ }
+ @test -f "$(OPENAPI_SPEC)" || { \
+ echo "Error: OpenAPI spec not found at $(OPENAPI_SPEC)." >&2; \
+ echo "Run 'make generate-openapi' first." >&2; \
+ exit 1; \
+ }
+ @echo "Generating payload validators from $(OPENAPI_SPEC)..."
+ python scripts/generate_validators.py --input "$(OPENAPI_SPEC)" --output "$(VALIDATORS_OUTPUT)" --root-schemas "$(ROOT_SCHEMAS)" --overlay "$(OVERLAY)"
+ @echo "Generated validators at $(VALIDATORS_OUTPUT)"
+endif
+
+# --------------------------------------------------------------------------
+# generate-openapi: Compile TypeSpec → OpenAPI spec (for validator generation)
+# --------------------------------------------------------------------------
+ifeq ($(OS),Windows_NT)
+generate-openapi:
+ @where npm >NUL 2>NUL || (echo Error: npm is required. Install Node.js ^(v18+^) from https://nodejs.org/ 1>&2 && exit /b 1)
+ @if not exist "$(CONTRACTS_DIR)\client.tsp" ( \
+ echo Error: TypeSpec sources not found. Run 'make install-typespec-deps' first. 1>&2 && exit /b 1 \
+ )
+ @echo Compiling TypeSpec to OpenAPI spec...
+ cd /d $(TEMP_TSP_DIR) && npx tsp compile sdk-service-agentserver-contracts\client.tsp --emit @typespec/openapi3 --option "@typespec/openapi3.emitter-output-dir=$(abspath $(dir $(OPENAPI_SPEC)))"
+ @if not exist "$(OPENAPI_SPEC)" (echo Error: OpenAPI spec was not generated at $(OPENAPI_SPEC). 1>&2 && exit /b 1)
+ @echo OpenAPI spec generated at $(OPENAPI_SPEC)
+else
+generate-openapi:
+ @command -v npm >/dev/null 2>&1 || { \
+ echo "Error: npm is required. Install Node.js (v18+) from https://nodejs.org/" >&2; \
+ exit 1; \
+ }
+ @test -f "$(CONTRACTS_DIR)/client.tsp" || { \
+ echo "Error: TypeSpec sources not found. Run 'make install-typespec-deps' first." >&2; \
+ exit 1; \
+ }
+ @echo "Compiling TypeSpec → OpenAPI spec..."
+ cd $(TEMP_TSP_DIR) && npx tsp compile sdk-service-agentserver-contracts/client.tsp --emit @typespec/openapi3 --option "@typespec/openapi3.emitter-output-dir=$(abspath $(dir $(OPENAPI_SPEC)))"
+ @test -f "$(OPENAPI_SPEC)" || { \
+ echo "Error: OpenAPI spec was not generated at $(OPENAPI_SPEC)." >&2; \
+ exit 1; \
+ }
+ @echo "OpenAPI spec generated at $(OPENAPI_SPEC)"
+endif
+
+# --------------------------------------------------------------------------
+# generate-contracts: Generate models + OpenAPI spec + validators
+# --------------------------------------------------------------------------
+generate-contracts: generate-models generate-openapi generate-validators
+
+# --------------------------------------------------------------------------
+# generate-models: Compile TypeSpec definitions into Python model classes
+# --------------------------------------------------------------------------
+ifeq ($(OS),Windows_NT)
+generate-models:
+ @where tsp-client >NUL 2>NUL || (echo Error: tsp-client is not installed. 1>&2 && echo Run 'make install-typespec-deps' to install it. 1>&2 && exit /b 1)
+ @where npm >NUL 2>NUL || (echo Error: npm is required. Install Node.js ^(v18+^) from https://nodejs.org/ 1>&2 && exit /b 1)
+ @echo Syncing upstream TypeSpec sources...
+ cd /d $(TYPESPEC_DIR) && tsp-client sync
+ @echo Installing TypeSpec dependencies from emitter-package.json...
+ cd /d $(TEMP_TSP_DIR) && npm install --silent
+ @echo Generating Python models...
+ @if exist "$(OUTPUT_DIR)" rmdir /s /q "$(OUTPUT_DIR)"
+ cd /d $(TEMP_TSP_DIR) && npx tsp compile sdk-service-agentserver-contracts\client.tsp --emit @azure-tools/typespec-python --option "@azure-tools/typespec-python.emitter-output-dir=$(abspath $(TEMP_OUTPUT_DIR))"
+ @if not exist "$(MODEL_PACKAGE_DIR)" (echo Error: generated model package was not found. 1>&2 && exit /b 1)
+ @if not exist "$(OUTPUT_DIR)\sdk" mkdir "$(OUTPUT_DIR)\sdk"
+ @xcopy /E /I /Y "$(MODEL_PACKAGE_DIR)" "$(OUTPUT_DIR)\sdk\models" >NUL
+ @if exist "$(OUTPUT_DIR)\sdk\models\aio" rmdir /s /q "$(OUTPUT_DIR)\sdk\models\aio"
+ @if exist "$(OUTPUT_DIR)\sdk\models\operations" rmdir /s /q "$(OUTPUT_DIR)\sdk\models\operations"
+ @if exist "$(OUTPUT_DIR)\sdk\models\_client.py" del /q "$(OUTPUT_DIR)\sdk\models\_client.py"
+ @if exist "$(OUTPUT_DIR)\sdk\models\_configuration.py" del /q "$(OUTPUT_DIR)\sdk\models\_configuration.py"
+ @if exist "$(OUTPUT_DIR)\sdk\models\_version.py" del /q "$(OUTPUT_DIR)\sdk\models\_version.py"
+ @copy /Y "$(MODEL_SHIMS_DIR)\sdk_models__init__.py" "$(OUTPUT_DIR)\sdk\models\__init__.py" >NUL
+ @copy /Y "$(MODEL_SHIMS_DIR)\__init__.py" "$(OUTPUT_DIR)\__init__.py" >NUL
+ @copy /Y "$(MODEL_SHIMS_DIR)\_enums.py" "$(OUTPUT_DIR)\_enums.py" >NUL
+ @copy /Y "$(MODEL_SHIMS_DIR)\_models.py" "$(OUTPUT_DIR)\_models.py" >NUL
+ @copy /Y "$(MODEL_SHIMS_DIR)\_patch.py" "$(OUTPUT_DIR)\_patch.py" >NUL
+ @copy /Y "$(MODEL_SHIMS_DIR)\models_patch.py" "$(OUTPUT_DIR)\sdk\models\models\_patch.py" >NUL
+ @REM Patch _deserialize_sequence: reject plain strings so union falls through to str branch
+ @powershell -Command "(Get-Content '$(MODEL_BASE)') -replace 'return type\(obj\)\(_deserialize\(deserializer, entry, module\) for entry in obj\)','if isinstance(obj, str):\n raise DeserializationError()\n return type(obj)(_deserialize(deserializer, entry, module) for entry in obj)' | Set-Content '$(MODEL_BASE)'"
+ @if exist "$(TEMP_OUTPUT_DIR)" rmdir /s /q "$(TEMP_OUTPUT_DIR)"
+else
+generate-models:
+ @command -v tsp-client >/dev/null 2>&1 || { \
+ echo "Error: tsp-client is not installed." >&2; \
+ echo "Run 'make install-typespec-deps' to install it." >&2; \
+ exit 1; \
+ }
+ @command -v npm >/dev/null 2>&1 || { \
+ echo "Error: npm is required. Install Node.js (v18+) from https://nodejs.org/" >&2; \
+ exit 1; \
+ }
+ @echo "Syncing upstream TypeSpec sources..."
+ cd $(TYPESPEC_DIR) && tsp-client sync
+ @echo "Installing TypeSpec dependencies from emitter-package.json..."
+ cd $(TEMP_TSP_DIR) && npm install --silent
+ @echo "Generating Python models..."
+ rm -rf $(OUTPUT_DIR)
+ cd $(TEMP_TSP_DIR) && npx tsp compile sdk-service-agentserver-contracts/client.tsp --emit @azure-tools/typespec-python --option "@azure-tools/typespec-python.emitter-output-dir=$(abspath $(TEMP_OUTPUT_DIR))"
+ @test -d $(MODEL_PACKAGE_DIR) || { \
+ echo "Error: generated model package was not found." >&2; \
+ exit 1; \
+ }
+ mkdir -p $(OUTPUT_DIR)/sdk
+ cp -R $(MODEL_PACKAGE_DIR) $(OUTPUT_DIR)/sdk/models
+ rm -rf $(OUTPUT_DIR)/sdk/models/aio
+ rm -rf $(OUTPUT_DIR)/sdk/models/operations
+ rm -f $(OUTPUT_DIR)/sdk/models/_client.py
+ rm -f $(OUTPUT_DIR)/sdk/models/_configuration.py
+ rm -f $(OUTPUT_DIR)/sdk/models/_version.py
+ cp $(MODEL_SHIMS_DIR)/sdk_models__init__.py $(OUTPUT_DIR)/sdk/models/__init__.py
+ cp $(MODEL_SHIMS_DIR)/__init__.py $(OUTPUT_DIR)/__init__.py
+ cp $(MODEL_SHIMS_DIR)/_enums.py $(OUTPUT_DIR)/_enums.py
+ cp $(MODEL_SHIMS_DIR)/_models.py $(OUTPUT_DIR)/_models.py
+ cp $(MODEL_SHIMS_DIR)/_patch.py $(OUTPUT_DIR)/_patch.py
+ cp $(MODEL_SHIMS_DIR)/models_patch.py $(OUTPUT_DIR)/sdk/models/models/_patch.py
+ # Patch _deserialize_sequence: reject plain strings so union falls through to str branch
+ sed -i 's/ return type(obj)(_deserialize(deserializer, entry, module) for entry in obj)/ if isinstance(obj, str):\n raise DeserializationError()\n return type(obj)(_deserialize(deserializer, entry, module) for entry in obj)/' $(MODEL_BASE)
+ rm -rf $(TEMP_OUTPUT_DIR)
+endif
+
+# --------------------------------------------------------------------------
+# clean: Remove all previously generated Python model files
+# --------------------------------------------------------------------------
+ifeq ($(OS),Windows_NT)
+clean:
+ @if exist "$(OUTPUT_DIR)" rmdir /s /q "$(OUTPUT_DIR)"
+else
+clean:
+ rm -rf $(OUTPUT_DIR)
+endif
+
+# --------------------------------------------------------------------------
+# install-typespec-deps: Install tsp-client CLI and sync TypeSpec sources
+# --------------------------------------------------------------------------
+ifeq ($(OS),Windows_NT)
+install-typespec-deps:
+ @where node >NUL 2>NUL || (echo Error: Node.js ^(v18+^) is required. Install from https://nodejs.org/ 1>&2 && exit /b 1)
+ @where npm >NUL 2>NUL || (echo Error: npm is required. Install Node.js ^(v18+^) from https://nodejs.org/ 1>&2 && exit /b 1)
+ npm install -g @azure-tools/typespec-client-generator-cli
+ cd /d $(TYPESPEC_DIR) && tsp-client sync
+ cd /d $(TEMP_TSP_DIR) && npm install --silent
+else
+install-typespec-deps:
+ @command -v node >/dev/null 2>&1 || { \
+ echo "Error: Node.js (v18+) is required. Install from https://nodejs.org/" >&2; \
+ exit 1; \
+ }
+ @command -v npm >/dev/null 2>&1 || { \
+ echo "Error: npm is required. Install Node.js (v18+) from https://nodejs.org/" >&2; \
+ exit 1; \
+ }
+ npm install -g @azure-tools/typespec-client-generator-cli
+ cd $(TYPESPEC_DIR) && tsp-client sync
+ cd $(TEMP_TSP_DIR) && npm install --silent
+endif
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/README.md b/sdk/agentserver/azure-ai-agentserver-responses/README.md
new file mode 100644
index 000000000000..e58a6cf10c64
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/README.md
@@ -0,0 +1,233 @@
+# Azure AI Agent Server Responses client library for Python
+
+The `azure-ai-agentserver-responses` package provides the Responses protocol endpoints for Azure AI Hosted Agent containers. It plugs into the [`azure-ai-agentserver-core`](https://pypi.org/project/azure-ai-agentserver-core/) host framework and adds the full response lifecycle: create, stream (SSE), cancel, delete, replay, and input-item listing.
+
+## Getting started
+
+### Install the package
+
+```bash
+pip install azure-ai-agentserver-responses
+```
+
+This automatically installs `azure-ai-agentserver-core` as a dependency.
+
+### Prerequisites
+
+- Python 3.10 or later
+
+## Key concepts
+
+### ResponsesAgentServerHost
+
+`ResponsesAgentServerHost` is an `AgentServerHost` subclass that adds Responses protocol endpoints. Register your handler with the `@app.create_handler` decorator:
+
+```python
+@app.create_handler
+def my_handler(
+ request: CreateResponse, context: ResponseContext, cancellation_signal: asyncio.Event
+):
+ ...
+```
+
+### Protocol endpoints
+
+| Method | Route | Description |
+|---|---|---|
+| `POST` | `/responses` | Create a new response |
+| `GET` | `/responses/{response_id}` | Get response state (JSON or SSE replay via `?stream=true`) |
+| `POST` | `/responses/{response_id}/cancel` | Cancel an in-flight response |
+| `DELETE` | `/responses/{response_id}` | Delete a stored response |
+| `GET` | `/responses/{response_id}/input_items` | List input items (paginated) |
+
+### TextResponse
+
+The simplest way to return text. Handles the full SSE lifecycle automatically (`response.created` → `response.in_progress` → message/content events → `response.completed`):
+
+```python
+return TextResponse(context, request, create_text=lambda: "Hello!")
+```
+
+For streaming, provide `create_text_stream` instead:
+
+```python
+async def tokens():
+ for t in ["Hello", ", ", "world!"]:
+ yield t
+
+return TextResponse(context, request, create_text_stream=tokens)
+```
+
+### ResponseEventStream
+
+Use `ResponseEventStream` when you need function calls, reasoning items, multiple output types, or fine-grained event control. Each `yield` maps 1:1 to an SSE event with zero bookkeeping:
+
+```python
+stream = ResponseEventStream(response_id=context.response_id, request=request)
+yield stream.emit_created()
+yield stream.emit_in_progress()
+yield from stream.output_item_message("Hello, world!")
+yield stream.emit_completed()
+```
+
+Drop down to the builder API for full control over individual events:
+
+```python
+message = stream.add_output_item_message()
+yield message.emit_added()
+text = message.add_text_content()
+yield text.emit_added()
+yield text.emit_delta("Hello!")
+yield text.emit_done()
+yield message.emit_content_done(text)
+yield message.emit_done()
+```
+
+### ResponseContext
+
+The `ResponseContext` provides request-scoped state:
+
+| Property / Method | Description |
+|---|---|
+| `response_id` | Unique ID for this response |
+| `is_shutdown_requested` | Whether the server is draining |
+| `raw_body` | Raw request body bytes |
+| `isolation` | `IsolationContext` with `user_key` and `chat_key` for multi-tenant state partitioning |
+| `client_headers` | Dictionary of `x-client-*` headers forwarded from the platform |
+| `query_parameters` | Dictionary of query string parameters |
+| `get_input_items()` | Load resolved input items as `Item` subtypes |
+| `get_input_text()` | Extract all text content from input items as a single string |
+| `get_history()` | Load conversation history items |
+
+### Streaming and background modes
+
+The SDK automatically handles all combinations of `stream` and `background` flags:
+
+- **Default** — Run to completion, return final JSON response
+- **Streaming** — Pipe events as SSE in real-time, cancel on client disconnect
+- **Background** — Return immediately, handler runs in the background
+- **Streaming + Background** — SSE while connected, handler continues after disconnect
+
+### Response lifecycle
+
+The library orchestrates the complete response lifecycle: `created` → `in_progress` → `completed` (or `failed` / `cancelled`). Cancellation, error handling, and terminal event guarantees are all managed automatically.
+
+For detailed handler implementation guidance, see [docs/handler-implementation-guide.md](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/agentserver/azure-ai-agentserver-responses/docs/handler-implementation-guide.md).
+
+## Examples
+
+### Echo handler
+
+```python
+import asyncio
+
+from azure.ai.agentserver.responses import (
+ CreateResponse,
+ ResponseContext,
+ ResponsesAgentServerHost,
+ TextResponse,
+)
+
+app = ResponsesAgentServerHost()
+
+
+@app.create_handler
+async def handler(request: CreateResponse, context: ResponseContext, cancellation_signal: asyncio.Event):
+ text = await context.get_input_text()
+ return TextResponse(context, request, create_text=lambda: f"Echo: {text}")
+
+
+app.run()
+```
+
+### Function calling
+
+```python
+import json
+
+from azure.ai.agentserver.responses import ResponseEventStream
+
+stream = ResponseEventStream(response_id=context.response_id, request=request)
+yield stream.emit_created()
+yield stream.emit_in_progress()
+
+arguments = json.dumps({"location": "Seattle", "unit": "fahrenheit"})
+yield from stream.output_item_function_call("get_weather", "call_001", arguments)
+
+yield stream.emit_completed()
+```
+
+### Reasoning + text message
+
+```python
+stream = ResponseEventStream(response_id=context.response_id, request=request)
+yield stream.emit_created()
+yield stream.emit_in_progress()
+
+yield from stream.output_item_reasoning_item("Let me think about this...")
+yield from stream.output_item_message("Here is my answer.")
+
+yield stream.emit_completed()
+```
+
+### Configuration
+
+```python
+from azure.ai.agentserver.responses import ResponsesAgentServerHost, ResponsesServerOptions
+
+options = ResponsesServerOptions(
+ default_model="gpt-4o",
+ sse_keep_alive_interval_seconds=15,
+ shutdown_grace_period_seconds=10,
+)
+
+app = ResponsesAgentServerHost(options=options)
+```
+
+## Troubleshooting
+
+### Common errors
+
+- **400 Bad Request**: The request body failed validation. Check that optional fields such as `model` (when provided) are valid and that `input` items are well-formed.
+- **404 Not Found**: The response ID does not exist or has expired past the configured TTL.
+- **400 Bad Request** (cancel): The response was not created with `background=true`, or it has already reached a terminal state.
+
+### Reporting issues
+
+To report an issue with the client library, or request additional features, please open a GitHub issue [here](https://github.com/Azure/azure-sdk-for-python/issues).
+
+## Next steps
+
+Visit the [Samples](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/agentserver/azure-ai-agentserver-responses/samples) folder for complete working examples:
+
+| Sample | Description |
+|---|---|
+| [Getting Started](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/agentserver/azure-ai-agentserver-responses/samples/sample_01_getting_started.py) | Minimal echo handler using `TextResponse` |
+| [Streaming Text Deltas](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/agentserver/azure-ai-agentserver-responses/samples/sample_02_streaming_text_deltas.py) | Token-by-token streaming with `configure` callback |
+| [Full Control](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/agentserver/azure-ai-agentserver-responses/samples/sample_03_full_control.py) | Convenience, streaming, and builder — three ways to emit output |
+| [Function Calling](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/agentserver/azure-ai-agentserver-responses/samples/sample_04_function_calling.py) | Two-turn function calling with convenience and builder variants |
+| [Conversation History](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/agentserver/azure-ai-agentserver-responses/samples/sample_05_conversation_history.py) | Multi-turn study tutor with `context.get_history()` |
+| [Multi-Output](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/agentserver/azure-ai-agentserver-responses/samples/sample_06_multi_output.py) | Reasoning + message in a single response |
+| [Streaming Upstream](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/agentserver/azure-ai-agentserver-responses/samples/sample_10_streaming_upstream.py) | Forward to upstream streaming LLM via `openai` SDK |
+| [Non-Streaming Upstream](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/agentserver/azure-ai-agentserver-responses/samples/sample_11_non_streaming_upstream.py) | Forward to upstream non-streaming LLM, emit items via builders |
+
+- [Handler implementation guide](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/agentserver/azure-ai-agentserver-responses/docs/handler-implementation-guide.md) — Detailed reference for building handlers
+
+## Contributing
+
+This project welcomes contributions and suggestions. Most contributions require
+you to agree to a Contributor License Agreement (CLA) declaring that you have
+the right to, and actually do, grant us the rights to use your contribution.
+For details, visit https://cla.microsoft.com.
+
+When you submit a pull request, a CLA-bot will automatically determine whether
+you need to provide a CLA and decorate the PR appropriately (e.g., label,
+comment). Simply follow the instructions provided by the bot. You will only
+need to do this once across all repos using our CLA.
+
+This project has adopted the
+[Microsoft Open Source Code of Conduct][code_of_conduct]. For more information,
+see the Code of Conduct FAQ or contact opencode@microsoft.com with any
+additional questions or comments.
+
+[code_of_conduct]: https://opensource.microsoft.com/codeofconduct/
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/__init__.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/__init__.py
new file mode 100644
index 000000000000..d55ccad1f573
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/__init__.py
@@ -0,0 +1 @@
+__path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/__init__.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/__init__.py
new file mode 100644
index 000000000000..d55ccad1f573
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/__init__.py
@@ -0,0 +1 @@
+__path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/__init__.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/__init__.py
new file mode 100644
index 000000000000..d55ccad1f573
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/__init__.py
@@ -0,0 +1 @@
+__path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/__init__.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/__init__.py
new file mode 100644
index 000000000000..0674f8c5c075
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/__init__.py
@@ -0,0 +1,85 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Public API surface for the Azure AI Agent Server Responses package."""
+
+from ._version import VERSION
+
+__version__ = VERSION
+
+from ._options import ResponsesServerOptions
+from ._response_context import IsolationContext, ResponseContext
+from .hosting._routing import ResponsesAgentServerHost
+from .models import CreateResponse, ResponseObject
+from .models._helpers import (
+ get_conversation_id,
+ get_input_expanded,
+ to_output_item,
+)
+from .store._base import ResponseProviderProtocol, ResponseStreamProviderProtocol
+from .store._foundry_errors import (
+ FoundryApiError,
+ FoundryBadRequestError,
+ FoundryResourceNotFoundError,
+ FoundryStorageError,
+)
+from .store._foundry_provider import FoundryStorageProvider
+from .store._foundry_settings import FoundryStorageSettings
+from .store._memory import InMemoryResponseProvider
+from .streaming._builders import (
+ OutputItemBuilder,
+ OutputItemCodeInterpreterCallBuilder,
+ OutputItemCustomToolCallBuilder,
+ OutputItemFileSearchCallBuilder,
+ OutputItemFunctionCallBuilder,
+ OutputItemFunctionCallOutputBuilder,
+ OutputItemImageGenCallBuilder,
+ OutputItemMcpCallBuilder,
+ OutputItemMcpListToolsBuilder,
+ OutputItemMessageBuilder,
+ OutputItemReasoningItemBuilder,
+ OutputItemWebSearchCallBuilder,
+ ReasoningSummaryPartBuilder,
+ RefusalContentBuilder,
+ TextContentBuilder,
+)
+from .streaming._event_stream import ResponseEventStream
+from .streaming._text_response import TextResponse
+
+__all__ = [
+ "__version__",
+ "ResponsesAgentServerHost",
+ "ResponseContext",
+ "IsolationContext",
+ "ResponsesServerOptions",
+ "ResponseProviderProtocol",
+ "ResponseStreamProviderProtocol",
+ "InMemoryResponseProvider",
+ "FoundryStorageProvider",
+ "FoundryStorageSettings",
+ "FoundryStorageError",
+ "FoundryResourceNotFoundError",
+ "FoundryBadRequestError",
+ "FoundryApiError",
+ "TextContentBuilder",
+ "OutputItemMessageBuilder",
+ "OutputItemBuilder",
+ "OutputItemFunctionCallBuilder",
+ "OutputItemFunctionCallOutputBuilder",
+ "RefusalContentBuilder",
+ "OutputItemReasoningItemBuilder",
+ "ReasoningSummaryPartBuilder",
+ "OutputItemFileSearchCallBuilder",
+ "OutputItemWebSearchCallBuilder",
+ "OutputItemCodeInterpreterCallBuilder",
+ "OutputItemImageGenCallBuilder",
+ "OutputItemMcpCallBuilder",
+ "OutputItemMcpListToolsBuilder",
+ "OutputItemCustomToolCallBuilder",
+ "ResponseEventStream",
+ "TextResponse",
+ "CreateResponse",
+ "ResponseObject",
+ "get_conversation_id",
+ "get_input_expanded",
+ "to_output_item",
+]
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/_id_generator.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/_id_generator.py
new file mode 100644
index 000000000000..b4639195fe57
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/_id_generator.py
@@ -0,0 +1,501 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""ID generation utilities for deterministic response and item IDs."""
+
+from __future__ import annotations
+
+import base64
+import secrets
+from typing import Callable, Sequence
+
+from .models import _generated as generated_models
+
+
+class IdGenerator: # pylint: disable=too-many-public-methods
+ """Generates IDs with embedded partition keys."""
+
+ _PARTITION_KEY_HEX_LENGTH = 16
+ _PARTITION_KEY_SUFFIX = "00"
+ _PARTITION_KEY_TOTAL_LENGTH = _PARTITION_KEY_HEX_LENGTH + 2
+ _ENTROPY_LENGTH = 32
+ _NEW_FORMAT_BODY_LENGTH = _PARTITION_KEY_TOTAL_LENGTH + _ENTROPY_LENGTH
+ _LEGACY_BODY_LENGTH = 48
+ _LEGACY_PARTITION_KEY_LENGTH = 16
+
+ @staticmethod
+ def new_id(prefix: str, partition_key_hint: str | None = "") -> str:
+ """Generate a new ID in the format ``{prefix}_{partitionKey}{entropy}``.
+
+ :param prefix: The prefix segment for the ID (e.g. ``"caresp"``, ``"msg"``).
+ :type prefix: str
+ :param partition_key_hint: An existing ID from which to extract a partition key
+ for co-location. Defaults to an empty string (generates a new partition key).
+ :type partition_key_hint: str | None
+ :returns: A new unique ID string.
+ :rtype: str
+ :raises TypeError: If *prefix* is None.
+ :raises ValueError: If *prefix* is empty.
+ """
+ if prefix is None:
+ raise TypeError("prefix must not be None")
+ if len(prefix) == 0:
+ raise ValueError("Prefix must not be empty.")
+
+ extracted, partition_key = IdGenerator._try_extract_partition_key_raw(partition_key_hint)
+ if extracted:
+ if len(partition_key) == IdGenerator._LEGACY_PARTITION_KEY_LENGTH:
+ partition_key = partition_key + IdGenerator._PARTITION_KEY_SUFFIX
+ else:
+ partition_key = IdGenerator._generate_partition_key()
+
+ entropy = IdGenerator._generate_entropy()
+ return f"{prefix}_{partition_key}{entropy}"
+
+ @staticmethod
+ def new_response_id(partition_key_hint: str | None = "") -> str:
+ """Generate a new response ID with the ``caresp`` prefix.
+
+ :param partition_key_hint: An existing ID to extract the partition key from for co-location.
+ :type partition_key_hint: str | None
+ :returns: A new unique response ID string.
+ :rtype: str
+ """
+ return IdGenerator.new_id("caresp", partition_key_hint)
+
+ @staticmethod
+ def new_message_item_id(partition_key_hint: str | None = "") -> str:
+ """Generate a new message item ID with the ``msg`` prefix.
+
+ :param partition_key_hint: An existing ID to extract the partition key from for co-location.
+ :type partition_key_hint: str | None
+ :returns: A new unique message item ID string.
+ :rtype: str
+ """
+ return IdGenerator.new_id("msg", partition_key_hint)
+
+ @staticmethod
+ def new_function_call_item_id(partition_key_hint: str | None = "") -> str:
+ """Generate a new function call item ID with the ``fc`` prefix.
+
+ :param partition_key_hint: An existing ID to extract the partition key from for co-location.
+ :type partition_key_hint: str | None
+ :returns: A new unique function call item ID string.
+ :rtype: str
+ """
+ return IdGenerator.new_id("fc", partition_key_hint)
+
+ @staticmethod
+ def new_reasoning_item_id(partition_key_hint: str | None = "") -> str:
+ """Generate a new reasoning item ID with the ``rs`` prefix.
+
+ :param partition_key_hint: An existing ID to extract the partition key from for co-location.
+ :type partition_key_hint: str | None
+ :returns: A new unique reasoning item ID string.
+ :rtype: str
+ """
+ return IdGenerator.new_id("rs", partition_key_hint)
+
+ @staticmethod
+ def new_file_search_call_item_id(partition_key_hint: str | None = "") -> str:
+ """Generate a new file search call item ID with the ``fs`` prefix.
+
+ :param partition_key_hint: An existing ID to extract the partition key from for co-location.
+ :type partition_key_hint: str | None
+ :returns: A new unique file search call item ID string.
+ :rtype: str
+ """
+ return IdGenerator.new_id("fs", partition_key_hint)
+
+ @staticmethod
+ def new_web_search_call_item_id(partition_key_hint: str | None = "") -> str:
+ """Generate a new web search call item ID with the ``ws`` prefix.
+
+ :param partition_key_hint: An existing ID to extract the partition key from for co-location.
+ :type partition_key_hint: str | None
+ :returns: A new unique web search call item ID string.
+ :rtype: str
+ """
+ return IdGenerator.new_id("ws", partition_key_hint)
+
+ @staticmethod
+ def new_code_interpreter_call_item_id(partition_key_hint: str | None = "") -> str:
+ """Generate a new code interpreter call item ID with the ``ci`` prefix.
+
+ :param partition_key_hint: An existing ID to extract the partition key from for co-location.
+ :type partition_key_hint: str | None
+ :returns: A new unique code interpreter call item ID string.
+ :rtype: str
+ """
+ return IdGenerator.new_id("ci", partition_key_hint)
+
+ @staticmethod
+ def new_image_gen_call_item_id(partition_key_hint: str | None = "") -> str:
+ """Generate a new image generation call item ID with the ``ig`` prefix.
+
+ :param partition_key_hint: An existing ID to extract the partition key from for co-location.
+ :type partition_key_hint: str | None
+ :returns: A new unique image generation call item ID string.
+ :rtype: str
+ """
+ return IdGenerator.new_id("ig", partition_key_hint)
+
+ @staticmethod
+ def new_mcp_call_item_id(partition_key_hint: str | None = "") -> str:
+ """Generate a new MCP call item ID with the ``mcp`` prefix.
+
+ :param partition_key_hint: An existing ID to extract the partition key from for co-location.
+ :type partition_key_hint: str | None
+ :returns: A new unique MCP call item ID string.
+ :rtype: str
+ """
+ return IdGenerator.new_id("mcp", partition_key_hint)
+
+ @staticmethod
+ def new_mcp_list_tools_item_id(partition_key_hint: str | None = "") -> str:
+ """Generate a new MCP list tools item ID with the ``mcpl`` prefix.
+
+ :param partition_key_hint: An existing ID to extract the partition key from for co-location.
+ :type partition_key_hint: str | None
+ :returns: A new unique MCP list tools item ID string.
+ :rtype: str
+ """
+ return IdGenerator.new_id("mcpl", partition_key_hint)
+
+ @staticmethod
+ def new_custom_tool_call_item_id(partition_key_hint: str | None = "") -> str:
+ """Generate a new custom tool call item ID with the ``ctc`` prefix.
+
+ :param partition_key_hint: An existing ID to extract the partition key from for co-location.
+ :type partition_key_hint: str | None
+ :returns: A new unique custom tool call item ID string.
+ :rtype: str
+ """
+ return IdGenerator.new_id("ctc", partition_key_hint)
+
+ @staticmethod
+ def new_custom_tool_call_output_item_id(partition_key_hint: str | None = "") -> str:
+ """Generate a new custom tool call output item ID with the ``ctco`` prefix.
+
+ :param partition_key_hint: An existing ID to extract the partition key from for co-location.
+ :type partition_key_hint: str | None
+ :returns: A new unique custom tool call output item ID string.
+ :rtype: str
+ """
+ return IdGenerator.new_id("ctco", partition_key_hint)
+
+ @staticmethod
+ def new_function_call_output_item_id(partition_key_hint: str | None = "") -> str:
+ """Generate a new function call output item ID with the ``fco`` prefix.
+
+ :param partition_key_hint: An existing ID to extract the partition key from for co-location.
+ :type partition_key_hint: str | None
+ :returns: A new unique function call output item ID string.
+ :rtype: str
+ """
+ return IdGenerator.new_id("fco", partition_key_hint)
+
+ @staticmethod
+ def new_computer_call_item_id(partition_key_hint: str | None = "") -> str:
+ """Generate a new computer call item ID with the ``cu`` prefix.
+
+ :param partition_key_hint: An existing ID to extract the partition key from for co-location.
+ :type partition_key_hint: str | None
+ :returns: A new unique computer call item ID string.
+ :rtype: str
+ """
+ return IdGenerator.new_id("cu", partition_key_hint)
+
+ @staticmethod
+ def new_computer_call_output_item_id(partition_key_hint: str | None = "") -> str:
+ """Generate a new computer call output item ID with the ``cuo`` prefix.
+
+ :param partition_key_hint: An existing ID to extract the partition key from for co-location.
+ :type partition_key_hint: str | None
+ :returns: A new unique computer call output item ID string.
+ :rtype: str
+ """
+ return IdGenerator.new_id("cuo", partition_key_hint)
+
+ @staticmethod
+ def new_local_shell_call_item_id(partition_key_hint: str | None = "") -> str:
+ """Generate a new local shell call item ID with the ``lsh`` prefix.
+
+ :param partition_key_hint: An existing ID to extract the partition key from for co-location.
+ :type partition_key_hint: str | None
+ :returns: A new unique local shell call item ID string.
+ :rtype: str
+ """
+ return IdGenerator.new_id("lsh", partition_key_hint)
+
+ @staticmethod
+ def new_local_shell_call_output_item_id(partition_key_hint: str | None = "") -> str:
+ """Generate a new local shell call output item ID with the ``lsho`` prefix.
+
+ :param partition_key_hint: An existing ID to extract the partition key from for co-location.
+ :type partition_key_hint: str | None
+ :returns: A new unique local shell call output item ID string.
+ :rtype: str
+ """
+ return IdGenerator.new_id("lsho", partition_key_hint)
+
+ @staticmethod
+ def new_function_shell_call_item_id(partition_key_hint: str | None = "") -> str:
+ """Generate a new function shell call item ID with the ``lsh`` prefix.
+
+ :param partition_key_hint: An existing ID to extract the partition key from for co-location.
+ :type partition_key_hint: str | None
+ :returns: A new unique function shell call item ID string.
+ :rtype: str
+ """
+ return IdGenerator.new_id("lsh", partition_key_hint)
+
+ @staticmethod
+ def new_function_shell_call_output_item_id(partition_key_hint: str | None = "") -> str:
+ """Generate a new function shell call output item ID with the ``lsho`` prefix.
+
+ :param partition_key_hint: An existing ID to extract the partition key from for co-location.
+ :type partition_key_hint: str | None
+ :returns: A new unique function shell call output item ID string.
+ :rtype: str
+ """
+ return IdGenerator.new_id("lsho", partition_key_hint)
+
+ @staticmethod
+ def new_apply_patch_call_item_id(partition_key_hint: str | None = "") -> str:
+ """Generate a new apply patch call item ID with the ``ap`` prefix.
+
+ :param partition_key_hint: An existing ID to extract the partition key from for co-location.
+ :type partition_key_hint: str | None
+ :returns: A new unique apply patch call item ID string.
+ :rtype: str
+ """
+ return IdGenerator.new_id("ap", partition_key_hint)
+
+ @staticmethod
+ def new_apply_patch_call_output_item_id(partition_key_hint: str | None = "") -> str:
+ """Generate a new apply patch call output item ID with the ``apo`` prefix.
+
+ :param partition_key_hint: An existing ID to extract the partition key from for co-location.
+ :type partition_key_hint: str | None
+ :returns: A new unique apply patch call output item ID string.
+ :rtype: str
+ """
+ return IdGenerator.new_id("apo", partition_key_hint)
+
+ @staticmethod
+ def new_mcp_approval_request_item_id(partition_key_hint: str | None = "") -> str:
+ """Generate a new MCP approval request item ID with the ``mcpr`` prefix.
+
+ :param partition_key_hint: An existing ID to extract the partition key from for co-location.
+ :type partition_key_hint: str | None
+ :returns: A new unique MCP approval request item ID string.
+ :rtype: str
+ """
+ return IdGenerator.new_id("mcpr", partition_key_hint)
+
+ @staticmethod
+ def new_mcp_approval_response_item_id(partition_key_hint: str | None = "") -> str:
+ """Generate a new MCP approval response item ID with the ``mcpa`` prefix.
+
+ :param partition_key_hint: An existing ID to extract the partition key from for co-location.
+ :type partition_key_hint: str | None
+ :returns: A new unique MCP approval response item ID string.
+ :rtype: str
+ """
+ return IdGenerator.new_id("mcpa", partition_key_hint)
+
+ @staticmethod
+ def new_compaction_item_id(partition_key_hint: str | None = "") -> str:
+ """Generate a new compaction item ID with the ``cmp`` prefix.
+
+ :param partition_key_hint: An existing ID to extract the partition key from for co-location.
+ :type partition_key_hint: str | None
+ :returns: A new unique compaction item ID string.
+ :rtype: str
+ """
+ return IdGenerator.new_id("cmp", partition_key_hint)
+
+ @staticmethod
+ def new_workflow_action_item_id(partition_key_hint: str | None = "") -> str:
+ """Generate a new workflow action item ID with the ``wfa`` prefix.
+
+ :param partition_key_hint: An existing ID to extract the partition key from for co-location.
+ :type partition_key_hint: str | None
+ :returns: A new unique workflow action item ID string.
+ :rtype: str
+ """
+ return IdGenerator.new_id("wfa", partition_key_hint)
+
+ @staticmethod
+ def new_output_message_item_id(partition_key_hint: str | None = "") -> str:
+ """Generate a new output message item ID with the ``om`` prefix.
+
+ :param partition_key_hint: An existing ID to extract the partition key from for co-location.
+ :type partition_key_hint: str | None
+ :returns: A new unique output message item ID string.
+ :rtype: str
+ """
+ return IdGenerator.new_id("om", partition_key_hint)
+
+ @staticmethod
+ def new_item_id(item: generated_models.Item, partition_key_hint: str | None = "") -> str | None:
+ """Generate a type-specific ID for a generated Item subtype.
+
+ Dispatches to the appropriate ``new_*_item_id`` factory method based on the
+ runtime type of *item*. Returns None for ``ItemReferenceParam`` or unrecognized types.
+
+ :param item: The generated Item instance to create an ID for.
+ :type item: generated_models.Item
+ :param partition_key_hint: An existing ID from which to extract the partition key
+ for co-location. Defaults to an empty string.
+ :type partition_key_hint: str | None
+ :returns: A new unique ID string, or None if the item type is a reference or unrecognized.
+ :rtype: str | None
+ """
+ dispatch_map: tuple[tuple[type[object], Callable[..., str]], ...] = (
+ (generated_models.ItemMessage, IdGenerator.new_message_item_id),
+ (generated_models.ItemOutputMessage, IdGenerator.new_output_message_item_id),
+ (generated_models.ItemFunctionToolCall, IdGenerator.new_function_call_item_id),
+ (generated_models.FunctionCallOutputItemParam, IdGenerator.new_function_call_output_item_id),
+ (generated_models.ItemCustomToolCall, IdGenerator.new_custom_tool_call_item_id),
+ (generated_models.ItemCustomToolCallOutput, IdGenerator.new_custom_tool_call_output_item_id),
+ (generated_models.ItemComputerToolCall, IdGenerator.new_computer_call_item_id),
+ (generated_models.ComputerCallOutputItemParam, IdGenerator.new_computer_call_output_item_id),
+ (generated_models.ItemFileSearchToolCall, IdGenerator.new_file_search_call_item_id),
+ (generated_models.ItemWebSearchToolCall, IdGenerator.new_web_search_call_item_id),
+ (generated_models.ItemImageGenToolCall, IdGenerator.new_image_gen_call_item_id),
+ (generated_models.ItemCodeInterpreterToolCall, IdGenerator.new_code_interpreter_call_item_id),
+ (generated_models.ItemLocalShellToolCall, IdGenerator.new_local_shell_call_item_id),
+ (generated_models.ItemLocalShellToolCallOutput, IdGenerator.new_local_shell_call_output_item_id),
+ (generated_models.FunctionShellCallItemParam, IdGenerator.new_function_shell_call_item_id),
+ (generated_models.FunctionShellCallOutputItemParam, IdGenerator.new_function_shell_call_output_item_id),
+ (generated_models.ApplyPatchToolCallItemParam, IdGenerator.new_apply_patch_call_item_id),
+ (generated_models.ApplyPatchToolCallOutputItemParam, IdGenerator.new_apply_patch_call_output_item_id),
+ (generated_models.ItemMcpListTools, IdGenerator.new_mcp_list_tools_item_id),
+ (generated_models.ItemMcpToolCall, IdGenerator.new_mcp_call_item_id),
+ (generated_models.ItemMcpApprovalRequest, IdGenerator.new_mcp_approval_request_item_id),
+ (generated_models.MCPApprovalResponse, IdGenerator.new_mcp_approval_response_item_id),
+ (generated_models.ItemReasoningItem, IdGenerator.new_reasoning_item_id),
+ (generated_models.CompactionSummaryItemParam, IdGenerator.new_compaction_item_id),
+ )
+
+ for model_type, generator in dispatch_map:
+ if isinstance(item, model_type):
+ return generator(partition_key_hint)
+
+ if isinstance(item, generated_models.ItemReferenceParam):
+ return None
+ return None
+
+ @staticmethod
+ def extract_partition_key(id_value: str) -> str:
+ """Extract the partition key segment from an existing ID.
+
+ :param id_value: The full ID string to extract the partition key from.
+ :type id_value: str
+ :returns: The partition key hex string.
+ :rtype: str
+ :raises ValueError: If the ID is null, empty, missing a delimiter, or has
+ an unexpected body length.
+ """
+ extracted, partition_key = IdGenerator._try_extract_partition_key_raw(id_value)
+ if extracted:
+ return partition_key
+
+ if id_value is None or id_value == "":
+ raise ValueError("ID must not be null or empty.")
+ if "_" not in id_value:
+ raise ValueError(f"ID '{id_value}' has no '_' delimiter.")
+ raise ValueError(f"ID '{id_value}' has unexpected body length.")
+
+ @staticmethod
+ def is_valid(id_value: str | None, allowed_prefixes: Sequence[str] | None = None) -> tuple[bool, str | None]:
+ """Validate whether an ID string conforms to the expected format.
+
+ :param id_value: The ID string to validate.
+ :type id_value: str | None
+ :param allowed_prefixes: An optional sequence of allowed prefix strings.
+ When provided, the ID's prefix must be in this set.
+ :type allowed_prefixes: Sequence[str] | None
+ :returns: A tuple of (is_valid, error_message). When valid, error_message is None.
+ :rtype: tuple[bool, str | None]
+ """
+ if id_value is None or id_value == "":
+ return False, "ID must not be null or empty."
+
+ delimiter_index = id_value.find("_")
+ if delimiter_index < 0:
+ return False, f"ID '{id_value}' has no '_' delimiter."
+
+ prefix = id_value[:delimiter_index]
+ if len(prefix) == 0:
+ return False, "ID has an empty prefix."
+
+ body = id_value[delimiter_index + 1 :]
+ if len(body) != IdGenerator._NEW_FORMAT_BODY_LENGTH and len(body) != IdGenerator._LEGACY_BODY_LENGTH:
+ return (
+ False,
+ f"ID '{id_value}' has unexpected body length {len(body)}"
+ + f" (expected {IdGenerator._NEW_FORMAT_BODY_LENGTH} or"
+ + f" {IdGenerator._LEGACY_BODY_LENGTH}).",
+ )
+
+ if allowed_prefixes is not None and prefix not in allowed_prefixes:
+ return False, f"ID prefix '{prefix}' is not in the allowed set [{', '.join(allowed_prefixes)}]."
+
+ return True, None
+
+ @staticmethod
+ def _generate_partition_key() -> str:
+ """Generate a random partition key hex string with the standard suffix.
+
+ :returns: An 18-character hex partition key string.
+ :rtype: str
+ """
+ return f"{secrets.token_bytes(8).hex()}{IdGenerator._PARTITION_KEY_SUFFIX}"
+
+ @staticmethod
+ def _generate_entropy() -> str:
+ """Generate a random alphanumeric entropy string.
+
+ :returns: A 32-character alphanumeric entropy string.
+ :rtype: str
+ """
+ chars: list[str] = []
+ while len(chars) < IdGenerator._ENTROPY_LENGTH:
+ base64_text = base64.b64encode(secrets.token_bytes(48)).decode("ascii")
+ for char in base64_text:
+ if char.isalnum():
+ chars.append(char)
+ if len(chars) >= IdGenerator._ENTROPY_LENGTH:
+ break
+ return "".join(chars)
+
+ @staticmethod
+ def _try_extract_partition_key_raw(id_value: str | None) -> tuple[bool, str]:
+ """Attempt to extract the raw partition key from an ID string.
+
+ Supports both the new format (18-char partition key at the start of the body)
+ and the legacy format (16-char partition key at the end of the body).
+
+ :param id_value: The full ID string to parse.
+ :type id_value: str | None
+ :returns: A tuple of (success, partition_key). On failure, partition_key is
+ an empty string.
+ :rtype: tuple[bool, str]
+ """
+ if id_value is None or id_value == "":
+ return False, ""
+
+ delimiter_index = id_value.find("_")
+ if delimiter_index < 0:
+ return False, ""
+
+ body = id_value[delimiter_index + 1 :]
+ if len(body) == IdGenerator._NEW_FORMAT_BODY_LENGTH:
+ return True, body[: IdGenerator._PARTITION_KEY_TOTAL_LENGTH]
+
+ if len(body) == IdGenerator._LEGACY_BODY_LENGTH:
+ return True, body[-IdGenerator._LEGACY_PARTITION_KEY_LENGTH :]
+
+ return False, ""
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/_options.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/_options.py
new file mode 100644
index 000000000000..c87c83d8ae47
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/_options.py
@@ -0,0 +1,80 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Typed options for configuring the Responses server runtime."""
+
+from __future__ import annotations
+
+import os
+from dataclasses import dataclass
+from typing import TYPE_CHECKING, Any, Mapping
+
+if TYPE_CHECKING:
+ from .hosting._observability import CreateSpanHook
+
+
+@dataclass
+class ResponsesServerOptions:
+ """Configuration values for hosting and runtime behavior."""
+
+ additional_server_version: str | None = None
+ default_model: str | None = None
+ default_fetch_history_count: int = 100
+ sse_keep_alive_interval_seconds: int | None = None
+ shutdown_grace_period_seconds: int = 10
+ create_span_hook: CreateSpanHook | None = None
+
+ def __post_init__(self) -> None:
+ if self.additional_server_version is not None:
+ normalized = self.additional_server_version.strip()
+ self.additional_server_version = normalized or None
+ if self.default_model is not None:
+ normalized_model = self.default_model.strip()
+ self.default_model = normalized_model or None
+ if self.sse_keep_alive_interval_seconds is not None and self.sse_keep_alive_interval_seconds <= 0:
+ raise ValueError("sse_keep_alive_interval_seconds must be > 0 when set")
+ if self.default_fetch_history_count <= 0:
+ raise ValueError("default_fetch_history_count must be > 0")
+ if self.shutdown_grace_period_seconds <= 0:
+ raise ValueError("shutdown_grace_period_seconds must be > 0")
+
+ @classmethod
+ def from_env(cls, environ: Mapping[str, str] | None = None) -> "ResponsesServerOptions":
+ """Create options from environment variables."""
+ source: Mapping[str, str] = os.environ if environ is None else environ
+
+ def _first_non_empty(*keys: str) -> str | None:
+ for key in keys:
+ raw = source.get(key)
+ if raw is None:
+ continue
+ normalized = raw.strip()
+ if normalized:
+ return normalized
+ return None
+
+ def _parse_positive_int(*keys: str) -> int | None:
+ raw = _first_non_empty(*keys)
+ if raw is None:
+ return None
+ try:
+ value = int(raw)
+ except ValueError as exc:
+ raise ValueError(f"{keys[0]} must be a positive integer") from exc
+ if value <= 0:
+ raise ValueError(f"{keys[0]} must be > 0")
+ return value
+
+ default_fetch_history_count = _parse_positive_int(
+ "DEFAULT_FETCH_HISTORY_ITEM_COUNT",
+ )
+
+ kwargs: dict[str, Any] = {}
+ if default_fetch_history_count is not None:
+ kwargs["default_fetch_history_count"] = default_fetch_history_count
+
+ return cls(**kwargs)
+
+ @property
+ def sse_keep_alive_enabled(self) -> bool:
+ """Return whether periodic SSE keep-alive comments are enabled."""
+ return self.sse_keep_alive_interval_seconds is not None
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/_response_context.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/_response_context.py
new file mode 100644
index 000000000000..10b9db0781b3
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/_response_context.py
@@ -0,0 +1,240 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""ResponseContext for user-defined response execution."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from datetime import datetime, timezone
+from typing import TYPE_CHECKING, Any, Sequence
+
+from azure.ai.agentserver.responses.models._generated.sdk.models._types import InputParam
+
+from .models._generated import (
+ CreateResponse,
+ Item,
+ ItemMessage,
+ ItemReferenceParam,
+ MessageContentInputTextContent,
+ OutputItem,
+)
+from .models._helpers import get_input_expanded, to_item, to_output_item
+from .models.runtime import ResponseModeFlags
+
+if TYPE_CHECKING:
+ from .store._base import ResponseProviderProtocol
+
+
+@dataclass(frozen=True)
+class IsolationContext:
+ """Platform-injected isolation keys for multi-tenant state partitioning.
+
+ The Foundry hosting platform injects ``x-agent-user-isolation-key`` and
+ ``x-agent-chat-isolation-key`` headers on every protocol request (not on
+ health probes). These opaque strings serve as partition keys:
+
+ - ``user_key`` — unique per user across all sessions; use for user-private state.
+ - ``chat_key`` — represents where conversation state lives; in 1:1 chats the
+ two keys are equal.
+
+ When the headers are absent (e.g. local development), both keys are ``None``.
+ When the platform sends the header with an empty value, the key is an empty
+ string. Use ``is None`` to detect whether the header was present at all.
+ """
+
+ user_key: str | None = None
+ """Partition key for user-private state (from ``x-agent-user-isolation-key``).
+ ``None`` when the header was not sent."""
+
+ chat_key: str | None = None
+ """Partition key for conversation/shared state (from ``x-agent-chat-isolation-key``).
+ ``None`` when the header was not sent."""
+
+
+class ResponseContext:
+ """Runtime context exposed to response handlers and used by hosting orchestration.
+
+ - response identifier
+ - shutdown signal flag
+ - raw body access
+ - async input/history resolution
+ """
+
+ def __init__(
+ self,
+ *,
+ response_id: str,
+ mode_flags: ResponseModeFlags,
+ raw_body: dict[str, Any] | None = None,
+ request: CreateResponse | None = None,
+ created_at: datetime | None = None,
+ provider: "ResponseProviderProtocol | None" = None,
+ input_items: list[InputParam] | None = None,
+ previous_response_id: str | None = None,
+ conversation_id: str | None = None,
+ history_limit: int = 100,
+ client_headers: dict[str, str] | None = None,
+ query_parameters: dict[str, str] | None = None,
+ isolation: IsolationContext | None = None,
+ ) -> None:
+ self.response_id = response_id
+ self.mode_flags = mode_flags
+ self.raw_body = raw_body
+ self.request = request
+ self.created_at = created_at if created_at is not None else datetime.now(timezone.utc)
+ self.is_shutdown_requested: bool = False
+ self.client_headers: dict[str, str] = client_headers or {}
+ self.query_parameters: dict[str, str] = query_parameters or {}
+ self.isolation: IsolationContext = isolation if isolation is not None else IsolationContext()
+ self._provider: "ResponseProviderProtocol | None" = provider
+ self._input_items: list[InputParam] = list(input_items) if input_items is not None else []
+ self._previous_response_id: str | None = previous_response_id
+ self.conversation_id: str | None = conversation_id
+ self._history_limit: int = history_limit
+ self._input_items_resolved_cache: Sequence[Item] | None = None
+ self._input_items_unresolved_cache: Sequence[Item] | None = None
+ self._history_cache: Sequence[OutputItem] | None = None
+
+ async def get_input_items(self, *, resolve_references: bool = True) -> Sequence[Item]:
+ """Return the caller's input items as :class:`Item` subtypes.
+
+ Inline items are returned as-is — the same :class:`Item` subtypes from
+ the original request (e.g. :class:`ItemMessage`,
+ :class:`FunctionCallOutputItemParam`).
+ :class:`ItemReferenceParam` entries are batch-resolved via the
+ provider and converted back to :class:`Item` subtypes.
+ Unresolvable references (provider returns ``None``) are silently dropped.
+
+ :keyword resolve_references: When ``True`` (default),
+ :class:`ItemReferenceParam` items are resolved via the provider and
+ returned as their concrete :class:`Item` subtype. When ``False``,
+ item references are left as :class:`ItemReferenceParam` in the
+ returned sequence.
+ :type resolve_references: bool
+ :returns: A tuple of input items.
+ :rtype: Sequence[Item]
+ """
+ if resolve_references:
+ return await self._get_input_items_resolved()
+ return await self._get_input_items_unresolved()
+
+ async def get_input_text(self, *, resolve_references: bool = True) -> str:
+ """Resolve input items and extract all text content as a single string.
+
+ Convenience method that calls :meth:`get_input_items`, filters for
+ :class:`ItemMessage` items, expands their content, and joins all
+ :class:`MessageContentInputTextContent` text values with newline
+ separators.
+
+ :keyword resolve_references: When ``True`` (default), item references
+ are resolved before extracting text.
+ :type resolve_references: bool
+ :returns: The combined text content, or ``""`` if no text found.
+ :rtype: str
+ """
+ items = await self.get_input_items(resolve_references=resolve_references)
+ texts: list[str] = []
+ for item in items:
+ if isinstance(item, ItemMessage):
+ for part in getattr(item, "content", None) or []:
+ if isinstance(part, MessageContentInputTextContent):
+ text = getattr(part, "text", None)
+ if text is not None:
+ texts.append(text)
+ return "\n".join(texts)
+
+ async def _get_input_items_for_persistence(self) -> Sequence[OutputItem]:
+ """Return input items as :class:`OutputItem` for storage persistence.
+
+ The orchestrator needs :class:`OutputItem` instances when creating the
+ stored response. This method resolves references (so stored items are
+ always concrete), converts each :class:`Item` to :class:`OutputItem`,
+ and caches the result.
+
+ :returns: A tuple of output items suitable for persistence.
+ :rtype: Sequence[OutputItem]
+ """
+ items = await self.get_input_items(resolve_references=True)
+ return tuple(out for item in items if (out := to_output_item(item, self.response_id)) is not None)
+
+ # ------------------------------------------------------------------
+ # Private resolution helpers (cached independently per mode)
+ # ------------------------------------------------------------------
+
+ async def _get_input_items_resolved(self) -> Sequence[Item]:
+ """Resolve and cache input items with references resolved."""
+ if self._input_items_resolved_cache is not None:
+ return self._input_items_resolved_cache
+
+ expanded = self._expand_input()
+ if not expanded:
+ self._input_items_resolved_cache = ()
+ return self._input_items_resolved_cache
+
+ # Collect ItemReferenceParam positions and IDs for batch resolution.
+ reference_ids: list[str] = []
+ reference_positions: list[int] = []
+ results: list[Item | None] = []
+
+ for item in expanded:
+ if isinstance(item, ItemReferenceParam):
+ reference_ids.append(item.id)
+ reference_positions.append(len(results))
+ results.append(None) # placeholder
+ else:
+ results.append(item)
+
+ # Batch-resolve references if we have a provider and pending refs.
+ if reference_ids and self._provider is not None:
+ resolved = await self._provider.get_items(reference_ids, isolation=self.isolation)
+ for idx, pos in enumerate(reference_positions):
+ if idx < len(resolved) and resolved[idx] is not None:
+ converted = to_item(resolved[idx]) # type: ignore[arg-type]
+ if converted is not None:
+ results[pos] = converted
+
+ # Remove unresolved (None) placeholders.
+ self._input_items_resolved_cache = tuple(item for item in results if item is not None)
+ return self._input_items_resolved_cache
+
+ async def _get_input_items_unresolved(self) -> Sequence[Item]:
+ """Return input items without resolving references."""
+ if self._input_items_unresolved_cache is not None:
+ return self._input_items_unresolved_cache
+
+ expanded = self._expand_input()
+ self._input_items_unresolved_cache = tuple(expanded)
+ return self._input_items_unresolved_cache
+
+ def _expand_input(self) -> list[Item]:
+ """Normalize raw input into typed Item instances."""
+ if self.request is not None:
+ return get_input_expanded(self.request)
+ return list(self._input_items) # type: ignore[arg-type]
+
+ async def get_history(self) -> Sequence[OutputItem]:
+ """Resolve and cache conversation history items via the provider.
+
+ :returns: A tuple of conversation history items.
+ :rtype: Sequence[OutputItem]
+ """
+ if self._history_cache is not None:
+ return self._history_cache
+
+ if self._provider is None:
+ self._history_cache = ()
+ return self._history_cache
+
+ item_ids = await self._provider.get_history_item_ids(
+ self._previous_response_id,
+ self.conversation_id,
+ self._history_limit,
+ isolation=self.isolation,
+ )
+ if not item_ids:
+ self._history_cache = ()
+ return self._history_cache
+
+ items = await self._provider.get_items(item_ids, isolation=self.isolation)
+ self._history_cache = tuple(item for item in items if item is not None)
+ return self._history_cache
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/_version.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/_version.py
new file mode 100644
index 000000000000..cf584760eb91
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/_version.py
@@ -0,0 +1,7 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+
+VERSION = "1.0.0b1"
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/hosting/__init__.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/hosting/__init__.py
new file mode 100644
index 000000000000..dc96a3c541e3
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/hosting/__init__.py
@@ -0,0 +1,41 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""HTTP hosting, routing, and request orchestration for the Responses server."""
+
+from ._observability import (
+ CreateSpan,
+ CreateSpanHook,
+ InMemoryCreateSpanHook,
+ RecordedSpan,
+ build_create_span_tags,
+ build_platform_server_header,
+ start_create_span,
+)
+from ._routing import ResponsesAgentServerHost
+from ._validation import (
+ build_api_error_response,
+ build_invalid_mode_error_response,
+ build_not_found_error_response,
+ parse_and_validate_create_response,
+ parse_create_response,
+ to_api_error_response,
+ validate_create_response,
+)
+
+__all__ = [
+ "ResponsesAgentServerHost",
+ "CreateSpan",
+ "CreateSpanHook",
+ "InMemoryCreateSpanHook",
+ "RecordedSpan",
+ "build_api_error_response",
+ "build_create_span_tags",
+ "build_invalid_mode_error_response",
+ "build_not_found_error_response",
+ "build_platform_server_header",
+ "parse_and_validate_create_response",
+ "parse_create_response",
+ "start_create_span",
+ "to_api_error_response",
+ "validate_create_response",
+]
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/hosting/_endpoint_handler.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/hosting/_endpoint_handler.py
new file mode 100644
index 000000000000..51d93363dca7
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/hosting/_endpoint_handler.py
@@ -0,0 +1,1010 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""HTTP endpoint handler for the Responses server.
+
+This module owns all Starlette I/O: ``Request`` parsing, route-level
+validation, header propagation, and ``Response`` construction. Business
+logic lives in :class:`_ResponseOrchestrator`.
+"""
+
+from __future__ import annotations
+
+import asyncio # pylint: disable=do-not-import-asyncio
+import contextvars
+import logging
+import threading
+from typing import TYPE_CHECKING, Any
+
+from starlette.requests import Request
+from starlette.responses import JSONResponse, Response, StreamingResponse
+
+from azure.ai.agentserver.core import end_span, flush_spans, trace_stream
+from azure.ai.agentserver.responses.models._generated import AgentReference
+from azure.ai.agentserver.responses.models._generated.sdk.models.models._models import CreateResponse
+
+from .._options import ResponsesServerOptions
+from .._response_context import IsolationContext, ResponseContext
+from ..models._helpers import get_input_expanded, to_output_item
+from ..models.runtime import ResponseExecution, ResponseModeFlags, build_cancelled_response, build_failed_response
+from ..store._base import ResponseProviderProtocol, ResponseStreamProviderProtocol
+from ..streaming._helpers import EVENT_TYPE, _encode_sse
+from ..streaming._sse import encode_sse_any_event
+from ..streaming._state_machine import LifecycleStateMachineError, normalize_lifecycle_events
+from ._execution_context import _ExecutionContext
+from ._observability import (
+ CreateSpan,
+ _initial_create_span_tags,
+ build_create_otel_attrs,
+ build_create_span_tags,
+ extract_request_id,
+ start_create_span,
+)
+from ._orchestrator import _HandlerError, _refresh_background_status, _ResponseOrchestrator
+from ._request_parsing import (
+ _apply_item_cursors,
+ _extract_item_id,
+ _prevalidate_identity_payload,
+ _resolve_conversation_id,
+ _resolve_identity_fields,
+ _resolve_session_id,
+)
+from ._runtime_state import _RuntimeState
+from ._validation import (
+ deleted_response as _deleted_response,
+)
+from ._validation import (
+ error_response as _error_response,
+)
+from ._validation import (
+ invalid_mode_response as _invalid_mode,
+)
+from ._validation import (
+ invalid_request_response as _invalid_request,
+)
+from ._validation import (
+ not_found_response as _not_found,
+)
+from ._validation import parse_and_validate_create_response
+from ._validation import (
+ service_unavailable_response as _service_unavailable,
+)
+
+if TYPE_CHECKING:
+ from ._routing import ResponsesAgentServerHost
+
+from opentelemetry import baggage as _otel_baggage
+from opentelemetry import context as _otel_context
+
+from ..models.errors import RequestValidationError
+
+logger = logging.getLogger("azure.ai.agentserver")
+
+# OTel span attribute keys for error tagging (§7.2)
+_ATTR_ERROR_CODE = "azure.ai.agentserver.responses.error.code"
+_ATTR_ERROR_MESSAGE = "azure.ai.agentserver.responses.error.message"
+
+
+def _classify_error_code(exc: Exception) -> str:
+ """Return an error code string for an exception, matching API error classification."""
+ if isinstance(exc, RequestValidationError):
+ return exc.code
+ if isinstance(exc, ValueError):
+ return "invalid_request"
+ return "internal_error"
+
+
+def _extract_isolation(request: Request) -> IsolationContext:
+ """Build an ``IsolationContext`` from platform-injected request headers.
+
+ Returns the isolation keys from ``x-agent-user-isolation-key`` and
+ ``x-agent-chat-isolation-key``. Keys are ``None`` when the header
+ is absent (e.g. local development) and empty string when sent
+ with no value.
+ """
+ return IsolationContext(
+ user_key=request.headers.get("x-agent-user-isolation-key"),
+ chat_key=request.headers.get("x-agent-chat-isolation-key"),
+ )
+
+
+# Structured log scope context variables (spec §7.4)
+_response_id_var: contextvars.ContextVar[str] = contextvars.ContextVar("ResponseId", default="")
+_conversation_id_var: contextvars.ContextVar[str] = contextvars.ContextVar("ConversationId", default="")
+_streaming_var: contextvars.ContextVar[str] = contextvars.ContextVar("Streaming", default="")
+
+
+class _ResponseLogFilter(logging.Filter):
+ """Attach response-scope IDs to every log record from context vars.
+
+ Reads from ``contextvars`` rather than instance state, so a single
+ filter instance can be installed once on the logger (not per-request).
+ """
+
+ def filter(self, record: logging.LogRecord) -> bool:
+ record.response_id = _response_id_var.get("") # type: ignore[attr-defined]
+ record.conversation_id = _conversation_id_var.get("") # type: ignore[attr-defined]
+ record.streaming = _streaming_var.get("") # type: ignore[attr-defined]
+ return True
+
+
+# Install once on first request — no per-request add/remove needed.
+_log_filter_lock = threading.Lock()
+_log_filter_installed = False
+
+
+def _ensure_response_log_filter() -> None:
+ """Install the response log filter on first use (lazy, thread-safe)."""
+ global _log_filter_installed # pylint: disable=global-statement
+ if _log_filter_installed:
+ return
+ with _log_filter_lock:
+ if _log_filter_installed:
+ return
+ logger.addFilter(_ResponseLogFilter())
+ _log_filter_installed = True
+
+
+class _ResponseEndpointHandler: # pylint: disable=too-many-instance-attributes
+ """HTTP-layer handler for all Responses API endpoints.
+
+ Owns all Starlette ``Request``/``Response`` concerns. Delegates
+ event-pipeline logic to :class:`_ResponseOrchestrator`.
+
+ Mutable shutdown state (``_is_draining``, ``_shutdown_requested``) lives
+ here so every route method shares consistent drain/cancel semantics without
+ needing a ``nonlocal`` closure variable.
+ """
+
+ def __init__(
+ self,
+ *,
+ orchestrator: _ResponseOrchestrator,
+ runtime_state: _RuntimeState,
+ runtime_options: ResponsesServerOptions,
+ response_headers: dict[str, str],
+ sse_headers: dict[str, str],
+ host: "ResponsesAgentServerHost",
+ provider: ResponseProviderProtocol,
+ stream_provider: ResponseStreamProviderProtocol | None = None,
+ ) -> None:
+ """Initialise the endpoint handler.
+
+ :param orchestrator: Event-pipeline orchestrator.
+ :type orchestrator: _ResponseOrchestrator
+ :param runtime_state: In-memory execution record store.
+ :type runtime_state: _RuntimeState
+ :param runtime_options: Server runtime options.
+ :type runtime_options: ResponsesServerOptions
+ :param response_headers: Headers to include on all responses.
+ :type response_headers: dict[str, str]
+ :param sse_headers: SSE-specific headers (e.g. connection, cache-control).
+ :type sse_headers: dict[str, str]
+ :param host: The ``ResponsesAgentServerHost`` instance (provides ``request_span``).
+ :type host: ResponsesAgentServerHost
+ :param provider: Persistence provider for response envelopes and input items.
+ :type provider: ResponseProviderProtocol
+ :param stream_provider: Optional provider for SSE stream event persistence and replay.
+ :type stream_provider: ResponseStreamProviderProtocol | None
+ """
+ self._orchestrator = orchestrator
+ self._runtime_state = runtime_state
+ self._runtime_options = runtime_options
+ self._response_headers = response_headers
+ self._sse_headers = sse_headers
+ self._host = host
+ self._provider = provider
+ self._stream_provider = stream_provider
+ self._shutdown_requested: asyncio.Event = asyncio.Event()
+ self._is_draining: bool = False
+
+ # Validate the lifecycle event state machine on startup so
+ # misconfigured state machines surface immediately.
+ try:
+ normalize_lifecycle_events(
+ response_id="resp_validation",
+ events=[
+ {"type": EVENT_TYPE.RESPONSE_CREATED.value, "response": {"status": "in_progress"}},
+ {"type": EVENT_TYPE.RESPONSE_COMPLETED.value, "response": {"status": "completed"}},
+ ],
+ )
+ except LifecycleStateMachineError as exc:
+ raise RuntimeError(f"Invalid lifecycle event state machine configuration: {exc}") from exc
+
+ # ------------------------------------------------------------------
+ # Span attribute helper
+ # ------------------------------------------------------------------
+
+ @staticmethod
+ def _safe_set_attrs(span: Any, attrs: dict[str, str]) -> None:
+ """Safely set attributes on an OTel span.
+
+ :param span: The OTel span, or *None*.
+ :type span: Any
+ :param attrs: Key-value attributes to set.
+ :type attrs: dict[str, str]
+ """
+ if span is None:
+ return
+ try:
+ for key, value in attrs.items():
+ span.set_attribute(key, value)
+ except Exception: # pylint: disable=broad-exception-caught
+ logger.debug("Failed to set span attributes: %s", list(attrs.keys()), exc_info=True)
+
+ # ------------------------------------------------------------------
+ # Streaming response helpers
+ # ------------------------------------------------------------------
+
+ async def _monitor_disconnect(self, request: Request, cancellation_signal: asyncio.Event) -> None:
+ """Poll for client disconnect and set cancellation signal.
+
+ Used for non-background streaming requests so that handler
+ cancellation is triggered when the client drops the connection
+ (spec requirement B17).
+
+ :param request: The Starlette request to monitor.
+ :type request: Request
+ :param cancellation_signal: Event to set when disconnect is detected.
+ :type cancellation_signal: asyncio.Event
+ """
+ while not cancellation_signal.is_set():
+ if await request.is_disconnected():
+ cancellation_signal.set()
+ return
+ await asyncio.sleep(0.5)
+
+ def _wrap_streaming_response(
+ self,
+ response: StreamingResponse,
+ otel_span: Any,
+ ) -> StreamingResponse:
+ """Wrap a streaming response's body iterator with span lifecycle.
+
+ ``trace_stream`` wraps the body iterator so the OTel span covers
+ the full streaming duration and is ended when iteration completes.
+
+ :param response: The ``StreamingResponse`` to wrap.
+ :type response: StreamingResponse
+ :param otel_span: The OTel span (or *None* when tracing is disabled).
+ :type otel_span: Any
+ :return: The same response object, with its body_iterator replaced.
+ :rtype: StreamingResponse
+ """
+ if otel_span is None:
+ return response
+ response.body_iterator = trace_stream(response.body_iterator, otel_span)
+ return response
+
+ # ------------------------------------------------------------------
+ # ResponseContext factory
+ # ------------------------------------------------------------------
+
+ def _build_execution_context(
+ self,
+ *,
+ payload: dict[str, Any],
+ parsed: CreateResponse,
+ response_id: str,
+ agent_reference: AgentReference | dict[str, Any],
+ agent_session_id: str | None = None,
+ span: CreateSpan,
+ request: Request,
+ ) -> _ExecutionContext:
+ """Build an :class:`_ExecutionContext` from the parsed request.
+
+ Extracts all protocol fields from *parsed* exactly once and
+ creates the cancellation signal. The companion
+ :class:`ResponseContext` is derived automatically so that both
+ objects share a single source of truth for mode flags, input
+ items, and conversation-threading fields.
+
+ :param payload: Raw JSON payload dict.
+ :type payload: dict[str, Any]
+ :param parsed: Validated :class:`CreateResponse` model.
+ :type parsed: CreateResponse
+ :param response_id: Assigned response identifier.
+ :type response_id: str
+ :param agent_reference: Normalised agent reference model or dictionary.
+ :type agent_reference: AgentReference | dict[str, Any]
+ :keyword agent_session_id: Resolved session ID (B39), or ``None``.
+ :keyword type agent_session_id: str | None
+ :param span: Active observability span for this request.
+ :type span: CreateSpan
+ :param request: Starlette HTTP request (for headers / query params).
+ :type request: Request
+ :return: A fully-populated :class:`_ExecutionContext` with its
+ ``context`` field already set.
+ :rtype: _ExecutionContext
+ """
+ stream = bool(getattr(parsed, "stream", False))
+ store = True if getattr(parsed, "store", None) is None else bool(parsed.store)
+ background = bool(getattr(parsed, "background", False))
+ model = getattr(parsed, "model", None)
+ _expanded = get_input_expanded(parsed)
+ input_items = [out for item in _expanded if (out := to_output_item(item, response_id)) is not None]
+ previous_response_id: str | None = (
+ parsed.previous_response_id
+ if isinstance(parsed.previous_response_id, str) and parsed.previous_response_id
+ else None
+ )
+ conversation_id = _resolve_conversation_id(parsed)
+
+ cancellation_signal = asyncio.Event()
+ if self._shutdown_requested.is_set():
+ cancellation_signal.set()
+
+ ctx = _ExecutionContext(
+ response_id=response_id,
+ agent_reference=agent_reference,
+ model=model,
+ store=store,
+ background=background,
+ stream=stream,
+ input_items=input_items,
+ previous_response_id=previous_response_id,
+ conversation_id=conversation_id,
+ cancellation_signal=cancellation_signal,
+ agent_session_id=agent_session_id,
+ span=span,
+ parsed=parsed,
+ user_isolation_key=request.headers.get("x-agent-user-isolation-key"),
+ chat_isolation_key=request.headers.get("x-agent-chat-isolation-key"),
+ )
+
+ # Derive the public ResponseContext from the execution context.
+ ctx.context = self._create_response_context(ctx, raw_body=payload, request=request)
+ return ctx
+
+ def _create_response_context(
+ self,
+ ctx: _ExecutionContext,
+ *,
+ raw_body: dict[str, Any],
+ request: Request,
+ ) -> ResponseContext:
+ """Derive a :class:`ResponseContext` from an :class:`_ExecutionContext`.
+
+ All protocol fields (mode flags, input items, conversation
+ threading) are read from *ctx* so that values are extracted from
+ the parsed request exactly once.
+
+ :param ctx: The execution context that owns the protocol fields.
+ :param raw_body: The raw JSON payload dict.
+ :param request: The Starlette HTTP request.
+ :return: A fully-populated :class:`ResponseContext`.
+ """
+ mode_flags = ResponseModeFlags(stream=ctx.stream, store=ctx.store, background=ctx.background)
+ client_headers = {k: v for k, v in request.headers.items() if k.lower().startswith("x-client-")}
+
+ context = ResponseContext(
+ response_id=ctx.response_id,
+ mode_flags=mode_flags,
+ raw_body=raw_body,
+ request=ctx.parsed,
+ provider=self._provider,
+ input_items=ctx.input_items,
+ previous_response_id=ctx.previous_response_id,
+ conversation_id=ctx.conversation_id,
+ history_limit=self._runtime_options.default_fetch_history_count,
+ client_headers=client_headers,
+ query_parameters=dict(request.query_params),
+ isolation=IsolationContext(
+ user_key=ctx.user_isolation_key,
+ chat_key=ctx.chat_isolation_key,
+ ),
+ )
+ context.is_shutdown_requested = self._shutdown_requested.is_set()
+ return context
+
+ # ------------------------------------------------------------------
+ # Route handlers
+ # ------------------------------------------------------------------
+
+ async def handle_create(self, request: Request) -> Response: # pylint: disable=too-many-return-statements
+ """Route handler for ``POST /responses``.
+
+ Parses and validates the create request, builds an
+ :class:`_ExecutionContext`, then dispatches to the appropriate
+ orchestrator method (stream / sync / background).
+
+ :param request: Incoming Starlette request.
+ :type request: Request
+ :return: HTTP response for the create operation.
+ :rtype: Response
+ """
+ if self._is_draining:
+ return _service_unavailable("Server is shutting down.", {})
+
+ # Also maintain CreateSpanHook for backward compat (tests etc.)
+ span = start_create_span(
+ "create_response",
+ _initial_create_span_tags(),
+ hook=self._runtime_options.create_span_hook,
+ )
+ captured_error: Exception | None = None
+
+ try:
+ payload = await request.json()
+ _prevalidate_identity_payload(payload)
+ parsed = parse_and_validate_create_response(payload, options=self._runtime_options)
+ except Exception as exc: # pylint: disable=broad-exception-caught
+ logger.error("Failed to parse/validate create request", exc_info=exc)
+ captured_error = exc
+ span.end(captured_error)
+ return _error_response(exc, {})
+
+ try:
+ response_id, agent_reference = _resolve_identity_fields(
+ parsed,
+ request_headers=request.headers,
+ )
+ except Exception as exc: # pylint: disable=broad-exception-caught
+ logger.error("Failed to resolve identity fields", exc_info=exc)
+ captured_error = exc
+ span.end(captured_error)
+ return _error_response(exc, {})
+
+ # B39: Resolve session ID
+ config_session_id = getattr(getattr(self._host, "config", None), "session_id", "") or ""
+ agent_session_id = _resolve_session_id(parsed, payload, env_session_id=config_session_id)
+
+ ctx = self._build_execution_context(
+ payload=payload,
+ parsed=parsed,
+ response_id=response_id,
+ agent_reference=agent_reference,
+ agent_session_id=agent_session_id,
+ span=span,
+ request=request,
+ )
+
+ # Extract X-Request-Id header for request ID propagation (truncated to 256 chars).
+ request_id = extract_request_id(request.headers)
+ _project_id = getattr(getattr(self._host, "config", None), "project_id", "") or ""
+
+ span.set_tags(build_create_span_tags(ctx, request_id=request_id, project_id=_project_id))
+
+ # Start OTel request span using host's request_span context manager.
+ with self._host.request_span(
+ request.headers,
+ response_id,
+ "invoke_agent",
+ operation_name="invoke_agent",
+ session_id=agent_session_id or "",
+ end_on_exit=False,
+ ) as otel_span:
+ self._safe_set_attrs(otel_span, build_create_otel_attrs(ctx, request_id=request_id, project_id=_project_id))
+
+ # Set W3C baggage per spec §7.3
+ bag_ctx = _otel_context.get_current()
+ bag_ctx = _otel_baggage.set_baggage("azure.ai.agentserver.response_id", response_id, context=bag_ctx)
+ bag_ctx = _otel_baggage.set_baggage(
+ "azure.ai.agentserver.conversation_id", ctx.conversation_id or "", context=bag_ctx
+ )
+ bag_ctx = _otel_baggage.set_baggage("azure.ai.agentserver.streaming", str(ctx.stream), context=bag_ctx)
+ if request_id:
+ bag_ctx = _otel_baggage.set_baggage("azure.ai.agentserver.x-request-id", request_id, context=bag_ctx)
+ baggage_token = _otel_context.attach(bag_ctx)
+
+ # Set structured log scope per spec §7.4
+ _ensure_response_log_filter()
+ rid_token = _response_id_var.set(response_id)
+ cid_token = _conversation_id_var.set(ctx.conversation_id or "")
+ str_token = _streaming_var.set(str(ctx.stream).lower())
+
+ disconnect_task: asyncio.Task[None] | None = None
+ try:
+ if ctx.stream:
+ body_iter = self._orchestrator.run_stream(ctx)
+
+ # B17: monitor client disconnect for non-background streams
+ if not ctx.background:
+ disconnect_task = asyncio.create_task(
+ self._monitor_disconnect(request, ctx.cancellation_signal)
+ )
+ raw_iter = body_iter
+
+ async def _iter_with_cleanup(): # type: ignore[return]
+ try:
+ async for chunk in raw_iter:
+ yield chunk
+ finally:
+ if disconnect_task and not disconnect_task.done():
+ disconnect_task.cancel()
+
+ body_iter = _iter_with_cleanup()
+
+ sse_response = StreamingResponse(
+ body_iter,
+ media_type="text/event-stream",
+ headers=self._sse_headers,
+ )
+ wrapped = self._wrap_streaming_response(sse_response, otel_span)
+ return wrapped
+
+ if not ctx.background:
+ disconnect_task = asyncio.create_task(self._monitor_disconnect(request, ctx.cancellation_signal))
+ try:
+ snapshot = await self._orchestrator.run_sync(ctx)
+ end_span(otel_span)
+ return JSONResponse(snapshot, status_code=200)
+ except _HandlerError as exc:
+ logger.error(
+ "Handler error in sync create (response_id=%s)",
+ ctx.response_id,
+ exc_info=exc.original,
+ )
+ self._safe_set_attrs(
+ otel_span,
+ {
+ _ATTR_ERROR_CODE: _classify_error_code(exc.original),
+ _ATTR_ERROR_MESSAGE: str(exc.original),
+ },
+ )
+ end_span(otel_span, exc=exc.original)
+ # Handler errors are server-side faults, not client errors
+ err_body = {
+ "error": {
+ "message": "internal server error",
+ "type": "server_error",
+ "code": "internal_error",
+ "param": None,
+ }
+ }
+ return JSONResponse(err_body, status_code=500)
+ finally:
+ disconnect_task.cancel()
+
+ snapshot = await self._orchestrator.run_background(ctx)
+ end_span(otel_span)
+ return JSONResponse(snapshot, status_code=200, headers=self._response_headers)
+ except _HandlerError as exc:
+ logger.error("Handler error in create (response_id=%s)", ctx.response_id, exc_info=exc.original)
+ self._safe_set_attrs(
+ otel_span,
+ {
+ _ATTR_ERROR_CODE: _classify_error_code(exc.original),
+ _ATTR_ERROR_MESSAGE: str(exc.original),
+ },
+ )
+ end_span(otel_span, exc=exc)
+ # Handler errors are server-side faults, not client errors
+ err_body = {
+ "error": {
+ "message": "internal server error",
+ "type": "server_error",
+ "code": "internal_error",
+ "param": None,
+ }
+ }
+ return JSONResponse(
+ err_body,
+ status_code=500,
+ headers=self._response_headers,
+ )
+ except Exception as exc: # pylint: disable=broad-exception-caught
+ logger.error("Unexpected error in create (response_id=%s)", ctx.response_id, exc_info=exc)
+ self._safe_set_attrs(
+ otel_span,
+ {
+ _ATTR_ERROR_CODE: _classify_error_code(exc),
+ _ATTR_ERROR_MESSAGE: str(exc),
+ },
+ )
+ end_span(otel_span, exc=exc)
+ raise
+ finally:
+ _response_id_var.reset(rid_token)
+ _conversation_id_var.reset(cid_token)
+ _streaming_var.reset(str_token)
+ # Flush pending spans before the response is sent.
+ # BatchSpanProcessor exports on a timer; in hosted sandboxes
+ # the platform may freeze the process after the HTTP response,
+ # losing any buffered spans (e.g. LangGraph per-node spans).
+ flush_spans()
+ try:
+ _otel_context.detach(baggage_token)
+ except ValueError:
+ pass
+
+ async def handle_get(self, request: Request) -> Response: # pylint: disable=too-many-return-statements
+ """Route handler for ``GET /responses/{response_id}``.
+
+ Returns the response snapshot or replays SSE events if
+ ``stream=true`` is in the query parameters.
+
+ :param request: Incoming Starlette request.
+ :type request: Request
+ :return: JSON snapshot or SSE replay streaming response.
+ :rtype: Response
+ """
+ response_id = request.path_params["response_id"]
+ record = await self._runtime_state.get(response_id)
+ if record is None:
+ if await self._runtime_state.is_deleted(response_id):
+ return _deleted_response(response_id, {})
+
+ _isolation = _extract_isolation(request)
+ stream_replay = request.query_params.get("stream", "false").lower() == "true"
+ if not stream_replay:
+ # Provider fallback: serve completed responses that are no longer in runtime state
+ # (e.g., after a process restart).
+ try:
+ response_obj = await self._provider.get_response(response_id, isolation=_isolation)
+ snapshot = response_obj.as_dict()
+ return JSONResponse(snapshot, status_code=200)
+ except Exception: # pylint: disable=broad-exception-caught
+ logger.warning("Provider fallback failed for GET response_id=%s", response_id, exc_info=True)
+ else:
+ # Stream provider fallback: replay persisted SSE events when runtime state is gone.
+ replay_response = await self._try_replay_persisted_stream(request, response_id, isolation=_isolation)
+ if replay_response is not None:
+ return replay_response
+
+ # Response may exist in storage but wasn't replay-eligible
+ # (e.g., created without background=true, stream=true, store=true).
+ try:
+ await self._provider.get_response(response_id, isolation=_isolation)
+ return _invalid_mode(
+ "stream replay is not available for this response; to enable SSE replay, "
+ + "create the response with background=true, stream=true, and store=true",
+ {},
+ param="stream",
+ )
+ except Exception: # pylint: disable=broad-exception-caught
+ pass # Response doesn't exist in provider either — fall through to 404
+
+ return _not_found(response_id, {})
+
+ _refresh_background_status(record)
+
+ stream_replay = request.query_params.get("stream", "false").lower() == "true"
+ if stream_replay:
+ # B14: store=false responses are never persisted — return 404.
+ if not record.mode_flags.store:
+ return _not_found(response_id, {})
+ if not record.replay_enabled:
+ return _invalid_mode(
+ "stream replay is not available for this response; to enable SSE replay, "
+ + "create the response with background=true, stream=true, and store=true",
+ {},
+ param="stream",
+ )
+
+ parsed_cursor = self._parse_starting_after(request)
+ if isinstance(parsed_cursor, Response):
+ return parsed_cursor
+
+ return self._build_live_stream_response(record, parsed_cursor)
+
+ if not record.visible_via_get:
+ return _not_found(response_id, {})
+
+ return JSONResponse(_RuntimeState.to_snapshot(record), status_code=200, headers=self._response_headers)
+
+ @staticmethod
+ def _parse_starting_after(request: Request) -> int | Response:
+ """Parse the ``starting_after`` query parameter.
+
+ Returns the integer cursor value (defaulting to ``-1``) or an
+ error :class:`Response` when the value is not a valid integer.
+ """
+ cursor_raw = request.query_params.get("starting_after")
+ if cursor_raw is None:
+ return -1
+ try:
+ return int(cursor_raw)
+ except ValueError:
+ return _invalid_request(
+ "starting_after must be an integer",
+ {},
+ param="starting_after",
+ )
+
+ def _build_live_stream_response(self, record: ResponseExecution, starting_after: int) -> StreamingResponse:
+ """Build a live SSE subscription response for an in-flight record."""
+ _cursor = starting_after
+
+ async def _stream_from_subject():
+ async for event in record.subject.subscribe(cursor=_cursor): # type: ignore[union-attr]
+ yield encode_sse_any_event(event)
+
+ return StreamingResponse(_stream_from_subject(), media_type="text/event-stream", headers=self._sse_headers)
+
+ async def _try_replay_persisted_stream(
+ self, request: Request, response_id: str, *, isolation: IsolationContext | None = None
+ ) -> Response | None:
+ """Try to replay persisted SSE events from the stream provider.
+
+ Returns a ``StreamingResponse`` if replay events are available,
+ an error ``Response`` for invalid query parameters, or ``None``
+ when no replay data exists.
+ """
+ if self._stream_provider is None:
+ return None
+ try:
+ replay_events = await self._stream_provider.get_stream_events(response_id, isolation=isolation)
+ if replay_events is None:
+ return None
+ parsed_cursor = self._parse_starting_after(request)
+ if isinstance(parsed_cursor, Response):
+ return parsed_cursor
+ filtered = [e for e in replay_events if e["sequence_number"] > parsed_cursor]
+ return StreamingResponse(
+ _encode_sse(filtered),
+ media_type="text/event-stream",
+ headers=self._sse_headers,
+ )
+ except Exception: # pylint: disable=broad-exception-caught
+ logger.warning("Failed to replay persisted stream for response_id=%s", response_id, exc_info=True)
+ return None
+
+ async def handle_delete(self, request: Request) -> Response:
+ """Route handler for ``DELETE /responses/{response_id}``.
+
+ :param request: Incoming Starlette request.
+ :type request: Request
+ :return: Deletion confirmation or error response.
+ :rtype: Response
+ """
+ response_id = request.path_params["response_id"]
+ record = await self._runtime_state.get(response_id)
+ if record is None:
+ return _not_found(response_id, {})
+
+ # store=false responses are not deletable (FR-014)
+ if not record.mode_flags.store:
+ return _not_found(response_id, {})
+
+ _refresh_background_status(record)
+
+ if record.mode_flags.background and record.status in {"queued", "in_progress"}:
+ return _invalid_request(
+ "Cannot delete an in-flight response.",
+ {},
+ param="response_id",
+ )
+
+ deleted = await self._runtime_state.delete(response_id)
+ if not deleted:
+ return _not_found(response_id, {})
+
+ if record.mode_flags.store:
+ try:
+ await self._provider.delete_response(response_id, isolation=_extract_isolation(request))
+ except Exception: # pylint: disable=broad-exception-caught
+ logger.warning("Best-effort provider delete failed for response_id=%s", response_id, exc_info=True)
+ # Clean up persisted stream events
+ if self._stream_provider is not None:
+ try:
+ await self._stream_provider.delete_stream_events(
+ response_id,
+ isolation=_extract_isolation(request),
+ )
+ except Exception: # pylint: disable=broad-exception-caught
+ logger.debug(
+ "Best-effort stream event delete failed for response_id=%s",
+ response_id,
+ exc_info=True,
+ )
+
+ return JSONResponse(
+ {"id": response_id, "object": "response.deleted", "deleted": True},
+ status_code=200,
+ )
+
+ async def handle_cancel(self, request: Request) -> Response: # pylint: disable=too-many-return-statements
+ """Route handler for ``POST /responses/{response_id}/cancel``.
+
+ :param request: Incoming Starlette request.
+ :type request: Request
+ :return: Cancelled snapshot or error response.
+ :rtype: Response
+ """
+ response_id = request.path_params["response_id"]
+ record = await self._runtime_state.get(response_id)
+ if record is None:
+ # Provider fallback: after a restart, stored terminal responses lose
+ # their runtime records. Check the provider so we return the correct
+ # 400 error instead of a misleading 404.
+ try:
+ response_obj = await self._provider.get_response(response_id, isolation=_extract_isolation(request))
+ stored_status = response_obj.as_dict().get("status")
+ if stored_status == "completed":
+ return _invalid_request(
+ "Cannot cancel a completed response.",
+ {},
+ param="response_id",
+ )
+ if stored_status == "failed":
+ return _invalid_request(
+ "Cannot cancel a failed response.",
+ {},
+ param="response_id",
+ )
+ if stored_status == "cancelled":
+ return _invalid_request(
+ "Cannot cancel an already cancelled response.",
+ {},
+ param="response_id",
+ )
+ if stored_status == "incomplete":
+ return _invalid_request(
+ "Cannot cancel an incomplete response.",
+ {},
+ param="response_id",
+ )
+ except Exception: # pylint: disable=broad-exception-caught
+ logger.debug(
+ "Provider fallback failed for cancel response_id=%s",
+ response_id,
+ exc_info=True,
+ )
+ return _not_found(response_id, {})
+
+ _refresh_background_status(record)
+
+ if not record.mode_flags.background:
+ return _invalid_request(
+ "Cannot cancel a synchronous response.",
+ {},
+ param="response_id",
+ )
+
+ if record.status == "cancelled":
+ # Idempotent: ensure the response snapshot reflects cancelled state
+ record.set_response_snapshot(
+ build_cancelled_response(record.response_id, record.agent_reference, record.model)
+ )
+ return JSONResponse(_RuntimeState.to_snapshot(record), status_code=200, headers=self._response_headers)
+
+ if record.status == "completed":
+ return _invalid_request(
+ "Cannot cancel a completed response.",
+ {},
+ param="response_id",
+ )
+
+ if record.status == "failed":
+ return _invalid_request(
+ "Cannot cancel a failed response.",
+ {},
+ param="response_id",
+ )
+
+ if record.status == "incomplete":
+ return _invalid_request(
+ "Cannot cancel an incomplete response.",
+ {},
+ param="response_id",
+ )
+
+ # B11: initiate cancellation winddown
+ record.cancel_requested = True
+ record.cancel_signal.set()
+
+ # Wait for handler to complete with grace period (B11: up to 10 seconds).
+ # Wait for handler task to finish (up to 10s grace period).
+ if record.execution_task is not None:
+ try:
+ await asyncio.wait_for(asyncio.shield(record.execution_task), timeout=10.0)
+ except (asyncio.TimeoutError, asyncio.CancelledError, Exception): # pylint: disable=broad-exception-caught
+ pass # Handler may throw or timeout — already handled by the task itself
+
+ # Set cancelled snapshot and transition
+ record.set_response_snapshot(build_cancelled_response(record.response_id, record.agent_reference, record.model))
+ record.transition_to("cancelled")
+
+ # Persist cancelled state to durable store (B11: cancellation always wins)
+ try:
+ if record.response is not None:
+ await self._provider.update_response(record.response, isolation=_extract_isolation(request))
+ except Exception: # pylint: disable=broad-exception-caught
+ logger.debug("Best-effort cancel persist failed for response_id=%s", record.response_id, exc_info=True)
+
+ return JSONResponse(_RuntimeState.to_snapshot(record), status_code=200, headers=self._response_headers)
+
+ async def handle_input_items(self, request: Request) -> Response:
+ """Route handler for ``GET /responses/{response_id}/input_items``.
+
+ Returns a paginated list of input items for the given response.
+
+ :param request: Incoming Starlette request.
+ :type request: Request
+ :return: Paginated input items list.
+ :rtype: Response
+ """
+ response_id = request.path_params["response_id"]
+
+ limit_raw = request.query_params.get("limit", "20")
+ try:
+ limit = int(limit_raw)
+ except ValueError:
+ return _invalid_request("limit must be an integer between 1 and 100", {}, param="limit")
+
+ if limit < 1 or limit > 100:
+ return _invalid_request("limit must be between 1 and 100", {}, param="limit")
+
+ order = request.query_params.get("order", "desc").lower()
+ if order not in {"asc", "desc"}:
+ return _invalid_request("order must be 'asc' or 'desc'", {}, param="order")
+
+ after = request.query_params.get("after")
+ before = request.query_params.get("before")
+
+ try:
+ items = await self._provider.get_input_items(
+ response_id, limit=100, ascending=True, isolation=_extract_isolation(request)
+ )
+ except ValueError:
+ return _deleted_response(response_id, {})
+ except KeyError:
+ # Fall back to runtime_state for in-flight responses not yet persisted to provider
+ try:
+ items = await self._runtime_state.get_input_items(response_id)
+ except ValueError:
+ return _deleted_response(response_id, {})
+ except KeyError:
+ return _not_found(response_id, {})
+
+ ordered_items = items if order == "asc" else list(reversed(items))
+ scoped_items = _apply_item_cursors(ordered_items, after=after, before=before)
+
+ page = scoped_items[:limit]
+ has_more = len(scoped_items) > limit
+
+ first_id = _extract_item_id(page[0]) if page else None
+ last_id = _extract_item_id(page[-1]) if page else None
+
+ page_data = [item.as_dict() if hasattr(item, "as_dict") else item for item in page]
+
+ return JSONResponse(
+ {
+ "object": "list",
+ "data": page_data,
+ "first_id": first_id,
+ "last_id": last_id,
+ "has_more": has_more,
+ },
+ status_code=200,
+ )
+
+ async def handle_shutdown(self) -> None:
+ """Graceful shutdown handler.
+
+ Signals all active responses to cancel and waits for in-flight
+ background executions to complete within the configured grace period.
+
+ :return: None
+ :rtype: None
+ """
+ self._is_draining = True
+ self._shutdown_requested.set()
+
+ records = await self._runtime_state.list_records()
+ for record in records:
+ if record.response_context is not None:
+ record.response_context.is_shutdown_requested = True
+
+ record.cancel_signal.set()
+
+ if record.mode_flags.background and record.status in {"queued", "in_progress"}:
+ record.set_response_snapshot(
+ build_failed_response(record.response_id, record.agent_reference, record.model)
+ )
+ record.transition_to("failed")
+
+ deadline = asyncio.get_running_loop().time() + float(self._runtime_options.shutdown_grace_period_seconds)
+ while True:
+ pending = [
+ record
+ for record in records
+ if record.mode_flags.background
+ and record.execution_task is not None
+ and record.status in {"queued", "in_progress"}
+ ]
+ if not pending:
+ break
+ if asyncio.get_running_loop().time() >= deadline:
+ break
+ await asyncio.sleep(0.05)
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/hosting/_event_subject.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/hosting/_event_subject.py
new file mode 100644
index 000000000000..fda631d76686
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/hosting/_event_subject.py
@@ -0,0 +1,94 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Seekable replay subject for in-process SSE event broadcasting."""
+
+from __future__ import annotations
+
+import asyncio # pylint: disable=do-not-import-asyncio
+from typing import AsyncIterator
+
+from ..models._generated import ResponseStreamEvent
+
+
+class _ResponseEventSubject:
+ """In-process hot observable with replay buffer for SSE event broadcasting.
+
+ Implements a seekable replay subject pattern.
+ Multiple concurrent subscribers can join at any time and receive:
+
+ - All buffered events emitted since creation (or from a cursor).
+ - Subsequent live events as they are published in real time.
+ - A completion signal when the stream ends.
+
+ This enables live SSE replay behaviour for
+ ``GET /responses/{id}?stream=true`` while a background+stream response is
+ still in flight.
+ """
+
+ _DONE: object = object() # sentinel that signals stream completion
+
+ def __init__(self) -> None:
+ """Initialise the subject with an empty event buffer and no subscribers."""
+ self._events: list[ResponseStreamEvent] = []
+ self._subscribers: list[asyncio.Queue[ResponseStreamEvent | object]] = []
+ self._done: bool = False
+ self._lock: asyncio.Lock = asyncio.Lock()
+
+ async def publish(self, event: ResponseStreamEvent) -> None:
+ """Push a new event to all current subscribers and append it to the replay buffer.
+
+ :param event: The normalised event (``ResponseStreamEvent`` model instance).
+ :type event: ResponseStreamEvent
+ """
+ async with self._lock:
+ self._events.append(event)
+ for q in self._subscribers:
+ q.put_nowait(event)
+
+ async def complete(self) -> None:
+ """Signal stream completion to all current and future subscribers.
+
+ After calling this, new :meth:`subscribe` calls will still deliver the full
+ buffered event history and then exit immediately.
+ """
+ async with self._lock:
+ self._done = True
+ for q in self._subscribers:
+ q.put_nowait(self._DONE)
+
+ async def subscribe(self, cursor: int = -1) -> AsyncIterator[ResponseStreamEvent]:
+ """Subscribe to events, yielding buffered history then live events.
+
+ :param cursor: Sequence-number cursor. Only events whose
+ ``sequence_number`` is strictly greater than *cursor* are
+ yielded. Pass ``-1`` (default) to receive all events.
+ :type cursor: int
+ :returns: An async iterator of event instances.
+ :rtype: AsyncIterator[ResponseStreamEvent]
+ """
+ q: asyncio.Queue[ResponseStreamEvent | object] = asyncio.Queue()
+ async with self._lock:
+ # Replay all buffered events that are after the cursor
+ for event in self._events:
+ if event["sequence_number"] > cursor:
+ q.put_nowait(event)
+ if self._done:
+ # Stream already completed — put sentinel so iterator exits after replay
+ q.put_nowait(self._DONE)
+ else:
+ # Register for live events
+ self._subscribers.append(q)
+
+ try:
+ while True:
+ item = await q.get()
+ if item is self._DONE:
+ return
+ yield item
+ finally:
+ # Clean up subscription on client disconnect or normal completion
+ async with self._lock:
+ try:
+ self._subscribers.remove(q)
+ except ValueError:
+ pass # already removed (e.g. complete() ran concurrently)
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/hosting/_execution_context.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/hosting/_execution_context.py
new file mode 100644
index 000000000000..f65eddb5c658
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/hosting/_execution_context.py
@@ -0,0 +1,41 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Per-request execution context for the Responses server."""
+
+from __future__ import annotations
+
+import asyncio # pylint: disable=do-not-import-asyncio
+from dataclasses import dataclass
+from typing import TYPE_CHECKING, Any
+
+from .._response_context import ResponseContext
+from ..models._generated import AgentReference, CreateResponse, OutputItem
+
+if TYPE_CHECKING:
+ from ._observability import CreateSpan
+
+
+@dataclass(slots=True)
+class _ExecutionContext:
+ """Holds all per-request state for a single create-response call.
+
+ Passed between the routing layer and the orchestrator. All fields
+ except ``context`` are set once at construction.
+ """
+
+ response_id: str
+ agent_reference: AgentReference | dict[str, Any]
+ model: str | None
+ store: bool
+ background: bool
+ stream: bool
+ input_items: list[OutputItem]
+ previous_response_id: str | None
+ conversation_id: str | None
+ cancellation_signal: asyncio.Event
+ span: CreateSpan
+ parsed: CreateResponse
+ agent_session_id: str | None = None
+ context: ResponseContext | None = None
+ user_isolation_key: str | None = None
+ chat_isolation_key: str | None = None
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/hosting/_observability.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/hosting/_observability.py
new file mode 100644
index 000000000000..f984c62dc1f5
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/hosting/_observability.py
@@ -0,0 +1,327 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Observability and server version header helpers."""
+
+from __future__ import annotations
+
+from collections.abc import Mapping, MutableMapping
+from dataclasses import dataclass
+from datetime import datetime, timezone
+from typing import TYPE_CHECKING, Any, Protocol, runtime_checkable
+
+if TYPE_CHECKING:
+ from ._execution_context import _ExecutionContext
+
+
+def build_platform_server_header(sdk_name: str, version: str, runtime: str, extra: str | None = None) -> str:
+ """Build the platform server user-agent header value.
+
+ :param sdk_name: SDK package name.
+ :type sdk_name: str
+ :param version: SDK package version.
+ :type version: str
+ :param runtime: Runtime marker, such as python/3.10.
+ :type runtime: str
+ :param extra: Optional additional user-agent suffix.
+ :type extra: str | None
+ :returns: Formatted user-agent header value.
+ :rtype: str
+ """
+ base_value = f"{sdk_name}/{version} ({runtime})"
+ return f"{base_value} {extra}".strip() if extra else base_value
+
+
+@runtime_checkable
+class CreateSpanHook(Protocol):
+ """Hook contract for one-root-span-per-create observability."""
+
+ def on_span_start(self, name: str, tags: dict[str, Any]) -> None:
+ """Called when a create span starts.
+
+ :param name: Span name.
+ :type name: str
+ :param tags: Initial span tags.
+ :type tags: dict[str, Any]
+ :return: None
+ :rtype: None
+ """
+
+ def on_span_end(self, name: str, tags: dict[str, Any], error: Exception | None) -> None:
+ """Called when a create span ends.
+
+ :param name: Span name.
+ :type name: str
+ :param tags: Final span tags.
+ :type tags: dict[str, Any]
+ :param error: The exception if the span ended with an error, or ``None``.
+ :type error: Exception | None
+ :return: None
+ :rtype: None
+ """
+
+
+class CreateSpan:
+ """Mutable create-span helper used by hosting orchestration."""
+
+ def __init__(
+ self,
+ *,
+ name: str,
+ tags: dict[str, Any],
+ _hook: CreateSpanHook | None = None,
+ _ended: bool = False,
+ ) -> None:
+ self.name = name
+ self.tags = tags
+ self._hook = _hook
+ self._ended = _ended
+
+ def set_tag(self, key: str, value: Any) -> None:
+ """Set or overwrite one span tag.
+
+ :param key: Tag key.
+ :type key: str
+ :param value: Tag value.
+ :type value: Any
+ :return: None
+ :rtype: None
+ """
+ self.tags[key] = value
+
+ def set_tags(self, values: dict[str, Any]) -> None:
+ """Merge a set of tags into this span.
+
+ :param values: Dictionary of tags to merge.
+ :type values: dict[str, Any]
+ :return: None
+ :rtype: None
+ """
+ self.tags.update(values)
+
+ def end(self, error: Exception | None = None) -> None:
+ """Complete the span exactly once.
+
+ Subsequent calls are no-ops.
+
+ :param error: The exception if the span ended with an error, or ``None``.
+ :type error: Exception | None
+ :return: None
+ :rtype: None
+ """
+ if self._ended:
+ return
+
+ self._ended = True
+ if self._hook is None:
+ return
+ self._hook.on_span_end(self.name, dict(self.tags), error)
+
+
+def start_create_span(name: str, tags: dict[str, Any], hook: CreateSpanHook | None = None) -> CreateSpan:
+ """Start a create span and notify hook subscribers.
+
+ :param name: Span name.
+ :type name: str
+ :param tags: Initial span tags.
+ :type tags: dict[str, Any]
+ :param hook: Optional hook to receive span lifecycle events.
+ :type hook: CreateSpanHook | None
+ :return: The started ``CreateSpan`` instance.
+ :rtype: CreateSpan
+ """
+ span = CreateSpan(name=name, tags=dict(tags), _hook=hook)
+ if hook is not None:
+ hook.on_span_start(name, dict(span.tags))
+ return span
+
+
+def build_create_span_tags(
+ ctx: "_ExecutionContext",
+ *,
+ request_id: str | None = None,
+ project_id: str = "",
+) -> dict[str, Any]:
+ """Build a GenAI tag set for create spans from an execution context.
+
+ :param ctx: Current execution context.
+ :type ctx: _ExecutionContext
+ :keyword request_id: Truncated ``X-Request-Id`` value, or ``None``.
+ :keyword type request_id: str | None
+ :keyword project_id: Foundry project ARM resource ID.
+ :keyword type project_id: str
+ :return: Dictionary of OpenTelemetry-style GenAI span tags.
+ :rtype: dict[str, Any]
+ """
+ agent_name, agent_version, agent_id = _resolve_agent_fields(ctx.agent_reference)
+ tags: dict[str, Any] = {
+ "service.name": _SERVICE_NAME,
+ "gen_ai.provider.name": _PROVIDER_NAME,
+ "gen_ai.system": "responses",
+ "gen_ai.operation.name": "invoke_agent",
+ "gen_ai.response.id": ctx.response_id,
+ "gen_ai.request.model": ctx.model,
+ "gen_ai.agent.name": agent_name,
+ "gen_ai.agent.id": agent_id if agent_id is not None else "",
+ # Namespaced tags per spec §7.2
+ "azure.ai.agentserver.responses.response_id": ctx.response_id,
+ "azure.ai.agentserver.responses.conversation_id": ctx.conversation_id or "",
+ "azure.ai.agentserver.responses.streaming": ctx.stream,
+ }
+ if project_id:
+ tags["microsoft.foundry.project.id"] = project_id
+ if agent_version is not None:
+ tags["gen_ai.agent.version"] = agent_version
+ if ctx.conversation_id is not None:
+ tags["gen_ai.conversation.id"] = ctx.conversation_id
+ if request_id is not None:
+ tags["request.id"] = request_id
+ return tags
+
+
+_SERVICE_NAME = "azure.ai.agentserver"
+_PROVIDER_NAME = "AzureAI Hosted Agents"
+_MAX_REQUEST_ID_LEN = 256
+
+
+def _initial_create_span_tags() -> dict[str, Any]:
+ """Placeholder tags for the initial create span (before request context is known).
+
+ Used to initialise :class:`CreateSpan` before :class:`_ExecutionContext` is
+ available. The real tags are written via :func:`build_create_span_tags`
+ once the execution context has been constructed.
+
+ :return: Minimal tag dict with fixed provider identifiers.
+ :rtype: dict[str, Any]
+ """
+ return {
+ "service.name": _SERVICE_NAME,
+ "gen_ai.provider.name": _PROVIDER_NAME,
+ "gen_ai.system": "responses",
+ "gen_ai.operation.name": "invoke_agent",
+ }
+
+
+def extract_request_id(headers: Mapping[str, str]) -> str | None:
+ """Extract and truncate the ``X-Request-Id`` header value.
+
+ Returns the value truncated to 256 characters, or ``None`` when the
+ header is absent.
+
+ :param headers: HTTP request headers mapping.
+ :type headers: Mapping[str, str]
+ :return: Truncated request ID string, or ``None``.
+ :rtype: str | None
+ """
+ raw = headers.get("x-request-id") or headers.get("X-Request-Id")
+ return raw[:_MAX_REQUEST_ID_LEN] if raw else None
+
+
+def _resolve_agent_fields(
+ agent_reference: MutableMapping[str, Any] | dict[str, Any] | None,
+) -> tuple[str | None, str | None, str | None]:
+ """Return ``(agent_name, agent_version, agent_id)`` from *agent_reference*."""
+ if agent_reference is None or not isinstance(agent_reference, (dict, MutableMapping)):
+ return None, None, None
+ name = agent_reference.get("name") or None
+ version = agent_reference.get("version") or None
+ agent_id = f"{name}:{version}" if name and version else None
+ return name, version, agent_id
+
+
+def build_create_otel_attrs(
+ ctx: "_ExecutionContext",
+ *,
+ request_id: str | None,
+ project_id: str = "",
+) -> dict[str, Any]:
+ """Build the OTel span attribute dict for ``POST /responses``.
+
+ :param ctx: Current execution context.
+ :type ctx: _ExecutionContext
+ :keyword request_id: Truncated ``X-Request-Id`` value, or ``None``.
+ :keyword type request_id: str | None
+ :keyword project_id: Foundry project ARM resource ID.
+ :keyword type project_id: str
+ :return: Attribute dict ready to be set on an OTel span.
+ :rtype: dict[str, Any]
+ """
+ agent_name, agent_version, agent_id = _resolve_agent_fields(ctx.agent_reference)
+ attrs: dict[str, Any] = {
+ "gen_ai.response.id": ctx.response_id,
+ "gen_ai.provider.name": _PROVIDER_NAME,
+ "service.name": _SERVICE_NAME,
+ "gen_ai.operation.name": "invoke_agent",
+ "gen_ai.request.model": ctx.model or "",
+ "gen_ai.agent.id": agent_id if agent_id is not None else "",
+ # Namespaced tags per spec §7.2
+ "azure.ai.agentserver.responses.response_id": ctx.response_id,
+ "azure.ai.agentserver.responses.conversation_id": ctx.conversation_id or "",
+ "azure.ai.agentserver.responses.streaming": ctx.stream,
+ }
+ if project_id:
+ attrs["microsoft.foundry.project.id"] = project_id
+ if ctx.conversation_id:
+ attrs["gen_ai.conversation.id"] = ctx.conversation_id
+ if agent_name:
+ attrs["gen_ai.agent.name"] = agent_name
+ if agent_version:
+ attrs["gen_ai.agent.version"] = agent_version
+ if request_id:
+ attrs["request.id"] = request_id
+ return attrs
+
+
+@dataclass
+class RecordedSpan:
+ """Recorded span event for tests and diagnostics."""
+
+ name: str
+ tags: dict[str, Any]
+ started_at: datetime
+ ended_at: datetime | None = None
+ error: Exception | None = None
+
+
+class InMemoryCreateSpanHook:
+ """Simple in-memory hook for asserting span lifecycle in tests."""
+
+ def __init__(self, spans: list[RecordedSpan] | None = None) -> None:
+ self.spans: list[RecordedSpan] = spans if spans is not None else []
+
+ def on_span_start(self, name: str, tags: dict[str, Any]) -> None:
+ """Record a span start event.
+
+ :param name: Span name.
+ :type name: str
+ :param tags: Span tags at start time.
+ :type tags: dict[str, Any]
+ :return: None
+ :rtype: None
+ """
+ self.spans.append(
+ RecordedSpan(
+ name=name,
+ tags=dict(tags),
+ started_at=datetime.now(timezone.utc),
+ )
+ )
+
+ def on_span_end(self, name: str, tags: dict[str, Any], error: Exception | None) -> None:
+ """Record a span end event.
+
+ :param name: Span name.
+ :type name: str
+ :param tags: Final span tags.
+ :type tags: dict[str, Any]
+ :param error: The exception if the span ended with an error, or ``None``.
+ :type error: Exception | None
+ :return: None
+ :rtype: None
+ """
+ if not self.spans:
+ self.on_span_start(name, tags)
+
+ span = self.spans[-1]
+ span.tags = dict(tags)
+ span.error = error
+ span.ended_at = datetime.now(timezone.utc)
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/hosting/_orchestrator.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/hosting/_orchestrator.py
new file mode 100644
index 000000000000..19b3bcdac066
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/hosting/_orchestrator.py
@@ -0,0 +1,1431 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Event-pipeline orchestration for the Responses server.
+
+This module is intentionally free of Starlette imports: it operates purely on
+``_ExecutionContext`` and produces plain Python data (dicts, async iterators of
+strings). The HTTP layer (Starlette ``Request`` / ``Response``) lives in the
+routing module which wraps these results.
+"""
+
+from __future__ import annotations
+
+import asyncio # pylint: disable=do-not-import-asyncio
+import logging
+from copy import deepcopy
+from typing import TYPE_CHECKING, Any, AsyncIterator, Callable
+
+logger = logging.getLogger("azure.ai.agentserver")
+
+if TYPE_CHECKING:
+ from .._response_context import ResponseContext
+ from ..models._generated import AgentReference, CreateResponse
+
+import anyio
+
+from .._options import ResponsesServerOptions
+from ..models import _generated as generated_models
+from ..models.runtime import (
+ ResponseExecution,
+ ResponseModeFlags,
+)
+from ..models.runtime import (
+ build_cancelled_response as _build_cancelled_response,
+)
+from ..models.runtime import (
+ build_failed_response as _build_failed_response,
+)
+from ..store._base import ResponseProviderProtocol, ResponseStreamProviderProtocol
+from ..streaming._helpers import (
+ EVENT_TYPE,
+ _apply_stream_event_defaults,
+ _build_events,
+ _coerce_handler_event,
+ _extract_response_snapshot_from_events,
+)
+from ..streaming._internals import construct_event_model
+from ..streaming._sse import encode_keep_alive_comment, encode_sse_any_event, new_stream_counter
+from ..streaming._state_machine import EventStreamValidator
+from ._event_subject import _ResponseEventSubject
+from ._execution_context import _ExecutionContext
+from ._runtime_state import _RuntimeState
+
+
+def _check_first_event_contract(normalized: generated_models.ResponseStreamEvent, response_id: str) -> str | None:
+ """Return an error message if the first handler event violates FR-006/FR-007, else None.
+
+ - FR-006: The first event MUST be ``response.created`` with matching ``id``.
+ - FR-007: The ``status`` in ``response.created`` MUST be non-terminal.
+
+ :param normalized: Normalised first event (``ResponseStreamEvent`` model instance).
+ :type normalized: ResponseStreamEvent
+ :param response_id: Library-assigned response identifier.
+ :type response_id: str
+ :return: Violation message string, or ``None`` if no violation.
+ :rtype: str | None
+ """
+ event_type = normalized.get("type")
+ response = normalized.get("response") or {}
+ if event_type != "response.created":
+ return f"first event must be response.created, got '{event_type}'"
+ emitted_id = response.get("id")
+ if emitted_id and emitted_id != response_id:
+ return f"response.created id '{emitted_id}' != assigned id '{response_id}'"
+ emitted_status = response.get("status")
+ if emitted_status in {"completed", "failed", "cancelled", "incomplete"}:
+ return f"response.created status must be non-terminal, got '{emitted_status}'"
+ return None
+
+
+_CANCEL_WINDDOWN_TIMEOUT: float = 10.0
+
+
+async def _iter_with_winddown(
+ aiter: Any,
+ cancel_signal: asyncio.Event,
+ timeout: float = _CANCEL_WINDDOWN_TIMEOUT,
+) -> AsyncIterator:
+ """Yield items from *aiter*, enforcing a winddown timeout after cancellation.
+
+ Once *cancel_signal* is set a countdown of *timeout* seconds begins.
+ If the iterator does not stop within the budget, iteration is terminated
+ so that the caller can finalise the response without hanging indefinitely.
+
+ :param aiter: The async iterator to wrap.
+ :type aiter: Any
+ :param cancel_signal: Event signalling that cancellation was requested.
+ :type cancel_signal: asyncio.Event
+ :param timeout: Maximum seconds to wait after cancellation before forcing stop.
+ :type timeout: float
+ :return: Async iterator of items from *aiter*.
+ :rtype: AsyncIterator
+ """
+ deadline: float | None = None
+ while True:
+ if cancel_signal.is_set() and deadline is None:
+ deadline = asyncio.get_event_loop().time() + timeout
+
+ try:
+ if deadline is not None:
+ remaining = deadline - asyncio.get_event_loop().time()
+ if remaining <= 0:
+ return
+ item = await asyncio.wait_for(aiter.__anext__(), timeout=remaining)
+ else:
+ item = await aiter.__anext__()
+ except StopAsyncIteration:
+ return
+ except asyncio.TimeoutError:
+ return
+
+ yield item
+
+
+_OUTPUT_ITEM_EVENT_TYPES: frozenset[str] = frozenset(
+ {
+ EVENT_TYPE.RESPONSE_OUTPUT_ITEM_ADDED.value,
+ EVENT_TYPE.RESPONSE_OUTPUT_ITEM_DONE.value,
+ }
+)
+
+# Response-level lifecycle events whose ``response`` field carries a full Response snapshot.
+# Used by FR-008a output manipulation detection.
+_RESPONSE_SNAPSHOT_TYPES: frozenset[str] = frozenset(
+ {
+ EVENT_TYPE.RESPONSE_IN_PROGRESS.value,
+ EVENT_TYPE.RESPONSE_COMPLETED.value,
+ EVENT_TYPE.RESPONSE_FAILED.value,
+ EVENT_TYPE.RESPONSE_INCOMPLETE.value,
+ EVENT_TYPE.RESPONSE_QUEUED.value,
+ }
+)
+
+
+def _validate_handler_event(coerced: generated_models.ResponseStreamEvent) -> str | None:
+ """Return an error message if a coerced handler event has invalid structure, else None.
+
+ Lightweight structural checks (B30):
+ - For ``response.output_item.*`` events the model/dict must contain
+ ``output_index`` and at least one of ``item_id`` or ``item``.
+
+ :param coerced: Coerced event (``ResponseStreamEvent`` model instance).
+ :type coerced: ResponseStreamEvent
+ :return: Violation message string, or ``None`` if valid.
+ :rtype: str | None
+ """
+ event_type = coerced.get("type", "")
+ if event_type in _OUTPUT_ITEM_EVENT_TYPES:
+ if coerced.get("output_index") is None:
+ return f"{event_type} missing required field 'output_index'"
+ if coerced.get("item_id") is None and coerced.get("item") is None:
+ return f"{event_type} must include 'item_id' or 'item'"
+
+ return None
+
+
+async def _run_background_non_stream(
+ *,
+ create_fn: Callable[..., AsyncIterator[generated_models.ResponseStreamEvent]],
+ parsed: CreateResponse,
+ context: ResponseContext,
+ cancellation_signal: asyncio.Event,
+ record: ResponseExecution,
+ response_id: str,
+ agent_reference: AgentReference | dict[str, Any],
+ model: str | None,
+ provider: ResponseProviderProtocol | None = None,
+ store: bool = True,
+ agent_session_id: str | None = None,
+ conversation_id: str | None = None,
+ history_limit: int = 100,
+) -> None:
+ """Execute a non-stream handler in the background and update the execution record.
+
+ Collects handler events, builds the response payload, and transitions the
+ record status to ``completed``, ``failed``, or ``cancelled``.
+
+ :keyword create_fn: The handler's async generator callable.
+ :keyword type create_fn: Callable[..., AsyncIterator[ResponseStreamEvent]]
+ :keyword parsed: Parsed ``CreateResponse`` model instance.
+ :keyword type parsed: CreateResponse
+ :keyword context: Runtime response context for this request.
+ :keyword type context: ResponseContext
+ :keyword cancellation_signal: Event signalling that cancellation was requested.
+ :keyword type cancellation_signal: asyncio.Event
+ :keyword record: The mutable execution record to update.
+ :keyword type record: ResponseExecution
+ :keyword response_id: The response ID for this execution.
+ :keyword type response_id: str
+ :keyword agent_reference: Normalized agent reference model or dictionary.
+ :keyword type agent_reference: AgentReference | dict[str, Any]
+ :keyword model: Model name, or ``None``.
+ :keyword type model: str | None
+ :keyword provider: Optional persistence provider; when set and ``store`` is ``True``,
+ ``update_response`` is called after terminal state is reached.
+ :keyword type provider: ResponseProviderProtocol | None
+ :keyword store: Whether the response should be persisted via the provider.
+ :keyword type store: bool
+ :keyword agent_session_id: Resolved session ID (B39).
+ :keyword type agent_session_id: str | None
+ :return: None
+ :rtype: None
+ """
+ record.transition_to("in_progress")
+ handler_events: list[generated_models.ResponseStreamEvent] = []
+ validator = EventStreamValidator()
+ output_item_count = 0
+ _provider_created = False # tracks whether create_response was called
+ # Track whether the handler set queued status so we can honour it
+ _handler_initial_status: str | None = None
+
+ try:
+ try:
+ first_event_processed = False
+ async for handler_event in _iter_with_winddown(
+ create_fn(parsed, context, cancellation_signal), cancellation_signal
+ ):
+ if cancellation_signal.is_set():
+ record.transition_to("cancelled")
+ return
+
+ coerced = _coerce_handler_event(handler_event)
+ b30_err = _validate_handler_event(coerced)
+ if b30_err:
+ raise ValueError(b30_err)
+ normalized = _apply_stream_event_defaults(
+ coerced,
+ response_id=response_id,
+ agent_reference=agent_reference,
+ model=model,
+ sequence_number=None,
+ agent_session_id=agent_session_id,
+ conversation_id=conversation_id,
+ )
+ handler_events.append(normalized)
+ validator.validate_next(normalized)
+ if not first_event_processed:
+ first_event_processed = True
+
+ # FR-008a: output manipulation detection on response.created
+ created_response = normalized.get("response") or {}
+ created_output = created_response.get("output")
+ if isinstance(created_output, list) and len(created_output) != 0:
+ raise ValueError(
+ f"Handler directly modified Response.Output "
+ f"(found {len(created_output)} items, expected 0). "
+ f"Use output builder events instead."
+ )
+
+ # Set initial response snapshot for POST response body without
+ # changing record.status (transition_to manages status lifecycle)
+ _initial_snapshot = _extract_response_snapshot_from_events(
+ handler_events,
+ response_id=response_id,
+ agent_reference=agent_reference,
+ model=model,
+ agent_session_id=agent_session_id,
+ conversation_id=conversation_id,
+ )
+ record.set_response_snapshot(generated_models.ResponseObject(_initial_snapshot))
+ # Honour the handler's initial status (e.g. "queued") so the
+ # POST response body reflects what the handler actually set.
+ _handler_initial_status = _initial_snapshot.get("status")
+ if _handler_initial_status == "queued":
+ record.status = "queued" # type: ignore[assignment]
+ # Persist at response.created time for bg+store (FR-003)
+ if store and provider is not None:
+ try:
+ _isolation = context.isolation if context else None
+ _response_obj = generated_models.ResponseObject(_initial_snapshot)
+ _history_ids = (
+ await provider.get_history_item_ids(
+ record.previous_response_id,
+ None,
+ history_limit,
+ isolation=_isolation,
+ )
+ if record.previous_response_id
+ else None
+ )
+ await provider.create_response(
+ _response_obj, record.input_items or None, _history_ids, isolation=_isolation
+ )
+ _provider_created = True
+ except Exception: # pylint: disable=broad-exception-caught
+ logger.warning(
+ "Best-effort provider create failed at response.created (response_id=%s)",
+ response_id,
+ exc_info=True,
+ )
+ record.response_created_signal.set()
+ else:
+ # Track output_item.added events for FR-008a
+ if normalized.get("type") == EVENT_TYPE.RESPONSE_OUTPUT_ITEM_ADDED.value:
+ output_item_count += 1
+
+ # FR-008a: detect direct Output manipulation on response.* events
+ n_type = normalized.get("type", "")
+ if n_type in _RESPONSE_SNAPSHOT_TYPES:
+ n_response = normalized.get("response") or {}
+ n_output = n_response.get("output")
+ if isinstance(n_output, list) and len(n_output) > output_item_count:
+ raise ValueError(
+ f"Output item count mismatch "
+ f"({len(n_output)} vs {output_item_count} output_item.added events)"
+ )
+ except asyncio.CancelledError:
+ # S-024: Distinguish known cancellation (cancel_signal set) from
+ # unknown. Known cancellation → transition to "cancelled".
+ # Unknown CancelledError (e.g. event-loop teardown) is re-raised.
+ if cancellation_signal.is_set():
+ if record.status != "cancelled":
+ record.transition_to("cancelled")
+ if not first_event_processed:
+ record.response_failed_before_events = True
+ record.response_created_signal.set()
+ return
+ raise
+ except Exception as exc: # pylint: disable=broad-exception-caught
+ logger.error(
+ "Handler raised during background processing (response_id=%s)",
+ response_id,
+ exc_info=exc,
+ )
+ if record.status != "cancelled":
+ record.set_response_snapshot(
+ _build_failed_response(
+ response_id,
+ agent_reference,
+ model,
+ created_at=context.created_at,
+ )
+ )
+ record.transition_to("failed")
+ if not first_event_processed:
+ # Mark failure before any events so run_background can return HTTP 500
+ record.response_failed_before_events = True
+ record.response_created_signal.set() # unblock run_background on failure
+ return
+
+ if cancellation_signal.is_set():
+ record.transition_to("cancelled")
+ record.response_created_signal.set() # unblock run_background on cancellation
+ return
+
+ events = (
+ handler_events
+ if handler_events
+ else _build_events(
+ response_id,
+ include_progress=True,
+ agent_reference=agent_reference,
+ model=model,
+ )
+ )
+ response_payload = _extract_response_snapshot_from_events(
+ events,
+ response_id=response_id,
+ agent_reference=agent_reference,
+ model=model,
+ remove_sequence_number=True,
+ agent_session_id=agent_session_id,
+ conversation_id=conversation_id,
+ )
+
+ resolved_status = response_payload.get("status")
+ if record.status != "cancelled":
+ record.set_response_snapshot(generated_models.ResponseObject(response_payload))
+ target = resolved_status if isinstance(resolved_status, str) else "completed"
+ # If still queued, transition through in_progress first so the
+ # state machine stays valid (queued can only reach terminal
+ # states via in_progress).
+ if record.status == "queued" and target != "in_progress":
+ record.transition_to("in_progress")
+ record.transition_to(target)
+ finally:
+ # Always unblock run_background (idempotent if already set)
+ record.response_created_signal.set()
+ # Persist terminal state update via provider (bg non-stream: update after runner completes)
+ if store and provider is not None and record.status not in {"cancelled"} and record.response is not None:
+ try:
+ if _provider_created:
+ await provider.update_response(record.response)
+ else:
+ # Response was never created (handler yielded nothing or
+ # failed before response.created) — create instead of update.
+ _isolation = context.isolation if context else None
+ await provider.create_response(
+ record.response, record.input_items or None, None, isolation=_isolation
+ )
+ except Exception: # pylint: disable=broad-exception-caught
+ logger.warning(
+ "Best-effort provider persist failed at finalization (response_id=%s)",
+ response_id,
+ exc_info=True,
+ )
+
+
+def _refresh_background_status(record: ResponseExecution) -> None:
+ """Refresh the status of a background execution record.
+
+ Checks the execution task state and cancellation signal to update the
+ record status. Called by GET/DELETE/cancel endpoints to reflect the
+ current runner state without triggering execution.
+
+ :param record: The execution record to refresh.
+ :type record: ResponseExecution
+ :return: None
+ :rtype: None
+ """
+ if not record.mode_flags.background or record.is_terminal:
+ return
+
+ if record.cancel_signal.is_set() and not record.is_terminal:
+ record.status = "cancelled"
+ return
+
+ # execution_task is started immediately in run_background (Task 3.1)
+ if record.execution_task is not None and record.execution_task.done():
+ if not record.is_terminal:
+ if record.execution_task.cancelled():
+ record.status = "cancelled"
+ else:
+ exc = record.execution_task.exception()
+ if exc is not None:
+ record.status = "failed"
+
+
+class _HandlerError(Exception):
+ """Raised by :meth:`_ResponseOrchestrator.run_sync` when the handler raises.
+
+ Callers should catch this to convert it into an appropriate HTTP error
+ response without leaking orchestrator internals.
+ """
+
+ def __init__(self, original: Exception) -> None:
+ self.original = original
+ super().__init__(str(original))
+
+
+class _PipelineState:
+ """Mutable in-flight state for a single create-response invocation.
+
+ Intentionally separate from :class:`_ExecutionContext` (which is a pure
+ immutable per-request input value object). Created locally inside
+ :meth:`_ResponseOrchestrator._live_stream` and
+ :meth:`_ResponseOrchestrator.run_sync`, then threaded through every
+ internal helper so that the helpers are side-effect-free with respect
+ to ``_ExecutionContext``.
+ """
+
+ __slots__ = ("handler_events", "bg_record", "captured_error", "validator")
+
+ def __init__(self) -> None:
+ self.handler_events: list[generated_models.ResponseStreamEvent] = []
+ self.bg_record: ResponseExecution | None = None
+ self.captured_error: Exception | None = None
+ self.validator: EventStreamValidator = EventStreamValidator()
+
+
+class _ResponseOrchestrator: # pylint: disable=too-many-instance-attributes
+ """Event-pipeline orchestrator for the Responses API.
+
+ Handles the business logic for streaming, synchronous, and background
+ create-response requests: driving the handler iterator, normalising events,
+ managing the background execution record, and finalising persistent state.
+
+ This class has no dependency on Starlette types.
+ """
+
+ _TERMINAL_SSE_TYPES: frozenset[str] = frozenset(
+ {
+ EVENT_TYPE.RESPONSE_COMPLETED.value,
+ EVENT_TYPE.RESPONSE_FAILED.value,
+ EVENT_TYPE.RESPONSE_INCOMPLETE.value,
+ }
+ )
+
+ def __init__(
+ self,
+ *,
+ create_fn: Callable[..., AsyncIterator[generated_models.ResponseStreamEvent]],
+ runtime_state: _RuntimeState,
+ runtime_options: ResponsesServerOptions,
+ provider: ResponseProviderProtocol,
+ stream_provider: ResponseStreamProviderProtocol | None = None,
+ ) -> None:
+ """Initialise the orchestrator.
+
+ :param create_fn: The bound ``create_fn`` method from the registered handler.
+ :type create_fn: Callable[..., AsyncIterator[ResponseStreamEvent]]
+ :param runtime_state: In-memory execution record store.
+ :type runtime_state: _RuntimeState
+ :param runtime_options: Server runtime options (keep-alive, etc.).
+ :type runtime_options: ResponsesServerOptions
+ :param provider: Persistence provider for response envelopes and input items.
+ :type provider: ResponseProviderProtocol
+ :param stream_provider: Optional provider for SSE stream event persistence and replay.
+ :type stream_provider: ResponseStreamProviderProtocol | None
+ """
+ self._create_fn = create_fn
+ self._runtime_state = runtime_state
+ self._runtime_options = runtime_options
+ self._provider = provider
+ self._stream_provider = stream_provider
+
+ # ------------------------------------------------------------------
+ # Internal helpers (stream path)
+ # ------------------------------------------------------------------
+
+ async def _normalize_and_append(
+ self,
+ ctx: _ExecutionContext,
+ state: _PipelineState,
+ handler_event: generated_models.ResponseStreamEvent | dict[str, Any],
+ ) -> generated_models.ResponseStreamEvent:
+ """Coerce, validate, normalise, and append a handler event to the pipeline state.
+
+ Also propagates the event into the background record and its subject when active.
+ Raises ``ValueError`` on structural validation failure (B30) so that
+ :meth:`_process_handler_events` can emit ``response.failed`` (streaming)
+ or propagate as :class:`_HandlerError` (sync → HTTP 500).
+
+ :param ctx: Current execution context (immutable inputs).
+ :type ctx: _ExecutionContext
+ :param state: Mutable pipeline state for this invocation.
+ :type state: _PipelineState
+ :param handler_event: Raw event emitted by the handler.
+ :type handler_event: ResponseStreamEvent | dict[str, Any]
+ :return: The normalised event (``ResponseStreamEvent`` model instance).
+ :rtype: ResponseStreamEvent
+ :raises ValueError: If the coerced event fails structural validation (B30).
+ """
+ coerced = _coerce_handler_event(handler_event)
+ violation = _validate_handler_event(coerced)
+ if violation:
+ raise ValueError(violation)
+ normalized = _apply_stream_event_defaults(
+ coerced,
+ response_id=ctx.response_id,
+ agent_reference=ctx.agent_reference,
+ model=ctx.model,
+ sequence_number=len(state.handler_events),
+ agent_session_id=ctx.agent_session_id,
+ conversation_id=ctx.conversation_id,
+ )
+ state.handler_events.append(normalized)
+ state.validator.validate_next(normalized)
+ if state.bg_record is not None:
+ state.bg_record.apply_event(normalized, state.handler_events)
+ if state.bg_record.subject is not None:
+ await state.bg_record.subject.publish(normalized)
+ return normalized
+
+ @staticmethod
+ def _has_terminal_event(handler_events: list[generated_models.ResponseStreamEvent]) -> bool:
+ """Return ``True`` if any terminal event has been emitted.
+
+ :param handler_events: List of normalised handler events.
+ :type handler_events: list[ResponseStreamEvent]
+ :return: Whether a terminal event is present.
+ :rtype: bool
+ """
+ return any(e["type"] in _ResponseOrchestrator._TERMINAL_SSE_TYPES for e in handler_events)
+
+ async def _cancel_terminal_sse_dict(
+ self, ctx: _ExecutionContext, state: _PipelineState
+ ) -> generated_models.ResponseStreamEvent:
+ """Build, normalise, append, and return a cancel-terminal event.
+
+ Returns the normalised event (model instance) so that it can be consumed
+ by the shared :meth:`_process_handler_events` pipeline.
+
+ :param ctx: Current execution context (immutable inputs).
+ :type ctx: _ExecutionContext
+ :param state: Mutable pipeline state for this invocation.
+ :type state: _PipelineState
+ :return: Normalised cancel-terminal event.
+ :rtype: ResponseStreamEvent
+ """
+ cancel_event: dict[str, Any] = {
+ "type": EVENT_TYPE.RESPONSE_FAILED.value,
+ "response": _build_cancelled_response(ctx.response_id, ctx.agent_reference, ctx.model).as_dict(),
+ }
+ return await self._normalize_and_append(ctx, state, cancel_event)
+
+ async def _make_failed_event(
+ self, ctx: _ExecutionContext, state: _PipelineState
+ ) -> generated_models.ResponseStreamEvent:
+ """Build, normalise, append, and return a ``response.failed`` event.
+
+ Used for S-035 (handler exception after ``response.created``) and
+ S-015 (handler completed without emitting a terminal event).
+
+ :param ctx: Current execution context (immutable inputs).
+ :type ctx: _ExecutionContext
+ :param state: Mutable pipeline state for this invocation.
+ :type state: _PipelineState
+ :return: Normalised ``response.failed`` event.
+ :rtype: ResponseStreamEvent
+ """
+ failed_event: dict[str, Any] = {
+ "type": EVENT_TYPE.RESPONSE_FAILED.value,
+ "response": {
+ "id": ctx.response_id,
+ "object": "response",
+ "status": "failed",
+ "output": [],
+ "error": {"code": "server_error", "message": "An internal server error occurred."},
+ },
+ }
+ return await self._normalize_and_append(ctx, state, failed_event)
+
+ async def _register_bg_execution(
+ self, ctx: _ExecutionContext, state: _PipelineState, first_normalized: generated_models.ResponseStreamEvent
+ ) -> None:
+ """Create, seed, and register the background+stream execution record.
+
+ Called from :meth:`_process_handler_events` after the first event is
+ received. The record is seeded with ``first_normalized`` so that
+ subscribers joining mid-stream receive the full history.
+
+ :param ctx: Current execution context (immutable inputs).
+ :type ctx: _ExecutionContext
+ :param state: Mutable pipeline state for this invocation.
+ :type state: _PipelineState
+ :param first_normalized: The first normalised handler event.
+ :type first_normalized: ResponseStreamEvent
+ """
+ initial_payload = _extract_response_snapshot_from_events(
+ state.handler_events,
+ response_id=ctx.response_id,
+ agent_reference=ctx.agent_reference,
+ model=ctx.model,
+ agent_session_id=ctx.agent_session_id,
+ conversation_id=ctx.conversation_id,
+ )
+ initial_status = initial_payload.get("status")
+ if not isinstance(initial_status, str):
+ initial_status = "in_progress"
+ execution = ResponseExecution(
+ response_id=ctx.response_id,
+ mode_flags=ResponseModeFlags(stream=True, store=True, background=True),
+ status=initial_status,
+ input_items=deepcopy(ctx.input_items),
+ previous_response_id=ctx.previous_response_id,
+ cancel_signal=ctx.cancellation_signal,
+ )
+ execution.set_response_snapshot(generated_models.ResponseObject(initial_payload))
+ execution.subject = _ResponseEventSubject()
+ state.bg_record = execution
+ await state.bg_record.subject.publish(first_normalized)
+ await self._runtime_state.add(execution)
+ if ctx.store:
+ _isolation = ctx.context.isolation if ctx.context else None
+ _initial_response_obj = generated_models.ResponseObject(initial_payload)
+ _history_ids = (
+ await self._provider.get_history_item_ids(
+ ctx.previous_response_id,
+ None,
+ self._runtime_options.default_fetch_history_count,
+ isolation=_isolation,
+ )
+ if ctx.previous_response_id
+ else None
+ )
+ await self._provider.create_response(
+ _initial_response_obj, ctx.input_items or None, _history_ids, isolation=_isolation
+ )
+
+ async def _process_handler_events(
+ self,
+ ctx: _ExecutionContext,
+ state: _PipelineState,
+ handler_iterator: AsyncIterator[generated_models.ResponseStreamEvent],
+ ) -> AsyncIterator[generated_models.ResponseStreamEvent]:
+ """Shared event pipeline: coerce → normalise → apply_event → subject publish.
+
+ This async generator is the single authoritative event pipeline consumed by
+ both :meth:`_live_stream` (streaming) and :meth:`run_sync` (synchronous).
+ It handles:
+
+ - Empty handler (``StopAsyncIteration`` before the first event): synthesises
+ a full lifecycle event sequence and yields it.
+ - Pre-creation handler exception (B8): yields a standalone ``error`` event
+ and sets ``state.captured_error``.
+ - First-event normalisation and bg+store record registration
+ (:meth:`_register_bg_execution`).
+ - Remaining events via :meth:`_normalize_and_append`.
+ - Post-creation handler exception (S-035): yields a ``response.failed`` event
+ and sets ``state.captured_error``.
+ - Missing terminal after successful handler completion (S-015): yields a
+ ``response.failed`` event without setting ``state.captured_error`` so that
+ synchronous callers can return HTTP 200 with a ``"failed"`` body.
+ - Cancellation winddown (B11): yields a cancel-terminal event when the
+ cancellation signal is set and no terminal event was emitted.
+
+ :param ctx: Current execution context (immutable inputs).
+ :type ctx: _ExecutionContext
+ :param state: Mutable pipeline state for this invocation.
+ :type state: _PipelineState
+ :param handler_iterator: Async generator returned by the handler's
+ ``create_fn`` factory.
+ :type handler_iterator: AsyncIterator[ResponseStreamEvent]
+ :return: Async iterator of normalised events (``ResponseStreamEvent`` model instances).
+ :rtype: AsyncIterator[ResponseStreamEvent]
+ """
+ # --- First event ---
+ try:
+ first_raw = await handler_iterator.__anext__()
+ except StopAsyncIteration:
+ # Handler yielded nothing: synthesise fallback lifecycle events.
+ fallback_events = _build_events(
+ ctx.response_id,
+ include_progress=True,
+ agent_reference=ctx.agent_reference,
+ model=ctx.model,
+ )
+ for event in fallback_events:
+ state.handler_events.append(event)
+ yield event
+ return
+ except asyncio.CancelledError:
+ # S-024: Known cancellation before first event.
+ if ctx.cancellation_signal.is_set():
+ state.captured_error = asyncio.CancelledError()
+ yield construct_event_model(
+ {
+ "type": "error",
+ "message": "An internal server error occurred.",
+ "param": None,
+ "code": None,
+ "sequence_number": 0,
+ }
+ )
+ return
+ # Unknown CancelledError (e.g. event-loop teardown) — re-raise.
+ raise
+ except Exception as exc: # pylint: disable=broad-exception-caught
+ # B8: Pre-creation error → emit a standalone `error` event only.
+ # No response.created precedes it; this is the contract-mandated shape.
+ logger.error(
+ "Handler raised before response.created (response_id=%s)",
+ ctx.response_id,
+ exc_info=exc,
+ )
+ state.captured_error = exc
+ yield construct_event_model(
+ {
+ "type": "error",
+ "message": "An internal server error occurred.",
+ "param": None,
+ "code": None,
+ "sequence_number": 0,
+ }
+ )
+ return
+
+ # Normalise the first event manually (before _normalize_and_append so we
+ # can set up the bg record with the correct sequence number).
+ first_coerced = _coerce_handler_event(first_raw)
+
+ # B30: structural validation of the first event.
+ b30_violation = _validate_handler_event(first_coerced)
+ if b30_violation:
+ logger.error(
+ "Handler event structure violation (response_id=%s): %s",
+ ctx.response_id,
+ b30_violation,
+ )
+ state.captured_error = ValueError(b30_violation)
+ yield construct_event_model(
+ {
+ "type": "error",
+ "message": "An internal server error occurred.",
+ "param": None,
+ "code": None,
+ "sequence_number": 0,
+ }
+ )
+ return
+
+ first_normalized = _apply_stream_event_defaults(
+ first_coerced,
+ response_id=ctx.response_id,
+ agent_reference=ctx.agent_reference,
+ model=ctx.model,
+ sequence_number=len(state.handler_events),
+ agent_session_id=ctx.agent_session_id,
+ conversation_id=ctx.conversation_id,
+ )
+
+ # FR-006/FR-007: first-event contract validation.
+ # Violations are treated the same as B8 pre-creation errors:
+ # - streaming: yield a standalone 'error' event and return (no record created)
+ # - sync: state.captured_error is set → run_sync raises _HandlerError → HTTP 500
+ violation = _check_first_event_contract(first_normalized, ctx.response_id)
+ if violation:
+ logger.error(
+ "First-event contract violation (response_id=%s): %s",
+ ctx.response_id,
+ violation,
+ )
+ state.captured_error = RuntimeError(violation)
+ yield construct_event_model(
+ {
+ "type": "error",
+ "message": "An internal server error occurred.",
+ "param": None,
+ "code": None,
+ "sequence_number": 0,
+ }
+ )
+ return
+
+ state.handler_events.append(first_normalized)
+ state.validator.validate_next(first_normalized)
+
+ # FR-008a: output manipulation detection on response.created.
+ # If the handler directly added items to response.output instead of
+ # using builder events, the output list will be non-empty.
+ created_response = first_normalized.get("response") or {}
+ created_output = created_response.get("output")
+ if isinstance(created_output, list) and len(created_output) != 0:
+ _fr008a_msg = (
+ f"Handler directly modified Response.Output "
+ f"(found {len(created_output)} items, expected 0). "
+ f"Use output builder events instead."
+ )
+ logger.error(
+ "Output manipulation detected (response_id=%s): %s",
+ ctx.response_id,
+ _fr008a_msg,
+ )
+ state.captured_error = ValueError(_fr008a_msg)
+ yield await self._make_failed_event(ctx, state)
+ return
+
+ # bg+store: create and register the execution record after the first event.
+ if ctx.background and ctx.store:
+ await self._register_bg_execution(ctx, state, first_normalized)
+
+ yield first_normalized
+
+ # --- Remaining events ---
+ output_item_count = 0
+ try:
+ async for raw in _iter_with_winddown(handler_iterator, ctx.cancellation_signal):
+ # FR-008a: Pre-check for output manipulation BEFORE validation.
+ # Must inspect the raw event first so that an offending terminal
+ # event (e.g. response.completed with manipulated output) is NOT
+ # appended to the state machine before we emit response.failed.
+ _pre_coerced = _coerce_handler_event(raw)
+ _pre_type = _pre_coerced.get("type", "")
+ if _pre_type == EVENT_TYPE.RESPONSE_OUTPUT_ITEM_ADDED.value:
+ output_item_count += 1
+ if _pre_type in _RESPONSE_SNAPSHOT_TYPES:
+ _pre_response = _pre_coerced.get("response") or {}
+ _pre_output = _pre_response.get("output")
+ if isinstance(_pre_output, list) and len(_pre_output) > output_item_count:
+ _fr008a_msg = (
+ f"Output item count mismatch "
+ f"({len(_pre_output)} vs {output_item_count} output_item.added events)"
+ )
+ logger.error(
+ "Output manipulation detected (response_id=%s): %s",
+ ctx.response_id,
+ _fr008a_msg,
+ )
+ state.captured_error = ValueError(_fr008a_msg)
+ yield await self._make_failed_event(ctx, state)
+ return
+
+ normalized = await self._normalize_and_append(ctx, state, raw)
+ yield normalized
+ except asyncio.CancelledError:
+ # S-024: Known cancellation — emit cancel terminal.
+ if ctx.cancellation_signal.is_set():
+ if not self._has_terminal_event(state.handler_events):
+ yield await self._cancel_terminal_sse_dict(ctx, state)
+ return
+ # Unknown CancelledError (e.g. event-loop teardown) — re-raise.
+ raise
+ except Exception as exc: # pylint: disable=broad-exception-caught
+ logger.error(
+ "Handler raised after response.created (response_id=%s)",
+ ctx.response_id,
+ exc_info=exc,
+ )
+ state.captured_error = exc
+ # S-035: emit response.failed when handler raises after response.created.
+ if not self._has_terminal_event(state.handler_events):
+ yield await self._make_failed_event(ctx, state)
+ return
+
+ # B11: cancellation winddown checked BEFORE S-015 so that a handler
+ # stopped early by the cancellation signal receives a proper cancel
+ # terminal event (response.failed with status == "cancelled") rather
+ # than a generic S-015 failure terminal.
+ if ctx.cancellation_signal.is_set() and not self._has_terminal_event(state.handler_events):
+ yield await self._cancel_terminal_sse_dict(ctx, state)
+ return
+
+ # S-015: handler completed normally but never emitted a terminal event.
+ # NOTE: state.captured_error intentionally left None so that synchronous
+ # callers return HTTP 200 with a "failed" body rather than HTTP 500.
+ if not self._has_terminal_event(state.handler_events):
+ yield await self._make_failed_event(ctx, state)
+
+ async def _finalize_stream(self, ctx: _ExecutionContext, state: _PipelineState) -> None:
+ """Persist state and complete the subject for a streaming response.
+
+ Unified finalizer for both background and non-background streaming
+ paths, called from the ``finally`` block of :meth:`_live_stream`.
+
+ When a background execution record already exists (``state.bg_record``,
+ created at ``response.created`` time by :meth:`_register_bg_execution`),
+ the record is updated in place and persisted via ``update_response``.
+ Otherwise — for non-background streams, or background streams where no
+ record was created (empty handler, pre-creation errors, first-event
+ contract violations) — a new record is created and persisted via
+ ``create_response``.
+
+ :param ctx: Current execution context (immutable inputs).
+ :type ctx: _ExecutionContext
+ :param state: Mutable pipeline state for this invocation.
+ :type state: _PipelineState
+ """
+ # --- Path A: BG with pre-existing record (normal bg+stream completion) ---
+ if ctx.background and ctx.store and state.bg_record is not None:
+ record = state.bg_record
+
+ # B11: When status is already "cancelled" (set by the cancel endpoint),
+ # skip snapshot/status update — cancellation always wins. But still
+ # persist the cancelled state and complete the subject below.
+ if record.status != "cancelled":
+ events = (
+ state.handler_events
+ if state.handler_events
+ else _build_events(
+ ctx.response_id,
+ include_progress=True,
+ agent_reference=ctx.agent_reference,
+ model=ctx.model,
+ )
+ )
+ if state.captured_error is not None:
+ record.set_response_snapshot(
+ _build_failed_response(
+ ctx.response_id,
+ ctx.agent_reference,
+ ctx.model,
+ created_at=ctx.context.created_at,
+ )
+ )
+ record.transition_to("failed")
+ else:
+ response_payload = _extract_response_snapshot_from_events(
+ events,
+ response_id=ctx.response_id,
+ agent_reference=ctx.agent_reference,
+ model=ctx.model,
+ agent_session_id=ctx.agent_session_id,
+ conversation_id=ctx.conversation_id,
+ )
+ resolved_status = response_payload.get("status")
+ status = resolved_status if isinstance(resolved_status, str) else "in_progress"
+ record.set_response_snapshot(generated_models.ResponseObject(response_payload))
+ record.transition_to(status) # type: ignore[arg-type]
+
+ # Persist terminal state update via provider (bg+stream: initial create already done).
+ # Always persist — including cancelled state — so the durable store
+ # reflects the final status.
+ if record.mode_flags.store and record.response is not None:
+ try:
+ _isolation = ctx.context.isolation if ctx.context else None
+ await self._provider.update_response(record.response, isolation=_isolation)
+ except Exception: # pylint: disable=broad-exception-caught
+ logger.warning(
+ "Best-effort provider update failed at stream finalization (response_id=%s)",
+ ctx.response_id,
+ exc_info=True,
+ )
+ # Persist SSE events for replay after process restart (not needed for cancelled)
+ if record.status != "cancelled" and self._stream_provider is not None and state.handler_events:
+ try:
+ await self._stream_provider.save_stream_events(
+ ctx.response_id, state.handler_events, isolation=_isolation
+ )
+ except Exception: # pylint: disable=broad-exception-caught
+ logger.warning(
+ "Best-effort stream event persistence failed (response_id=%s)",
+ ctx.response_id,
+ exc_info=True,
+ )
+
+ ctx.span.end(state.captured_error)
+ # Complete the subject — signals all live SSE replay subscribers that
+ # the stream has ended.
+ if record.subject is not None:
+ try:
+ await record.subject.complete()
+ except Exception: # pylint: disable=broad-exception-caught
+ pass # best effort
+ return
+
+ # --- Path B: No pre-existing record ---
+ # Covers non-background streams and background streams where no record
+ # was created (empty handler fallback, pre-creation errors, first-event
+ # contract violations).
+ events = (
+ state.handler_events
+ if state.handler_events
+ else _build_events(
+ ctx.response_id,
+ include_progress=True,
+ agent_reference=ctx.agent_reference,
+ model=ctx.model,
+ )
+ )
+ response_payload = _extract_response_snapshot_from_events(
+ events,
+ response_id=ctx.response_id,
+ agent_reference=ctx.agent_reference,
+ model=ctx.model,
+ agent_session_id=ctx.agent_session_id,
+ conversation_id=ctx.conversation_id,
+ )
+ resolved_status = response_payload.get("status")
+ status = resolved_status if isinstance(resolved_status, str) else "completed"
+
+ # Always register in runtime state so cancel/GET return correct status codes.
+ replay_subject: _ResponseEventSubject | None = None
+ if ctx.store:
+ replay_subject = _ResponseEventSubject()
+ for _evt in events:
+ await replay_subject.publish(_evt)
+ await replay_subject.complete()
+
+ execution = ResponseExecution(
+ response_id=ctx.response_id,
+ mode_flags=ResponseModeFlags(stream=True, store=ctx.store, background=ctx.background),
+ status=status,
+ subject=replay_subject,
+ input_items=deepcopy(ctx.input_items),
+ previous_response_id=ctx.previous_response_id,
+ cancel_signal=ctx.cancellation_signal if ctx.background else None,
+ )
+ execution.set_response_snapshot(generated_models.ResponseObject(response_payload))
+ await self._runtime_state.add(execution)
+
+ if ctx.store:
+ try:
+ _isolation = ctx.context.isolation if ctx.context else None
+ _history_ids = (
+ await self._provider.get_history_item_ids(
+ ctx.previous_response_id,
+ None,
+ self._runtime_options.default_fetch_history_count,
+ isolation=_isolation,
+ )
+ if ctx.previous_response_id
+ else None
+ )
+ await self._provider.create_response(
+ generated_models.ResponseObject(response_payload),
+ ctx.input_items or None,
+ _history_ids,
+ isolation=_isolation,
+ )
+ except Exception: # pylint: disable=broad-exception-caught
+ logger.warning(
+ "Best-effort provider create failed at stream finalization (response_id=%s)",
+ ctx.response_id,
+ exc_info=True,
+ )
+
+ ctx.span.end(state.captured_error)
+
+ # ------------------------------------------------------------------
+ # Public execution methods
+ # ------------------------------------------------------------------
+
+ def run_stream(self, ctx: _ExecutionContext) -> AsyncIterator[str]:
+ """Return an async iterator of SSE-encoded strings for a streaming request.
+
+ The iterator handles:
+
+ - Pre-creation errors (B8 contract: standalone ``error`` SSE event).
+ - Empty handler (fallback synthesised events).
+ - Mid-stream handler errors (``response.failed`` SSE event, S-035).
+ - Cancellation terminal events.
+ - Optional SSE keep-alive comments.
+
+ :param ctx: Current execution context.
+ :type ctx: _ExecutionContext
+ :return: Async iterator of SSE strings.
+ :rtype: AsyncIterator[str]
+ """
+ return self._live_stream(ctx)
+
+ async def _live_stream(self, ctx: _ExecutionContext) -> AsyncIterator[str]:
+ """Drive the SSE streaming pipeline using the shared event pipeline.
+
+ Delegates all event processing (first-event handling, normalisation,
+ bg record registration, S-035 / S-015 / B11 terminal events) to
+ :meth:`_process_handler_events`. This method only encodes each event
+ dict to SSE and handles keep-alive comment injection.
+
+ :param ctx: Current execution context.
+ :type ctx: _ExecutionContext
+ :returns: Async iterator of SSE-encoded strings.
+ :rtype: AsyncIterator[str]
+ """
+ new_stream_counter()
+ state = _PipelineState()
+ handler_iterator = self._create_fn(ctx.parsed, ctx.context, ctx.cancellation_signal)
+
+ # Helper: route to the right finalize method based on the request semantics
+ # (bg+store → bg_stream path; everything else → non_bg_stream path).
+ # NOTE: state.bg_record may be None for bg+stream when the handler yields no
+ # events (fallback path in _process_handler_events); _finalize_bg_stream
+ # handles that case by creating the record itself.
+ async def _finalize() -> None:
+ await self._finalize_stream(ctx, state)
+
+ # --- Fast path: no keep-alive ---
+ if not self._runtime_options.sse_keep_alive_enabled:
+ if not (ctx.background and ctx.store):
+ # Simple fast path for non-background streaming.
+ try:
+ async for event in self._process_handler_events(ctx, state, handler_iterator):
+ yield encode_sse_any_event(event)
+ finally:
+ await _finalize()
+ return
+
+ # Background+stream without keep-alive: run the handler as an independent
+ # asyncio.Task so that finalization (including subject.complete()) is
+ # guaranteed to run even when the original SSE connection is dropped before
+ # all events are delivered. Without this, _live_stream can be abandoned
+ # mid-iteration by Starlette (the async-generator finalizer may not fire
+ # promptly), leaving GET-replay subscribers blocked on await q.get() forever.
+ _SENTINEL_BG = object()
+ bg_queue: asyncio.Queue[object] = asyncio.Queue()
+
+ async def _bg_producer_inner() -> None:
+ try:
+ async for event in self._process_handler_events(ctx, state, handler_iterator):
+ await bg_queue.put(encode_sse_any_event(event))
+ except Exception as exc: # pylint: disable=broad-exception-caught
+ logger.error(
+ "Background stream producer failed (response_id=%s)",
+ ctx.response_id,
+ exc_info=exc,
+ )
+ state.captured_error = exc
+ finally:
+ # Always finalize (includes subject.complete()) — this runs even if
+ # the original POST SSE connection was dropped and _live_stream is
+ # never properly closed by Starlette.
+ await _finalize()
+ await bg_queue.put(_SENTINEL_BG)
+
+ async def _bg_producer() -> None:
+ try:
+ # FR-013: Shield the inner producer via asyncio.shield so
+ # that Starlette's anyio cancel-scope cancellation (triggered
+ # by client disconnect) does NOT propagate into the handler.
+ # asyncio.shield() creates a new inner Task whose cancellation
+ # is independent of the outer task.
+ await asyncio.shield(_bg_producer_inner())
+ except asyncio.CancelledError:
+ pass # outer task cancelled by scope; inner task continues
+
+ bg_task = asyncio.create_task(_bg_producer())
+ try:
+ while True:
+ item = await bg_queue.get()
+ if item is _SENTINEL_BG:
+ break
+ yield item # type: ignore[misc]
+ except Exception: # pylint: disable=broad-exception-caught
+ pass # SSE connection dropped; bg_task continues independently
+ finally:
+ # Wait for the handler task so _finalize() has run before we exit.
+ # Do NOT cancel it — background+stream must reach a terminal state
+ # regardless of client connectivity.
+ if not bg_task.done():
+ try:
+ await bg_task
+ except Exception: # pylint: disable=broad-exception-caught
+ pass
+ return
+
+ # --- Keep-alive path: merge handler events with periodic keep-alive comments ---
+ # via a shared asyncio.Queue so comments are sent even while the handler is idle.
+ _SENTINEL = object()
+ merge_queue: asyncio.Queue[str | object] = asyncio.Queue()
+
+ async def _handler_producer() -> None:
+ try:
+ async for event in self._process_handler_events(ctx, state, handler_iterator):
+ await merge_queue.put(encode_sse_any_event(event))
+ finally:
+ await merge_queue.put(_SENTINEL)
+
+ async def _keep_alive_producer(interval: int) -> None:
+ try:
+ while True:
+ await asyncio.sleep(interval)
+ await merge_queue.put(encode_keep_alive_comment())
+ except asyncio.CancelledError:
+ return
+
+ handler_task = asyncio.create_task(_handler_producer())
+ keep_alive_task = asyncio.create_task(
+ _keep_alive_producer(self._runtime_options.sse_keep_alive_interval_seconds) # type: ignore[arg-type]
+ )
+
+ try:
+ while True:
+ item = await merge_queue.get()
+ if item is _SENTINEL:
+ break
+ yield item # type: ignore[misc]
+ except Exception as exc: # pylint: disable=broad-exception-caught
+ logger.error(
+ "Stream consumer failed (response_id=%s)",
+ ctx.response_id,
+ exc_info=exc,
+ )
+ state.captured_error = exc
+ finally:
+ keep_alive_task.cancel()
+ try:
+ await keep_alive_task
+ except asyncio.CancelledError:
+ pass
+ # Ensure the handler task has finished before finalising
+ if not handler_task.done():
+ handler_task.cancel()
+ try:
+ await handler_task
+ except asyncio.CancelledError:
+ pass
+ await _finalize()
+
+ async def run_sync(self, ctx: _ExecutionContext) -> dict[str, Any]:
+ """Execute a synchronous (non-stream, non-background) create-response request.
+
+ Delegates event processing to :meth:`_process_handler_events`, which
+ handles all error paths. This method collects the accumulated events,
+ builds the response snapshot, optionally persists the record, closes
+ the span, and returns the snapshot dict.
+
+ Raises :class:`_HandlerError` if the handler raises (B8 or S-035) so
+ the caller can map it to an HTTP 500 response. S-015 (handler
+ completed without emitting a terminal event) does *not* raise; instead
+ the snapshot status is ``"failed"`` and HTTP 200 is returned.
+
+ :param ctx: Current execution context.
+ :type ctx: _ExecutionContext
+ :return: Response snapshot dictionary.
+ :rtype: dict[str, Any]
+ :raises _HandlerError: If the handler raises during iteration.
+ """
+ state = _PipelineState()
+ handler_iterator = self._create_fn(ctx.parsed, ctx.context, ctx.cancellation_signal)
+ # _process_handler_events handles all error paths (B8, S-035, S-015, B11).
+ # run_sync only needs to exhaust the generator for state.handler_events side-effects.
+ async for _ in self._process_handler_events(ctx, state, handler_iterator):
+ pass
+
+ if state.captured_error is not None:
+ # Only raise _HandlerError for pre-creation errors (B8) where no
+ # terminal lifecycle event has been emitted. Post-creation errors
+ # (S-035, FR-008a) emit response.failed and should complete as
+ # HTTP 200 with failed status — not an HTTP 500.
+ if not self._has_terminal_event(state.handler_events):
+ ctx.span.end(state.captured_error)
+ raise _HandlerError(state.captured_error) from state.captured_error
+
+ events = (
+ state.handler_events
+ if state.handler_events
+ else _build_events(
+ ctx.response_id,
+ include_progress=True,
+ agent_reference=ctx.agent_reference,
+ model=ctx.model,
+ )
+ )
+ response_payload = _extract_response_snapshot_from_events(
+ events,
+ response_id=ctx.response_id,
+ agent_reference=ctx.agent_reference,
+ model=ctx.model,
+ remove_sequence_number=True,
+ agent_session_id=ctx.agent_session_id,
+ conversation_id=ctx.conversation_id,
+ )
+ resolved_status = response_payload.get("status")
+ status = resolved_status if isinstance(resolved_status, str) else "completed"
+
+ record = ResponseExecution(
+ response_id=ctx.response_id,
+ mode_flags=ResponseModeFlags(stream=False, store=ctx.store, background=False),
+ status=status,
+ input_items=deepcopy(ctx.input_items),
+ previous_response_id=ctx.previous_response_id,
+ response_context=ctx.context,
+ )
+ record.set_response_snapshot(generated_models.ResponseObject(response_payload))
+
+ # Always register in runtime state so that cancel/GET can find the record
+ # and return the correct status code (e.g., 400 for non-bg cancel).
+ # Always register so cancel/GET can find this record.
+ await self._runtime_state.add(record)
+
+ if ctx.store:
+ # Persist via provider (non-bg sync: single create at terminal state)
+ try:
+ _isolation = ctx.context.isolation if ctx.context else None
+ _response_obj = generated_models.ResponseObject(response_payload)
+ _history_ids = (
+ await self._provider.get_history_item_ids(
+ ctx.previous_response_id,
+ None,
+ self._runtime_options.default_fetch_history_count,
+ isolation=_isolation,
+ )
+ if ctx.previous_response_id
+ else None
+ )
+ await self._provider.create_response(
+ _response_obj,
+ ctx.input_items or None,
+ _history_ids,
+ isolation=_isolation,
+ )
+ except Exception: # pylint: disable=broad-exception-caught
+ logger.warning(
+ "Best-effort provider create failed in sync path (response_id=%s)",
+ ctx.response_id,
+ exc_info=True,
+ )
+
+ ctx.span.end(None)
+ return _RuntimeState.to_snapshot(record)
+
+ async def run_background(self, ctx: _ExecutionContext) -> dict[str, Any]:
+ """Handle a background (non-stream) create-response request.
+
+ Launches the handler as an asyncio task, waits for the handler to
+ emit ``response.created``, then returns the in_progress snapshot.
+ The POST blocks until the handler's first event is processed
+ (the ``ResponseCreatedSignal`` pattern).
+
+ :param ctx: Current execution context.
+ :type ctx: _ExecutionContext
+ :return: Response snapshot dictionary (status: in_progress).
+ :rtype: dict[str, Any]
+ :raises _HandlerError: If the handler fails before emitting ``response.created``.
+ """
+ record = ResponseExecution(
+ response_id=ctx.response_id,
+ mode_flags=ResponseModeFlags(stream=False, store=ctx.store, background=True),
+ status="in_progress",
+ input_items=deepcopy(ctx.input_items),
+ previous_response_id=ctx.previous_response_id,
+ response_context=ctx.context,
+ cancel_signal=ctx.cancellation_signal,
+ initial_model=ctx.model,
+ initial_agent_reference=ctx.agent_reference,
+ )
+
+ # Register so GET can observe in-flight state
+ await self._runtime_state.add(record)
+
+ # Launch handler immediately (S-003: handler runs asynchronously)
+ # Use anyio.CancelScope(shield=True) + suppress CancelledError so the
+ # background task is NOT cancelled when the HTTP request scope exits
+ # (anyio structured concurrency). The shielded scope ensures the handler
+ # runs to completion; catching CancelledError prevents the Task from being
+ # marked as cancelled, so _refresh_background_status reads the real status.
+ async def _shielded_runner() -> None:
+ try:
+ with anyio.CancelScope(shield=True):
+ await _run_background_non_stream(
+ create_fn=self._create_fn,
+ parsed=ctx.parsed,
+ context=ctx.context,
+ cancellation_signal=ctx.cancellation_signal,
+ record=record,
+ response_id=ctx.response_id,
+ agent_reference=ctx.agent_reference,
+ model=ctx.model,
+ provider=self._provider,
+ store=ctx.store,
+ agent_session_id=ctx.agent_session_id,
+ conversation_id=ctx.conversation_id,
+ history_limit=self._runtime_options.default_fetch_history_count,
+ )
+ except asyncio.CancelledError:
+ pass # event-loop teardown in TestClient; background work already done
+
+ record.execution_task = asyncio.create_task(_shielded_runner())
+
+ # Wait for handler to emit response.created (or fail).
+ # Wait for handler to signal response.created (or fail).
+ await record.response_created_signal.wait()
+
+ # If handler failed before emitting any events, return the failed
+ # snapshot (status: failed). Background POST always returns 200 —
+ # the failure is reflected in the response status, not the HTTP code.
+ if record.response_failed_before_events:
+ ctx.span.end(RuntimeError("Handler failed before response.created"))
+ return _RuntimeState.to_snapshot(record)
+
+ ctx.span.end(None)
+ return _RuntimeState.to_snapshot(record)
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/hosting/_request_parsing.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/hosting/_request_parsing.py
new file mode 100644
index 000000000000..9a85684d828e
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/hosting/_request_parsing.py
@@ -0,0 +1,326 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Request pre-validation, identity resolution, and input extraction helpers."""
+
+from __future__ import annotations
+
+import uuid
+from copy import deepcopy
+from typing import Any, Mapping
+
+from .._id_generator import IdGenerator
+from ..models._generated import AgentReference, CreateResponse
+from ..models.errors import RequestValidationError
+
+_X_AGENT_RESPONSE_ID_HEADER = "x-agent-response-id"
+
+_DEFAULT_AGENT_REFERENCE_NAME = "server-default-agent"
+
+
+def _extract_input_items(raw_payload: dict[str, Any]) -> list[dict[str, Any]]:
+ """Extract and deep-copy the ``input`` items array from a raw request payload.
+
+ :param raw_payload: Raw decoded JSON request body.
+ :type raw_payload: dict[str, Any]
+ :return: List of deep-copied input item dictionaries, or empty list if absent.
+ :rtype: list[dict[str, Any]]
+ """
+ if not isinstance(raw_payload, dict):
+ return []
+
+ value = raw_payload.get("input")
+ if not isinstance(value, list):
+ return []
+
+ extracted: list[dict[str, Any]] = []
+ for item in value:
+ if isinstance(item, dict):
+ extracted.append(deepcopy(item))
+ return extracted
+
+
+def _extract_previous_response_id(raw_payload: dict[str, Any]) -> str | None:
+ """Extract the ``previous_response_id`` string from a raw request payload.
+
+ :param raw_payload: Raw decoded JSON request body.
+ :type raw_payload: dict[str, Any]
+ :return: The previous response ID string, or ``None`` if absent or invalid.
+ :rtype: str | None
+ """
+ if not isinstance(raw_payload, dict):
+ return None
+ value = raw_payload.get("previous_response_id")
+ return value if isinstance(value, str) and value else None
+
+
+def _extract_item_id(item: dict[str, Any]) -> str | None:
+ """Extract the ``id`` field from an input item dictionary.
+
+ :param item: An input item dictionary.
+ :type item: dict[str, Any]
+ :return: The item ID as a string, or ``None`` if not present.
+ :rtype: str | None
+ """
+ value = item.get("id")
+ return str(value) if value is not None else None
+
+
+def _apply_item_cursors(items: list[dict[str, Any]], *, after: str | None, before: str | None) -> list[dict[str, Any]]:
+ """Apply cursor-based pagination to a list of input items.
+
+ :param items: Ordered list of input item dictionaries.
+ :type items: list[dict[str, Any]]
+ :keyword after: Item ID to start after (exclusive lower bound), or ``None``.
+ :keyword type after: str | None
+ :keyword before: Item ID to stop before (exclusive upper bound), or ``None``.
+ :keyword type before: str | None
+ :return: The subset of items within the cursor window.
+ :rtype: list[dict[str, Any]]
+ """
+ scoped = items
+
+ if after is not None:
+ after_index = next((index for index, item in enumerate(scoped) if _extract_item_id(item) == after), None)
+ if after_index is not None:
+ scoped = scoped[after_index + 1 :]
+
+ if before is not None:
+ before_index = next((index for index, item in enumerate(scoped) if _extract_item_id(item) == before), None)
+ if before_index is not None:
+ scoped = scoped[:before_index]
+
+ return scoped
+
+
+def _validate_response_id(response_id: str) -> None:
+ """Validate that a response ID matches the expected canonical format.
+
+ :param response_id: The response ID string to validate.
+ :type response_id: str
+ :return: None
+ :rtype: None
+ :raises RequestValidationError: If the ID format is invalid.
+ """
+ is_valid_id, _ = IdGenerator.is_valid(response_id)
+ if not is_valid_id:
+ raise RequestValidationError(
+ "response_id must be in format caresp_<18-char partition key><32-char alphanumeric entropy>",
+ code="invalid_request",
+ param="response_id",
+ )
+
+
+def _normalize_agent_reference(value: Any) -> AgentReference | dict[str, Any]:
+ """Normalize an agent reference value into a validated model or empty dict.
+
+ If *value* is ``None``, an empty dict is returned as a sentinel for
+ "no agent_reference was provided". Callers use truthiness to detect
+ the sentinel and skip auto-stamping output items.
+
+ :param value: Raw agent reference from the request (dict, model, or ``None``).
+ :type value: Any
+ :return: An :class:`AgentReference` model instance,
+ or ``{}`` when no agent_reference was provided.
+ :rtype: AgentReference | dict[str, Any]
+ :raises RequestValidationError: If the value is not a valid agent reference.
+ """
+ if value is None:
+ return {}
+
+ if hasattr(value, "as_dict"):
+ candidate = value.as_dict()
+ elif isinstance(value, dict):
+ candidate = dict(value)
+ else:
+ raise RequestValidationError(
+ "agent_reference must be an object",
+ code="invalid_request",
+ param="agent_reference",
+ )
+
+ candidate.setdefault("type", "agent_reference")
+ name = candidate.get("name")
+ reference_type = candidate.get("type")
+
+ if reference_type != "agent_reference":
+ raise RequestValidationError(
+ "agent_reference.type must be 'agent_reference'",
+ code="invalid_request",
+ param="agent_reference.type",
+ )
+
+ if not isinstance(name, str) or not name.strip():
+ raise RequestValidationError(
+ "agent_reference.name must be a non-empty string",
+ code="invalid_request",
+ param="agent_reference.name",
+ )
+
+ candidate["name"] = name.strip()
+ return AgentReference(candidate)
+
+
+def _prevalidate_identity_payload(payload: dict[str, Any]) -> None:
+ """Pre-validate identity-related fields in the raw request payload.
+
+ Checks ``response_id`` format and ``agent_reference`` structure before full
+ model parsing, so that identity errors surface early.
+
+ :param payload: Raw decoded JSON request body.
+ :type payload: dict[str, Any]
+ :return: None
+ :rtype: None
+ :raises RequestValidationError: If identity fields are malformed.
+ """
+ if not isinstance(payload, dict):
+ return
+
+ raw_response_id = payload.get("response_id")
+ if raw_response_id is not None:
+ if not isinstance(raw_response_id, str) or not raw_response_id.strip():
+ raise RequestValidationError(
+ "response_id must be a non-empty string",
+ code="invalid_request",
+ param="response_id",
+ )
+ _validate_response_id(raw_response_id.strip())
+
+ raw_agent_reference = payload.get("agent_reference")
+ if raw_agent_reference is None:
+ return
+
+ if not isinstance(raw_agent_reference, dict):
+ raise RequestValidationError(
+ "agent_reference must be an object",
+ code="invalid_request",
+ param="agent_reference",
+ )
+
+ if raw_agent_reference.get("type") != "agent_reference":
+ raise RequestValidationError(
+ "agent_reference.type must be 'agent_reference'",
+ code="invalid_request",
+ param="agent_reference.type",
+ )
+
+ raw_name = raw_agent_reference.get("name")
+ if not isinstance(raw_name, str) or not raw_name.strip():
+ raise RequestValidationError(
+ "agent_reference.name must be a non-empty string",
+ code="invalid_request",
+ param="agent_reference.name",
+ )
+
+
+def _resolve_identity_fields(
+ parsed: CreateResponse,
+ *,
+ request_headers: Mapping[str, str] | None = None,
+) -> tuple[str, AgentReference | dict[str, Any]]:
+ """Resolve the response ID and agent reference from a parsed create request.
+
+ **B38 — Response ID Resolution**: If the incoming request includes an
+ ``x-agent-response-id`` HTTP header with a non-empty value, that value is
+ used as the response ID. Otherwise the library generates one using
+ ``IdGenerator.new_response_id()``, using the ``previous_response_id`` or
+ ``conversation`` ID as partition-key hint when available.
+
+ :param parsed: Parsed ``CreateResponse`` model instance.
+ :type parsed: CreateResponse
+ :keyword request_headers: HTTP request headers mapping.
+ :keyword type request_headers: Mapping[str, str] | None
+ :return: A tuple of ``(response_id, agent_reference)``. The agent reference
+ is an :class:`AgentReference` model when provided, or an empty ``dict``
+ sentinel when absent.
+ :rtype: tuple[str, AgentReference | dict[str, Any]]
+ :raises RequestValidationError: If the resolved response ID is invalid.
+ """
+ # B38: header override takes highest precedence
+ header_response_id: str | None = None
+ if request_headers is not None:
+ raw_header = request_headers.get(_X_AGENT_RESPONSE_ID_HEADER, "")
+ if isinstance(raw_header, str) and raw_header.strip():
+ header_response_id = raw_header.strip()
+
+ parsed_mapping = parsed.as_dict() if hasattr(parsed, "as_dict") else {}
+
+ if header_response_id:
+ response_id = header_response_id
+ else:
+ explicit_response_id = parsed_mapping.get("response_id") or getattr(parsed, "response_id", None)
+ if isinstance(explicit_response_id, str) and explicit_response_id.strip():
+ response_id = explicit_response_id.strip()
+ else:
+ # Use previous_response_id or conversation ID as partition key hint
+ # for co-locating related response IDs in the same partition.
+ # previous_response_id takes priority because it directly chains
+ # responses, while conversation ID groups them more loosely.
+ partition_hint = parsed_mapping.get("previous_response_id") or _resolve_conversation_id(parsed) or ""
+ response_id = IdGenerator.new_response_id(partition_hint)
+
+ _validate_response_id(response_id)
+ agent_reference = _normalize_agent_reference(
+ parsed_mapping.get("agent_reference")
+ if isinstance(parsed_mapping, dict)
+ else getattr(parsed, "agent_reference", None)
+ )
+ return response_id, agent_reference
+
+
+def _resolve_conversation_id(parsed: CreateResponse) -> str | None:
+ """Extract the conversation ID from a parsed ``CreateResponse`` request.
+
+ Handles both a plain string value and a ``ConversationParam_2`` object
+ (which carries the ID in its ``.id`` attribute).
+
+ :param parsed: The parsed ``CreateResponse`` model instance.
+ :type parsed: CreateResponse
+ :returns: The conversation ID string, or ``None`` if not present.
+ :rtype: str | None
+ """
+ raw = getattr(parsed, "conversation", None)
+ if isinstance(raw, str):
+ return raw or None
+ if isinstance(raw, dict):
+ cid = raw.get("id")
+ return str(cid) if cid else None
+ if raw is not None and hasattr(raw, "id"):
+ return str(raw.id) or None
+ return None
+
+
+def _resolve_session_id(parsed: CreateResponse, payload: dict[str, Any], *, env_session_id: str = "") -> str:
+ """Resolve the session ID for a create-response request.
+
+ **B39 — Session ID Resolution**: The library resolves ``agent_session_id``
+ using the following priority chain:
+
+ 1. ``request.agent_session_id`` — payload field (client-supplied session affinity)
+ 2. ``env_session_id`` — platform-supplied (from ``AgentConfig.session_id``)
+ 3. Generated UUID (freshly generated as a string)
+
+ :param parsed: Parsed ``CreateResponse`` model instance.
+ :type parsed: CreateResponse
+ :param payload: Raw JSON payload dict.
+ :type payload: dict[str, Any]
+ :keyword env_session_id: Platform-supplied session ID from ``AgentConfig``
+ (sourced from ``FOUNDRY_AGENT_SESSION_ID`` env var). Defaults to ``""``.
+ :keyword type env_session_id: str
+ :returns: The resolved session ID string.
+ :rtype: str
+ """
+ # Priority 1: payload field
+ session_id = getattr(parsed, "agent_session_id", None)
+ if not isinstance(session_id, str) or not session_id.strip():
+ # Also check the raw payload for when the field isn't in the model yet
+ if isinstance(payload, dict):
+ session_id = payload.get("agent_session_id")
+ if isinstance(session_id, str) and session_id.strip():
+ return session_id.strip()
+
+ # Priority 2: platform-supplied session ID
+ if env_session_id.strip():
+ return env_session_id.strip()
+
+ # Priority 3: generated UUID
+ return str(uuid.uuid4())
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/hosting/_routing.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/hosting/_routing.py
new file mode 100644
index 000000000000..2ae95de0c1ae
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/hosting/_routing.py
@@ -0,0 +1,301 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Responses protocol host for Azure AI Hosted Agents.
+
+Provides the Responses API endpoints and handler decorators
+as a :class:`~azure.ai.agentserver.core.AgentServerHost` subclass.
+"""
+
+from __future__ import annotations
+
+import asyncio # pylint: disable=do-not-import-asyncio
+import logging
+import types
+from collections.abc import AsyncIterable, Generator
+from typing import Any, AsyncIterator, Callable, Optional, Union
+
+from starlette.routing import Route
+
+from azure.ai.agentserver.core import AgentServerHost, build_server_version
+
+from .._options import ResponsesServerOptions
+from .._response_context import ResponseContext
+from .._version import VERSION as _RESPONSES_VERSION
+from ..models._generated import CreateResponse, ResponseStreamEvent
+from ..store._base import ResponseProviderProtocol, ResponseStreamProviderProtocol
+from ..store._memory import InMemoryResponseProvider
+from ._endpoint_handler import _ResponseEndpointHandler
+from ._orchestrator import _ResponseOrchestrator
+from ._runtime_state import _RuntimeState
+
+CreateHandlerFn = Callable[
+ [CreateResponse, ResponseContext, asyncio.Event],
+ Union[
+ AsyncIterable[Union[ResponseStreamEvent, dict[str, Any]]],
+ Generator[Union[ResponseStreamEvent, dict[str, Any]], Any, None],
+ ],
+]
+"""Type alias for the user-registered create-response handler function.
+
+The handler receives:
+- ``request``: The parsed :class:`CreateResponse` model.
+- ``context``: The :class:`ResponseContext` for the current request.
+- ``cancellation_signal``: An :class:`asyncio.Event` set when cancellation is requested.
+
+It must return one of:
+- A ``TextResponse`` for text-only responses (it implements ``AsyncIterable``).
+- An ``AsyncIterable`` (async generator) of :class:`ResponseStreamEvent` instances.
+- A synchronous ``Generator`` of :class:`ResponseStreamEvent` instances.
+"""
+
+logger = logging.getLogger("azure.ai.agentserver")
+
+
+async def _sync_to_async_gen(sync_gen: types.GeneratorType) -> AsyncIterator:
+ """Wrap a synchronous generator into an async generator."""
+ for item in sync_gen:
+ yield item
+
+
+class ResponsesAgentServerHost(AgentServerHost):
+ """Responses protocol host for Azure AI Hosted Agents.
+
+ A :class:`~azure.ai.agentserver.core.AgentServerHost` subclass that adds
+ the Responses API endpoints. Use the :meth:`create_handler` decorator
+ to wire a handler function to the create endpoint.
+
+ For multi-protocol agents, compose via cooperative inheritance::
+
+ class MyHost(InvocationAgentServerHost, ResponsesAgentServerHost):
+ pass
+
+ Usage::
+
+ from azure.ai.agentserver.responses import ResponsesAgentServerHost
+
+ app = ResponsesAgentServerHost()
+
+ @app.create_handler
+ def my_handler(request, context, cancellation_signal):
+ yield event
+
+ app.run()
+
+ :param prefix: Optional URL prefix for all response routes
+ (e.g. ``"/v1"``).
+ :type prefix: str
+ :param options: Optional runtime options for the responses server.
+ :type options: ResponsesServerOptions | None
+ :param provider: Optional persistence provider for response
+ envelopes and input items.
+ :type provider: ResponseProviderProtocol | None
+ """
+
+ _INSTRUMENTATION_SCOPE = "Azure.AI.AgentServer.Responses"
+
+ def __init__(
+ self,
+ *,
+ prefix: str = "",
+ options: ResponsesServerOptions | None = None,
+ provider: ResponseProviderProtocol | None = None,
+ **kwargs: Any,
+ ) -> None:
+ # Handler slot — populated via @app.create_handler decorator
+ self._create_fn: Optional[CreateHandlerFn] = None
+
+ # Normalize prefix
+ normalized_prefix = prefix.strip()
+ if normalized_prefix and not normalized_prefix.startswith("/"):
+ normalized_prefix = f"/{normalized_prefix}"
+ normalized_prefix = normalized_prefix.rstrip("/")
+
+ # Build internal components
+ runtime_options = options or ResponsesServerOptions()
+
+ # Resolve AgentConfig — used for Foundry auto-activation and
+ # merging platform env-vars (SSE keep-alive) into runtime options.
+ from azure.ai.agentserver.core._config import AgentConfig
+
+ config = AgentConfig.from_env()
+
+ # Merge SSE keep-alive from AgentConfig when the user hasn't
+ # explicitly set one via the options constructor. AgentConfig
+ # defaults to 0 (disabled) per spec; a positive value means the
+ # platform env var SSE_KEEPALIVE_INTERVAL was explicitly set.
+ if runtime_options.sse_keep_alive_interval_seconds is None and config.sse_keepalive_interval > 0:
+ runtime_options.sse_keep_alive_interval_seconds = config.sse_keepalive_interval
+
+ # SSE-specific headers (x-platform-server is handled by hosting middleware)
+ sse_headers: dict[str, str] = {
+ "connection": "keep-alive",
+ "cache-control": "no-cache",
+ "x-accel-buffering": "no",
+ }
+
+ if provider is None:
+ if config.project_endpoint:
+ from ..store._foundry_provider import FoundryStorageProvider
+ from ..store._foundry_settings import FoundryStorageSettings
+
+ try:
+ from azure.identity.aio import DefaultAzureCredential
+ except ImportError:
+ logger.warning("azure-identity not installed; Foundry auto-activation disabled")
+ else:
+ settings = FoundryStorageSettings.from_endpoint(config.project_endpoint)
+ provider = FoundryStorageProvider(DefaultAzureCredential(), settings)
+
+ resolved_provider: ResponseProviderProtocol = provider if provider is not None else InMemoryResponseProvider()
+ stream_provider = resolved_provider if isinstance(resolved_provider, ResponseStreamProviderProtocol) else None
+ runtime_state = _RuntimeState()
+ orchestrator = _ResponseOrchestrator(
+ create_fn=self._dispatch_create,
+ runtime_state=runtime_state,
+ runtime_options=runtime_options,
+ provider=resolved_provider,
+ stream_provider=stream_provider,
+ )
+ endpoint = _ResponseEndpointHandler(
+ orchestrator=orchestrator,
+ runtime_state=runtime_state,
+ runtime_options=runtime_options,
+ response_headers={},
+ sse_headers=sse_headers,
+ host=self,
+ provider=resolved_provider,
+ stream_provider=stream_provider,
+ )
+
+ # Build response protocol routes
+ response_routes: list[Route] = [
+ Route(
+ f"{normalized_prefix}/responses",
+ endpoint.handle_create,
+ methods=["POST"],
+ name="create_response",
+ ),
+ Route(
+ f"{normalized_prefix}/responses/{{response_id}}",
+ endpoint.handle_get,
+ methods=["GET"],
+ name="get_response",
+ ),
+ Route(
+ f"{normalized_prefix}/responses/{{response_id}}",
+ endpoint.handle_delete,
+ methods=["DELETE"],
+ name="delete_response",
+ ),
+ Route(
+ f"{normalized_prefix}/responses/{{response_id}}/cancel",
+ endpoint.handle_cancel,
+ methods=["POST"],
+ name="cancel_response",
+ ),
+ Route(
+ f"{normalized_prefix}/responses/{{response_id}}/input_items",
+ endpoint.handle_input_items,
+ methods=["GET"],
+ name="get_input_items",
+ ),
+ ]
+
+ # Merge with any routes from sibling mixins via cooperative init
+ existing = list(kwargs.pop("routes", None) or [])
+ super().__init__(routes=existing + response_routes, **kwargs)
+
+ # Register the responses protocol version on the host so the
+ # x-platform-server header includes this package's version.
+ self.register_server_version(build_server_version("azure-ai-agentserver-responses", _RESPONSES_VERSION))
+
+ # Allow handler developers to append their own version segment.
+ if runtime_options.additional_server_version:
+ self.register_server_version(runtime_options.additional_server_version)
+
+ # Register shutdown handler on self (inherited from AgentServerHost)
+ self.shutdown_handler(endpoint.handle_shutdown)
+
+ # ------------------------------------------------------------------
+ # Handler decorator
+ # ------------------------------------------------------------------
+
+ def create_handler(self, fn: CreateHandlerFn) -> CreateHandlerFn:
+ """Register a function as the create-response handler.
+
+ The handler function must accept exactly three positional parameters:
+ ``(request, context, cancellation_signal)`` and return an
+ ``AsyncIterable`` of response stream events.
+
+ Usage::
+
+ @app.create_handler
+ def my_handler(request, context, cancellation_signal):
+ yield event
+
+ :param fn: A callable accepting (request, context, cancellation_signal).
+ :type fn: CreateHandlerFn
+ :return: The original function (unmodified).
+ :rtype: CreateHandlerFn
+ """
+ self._create_fn = fn
+ return fn
+
+ # ------------------------------------------------------------------
+ # Dispatch (internal)
+ # ------------------------------------------------------------------
+
+ def _dispatch_create(
+ self,
+ request: CreateResponse,
+ context: ResponseContext,
+ cancellation_signal: asyncio.Event,
+ ) -> AsyncIterator[ResponseStreamEvent]:
+ """Dispatch to the registered create handler.
+
+ Called by the orchestrator when processing a create request.
+ Handles all handler return signatures:
+
+ - Sync generator → wrapped into async generator.
+ - AsyncIterable (e.g. ``TextResponse``) → converted to ``AsyncIterator``.
+ - Coroutine (``async def`` that ``return`` s a value) → awaited, then the
+ result is recursively normalised.
+ - Async generator → returned as-is.
+
+ :param request: The parsed create-response request.
+ :type request: CreateResponse
+ :param context: The response context for the request.
+ :type context: ResponseContext
+ :param cancellation_signal: The cancellation signal for the request.
+ :type cancellation_signal: asyncio.Event
+ :returns: The result from the registered create handler callable.
+ :rtype: AsyncIterator[ResponseStreamEvent]
+ """
+ if self._create_fn is None:
+ raise NotImplementedError("No create handler registered. Use the @app.create_handler decorator.")
+ result = self._create_fn(request, context, cancellation_signal)
+ return self._normalize_handler_result(result)
+
+ def _normalize_handler_result(self, result: Any) -> AsyncIterator[ResponseStreamEvent]:
+ """Convert a handler result into an AsyncIterator.
+
+ Supports sync generators, async generators, coroutines (async def
+ that returns), and AsyncIterables (e.g. TextResponse).
+ """
+ if isinstance(result, types.GeneratorType):
+ return _sync_to_async_gen(result)
+ # Coroutine: async def handler that returns (rather than yields).
+ # Await it and normalise the inner result.
+ if asyncio.iscoroutine(result):
+ return self._await_and_normalize(result)
+ # If the handler returned an AsyncIterable (e.g. TextResponse), convert
+ # to an AsyncIterator so the orchestrator can __anext__() uniformly.
+ if hasattr(result, "__aiter__") and not hasattr(result, "__anext__"):
+ return result.__aiter__() # type: ignore[union-attr, return-value]
+ return result # type: ignore[return-value]
+
+ async def _await_and_normalize(self, coro: Any) -> AsyncIterator[ResponseStreamEvent]: # type: ignore[misc]
+ """Await a coroutine and yield events from its normalised result."""
+ inner = await coro
+ async for event in self._normalize_handler_result(inner):
+ yield event
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/hosting/_runtime_state.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/hosting/_runtime_state.py
new file mode 100644
index 000000000000..60c10fe2f2fc
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/hosting/_runtime_state.py
@@ -0,0 +1,149 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Runtime state management for the Responses server."""
+
+from __future__ import annotations
+
+import asyncio # pylint: disable=do-not-import-asyncio
+from copy import deepcopy
+from typing import Any
+
+from ..models._generated import OutputItem
+from ..models.runtime import ResponseExecution
+from ..streaming._helpers import strip_nulls
+
+
+class _RuntimeState:
+ """In-memory store for response execution records."""
+
+ def __init__(self) -> None:
+ """Initialize the runtime state with empty record and deletion sets."""
+ self._records: dict[str, ResponseExecution] = {}
+ self._deleted_response_ids: set[str] = set()
+ self._lock = asyncio.Lock()
+
+ async def add(self, record: ResponseExecution) -> None:
+ """Add or replace an execution record in the store.
+
+ :param record: The execution record to store.
+ :type record: ResponseExecution
+ :return: None
+ :rtype: None
+ """
+ async with self._lock:
+ self._records[record.response_id] = record
+ self._deleted_response_ids.discard(record.response_id)
+
+ async def get(self, response_id: str) -> ResponseExecution | None:
+ """Look up an execution record by response ID.
+
+ :param response_id: The response ID to look up.
+ :type response_id: str
+ :return: The matching execution record, or ``None`` if not found.
+ :rtype: ResponseExecution | None
+ """
+ async with self._lock:
+ return self._records.get(response_id)
+
+ async def is_deleted(self, response_id: str) -> bool:
+ """Check whether a response ID has been deleted.
+
+ :param response_id: The response ID to check.
+ :type response_id: str
+ :return: ``True`` if the response was previously deleted.
+ :rtype: bool
+ """
+ async with self._lock:
+ return response_id in self._deleted_response_ids
+
+ async def delete(self, response_id: str) -> bool:
+ """Delete an execution record by response ID.
+
+ :param response_id: The response ID to delete.
+ :type response_id: str
+ :return: ``True`` if the record was found and deleted, ``False`` otherwise.
+ :rtype: bool
+ """
+ async with self._lock:
+ record = self._records.pop(response_id, None)
+ if record is None:
+ return False
+ self._deleted_response_ids.add(response_id)
+ return True
+
+ async def get_input_items(self, response_id: str) -> list[OutputItem]:
+ """Retrieve the full input item chain for a response, including ancestors.
+
+ Walks the ``previous_response_id`` chain to build the complete ordered
+ list of input items.
+
+ :param response_id: The response ID whose input items to retrieve.
+ :type response_id: str
+ :return: Ordered list of deep-copied output items.
+ :rtype: list[OutputItem]
+ :raises ValueError: If the response has been deleted.
+ :raises KeyError: If the response is not found or not visible.
+ """
+ async with self._lock:
+ record = self._records.get(response_id)
+ if record is None:
+ if response_id in self._deleted_response_ids:
+ raise ValueError(f"response '{response_id}' has been deleted")
+ raise KeyError(f"response '{response_id}' not found")
+
+ if not record.visible_via_get:
+ raise KeyError(f"response '{response_id}' not found")
+
+ history: list[OutputItem] = []
+ cursor = record.previous_response_id
+ visited: set[str] = set()
+
+ while isinstance(cursor, str) and cursor and cursor not in visited:
+ visited.add(cursor)
+ previous = self._records.get(cursor)
+ if previous is None:
+ break
+ history = [*deepcopy(previous.input_items), *history]
+ cursor = previous.previous_response_id
+
+ return [*history, *deepcopy(record.input_items)]
+
+ async def list_records(self) -> list[ResponseExecution]:
+ """Return a snapshot list of all execution records in the store.
+
+ :return: List of all current execution records.
+ :rtype: list[ResponseExecution]
+ """
+ async with self._lock:
+ return list(self._records.values())
+
+ @staticmethod
+ def to_snapshot(execution: ResponseExecution) -> dict[str, Any]:
+ """Build a normalized response snapshot dictionary from an execution.
+
+ Uses ``execution.response.as_dict()`` directly when a response snapshot is
+ available, avoiding an unnecessary ``Response(dict).as_dict()`` round-trip.
+ Falls back to a minimal status-only dict when no response has been set yet.
+
+ :param execution: The execution whose response snapshot to build.
+ :type execution: ResponseExecution
+ :return: A normalized response payload dictionary.
+ :rtype: dict[str, Any]
+ """
+ if execution.response is not None:
+ result: dict[str, Any] = execution.response.as_dict()
+ result.setdefault("id", execution.response_id)
+ result.setdefault("response_id", execution.response_id)
+ result.setdefault("object", "response")
+ result["status"] = execution.status
+ return strip_nulls(result)
+ return {
+ "id": execution.response_id,
+ "response_id": execution.response_id,
+ "object": "response",
+ "status": execution.status,
+ "created_at": int(execution.created_at.timestamp()),
+ "output": [],
+ "model": execution.initial_model,
+ "agent_reference": deepcopy(execution.initial_agent_reference) or {},
+ }
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/hosting/_validation.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/hosting/_validation.py
new file mode 100644
index 000000000000..a641fc5bb623
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/hosting/_validation.py
@@ -0,0 +1,342 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Validation utilities for request and response models."""
+
+from __future__ import annotations
+
+from typing import Any, Mapping
+
+from starlette.responses import JSONResponse
+
+from azure.ai.agentserver.responses._options import ResponsesServerOptions
+from azure.ai.agentserver.responses.models._generated import ApiErrorResponse, CreateResponse, Error
+from azure.ai.agentserver.responses.models._generated._validators import validate_CreateResponse
+from azure.ai.agentserver.responses.models.errors import RequestValidationError
+
+
+def parse_create_response(payload: Mapping[str, Any]) -> CreateResponse:
+ """Parse incoming JSON payload into the generated ``CreateResponse`` model.
+
+ :param payload: Raw request payload mapping.
+ :type payload: Mapping[str, Any]
+ :returns: Parsed generated create response model.
+ :rtype: CreateResponse
+ :raises RequestValidationError: If payload is not an object or cannot be parsed.
+ """
+ if not isinstance(payload, Mapping):
+ raise RequestValidationError("request body must be a JSON object", code="invalid_request")
+
+ validation_errors = validate_CreateResponse(payload)
+ if validation_errors:
+ details = [
+ {
+ "code": "invalid_value",
+ "message": e.get("message", ""),
+ "param": ("$" + e.get("path", "")) if e.get("path", "").startswith(".") else e.get("path", ""),
+ }
+ for e in validation_errors
+ ]
+ raise RequestValidationError(
+ "request body failed schema validation",
+ code="invalid_request",
+ details=details,
+ )
+
+ try:
+ return CreateResponse(payload)
+ except Exception as exc: # pragma: no cover - generated model raises implementation-specific errors.
+ raise RequestValidationError(
+ "request body failed schema validation",
+ code="invalid_request",
+ debug_info={"exception_type": type(exc).__name__, "detail": str(exc)},
+ ) from exc
+
+
+def normalize_create_response(
+ request: CreateResponse,
+ options: ResponsesServerOptions | None,
+) -> CreateResponse:
+ """Apply server-side defaults to a parsed create request model.
+
+ :param request: The parsed create response model to normalize.
+ :type request: CreateResponse
+ :param options: Server runtime options containing defaults, or ``None``.
+ :type options: ResponsesServerOptions | None
+ :return: The same model instance with defaults applied.
+ :rtype: CreateResponse
+ """
+ if (request.model is None or (isinstance(request.model, str) and not request.model.strip())) and options:
+ request.model = options.default_model
+
+ if isinstance(request.model, str):
+ request.model = request.model.strip() or ""
+ elif request.model is None:
+ request.model = ""
+
+ return request
+
+
+def validate_create_response(request: CreateResponse) -> None:
+ """Validate create request semantics not enforced by generated model typing.
+
+ :param request: The parsed create response model to validate.
+ :type request: CreateResponse
+ :raises RequestValidationError: If semantic preconditions are violated.
+ """
+ store_enabled = True if request.store is None else bool(request.store)
+
+ if request.background and not store_enabled:
+ raise RequestValidationError(
+ "background=true requires store=true",
+ code="unsupported_parameter",
+ param="background",
+ )
+
+ if request.stream_options is not None and request.stream is not True:
+ raise RequestValidationError(
+ "stream_options requires stream=true",
+ code="invalid_mode",
+ param="stream",
+ )
+
+ # B22: model is optional — resolved to default in normalize_create_response()
+
+ # Metadata constraints: ≤16 keys, key ≤64 chars, value ≤512 chars
+ metadata = getattr(request, "metadata", None)
+ if metadata is not None and hasattr(metadata, "items"):
+ if len(metadata) > 16:
+ raise RequestValidationError(
+ "metadata must have at most 16 key-value pairs",
+ code="invalid_request",
+ param="metadata",
+ )
+ for key, value in metadata.items():
+ if isinstance(key, str) and len(key) > 64:
+ raise RequestValidationError(
+ f"metadata key '{key[:64]}...' exceeds maximum length of 64 characters",
+ code="invalid_request",
+ param="metadata",
+ )
+ if isinstance(value, str) and len(value) > 512:
+ raise RequestValidationError(
+ f"metadata value for key '{key}' exceeds maximum length of 512 characters",
+ code="invalid_request",
+ param="metadata",
+ )
+
+
+def parse_and_validate_create_response(
+ payload: Mapping[str, Any],
+ *,
+ options: ResponsesServerOptions | None = None,
+) -> CreateResponse:
+ """Parse, normalize, and validate a create request using generated models.
+
+ :param payload: Raw request payload mapping.
+ :type payload: Mapping[str, Any]
+ :keyword options: Server runtime options for defaults, or ``None``.
+ :keyword type options: ResponsesServerOptions | None
+ :return: A fully validated ``CreateResponse`` model.
+ :rtype: CreateResponse
+ :raises RequestValidationError: If parsing or validation fails.
+ """
+ request = parse_create_response(payload)
+ request = normalize_create_response(request, options)
+ validate_create_response(request)
+ return request
+
+
+def build_api_error_response(
+ message: str,
+ *,
+ code: str,
+ param: str | None = None,
+ error_type: str = "invalid_request_error",
+ debug_info: dict[str, Any] | None = None,
+) -> ApiErrorResponse:
+ """Build a generated ``ApiErrorResponse`` envelope for client-visible failures.
+
+ :param message: Human-readable error message.
+ :type message: str
+ :keyword code: Machine-readable error code.
+ :keyword type code: str
+ :keyword param: The request parameter that caused the error, or ``None``.
+ :keyword type param: str | None
+ :keyword error_type: Error type category (default ``"invalid_request_error"``).
+ :keyword type error_type: str
+ :keyword debug_info: Optional debug information dictionary.
+ :keyword type debug_info: dict[str, Any] | None
+ :return: A generated ``ApiErrorResponse`` envelope.
+ :rtype: ApiErrorResponse
+ """
+ return ApiErrorResponse(
+ error=Error(
+ code=code,
+ message=message,
+ param=param,
+ type=error_type,
+ debug_info=debug_info,
+ )
+ )
+
+
+def build_not_found_error_response(
+ resource_id: str,
+ *,
+ param: str = "response_id",
+ resource_name: str = "response",
+) -> ApiErrorResponse:
+ """Build a canonical generated not-found error envelope.
+
+ :param resource_id: The ID of the resource that was not found.
+ :type resource_id: str
+ :keyword param: The parameter name to include in the error (default ``"response_id"``).
+ :keyword type param: str
+ :keyword resource_name: Display name for the resource type (default ``"response"``).
+ :keyword type resource_name: str
+ :return: A generated ``ApiErrorResponse`` envelope with not-found error.
+ :rtype: ApiErrorResponse
+ """
+ return build_api_error_response(
+ message=f"{resource_name} '{resource_id}' was not found",
+ code="not_found",
+ param=param,
+ error_type="invalid_request_error",
+ )
+
+
+def build_invalid_mode_error_response(
+ message: str,
+ *,
+ param: str | None = None,
+) -> ApiErrorResponse:
+ """Build a canonical generated invalid-mode error envelope.
+
+ :param message: Human-readable error message.
+ :type message: str
+ :keyword param: The request parameter that caused the error, or ``None``.
+ :keyword type param: str | None
+ :return: A generated ``ApiErrorResponse`` envelope with invalid-mode error.
+ :rtype: ApiErrorResponse
+ """
+ return build_api_error_response(
+ message=message,
+ code="invalid_mode",
+ param=param,
+ error_type="invalid_request_error",
+ )
+
+
+def to_api_error_response(error: Exception) -> ApiErrorResponse:
+ """Map a Python exception to a generated API error envelope.
+
+ :param error: The exception to convert.
+ :type error: Exception
+ :return: A generated ``ApiErrorResponse`` envelope.
+ :rtype: ApiErrorResponse
+ """
+ if isinstance(error, RequestValidationError):
+ return error.to_api_error_response()
+
+ if isinstance(error, ValueError):
+ return build_api_error_response(
+ message=str(error) or "invalid request",
+ code="invalid_request",
+ error_type="invalid_request_error",
+ )
+
+ return build_api_error_response(
+ message="internal server error",
+ code="internal_error",
+ error_type="server_error",
+ )
+
+
+# ---------------------------------------------------------------------------
+# HTTP error response factories (moved from _http_errors.py)
+# ---------------------------------------------------------------------------
+
+
+def _json_payload(value: Any) -> Any:
+ """Convert a value to a JSON-serializable form."""
+ if hasattr(value, "as_dict"):
+ return value.as_dict() # type: ignore[no-any-return]
+ return value
+
+
+def _api_error(
+ *,
+ message: str,
+ code: str,
+ param: str | None = None,
+ error_type: str = "invalid_request_error",
+ status_code: int,
+ headers: dict[str, str],
+) -> JSONResponse:
+ """Build a standard API error ``JSONResponse``."""
+ payload = _json_payload(build_api_error_response(message=message, code=code, param=param, error_type=error_type))
+ return JSONResponse(payload, status_code=status_code, headers=headers)
+
+
+def error_response(error: Exception, headers: dict[str, str]) -> JSONResponse:
+ """Map an exception to an appropriate HTTP error ``JSONResponse``."""
+ envelope = to_api_error_response(error)
+ payload = _json_payload(envelope)
+ error_type = payload.get("error", {}).get("type") if isinstance(payload, dict) else None
+ status_code = 500
+ if error_type == "invalid_request_error":
+ status_code = 400
+ elif error_type == "not_found_error":
+ status_code = 404
+ return JSONResponse(payload, status_code=status_code, headers=headers)
+
+
+def not_found_response(response_id: str, headers: dict[str, str]) -> JSONResponse:
+ """Build a 404 Not Found error response."""
+ return _api_error(
+ message=f"Response with id '{response_id}' not found.",
+ code="invalid_request",
+ param="response_id",
+ error_type="invalid_request_error",
+ status_code=404,
+ headers=headers,
+ )
+
+
+def invalid_request_response(message: str, headers: dict[str, str], *, param: str | None = None) -> JSONResponse:
+ """Build a 400 Bad Request error response."""
+ return _api_error(
+ message=message,
+ code="invalid_request",
+ param=param,
+ error_type="invalid_request_error",
+ status_code=400,
+ headers=headers,
+ )
+
+
+def invalid_mode_response(message: str, headers: dict[str, str], *, param: str | None = None) -> JSONResponse:
+ """Build a 400 Bad Request error response for an invalid mode combination."""
+ payload = _json_payload(build_invalid_mode_error_response(message, param=param))
+ return JSONResponse(payload, status_code=400, headers=headers)
+
+
+def service_unavailable_response(message: str, headers: dict[str, str]) -> JSONResponse:
+ """Build a 503 Service Unavailable error response."""
+ return _api_error(
+ message=message,
+ code="service_unavailable",
+ param=None,
+ error_type="server_error",
+ status_code=503,
+ headers=headers,
+ )
+
+
+def deleted_response(response_id: str, headers: dict[str, str]) -> JSONResponse:
+ """Build a 400 error response indicating the response has been deleted."""
+ return invalid_request_response(
+ f"Response with id '{response_id}' has been deleted.",
+ headers,
+ param="response_id",
+ )
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/__init__.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/__init__.py
new file mode 100644
index 000000000000..e254c07dbbe7
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/__init__.py
@@ -0,0 +1,40 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Canonical non-generated model types for the response server."""
+
+from ._generated import * # type: ignore # noqa: F401,F403
+from ._generated.sdk.models.models import __all__ as _generated_all
+from ._helpers import (
+ get_content_expanded,
+ get_conversation_expanded,
+ get_conversation_id,
+ get_input_expanded,
+ get_instruction_items,
+ get_output_item_id,
+ get_tool_choice_expanded,
+ to_output_item,
+)
+from .runtime import (
+ ResponseExecution,
+ ResponseStatus,
+ StreamEventRecord,
+ StreamReplayState,
+ TerminalResponseStatus,
+)
+
+__all__ = [
+ "ResponseExecution",
+ "ResponseStatus",
+ "StreamEventRecord",
+ "StreamReplayState",
+ "TerminalResponseStatus",
+ "get_content_expanded",
+ "get_conversation_expanded",
+ "get_conversation_id",
+ "get_input_expanded",
+ "get_instruction_items",
+ "get_output_item_id",
+ "get_tool_choice_expanded",
+ "to_output_item",
+ *_generated_all,
+]
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_generated/__init__.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_generated/__init__.py
new file mode 100644
index 000000000000..b783bfa73795
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_generated/__init__.py
@@ -0,0 +1,11 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------
+
+"""Compatibility re-exports for generated models preserved under sdk/models."""
+
+from .sdk.models.models import * # type: ignore # noqa: F401,F403
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_generated/_enums.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_generated/_enums.py
new file mode 100644
index 000000000000..481d6d628755
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_generated/_enums.py
@@ -0,0 +1,11 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------
+
+"""Compatibility shim for generated enum symbols."""
+
+from .sdk.models.models._enums import * # type: ignore # noqa: F401,F403
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_generated/_models.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_generated/_models.py
new file mode 100644
index 000000000000..01e649adb824
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_generated/_models.py
@@ -0,0 +1,11 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------
+
+"""Compatibility shim for generated model symbols."""
+
+from .sdk.models.models._models import * # type: ignore # noqa: F401,F403
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_generated/_patch.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_generated/_patch.py
new file mode 100644
index 000000000000..66ee2dea3a63
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_generated/_patch.py
@@ -0,0 +1,11 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------
+
+"""Compatibility shim for generated patch helpers."""
+
+from .sdk.models.models._patch import * # type: ignore # noqa: F401,F403
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_generated/_validators.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_generated/_validators.py
new file mode 100644
index 000000000000..6a4861b0714e
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_generated/_validators.py
@@ -0,0 +1,5338 @@
+# pylint: disable=line-too-long,useless-suppression,too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+
+from __future__ import annotations
+
+from typing import Any
+
+try:
+ from . import _enums as _generated_enums
+except Exception:
+ _generated_enums = None
+
+def _append_error(errors: list[dict[str, str]], path: str, message: str) -> None:
+ errors.append({'path': path, 'message': message})
+
+def _type_label(value: Any) -> str:
+ if value is None:
+ return 'null'
+ if isinstance(value, bool):
+ return 'boolean'
+ if isinstance(value, int):
+ return 'integer'
+ if isinstance(value, float):
+ return 'number'
+ if isinstance(value, str):
+ return 'string'
+ if isinstance(value, dict):
+ return 'object'
+ if isinstance(value, list):
+ return 'array'
+ return type(value).__name__
+
+def _is_type(value: Any, expected: str) -> bool:
+ if expected == 'string':
+ return isinstance(value, str)
+ if expected == 'integer':
+ return isinstance(value, int) and not isinstance(value, bool)
+ if expected == 'number':
+ return (isinstance(value, int) and not isinstance(value, bool)) or isinstance(value, float)
+ if expected == 'boolean':
+ return isinstance(value, bool)
+ if expected == 'object':
+ return isinstance(value, dict)
+ if expected == 'array':
+ return isinstance(value, list)
+ return True
+
+def _append_type_mismatch(errors: list[dict[str, str]], path: str, expected: str, value: Any) -> None:
+ _append_error(errors, path, f"Expected {expected}, got {_type_label(value)}")
+
+def _enum_values(enum_name: str) -> tuple[tuple[str, ...] | None, str | None]:
+ if _generated_enums is None:
+ return None, f'enum type _enums.{enum_name} is unavailable'
+ enum_cls = getattr(_generated_enums, enum_name, None)
+ if enum_cls is None:
+ return None, f'enum type _enums.{enum_name} is not defined'
+ try:
+ return tuple(str(member.value) for member in enum_cls), None
+ except Exception:
+ return None, f'enum type _enums.{enum_name} failed to load values'
+
+def _validate_CreateResponse(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'agent' in value:
+ _validate_CreateResponse_agent(value['agent'], f"{path}.agent", errors)
+ if 'agent_reference' in value:
+ _validate_CreateResponse_agent_reference(value['agent_reference'], f"{path}.agent_reference", errors)
+ if 'agent_session_id' in value:
+ _validate_CreateResponse_agent_session_id(value['agent_session_id'], f"{path}.agent_session_id", errors)
+ if 'background' in value:
+ _validate_CreateResponse_background(value['background'], f"{path}.background", errors)
+ if 'context_management' in value:
+ _validate_CreateResponse_context_management(value['context_management'], f"{path}.context_management", errors)
+ if 'conversation' in value:
+ _validate_CreateResponse_conversation(value['conversation'], f"{path}.conversation", errors)
+ if 'include' in value:
+ _validate_CreateResponse_include(value['include'], f"{path}.include", errors)
+ if 'input' in value:
+ _validate_CreateResponse_input(value['input'], f"{path}.input", errors)
+ if 'instructions' in value:
+ _validate_CreateResponse_instructions(value['instructions'], f"{path}.instructions", errors)
+ if 'max_output_tokens' in value:
+ _validate_CreateResponse_max_output_tokens(value['max_output_tokens'], f"{path}.max_output_tokens", errors)
+ if 'max_tool_calls' in value:
+ _validate_CreateResponse_max_output_tokens(value['max_tool_calls'], f"{path}.max_tool_calls", errors)
+ if 'metadata' in value:
+ _validate_CreateResponse_metadata(value['metadata'], f"{path}.metadata", errors)
+ if 'model' in value:
+ _validate_CreateResponse_model(value['model'], f"{path}.model", errors)
+ if 'parallel_tool_calls' in value:
+ _validate_CreateResponse_parallel_tool_calls(value['parallel_tool_calls'], f"{path}.parallel_tool_calls", errors)
+ if 'previous_response_id' in value:
+ _validate_CreateResponse_instructions(value['previous_response_id'], f"{path}.previous_response_id", errors)
+ if 'prompt' in value:
+ _validate_CreateResponse_prompt(value['prompt'], f"{path}.prompt", errors)
+ if 'prompt_cache_key' in value:
+ _validate_CreateResponse_prompt_cache_key(value['prompt_cache_key'], f"{path}.prompt_cache_key", errors)
+ if 'prompt_cache_retention' in value:
+ _validate_CreateResponse_prompt_cache_retention(value['prompt_cache_retention'], f"{path}.prompt_cache_retention", errors)
+ if 'reasoning' in value:
+ _validate_CreateResponse_reasoning(value['reasoning'], f"{path}.reasoning", errors)
+ if 'safety_identifier' in value:
+ _validate_CreateResponse_safety_identifier(value['safety_identifier'], f"{path}.safety_identifier", errors)
+ if 'service_tier' in value:
+ _validate_CreateResponse_service_tier(value['service_tier'], f"{path}.service_tier", errors)
+ if 'store' in value:
+ _validate_CreateResponse_parallel_tool_calls(value['store'], f"{path}.store", errors)
+ if 'stream' in value:
+ _validate_CreateResponse_background(value['stream'], f"{path}.stream", errors)
+ if 'stream_options' in value:
+ _validate_CreateResponse_stream_options(value['stream_options'], f"{path}.stream_options", errors)
+ if 'structured_inputs' in value:
+ _validate_CreateResponse_structured_inputs(value['structured_inputs'], f"{path}.structured_inputs", errors)
+ if 'temperature' in value:
+ _validate_CreateResponse_temperature(value['temperature'], f"{path}.temperature", errors)
+ if 'text' in value:
+ _validate_CreateResponse_text(value['text'], f"{path}.text", errors)
+ if 'tool_choice' in value:
+ _validate_CreateResponse_tool_choice(value['tool_choice'], f"{path}.tool_choice", errors)
+ if 'tools' in value:
+ _validate_CreateResponse_tools(value['tools'], f"{path}.tools", errors)
+ if 'top_logprobs' in value:
+ _validate_CreateResponse_max_output_tokens(value['top_logprobs'], f"{path}.top_logprobs", errors)
+ if 'top_p' in value:
+ _validate_CreateResponse_temperature(value['top_p'], f"{path}.top_p", errors)
+ if 'truncation' in value:
+ _validate_CreateResponse_truncation(value['truncation'], f"{path}.truncation", errors)
+ if 'user' in value:
+ _validate_CreateResponse_user(value['user'], f"{path}.user", errors)
+
+def _validate_CreateResponse_agent(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ return
+
+def _validate_CreateResponse_agent_reference(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ return
+
+def _validate_CreateResponse_agent_session_id(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_CreateResponse_background(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if value is None:
+ return
+ if not _is_type(value, 'boolean'):
+ _append_type_mismatch(errors, path, 'boolean', value)
+ return
+
+def _validate_CreateResponse_context_management(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if value is None:
+ return
+ if not _is_type(value, 'array'):
+ _append_type_mismatch(errors, path, 'array', value)
+ return
+ for _idx, _item in enumerate(value):
+ _validate_CreateResponse_context_management_item(_item, f"{path}[{_idx}]", errors)
+
+def _validate_CreateResponse_conversation(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if value is None:
+ return
+
+def _validate_CreateResponse_include(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if value is None:
+ return
+ if not _is_type(value, 'array'):
+ _append_type_mismatch(errors, path, 'array', value)
+ return
+ for _idx, _item in enumerate(value):
+ _validate_CreateResponse_include_item(_item, f"{path}[{_idx}]", errors)
+
+def _validate_CreateResponse_input(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _validate_OpenAI_InputParam(value, path, errors)
+
+def _validate_CreateResponse_instructions(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if value is None:
+ return
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_CreateResponse_max_output_tokens(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if value is None:
+ return
+ if not _is_type(value, 'integer'):
+ _append_type_mismatch(errors, path, 'integer', value)
+ return
+
+def _validate_CreateResponse_metadata(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if value is None:
+ return
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+
+def _validate_CreateResponse_model(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_CreateResponse_parallel_tool_calls(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if value is None:
+ return
+ if not _is_type(value, 'boolean'):
+ _append_type_mismatch(errors, path, 'boolean', value)
+ return
+
+def _validate_CreateResponse_prompt(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _validate_OpenAI_Prompt(value, path, errors)
+
+def _validate_CreateResponse_prompt_cache_key(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_CreateResponse_prompt_cache_retention(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if value is None:
+ return
+ _allowed_values = ('in-memory', '24h')
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_CreateResponse_reasoning(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if value is None:
+ return
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+
+def _validate_CreateResponse_safety_identifier(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_CreateResponse_service_tier(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _validate_OpenAI_ServiceTier(value, path, errors)
+
+def _validate_CreateResponse_stream_options(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if value is None:
+ return
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+
+def _validate_CreateResponse_structured_inputs(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ for _key, _item in value.items():
+ if _key not in ():
+ _validate_CreateResponse_structured_inputs_additional_property(_item, f"{path}.{_key}", errors)
+
+def _validate_CreateResponse_temperature(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if value is None:
+ return
+ if not _is_type(value, 'number'):
+ _append_type_mismatch(errors, path, 'number', value)
+ return
+
+def _validate_CreateResponse_text(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _validate_OpenAI_ResponseTextParam(value, path, errors)
+
+def _validate_CreateResponse_tool_choice(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _matched_union = False
+ if not _matched_union and _is_type(value, 'string'):
+ _branch_errors_0: list[dict[str, str]] = []
+ _validate_OpenAI_ToolChoiceOptions(value, path, _branch_errors_0)
+ if not _branch_errors_0:
+ _matched_union = True
+ if not _matched_union and _is_type(value, 'object'):
+ _branch_errors_1: list[dict[str, str]] = []
+ _validate_OpenAI_ToolChoiceParam(value, path, _branch_errors_1)
+ if not _branch_errors_1:
+ _matched_union = True
+ if not _matched_union:
+ _append_error(errors, path, f"Expected one of: OpenAI.ToolChoiceOptions, OpenAI.ToolChoiceParam; got {_type_label(value)}")
+ return
+
+def _validate_CreateResponse_tools(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _validate_OpenAI_ToolsArray(value, path, errors)
+
+def _validate_CreateResponse_truncation(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if value is None:
+ return
+ _allowed_values = ('auto', 'disabled')
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_CreateResponse_user(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_CreateResponse_context_management_item(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _validate_OpenAI_ContextManagementParam(value, path, errors)
+
+def _validate_CreateResponse_include_item(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _validate_OpenAI_IncludeEnum(value, path, errors)
+
+def _validate_OpenAI_InputParam(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _matched_union = False
+ if not _matched_union and _is_type(value, 'string'):
+ _branch_errors_0: list[dict[str, str]] = []
+ _validate_OpenAI_InputParam_string(value, path, _branch_errors_0)
+ if not _branch_errors_0:
+ _matched_union = True
+ if not _matched_union and _is_type(value, 'array'):
+ _branch_errors_1: list[dict[str, str]] = []
+ _validate_OpenAI_InputParam_array(value, path, _branch_errors_1)
+ if not _branch_errors_1:
+ _matched_union = True
+ if not _matched_union:
+ _append_error(errors, path, f"Expected one of: string, array; got {_type_label(value)}")
+ return
+
+def _validate_OpenAI_Prompt(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'id' not in value:
+ _append_error(errors, f"{path}.id", "Required property 'id' is missing")
+ if 'id' in value:
+ _validate_OpenAI_Prompt_id(value['id'], f"{path}.id", errors)
+ if 'variables' in value:
+ _validate_OpenAI_Prompt_variables(value['variables'], f"{path}.variables", errors)
+ if 'version' in value:
+ _validate_CreateResponse_instructions(value['version'], f"{path}.version", errors)
+
+def _validate_OpenAI_ServiceTier(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if value is None:
+ return
+ _allowed_values, _enum_error = _enum_values('ServiceTier')
+ if _enum_error is not None:
+ _append_error(errors, path, _enum_error)
+ return
+ if _allowed_values is None:
+ return
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_CreateResponse_structured_inputs_additional_property(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ return
+
+def _validate_OpenAI_ResponseTextParam(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'format' in value:
+ _validate_OpenAI_ResponseTextParam_format(value['format'], f"{path}.format", errors)
+ if 'verbosity' in value:
+ _validate_OpenAI_ResponseTextParam_verbosity(value['verbosity'], f"{path}.verbosity", errors)
+
+def _validate_OpenAI_ToolChoiceOptions(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values, _enum_error = _enum_values('ToolChoiceOptions')
+ if _enum_error is not None:
+ _append_error(errors, path, _enum_error)
+ return
+ if _allowed_values is None:
+ return
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ToolChoiceParam(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'type' in value:
+ _validate_OpenAI_ToolChoiceParam_type(value['type'], f"{path}.type", errors)
+ _disc_value = value.get('type')
+ if not isinstance(_disc_value, str):
+ _append_error(errors, f"{path}.type", "Required discriminator 'type' is missing or invalid")
+ return
+ if _disc_value == 'allowed_tools':
+ _validate_OpenAI_ToolChoiceAllowed(value, path, errors)
+ if _disc_value == 'apply_patch':
+ _validate_OpenAI_SpecificApplyPatchParam(value, path, errors)
+ if _disc_value == 'code_interpreter':
+ _validate_OpenAI_ToolChoiceCodeInterpreter(value, path, errors)
+ if _disc_value == 'computer_use_preview':
+ _validate_OpenAI_ToolChoiceComputerUsePreview(value, path, errors)
+ if _disc_value == 'custom':
+ _validate_OpenAI_ToolChoiceCustom(value, path, errors)
+ if _disc_value == 'file_search':
+ _validate_OpenAI_ToolChoiceFileSearch(value, path, errors)
+ if _disc_value == 'function':
+ _validate_OpenAI_ToolChoiceFunction(value, path, errors)
+ if _disc_value == 'image_generation':
+ _validate_OpenAI_ToolChoiceImageGeneration(value, path, errors)
+ if _disc_value == 'mcp':
+ _validate_OpenAI_ToolChoiceMCP(value, path, errors)
+ if _disc_value == 'shell':
+ _validate_OpenAI_SpecificFunctionShellParam(value, path, errors)
+ if _disc_value == 'web_search_preview':
+ _validate_OpenAI_ToolChoiceWebSearchPreview(value, path, errors)
+ if _disc_value == 'web_search_preview_2025_03_11':
+ _validate_OpenAI_ToolChoiceWebSearchPreview20250311(value, path, errors)
+
+def _validate_OpenAI_ToolsArray(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'array'):
+ _append_type_mismatch(errors, path, 'array', value)
+ return
+ for _idx, _item in enumerate(value):
+ _validate_OpenAI_ToolsArray_item(_item, f"{path}[{_idx}]", errors)
+
+def _validate_OpenAI_ContextManagementParam(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'compact_threshold' in value:
+ _validate_CreateResponse_max_output_tokens(value['compact_threshold'], f"{path}.compact_threshold", errors)
+ if 'type' in value:
+ _validate_OpenAI_ContextManagementParam_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_IncludeEnum(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _matched_union = False
+ if not _matched_union and _is_type(value, 'string'):
+ _branch_errors_0: list[dict[str, str]] = []
+ _validate_OpenAI_InputParam_string(value, path, _branch_errors_0)
+ if not _branch_errors_0:
+ _matched_union = True
+ if not _matched_union and _is_type(value, 'string'):
+ _branch_errors_1: list[dict[str, str]] = []
+ _validate_OpenAI_IncludeEnum_2(value, path, _branch_errors_1)
+ if not _branch_errors_1:
+ _matched_union = True
+ if not _matched_union:
+ _append_error(errors, path, f"Expected IncludeEnum to be a string value, got {_type_label(value)}")
+ return
+
+def _validate_OpenAI_InputParam_string(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_InputParam_array(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'array'):
+ _append_type_mismatch(errors, path, 'array', value)
+ return
+ for _idx, _item in enumerate(value):
+ _validate_OpenAI_InputParam_array_item(_item, f"{path}[{_idx}]", errors)
+
+def _validate_OpenAI_Prompt_id(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_Prompt_variables(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if value is None:
+ return
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+
+def _validate_OpenAI_ResponseTextParam_format(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _validate_OpenAI_TextResponseFormatConfiguration(value, path, errors)
+
+def _validate_OpenAI_ResponseTextParam_verbosity(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _validate_OpenAI_Verbosity(value, path, errors)
+
+def _validate_OpenAI_ToolChoiceParam_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _validate_OpenAI_ToolChoiceParamType(value, path, errors)
+
+def _validate_OpenAI_ToolChoiceAllowed(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'mode' not in value:
+ _append_error(errors, f"{path}.mode", "Required property 'mode' is missing")
+ if 'tools' not in value:
+ _append_error(errors, f"{path}.tools", "Required property 'tools' is missing")
+ if 'mode' in value:
+ _validate_OpenAI_ToolChoiceAllowed_mode(value['mode'], f"{path}.mode", errors)
+ if 'tools' in value:
+ _validate_OpenAI_ToolChoiceAllowed_tools(value['tools'], f"{path}.tools", errors)
+ if 'type' in value:
+ _validate_OpenAI_ToolChoiceAllowed_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_SpecificApplyPatchParam(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'type' in value:
+ _validate_OpenAI_SpecificApplyPatchParam_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_ToolChoiceCodeInterpreter(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'type' in value:
+ _validate_OpenAI_ToolChoiceCodeInterpreter_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_ToolChoiceComputerUsePreview(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'type' in value:
+ _validate_OpenAI_ToolChoiceComputerUsePreview_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_ToolChoiceCustom(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'name' not in value:
+ _append_error(errors, f"{path}.name", "Required property 'name' is missing")
+ if 'name' in value:
+ _validate_OpenAI_ToolChoiceCustom_name(value['name'], f"{path}.name", errors)
+ if 'type' in value:
+ _validate_OpenAI_ToolChoiceCustom_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_ToolChoiceFileSearch(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'type' in value:
+ _validate_OpenAI_ToolChoiceFileSearch_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_ToolChoiceFunction(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'name' not in value:
+ _append_error(errors, f"{path}.name", "Required property 'name' is missing")
+ if 'name' in value:
+ _validate_OpenAI_ToolChoiceFunction_name(value['name'], f"{path}.name", errors)
+ if 'type' in value:
+ _validate_OpenAI_ToolChoiceFunction_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_ToolChoiceImageGeneration(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'type' in value:
+ _validate_OpenAI_ToolChoiceImageGeneration_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_ToolChoiceMCP(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'server_label' not in value:
+ _append_error(errors, f"{path}.server_label", "Required property 'server_label' is missing")
+ if 'name' in value:
+ _validate_CreateResponse_instructions(value['name'], f"{path}.name", errors)
+ if 'server_label' in value:
+ _validate_OpenAI_ToolChoiceMCP_server_label(value['server_label'], f"{path}.server_label", errors)
+ if 'type' in value:
+ _validate_OpenAI_ToolChoiceMCP_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_SpecificFunctionShellParam(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'type' in value:
+ _validate_OpenAI_SpecificFunctionShellParam_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_ToolChoiceWebSearchPreview(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'type' in value:
+ _validate_OpenAI_ToolChoiceWebSearchPreview_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_ToolChoiceWebSearchPreview20250311(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'type' in value:
+ _validate_OpenAI_ToolChoiceWebSearchPreview20250311_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_ToolsArray_item(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _validate_OpenAI_Tool(value, path, errors)
+
+def _validate_OpenAI_ContextManagementParam_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_IncludeEnum_2(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values, _enum_error = _enum_values('IncludeEnum')
+ if _enum_error is not None:
+ _append_error(errors, path, _enum_error)
+ return
+ if _allowed_values is None:
+ return
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_InputParam_array_item(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _validate_OpenAI_Item(value, path, errors)
+
+def _validate_OpenAI_TextResponseFormatConfiguration(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'type' in value:
+ _validate_OpenAI_TextResponseFormatConfiguration_type(value['type'], f"{path}.type", errors)
+ _disc_value = value.get('type')
+ if not isinstance(_disc_value, str):
+ _append_error(errors, f"{path}.type", "Required discriminator 'type' is missing or invalid")
+ return
+ if _disc_value == 'json_object':
+ _validate_OpenAI_TextResponseFormatConfigurationResponseFormatJsonObject(value, path, errors)
+ if _disc_value == 'json_schema':
+ _validate_OpenAI_TextResponseFormatJsonSchema(value, path, errors)
+ if _disc_value == 'text':
+ _validate_OpenAI_TextResponseFormatConfigurationResponseFormatText(value, path, errors)
+
+def _validate_OpenAI_Verbosity(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if value is None:
+ return
+ _allowed_values, _enum_error = _enum_values('Verbosity')
+ if _enum_error is not None:
+ _append_error(errors, path, _enum_error)
+ return
+ if _allowed_values is None:
+ return
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ToolChoiceParamType(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _matched_union = False
+ if not _matched_union and _is_type(value, 'string'):
+ _branch_errors_0: list[dict[str, str]] = []
+ _validate_OpenAI_InputParam_string(value, path, _branch_errors_0)
+ if not _branch_errors_0:
+ _matched_union = True
+ if not _matched_union and _is_type(value, 'string'):
+ _branch_errors_1: list[dict[str, str]] = []
+ _validate_OpenAI_ToolChoiceParamType_2(value, path, _branch_errors_1)
+ if not _branch_errors_1:
+ _matched_union = True
+ if not _matched_union:
+ _append_error(errors, path, f"Expected ToolChoiceParamType to be a string value, got {_type_label(value)}")
+ return
+
+def _validate_OpenAI_ToolChoiceAllowed_mode(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('auto', 'required')
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ToolChoiceAllowed_tools(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'array'):
+ _append_type_mismatch(errors, path, 'array', value)
+ return
+ for _idx, _item in enumerate(value):
+ _validate_OpenAI_ToolChoiceAllowed_tools_item(_item, f"{path}[{_idx}]", errors)
+
+def _validate_OpenAI_ToolChoiceAllowed_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('allowed_tools',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_SpecificApplyPatchParam_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('apply_patch',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ToolChoiceCodeInterpreter_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('code_interpreter',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ToolChoiceComputerUsePreview_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('computer_use_preview',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ToolChoiceCustom_name(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ToolChoiceCustom_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('custom',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ToolChoiceFileSearch_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('file_search',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ToolChoiceFunction_name(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ToolChoiceFunction_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('function',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ToolChoiceImageGeneration_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('image_generation',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ToolChoiceMCP_server_label(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ToolChoiceMCP_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('mcp',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_SpecificFunctionShellParam_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('shell',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ToolChoiceWebSearchPreview_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('web_search_preview',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ToolChoiceWebSearchPreview20250311_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('web_search_preview_2025_03_11',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_Tool(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'type' in value:
+ _validate_OpenAI_Tool_type(value['type'], f"{path}.type", errors)
+ _disc_value = value.get('type')
+ if not isinstance(_disc_value, str):
+ _append_error(errors, f"{path}.type", "Required discriminator 'type' is missing or invalid")
+ return
+ if _disc_value == 'a2a_preview':
+ _validate_A2APreviewTool(value, path, errors)
+ if _disc_value == 'apply_patch':
+ _validate_OpenAI_ApplyPatchToolParam(value, path, errors)
+ if _disc_value == 'azure_ai_search':
+ _validate_AzureAISearchTool(value, path, errors)
+ if _disc_value == 'azure_function':
+ _validate_AzureFunctionTool(value, path, errors)
+ if _disc_value == 'bing_custom_search_preview':
+ _validate_BingCustomSearchPreviewTool(value, path, errors)
+ if _disc_value == 'bing_grounding':
+ _validate_BingGroundingTool(value, path, errors)
+ if _disc_value == 'browser_automation_preview':
+ _validate_BrowserAutomationPreviewTool(value, path, errors)
+ if _disc_value == 'capture_structured_outputs':
+ _validate_CaptureStructuredOutputsTool(value, path, errors)
+ if _disc_value == 'code_interpreter':
+ _validate_OpenAI_CodeInterpreterTool(value, path, errors)
+ if _disc_value == 'computer_use_preview':
+ _validate_OpenAI_ComputerUsePreviewTool(value, path, errors)
+ if _disc_value == 'custom':
+ _validate_OpenAI_CustomToolParam(value, path, errors)
+ if _disc_value == 'fabric_dataagent_preview':
+ _validate_MicrosoftFabricPreviewTool(value, path, errors)
+ if _disc_value == 'file_search':
+ _validate_OpenAI_FileSearchTool(value, path, errors)
+ if _disc_value == 'function':
+ _validate_OpenAI_FunctionTool(value, path, errors)
+ if _disc_value == 'image_generation':
+ _validate_OpenAI_ImageGenTool(value, path, errors)
+ if _disc_value == 'local_shell':
+ _validate_OpenAI_LocalShellToolParam(value, path, errors)
+ if _disc_value == 'mcp':
+ _validate_OpenAI_MCPTool(value, path, errors)
+ if _disc_value == 'memory_search':
+ _validate_MemorySearchTool(value, path, errors)
+ if _disc_value == 'memory_search_preview':
+ _validate_MemorySearchPreviewTool(value, path, errors)
+ if _disc_value == 'openapi':
+ _validate_OpenApiTool(value, path, errors)
+ if _disc_value == 'sharepoint_grounding_preview':
+ _validate_SharepointPreviewTool(value, path, errors)
+ if _disc_value == 'shell':
+ _validate_OpenAI_FunctionShellToolParam(value, path, errors)
+ if _disc_value == 'web_search':
+ _validate_OpenAI_WebSearchTool(value, path, errors)
+ if _disc_value == 'web_search_preview':
+ _validate_OpenAI_WebSearchPreviewTool(value, path, errors)
+ if _disc_value == 'work_iq_preview':
+ _validate_WorkIQPreviewTool(value, path, errors)
+
+def _validate_OpenAI_Item(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' in value:
+ _validate_OpenAI_Item_type(value['type'], f"{path}.type", errors)
+ if 'type' in value:
+ _validate_OpenAI_Item_type(value['type'], f"{path}.type", errors)
+ _disc_value = value.get('type', 'message')
+ if not isinstance(_disc_value, str):
+ _append_error(errors, f"{path}.type", "Required discriminator 'type' is missing or invalid")
+ return
+ if _disc_value == 'apply_patch_call':
+ _validate_OpenAI_ApplyPatchToolCallItemParam(value, path, errors)
+ if _disc_value == 'apply_patch_call_output':
+ _validate_OpenAI_ApplyPatchToolCallOutputItemParam(value, path, errors)
+ if _disc_value == 'code_interpreter_call':
+ _validate_OpenAI_ItemCodeInterpreterToolCall(value, path, errors)
+ if _disc_value == 'compaction':
+ _validate_OpenAI_CompactionSummaryItemParam(value, path, errors)
+ if _disc_value == 'computer_call':
+ _validate_OpenAI_ItemComputerToolCall(value, path, errors)
+ if _disc_value == 'computer_call_output':
+ _validate_OpenAI_ComputerCallOutputItemParam(value, path, errors)
+ if _disc_value == 'custom_tool_call':
+ _validate_OpenAI_ItemCustomToolCall(value, path, errors)
+ if _disc_value == 'custom_tool_call_output':
+ _validate_OpenAI_ItemCustomToolCallOutput(value, path, errors)
+ if _disc_value == 'file_search_call':
+ _validate_OpenAI_ItemFileSearchToolCall(value, path, errors)
+ if _disc_value == 'function_call':
+ _validate_OpenAI_ItemFunctionToolCall(value, path, errors)
+ if _disc_value == 'function_call_output':
+ _validate_OpenAI_FunctionCallOutputItemParam(value, path, errors)
+ if _disc_value == 'image_generation_call':
+ _validate_OpenAI_ItemImageGenToolCall(value, path, errors)
+ if _disc_value == 'item_reference':
+ _validate_OpenAI_ItemReferenceParam(value, path, errors)
+ if _disc_value == 'local_shell_call':
+ _validate_OpenAI_ItemLocalShellToolCall(value, path, errors)
+ if _disc_value == 'local_shell_call_output':
+ _validate_OpenAI_ItemLocalShellToolCallOutput(value, path, errors)
+ if _disc_value == 'mcp_approval_request':
+ _validate_OpenAI_ItemMcpApprovalRequest(value, path, errors)
+ if _disc_value == 'mcp_approval_response':
+ _validate_OpenAI_MCPApprovalResponse(value, path, errors)
+ if _disc_value == 'mcp_call':
+ _validate_OpenAI_ItemMcpToolCall(value, path, errors)
+ if _disc_value == 'mcp_list_tools':
+ _validate_OpenAI_ItemMcpListTools(value, path, errors)
+ if _disc_value == 'memory_search_call':
+ _validate_MemorySearchToolCallItemParam(value, path, errors)
+ if _disc_value == 'message':
+ _validate_OpenAI_ItemMessage(value, path, errors)
+ if _disc_value == 'output_message':
+ _validate_OpenAI_ItemOutputMessage(value, path, errors)
+ if _disc_value == 'reasoning':
+ _validate_OpenAI_ItemReasoningItem(value, path, errors)
+ if _disc_value == 'shell_call':
+ _validate_OpenAI_FunctionShellCallItemParam(value, path, errors)
+ if _disc_value == 'shell_call_output':
+ _validate_OpenAI_FunctionShellCallOutputItemParam(value, path, errors)
+ if _disc_value == 'web_search_call':
+ _validate_OpenAI_ItemWebSearchToolCall(value, path, errors)
+
+def _validate_OpenAI_TextResponseFormatConfiguration_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _validate_OpenAI_TextResponseFormatConfigurationType(value, path, errors)
+
+def _validate_OpenAI_TextResponseFormatConfigurationResponseFormatJsonObject(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'type' in value:
+ _validate_OpenAI_TextResponseFormatConfigurationResponseFormatJsonObject_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_TextResponseFormatJsonSchema(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'name' not in value:
+ _append_error(errors, f"{path}.name", "Required property 'name' is missing")
+ if 'schema' not in value:
+ _append_error(errors, f"{path}.schema", "Required property 'schema' is missing")
+ if 'description' in value:
+ _validate_OpenAI_TextResponseFormatJsonSchema_description(value['description'], f"{path}.description", errors)
+ if 'name' in value:
+ _validate_OpenAI_TextResponseFormatJsonSchema_name(value['name'], f"{path}.name", errors)
+ if 'schema' in value:
+ _validate_OpenAI_TextResponseFormatJsonSchema_schema(value['schema'], f"{path}.schema", errors)
+ if 'strict' in value:
+ _validate_CreateResponse_background(value['strict'], f"{path}.strict", errors)
+ if 'type' in value:
+ _validate_OpenAI_TextResponseFormatJsonSchema_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_TextResponseFormatConfigurationResponseFormatText(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'type' in value:
+ _validate_OpenAI_TextResponseFormatConfigurationResponseFormatText_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_ToolChoiceParamType_2(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values, _enum_error = _enum_values('ToolChoiceParamType')
+ if _enum_error is not None:
+ _append_error(errors, path, _enum_error)
+ return
+ if _allowed_values is None:
+ return
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ToolChoiceAllowed_tools_item(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ for _key, _item in value.items():
+ if _key not in ():
+ _validate_CreateResponse_structured_inputs_additional_property(_item, f"{path}.{_key}", errors)
+
+def _validate_OpenAI_Tool_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _validate_OpenAI_ToolType(value, path, errors)
+
+def _validate_A2APreviewTool(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'agent_card_path' in value:
+ _validate_A2APreviewTool_agent_card_path(value['agent_card_path'], f"{path}.agent_card_path", errors)
+ if 'base_url' in value:
+ _validate_A2APreviewTool_base_url(value['base_url'], f"{path}.base_url", errors)
+ if 'description' in value:
+ _validate_A2APreviewTool_description(value['description'], f"{path}.description", errors)
+ if 'name' in value:
+ _validate_A2APreviewTool_name(value['name'], f"{path}.name", errors)
+ if 'project_connection_id' in value:
+ _validate_A2APreviewTool_project_connection_id(value['project_connection_id'], f"{path}.project_connection_id", errors)
+ if 'type' in value:
+ _validate_A2APreviewTool_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_ApplyPatchToolParam(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'type' in value:
+ _validate_OpenAI_ApplyPatchToolParam_type(value['type'], f"{path}.type", errors)
+
+def _validate_AzureAISearchTool(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'azure_ai_search' not in value:
+ _append_error(errors, f"{path}.azure_ai_search", "Required property 'azure_ai_search' is missing")
+ if 'azure_ai_search' in value:
+ _validate_AzureAISearchTool_azure_ai_search(value['azure_ai_search'], f"{path}.azure_ai_search", errors)
+ if 'description' in value:
+ _validate_A2APreviewTool_description(value['description'], f"{path}.description", errors)
+ if 'name' in value:
+ _validate_A2APreviewTool_name(value['name'], f"{path}.name", errors)
+ if 'type' in value:
+ _validate_AzureAISearchTool_type(value['type'], f"{path}.type", errors)
+
+def _validate_AzureFunctionTool(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'azure_function' not in value:
+ _append_error(errors, f"{path}.azure_function", "Required property 'azure_function' is missing")
+ if 'azure_function' in value:
+ _validate_AzureFunctionTool_azure_function(value['azure_function'], f"{path}.azure_function", errors)
+ if 'type' in value:
+ _validate_AzureFunctionTool_type(value['type'], f"{path}.type", errors)
+
+def _validate_BingCustomSearchPreviewTool(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'bing_custom_search_preview' not in value:
+ _append_error(errors, f"{path}.bing_custom_search_preview", "Required property 'bing_custom_search_preview' is missing")
+ if 'bing_custom_search_preview' in value:
+ _validate_BingCustomSearchPreviewTool_bing_custom_search_preview(value['bing_custom_search_preview'], f"{path}.bing_custom_search_preview", errors)
+ if 'description' in value:
+ _validate_A2APreviewTool_description(value['description'], f"{path}.description", errors)
+ if 'name' in value:
+ _validate_A2APreviewTool_name(value['name'], f"{path}.name", errors)
+ if 'type' in value:
+ _validate_BingCustomSearchPreviewTool_type(value['type'], f"{path}.type", errors)
+
+def _validate_BingGroundingTool(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'bing_grounding' not in value:
+ _append_error(errors, f"{path}.bing_grounding", "Required property 'bing_grounding' is missing")
+ if 'bing_grounding' in value:
+ _validate_BingGroundingTool_bing_grounding(value['bing_grounding'], f"{path}.bing_grounding", errors)
+ if 'description' in value:
+ _validate_A2APreviewTool_description(value['description'], f"{path}.description", errors)
+ if 'name' in value:
+ _validate_A2APreviewTool_name(value['name'], f"{path}.name", errors)
+ if 'type' in value:
+ _validate_BingGroundingTool_type(value['type'], f"{path}.type", errors)
+
+def _validate_BrowserAutomationPreviewTool(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'browser_automation_preview' not in value:
+ _append_error(errors, f"{path}.browser_automation_preview", "Required property 'browser_automation_preview' is missing")
+ if 'browser_automation_preview' in value:
+ _validate_BrowserAutomationPreviewTool_browser_automation_preview(value['browser_automation_preview'], f"{path}.browser_automation_preview", errors)
+ if 'description' in value:
+ _validate_A2APreviewTool_description(value['description'], f"{path}.description", errors)
+ if 'name' in value:
+ _validate_A2APreviewTool_name(value['name'], f"{path}.name", errors)
+ if 'type' in value:
+ _validate_BrowserAutomationPreviewTool_type(value['type'], f"{path}.type", errors)
+
+def _validate_CaptureStructuredOutputsTool(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'outputs' not in value:
+ _append_error(errors, f"{path}.outputs", "Required property 'outputs' is missing")
+ if 'outputs' in value:
+ _validate_CaptureStructuredOutputsTool_outputs(value['outputs'], f"{path}.outputs", errors)
+ if 'type' in value:
+ _validate_CaptureStructuredOutputsTool_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_CodeInterpreterTool(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'container' in value:
+ _validate_OpenAI_CodeInterpreterTool_container(value['container'], f"{path}.container", errors)
+ if 'description' in value:
+ _validate_A2APreviewTool_description(value['description'], f"{path}.description", errors)
+ if 'name' in value:
+ _validate_A2APreviewTool_name(value['name'], f"{path}.name", errors)
+ if 'type' in value:
+ _validate_OpenAI_CodeInterpreterTool_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_ComputerUsePreviewTool(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'environment' not in value:
+ _append_error(errors, f"{path}.environment", "Required property 'environment' is missing")
+ if 'display_width' not in value:
+ _append_error(errors, f"{path}.display_width", "Required property 'display_width' is missing")
+ if 'display_height' not in value:
+ _append_error(errors, f"{path}.display_height", "Required property 'display_height' is missing")
+ if 'display_height' in value:
+ _validate_OpenAI_ComputerUsePreviewTool_display_height(value['display_height'], f"{path}.display_height", errors)
+ if 'display_width' in value:
+ _validate_OpenAI_ComputerUsePreviewTool_display_width(value['display_width'], f"{path}.display_width", errors)
+ if 'environment' in value:
+ _validate_OpenAI_ComputerUsePreviewTool_environment(value['environment'], f"{path}.environment", errors)
+ if 'type' in value:
+ _validate_OpenAI_ComputerUsePreviewTool_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_CustomToolParam(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'name' not in value:
+ _append_error(errors, f"{path}.name", "Required property 'name' is missing")
+ if 'description' in value:
+ _validate_OpenAI_CustomToolParam_description(value['description'], f"{path}.description", errors)
+ if 'format' in value:
+ _validate_OpenAI_CustomToolParam_format(value['format'], f"{path}.format", errors)
+ if 'name' in value:
+ _validate_OpenAI_CustomToolParam_name(value['name'], f"{path}.name", errors)
+ if 'type' in value:
+ _validate_OpenAI_CustomToolParam_type(value['type'], f"{path}.type", errors)
+
+def _validate_MicrosoftFabricPreviewTool(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'fabric_dataagent_preview' not in value:
+ _append_error(errors, f"{path}.fabric_dataagent_preview", "Required property 'fabric_dataagent_preview' is missing")
+ if 'description' in value:
+ _validate_A2APreviewTool_description(value['description'], f"{path}.description", errors)
+ if 'fabric_dataagent_preview' in value:
+ _validate_MicrosoftFabricPreviewTool_fabric_dataagent_preview(value['fabric_dataagent_preview'], f"{path}.fabric_dataagent_preview", errors)
+ if 'name' in value:
+ _validate_A2APreviewTool_name(value['name'], f"{path}.name", errors)
+ if 'type' in value:
+ _validate_MicrosoftFabricPreviewTool_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_FileSearchTool(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'vector_store_ids' not in value:
+ _append_error(errors, f"{path}.vector_store_ids", "Required property 'vector_store_ids' is missing")
+ if 'description' in value:
+ _validate_A2APreviewTool_description(value['description'], f"{path}.description", errors)
+ if 'filters' in value:
+ _validate_OpenAI_FileSearchTool_filters(value['filters'], f"{path}.filters", errors)
+ if 'max_num_results' in value:
+ _validate_OpenAI_FileSearchTool_max_num_results(value['max_num_results'], f"{path}.max_num_results", errors)
+ if 'name' in value:
+ _validate_A2APreviewTool_name(value['name'], f"{path}.name", errors)
+ if 'ranking_options' in value:
+ _validate_OpenAI_FileSearchTool_ranking_options(value['ranking_options'], f"{path}.ranking_options", errors)
+ if 'type' in value:
+ _validate_OpenAI_FileSearchTool_type(value['type'], f"{path}.type", errors)
+ if 'vector_store_ids' in value:
+ _validate_OpenAI_FileSearchTool_vector_store_ids(value['vector_store_ids'], f"{path}.vector_store_ids", errors)
+
+def _validate_OpenAI_FunctionTool(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'name' not in value:
+ _append_error(errors, f"{path}.name", "Required property 'name' is missing")
+ if 'description' in value:
+ _validate_CreateResponse_instructions(value['description'], f"{path}.description", errors)
+ if 'name' in value:
+ _validate_OpenAI_ToolChoiceFunction_name(value['name'], f"{path}.name", errors)
+ if 'parameters' in value:
+ _validate_OpenAI_FunctionTool_parameters(value['parameters'], f"{path}.parameters", errors)
+ if 'strict' in value:
+ _validate_CreateResponse_background(value['strict'], f"{path}.strict", errors)
+ if 'type' in value:
+ _validate_OpenAI_FunctionTool_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_ImageGenTool(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'action' in value:
+ _validate_OpenAI_ImageGenTool_action(value['action'], f"{path}.action", errors)
+ if 'background' in value:
+ _validate_OpenAI_ImageGenTool_background(value['background'], f"{path}.background", errors)
+ if 'description' in value:
+ _validate_A2APreviewTool_description(value['description'], f"{path}.description", errors)
+ if 'input_fidelity' in value:
+ _validate_OpenAI_ImageGenTool_input_fidelity(value['input_fidelity'], f"{path}.input_fidelity", errors)
+ if 'input_image_mask' in value:
+ _validate_OpenAI_ImageGenTool_input_image_mask(value['input_image_mask'], f"{path}.input_image_mask", errors)
+ if 'model' in value:
+ _validate_OpenAI_ImageGenTool_model(value['model'], f"{path}.model", errors)
+ if 'moderation' in value:
+ _validate_OpenAI_ImageGenTool_moderation(value['moderation'], f"{path}.moderation", errors)
+ if 'name' in value:
+ _validate_A2APreviewTool_name(value['name'], f"{path}.name", errors)
+ if 'output_compression' in value:
+ _validate_OpenAI_ImageGenTool_output_compression(value['output_compression'], f"{path}.output_compression", errors)
+ if 'output_format' in value:
+ _validate_OpenAI_ImageGenTool_output_format(value['output_format'], f"{path}.output_format", errors)
+ if 'partial_images' in value:
+ _validate_OpenAI_ImageGenTool_partial_images(value['partial_images'], f"{path}.partial_images", errors)
+ if 'quality' in value:
+ _validate_OpenAI_ImageGenTool_quality(value['quality'], f"{path}.quality", errors)
+ if 'size' in value:
+ _validate_OpenAI_ImageGenTool_size(value['size'], f"{path}.size", errors)
+ if 'type' in value:
+ _validate_OpenAI_ImageGenTool_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_LocalShellToolParam(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'description' in value:
+ _validate_A2APreviewTool_description(value['description'], f"{path}.description", errors)
+ if 'name' in value:
+ _validate_A2APreviewTool_name(value['name'], f"{path}.name", errors)
+ if 'type' in value:
+ _validate_OpenAI_LocalShellToolParam_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_MCPTool(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'server_label' not in value:
+ _append_error(errors, f"{path}.server_label", "Required property 'server_label' is missing")
+ if 'allowed_tools' in value:
+ _validate_OpenAI_MCPTool_allowed_tools(value['allowed_tools'], f"{path}.allowed_tools", errors)
+ if 'authorization' in value:
+ _validate_OpenAI_MCPTool_authorization(value['authorization'], f"{path}.authorization", errors)
+ if 'connector_id' in value:
+ _validate_OpenAI_MCPTool_connector_id(value['connector_id'], f"{path}.connector_id", errors)
+ if 'headers' in value:
+ _validate_OpenAI_MCPTool_headers(value['headers'], f"{path}.headers", errors)
+ if 'project_connection_id' in value:
+ _validate_OpenAI_MCPTool_project_connection_id(value['project_connection_id'], f"{path}.project_connection_id", errors)
+ if 'require_approval' in value:
+ _validate_OpenAI_MCPTool_require_approval(value['require_approval'], f"{path}.require_approval", errors)
+ if 'server_description' in value:
+ _validate_OpenAI_MCPTool_server_description(value['server_description'], f"{path}.server_description", errors)
+ if 'server_label' in value:
+ _validate_OpenAI_MCPTool_server_label(value['server_label'], f"{path}.server_label", errors)
+ if 'server_url' in value:
+ _validate_OpenAI_MCPTool_server_url(value['server_url'], f"{path}.server_url", errors)
+ if 'type' in value:
+ _validate_OpenAI_MCPTool_type(value['type'], f"{path}.type", errors)
+
+def _validate_MemorySearchTool(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'memory_store_name' not in value:
+ _append_error(errors, f"{path}.memory_store_name", "Required property 'memory_store_name' is missing")
+ if 'scope' not in value:
+ _append_error(errors, f"{path}.scope", "Required property 'scope' is missing")
+ if 'description' in value:
+ _validate_A2APreviewTool_description(value['description'], f"{path}.description", errors)
+ if 'memory_store_name' in value:
+ _validate_MemorySearchTool_memory_store_name(value['memory_store_name'], f"{path}.memory_store_name", errors)
+ if 'name' in value:
+ _validate_A2APreviewTool_name(value['name'], f"{path}.name", errors)
+ if 'scope' in value:
+ _validate_MemorySearchTool_scope(value['scope'], f"{path}.scope", errors)
+ if 'search_options' in value:
+ _validate_MemorySearchTool_search_options(value['search_options'], f"{path}.search_options", errors)
+ if 'type' in value:
+ _validate_MemorySearchTool_type(value['type'], f"{path}.type", errors)
+ if 'update_delay' in value:
+ _validate_MemorySearchTool_update_delay(value['update_delay'], f"{path}.update_delay", errors)
+
+def _validate_MemorySearchPreviewTool(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'memory_store_name' not in value:
+ _append_error(errors, f"{path}.memory_store_name", "Required property 'memory_store_name' is missing")
+ if 'scope' not in value:
+ _append_error(errors, f"{path}.scope", "Required property 'scope' is missing")
+ if 'description' in value:
+ _validate_A2APreviewTool_description(value['description'], f"{path}.description", errors)
+ if 'memory_store_name' in value:
+ _validate_MemorySearchTool_memory_store_name(value['memory_store_name'], f"{path}.memory_store_name", errors)
+ if 'name' in value:
+ _validate_A2APreviewTool_name(value['name'], f"{path}.name", errors)
+ if 'scope' in value:
+ _validate_MemorySearchTool_scope(value['scope'], f"{path}.scope", errors)
+ if 'search_options' in value:
+ _validate_MemorySearchTool_search_options(value['search_options'], f"{path}.search_options", errors)
+ if 'type' in value:
+ _validate_MemorySearchPreviewTool_type(value['type'], f"{path}.type", errors)
+ if 'update_delay' in value:
+ _validate_MemorySearchTool_update_delay(value['update_delay'], f"{path}.update_delay", errors)
+
+def _validate_OpenApiTool(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'openapi' not in value:
+ _append_error(errors, f"{path}.openapi", "Required property 'openapi' is missing")
+ if 'openapi' in value:
+ _validate_OpenApiTool_openapi(value['openapi'], f"{path}.openapi", errors)
+ if 'type' in value:
+ _validate_OpenApiTool_type(value['type'], f"{path}.type", errors)
+
+def _validate_SharepointPreviewTool(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'sharepoint_grounding_preview' not in value:
+ _append_error(errors, f"{path}.sharepoint_grounding_preview", "Required property 'sharepoint_grounding_preview' is missing")
+ if 'description' in value:
+ _validate_A2APreviewTool_description(value['description'], f"{path}.description", errors)
+ if 'name' in value:
+ _validate_A2APreviewTool_name(value['name'], f"{path}.name", errors)
+ if 'sharepoint_grounding_preview' in value:
+ _validate_SharepointPreviewTool_sharepoint_grounding_preview(value['sharepoint_grounding_preview'], f"{path}.sharepoint_grounding_preview", errors)
+ if 'type' in value:
+ _validate_SharepointPreviewTool_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_FunctionShellToolParam(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'description' in value:
+ _validate_A2APreviewTool_description(value['description'], f"{path}.description", errors)
+ if 'environment' in value:
+ _validate_OpenAI_FunctionShellToolParam_environment(value['environment'], f"{path}.environment", errors)
+ if 'name' in value:
+ _validate_A2APreviewTool_name(value['name'], f"{path}.name", errors)
+ if 'type' in value:
+ _validate_OpenAI_FunctionShellToolParam_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_WebSearchTool(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'custom_search_configuration' in value:
+ _validate_OpenAI_WebSearchTool_custom_search_configuration(value['custom_search_configuration'], f"{path}.custom_search_configuration", errors)
+ if 'description' in value:
+ _validate_A2APreviewTool_description(value['description'], f"{path}.description", errors)
+ if 'filters' in value:
+ _validate_OpenAI_WebSearchTool_filters(value['filters'], f"{path}.filters", errors)
+ if 'name' in value:
+ _validate_A2APreviewTool_name(value['name'], f"{path}.name", errors)
+ if 'search_context_size' in value:
+ _validate_OpenAI_WebSearchTool_search_context_size(value['search_context_size'], f"{path}.search_context_size", errors)
+ if 'type' in value:
+ _validate_OpenAI_WebSearchTool_type(value['type'], f"{path}.type", errors)
+ if 'user_location' in value:
+ _validate_OpenAI_WebSearchTool_user_location(value['user_location'], f"{path}.user_location", errors)
+
+def _validate_OpenAI_WebSearchPreviewTool(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'search_context_size' in value:
+ _validate_OpenAI_WebSearchPreviewTool_search_context_size(value['search_context_size'], f"{path}.search_context_size", errors)
+ if 'type' in value:
+ _validate_OpenAI_WebSearchPreviewTool_type(value['type'], f"{path}.type", errors)
+ if 'user_location' in value:
+ _validate_OpenAI_WebSearchPreviewTool_user_location(value['user_location'], f"{path}.user_location", errors)
+
+def _validate_WorkIQPreviewTool(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'work_iq_preview' not in value:
+ _append_error(errors, f"{path}.work_iq_preview", "Required property 'work_iq_preview' is missing")
+ if 'type' in value:
+ _validate_WorkIQPreviewTool_type(value['type'], f"{path}.type", errors)
+ if 'work_iq_preview' in value:
+ _validate_WorkIQPreviewTool_work_iq_preview(value['work_iq_preview'], f"{path}.work_iq_preview", errors)
+
+def _validate_OpenAI_Item_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _validate_OpenAI_ItemType(value, path, errors)
+
+def _validate_OpenAI_ApplyPatchToolCallItemParam(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'call_id' not in value:
+ _append_error(errors, f"{path}.call_id", "Required property 'call_id' is missing")
+ if 'status' not in value:
+ _append_error(errors, f"{path}.status", "Required property 'status' is missing")
+ if 'operation' not in value:
+ _append_error(errors, f"{path}.operation", "Required property 'operation' is missing")
+ if 'call_id' in value:
+ _validate_OpenAI_ApplyPatchToolCallItemParam_call_id(value['call_id'], f"{path}.call_id", errors)
+ if 'id' in value:
+ _validate_CreateResponse_instructions(value['id'], f"{path}.id", errors)
+ if 'operation' in value:
+ _validate_OpenAI_ApplyPatchToolCallItemParam_operation(value['operation'], f"{path}.operation", errors)
+ if 'status' in value:
+ _validate_OpenAI_ApplyPatchToolCallItemParam_status(value['status'], f"{path}.status", errors)
+ if 'type' in value:
+ _validate_OpenAI_ApplyPatchToolCallItemParam_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_ApplyPatchToolCallOutputItemParam(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'call_id' not in value:
+ _append_error(errors, f"{path}.call_id", "Required property 'call_id' is missing")
+ if 'status' not in value:
+ _append_error(errors, f"{path}.status", "Required property 'status' is missing")
+ if 'call_id' in value:
+ _validate_OpenAI_ApplyPatchToolCallItemParam_call_id(value['call_id'], f"{path}.call_id", errors)
+ if 'id' in value:
+ _validate_CreateResponse_instructions(value['id'], f"{path}.id", errors)
+ if 'output' in value:
+ _validate_CreateResponse_instructions(value['output'], f"{path}.output", errors)
+ if 'status' in value:
+ _validate_OpenAI_ApplyPatchToolCallOutputItemParam_status(value['status'], f"{path}.status", errors)
+ if 'type' in value:
+ _validate_OpenAI_ApplyPatchToolCallOutputItemParam_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_ItemCodeInterpreterToolCall(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'id' not in value:
+ _append_error(errors, f"{path}.id", "Required property 'id' is missing")
+ if 'status' not in value:
+ _append_error(errors, f"{path}.status", "Required property 'status' is missing")
+ if 'container_id' not in value:
+ _append_error(errors, f"{path}.container_id", "Required property 'container_id' is missing")
+ if 'code' not in value:
+ _append_error(errors, f"{path}.code", "Required property 'code' is missing")
+ if 'outputs' not in value:
+ _append_error(errors, f"{path}.outputs", "Required property 'outputs' is missing")
+ if 'code' in value:
+ _validate_CreateResponse_instructions(value['code'], f"{path}.code", errors)
+ if 'container_id' in value:
+ _validate_OpenAI_ItemCodeInterpreterToolCall_container_id(value['container_id'], f"{path}.container_id", errors)
+ if 'id' in value:
+ _validate_OpenAI_ItemCodeInterpreterToolCall_id(value['id'], f"{path}.id", errors)
+ if 'outputs' in value:
+ _validate_OpenAI_ItemCodeInterpreterToolCall_outputs(value['outputs'], f"{path}.outputs", errors)
+ if 'status' in value:
+ _validate_OpenAI_ItemCodeInterpreterToolCall_status(value['status'], f"{path}.status", errors)
+ if 'type' in value:
+ _validate_OpenAI_ItemCodeInterpreterToolCall_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_CompactionSummaryItemParam(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'encrypted_content' not in value:
+ _append_error(errors, f"{path}.encrypted_content", "Required property 'encrypted_content' is missing")
+ if 'encrypted_content' in value:
+ _validate_OpenAI_CompactionSummaryItemParam_encrypted_content(value['encrypted_content'], f"{path}.encrypted_content", errors)
+ if 'id' in value:
+ _validate_CreateResponse_instructions(value['id'], f"{path}.id", errors)
+ if 'type' in value:
+ _validate_OpenAI_CompactionSummaryItemParam_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_ItemComputerToolCall(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'id' not in value:
+ _append_error(errors, f"{path}.id", "Required property 'id' is missing")
+ if 'call_id' not in value:
+ _append_error(errors, f"{path}.call_id", "Required property 'call_id' is missing")
+ if 'action' not in value:
+ _append_error(errors, f"{path}.action", "Required property 'action' is missing")
+ if 'pending_safety_checks' not in value:
+ _append_error(errors, f"{path}.pending_safety_checks", "Required property 'pending_safety_checks' is missing")
+ if 'status' not in value:
+ _append_error(errors, f"{path}.status", "Required property 'status' is missing")
+ if 'action' in value:
+ _validate_OpenAI_ItemComputerToolCall_action(value['action'], f"{path}.action", errors)
+ if 'call_id' in value:
+ _validate_OpenAI_ItemComputerToolCall_call_id(value['call_id'], f"{path}.call_id", errors)
+ if 'id' in value:
+ _validate_OpenAI_ItemComputerToolCall_id(value['id'], f"{path}.id", errors)
+ if 'pending_safety_checks' in value:
+ _validate_OpenAI_ItemComputerToolCall_pending_safety_checks(value['pending_safety_checks'], f"{path}.pending_safety_checks", errors)
+ if 'status' in value:
+ _validate_OpenAI_ItemComputerToolCall_status(value['status'], f"{path}.status", errors)
+ if 'type' in value:
+ _validate_OpenAI_ItemComputerToolCall_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_ComputerCallOutputItemParam(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'call_id' not in value:
+ _append_error(errors, f"{path}.call_id", "Required property 'call_id' is missing")
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'output' not in value:
+ _append_error(errors, f"{path}.output", "Required property 'output' is missing")
+ if 'acknowledged_safety_checks' in value:
+ _validate_OpenAI_ComputerCallOutputItemParam_acknowledged_safety_checks(value['acknowledged_safety_checks'], f"{path}.acknowledged_safety_checks", errors)
+ if 'call_id' in value:
+ _validate_OpenAI_ComputerCallOutputItemParam_call_id(value['call_id'], f"{path}.call_id", errors)
+ if 'id' in value:
+ _validate_CreateResponse_instructions(value['id'], f"{path}.id", errors)
+ if 'output' in value:
+ _validate_OpenAI_ComputerCallOutputItemParam_output(value['output'], f"{path}.output", errors)
+ if 'status' in value:
+ _validate_OpenAI_ComputerCallOutputItemParam_status(value['status'], f"{path}.status", errors)
+ if 'type' in value:
+ _validate_OpenAI_ComputerCallOutputItemParam_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_ItemCustomToolCall(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'call_id' not in value:
+ _append_error(errors, f"{path}.call_id", "Required property 'call_id' is missing")
+ if 'name' not in value:
+ _append_error(errors, f"{path}.name", "Required property 'name' is missing")
+ if 'input' not in value:
+ _append_error(errors, f"{path}.input", "Required property 'input' is missing")
+ if 'call_id' in value:
+ _validate_OpenAI_ItemCustomToolCall_call_id(value['call_id'], f"{path}.call_id", errors)
+ if 'id' in value:
+ _validate_OpenAI_ItemCustomToolCall_id(value['id'], f"{path}.id", errors)
+ if 'input' in value:
+ _validate_OpenAI_ItemCustomToolCall_input(value['input'], f"{path}.input", errors)
+ if 'name' in value:
+ _validate_OpenAI_ItemCustomToolCall_name(value['name'], f"{path}.name", errors)
+ if 'type' in value:
+ _validate_OpenAI_ItemCustomToolCall_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_ItemCustomToolCallOutput(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'call_id' not in value:
+ _append_error(errors, f"{path}.call_id", "Required property 'call_id' is missing")
+ if 'output' not in value:
+ _append_error(errors, f"{path}.output", "Required property 'output' is missing")
+ if 'call_id' in value:
+ _validate_OpenAI_ItemCustomToolCallOutput_call_id(value['call_id'], f"{path}.call_id", errors)
+ if 'id' in value:
+ _validate_OpenAI_ItemCustomToolCallOutput_id(value['id'], f"{path}.id", errors)
+ if 'output' in value:
+ _validate_OpenAI_ItemCustomToolCallOutput_output(value['output'], f"{path}.output", errors)
+ if 'type' in value:
+ _validate_OpenAI_ItemCustomToolCallOutput_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_ItemFileSearchToolCall(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'id' not in value:
+ _append_error(errors, f"{path}.id", "Required property 'id' is missing")
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'status' not in value:
+ _append_error(errors, f"{path}.status", "Required property 'status' is missing")
+ if 'queries' not in value:
+ _append_error(errors, f"{path}.queries", "Required property 'queries' is missing")
+ if 'id' in value:
+ _validate_OpenAI_ItemFileSearchToolCall_id(value['id'], f"{path}.id", errors)
+ if 'queries' in value:
+ _validate_OpenAI_ItemFileSearchToolCall_queries(value['queries'], f"{path}.queries", errors)
+ if 'results' in value:
+ _validate_OpenAI_ItemFileSearchToolCall_results(value['results'], f"{path}.results", errors)
+ if 'status' in value:
+ _validate_OpenAI_ItemFileSearchToolCall_status(value['status'], f"{path}.status", errors)
+ if 'type' in value:
+ _validate_OpenAI_ItemFileSearchToolCall_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_ItemFunctionToolCall(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'call_id' not in value:
+ _append_error(errors, f"{path}.call_id", "Required property 'call_id' is missing")
+ if 'name' not in value:
+ _append_error(errors, f"{path}.name", "Required property 'name' is missing")
+ if 'arguments' not in value:
+ _append_error(errors, f"{path}.arguments", "Required property 'arguments' is missing")
+ if 'arguments' in value:
+ _validate_OpenAI_ItemFunctionToolCall_arguments(value['arguments'], f"{path}.arguments", errors)
+ if 'call_id' in value:
+ _validate_OpenAI_ItemFunctionToolCall_call_id(value['call_id'], f"{path}.call_id", errors)
+ if 'id' in value:
+ _validate_OpenAI_ItemFunctionToolCall_id(value['id'], f"{path}.id", errors)
+ if 'name' in value:
+ _validate_OpenAI_ItemFunctionToolCall_name(value['name'], f"{path}.name", errors)
+ if 'status' in value:
+ _validate_OpenAI_ItemComputerToolCall_status(value['status'], f"{path}.status", errors)
+ if 'type' in value:
+ _validate_OpenAI_ItemFunctionToolCall_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_FunctionCallOutputItemParam(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'call_id' not in value:
+ _append_error(errors, f"{path}.call_id", "Required property 'call_id' is missing")
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'output' not in value:
+ _append_error(errors, f"{path}.output", "Required property 'output' is missing")
+ if 'call_id' in value:
+ _validate_OpenAI_FunctionCallOutputItemParam_call_id(value['call_id'], f"{path}.call_id", errors)
+ if 'id' in value:
+ _validate_CreateResponse_instructions(value['id'], f"{path}.id", errors)
+ if 'output' in value:
+ _validate_OpenAI_FunctionCallOutputItemParam_output(value['output'], f"{path}.output", errors)
+ if 'status' in value:
+ _validate_OpenAI_ComputerCallOutputItemParam_status(value['status'], f"{path}.status", errors)
+ if 'type' in value:
+ _validate_OpenAI_FunctionCallOutputItemParam_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_ItemImageGenToolCall(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'id' not in value:
+ _append_error(errors, f"{path}.id", "Required property 'id' is missing")
+ if 'status' not in value:
+ _append_error(errors, f"{path}.status", "Required property 'status' is missing")
+ if 'result' not in value:
+ _append_error(errors, f"{path}.result", "Required property 'result' is missing")
+ if 'id' in value:
+ _validate_OpenAI_ItemImageGenToolCall_id(value['id'], f"{path}.id", errors)
+ if 'result' in value:
+ _validate_CreateResponse_instructions(value['result'], f"{path}.result", errors)
+ if 'status' in value:
+ _validate_OpenAI_ItemImageGenToolCall_status(value['status'], f"{path}.status", errors)
+ if 'type' in value:
+ _validate_OpenAI_ItemImageGenToolCall_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_ItemReferenceParam(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'id' not in value:
+ _append_error(errors, f"{path}.id", "Required property 'id' is missing")
+ if 'id' in value:
+ _validate_OpenAI_ItemReferenceParam_id(value['id'], f"{path}.id", errors)
+ if 'type' in value:
+ _validate_OpenAI_ItemReferenceParam_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_ItemLocalShellToolCall(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'id' not in value:
+ _append_error(errors, f"{path}.id", "Required property 'id' is missing")
+ if 'call_id' not in value:
+ _append_error(errors, f"{path}.call_id", "Required property 'call_id' is missing")
+ if 'action' not in value:
+ _append_error(errors, f"{path}.action", "Required property 'action' is missing")
+ if 'status' not in value:
+ _append_error(errors, f"{path}.status", "Required property 'status' is missing")
+ if 'action' in value:
+ _validate_OpenAI_ItemLocalShellToolCall_action(value['action'], f"{path}.action", errors)
+ if 'call_id' in value:
+ _validate_OpenAI_ItemLocalShellToolCall_call_id(value['call_id'], f"{path}.call_id", errors)
+ if 'id' in value:
+ _validate_OpenAI_ItemLocalShellToolCall_id(value['id'], f"{path}.id", errors)
+ if 'status' in value:
+ _validate_OpenAI_ItemLocalShellToolCall_status(value['status'], f"{path}.status", errors)
+ if 'type' in value:
+ _validate_OpenAI_ItemLocalShellToolCall_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_ItemLocalShellToolCallOutput(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'id' not in value:
+ _append_error(errors, f"{path}.id", "Required property 'id' is missing")
+ if 'output' not in value:
+ _append_error(errors, f"{path}.output", "Required property 'output' is missing")
+ if 'id' in value:
+ _validate_OpenAI_ItemLocalShellToolCall_call_id(value['id'], f"{path}.id", errors)
+ if 'output' in value:
+ _validate_OpenAI_ItemLocalShellToolCallOutput_output(value['output'], f"{path}.output", errors)
+ if 'status' in value:
+ _validate_OpenAI_ItemLocalShellToolCallOutput_status(value['status'], f"{path}.status", errors)
+ if 'type' in value:
+ _validate_OpenAI_ItemLocalShellToolCallOutput_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_ItemMcpApprovalRequest(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'id' not in value:
+ _append_error(errors, f"{path}.id", "Required property 'id' is missing")
+ if 'server_label' not in value:
+ _append_error(errors, f"{path}.server_label", "Required property 'server_label' is missing")
+ if 'name' not in value:
+ _append_error(errors, f"{path}.name", "Required property 'name' is missing")
+ if 'arguments' not in value:
+ _append_error(errors, f"{path}.arguments", "Required property 'arguments' is missing")
+ if 'arguments' in value:
+ _validate_OpenAI_ItemMcpApprovalRequest_arguments(value['arguments'], f"{path}.arguments", errors)
+ if 'id' in value:
+ _validate_OpenAI_ItemMcpApprovalRequest_id(value['id'], f"{path}.id", errors)
+ if 'name' in value:
+ _validate_OpenAI_ItemMcpApprovalRequest_name(value['name'], f"{path}.name", errors)
+ if 'server_label' in value:
+ _validate_OpenAI_ItemMcpApprovalRequest_server_label(value['server_label'], f"{path}.server_label", errors)
+ if 'type' in value:
+ _validate_OpenAI_ItemMcpApprovalRequest_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_MCPApprovalResponse(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'approval_request_id' not in value:
+ _append_error(errors, f"{path}.approval_request_id", "Required property 'approval_request_id' is missing")
+ if 'approve' not in value:
+ _append_error(errors, f"{path}.approve", "Required property 'approve' is missing")
+ if 'approval_request_id' in value:
+ _validate_OpenAI_MCPApprovalResponse_approval_request_id(value['approval_request_id'], f"{path}.approval_request_id", errors)
+ if 'approve' in value:
+ _validate_OpenAI_MCPApprovalResponse_approve(value['approve'], f"{path}.approve", errors)
+ if 'id' in value:
+ _validate_CreateResponse_instructions(value['id'], f"{path}.id", errors)
+ if 'reason' in value:
+ _validate_CreateResponse_instructions(value['reason'], f"{path}.reason", errors)
+ if 'type' in value:
+ _validate_OpenAI_MCPApprovalResponse_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_ItemMcpToolCall(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'id' not in value:
+ _append_error(errors, f"{path}.id", "Required property 'id' is missing")
+ if 'server_label' not in value:
+ _append_error(errors, f"{path}.server_label", "Required property 'server_label' is missing")
+ if 'name' not in value:
+ _append_error(errors, f"{path}.name", "Required property 'name' is missing")
+ if 'arguments' not in value:
+ _append_error(errors, f"{path}.arguments", "Required property 'arguments' is missing")
+ if 'approval_request_id' in value:
+ _validate_CreateResponse_instructions(value['approval_request_id'], f"{path}.approval_request_id", errors)
+ if 'arguments' in value:
+ _validate_OpenAI_ItemMcpToolCall_arguments(value['arguments'], f"{path}.arguments", errors)
+ if 'error' in value:
+ _validate_OpenAI_ToolChoiceAllowed_tools_item(value['error'], f"{path}.error", errors)
+ if 'id' in value:
+ _validate_OpenAI_ItemMcpToolCall_id(value['id'], f"{path}.id", errors)
+ if 'name' in value:
+ _validate_OpenAI_ItemMcpToolCall_name(value['name'], f"{path}.name", errors)
+ if 'output' in value:
+ _validate_CreateResponse_instructions(value['output'], f"{path}.output", errors)
+ if 'server_label' in value:
+ _validate_OpenAI_ItemMcpToolCall_server_label(value['server_label'], f"{path}.server_label", errors)
+ if 'status' in value:
+ _validate_OpenAI_ItemMcpToolCall_status(value['status'], f"{path}.status", errors)
+ if 'type' in value:
+ _validate_OpenAI_ItemMcpToolCall_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_ItemMcpListTools(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'id' not in value:
+ _append_error(errors, f"{path}.id", "Required property 'id' is missing")
+ if 'server_label' not in value:
+ _append_error(errors, f"{path}.server_label", "Required property 'server_label' is missing")
+ if 'tools' not in value:
+ _append_error(errors, f"{path}.tools", "Required property 'tools' is missing")
+ if 'error' in value:
+ _validate_CreateResponse_instructions(value['error'], f"{path}.error", errors)
+ if 'id' in value:
+ _validate_OpenAI_ItemMcpListTools_id(value['id'], f"{path}.id", errors)
+ if 'server_label' in value:
+ _validate_OpenAI_ItemMcpListTools_server_label(value['server_label'], f"{path}.server_label", errors)
+ if 'tools' in value:
+ _validate_OpenAI_ItemMcpListTools_tools(value['tools'], f"{path}.tools", errors)
+ if 'type' in value:
+ _validate_OpenAI_ItemMcpListTools_type(value['type'], f"{path}.type", errors)
+
+def _validate_MemorySearchToolCallItemParam(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'results' in value:
+ _validate_MemorySearchToolCallItemParam_results(value['results'], f"{path}.results", errors)
+ if 'type' in value:
+ _validate_MemorySearchToolCallItemParam_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_ItemMessage(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'role' not in value:
+ _append_error(errors, f"{path}.role", "Required property 'role' is missing")
+ if 'content' not in value:
+ _append_error(errors, f"{path}.content", "Required property 'content' is missing")
+ if 'content' in value:
+ _validate_OpenAI_ItemMessage_content(value['content'], f"{path}.content", errors)
+ if 'role' in value:
+ _validate_OpenAI_ItemMessage_role(value['role'], f"{path}.role", errors)
+ if 'type' in value:
+ _validate_OpenAI_ItemMessage_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_ItemOutputMessage(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'id' not in value:
+ _append_error(errors, f"{path}.id", "Required property 'id' is missing")
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'role' not in value:
+ _append_error(errors, f"{path}.role", "Required property 'role' is missing")
+ if 'content' not in value:
+ _append_error(errors, f"{path}.content", "Required property 'content' is missing")
+ if 'status' not in value:
+ _append_error(errors, f"{path}.status", "Required property 'status' is missing")
+ if 'content' in value:
+ _validate_OpenAI_ItemOutputMessage_content(value['content'], f"{path}.content", errors)
+ if 'id' in value:
+ _validate_OpenAI_ItemOutputMessage_id(value['id'], f"{path}.id", errors)
+ if 'role' in value:
+ _validate_OpenAI_ItemOutputMessage_role(value['role'], f"{path}.role", errors)
+ if 'status' in value:
+ _validate_OpenAI_ItemOutputMessage_status(value['status'], f"{path}.status", errors)
+ if 'type' in value:
+ _validate_OpenAI_ItemOutputMessage_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_ItemReasoningItem(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'id' not in value:
+ _append_error(errors, f"{path}.id", "Required property 'id' is missing")
+ if 'summary' not in value:
+ _append_error(errors, f"{path}.summary", "Required property 'summary' is missing")
+ if 'content' in value:
+ _validate_OpenAI_ItemReasoningItem_content(value['content'], f"{path}.content", errors)
+ if 'encrypted_content' in value:
+ _validate_CreateResponse_instructions(value['encrypted_content'], f"{path}.encrypted_content", errors)
+ if 'id' in value:
+ _validate_OpenAI_ItemReasoningItem_id(value['id'], f"{path}.id", errors)
+ if 'status' in value:
+ _validate_OpenAI_ItemComputerToolCall_status(value['status'], f"{path}.status", errors)
+ if 'summary' in value:
+ _validate_OpenAI_ItemReasoningItem_summary(value['summary'], f"{path}.summary", errors)
+ if 'type' in value:
+ _validate_OpenAI_ItemReasoningItem_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_FunctionShellCallItemParam(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'call_id' not in value:
+ _append_error(errors, f"{path}.call_id", "Required property 'call_id' is missing")
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'action' not in value:
+ _append_error(errors, f"{path}.action", "Required property 'action' is missing")
+ if 'action' in value:
+ _validate_OpenAI_FunctionShellCallItemParam_action(value['action'], f"{path}.action", errors)
+ if 'call_id' in value:
+ _validate_OpenAI_FunctionShellCallItemParam_call_id(value['call_id'], f"{path}.call_id", errors)
+ if 'environment' in value:
+ _validate_OpenAI_FunctionShellCallItemParam_environment(value['environment'], f"{path}.environment", errors)
+ if 'id' in value:
+ _validate_CreateResponse_instructions(value['id'], f"{path}.id", errors)
+ if 'status' in value:
+ _validate_OpenAI_FunctionShellCallItemParam_status(value['status'], f"{path}.status", errors)
+ if 'type' in value:
+ _validate_OpenAI_FunctionShellCallItemParam_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_FunctionShellCallOutputItemParam(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'call_id' not in value:
+ _append_error(errors, f"{path}.call_id", "Required property 'call_id' is missing")
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'output' not in value:
+ _append_error(errors, f"{path}.output", "Required property 'output' is missing")
+ if 'call_id' in value:
+ _validate_OpenAI_FunctionShellCallItemParam_call_id(value['call_id'], f"{path}.call_id", errors)
+ if 'id' in value:
+ _validate_CreateResponse_instructions(value['id'], f"{path}.id", errors)
+ if 'max_output_length' in value:
+ _validate_CreateResponse_max_output_tokens(value['max_output_length'], f"{path}.max_output_length", errors)
+ if 'output' in value:
+ _validate_OpenAI_FunctionShellCallOutputItemParam_output(value['output'], f"{path}.output", errors)
+ if 'status' in value:
+ _validate_OpenAI_FunctionShellCallItemParam_status(value['status'], f"{path}.status", errors)
+ if 'type' in value:
+ _validate_OpenAI_FunctionShellCallOutputItemParam_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_ItemWebSearchToolCall(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'id' not in value:
+ _append_error(errors, f"{path}.id", "Required property 'id' is missing")
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'status' not in value:
+ _append_error(errors, f"{path}.status", "Required property 'status' is missing")
+ if 'action' not in value:
+ _append_error(errors, f"{path}.action", "Required property 'action' is missing")
+ if 'action' in value:
+ _validate_OpenAI_ItemWebSearchToolCall_action(value['action'], f"{path}.action", errors)
+ if 'id' in value:
+ _validate_OpenAI_ItemWebSearchToolCall_id(value['id'], f"{path}.id", errors)
+ if 'status' in value:
+ _validate_OpenAI_ItemWebSearchToolCall_status(value['status'], f"{path}.status", errors)
+ if 'type' in value:
+ _validate_OpenAI_ItemWebSearchToolCall_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_TextResponseFormatConfigurationType(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _matched_union = False
+ if not _matched_union and _is_type(value, 'string'):
+ _branch_errors_0: list[dict[str, str]] = []
+ _validate_OpenAI_InputParam_string(value, path, _branch_errors_0)
+ if not _branch_errors_0:
+ _matched_union = True
+ if not _matched_union and _is_type(value, 'string'):
+ _branch_errors_1: list[dict[str, str]] = []
+ _validate_OpenAI_TextResponseFormatConfigurationType_2(value, path, _branch_errors_1)
+ if not _branch_errors_1:
+ _matched_union = True
+ if not _matched_union:
+ _append_error(errors, path, f"Expected TextResponseFormatConfigurationType to be a string value, got {_type_label(value)}")
+ return
+
+def _validate_OpenAI_TextResponseFormatConfigurationResponseFormatJsonObject_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('json_object',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_TextResponseFormatJsonSchema_description(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_TextResponseFormatJsonSchema_name(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_TextResponseFormatJsonSchema_schema(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _validate_OpenAI_ResponseFormatJsonSchemaSchema(value, path, errors)
+
+def _validate_OpenAI_TextResponseFormatJsonSchema_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('json_schema',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_TextResponseFormatConfigurationResponseFormatText_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('text',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ToolType(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _matched_union = False
+ if not _matched_union and _is_type(value, 'string'):
+ _branch_errors_0: list[dict[str, str]] = []
+ _validate_OpenAI_InputParam_string(value, path, _branch_errors_0)
+ if not _branch_errors_0:
+ _matched_union = True
+ if not _matched_union and _is_type(value, 'string'):
+ _branch_errors_1: list[dict[str, str]] = []
+ _validate_OpenAI_ToolType_2(value, path, _branch_errors_1)
+ if not _branch_errors_1:
+ _matched_union = True
+ if not _matched_union:
+ _append_error(errors, path, f"Expected ToolType to be a string value, got {_type_label(value)}")
+ return
+
+def _validate_A2APreviewTool_agent_card_path(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_A2APreviewTool_base_url(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_A2APreviewTool_description(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_A2APreviewTool_name(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_A2APreviewTool_project_connection_id(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_A2APreviewTool_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('a2a_preview',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ApplyPatchToolParam_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('apply_patch',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_AzureAISearchTool_azure_ai_search(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ return
+
+def _validate_AzureAISearchTool_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('azure_ai_search',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_AzureFunctionTool_azure_function(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ return
+
+def _validate_AzureFunctionTool_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('azure_function',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_BingCustomSearchPreviewTool_bing_custom_search_preview(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ return
+
+def _validate_BingCustomSearchPreviewTool_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('bing_custom_search_preview',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_BingGroundingTool_bing_grounding(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ return
+
+def _validate_BingGroundingTool_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('bing_grounding',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_BrowserAutomationPreviewTool_browser_automation_preview(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ return
+
+def _validate_BrowserAutomationPreviewTool_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('browser_automation_preview',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_CaptureStructuredOutputsTool_outputs(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ return
+
+def _validate_CaptureStructuredOutputsTool_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('capture_structured_outputs',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_CodeInterpreterTool_container(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _matched_union = False
+ if not _matched_union and _is_type(value, 'string'):
+ _branch_errors_0: list[dict[str, str]] = []
+ _validate_OpenAI_InputParam_string(value, path, _branch_errors_0)
+ if not _branch_errors_0:
+ _matched_union = True
+ if not _matched_union and _is_type(value, 'object'):
+ _branch_errors_1: list[dict[str, str]] = []
+ _validate_OpenAI_AutoCodeInterpreterToolParam(value, path, _branch_errors_1)
+ if not _branch_errors_1:
+ _matched_union = True
+ if not _matched_union:
+ _append_error(errors, path, f"Expected one of: string, OpenAI.AutoCodeInterpreterToolParam; got {_type_label(value)}")
+ return
+
+def _validate_OpenAI_CodeInterpreterTool_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('code_interpreter',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ComputerUsePreviewTool_display_height(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'integer'):
+ _append_type_mismatch(errors, path, 'integer', value)
+ return
+
+def _validate_OpenAI_ComputerUsePreviewTool_display_width(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'integer'):
+ _append_type_mismatch(errors, path, 'integer', value)
+ return
+
+def _validate_OpenAI_ComputerUsePreviewTool_environment(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ return
+
+def _validate_OpenAI_ComputerUsePreviewTool_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('computer_use_preview',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_CustomToolParam_description(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_CustomToolParam_format(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ return
+
+def _validate_OpenAI_CustomToolParam_name(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_CustomToolParam_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('custom',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_MicrosoftFabricPreviewTool_fabric_dataagent_preview(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ return
+
+def _validate_MicrosoftFabricPreviewTool_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('fabric_dataagent_preview',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_FileSearchTool_filters(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if value is None:
+ return
+
+def _validate_OpenAI_FileSearchTool_max_num_results(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'integer'):
+ _append_type_mismatch(errors, path, 'integer', value)
+ return
+
+def _validate_OpenAI_FileSearchTool_ranking_options(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ return
+
+def _validate_OpenAI_FileSearchTool_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('file_search',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_FileSearchTool_vector_store_ids(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'array'):
+ _append_type_mismatch(errors, path, 'array', value)
+ return
+ for _idx, _item in enumerate(value):
+ _validate_OpenAI_InputParam_string(_item, f"{path}[{_idx}]", errors)
+
+def _validate_OpenAI_FunctionTool_parameters(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if value is None:
+ return
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ for _key, _item in value.items():
+ if _key not in ():
+ _validate_CreateResponse_structured_inputs_additional_property(_item, f"{path}.{_key}", errors)
+
+def _validate_OpenAI_FunctionTool_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('function',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ImageGenTool_action(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ return
+
+def _validate_OpenAI_ImageGenTool_background(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('transparent', 'opaque', 'auto')
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ImageGenTool_input_fidelity(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if value is None:
+ return
+
+def _validate_OpenAI_ImageGenTool_input_image_mask(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ return
+
+def _validate_OpenAI_ImageGenTool_model(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _matched_union = False
+ if not _matched_union and _is_type(value, 'string'):
+ _branch_errors_0: list[dict[str, str]] = []
+ _validate_OpenAI_InputParam_string(value, path, _branch_errors_0)
+ if not _branch_errors_0:
+ _matched_union = True
+ if not _matched_union and _is_type(value, 'string'):
+ _branch_errors_1: list[dict[str, str]] = []
+ _validate_OpenAI_ImageGenTool_model_2(value, path, _branch_errors_1)
+ if not _branch_errors_1:
+ _matched_union = True
+ if not _matched_union:
+ _append_error(errors, path, f"Expected ImageGenTool_model to be a string value, got {_type_label(value)}")
+ return
+
+def _validate_OpenAI_ImageGenTool_moderation(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('auto', 'low')
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ImageGenTool_output_compression(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'integer'):
+ _append_type_mismatch(errors, path, 'integer', value)
+ return
+
+def _validate_OpenAI_ImageGenTool_output_format(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('png', 'webp', 'jpeg')
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ImageGenTool_partial_images(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'integer'):
+ _append_type_mismatch(errors, path, 'integer', value)
+ return
+
+def _validate_OpenAI_ImageGenTool_quality(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('low', 'medium', 'high', 'auto')
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ImageGenTool_size(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('1024x1024', '1024x1536', '1536x1024', 'auto')
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ImageGenTool_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('image_generation',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_LocalShellToolParam_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('local_shell',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_MCPTool_allowed_tools(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _matched_union = False
+ if not _matched_union and _is_type(value, 'array'):
+ _branch_errors_0: list[dict[str, str]] = []
+ _validate_OpenAI_MCPTool_allowed_tools_array(value, path, _branch_errors_0)
+ if not _branch_errors_0:
+ _matched_union = True
+ if not _matched_union and _is_type(value, 'object'):
+ _branch_errors_1: list[dict[str, str]] = []
+ _validate_OpenAI_MCPTool_allowed_tools_object(value, path, _branch_errors_1)
+ if not _branch_errors_1:
+ _matched_union = True
+ if not _matched_union:
+ _append_error(errors, path, f"Expected one of: array, object; got {_type_label(value)}")
+ return
+
+def _validate_OpenAI_MCPTool_authorization(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_MCPTool_connector_id(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('connector_dropbox', 'connector_gmail', 'connector_googlecalendar', 'connector_googledrive', 'connector_microsoftteams', 'connector_outlookcalendar', 'connector_outlookemail', 'connector_sharepoint')
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_MCPTool_headers(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if value is None:
+ return
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ for _key, _item in value.items():
+ if _key not in ():
+ _validate_OpenAI_InputParam_string(_item, f"{path}.{_key}", errors)
+
+def _validate_OpenAI_MCPTool_project_connection_id(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_MCPTool_require_approval(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _matched_union = False
+ if not _matched_union and _is_type(value, 'object'):
+ _branch_errors_0: list[dict[str, str]] = []
+ _validate_OpenAI_MCPTool_require_approval_object(value, path, _branch_errors_0)
+ if not _branch_errors_0:
+ _matched_union = True
+ if not _matched_union and _is_type(value, 'string'):
+ _branch_errors_1: list[dict[str, str]] = []
+ _validate_OpenAI_MCPTool_require_approval_2(value, path, _branch_errors_1)
+ if not _branch_errors_1:
+ _matched_union = True
+ if not _matched_union:
+ _append_error(errors, path, f"Expected one of: object, string; got {_type_label(value)}")
+ return
+
+def _validate_OpenAI_MCPTool_server_description(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_MCPTool_server_label(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_MCPTool_server_url(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_MCPTool_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('mcp',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_MemorySearchTool_memory_store_name(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_MemorySearchTool_scope(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_MemorySearchTool_search_options(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ return
+
+def _validate_MemorySearchTool_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('memory_search',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_MemorySearchTool_update_delay(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'integer'):
+ _append_type_mismatch(errors, path, 'integer', value)
+ return
+
+def _validate_MemorySearchPreviewTool_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('memory_search_preview',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenApiTool_openapi(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ return
+
+def _validate_OpenApiTool_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('openapi',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_SharepointPreviewTool_sharepoint_grounding_preview(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ return
+
+def _validate_SharepointPreviewTool_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('sharepoint_grounding_preview',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_FunctionShellToolParam_environment(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if value is None:
+ return
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+
+def _validate_OpenAI_FunctionShellToolParam_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('shell',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_WebSearchTool_custom_search_configuration(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ return
+
+def _validate_OpenAI_WebSearchTool_filters(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if value is None:
+ return
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+
+def _validate_OpenAI_WebSearchTool_search_context_size(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('low', 'medium', 'high')
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_WebSearchTool_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('web_search',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_WebSearchTool_user_location(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if value is None:
+ return
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+
+def _validate_OpenAI_WebSearchPreviewTool_search_context_size(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ return
+
+def _validate_OpenAI_WebSearchPreviewTool_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('web_search_preview',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_WebSearchPreviewTool_user_location(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if value is None:
+ return
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+
+def _validate_WorkIQPreviewTool_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('work_iq_preview',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_WorkIQPreviewTool_work_iq_preview(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ return
+
+def _validate_OpenAI_ItemType(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _matched_union = False
+ if not _matched_union and _is_type(value, 'string'):
+ _branch_errors_0: list[dict[str, str]] = []
+ _validate_OpenAI_InputParam_string(value, path, _branch_errors_0)
+ if not _branch_errors_0:
+ _matched_union = True
+ if not _matched_union and _is_type(value, 'string'):
+ _branch_errors_1: list[dict[str, str]] = []
+ _validate_OpenAI_ItemType_2(value, path, _branch_errors_1)
+ if not _branch_errors_1:
+ _matched_union = True
+ if not _matched_union:
+ _append_error(errors, path, f"Expected ItemType to be a string value, got {_type_label(value)}")
+ return
+
+def _validate_OpenAI_ApplyPatchToolCallItemParam_call_id(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ApplyPatchToolCallItemParam_operation(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ return
+
+def _validate_OpenAI_ApplyPatchToolCallItemParam_status(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ return
+
+def _validate_OpenAI_ApplyPatchToolCallItemParam_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('apply_patch_call',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ApplyPatchToolCallOutputItemParam_status(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ return
+
+def _validate_OpenAI_ApplyPatchToolCallOutputItemParam_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('apply_patch_call_output',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemCodeInterpreterToolCall_container_id(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemCodeInterpreterToolCall_id(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemCodeInterpreterToolCall_outputs(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if value is None:
+ return
+ if not _is_type(value, 'array'):
+ _append_type_mismatch(errors, path, 'array', value)
+ return
+ for _idx, _item in enumerate(value):
+ _validate_OpenAI_ItemCodeInterpreterToolCall_outputs_item(_item, f"{path}[{_idx}]", errors)
+
+def _validate_OpenAI_ItemCodeInterpreterToolCall_status(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('in_progress', 'completed', 'incomplete', 'interpreting', 'failed')
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemCodeInterpreterToolCall_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('code_interpreter_call',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_CompactionSummaryItemParam_encrypted_content(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_CompactionSummaryItemParam_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('compaction',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemComputerToolCall_action(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _validate_OpenAI_ComputerAction(value, path, errors)
+
+def _validate_OpenAI_ItemComputerToolCall_call_id(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemComputerToolCall_id(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemComputerToolCall_pending_safety_checks(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'array'):
+ _append_type_mismatch(errors, path, 'array', value)
+ return
+ for _idx, _item in enumerate(value):
+ _validate_OpenAI_ItemComputerToolCall_pending_safety_checks_item(_item, f"{path}[{_idx}]", errors)
+
+def _validate_OpenAI_ItemComputerToolCall_status(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('in_progress', 'completed', 'incomplete')
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemComputerToolCall_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('computer_call',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ComputerCallOutputItemParam_acknowledged_safety_checks(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if value is None:
+ return
+ if not _is_type(value, 'array'):
+ _append_type_mismatch(errors, path, 'array', value)
+ return
+ for _idx, _item in enumerate(value):
+ _validate_OpenAI_ItemComputerToolCall_pending_safety_checks_item(_item, f"{path}[{_idx}]", errors)
+
+def _validate_OpenAI_ComputerCallOutputItemParam_call_id(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ComputerCallOutputItemParam_output(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _validate_OpenAI_ComputerScreenshotImage(value, path, errors)
+
+def _validate_OpenAI_ComputerCallOutputItemParam_status(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if value is None:
+ return
+
+def _validate_OpenAI_ComputerCallOutputItemParam_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('computer_call_output',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemCustomToolCall_call_id(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemCustomToolCall_id(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemCustomToolCall_input(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemCustomToolCall_name(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemCustomToolCall_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('custom_tool_call',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemCustomToolCallOutput_call_id(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemCustomToolCallOutput_id(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemCustomToolCallOutput_output(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _matched_union = False
+ if not _matched_union and _is_type(value, 'string'):
+ _branch_errors_0: list[dict[str, str]] = []
+ _validate_OpenAI_InputParam_string(value, path, _branch_errors_0)
+ if not _branch_errors_0:
+ _matched_union = True
+ if not _matched_union and _is_type(value, 'array'):
+ _branch_errors_1: list[dict[str, str]] = []
+ _validate_OpenAI_ItemCustomToolCallOutput_output_array(value, path, _branch_errors_1)
+ if not _branch_errors_1:
+ _matched_union = True
+ if not _matched_union:
+ _append_error(errors, path, f"Expected one of: string, array; got {_type_label(value)}")
+ return
+
+def _validate_OpenAI_ItemCustomToolCallOutput_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('custom_tool_call_output',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemFileSearchToolCall_id(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemFileSearchToolCall_queries(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'array'):
+ _append_type_mismatch(errors, path, 'array', value)
+ return
+ for _idx, _item in enumerate(value):
+ _validate_OpenAI_InputParam_string(_item, f"{path}[{_idx}]", errors)
+
+def _validate_OpenAI_ItemFileSearchToolCall_results(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if value is None:
+ return
+ if not _is_type(value, 'array'):
+ _append_type_mismatch(errors, path, 'array', value)
+ return
+ for _idx, _item in enumerate(value):
+ _validate_OpenAI_ItemFileSearchToolCall_results_item(_item, f"{path}[{_idx}]", errors)
+
+def _validate_OpenAI_ItemFileSearchToolCall_status(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('in_progress', 'searching', 'completed', 'incomplete', 'failed')
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemFileSearchToolCall_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('file_search_call',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemFunctionToolCall_arguments(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemFunctionToolCall_call_id(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemFunctionToolCall_id(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemFunctionToolCall_name(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemFunctionToolCall_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('function_call',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_FunctionCallOutputItemParam_call_id(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_FunctionCallOutputItemParam_output(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _matched_union = False
+ if not _matched_union and _is_type(value, 'string'):
+ _branch_errors_0: list[dict[str, str]] = []
+ _validate_OpenAI_InputParam_string(value, path, _branch_errors_0)
+ if not _branch_errors_0:
+ _matched_union = True
+ if not _matched_union and _is_type(value, 'array'):
+ _branch_errors_1: list[dict[str, str]] = []
+ _validate_OpenAI_FunctionCallOutputItemParam_output_array(value, path, _branch_errors_1)
+ if not _branch_errors_1:
+ _matched_union = True
+ if not _matched_union:
+ _append_error(errors, path, f"Expected one of: string, array; got {_type_label(value)}")
+ return
+
+def _validate_OpenAI_FunctionCallOutputItemParam_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('function_call_output',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemImageGenToolCall_id(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemImageGenToolCall_status(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('in_progress', 'completed', 'generating', 'failed')
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemImageGenToolCall_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('image_generation_call',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemReferenceParam_id(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemReferenceParam_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('item_reference',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemLocalShellToolCall_action(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _validate_OpenAI_LocalShellExecAction(value, path, errors)
+
+def _validate_OpenAI_ItemLocalShellToolCall_call_id(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemLocalShellToolCall_id(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemLocalShellToolCall_status(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('in_progress', 'completed', 'incomplete')
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemLocalShellToolCall_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('local_shell_call',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemLocalShellToolCallOutput_output(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemLocalShellToolCallOutput_status(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if value is None:
+ return
+ _allowed_values = ('in_progress', 'completed', 'incomplete')
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemLocalShellToolCallOutput_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('local_shell_call_output',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemMcpApprovalRequest_arguments(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemMcpApprovalRequest_id(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemMcpApprovalRequest_name(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemMcpApprovalRequest_server_label(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemMcpApprovalRequest_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('mcp_approval_request',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_MCPApprovalResponse_approval_request_id(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_MCPApprovalResponse_approve(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'boolean'):
+ _append_type_mismatch(errors, path, 'boolean', value)
+ return
+
+def _validate_OpenAI_MCPApprovalResponse_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('mcp_approval_response',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemMcpToolCall_arguments(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemMcpToolCall_id(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemMcpToolCall_name(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemMcpToolCall_server_label(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemMcpToolCall_status(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ return
+
+def _validate_OpenAI_ItemMcpToolCall_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('mcp_call',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemMcpListTools_id(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemMcpListTools_server_label(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemMcpListTools_tools(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'array'):
+ _append_type_mismatch(errors, path, 'array', value)
+ return
+ for _idx, _item in enumerate(value):
+ _validate_OpenAI_ItemMcpListTools_tools_item(_item, f"{path}[{_idx}]", errors)
+
+def _validate_OpenAI_ItemMcpListTools_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('mcp_list_tools',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_MemorySearchToolCallItemParam_results(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if value is None:
+ return
+ if not _is_type(value, 'array'):
+ _append_type_mismatch(errors, path, 'array', value)
+ return
+ for _idx, _item in enumerate(value):
+ _validate_MemorySearchToolCallItemParam_results_item(_item, f"{path}[{_idx}]", errors)
+
+def _validate_MemorySearchToolCallItemParam_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('memory_search_call',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemMessage_content(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _matched_union = False
+ if not _matched_union and _is_type(value, 'string'):
+ _branch_errors_0: list[dict[str, str]] = []
+ _validate_OpenAI_InputParam_string(value, path, _branch_errors_0)
+ if not _branch_errors_0:
+ _matched_union = True
+ if not _matched_union and _is_type(value, 'array'):
+ _branch_errors_1: list[dict[str, str]] = []
+ _validate_OpenAI_ItemMessage_content_array(value, path, _branch_errors_1)
+ if not _branch_errors_1:
+ _matched_union = True
+ if not _matched_union:
+ _append_error(errors, path, f"Expected one of: string, array; got {_type_label(value)}")
+ return
+
+def _validate_OpenAI_ItemMessage_role(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ return
+
+def _validate_OpenAI_ItemMessage_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if value is None:
+ return
+ _allowed_values = ('message',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemOutputMessage_content(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'array'):
+ _append_type_mismatch(errors, path, 'array', value)
+ return
+ for _idx, _item in enumerate(value):
+ _validate_OpenAI_ItemOutputMessage_content_item(_item, f"{path}[{_idx}]", errors)
+
+def _validate_OpenAI_ItemOutputMessage_id(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemOutputMessage_role(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('assistant',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemOutputMessage_status(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('in_progress', 'completed', 'incomplete')
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemOutputMessage_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('output_message',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemReasoningItem_content(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'array'):
+ _append_type_mismatch(errors, path, 'array', value)
+ return
+ for _idx, _item in enumerate(value):
+ _validate_OpenAI_ItemReasoningItem_content_item(_item, f"{path}[{_idx}]", errors)
+
+def _validate_OpenAI_ItemReasoningItem_id(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemReasoningItem_summary(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'array'):
+ _append_type_mismatch(errors, path, 'array', value)
+ return
+ for _idx, _item in enumerate(value):
+ _validate_OpenAI_ItemReasoningItem_summary_item(_item, f"{path}[{_idx}]", errors)
+
+def _validate_OpenAI_ItemReasoningItem_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('reasoning',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_FunctionShellCallItemParam_action(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ return
+
+def _validate_OpenAI_FunctionShellCallItemParam_call_id(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_FunctionShellCallItemParam_environment(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if value is None:
+ return
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+
+def _validate_OpenAI_FunctionShellCallItemParam_status(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if value is None:
+ return
+
+def _validate_OpenAI_FunctionShellCallItemParam_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('shell_call',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_FunctionShellCallOutputItemParam_output(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'array'):
+ _append_type_mismatch(errors, path, 'array', value)
+ return
+ for _idx, _item in enumerate(value):
+ _validate_OpenAI_FunctionShellCallOutputItemParam_output_item(_item, f"{path}[{_idx}]", errors)
+
+def _validate_OpenAI_FunctionShellCallOutputItemParam_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('shell_call_output',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemWebSearchToolCall_action(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _matched_union = False
+ if not _matched_union and _is_type(value, 'object'):
+ _branch_errors_0: list[dict[str, str]] = []
+ _validate_OpenAI_WebSearchActionSearch(value, path, _branch_errors_0)
+ if not _branch_errors_0:
+ _matched_union = True
+ if not _matched_union and _is_type(value, 'object'):
+ _branch_errors_1: list[dict[str, str]] = []
+ _validate_OpenAI_WebSearchActionOpenPage(value, path, _branch_errors_1)
+ if not _branch_errors_1:
+ _matched_union = True
+ if not _matched_union and _is_type(value, 'object'):
+ _branch_errors_2: list[dict[str, str]] = []
+ _validate_OpenAI_WebSearchActionFind(value, path, _branch_errors_2)
+ if not _branch_errors_2:
+ _matched_union = True
+ if not _matched_union:
+ _append_error(errors, path, f"Expected one of: OpenAI.WebSearchActionSearch, OpenAI.WebSearchActionOpenPage, OpenAI.WebSearchActionFind; got {_type_label(value)}")
+ return
+
+def _validate_OpenAI_ItemWebSearchToolCall_id(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemWebSearchToolCall_status(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('in_progress', 'searching', 'completed', 'failed')
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemWebSearchToolCall_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('web_search_call',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_TextResponseFormatConfigurationType_2(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values, _enum_error = _enum_values('TextResponseFormatConfigurationType')
+ if _enum_error is not None:
+ _append_error(errors, path, _enum_error)
+ return
+ if _allowed_values is None:
+ return
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ResponseFormatJsonSchemaSchema(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ for _key, _item in value.items():
+ if _key not in ():
+ _validate_CreateResponse_structured_inputs_additional_property(_item, f"{path}.{_key}", errors)
+
+def _validate_OpenAI_ToolType_2(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values, _enum_error = _enum_values('ToolType')
+ if _enum_error is not None:
+ _append_error(errors, path, _enum_error)
+ return
+ if _allowed_values is None:
+ return
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_AutoCodeInterpreterToolParam(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'file_ids' in value:
+ _validate_OpenAI_AutoCodeInterpreterToolParam_file_ids(value['file_ids'], f"{path}.file_ids", errors)
+ if 'memory_limit' in value:
+ _validate_OpenAI_AutoCodeInterpreterToolParam_memory_limit(value['memory_limit'], f"{path}.memory_limit", errors)
+ if 'network_policy' in value:
+ _validate_OpenAI_AutoCodeInterpreterToolParam_network_policy(value['network_policy'], f"{path}.network_policy", errors)
+ if 'type' in value:
+ _validate_OpenAI_AutoCodeInterpreterToolParam_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_ImageGenTool_model_2(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('gpt-image-1', 'gpt-image-1-mini', 'gpt-image-1.5')
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_MCPTool_allowed_tools_array(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if value is None:
+ return
+ if not _is_type(value, 'array'):
+ _append_type_mismatch(errors, path, 'array', value)
+ return
+ for _idx, _item in enumerate(value):
+ _validate_OpenAI_InputParam_string(_item, f"{path}[{_idx}]", errors)
+
+def _validate_OpenAI_MCPTool_allowed_tools_object(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if value is None:
+ return
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+
+def _validate_OpenAI_MCPTool_require_approval_object(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if value is None:
+ return
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+
+def _validate_OpenAI_MCPTool_require_approval_2(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if value is None:
+ return
+ _allowed_values = ('always', 'never')
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemType_2(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values, _enum_error = _enum_values('ItemType')
+ if _enum_error is not None:
+ _append_error(errors, path, _enum_error)
+ return
+ if _allowed_values is None:
+ return
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemCodeInterpreterToolCall_outputs_item(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _matched_union = False
+ if not _matched_union and _is_type(value, 'object'):
+ _branch_errors_0: list[dict[str, str]] = []
+ _validate_OpenAI_CodeInterpreterOutputLogs(value, path, _branch_errors_0)
+ if not _branch_errors_0:
+ _matched_union = True
+ if not _matched_union and _is_type(value, 'object'):
+ _branch_errors_1: list[dict[str, str]] = []
+ _validate_OpenAI_CodeInterpreterOutputImage(value, path, _branch_errors_1)
+ if not _branch_errors_1:
+ _matched_union = True
+ if not _matched_union:
+ _append_error(errors, path, f"Expected one of: OpenAI.CodeInterpreterOutputLogs, OpenAI.CodeInterpreterOutputImage; got {_type_label(value)}")
+ return
+
+def _validate_OpenAI_ComputerAction(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'type' in value:
+ _validate_OpenAI_ComputerAction_type(value['type'], f"{path}.type", errors)
+ _disc_value = value.get('type')
+ if not isinstance(_disc_value, str):
+ _append_error(errors, f"{path}.type", "Required discriminator 'type' is missing or invalid")
+ return
+ if _disc_value == 'click':
+ _validate_OpenAI_ClickParam(value, path, errors)
+ if _disc_value == 'double_click':
+ _validate_OpenAI_DoubleClickAction(value, path, errors)
+ if _disc_value == 'drag':
+ _validate_OpenAI_DragParam(value, path, errors)
+ if _disc_value == 'keypress':
+ _validate_OpenAI_KeyPressAction(value, path, errors)
+ if _disc_value == 'move':
+ _validate_OpenAI_MoveParam(value, path, errors)
+ if _disc_value == 'screenshot':
+ _validate_OpenAI_ScreenshotParam(value, path, errors)
+ if _disc_value == 'scroll':
+ _validate_OpenAI_ScrollParam(value, path, errors)
+ if _disc_value == 'type':
+ _validate_OpenAI_TypeParam(value, path, errors)
+ if _disc_value == 'wait':
+ _validate_OpenAI_WaitParam(value, path, errors)
+
+def _validate_OpenAI_ItemComputerToolCall_pending_safety_checks_item(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _validate_OpenAI_ComputerCallSafetyCheckParam(value, path, errors)
+
+def _validate_OpenAI_ComputerScreenshotImage(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'file_id' in value:
+ _validate_OpenAI_ComputerScreenshotImage_file_id(value['file_id'], f"{path}.file_id", errors)
+ if 'image_url' in value:
+ _validate_OpenAI_ComputerScreenshotImage_image_url(value['image_url'], f"{path}.image_url", errors)
+ if 'type' in value:
+ _validate_OpenAI_ComputerScreenshotImage_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_ItemCustomToolCallOutput_output_array(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'array'):
+ _append_type_mismatch(errors, path, 'array', value)
+ return
+ for _idx, _item in enumerate(value):
+ _validate_OpenAI_ItemCustomToolCallOutput_output_array_item(_item, f"{path}[{_idx}]", errors)
+
+def _validate_OpenAI_ItemFileSearchToolCall_results_item(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _validate_OpenAI_FileSearchToolCallResults(value, path, errors)
+
+def _validate_OpenAI_FunctionCallOutputItemParam_output_array(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'array'):
+ _append_type_mismatch(errors, path, 'array', value)
+ return
+ for _idx, _item in enumerate(value):
+ _validate_OpenAI_FunctionCallOutputItemParam_output_array_item(_item, f"{path}[{_idx}]", errors)
+
+def _validate_OpenAI_LocalShellExecAction(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'command' not in value:
+ _append_error(errors, f"{path}.command", "Required property 'command' is missing")
+ if 'env' not in value:
+ _append_error(errors, f"{path}.env", "Required property 'env' is missing")
+ if 'command' in value:
+ _validate_OpenAI_LocalShellExecAction_command(value['command'], f"{path}.command", errors)
+ if 'env' in value:
+ _validate_OpenAI_LocalShellExecAction_env(value['env'], f"{path}.env", errors)
+ if 'timeout_ms' in value:
+ _validate_CreateResponse_max_output_tokens(value['timeout_ms'], f"{path}.timeout_ms", errors)
+ if 'type' in value:
+ _validate_OpenAI_LocalShellExecAction_type(value['type'], f"{path}.type", errors)
+ if 'user' in value:
+ _validate_CreateResponse_instructions(value['user'], f"{path}.user", errors)
+ if 'working_directory' in value:
+ _validate_CreateResponse_instructions(value['working_directory'], f"{path}.working_directory", errors)
+
+def _validate_OpenAI_ItemMcpListTools_tools_item(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _validate_OpenAI_MCPListToolsTool(value, path, errors)
+
+def _validate_MemorySearchToolCallItemParam_results_item(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _validate_MemorySearchItem(value, path, errors)
+
+def _validate_OpenAI_ItemMessage_content_array(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'array'):
+ _append_type_mismatch(errors, path, 'array', value)
+ return
+ for _idx, _item in enumerate(value):
+ _validate_OpenAI_ItemMessage_content_array_item(_item, f"{path}[{_idx}]", errors)
+
+def _validate_OpenAI_ItemOutputMessage_content_item(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _validate_OpenAI_OutputMessageContent(value, path, errors)
+
+def _validate_OpenAI_ItemReasoningItem_content_item(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _validate_OpenAI_ReasoningTextContent(value, path, errors)
+
+def _validate_OpenAI_ItemReasoningItem_summary_item(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _validate_OpenAI_SummaryTextContent(value, path, errors)
+
+def _validate_OpenAI_FunctionShellCallOutputItemParam_output_item(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _validate_OpenAI_FunctionShellCallOutputContentParam(value, path, errors)
+
+def _validate_OpenAI_WebSearchActionSearch(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'query' not in value:
+ _append_error(errors, f"{path}.query", "Required property 'query' is missing")
+ if 'queries' in value:
+ _validate_OpenAI_WebSearchActionSearch_queries(value['queries'], f"{path}.queries", errors)
+ if 'query' in value:
+ _validate_OpenAI_WebSearchActionSearch_query(value['query'], f"{path}.query", errors)
+ if 'sources' in value:
+ _validate_OpenAI_WebSearchActionSearch_sources(value['sources'], f"{path}.sources", errors)
+ if 'type' in value:
+ _validate_OpenAI_WebSearchActionSearch_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_WebSearchActionOpenPage(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'type' in value:
+ _validate_OpenAI_WebSearchActionOpenPage_type(value['type'], f"{path}.type", errors)
+ if 'url' in value:
+ _validate_OpenAI_WebSearchActionOpenPage_url(value['url'], f"{path}.url", errors)
+
+def _validate_OpenAI_WebSearchActionFind(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'url' not in value:
+ _append_error(errors, f"{path}.url", "Required property 'url' is missing")
+ if 'pattern' not in value:
+ _append_error(errors, f"{path}.pattern", "Required property 'pattern' is missing")
+ if 'pattern' in value:
+ _validate_OpenAI_WebSearchActionFind_pattern(value['pattern'], f"{path}.pattern", errors)
+ if 'type' in value:
+ _validate_OpenAI_WebSearchActionFind_type(value['type'], f"{path}.type", errors)
+ if 'url' in value:
+ _validate_OpenAI_WebSearchActionFind_url(value['url'], f"{path}.url", errors)
+
+def _validate_OpenAI_AutoCodeInterpreterToolParam_file_ids(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'array'):
+ _append_type_mismatch(errors, path, 'array', value)
+ return
+ for _idx, _item in enumerate(value):
+ _validate_OpenAI_InputParam_string(_item, f"{path}[{_idx}]", errors)
+
+def _validate_OpenAI_AutoCodeInterpreterToolParam_memory_limit(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if value is None:
+ return
+
+def _validate_OpenAI_AutoCodeInterpreterToolParam_network_policy(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _validate_OpenAI_ContainerNetworkPolicyParam(value, path, errors)
+
+def _validate_OpenAI_AutoCodeInterpreterToolParam_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('auto',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_CodeInterpreterOutputLogs(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'logs' not in value:
+ _append_error(errors, f"{path}.logs", "Required property 'logs' is missing")
+ if 'logs' in value:
+ _validate_OpenAI_CodeInterpreterOutputLogs_logs(value['logs'], f"{path}.logs", errors)
+ if 'type' in value:
+ _validate_OpenAI_CodeInterpreterOutputLogs_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_CodeInterpreterOutputImage(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'url' not in value:
+ _append_error(errors, f"{path}.url", "Required property 'url' is missing")
+ if 'type' in value:
+ _validate_OpenAI_CodeInterpreterOutputImage_type(value['type'], f"{path}.type", errors)
+ if 'url' in value:
+ _validate_OpenAI_CodeInterpreterOutputImage_url(value['url'], f"{path}.url", errors)
+
+def _validate_OpenAI_ComputerAction_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _validate_OpenAI_ComputerActionType(value, path, errors)
+
+def _validate_OpenAI_ClickParam(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'button' not in value:
+ _append_error(errors, f"{path}.button", "Required property 'button' is missing")
+ if 'x' not in value:
+ _append_error(errors, f"{path}.x", "Required property 'x' is missing")
+ if 'y' not in value:
+ _append_error(errors, f"{path}.y", "Required property 'y' is missing")
+ if 'button' in value:
+ _validate_OpenAI_ClickParam_button(value['button'], f"{path}.button", errors)
+ if 'type' in value:
+ _validate_OpenAI_ClickParam_type(value['type'], f"{path}.type", errors)
+ if 'x' in value:
+ _validate_OpenAI_ClickParam_x(value['x'], f"{path}.x", errors)
+ if 'y' in value:
+ _validate_OpenAI_ClickParam_y(value['y'], f"{path}.y", errors)
+
+def _validate_OpenAI_DoubleClickAction(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'x' not in value:
+ _append_error(errors, f"{path}.x", "Required property 'x' is missing")
+ if 'y' not in value:
+ _append_error(errors, f"{path}.y", "Required property 'y' is missing")
+ if 'type' in value:
+ _validate_OpenAI_DoubleClickAction_type(value['type'], f"{path}.type", errors)
+ if 'x' in value:
+ _validate_OpenAI_DoubleClickAction_x(value['x'], f"{path}.x", errors)
+ if 'y' in value:
+ _validate_OpenAI_DoubleClickAction_y(value['y'], f"{path}.y", errors)
+
+def _validate_OpenAI_DragParam(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'path' not in value:
+ _append_error(errors, f"{path}.path", "Required property 'path' is missing")
+ if 'path' in value:
+ _validate_OpenAI_DragParam_path(value['path'], f"{path}.path", errors)
+ if 'type' in value:
+ _validate_OpenAI_DragParam_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_KeyPressAction(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'keys' not in value:
+ _append_error(errors, f"{path}.keys", "Required property 'keys' is missing")
+ if 'keys' in value:
+ _validate_OpenAI_KeyPressAction_keys(value['keys'], f"{path}.keys", errors)
+ if 'type' in value:
+ _validate_OpenAI_KeyPressAction_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_MoveParam(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'x' not in value:
+ _append_error(errors, f"{path}.x", "Required property 'x' is missing")
+ if 'y' not in value:
+ _append_error(errors, f"{path}.y", "Required property 'y' is missing")
+ if 'type' in value:
+ _validate_OpenAI_MoveParam_type(value['type'], f"{path}.type", errors)
+ if 'x' in value:
+ _validate_OpenAI_MoveParam_x(value['x'], f"{path}.x", errors)
+ if 'y' in value:
+ _validate_OpenAI_MoveParam_y(value['y'], f"{path}.y", errors)
+
+def _validate_OpenAI_ScreenshotParam(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'type' in value:
+ _validate_OpenAI_ScreenshotParam_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_ScrollParam(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'x' not in value:
+ _append_error(errors, f"{path}.x", "Required property 'x' is missing")
+ if 'y' not in value:
+ _append_error(errors, f"{path}.y", "Required property 'y' is missing")
+ if 'scroll_x' not in value:
+ _append_error(errors, f"{path}.scroll_x", "Required property 'scroll_x' is missing")
+ if 'scroll_y' not in value:
+ _append_error(errors, f"{path}.scroll_y", "Required property 'scroll_y' is missing")
+ if 'scroll_x' in value:
+ _validate_OpenAI_ScrollParam_scroll_x(value['scroll_x'], f"{path}.scroll_x", errors)
+ if 'scroll_y' in value:
+ _validate_OpenAI_ScrollParam_scroll_y(value['scroll_y'], f"{path}.scroll_y", errors)
+ if 'type' in value:
+ _validate_OpenAI_ScrollParam_type(value['type'], f"{path}.type", errors)
+ if 'x' in value:
+ _validate_OpenAI_ScrollParam_x(value['x'], f"{path}.x", errors)
+ if 'y' in value:
+ _validate_OpenAI_ScrollParam_y(value['y'], f"{path}.y", errors)
+
+def _validate_OpenAI_TypeParam(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'text' not in value:
+ _append_error(errors, f"{path}.text", "Required property 'text' is missing")
+ if 'text' in value:
+ _validate_OpenAI_TypeParam_text(value['text'], f"{path}.text", errors)
+ if 'type' in value:
+ _validate_OpenAI_TypeParam_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_WaitParam(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'type' in value:
+ _validate_OpenAI_WaitParam_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_ComputerCallSafetyCheckParam(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'id' not in value:
+ _append_error(errors, f"{path}.id", "Required property 'id' is missing")
+ if 'code' in value:
+ _validate_CreateResponse_instructions(value['code'], f"{path}.code", errors)
+ if 'id' in value:
+ _validate_OpenAI_ComputerCallSafetyCheckParam_id(value['id'], f"{path}.id", errors)
+ if 'message' in value:
+ _validate_CreateResponse_instructions(value['message'], f"{path}.message", errors)
+
+def _validate_OpenAI_ComputerScreenshotImage_file_id(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ComputerScreenshotImage_image_url(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ComputerScreenshotImage_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('computer_screenshot',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ItemCustomToolCallOutput_output_array_item(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _validate_OpenAI_FunctionAndCustomToolCallOutput(value, path, errors)
+
+def _validate_OpenAI_FileSearchToolCallResults(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'attributes' in value:
+ _validate_OpenAI_FileSearchToolCallResults_attributes(value['attributes'], f"{path}.attributes", errors)
+ if 'file_id' in value:
+ _validate_OpenAI_InputParam_string(value['file_id'], f"{path}.file_id", errors)
+ if 'filename' in value:
+ _validate_OpenAI_InputParam_string(value['filename'], f"{path}.filename", errors)
+ if 'score' in value:
+ _validate_OpenAI_FileSearchToolCallResults_score(value['score'], f"{path}.score", errors)
+ if 'text' in value:
+ _validate_OpenAI_InputParam_string(value['text'], f"{path}.text", errors)
+
+def _validate_OpenAI_FunctionCallOutputItemParam_output_array_item(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _matched_union = False
+ if not _matched_union and _is_type(value, 'object'):
+ _branch_errors_0: list[dict[str, str]] = []
+ _validate_OpenAI_InputTextContentParam(value, path, _branch_errors_0)
+ if not _branch_errors_0:
+ _matched_union = True
+ if not _matched_union and _is_type(value, 'object'):
+ _branch_errors_1: list[dict[str, str]] = []
+ _validate_OpenAI_InputImageContentParamAutoParam(value, path, _branch_errors_1)
+ if not _branch_errors_1:
+ _matched_union = True
+ if not _matched_union and _is_type(value, 'object'):
+ _branch_errors_2: list[dict[str, str]] = []
+ _validate_OpenAI_InputFileContentParam(value, path, _branch_errors_2)
+ if not _branch_errors_2:
+ _matched_union = True
+ if not _matched_union:
+ _append_error(errors, path, f"Expected one of: OpenAI.InputTextContentParam, OpenAI.InputImageContentParamAutoParam, OpenAI.InputFileContentParam; got {_type_label(value)}")
+ return
+
+def _validate_OpenAI_LocalShellExecAction_command(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'array'):
+ _append_type_mismatch(errors, path, 'array', value)
+ return
+ for _idx, _item in enumerate(value):
+ _validate_OpenAI_InputParam_string(_item, f"{path}[{_idx}]", errors)
+
+def _validate_OpenAI_LocalShellExecAction_env(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ for _key, _item in value.items():
+ if _key not in ():
+ _validate_OpenAI_InputParam_string(_item, f"{path}.{_key}", errors)
+
+def _validate_OpenAI_LocalShellExecAction_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('exec',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_MCPListToolsTool(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'name' not in value:
+ _append_error(errors, f"{path}.name", "Required property 'name' is missing")
+ if 'input_schema' not in value:
+ _append_error(errors, f"{path}.input_schema", "Required property 'input_schema' is missing")
+ if 'annotations' in value:
+ _validate_OpenAI_MCPListToolsTool_annotations(value['annotations'], f"{path}.annotations", errors)
+ if 'description' in value:
+ _validate_CreateResponse_instructions(value['description'], f"{path}.description", errors)
+ if 'input_schema' in value:
+ _validate_OpenAI_MCPListToolsTool_input_schema(value['input_schema'], f"{path}.input_schema", errors)
+ if 'name' in value:
+ _validate_OpenAI_MCPListToolsTool_name(value['name'], f"{path}.name", errors)
+
+def _validate_MemorySearchItem(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'memory_item' not in value:
+ _append_error(errors, f"{path}.memory_item", "Required property 'memory_item' is missing")
+ if 'memory_item' in value:
+ _validate_MemorySearchItem_memory_item(value['memory_item'], f"{path}.memory_item", errors)
+
+def _validate_OpenAI_ItemMessage_content_array_item(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _validate_OpenAI_MessageContent(value, path, errors)
+
+def _validate_OpenAI_OutputMessageContent(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'type' in value:
+ _validate_OpenAI_OutputMessageContent_type(value['type'], f"{path}.type", errors)
+ _disc_value = value.get('type')
+ if not isinstance(_disc_value, str):
+ _append_error(errors, f"{path}.type", "Required discriminator 'type' is missing or invalid")
+ return
+ if _disc_value == 'output_text':
+ _validate_OpenAI_OutputMessageContentOutputTextContent(value, path, errors)
+ if _disc_value == 'refusal':
+ _validate_OpenAI_OutputMessageContentRefusalContent(value, path, errors)
+
+def _validate_OpenAI_ReasoningTextContent(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'text' not in value:
+ _append_error(errors, f"{path}.text", "Required property 'text' is missing")
+ if 'text' in value:
+ _validate_OpenAI_ReasoningTextContent_text(value['text'], f"{path}.text", errors)
+ if 'type' in value:
+ _validate_OpenAI_ReasoningTextContent_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_SummaryTextContent(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'text' not in value:
+ _append_error(errors, f"{path}.text", "Required property 'text' is missing")
+ if 'text' in value:
+ _validate_OpenAI_SummaryTextContent_text(value['text'], f"{path}.text", errors)
+ if 'type' in value:
+ _validate_OpenAI_SummaryTextContent_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_FunctionShellCallOutputContentParam(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'stdout' not in value:
+ _append_error(errors, f"{path}.stdout", "Required property 'stdout' is missing")
+ if 'stderr' not in value:
+ _append_error(errors, f"{path}.stderr", "Required property 'stderr' is missing")
+ if 'outcome' not in value:
+ _append_error(errors, f"{path}.outcome", "Required property 'outcome' is missing")
+ if 'outcome' in value:
+ _validate_OpenAI_FunctionShellCallOutputContentParam_outcome(value['outcome'], f"{path}.outcome", errors)
+ if 'stderr' in value:
+ _validate_OpenAI_FunctionShellCallOutputContentParam_stderr(value['stderr'], f"{path}.stderr", errors)
+ if 'stdout' in value:
+ _validate_OpenAI_FunctionShellCallOutputContentParam_stdout(value['stdout'], f"{path}.stdout", errors)
+
+def _validate_OpenAI_WebSearchActionSearch_queries(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'array'):
+ _append_type_mismatch(errors, path, 'array', value)
+ return
+ for _idx, _item in enumerate(value):
+ _validate_OpenAI_InputParam_string(_item, f"{path}[{_idx}]", errors)
+
+def _validate_OpenAI_WebSearchActionSearch_query(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_WebSearchActionSearch_sources(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'array'):
+ _append_type_mismatch(errors, path, 'array', value)
+ return
+ for _idx, _item in enumerate(value):
+ _validate_OpenAI_WebSearchActionSearch_sources_item(_item, f"{path}[{_idx}]", errors)
+
+def _validate_OpenAI_WebSearchActionSearch_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('search',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_WebSearchActionOpenPage_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('open_page',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_WebSearchActionOpenPage_url(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if value is None:
+ return
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_WebSearchActionFind_pattern(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_WebSearchActionFind_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('find_in_page',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_WebSearchActionFind_url(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ContainerNetworkPolicyParam(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'type' in value:
+ _validate_OpenAI_ContainerNetworkPolicyParam_type(value['type'], f"{path}.type", errors)
+ _disc_value = value.get('type')
+ if not isinstance(_disc_value, str):
+ _append_error(errors, f"{path}.type", "Required discriminator 'type' is missing or invalid")
+ return
+ if _disc_value == 'allowlist':
+ _validate_OpenAI_ContainerNetworkPolicyAllowlistParam(value, path, errors)
+ if _disc_value == 'disabled':
+ _validate_OpenAI_ContainerNetworkPolicyDisabledParam(value, path, errors)
+
+def _validate_OpenAI_CodeInterpreterOutputLogs_logs(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_CodeInterpreterOutputLogs_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('logs',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_CodeInterpreterOutputImage_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('image',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_CodeInterpreterOutputImage_url(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ComputerActionType(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _matched_union = False
+ if not _matched_union and _is_type(value, 'string'):
+ _branch_errors_0: list[dict[str, str]] = []
+ _validate_OpenAI_InputParam_string(value, path, _branch_errors_0)
+ if not _branch_errors_0:
+ _matched_union = True
+ if not _matched_union and _is_type(value, 'string'):
+ _branch_errors_1: list[dict[str, str]] = []
+ _validate_OpenAI_ComputerActionType_2(value, path, _branch_errors_1)
+ if not _branch_errors_1:
+ _matched_union = True
+ if not _matched_union:
+ _append_error(errors, path, f"Expected ComputerActionType to be a string value, got {_type_label(value)}")
+ return
+
+def _validate_OpenAI_ClickParam_button(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ return
+
+def _validate_OpenAI_ClickParam_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('click',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ClickParam_x(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'integer'):
+ _append_type_mismatch(errors, path, 'integer', value)
+ return
+
+def _validate_OpenAI_ClickParam_y(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'integer'):
+ _append_type_mismatch(errors, path, 'integer', value)
+ return
+
+def _validate_OpenAI_DoubleClickAction_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('double_click',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_DoubleClickAction_x(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'integer'):
+ _append_type_mismatch(errors, path, 'integer', value)
+ return
+
+def _validate_OpenAI_DoubleClickAction_y(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'integer'):
+ _append_type_mismatch(errors, path, 'integer', value)
+ return
+
+def _validate_OpenAI_DragParam_path(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'array'):
+ _append_type_mismatch(errors, path, 'array', value)
+ return
+ for _idx, _item in enumerate(value):
+ _validate_OpenAI_DragParam_path_item(_item, f"{path}[{_idx}]", errors)
+
+def _validate_OpenAI_DragParam_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('drag',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_KeyPressAction_keys(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'array'):
+ _append_type_mismatch(errors, path, 'array', value)
+ return
+ for _idx, _item in enumerate(value):
+ _validate_OpenAI_InputParam_string(_item, f"{path}[{_idx}]", errors)
+
+def _validate_OpenAI_KeyPressAction_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('keypress',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_MoveParam_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('move',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_MoveParam_x(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'integer'):
+ _append_type_mismatch(errors, path, 'integer', value)
+ return
+
+def _validate_OpenAI_MoveParam_y(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'integer'):
+ _append_type_mismatch(errors, path, 'integer', value)
+ return
+
+def _validate_OpenAI_ScreenshotParam_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('screenshot',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ScrollParam_scroll_x(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'integer'):
+ _append_type_mismatch(errors, path, 'integer', value)
+ return
+
+def _validate_OpenAI_ScrollParam_scroll_y(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'integer'):
+ _append_type_mismatch(errors, path, 'integer', value)
+ return
+
+def _validate_OpenAI_ScrollParam_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('scroll',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ScrollParam_x(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'integer'):
+ _append_type_mismatch(errors, path, 'integer', value)
+ return
+
+def _validate_OpenAI_ScrollParam_y(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'integer'):
+ _append_type_mismatch(errors, path, 'integer', value)
+ return
+
+def _validate_OpenAI_TypeParam_text(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_TypeParam_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('type',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_WaitParam_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('wait',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ComputerCallSafetyCheckParam_id(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_FunctionAndCustomToolCallOutput(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'type' in value:
+ _validate_OpenAI_FunctionAndCustomToolCallOutput_type(value['type'], f"{path}.type", errors)
+ _disc_value = value.get('type')
+ if not isinstance(_disc_value, str):
+ _append_error(errors, f"{path}.type", "Required discriminator 'type' is missing or invalid")
+ return
+ if _disc_value == 'input_file':
+ _validate_OpenAI_FunctionAndCustomToolCallOutputInputFileContent(value, path, errors)
+ if _disc_value == 'input_image':
+ _validate_OpenAI_FunctionAndCustomToolCallOutputInputImageContent(value, path, errors)
+ if _disc_value == 'input_text':
+ _validate_OpenAI_FunctionAndCustomToolCallOutputInputTextContent(value, path, errors)
+
+def _validate_OpenAI_FileSearchToolCallResults_attributes(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if value is None:
+ return
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+
+def _validate_OpenAI_FileSearchToolCallResults_score(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'number'):
+ _append_type_mismatch(errors, path, 'number', value)
+ return
+
+def _validate_OpenAI_InputTextContentParam(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'text' not in value:
+ _append_error(errors, f"{path}.text", "Required property 'text' is missing")
+ if 'text' in value:
+ _validate_OpenAI_InputTextContentParam_text(value['text'], f"{path}.text", errors)
+ if 'type' in value:
+ _validate_OpenAI_InputTextContentParam_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_InputImageContentParamAutoParam(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'detail' in value:
+ _validate_OpenAI_InputImageContentParamAutoParam_detail(value['detail'], f"{path}.detail", errors)
+ if 'file_id' in value:
+ _validate_CreateResponse_instructions(value['file_id'], f"{path}.file_id", errors)
+ if 'image_url' in value:
+ _validate_OpenAI_InputImageContentParamAutoParam_image_url(value['image_url'], f"{path}.image_url", errors)
+ if 'type' in value:
+ _validate_OpenAI_InputImageContentParamAutoParam_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_InputFileContentParam(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'file_data' in value:
+ _validate_CreateResponse_instructions(value['file_data'], f"{path}.file_data", errors)
+ if 'file_id' in value:
+ _validate_CreateResponse_instructions(value['file_id'], f"{path}.file_id", errors)
+ if 'file_url' in value:
+ _validate_OpenAI_InputImageContentParamAutoParam_image_url(value['file_url'], f"{path}.file_url", errors)
+ if 'filename' in value:
+ _validate_CreateResponse_instructions(value['filename'], f"{path}.filename", errors)
+ if 'type' in value:
+ _validate_OpenAI_InputFileContentParam_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_MCPListToolsTool_annotations(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if value is None:
+ return
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+
+def _validate_OpenAI_MCPListToolsTool_input_schema(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ return
+
+def _validate_OpenAI_MCPListToolsTool_name(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_MemorySearchItem_memory_item(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ return
+
+def _validate_OpenAI_MessageContent(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'type' in value:
+ _validate_OpenAI_MessageContent_type(value['type'], f"{path}.type", errors)
+ _disc_value = value.get('type')
+ if not isinstance(_disc_value, str):
+ _append_error(errors, f"{path}.type", "Required discriminator 'type' is missing or invalid")
+ return
+ if _disc_value == 'computer_screenshot':
+ _validate_OpenAI_ComputerScreenshotContent(value, path, errors)
+ if _disc_value == 'input_file':
+ _validate_OpenAI_MessageContentInputFileContent(value, path, errors)
+ if _disc_value == 'input_image':
+ _validate_OpenAI_MessageContentInputImageContent(value, path, errors)
+ if _disc_value == 'input_text':
+ _validate_OpenAI_MessageContentInputTextContent(value, path, errors)
+ if _disc_value == 'output_text':
+ _validate_OpenAI_MessageContentOutputTextContent(value, path, errors)
+ if _disc_value == 'reasoning_text':
+ _validate_OpenAI_MessageContentReasoningTextContent(value, path, errors)
+ if _disc_value == 'refusal':
+ _validate_OpenAI_MessageContentRefusalContent(value, path, errors)
+ if _disc_value == 'text':
+ _validate_OpenAI_TextContent(value, path, errors)
+
+def _validate_OpenAI_OutputMessageContent_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _validate_OpenAI_OutputMessageContentType(value, path, errors)
+
+def _validate_OpenAI_OutputMessageContentOutputTextContent(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'text' not in value:
+ _append_error(errors, f"{path}.text", "Required property 'text' is missing")
+ if 'annotations' not in value:
+ _append_error(errors, f"{path}.annotations", "Required property 'annotations' is missing")
+ if 'logprobs' not in value:
+ _append_error(errors, f"{path}.logprobs", "Required property 'logprobs' is missing")
+ if 'annotations' in value:
+ _validate_OpenAI_OutputMessageContentOutputTextContent_annotations(value['annotations'], f"{path}.annotations", errors)
+ if 'logprobs' in value:
+ _validate_OpenAI_OutputMessageContentOutputTextContent_logprobs(value['logprobs'], f"{path}.logprobs", errors)
+ if 'text' in value:
+ _validate_OpenAI_OutputMessageContentOutputTextContent_text(value['text'], f"{path}.text", errors)
+ if 'type' in value:
+ _validate_OpenAI_OutputMessageContentOutputTextContent_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_OutputMessageContentRefusalContent(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'refusal' not in value:
+ _append_error(errors, f"{path}.refusal", "Required property 'refusal' is missing")
+ if 'refusal' in value:
+ _validate_OpenAI_OutputMessageContentRefusalContent_refusal(value['refusal'], f"{path}.refusal", errors)
+ if 'type' in value:
+ _validate_OpenAI_OutputMessageContentRefusalContent_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_ReasoningTextContent_text(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ReasoningTextContent_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('reasoning_text',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_SummaryTextContent_text(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_SummaryTextContent_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('summary_text',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_FunctionShellCallOutputContentParam_outcome(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ return
+
+def _validate_OpenAI_FunctionShellCallOutputContentParam_stderr(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_FunctionShellCallOutputContentParam_stdout(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_WebSearchActionSearch_sources_item(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _validate_OpenAI_WebSearchActionSearchSources(value, path, errors)
+
+def _validate_OpenAI_ContainerNetworkPolicyParam_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _validate_OpenAI_ContainerNetworkPolicyParamType(value, path, errors)
+
+def _validate_OpenAI_ContainerNetworkPolicyAllowlistParam(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'allowed_domains' not in value:
+ _append_error(errors, f"{path}.allowed_domains", "Required property 'allowed_domains' is missing")
+ if 'allowed_domains' in value:
+ _validate_OpenAI_ContainerNetworkPolicyAllowlistParam_allowed_domains(value['allowed_domains'], f"{path}.allowed_domains", errors)
+ if 'domain_secrets' in value:
+ _validate_OpenAI_ContainerNetworkPolicyAllowlistParam_domain_secrets(value['domain_secrets'], f"{path}.domain_secrets", errors)
+ if 'type' in value:
+ _validate_OpenAI_ContainerNetworkPolicyAllowlistParam_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_ContainerNetworkPolicyDisabledParam(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'type' in value:
+ _validate_OpenAI_ContainerNetworkPolicyDisabledParam_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_ComputerActionType_2(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values, _enum_error = _enum_values('ComputerActionType')
+ if _enum_error is not None:
+ _append_error(errors, path, _enum_error)
+ return
+ if _allowed_values is None:
+ return
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_DragParam_path_item(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _validate_OpenAI_CoordParam(value, path, errors)
+
+def _validate_OpenAI_FunctionAndCustomToolCallOutput_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _validate_OpenAI_FunctionAndCustomToolCallOutputType(value, path, errors)
+
+def _validate_OpenAI_FunctionAndCustomToolCallOutputInputFileContent(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'file_data' in value:
+ _validate_OpenAI_FunctionAndCustomToolCallOutputInputFileContent_file_data(value['file_data'], f"{path}.file_data", errors)
+ if 'file_id' in value:
+ _validate_CreateResponse_instructions(value['file_id'], f"{path}.file_id", errors)
+ if 'file_url' in value:
+ _validate_OpenAI_FunctionAndCustomToolCallOutputInputFileContent_file_url(value['file_url'], f"{path}.file_url", errors)
+ if 'filename' in value:
+ _validate_OpenAI_FunctionAndCustomToolCallOutputInputFileContent_filename(value['filename'], f"{path}.filename", errors)
+ if 'type' in value:
+ _validate_OpenAI_InputFileContentParam_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_FunctionAndCustomToolCallOutputInputImageContent(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'detail' not in value:
+ _append_error(errors, f"{path}.detail", "Required property 'detail' is missing")
+ if 'detail' in value:
+ _validate_OpenAI_FunctionAndCustomToolCallOutputInputImageContent_detail(value['detail'], f"{path}.detail", errors)
+ if 'file_id' in value:
+ _validate_CreateResponse_instructions(value['file_id'], f"{path}.file_id", errors)
+ if 'image_url' in value:
+ _validate_OpenAI_InputImageContentParamAutoParam_image_url(value['image_url'], f"{path}.image_url", errors)
+ if 'type' in value:
+ _validate_OpenAI_InputImageContentParamAutoParam_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_FunctionAndCustomToolCallOutputInputTextContent(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'text' not in value:
+ _append_error(errors, f"{path}.text", "Required property 'text' is missing")
+ if 'text' in value:
+ _validate_OpenAI_FunctionAndCustomToolCallOutputInputTextContent_text(value['text'], f"{path}.text", errors)
+ if 'type' in value:
+ _validate_OpenAI_InputTextContentParam_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_InputTextContentParam_text(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_InputTextContentParam_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('input_text',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_InputImageContentParamAutoParam_detail(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if value is None:
+ return
+
+def _validate_OpenAI_InputImageContentParamAutoParam_image_url(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if value is None:
+ return
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_InputImageContentParamAutoParam_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('input_image',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_InputFileContentParam_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('input_file',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_MessageContent_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _validate_OpenAI_MessageContentType(value, path, errors)
+
+def _validate_OpenAI_ComputerScreenshotContent(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'image_url' not in value:
+ _append_error(errors, f"{path}.image_url", "Required property 'image_url' is missing")
+ if 'file_id' not in value:
+ _append_error(errors, f"{path}.file_id", "Required property 'file_id' is missing")
+ if 'file_id' in value:
+ _validate_CreateResponse_instructions(value['file_id'], f"{path}.file_id", errors)
+ if 'image_url' in value:
+ _validate_OpenAI_InputImageContentParamAutoParam_image_url(value['image_url'], f"{path}.image_url", errors)
+ if 'type' in value:
+ _validate_OpenAI_ComputerScreenshotContent_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_MessageContentInputFileContent(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'file_data' in value:
+ _validate_OpenAI_FunctionAndCustomToolCallOutputInputFileContent_file_data(value['file_data'], f"{path}.file_data", errors)
+ if 'file_id' in value:
+ _validate_CreateResponse_instructions(value['file_id'], f"{path}.file_id", errors)
+ if 'file_url' in value:
+ _validate_OpenAI_FunctionAndCustomToolCallOutputInputFileContent_file_url(value['file_url'], f"{path}.file_url", errors)
+ if 'filename' in value:
+ _validate_OpenAI_FunctionAndCustomToolCallOutputInputFileContent_filename(value['filename'], f"{path}.filename", errors)
+ if 'type' in value:
+ _validate_OpenAI_InputFileContentParam_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_MessageContentInputImageContent(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'detail' in value:
+ _validate_OpenAI_MessageContentInputImageContent_detail(value['detail'], f"{path}.detail", errors)
+ if 'file_id' in value:
+ _validate_CreateResponse_instructions(value['file_id'], f"{path}.file_id", errors)
+ if 'image_url' in value:
+ _validate_OpenAI_InputImageContentParamAutoParam_image_url(value['image_url'], f"{path}.image_url", errors)
+ if 'type' in value:
+ _validate_OpenAI_InputImageContentParamAutoParam_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_MessageContentInputTextContent(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'text' not in value:
+ _append_error(errors, f"{path}.text", "Required property 'text' is missing")
+ if 'text' in value:
+ _validate_OpenAI_FunctionAndCustomToolCallOutputInputTextContent_text(value['text'], f"{path}.text", errors)
+ if 'type' in value:
+ _validate_OpenAI_InputTextContentParam_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_MessageContentOutputTextContent(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'text' not in value:
+ _append_error(errors, f"{path}.text", "Required property 'text' is missing")
+ if 'annotations' not in value:
+ _append_error(errors, f"{path}.annotations", "Required property 'annotations' is missing")
+ if 'logprobs' not in value:
+ _append_error(errors, f"{path}.logprobs", "Required property 'logprobs' is missing")
+ if 'annotations' in value:
+ _validate_OpenAI_OutputMessageContentOutputTextContent_annotations(value['annotations'], f"{path}.annotations", errors)
+ if 'logprobs' in value:
+ _validate_OpenAI_OutputMessageContentOutputTextContent_logprobs(value['logprobs'], f"{path}.logprobs", errors)
+ if 'text' in value:
+ _validate_OpenAI_OutputMessageContentOutputTextContent_text(value['text'], f"{path}.text", errors)
+ if 'type' in value:
+ _validate_OpenAI_OutputMessageContentOutputTextContent_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_MessageContentReasoningTextContent(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'text' not in value:
+ _append_error(errors, f"{path}.text", "Required property 'text' is missing")
+ if 'text' in value:
+ _validate_OpenAI_ReasoningTextContent_text(value['text'], f"{path}.text", errors)
+ if 'type' in value:
+ _validate_OpenAI_ReasoningTextContent_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_MessageContentRefusalContent(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'refusal' not in value:
+ _append_error(errors, f"{path}.refusal", "Required property 'refusal' is missing")
+ if 'refusal' in value:
+ _validate_OpenAI_OutputMessageContentRefusalContent_refusal(value['refusal'], f"{path}.refusal", errors)
+ if 'type' in value:
+ _validate_OpenAI_OutputMessageContentRefusalContent_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_TextContent(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'text' not in value:
+ _append_error(errors, f"{path}.text", "Required property 'text' is missing")
+ if 'text' in value:
+ _validate_OpenAI_InputParam_string(value['text'], f"{path}.text", errors)
+ if 'type' in value:
+ _validate_OpenAI_TextContent_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_OutputMessageContentType(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _matched_union = False
+ if not _matched_union and _is_type(value, 'string'):
+ _branch_errors_0: list[dict[str, str]] = []
+ _validate_OpenAI_InputParam_string(value, path, _branch_errors_0)
+ if not _branch_errors_0:
+ _matched_union = True
+ if not _matched_union and _is_type(value, 'string'):
+ _branch_errors_1: list[dict[str, str]] = []
+ _validate_OpenAI_OutputMessageContentType_2(value, path, _branch_errors_1)
+ if not _branch_errors_1:
+ _matched_union = True
+ if not _matched_union:
+ _append_error(errors, path, f"Expected OutputMessageContentType to be a string value, got {_type_label(value)}")
+ return
+
+def _validate_OpenAI_OutputMessageContentOutputTextContent_annotations(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'array'):
+ _append_type_mismatch(errors, path, 'array', value)
+ return
+ for _idx, _item in enumerate(value):
+ _validate_OpenAI_OutputMessageContentOutputTextContent_annotations_item(_item, f"{path}[{_idx}]", errors)
+
+def _validate_OpenAI_OutputMessageContentOutputTextContent_logprobs(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'array'):
+ _append_type_mismatch(errors, path, 'array', value)
+ return
+ for _idx, _item in enumerate(value):
+ _validate_OpenAI_OutputMessageContentOutputTextContent_logprobs_item(_item, f"{path}[{_idx}]", errors)
+
+def _validate_OpenAI_OutputMessageContentOutputTextContent_text(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_OutputMessageContentOutputTextContent_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('output_text',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_OutputMessageContentRefusalContent_refusal(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_OutputMessageContentRefusalContent_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('refusal',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_WebSearchActionSearchSources(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'url' not in value:
+ _append_error(errors, f"{path}.url", "Required property 'url' is missing")
+ if 'type' in value:
+ _validate_OpenAI_WebSearchActionSearchSources_type(value['type'], f"{path}.type", errors)
+ if 'url' in value:
+ _validate_OpenAI_InputParam_string(value['url'], f"{path}.url", errors)
+
+def _validate_OpenAI_ContainerNetworkPolicyParamType(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _matched_union = False
+ if not _matched_union and _is_type(value, 'string'):
+ _branch_errors_0: list[dict[str, str]] = []
+ _validate_OpenAI_InputParam_string(value, path, _branch_errors_0)
+ if not _branch_errors_0:
+ _matched_union = True
+ if not _matched_union and _is_type(value, 'string'):
+ _branch_errors_1: list[dict[str, str]] = []
+ _validate_OpenAI_ContainerNetworkPolicyParamType_2(value, path, _branch_errors_1)
+ if not _branch_errors_1:
+ _matched_union = True
+ if not _matched_union:
+ _append_error(errors, path, f"Expected ContainerNetworkPolicyParamType to be a string value, got {_type_label(value)}")
+ return
+
+def _validate_OpenAI_ContainerNetworkPolicyAllowlistParam_allowed_domains(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'array'):
+ _append_type_mismatch(errors, path, 'array', value)
+ return
+ for _idx, _item in enumerate(value):
+ _validate_OpenAI_InputParam_string(_item, f"{path}[{_idx}]", errors)
+
+def _validate_OpenAI_ContainerNetworkPolicyAllowlistParam_domain_secrets(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'array'):
+ _append_type_mismatch(errors, path, 'array', value)
+ return
+ for _idx, _item in enumerate(value):
+ _validate_OpenAI_ContainerNetworkPolicyAllowlistParam_domain_secrets_item(_item, f"{path}[{_idx}]", errors)
+
+def _validate_OpenAI_ContainerNetworkPolicyAllowlistParam_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('allowlist',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ContainerNetworkPolicyDisabledParam_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('disabled',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_CoordParam(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'x' not in value:
+ _append_error(errors, f"{path}.x", "Required property 'x' is missing")
+ if 'y' not in value:
+ _append_error(errors, f"{path}.y", "Required property 'y' is missing")
+ if 'x' in value:
+ _validate_OpenAI_CoordParam_x(value['x'], f"{path}.x", errors)
+ if 'y' in value:
+ _validate_OpenAI_CoordParam_y(value['y'], f"{path}.y", errors)
+
+def _validate_OpenAI_FunctionAndCustomToolCallOutputType(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _matched_union = False
+ if not _matched_union and _is_type(value, 'string'):
+ _branch_errors_0: list[dict[str, str]] = []
+ _validate_OpenAI_InputParam_string(value, path, _branch_errors_0)
+ if not _branch_errors_0:
+ _matched_union = True
+ if not _matched_union and _is_type(value, 'string'):
+ _branch_errors_1: list[dict[str, str]] = []
+ _validate_OpenAI_FunctionAndCustomToolCallOutputType_2(value, path, _branch_errors_1)
+ if not _branch_errors_1:
+ _matched_union = True
+ if not _matched_union:
+ _append_error(errors, path, f"Expected FunctionAndCustomToolCallOutputType to be a string value, got {_type_label(value)}")
+ return
+
+def _validate_OpenAI_FunctionAndCustomToolCallOutputInputFileContent_file_data(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_FunctionAndCustomToolCallOutputInputFileContent_file_url(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_FunctionAndCustomToolCallOutputInputFileContent_filename(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_FunctionAndCustomToolCallOutputInputImageContent_detail(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ return
+
+def _validate_OpenAI_FunctionAndCustomToolCallOutputInputTextContent_text(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_MessageContentType(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _matched_union = False
+ if not _matched_union and _is_type(value, 'string'):
+ _branch_errors_0: list[dict[str, str]] = []
+ _validate_OpenAI_InputParam_string(value, path, _branch_errors_0)
+ if not _branch_errors_0:
+ _matched_union = True
+ if not _matched_union and _is_type(value, 'string'):
+ _branch_errors_1: list[dict[str, str]] = []
+ _validate_OpenAI_MessageContentType_2(value, path, _branch_errors_1)
+ if not _branch_errors_1:
+ _matched_union = True
+ if not _matched_union:
+ _append_error(errors, path, f"Expected MessageContentType to be a string value, got {_type_label(value)}")
+ return
+
+def _validate_OpenAI_ComputerScreenshotContent_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('computer_screenshot',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_MessageContentInputImageContent_detail(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if value is None:
+ return
+
+def _validate_OpenAI_TextContent_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('text',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_OutputMessageContentType_2(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values, _enum_error = _enum_values('OutputMessageContentType')
+ if _enum_error is not None:
+ _append_error(errors, path, _enum_error)
+ return
+ if _allowed_values is None:
+ return
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_OutputMessageContentOutputTextContent_annotations_item(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _validate_OpenAI_Annotation(value, path, errors)
+
+def _validate_OpenAI_OutputMessageContentOutputTextContent_logprobs_item(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _validate_OpenAI_LogProb(value, path, errors)
+
+def _validate_OpenAI_WebSearchActionSearchSources_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('url',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ContainerNetworkPolicyParamType_2(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values, _enum_error = _enum_values('ContainerNetworkPolicyParamType')
+ if _enum_error is not None:
+ _append_error(errors, path, _enum_error)
+ return
+ if _allowed_values is None:
+ return
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ContainerNetworkPolicyAllowlistParam_domain_secrets_item(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _validate_OpenAI_ContainerNetworkPolicyDomainSecretParam(value, path, errors)
+
+def _validate_OpenAI_CoordParam_x(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'integer'):
+ _append_type_mismatch(errors, path, 'integer', value)
+ return
+
+def _validate_OpenAI_CoordParam_y(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'integer'):
+ _append_type_mismatch(errors, path, 'integer', value)
+ return
+
+def _validate_OpenAI_FunctionAndCustomToolCallOutputType_2(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values, _enum_error = _enum_values('FunctionAndCustomToolCallOutputType')
+ if _enum_error is not None:
+ _append_error(errors, path, _enum_error)
+ return
+ if _allowed_values is None:
+ return
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_MessageContentType_2(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values, _enum_error = _enum_values('MessageContentType')
+ if _enum_error is not None:
+ _append_error(errors, path, _enum_error)
+ return
+ if _allowed_values is None:
+ return
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_Annotation(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'type' in value:
+ _validate_OpenAI_Annotation_type(value['type'], f"{path}.type", errors)
+ _disc_value = value.get('type')
+ if not isinstance(_disc_value, str):
+ _append_error(errors, f"{path}.type", "Required discriminator 'type' is missing or invalid")
+ return
+ if _disc_value == 'container_file_citation':
+ _validate_OpenAI_ContainerFileCitationBody(value, path, errors)
+ if _disc_value == 'file_citation':
+ _validate_OpenAI_FileCitationBody(value, path, errors)
+ if _disc_value == 'file_path':
+ _validate_OpenAI_FilePath(value, path, errors)
+ if _disc_value == 'url_citation':
+ _validate_OpenAI_UrlCitationBody(value, path, errors)
+
+def _validate_OpenAI_LogProb(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'token' not in value:
+ _append_error(errors, f"{path}.token", "Required property 'token' is missing")
+ if 'logprob' not in value:
+ _append_error(errors, f"{path}.logprob", "Required property 'logprob' is missing")
+ if 'bytes' not in value:
+ _append_error(errors, f"{path}.bytes", "Required property 'bytes' is missing")
+ if 'top_logprobs' not in value:
+ _append_error(errors, f"{path}.top_logprobs", "Required property 'top_logprobs' is missing")
+ if 'bytes' in value:
+ _validate_OpenAI_LogProb_bytes(value['bytes'], f"{path}.bytes", errors)
+ if 'logprob' in value:
+ _validate_OpenAI_LogProb_logprob(value['logprob'], f"{path}.logprob", errors)
+ if 'token' in value:
+ _validate_OpenAI_InputParam_string(value['token'], f"{path}.token", errors)
+ if 'top_logprobs' in value:
+ _validate_OpenAI_LogProb_top_logprobs(value['top_logprobs'], f"{path}.top_logprobs", errors)
+
+def _validate_OpenAI_ContainerNetworkPolicyDomainSecretParam(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'domain' not in value:
+ _append_error(errors, f"{path}.domain", "Required property 'domain' is missing")
+ if 'name' not in value:
+ _append_error(errors, f"{path}.name", "Required property 'name' is missing")
+ if 'value' not in value:
+ _append_error(errors, f"{path}.value", "Required property 'value' is missing")
+ if 'domain' in value:
+ _validate_OpenAI_ContainerNetworkPolicyDomainSecretParam_domain(value['domain'], f"{path}.domain", errors)
+ if 'name' in value:
+ _validate_OpenAI_ContainerNetworkPolicyDomainSecretParam_name(value['name'], f"{path}.name", errors)
+ if 'value' in value:
+ _validate_OpenAI_ContainerNetworkPolicyDomainSecretParam_value(value['value'], f"{path}.value", errors)
+
+def _validate_OpenAI_Annotation_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _validate_OpenAI_AnnotationType(value, path, errors)
+
+def _validate_OpenAI_ContainerFileCitationBody(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'container_id' not in value:
+ _append_error(errors, f"{path}.container_id", "Required property 'container_id' is missing")
+ if 'file_id' not in value:
+ _append_error(errors, f"{path}.file_id", "Required property 'file_id' is missing")
+ if 'start_index' not in value:
+ _append_error(errors, f"{path}.start_index", "Required property 'start_index' is missing")
+ if 'end_index' not in value:
+ _append_error(errors, f"{path}.end_index", "Required property 'end_index' is missing")
+ if 'filename' not in value:
+ _append_error(errors, f"{path}.filename", "Required property 'filename' is missing")
+ if 'container_id' in value:
+ _validate_OpenAI_ContainerFileCitationBody_container_id(value['container_id'], f"{path}.container_id", errors)
+ if 'end_index' in value:
+ _validate_OpenAI_ContainerFileCitationBody_end_index(value['end_index'], f"{path}.end_index", errors)
+ if 'file_id' in value:
+ _validate_OpenAI_ContainerFileCitationBody_file_id(value['file_id'], f"{path}.file_id", errors)
+ if 'filename' in value:
+ _validate_OpenAI_ContainerFileCitationBody_filename(value['filename'], f"{path}.filename", errors)
+ if 'start_index' in value:
+ _validate_OpenAI_ContainerFileCitationBody_start_index(value['start_index'], f"{path}.start_index", errors)
+ if 'type' in value:
+ _validate_OpenAI_ContainerFileCitationBody_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_FileCitationBody(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'file_id' not in value:
+ _append_error(errors, f"{path}.file_id", "Required property 'file_id' is missing")
+ if 'index' not in value:
+ _append_error(errors, f"{path}.index", "Required property 'index' is missing")
+ if 'filename' not in value:
+ _append_error(errors, f"{path}.filename", "Required property 'filename' is missing")
+ if 'file_id' in value:
+ _validate_OpenAI_ContainerFileCitationBody_file_id(value['file_id'], f"{path}.file_id", errors)
+ if 'filename' in value:
+ _validate_OpenAI_FileCitationBody_filename(value['filename'], f"{path}.filename", errors)
+ if 'index' in value:
+ _validate_OpenAI_FileCitationBody_index(value['index'], f"{path}.index", errors)
+ if 'type' in value:
+ _validate_OpenAI_FileCitationBody_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_FilePath(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'file_id' not in value:
+ _append_error(errors, f"{path}.file_id", "Required property 'file_id' is missing")
+ if 'index' not in value:
+ _append_error(errors, f"{path}.index", "Required property 'index' is missing")
+ if 'file_id' in value:
+ _validate_OpenAI_ContainerFileCitationBody_file_id(value['file_id'], f"{path}.file_id", errors)
+ if 'index' in value:
+ _validate_OpenAI_FileCitationBody_index(value['index'], f"{path}.index", errors)
+ if 'type' in value:
+ _validate_OpenAI_FilePath_type(value['type'], f"{path}.type", errors)
+
+def _validate_OpenAI_UrlCitationBody(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'type' not in value:
+ _append_error(errors, f"{path}.type", "Required property 'type' is missing")
+ if 'url' not in value:
+ _append_error(errors, f"{path}.url", "Required property 'url' is missing")
+ if 'start_index' not in value:
+ _append_error(errors, f"{path}.start_index", "Required property 'start_index' is missing")
+ if 'end_index' not in value:
+ _append_error(errors, f"{path}.end_index", "Required property 'end_index' is missing")
+ if 'title' not in value:
+ _append_error(errors, f"{path}.title", "Required property 'title' is missing")
+ if 'end_index' in value:
+ _validate_OpenAI_UrlCitationBody_end_index(value['end_index'], f"{path}.end_index", errors)
+ if 'start_index' in value:
+ _validate_OpenAI_UrlCitationBody_start_index(value['start_index'], f"{path}.start_index", errors)
+ if 'title' in value:
+ _validate_OpenAI_UrlCitationBody_title(value['title'], f"{path}.title", errors)
+ if 'type' in value:
+ _validate_OpenAI_UrlCitationBody_type(value['type'], f"{path}.type", errors)
+ if 'url' in value:
+ _validate_OpenAI_UrlCitationBody_url(value['url'], f"{path}.url", errors)
+
+def _validate_OpenAI_LogProb_bytes(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'array'):
+ _append_type_mismatch(errors, path, 'array', value)
+ return
+ for _idx, _item in enumerate(value):
+ _validate_OpenAI_LogProb_bytes_item(_item, f"{path}[{_idx}]", errors)
+
+def _validate_OpenAI_LogProb_logprob(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'number'):
+ _append_type_mismatch(errors, path, 'number', value)
+ return
+
+def _validate_OpenAI_LogProb_top_logprobs(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'array'):
+ _append_type_mismatch(errors, path, 'array', value)
+ return
+ for _idx, _item in enumerate(value):
+ _validate_OpenAI_LogProb_top_logprobs_item(_item, f"{path}[{_idx}]", errors)
+
+def _validate_OpenAI_ContainerNetworkPolicyDomainSecretParam_domain(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ContainerNetworkPolicyDomainSecretParam_name(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ContainerNetworkPolicyDomainSecretParam_value(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_AnnotationType(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _matched_union = False
+ if not _matched_union and _is_type(value, 'string'):
+ _branch_errors_0: list[dict[str, str]] = []
+ _validate_OpenAI_InputParam_string(value, path, _branch_errors_0)
+ if not _branch_errors_0:
+ _matched_union = True
+ if not _matched_union and _is_type(value, 'string'):
+ _branch_errors_1: list[dict[str, str]] = []
+ _validate_OpenAI_AnnotationType_2(value, path, _branch_errors_1)
+ if not _branch_errors_1:
+ _matched_union = True
+ if not _matched_union:
+ _append_error(errors, path, f"Expected AnnotationType to be a string value, got {_type_label(value)}")
+ return
+
+def _validate_OpenAI_ContainerFileCitationBody_container_id(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ContainerFileCitationBody_end_index(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'integer'):
+ _append_type_mismatch(errors, path, 'integer', value)
+ return
+
+def _validate_OpenAI_ContainerFileCitationBody_file_id(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ContainerFileCitationBody_filename(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_ContainerFileCitationBody_start_index(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'integer'):
+ _append_type_mismatch(errors, path, 'integer', value)
+ return
+
+def _validate_OpenAI_ContainerFileCitationBody_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('container_file_citation',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_FileCitationBody_filename(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_FileCitationBody_index(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'integer'):
+ _append_type_mismatch(errors, path, 'integer', value)
+ return
+
+def _validate_OpenAI_FileCitationBody_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('file_citation',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_FilePath_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('file_path',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_UrlCitationBody_end_index(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'integer'):
+ _append_type_mismatch(errors, path, 'integer', value)
+ return
+
+def _validate_OpenAI_UrlCitationBody_start_index(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'integer'):
+ _append_type_mismatch(errors, path, 'integer', value)
+ return
+
+def _validate_OpenAI_UrlCitationBody_title(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_UrlCitationBody_type(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values = ('url_citation',)
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_UrlCitationBody_url(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_LogProb_bytes_item(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'integer'):
+ _append_type_mismatch(errors, path, 'integer', value)
+ return
+
+def _validate_OpenAI_LogProb_top_logprobs_item(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _validate_OpenAI_TopLogProb(value, path, errors)
+
+def _validate_OpenAI_AnnotationType_2(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ _allowed_values, _enum_error = _enum_values('AnnotationType')
+ if _enum_error is not None:
+ _append_error(errors, path, _enum_error)
+ return
+ if _allowed_values is None:
+ return
+ if value not in _allowed_values:
+ _append_error(errors, path, f"Invalid value '{value}'. Allowed: {', '.join(str(v) for v in _allowed_values)}")
+ if not _is_type(value, 'string'):
+ _append_type_mismatch(errors, path, 'string', value)
+ return
+
+def _validate_OpenAI_TopLogProb(value: Any, path: str, errors: list[dict[str, str]]) -> None:
+ if not _is_type(value, 'object'):
+ _append_type_mismatch(errors, path, 'object', value)
+ return
+ if 'token' not in value:
+ _append_error(errors, f"{path}.token", "Required property 'token' is missing")
+ if 'logprob' not in value:
+ _append_error(errors, f"{path}.logprob", "Required property 'logprob' is missing")
+ if 'bytes' not in value:
+ _append_error(errors, f"{path}.bytes", "Required property 'bytes' is missing")
+ if 'bytes' in value:
+ _validate_OpenAI_LogProb_bytes(value['bytes'], f"{path}.bytes", errors)
+ if 'logprob' in value:
+ _validate_OpenAI_LogProb_logprob(value['logprob'], f"{path}.logprob", errors)
+ if 'token' in value:
+ _validate_OpenAI_InputParam_string(value['token'], f"{path}.token", errors)
+
+ROOT_SCHEMAS = ['CreateResponse']
+
+class CreateResponseValidator:
+ """Generated validator for the root schema."""
+
+ @staticmethod
+ def validate(payload: Any) -> list[dict[str, str]]:
+ errors: list[dict[str, str]] = []
+ _validate_CreateResponse(payload, '$', errors)
+ return errors
+
+def validate_CreateResponse(payload: Any) -> list[dict[str, str]]:
+ return CreateResponseValidator.validate(payload)
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_generated/sdk/models/__init__.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_generated/sdk/models/__init__.py
new file mode 100644
index 000000000000..9abd30ab9c84
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_generated/sdk/models/__init__.py
@@ -0,0 +1,11 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------
+
+"""Model-only generated package surface."""
+
+from .models import * # type: ignore # noqa: F401,F403
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_generated/sdk/models/_patch.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_generated/sdk/models/_patch.py
new file mode 100644
index 000000000000..87676c65a8f0
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_generated/sdk/models/_patch.py
@@ -0,0 +1,21 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------
+"""Customize generated code here.
+
+Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
+"""
+
+
+__all__: list[str] = [] # Add all objects you want publicly available to users at this package level
+
+
+def patch_sdk():
+ """Do not remove from this file.
+
+ `patch_sdk` is a last resort escape hatch that allows you to do customizations
+ you can't accomplish using the techniques described in
+ https://aka.ms/azsdk/python/dpcodegen/python/customize
+ """
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_generated/sdk/models/_types.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_generated/sdk/models/_types.py
new file mode 100644
index 000000000000..c99439ce635a
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_generated/sdk/models/_types.py
@@ -0,0 +1,71 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from typing import Any, TYPE_CHECKING, Union
+
+if TYPE_CHECKING:
+ from . import models as _models
+Filters = Union["_models.ComparisonFilter", "_models.CompoundFilter"]
+ToolCallOutputContent = Union[dict[str, Any], str, list[Any]]
+InputParam = Union[str, list["_models.Item"]]
+ConversationParam = Union[str, "_models.ConversationParam_2"]
+CreateResponseStreamingResponse = Union[
+ "_models.ResponseAudioDeltaEvent",
+ "_models.ResponseAudioTranscriptDeltaEvent",
+ "_models.ResponseCodeInterpreterCallCodeDeltaEvent",
+ "_models.ResponseCodeInterpreterCallInProgressEvent",
+ "_models.ResponseCodeInterpreterCallInterpretingEvent",
+ "_models.ResponseContentPartAddedEvent",
+ "_models.ResponseCreatedEvent",
+ "_models.ResponseErrorEvent",
+ "_models.ResponseFileSearchCallInProgressEvent",
+ "_models.ResponseFileSearchCallSearchingEvent",
+ "_models.ResponseFunctionCallArgumentsDeltaEvent",
+ "_models.ResponseInProgressEvent",
+ "_models.ResponseFailedEvent",
+ "_models.ResponseIncompleteEvent",
+ "_models.ResponseOutputItemAddedEvent",
+ "_models.ResponseReasoningSummaryPartAddedEvent",
+ "_models.ResponseReasoningSummaryTextDeltaEvent",
+ "_models.ResponseReasoningTextDeltaEvent",
+ "_models.ResponseRefusalDeltaEvent",
+ "_models.ResponseTextDeltaEvent",
+ "_models.ResponseWebSearchCallInProgressEvent",
+ "_models.ResponseWebSearchCallSearchingEvent",
+ "_models.ResponseImageGenCallGeneratingEvent",
+ "_models.ResponseImageGenCallInProgressEvent",
+ "_models.ResponseImageGenCallPartialImageEvent",
+ "_models.ResponseMCPCallArgumentsDeltaEvent",
+ "_models.ResponseMCPCallFailedEvent",
+ "_models.ResponseMCPCallInProgressEvent",
+ "_models.ResponseMCPListToolsFailedEvent",
+ "_models.ResponseMCPListToolsInProgressEvent",
+ "_models.ResponseOutputTextAnnotationAddedEvent",
+ "_models.ResponseQueuedEvent",
+ "_models.ResponseCustomToolCallInputDeltaEvent",
+ "_models.ResponseAudioDoneEvent",
+ "_models.ResponseAudioTranscriptDoneEvent",
+ "_models.ResponseCodeInterpreterCallCodeDoneEvent",
+ "_models.ResponseCodeInterpreterCallCompletedEvent",
+ "_models.ResponseCompletedEvent",
+ "_models.ResponseContentPartDoneEvent",
+ "_models.ResponseFileSearchCallCompletedEvent",
+ "_models.ResponseFunctionCallArgumentsDoneEvent",
+ "_models.ResponseOutputItemDoneEvent",
+ "_models.ResponseReasoningSummaryPartDoneEvent",
+ "_models.ResponseReasoningSummaryTextDoneEvent",
+ "_models.ResponseReasoningTextDoneEvent",
+ "_models.ResponseRefusalDoneEvent",
+ "_models.ResponseTextDoneEvent",
+ "_models.ResponseWebSearchCallCompletedEvent",
+ "_models.ResponseImageGenCallCompletedEvent",
+ "_models.ResponseMCPCallArgumentsDoneEvent",
+ "_models.ResponseMCPCallCompletedEvent",
+ "_models.ResponseMCPListToolsCompletedEvent",
+ "_models.ResponseCustomToolCallInputDoneEvent",
+]
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_generated/sdk/models/_utils/__init__.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_generated/sdk/models/_utils/__init__.py
new file mode 100644
index 000000000000..8026245c2abc
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_generated/sdk/models/_utils/__init__.py
@@ -0,0 +1,6 @@
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_generated/sdk/models/_utils/model_base.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_generated/sdk/models/_utils/model_base.py
new file mode 100644
index 000000000000..6f38c0c9e0df
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_generated/sdk/models/_utils/model_base.py
@@ -0,0 +1,1370 @@
+# pylint: disable=line-too-long,useless-suppression,too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+# pylint: disable=protected-access, broad-except
+
+import copy
+import calendar
+import decimal
+import functools
+import sys
+import logging
+import base64
+import re
+import typing
+import enum
+import email.utils
+from datetime import datetime, date, time, timedelta, timezone
+from json import JSONEncoder
+import xml.etree.ElementTree as ET
+from collections.abc import MutableMapping
+from typing_extensions import Self
+import isodate
+from azure.core.exceptions import DeserializationError
+from azure.core import CaseInsensitiveEnumMeta
+from azure.core.pipeline import PipelineResponse
+from azure.core.serialization import _Null
+from azure.core.rest import HttpResponse
+
+_LOGGER = logging.getLogger(__name__)
+
+__all__ = ["SdkJSONEncoder", "Model", "rest_field", "rest_discriminator"]
+
+TZ_UTC = timezone.utc
+_T = typing.TypeVar("_T")
+_NONE_TYPE = type(None)
+
+
+def _timedelta_as_isostr(td: timedelta) -> str:
+ """Converts a datetime.timedelta object into an ISO 8601 formatted string, e.g. 'P4DT12H30M05S'
+
+ Function adapted from the Tin Can Python project: https://github.com/RusticiSoftware/TinCanPython
+
+ :param timedelta td: The timedelta to convert
+ :rtype: str
+ :return: ISO8601 version of this timedelta
+ """
+
+ # Split seconds to larger units
+ seconds = td.total_seconds()
+ minutes, seconds = divmod(seconds, 60)
+ hours, minutes = divmod(minutes, 60)
+ days, hours = divmod(hours, 24)
+
+ days, hours, minutes = list(map(int, (days, hours, minutes)))
+ seconds = round(seconds, 6)
+
+ # Build date
+ date_str = ""
+ if days:
+ date_str = "%sD" % days
+
+ if hours or minutes or seconds:
+ # Build time
+ time_str = "T"
+
+ # Hours
+ bigger_exists = date_str or hours
+ if bigger_exists:
+ time_str += "{:02}H".format(hours)
+
+ # Minutes
+ bigger_exists = bigger_exists or minutes
+ if bigger_exists:
+ time_str += "{:02}M".format(minutes)
+
+ # Seconds
+ try:
+ if seconds.is_integer():
+ seconds_string = "{:02}".format(int(seconds))
+ else:
+ # 9 chars long w/ leading 0, 6 digits after decimal
+ seconds_string = "%09.6f" % seconds
+ # Remove trailing zeros
+ seconds_string = seconds_string.rstrip("0")
+ except AttributeError: # int.is_integer() raises
+ seconds_string = "{:02}".format(seconds)
+
+ time_str += "{}S".format(seconds_string)
+ else:
+ time_str = ""
+
+ return "P" + date_str + time_str
+
+
+def _serialize_bytes(o, format: typing.Optional[str] = None) -> str:
+ encoded = base64.b64encode(o).decode()
+ if format == "base64url":
+ return encoded.strip("=").replace("+", "-").replace("/", "_")
+ return encoded
+
+
+def _serialize_datetime(o, format: typing.Optional[str] = None):
+ if hasattr(o, "year") and hasattr(o, "hour"):
+ if format == "rfc7231":
+ return email.utils.format_datetime(o, usegmt=True)
+ if format == "unix-timestamp":
+ return int(calendar.timegm(o.utctimetuple()))
+
+ # astimezone() fails for naive times in Python 2.7, so make make sure o is aware (tzinfo is set)
+ if not o.tzinfo:
+ iso_formatted = o.replace(tzinfo=TZ_UTC).isoformat()
+ else:
+ iso_formatted = o.astimezone(TZ_UTC).isoformat()
+ # Replace the trailing "+00:00" UTC offset with "Z" (RFC 3339: https://www.ietf.org/rfc/rfc3339.txt)
+ return iso_formatted.replace("+00:00", "Z")
+ # Next try datetime.date or datetime.time
+ return o.isoformat()
+
+
+def _is_readonly(p):
+ try:
+ return p._visibility == ["read"]
+ except AttributeError:
+ return False
+
+
+class SdkJSONEncoder(JSONEncoder):
+ """A JSON encoder that's capable of serializing datetime objects and bytes."""
+
+ def __init__(self, *args, exclude_readonly: bool = False, format: typing.Optional[str] = None, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.exclude_readonly = exclude_readonly
+ self.format = format
+
+ def default(self, o): # pylint: disable=too-many-return-statements
+ if _is_model(o):
+ if self.exclude_readonly:
+ readonly_props = [p._rest_name for p in o._attr_to_rest_field.values() if _is_readonly(p)]
+ return {k: v for k, v in o.items() if k not in readonly_props}
+ return dict(o.items())
+ try:
+ return super(SdkJSONEncoder, self).default(o)
+ except TypeError:
+ if isinstance(o, _Null):
+ return None
+ if isinstance(o, decimal.Decimal):
+ return float(o)
+ if isinstance(o, (bytes, bytearray)):
+ return _serialize_bytes(o, self.format)
+ try:
+ # First try datetime.datetime
+ return _serialize_datetime(o, self.format)
+ except AttributeError:
+ pass
+ # Last, try datetime.timedelta
+ try:
+ return _timedelta_as_isostr(o)
+ except AttributeError:
+ # This will be raised when it hits value.total_seconds in the method above
+ pass
+ return super(SdkJSONEncoder, self).default(o)
+
+
+_VALID_DATE = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}" + r"\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?")
+_VALID_RFC7231 = re.compile(
+ r"(Mon|Tue|Wed|Thu|Fri|Sat|Sun),\s\d{2}\s"
+ r"(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s\d{4}\s\d{2}:\d{2}:\d{2}\sGMT"
+)
+
+_ARRAY_ENCODE_MAPPING = {
+ "pipeDelimited": "|",
+ "spaceDelimited": " ",
+ "commaDelimited": ",",
+ "newlineDelimited": "\n",
+}
+
+
+def _deserialize_array_encoded(delimit: str, attr):
+ if isinstance(attr, str):
+ if attr == "":
+ return []
+ return attr.split(delimit)
+ return attr
+
+
+def _deserialize_datetime(attr: typing.Union[str, datetime]) -> datetime:
+ """Deserialize ISO-8601 formatted string into Datetime object.
+
+ :param str attr: response string to be deserialized.
+ :rtype: ~datetime.datetime
+ :returns: The datetime object from that input
+ """
+ if isinstance(attr, datetime):
+ # i'm already deserialized
+ return attr
+ attr = attr.upper()
+ match = _VALID_DATE.match(attr)
+ if not match:
+ raise ValueError("Invalid datetime string: " + attr)
+
+ check_decimal = attr.split(".")
+ if len(check_decimal) > 1:
+ decimal_str = ""
+ for digit in check_decimal[1]:
+ if digit.isdigit():
+ decimal_str += digit
+ else:
+ break
+ if len(decimal_str) > 6:
+ attr = attr.replace(decimal_str, decimal_str[0:6])
+
+ date_obj = isodate.parse_datetime(attr)
+ test_utc = date_obj.utctimetuple()
+ if test_utc.tm_year > 9999 or test_utc.tm_year < 1:
+ raise OverflowError("Hit max or min date")
+ return date_obj # type: ignore[no-any-return]
+
+
+def _deserialize_datetime_rfc7231(attr: typing.Union[str, datetime]) -> datetime:
+ """Deserialize RFC7231 formatted string into Datetime object.
+
+ :param str attr: response string to be deserialized.
+ :rtype: ~datetime.datetime
+ :returns: The datetime object from that input
+ """
+ if isinstance(attr, datetime):
+ # i'm already deserialized
+ return attr
+ match = _VALID_RFC7231.match(attr)
+ if not match:
+ raise ValueError("Invalid datetime string: " + attr)
+
+ return email.utils.parsedate_to_datetime(attr)
+
+
+def _deserialize_datetime_unix_timestamp(attr: typing.Union[float, datetime]) -> datetime:
+ """Deserialize unix timestamp into Datetime object.
+
+ :param str attr: response string to be deserialized.
+ :rtype: ~datetime.datetime
+ :returns: The datetime object from that input
+ """
+ if isinstance(attr, datetime):
+ # i'm already deserialized
+ return attr
+ return datetime.fromtimestamp(attr, TZ_UTC)
+
+
+def _deserialize_date(attr: typing.Union[str, date]) -> date:
+ """Deserialize ISO-8601 formatted string into Date object.
+ :param str attr: response string to be deserialized.
+ :rtype: date
+ :returns: The date object from that input
+ """
+ # This must NOT use defaultmonth/defaultday. Using None ensure this raises an exception.
+ if isinstance(attr, date):
+ return attr
+ return isodate.parse_date(attr, defaultmonth=None, defaultday=None) # type: ignore
+
+
+def _deserialize_time(attr: typing.Union[str, time]) -> time:
+ """Deserialize ISO-8601 formatted string into time object.
+
+ :param str attr: response string to be deserialized.
+ :rtype: datetime.time
+ :returns: The time object from that input
+ """
+ if isinstance(attr, time):
+ return attr
+ return isodate.parse_time(attr) # type: ignore[no-any-return]
+
+
+def _deserialize_bytes(attr):
+ if isinstance(attr, (bytes, bytearray)):
+ return attr
+ return bytes(base64.b64decode(attr))
+
+
+def _deserialize_bytes_base64(attr):
+ if isinstance(attr, (bytes, bytearray)):
+ return attr
+ padding = "=" * (3 - (len(attr) + 3) % 4) # type: ignore
+ attr = attr + padding # type: ignore
+ encoded = attr.replace("-", "+").replace("_", "/")
+ return bytes(base64.b64decode(encoded))
+
+
+def _deserialize_duration(attr):
+ if isinstance(attr, timedelta):
+ return attr
+ return isodate.parse_duration(attr)
+
+
+def _deserialize_decimal(attr):
+ if isinstance(attr, decimal.Decimal):
+ return attr
+ return decimal.Decimal(str(attr))
+
+
+def _deserialize_int_as_str(attr):
+ if isinstance(attr, int):
+ return attr
+ return int(attr)
+
+
+_DESERIALIZE_MAPPING = {
+ datetime: _deserialize_datetime,
+ date: _deserialize_date,
+ time: _deserialize_time,
+ bytes: _deserialize_bytes,
+ bytearray: _deserialize_bytes,
+ timedelta: _deserialize_duration,
+ typing.Any: lambda x: x,
+ decimal.Decimal: _deserialize_decimal,
+}
+
+_DESERIALIZE_MAPPING_WITHFORMAT = {
+ "rfc3339": _deserialize_datetime,
+ "rfc7231": _deserialize_datetime_rfc7231,
+ "unix-timestamp": _deserialize_datetime_unix_timestamp,
+ "base64": _deserialize_bytes,
+ "base64url": _deserialize_bytes_base64,
+}
+
+
+def get_deserializer(annotation: typing.Any, rf: typing.Optional["_RestField"] = None):
+ if annotation is int and rf and rf._format == "str":
+ return _deserialize_int_as_str
+ if annotation is str and rf and rf._format in _ARRAY_ENCODE_MAPPING:
+ return functools.partial(_deserialize_array_encoded, _ARRAY_ENCODE_MAPPING[rf._format])
+ if rf and rf._format:
+ return _DESERIALIZE_MAPPING_WITHFORMAT.get(rf._format)
+ return _DESERIALIZE_MAPPING.get(annotation) # pyright: ignore
+
+
+def _get_type_alias_type(module_name: str, alias_name: str):
+ types = {
+ k: v
+ for k, v in sys.modules[module_name].__dict__.items()
+ if isinstance(v, typing._GenericAlias) # type: ignore
+ }
+ if alias_name not in types:
+ return alias_name
+ return types[alias_name]
+
+
+def _get_model(module_name: str, model_name: str):
+ models = {k: v for k, v in sys.modules[module_name].__dict__.items() if isinstance(v, type)}
+ module_end = module_name.rsplit(".", 1)[0]
+ models.update({k: v for k, v in sys.modules[module_end].__dict__.items() if isinstance(v, type)})
+ if isinstance(model_name, str):
+ model_name = model_name.split(".")[-1]
+ if model_name not in models:
+ return model_name
+ return models[model_name]
+
+
+_UNSET = object()
+
+
+class _MyMutableMapping(MutableMapping[str, typing.Any]):
+ def __init__(self, data: dict[str, typing.Any]) -> None:
+ self._data = data
+
+ def __contains__(self, key: typing.Any) -> bool:
+ return key in self._data
+
+ def __getitem__(self, key: str) -> typing.Any:
+ # If this key has been deserialized (for mutable types), we need to handle serialization
+ if hasattr(self, "_attr_to_rest_field"):
+ cache_attr = f"_deserialized_{key}"
+ if hasattr(self, cache_attr):
+ rf = _get_rest_field(getattr(self, "_attr_to_rest_field"), key)
+ if rf:
+ value = self._data.get(key)
+ if isinstance(value, (dict, list, set)):
+ # For mutable types, serialize and return
+ # But also update _data with serialized form and clear flag
+ # so mutations via this returned value affect _data
+ serialized = _serialize(value, rf._format)
+ # If serialized form is same type (no transformation needed),
+ # return _data directly so mutations work
+ if isinstance(serialized, type(value)) and serialized == value:
+ return self._data.get(key)
+ # Otherwise return serialized copy and clear flag
+ try:
+ object.__delattr__(self, cache_attr)
+ except AttributeError:
+ pass
+ # Store serialized form back
+ self._data[key] = serialized
+ return serialized
+ return self._data.__getitem__(key)
+
+ def __setitem__(self, key: str, value: typing.Any) -> None:
+ # Clear any cached deserialized value when setting through dictionary access
+ cache_attr = f"_deserialized_{key}"
+ try:
+ object.__delattr__(self, cache_attr)
+ except AttributeError:
+ pass
+ self._data.__setitem__(key, value)
+
+ def __delitem__(self, key: str) -> None:
+ self._data.__delitem__(key)
+
+ def __iter__(self) -> typing.Iterator[typing.Any]:
+ return self._data.__iter__()
+
+ def __len__(self) -> int:
+ return self._data.__len__()
+
+ def __ne__(self, other: typing.Any) -> bool:
+ return not self.__eq__(other)
+
+ def keys(self) -> typing.KeysView[str]:
+ """
+ :returns: a set-like object providing a view on D's keys
+ :rtype: ~typing.KeysView
+ """
+ return self._data.keys()
+
+ def values(self) -> typing.ValuesView[typing.Any]:
+ """
+ :returns: an object providing a view on D's values
+ :rtype: ~typing.ValuesView
+ """
+ return self._data.values()
+
+ def items(self) -> typing.ItemsView[str, typing.Any]:
+ """
+ :returns: set-like object providing a view on D's items
+ :rtype: ~typing.ItemsView
+ """
+ return self._data.items()
+
+ def get(self, key: str, default: typing.Any = None) -> typing.Any:
+ """
+ Get the value for key if key is in the dictionary, else default.
+ :param str key: The key to look up.
+ :param any default: The value to return if key is not in the dictionary. Defaults to None
+ :returns: D[k] if k in D, else d.
+ :rtype: any
+ """
+ try:
+ return self[key]
+ except KeyError:
+ return default
+
+ @typing.overload
+ def pop(self, key: str) -> typing.Any: ... # pylint: disable=arguments-differ
+
+ @typing.overload
+ def pop(self, key: str, default: _T) -> _T: ... # pylint: disable=signature-differs
+
+ @typing.overload
+ def pop(self, key: str, default: typing.Any) -> typing.Any: ... # pylint: disable=signature-differs
+
+ def pop(self, key: str, default: typing.Any = _UNSET) -> typing.Any:
+ """
+ Removes specified key and return the corresponding value.
+ :param str key: The key to pop.
+ :param any default: The value to return if key is not in the dictionary
+ :returns: The value corresponding to the key.
+ :rtype: any
+ :raises KeyError: If key is not found and default is not given.
+ """
+ if default is _UNSET:
+ return self._data.pop(key)
+ return self._data.pop(key, default)
+
+ def popitem(self) -> tuple[str, typing.Any]:
+ """
+ Removes and returns some (key, value) pair
+ :returns: The (key, value) pair.
+ :rtype: tuple
+ :raises KeyError: if D is empty.
+ """
+ return self._data.popitem()
+
+ def clear(self) -> None:
+ """
+ Remove all items from D.
+ """
+ self._data.clear()
+
+ def update(self, *args: typing.Any, **kwargs: typing.Any) -> None: # pylint: disable=arguments-differ
+ """
+ Updates D from mapping/iterable E and F.
+ :param any args: Either a mapping object or an iterable of key-value pairs.
+ """
+ self._data.update(*args, **kwargs)
+
+ @typing.overload
+ def setdefault(self, key: str, default: None = None) -> None: ...
+
+ @typing.overload
+ def setdefault(self, key: str, default: typing.Any) -> typing.Any: ... # pylint: disable=signature-differs
+
+ def setdefault(self, key: str, default: typing.Any = _UNSET) -> typing.Any:
+ """
+ Same as calling D.get(k, d), and setting D[k]=d if k not found
+ :param str key: The key to look up.
+ :param any default: The value to set if key is not in the dictionary
+ :returns: D[k] if k in D, else d.
+ :rtype: any
+ """
+ if default is _UNSET:
+ return self._data.setdefault(key)
+ return self._data.setdefault(key, default)
+
+ def __eq__(self, other: typing.Any) -> bool:
+ if isinstance(other, _MyMutableMapping):
+ return self._data == other._data
+ try:
+ other_model = self.__class__(other)
+ except Exception:
+ return False
+ return self._data == other_model._data
+
+ def __repr__(self) -> str:
+ return str(self._data)
+
+
+def _is_model(obj: typing.Any) -> bool:
+ return getattr(obj, "_is_model", False)
+
+
+def _serialize(o, format: typing.Optional[str] = None): # pylint: disable=too-many-return-statements
+ if isinstance(o, list):
+ if format in _ARRAY_ENCODE_MAPPING and all(isinstance(x, str) for x in o):
+ return _ARRAY_ENCODE_MAPPING[format].join(o)
+ return [_serialize(x, format) for x in o]
+ if isinstance(o, dict):
+ return {k: _serialize(v, format) for k, v in o.items()}
+ if isinstance(o, set):
+ return {_serialize(x, format) for x in o}
+ if isinstance(o, tuple):
+ return tuple(_serialize(x, format) for x in o)
+ if isinstance(o, (bytes, bytearray)):
+ return _serialize_bytes(o, format)
+ if isinstance(o, decimal.Decimal):
+ return float(o)
+ if isinstance(o, enum.Enum):
+ return o.value
+ if isinstance(o, int):
+ if format == "str":
+ return str(o)
+ return o
+ try:
+ # First try datetime.datetime
+ return _serialize_datetime(o, format)
+ except AttributeError:
+ pass
+ # Last, try datetime.timedelta
+ try:
+ return _timedelta_as_isostr(o)
+ except AttributeError:
+ # This will be raised when it hits value.total_seconds in the method above
+ pass
+ return o
+
+
+def _get_rest_field(attr_to_rest_field: dict[str, "_RestField"], rest_name: str) -> typing.Optional["_RestField"]:
+ try:
+ return next(rf for rf in attr_to_rest_field.values() if rf._rest_name == rest_name)
+ except StopIteration:
+ return None
+
+
+def _create_value(rf: typing.Optional["_RestField"], value: typing.Any) -> typing.Any:
+ if not rf:
+ return _serialize(value, None)
+ if rf._is_multipart_file_input:
+ return value
+ if rf._is_model:
+ return _deserialize(rf._type, value)
+ if isinstance(value, ET.Element):
+ value = _deserialize(rf._type, value)
+ return _serialize(value, rf._format)
+
+
+class Model(_MyMutableMapping):
+ _is_model = True
+ # label whether current class's _attr_to_rest_field has been calculated
+ # could not see _attr_to_rest_field directly because subclass inherits it from parent class
+ _calculated: set[str] = set()
+
+ def __init__(self, *args: typing.Any, **kwargs: typing.Any) -> None:
+ class_name = self.__class__.__name__
+ if len(args) > 1:
+ raise TypeError(f"{class_name}.__init__() takes 2 positional arguments but {len(args) + 1} were given")
+ dict_to_pass = {
+ rest_field._rest_name: rest_field._default
+ for rest_field in self._attr_to_rest_field.values()
+ if rest_field._default is not _UNSET
+ }
+ if args: # pylint: disable=too-many-nested-blocks
+ if isinstance(args[0], ET.Element):
+ existed_attr_keys = []
+ model_meta = getattr(self, "_xml", {})
+
+ for rf in self._attr_to_rest_field.values():
+ prop_meta = getattr(rf, "_xml", {})
+ xml_name = prop_meta.get("name", rf._rest_name)
+ xml_ns = prop_meta.get("ns", model_meta.get("ns", None))
+ if xml_ns:
+ xml_name = "{" + xml_ns + "}" + xml_name
+
+ # attribute
+ if prop_meta.get("attribute", False) and args[0].get(xml_name) is not None:
+ existed_attr_keys.append(xml_name)
+ dict_to_pass[rf._rest_name] = _deserialize(rf._type, args[0].get(xml_name))
+ continue
+
+ # unwrapped element is array
+ if prop_meta.get("unwrapped", False):
+ # unwrapped array could either use prop items meta/prop meta
+ if prop_meta.get("itemsName"):
+ xml_name = prop_meta.get("itemsName")
+ xml_ns = prop_meta.get("itemNs")
+ if xml_ns:
+ xml_name = "{" + xml_ns + "}" + xml_name
+ items = args[0].findall(xml_name) # pyright: ignore
+ if len(items) > 0:
+ existed_attr_keys.append(xml_name)
+ dict_to_pass[rf._rest_name] = _deserialize(rf._type, items)
+ elif not rf._is_optional:
+ existed_attr_keys.append(xml_name)
+ dict_to_pass[rf._rest_name] = []
+ continue
+
+ # text element is primitive type
+ if prop_meta.get("text", False):
+ if args[0].text is not None:
+ dict_to_pass[rf._rest_name] = _deserialize(rf._type, args[0].text)
+ continue
+
+ # wrapped element could be normal property or array, it should only have one element
+ item = args[0].find(xml_name)
+ if item is not None:
+ existed_attr_keys.append(xml_name)
+ dict_to_pass[rf._rest_name] = _deserialize(rf._type, item)
+
+ # rest thing is additional properties
+ for e in args[0]:
+ if e.tag not in existed_attr_keys:
+ dict_to_pass[e.tag] = _convert_element(e)
+ else:
+ dict_to_pass.update(
+ {k: _create_value(_get_rest_field(self._attr_to_rest_field, k), v) for k, v in args[0].items()}
+ )
+ else:
+ non_attr_kwargs = [k for k in kwargs if k not in self._attr_to_rest_field]
+ if non_attr_kwargs:
+ # actual type errors only throw the first wrong keyword arg they see, so following that.
+ raise TypeError(f"{class_name}.__init__() got an unexpected keyword argument '{non_attr_kwargs[0]}'")
+ dict_to_pass.update(
+ {
+ self._attr_to_rest_field[k]._rest_name: _create_value(self._attr_to_rest_field[k], v)
+ for k, v in kwargs.items()
+ if v is not None
+ }
+ )
+ super().__init__(dict_to_pass)
+
+ def copy(self) -> "Model":
+ return Model(self.__dict__)
+
+ def __new__(cls, *args: typing.Any, **kwargs: typing.Any) -> Self:
+ if f"{cls.__module__}.{cls.__qualname__}" not in cls._calculated:
+ # we know the last nine classes in mro are going to be 'Model', '_MyMutableMapping', 'MutableMapping',
+ # 'Mapping', 'Collection', 'Sized', 'Iterable', 'Container' and 'object'
+ mros = cls.__mro__[:-9][::-1] # ignore parents, and reverse the mro order
+ attr_to_rest_field: dict[str, _RestField] = { # map attribute name to rest_field property
+ k: v for mro_class in mros for k, v in mro_class.__dict__.items() if k[0] != "_" and hasattr(v, "_type")
+ }
+ annotations = {
+ k: v
+ for mro_class in mros
+ if hasattr(mro_class, "__annotations__")
+ for k, v in mro_class.__annotations__.items()
+ }
+ for attr, rf in attr_to_rest_field.items():
+ rf._module = cls.__module__
+ if not rf._type:
+ rf._type = rf._get_deserialize_callable_from_annotation(annotations.get(attr, None))
+ if not rf._rest_name_input:
+ rf._rest_name_input = attr
+ cls._attr_to_rest_field: dict[str, _RestField] = dict(attr_to_rest_field.items())
+ cls._backcompat_attr_to_rest_field: dict[str, _RestField] = {
+ Model._get_backcompat_attribute_name(cls._attr_to_rest_field, attr): rf
+ for attr, rf in cls._attr_to_rest_field.items()
+ }
+ cls._calculated.add(f"{cls.__module__}.{cls.__qualname__}")
+
+ return super().__new__(cls)
+
+ def __init_subclass__(cls, discriminator: typing.Optional[str] = None) -> None:
+ for base in cls.__bases__:
+ if hasattr(base, "__mapping__"):
+ base.__mapping__[discriminator or cls.__name__] = cls # type: ignore
+
+ @classmethod
+ def _get_backcompat_attribute_name(cls, attr_to_rest_field: dict[str, "_RestField"], attr_name: str) -> str:
+ rest_field_obj = attr_to_rest_field.get(attr_name) # pylint: disable=protected-access
+ if rest_field_obj is None:
+ return attr_name
+ original_tsp_name = getattr(rest_field_obj, "_original_tsp_name", None) # pylint: disable=protected-access
+ if original_tsp_name:
+ return original_tsp_name
+ return attr_name
+
+ @classmethod
+ def _get_discriminator(cls, exist_discriminators) -> typing.Optional["_RestField"]:
+ for v in cls.__dict__.values():
+ if isinstance(v, _RestField) and v._is_discriminator and v._rest_name not in exist_discriminators:
+ return v
+ return None
+
+ @classmethod
+ def _deserialize(cls, data, exist_discriminators):
+ if not hasattr(cls, "__mapping__"):
+ return cls(data)
+ discriminator = cls._get_discriminator(exist_discriminators)
+ if discriminator is None:
+ return cls(data)
+ exist_discriminators.append(discriminator._rest_name)
+ if isinstance(data, ET.Element):
+ model_meta = getattr(cls, "_xml", {})
+ prop_meta = getattr(discriminator, "_xml", {})
+ xml_name = prop_meta.get("name", discriminator._rest_name)
+ xml_ns = prop_meta.get("ns", model_meta.get("ns", None))
+ if xml_ns:
+ xml_name = "{" + xml_ns + "}" + xml_name
+
+ if data.get(xml_name) is not None:
+ discriminator_value = data.get(xml_name)
+ else:
+ discriminator_value = data.find(xml_name).text # pyright: ignore
+ else:
+ discriminator_value = data.get(discriminator._rest_name)
+ mapped_cls = cls.__mapping__.get(discriminator_value, cls) # pyright: ignore # pylint: disable=no-member
+ return mapped_cls._deserialize(data, exist_discriminators)
+
+ def as_dict(self, *, exclude_readonly: bool = False) -> dict[str, typing.Any]:
+ """Return a dict that can be turned into json using json.dump.
+
+ :keyword bool exclude_readonly: Whether to remove the readonly properties.
+ :returns: A dict JSON compatible object
+ :rtype: dict
+ """
+
+ result = {}
+ readonly_props = []
+ if exclude_readonly:
+ readonly_props = [p._rest_name for p in self._attr_to_rest_field.values() if _is_readonly(p)]
+ for k, v in self.items():
+ if exclude_readonly and k in readonly_props: # pyright: ignore
+ continue
+ is_multipart_file_input = False
+ try:
+ is_multipart_file_input = next(
+ rf for rf in self._attr_to_rest_field.values() if rf._rest_name == k
+ )._is_multipart_file_input
+ except StopIteration:
+ pass
+ result[k] = v if is_multipart_file_input else Model._as_dict_value(v, exclude_readonly=exclude_readonly)
+ return result
+
+ @staticmethod
+ def _as_dict_value(v: typing.Any, exclude_readonly: bool = False) -> typing.Any:
+ if v is None or isinstance(v, _Null):
+ return None
+ if isinstance(v, (list, tuple, set)):
+ return type(v)(Model._as_dict_value(x, exclude_readonly=exclude_readonly) for x in v)
+ if isinstance(v, dict):
+ return {dk: Model._as_dict_value(dv, exclude_readonly=exclude_readonly) for dk, dv in v.items()}
+ return v.as_dict(exclude_readonly=exclude_readonly) if hasattr(v, "as_dict") else v
+
+
+def _deserialize_model(model_deserializer: typing.Optional[typing.Callable], obj):
+ if _is_model(obj):
+ return obj
+ return _deserialize(model_deserializer, obj)
+
+
+def _deserialize_with_optional(if_obj_deserializer: typing.Optional[typing.Callable], obj):
+ if obj is None:
+ return obj
+ return _deserialize_with_callable(if_obj_deserializer, obj)
+
+
+def _deserialize_with_union(deserializers, obj):
+ for deserializer in deserializers:
+ try:
+ return _deserialize(deserializer, obj)
+ except DeserializationError:
+ pass
+ raise DeserializationError()
+
+
+def _deserialize_dict(
+ value_deserializer: typing.Optional[typing.Callable],
+ module: typing.Optional[str],
+ obj: dict[typing.Any, typing.Any],
+):
+ if obj is None:
+ return obj
+ if isinstance(obj, ET.Element):
+ obj = {child.tag: child for child in obj}
+ return {k: _deserialize(value_deserializer, v, module) for k, v in obj.items()}
+
+
+def _deserialize_multiple_sequence(
+ entry_deserializers: list[typing.Optional[typing.Callable]],
+ module: typing.Optional[str],
+ obj,
+):
+ if obj is None:
+ return obj
+ return type(obj)(_deserialize(deserializer, entry, module) for entry, deserializer in zip(obj, entry_deserializers))
+
+
+def _is_array_encoded_deserializer(deserializer: functools.partial) -> bool:
+ return (
+ isinstance(deserializer, functools.partial)
+ and isinstance(deserializer.args[0], functools.partial)
+ and deserializer.args[0].func == _deserialize_array_encoded # pylint: disable=comparison-with-callable
+ )
+
+
+def _deserialize_sequence(
+ deserializer: typing.Optional[typing.Callable],
+ module: typing.Optional[str],
+ obj,
+):
+ if obj is None:
+ return obj
+ if isinstance(obj, ET.Element):
+ obj = list(obj)
+
+ # encoded string may be deserialized to sequence
+ if isinstance(obj, str) and isinstance(deserializer, functools.partial):
+ # for list[str]
+ if _is_array_encoded_deserializer(deserializer):
+ return deserializer(obj)
+
+ # for list[Union[...]]
+ if isinstance(deserializer.args[0], list):
+ for sub_deserializer in deserializer.args[0]:
+ if _is_array_encoded_deserializer(sub_deserializer):
+ return sub_deserializer(obj)
+
+ if isinstance(obj, str):
+ raise DeserializationError()
+ return type(obj)(_deserialize(deserializer, entry, module) for entry in obj)
+
+
+def _sorted_annotations(types: list[typing.Any]) -> list[typing.Any]:
+ return sorted(
+ types,
+ key=lambda x: hasattr(x, "__name__") and x.__name__.lower() in ("str", "float", "int", "bool"),
+ )
+
+
+def _get_deserialize_callable_from_annotation( # pylint: disable=too-many-return-statements, too-many-statements, too-many-branches
+ annotation: typing.Any,
+ module: typing.Optional[str],
+ rf: typing.Optional["_RestField"] = None,
+) -> typing.Optional[typing.Callable[[typing.Any], typing.Any]]:
+ if not annotation:
+ return None
+
+ # is it a type alias?
+ if isinstance(annotation, str):
+ if module is not None:
+ annotation = _get_type_alias_type(module, annotation)
+
+ # is it a forward ref / in quotes?
+ if isinstance(annotation, (str, typing.ForwardRef)):
+ try:
+ model_name = annotation.__forward_arg__ # type: ignore
+ except AttributeError:
+ model_name = annotation
+ if module is not None:
+ annotation = _get_model(module, model_name) # type: ignore
+
+ try:
+ if module and _is_model(annotation):
+ if rf:
+ rf._is_model = True
+
+ return functools.partial(_deserialize_model, annotation) # pyright: ignore
+ except Exception:
+ pass
+
+ # is it a literal?
+ try:
+ if annotation.__origin__ is typing.Literal: # pyright: ignore
+ return None
+ except AttributeError:
+ pass
+
+ # is it optional?
+ try:
+ if any(a is _NONE_TYPE for a in annotation.__args__): # pyright: ignore
+ if rf:
+ rf._is_optional = True
+ if len(annotation.__args__) <= 2: # pyright: ignore
+ if_obj_deserializer = _get_deserialize_callable_from_annotation(
+ next(a for a in annotation.__args__ if a is not _NONE_TYPE), module, rf # pyright: ignore
+ )
+
+ return functools.partial(_deserialize_with_optional, if_obj_deserializer)
+ # the type is Optional[Union[...]], we need to remove the None type from the Union
+ annotation_copy = copy.copy(annotation)
+ annotation_copy.__args__ = [a for a in annotation_copy.__args__ if a is not _NONE_TYPE] # pyright: ignore
+ return _get_deserialize_callable_from_annotation(annotation_copy, module, rf)
+ except AttributeError:
+ pass
+
+ # is it union?
+ if getattr(annotation, "__origin__", None) is typing.Union:
+ # initial ordering is we make `string` the last deserialization option, because it is often them most generic
+ deserializers = [
+ _get_deserialize_callable_from_annotation(arg, module, rf)
+ for arg in _sorted_annotations(annotation.__args__) # pyright: ignore
+ ]
+
+ return functools.partial(_deserialize_with_union, deserializers)
+
+ try:
+ annotation_name = (
+ annotation.__name__ if hasattr(annotation, "__name__") else annotation._name # pyright: ignore
+ )
+ if annotation_name.lower() == "dict":
+ value_deserializer = _get_deserialize_callable_from_annotation(
+ annotation.__args__[1], module, rf # pyright: ignore
+ )
+
+ return functools.partial(
+ _deserialize_dict,
+ value_deserializer,
+ module,
+ )
+ except (AttributeError, IndexError):
+ pass
+ try:
+ annotation_name = (
+ annotation.__name__ if hasattr(annotation, "__name__") else annotation._name # pyright: ignore
+ )
+ if annotation_name.lower() in ["list", "set", "tuple", "sequence"]:
+ if len(annotation.__args__) > 1: # pyright: ignore
+ entry_deserializers = [
+ _get_deserialize_callable_from_annotation(dt, module, rf)
+ for dt in annotation.__args__ # pyright: ignore
+ ]
+ return functools.partial(_deserialize_multiple_sequence, entry_deserializers, module)
+ deserializer = _get_deserialize_callable_from_annotation(
+ annotation.__args__[0], module, rf # pyright: ignore
+ )
+
+ return functools.partial(_deserialize_sequence, deserializer, module)
+ except (TypeError, IndexError, AttributeError, SyntaxError):
+ pass
+
+ def _deserialize_default(
+ deserializer,
+ obj,
+ ):
+ if obj is None:
+ return obj
+ try:
+ return _deserialize_with_callable(deserializer, obj)
+ except Exception:
+ pass
+ return obj
+
+ if get_deserializer(annotation, rf):
+ return functools.partial(_deserialize_default, get_deserializer(annotation, rf))
+
+ return functools.partial(_deserialize_default, annotation)
+
+
+def _deserialize_with_callable(
+ deserializer: typing.Optional[typing.Callable[[typing.Any], typing.Any]],
+ value: typing.Any,
+): # pylint: disable=too-many-return-statements
+ try:
+ if value is None or isinstance(value, _Null):
+ return None
+ if isinstance(value, ET.Element):
+ if deserializer is str:
+ return value.text or ""
+ if deserializer is int:
+ return int(value.text) if value.text else None
+ if deserializer is float:
+ return float(value.text) if value.text else None
+ if deserializer is bool:
+ return value.text == "true" if value.text else None
+ if deserializer and deserializer in _DESERIALIZE_MAPPING.values():
+ return deserializer(value.text) if value.text else None
+ if deserializer and deserializer in _DESERIALIZE_MAPPING_WITHFORMAT.values():
+ return deserializer(value.text) if value.text else None
+ if deserializer is None:
+ return value
+ if deserializer in [int, float, bool]:
+ return deserializer(value)
+ if isinstance(deserializer, CaseInsensitiveEnumMeta):
+ try:
+ return deserializer(value.text if isinstance(value, ET.Element) else value)
+ except ValueError:
+ # for unknown value, return raw value
+ return value.text if isinstance(value, ET.Element) else value
+ if isinstance(deserializer, type) and issubclass(deserializer, Model):
+ return deserializer._deserialize(value, [])
+ return typing.cast(typing.Callable[[typing.Any], typing.Any], deserializer)(value)
+ except Exception as e:
+ raise DeserializationError() from e
+
+
+def _deserialize(
+ deserializer: typing.Any,
+ value: typing.Any,
+ module: typing.Optional[str] = None,
+ rf: typing.Optional["_RestField"] = None,
+ format: typing.Optional[str] = None,
+) -> typing.Any:
+ if isinstance(value, PipelineResponse):
+ value = value.http_response.json()
+ if rf is None and format:
+ rf = _RestField(format=format)
+ if not isinstance(deserializer, functools.partial):
+ deserializer = _get_deserialize_callable_from_annotation(deserializer, module, rf)
+ return _deserialize_with_callable(deserializer, value)
+
+
+def _failsafe_deserialize(
+ deserializer: typing.Any,
+ response: HttpResponse,
+ module: typing.Optional[str] = None,
+ rf: typing.Optional["_RestField"] = None,
+ format: typing.Optional[str] = None,
+) -> typing.Any:
+ try:
+ return _deserialize(deserializer, response.json(), module, rf, format)
+ except Exception: # pylint: disable=broad-except
+ _LOGGER.warning(
+ "Ran into a deserialization error. Ignoring since this is failsafe deserialization", exc_info=True
+ )
+ return None
+
+
+def _failsafe_deserialize_xml(
+ deserializer: typing.Any,
+ response: HttpResponse,
+) -> typing.Any:
+ try:
+ return _deserialize_xml(deserializer, response.text())
+ except Exception: # pylint: disable=broad-except
+ _LOGGER.warning(
+ "Ran into a deserialization error. Ignoring since this is failsafe deserialization", exc_info=True
+ )
+ return None
+
+
+# pylint: disable=too-many-instance-attributes
+class _RestField:
+ def __init__(
+ self,
+ *,
+ name: typing.Optional[str] = None,
+ type: typing.Optional[typing.Callable] = None, # pylint: disable=redefined-builtin
+ is_discriminator: bool = False,
+ visibility: typing.Optional[list[str]] = None,
+ default: typing.Any = _UNSET,
+ format: typing.Optional[str] = None,
+ is_multipart_file_input: bool = False,
+ xml: typing.Optional[dict[str, typing.Any]] = None,
+ original_tsp_name: typing.Optional[str] = None,
+ ):
+ self._type = type
+ self._rest_name_input = name
+ self._module: typing.Optional[str] = None
+ self._is_discriminator = is_discriminator
+ self._visibility = visibility
+ self._is_model = False
+ self._is_optional = False
+ self._default = default
+ self._format = format
+ self._is_multipart_file_input = is_multipart_file_input
+ self._xml = xml if xml is not None else {}
+ self._original_tsp_name = original_tsp_name
+
+ @property
+ def _class_type(self) -> typing.Any:
+ result = getattr(self._type, "args", [None])[0]
+ # type may be wrapped by nested functools.partial so we need to check for that
+ if isinstance(result, functools.partial):
+ return getattr(result, "args", [None])[0]
+ return result
+
+ @property
+ def _rest_name(self) -> str:
+ if self._rest_name_input is None:
+ raise ValueError("Rest name was never set")
+ return self._rest_name_input
+
+ def __get__(self, obj: Model, type=None): # pylint: disable=redefined-builtin
+ # by this point, type and rest_name will have a value bc we default
+ # them in __new__ of the Model class
+ # Use _data.get() directly to avoid triggering __getitem__ which clears the cache
+ item = obj._data.get(self._rest_name)
+ if item is None:
+ return item
+ if self._is_model:
+ return item
+
+ # For mutable types, we want mutations to directly affect _data
+ # Check if we've already deserialized this value
+ cache_attr = f"_deserialized_{self._rest_name}"
+ if hasattr(obj, cache_attr):
+ # Return the value from _data directly (it's been deserialized in place)
+ return obj._data.get(self._rest_name)
+
+ deserialized = _deserialize(self._type, _serialize(item, self._format), rf=self)
+
+ # For mutable types, store the deserialized value back in _data
+ # so mutations directly affect _data
+ if isinstance(deserialized, (dict, list, set)):
+ obj._data[self._rest_name] = deserialized
+ object.__setattr__(obj, cache_attr, True) # Mark as deserialized
+ return deserialized
+
+ return deserialized
+
+ def __set__(self, obj: Model, value) -> None:
+ # Clear the cached deserialized object when setting a new value
+ cache_attr = f"_deserialized_{self._rest_name}"
+ if hasattr(obj, cache_attr):
+ object.__delattr__(obj, cache_attr)
+
+ if value is None:
+ # we want to wipe out entries if users set attr to None
+ try:
+ obj.__delitem__(self._rest_name)
+ except KeyError:
+ pass
+ return
+ if self._is_model:
+ if not _is_model(value):
+ value = _deserialize(self._type, value)
+ obj.__setitem__(self._rest_name, value)
+ return
+ obj.__setitem__(self._rest_name, _serialize(value, self._format))
+
+ def _get_deserialize_callable_from_annotation(
+ self, annotation: typing.Any
+ ) -> typing.Optional[typing.Callable[[typing.Any], typing.Any]]:
+ return _get_deserialize_callable_from_annotation(annotation, self._module, self)
+
+
+def rest_field(
+ *,
+ name: typing.Optional[str] = None,
+ type: typing.Optional[typing.Callable] = None, # pylint: disable=redefined-builtin
+ visibility: typing.Optional[list[str]] = None,
+ default: typing.Any = _UNSET,
+ format: typing.Optional[str] = None,
+ is_multipart_file_input: bool = False,
+ xml: typing.Optional[dict[str, typing.Any]] = None,
+ original_tsp_name: typing.Optional[str] = None,
+) -> typing.Any:
+ return _RestField(
+ name=name,
+ type=type,
+ visibility=visibility,
+ default=default,
+ format=format,
+ is_multipart_file_input=is_multipart_file_input,
+ xml=xml,
+ original_tsp_name=original_tsp_name,
+ )
+
+
+def rest_discriminator(
+ *,
+ name: typing.Optional[str] = None,
+ type: typing.Optional[typing.Callable] = None, # pylint: disable=redefined-builtin
+ visibility: typing.Optional[list[str]] = None,
+ xml: typing.Optional[dict[str, typing.Any]] = None,
+) -> typing.Any:
+ return _RestField(name=name, type=type, is_discriminator=True, visibility=visibility, xml=xml)
+
+
+def serialize_xml(model: Model, exclude_readonly: bool = False) -> str:
+ """Serialize a model to XML.
+
+ :param Model model: The model to serialize.
+ :param bool exclude_readonly: Whether to exclude readonly properties.
+ :returns: The XML representation of the model.
+ :rtype: str
+ """
+ return ET.tostring(_get_element(model, exclude_readonly), encoding="unicode") # type: ignore
+
+
+def _get_element(
+ o: typing.Any,
+ exclude_readonly: bool = False,
+ parent_meta: typing.Optional[dict[str, typing.Any]] = None,
+ wrapped_element: typing.Optional[ET.Element] = None,
+) -> typing.Union[ET.Element, list[ET.Element]]:
+ if _is_model(o):
+ model_meta = getattr(o, "_xml", {})
+
+ # if prop is a model, then use the prop element directly, else generate a wrapper of model
+ if wrapped_element is None:
+ wrapped_element = _create_xml_element(
+ model_meta.get("name", o.__class__.__name__),
+ model_meta.get("prefix"),
+ model_meta.get("ns"),
+ )
+
+ readonly_props = []
+ if exclude_readonly:
+ readonly_props = [p._rest_name for p in o._attr_to_rest_field.values() if _is_readonly(p)]
+
+ for k, v in o.items():
+ # do not serialize readonly properties
+ if exclude_readonly and k in readonly_props:
+ continue
+
+ prop_rest_field = _get_rest_field(o._attr_to_rest_field, k)
+ if prop_rest_field:
+ prop_meta = getattr(prop_rest_field, "_xml").copy()
+ # use the wire name as xml name if no specific name is set
+ if prop_meta.get("name") is None:
+ prop_meta["name"] = k
+ else:
+ # additional properties will not have rest field, use the wire name as xml name
+ prop_meta = {"name": k}
+
+ # if no ns for prop, use model's
+ if prop_meta.get("ns") is None and model_meta.get("ns"):
+ prop_meta["ns"] = model_meta.get("ns")
+ prop_meta["prefix"] = model_meta.get("prefix")
+
+ if prop_meta.get("unwrapped", False):
+ # unwrapped could only set on array
+ wrapped_element.extend(_get_element(v, exclude_readonly, prop_meta))
+ elif prop_meta.get("text", False):
+ # text could only set on primitive type
+ wrapped_element.text = _get_primitive_type_value(v)
+ elif prop_meta.get("attribute", False):
+ xml_name = prop_meta.get("name", k)
+ if prop_meta.get("ns"):
+ ET.register_namespace(prop_meta.get("prefix"), prop_meta.get("ns")) # pyright: ignore
+ xml_name = "{" + prop_meta.get("ns") + "}" + xml_name # pyright: ignore
+ # attribute should be primitive type
+ wrapped_element.set(xml_name, _get_primitive_type_value(v))
+ else:
+ # other wrapped prop element
+ wrapped_element.append(_get_wrapped_element(v, exclude_readonly, prop_meta))
+ return wrapped_element
+ if isinstance(o, list):
+ return [_get_element(x, exclude_readonly, parent_meta) for x in o] # type: ignore
+ if isinstance(o, dict):
+ result = []
+ for k, v in o.items():
+ result.append(
+ _get_wrapped_element(
+ v,
+ exclude_readonly,
+ {
+ "name": k,
+ "ns": parent_meta.get("ns") if parent_meta else None,
+ "prefix": parent_meta.get("prefix") if parent_meta else None,
+ },
+ )
+ )
+ return result
+
+ # primitive case need to create element based on parent_meta
+ if parent_meta:
+ return _get_wrapped_element(
+ o,
+ exclude_readonly,
+ {
+ "name": parent_meta.get("itemsName", parent_meta.get("name")),
+ "prefix": parent_meta.get("itemsPrefix", parent_meta.get("prefix")),
+ "ns": parent_meta.get("itemsNs", parent_meta.get("ns")),
+ },
+ )
+
+ raise ValueError("Could not serialize value into xml: " + o)
+
+
+def _get_wrapped_element(
+ v: typing.Any,
+ exclude_readonly: bool,
+ meta: typing.Optional[dict[str, typing.Any]],
+) -> ET.Element:
+ wrapped_element = _create_xml_element(
+ meta.get("name") if meta else None, meta.get("prefix") if meta else None, meta.get("ns") if meta else None
+ )
+ if isinstance(v, (dict, list)):
+ wrapped_element.extend(_get_element(v, exclude_readonly, meta))
+ elif _is_model(v):
+ _get_element(v, exclude_readonly, meta, wrapped_element)
+ else:
+ wrapped_element.text = _get_primitive_type_value(v)
+ return wrapped_element # type: ignore[no-any-return]
+
+
+def _get_primitive_type_value(v) -> str:
+ if v is True:
+ return "true"
+ if v is False:
+ return "false"
+ if isinstance(v, _Null):
+ return ""
+ return str(v)
+
+
+def _create_xml_element(
+ tag: typing.Any, prefix: typing.Optional[str] = None, ns: typing.Optional[str] = None
+) -> ET.Element:
+ if prefix and ns:
+ ET.register_namespace(prefix, ns)
+ if ns:
+ return ET.Element("{" + ns + "}" + tag)
+ return ET.Element(tag)
+
+
+def _deserialize_xml(
+ deserializer: typing.Any,
+ value: str,
+) -> typing.Any:
+ element = ET.fromstring(value) # nosec
+ return _deserialize(deserializer, element)
+
+
+def _convert_element(e: ET.Element):
+ # dict case
+ if len(e.attrib) > 0 or len({child.tag for child in e}) > 1:
+ dict_result: dict[str, typing.Any] = {}
+ for child in e:
+ if dict_result.get(child.tag) is not None:
+ if isinstance(dict_result[child.tag], list):
+ dict_result[child.tag].append(_convert_element(child))
+ else:
+ dict_result[child.tag] = [dict_result[child.tag], _convert_element(child)]
+ else:
+ dict_result[child.tag] = _convert_element(child)
+ dict_result.update(e.attrib)
+ return dict_result
+ # array case
+ if len(e) > 0:
+ array_result: list[typing.Any] = []
+ for child in e:
+ array_result.append(_convert_element(child))
+ return array_result
+ # primitive case
+ return e.text
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_generated/sdk/models/_utils/serialization.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_generated/sdk/models/_utils/serialization.py
new file mode 100644
index 000000000000..81ec1de5922b
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_generated/sdk/models/_utils/serialization.py
@@ -0,0 +1,2041 @@
+# pylint: disable=line-too-long,useless-suppression,too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+# pyright: reportUnnecessaryTypeIgnoreComment=false
+
+from base64 import b64decode, b64encode
+import calendar
+import datetime
+import decimal
+import email
+from enum import Enum
+import json
+import logging
+import re
+import sys
+import codecs
+from typing import (
+ Any,
+ cast,
+ Optional,
+ Union,
+ AnyStr,
+ IO,
+ Mapping,
+ Callable,
+ MutableMapping,
+)
+
+try:
+ from urllib import quote # type: ignore
+except ImportError:
+ from urllib.parse import quote
+import xml.etree.ElementTree as ET
+
+import isodate # type: ignore
+from typing_extensions import Self
+
+from azure.core.exceptions import DeserializationError, SerializationError
+from azure.core.serialization import NULL as CoreNull
+
+_BOM = codecs.BOM_UTF8.decode(encoding="utf-8")
+
+JSON = MutableMapping[str, Any]
+
+
+class RawDeserializer:
+
+ # Accept "text" because we're open minded people...
+ JSON_REGEXP = re.compile(r"^(application|text)/([a-z+.]+\+)?json$")
+
+ # Name used in context
+ CONTEXT_NAME = "deserialized_data"
+
+ @classmethod
+ def deserialize_from_text(cls, data: Optional[Union[AnyStr, IO]], content_type: Optional[str] = None) -> Any:
+ """Decode data according to content-type.
+
+ Accept a stream of data as well, but will be load at once in memory for now.
+
+ If no content-type, will return the string version (not bytes, not stream)
+
+ :param data: Input, could be bytes or stream (will be decoded with UTF8) or text
+ :type data: str or bytes or IO
+ :param str content_type: The content type.
+ :return: The deserialized data.
+ :rtype: object
+ """
+ if hasattr(data, "read"):
+ # Assume a stream
+ data = cast(IO, data).read()
+
+ if isinstance(data, bytes):
+ data_as_str = data.decode(encoding="utf-8-sig")
+ else:
+ # Explain to mypy the correct type.
+ data_as_str = cast(str, data)
+
+ # Remove Byte Order Mark if present in string
+ data_as_str = data_as_str.lstrip(_BOM)
+
+ if content_type is None:
+ return data
+
+ if cls.JSON_REGEXP.match(content_type):
+ try:
+ return json.loads(data_as_str)
+ except ValueError as err:
+ raise DeserializationError("JSON is invalid: {}".format(err), err) from err
+ elif "xml" in (content_type or []):
+ try:
+
+ try:
+ if isinstance(data, unicode): # type: ignore
+ # If I'm Python 2.7 and unicode XML will scream if I try a "fromstring" on unicode string
+ data_as_str = data_as_str.encode(encoding="utf-8") # type: ignore
+ except NameError:
+ pass
+
+ return ET.fromstring(data_as_str) # nosec
+ except ET.ParseError as err:
+ # It might be because the server has an issue, and returned JSON with
+ # content-type XML....
+ # So let's try a JSON load, and if it's still broken
+ # let's flow the initial exception
+ def _json_attemp(data):
+ try:
+ return True, json.loads(data)
+ except ValueError:
+ return False, None # Don't care about this one
+
+ success, json_result = _json_attemp(data)
+ if success:
+ return json_result
+ # If i'm here, it's not JSON, it's not XML, let's scream
+ # and raise the last context in this block (the XML exception)
+ # The function hack is because Py2.7 messes up with exception
+ # context otherwise.
+ _LOGGER.critical("Wasn't XML not JSON, failing")
+ raise DeserializationError("XML is invalid") from err
+ elif content_type.startswith("text/"):
+ return data_as_str
+ raise DeserializationError("Cannot deserialize content-type: {}".format(content_type))
+
+ @classmethod
+ def deserialize_from_http_generics(cls, body_bytes: Optional[Union[AnyStr, IO]], headers: Mapping) -> Any:
+ """Deserialize from HTTP response.
+
+ Use bytes and headers to NOT use any requests/aiohttp or whatever
+ specific implementation.
+ Headers will tested for "content-type"
+
+ :param bytes body_bytes: The body of the response.
+ :param dict headers: The headers of the response.
+ :returns: The deserialized data.
+ :rtype: object
+ """
+ # Try to use content-type from headers if available
+ content_type = None
+ if "content-type" in headers:
+ content_type = headers["content-type"].split(";")[0].strip().lower()
+ # Ouch, this server did not declare what it sent...
+ # Let's guess it's JSON...
+ # Also, since Autorest was considering that an empty body was a valid JSON,
+ # need that test as well....
+ else:
+ content_type = "application/json"
+
+ if body_bytes:
+ return cls.deserialize_from_text(body_bytes, content_type)
+ return None
+
+
+_LOGGER = logging.getLogger(__name__)
+
+try:
+ _long_type = long # type: ignore
+except NameError:
+ _long_type = int
+
+TZ_UTC = datetime.timezone.utc
+
+_FLATTEN = re.compile(r"(? None:
+ self.additional_properties: Optional[dict[str, Any]] = {}
+ for k in kwargs: # pylint: disable=consider-using-dict-items
+ if k not in self._attribute_map:
+ _LOGGER.warning("%s is not a known attribute of class %s and will be ignored", k, self.__class__)
+ elif k in self._validation and self._validation[k].get("readonly", False):
+ _LOGGER.warning("Readonly attribute %s will be ignored in class %s", k, self.__class__)
+ else:
+ setattr(self, k, kwargs[k])
+
+ def __eq__(self, other: Any) -> bool:
+ """Compare objects by comparing all attributes.
+
+ :param object other: The object to compare
+ :returns: True if objects are equal
+ :rtype: bool
+ """
+ if isinstance(other, self.__class__):
+ return self.__dict__ == other.__dict__
+ return False
+
+ def __ne__(self, other: Any) -> bool:
+ """Compare objects by comparing all attributes.
+
+ :param object other: The object to compare
+ :returns: True if objects are not equal
+ :rtype: bool
+ """
+ return not self.__eq__(other)
+
+ def __str__(self) -> str:
+ return str(self.__dict__)
+
+ @classmethod
+ def enable_additional_properties_sending(cls) -> None:
+ cls._attribute_map["additional_properties"] = {"key": "", "type": "{object}"}
+
+ @classmethod
+ def is_xml_model(cls) -> bool:
+ try:
+ cls._xml_map # type: ignore
+ except AttributeError:
+ return False
+ return True
+
+ @classmethod
+ def _create_xml_node(cls):
+ """Create XML node.
+
+ :returns: The XML node
+ :rtype: xml.etree.ElementTree.Element
+ """
+ try:
+ xml_map = cls._xml_map # type: ignore
+ except AttributeError:
+ xml_map = {}
+
+ return _create_xml_node(xml_map.get("name", cls.__name__), xml_map.get("prefix", None), xml_map.get("ns", None))
+
+ def serialize(self, keep_readonly: bool = False, **kwargs: Any) -> JSON:
+ """Return the JSON that would be sent to server from this model.
+
+ This is an alias to `as_dict(full_restapi_key_transformer, keep_readonly=False)`.
+
+ If you want XML serialization, you can pass the kwargs is_xml=True.
+
+ :param bool keep_readonly: If you want to serialize the readonly attributes
+ :returns: A dict JSON compatible object
+ :rtype: dict
+ """
+ serializer = Serializer(self._infer_class_models())
+ return serializer._serialize( # type: ignore # pylint: disable=protected-access
+ self, keep_readonly=keep_readonly, **kwargs
+ )
+
+ def as_dict(
+ self,
+ keep_readonly: bool = True,
+ key_transformer: Callable[[str, dict[str, Any], Any], Any] = attribute_transformer,
+ **kwargs: Any
+ ) -> JSON:
+ """Return a dict that can be serialized using json.dump.
+
+ Advanced usage might optionally use a callback as parameter:
+
+ .. code::python
+
+ def my_key_transformer(key, attr_desc, value):
+ return key
+
+ Key is the attribute name used in Python. Attr_desc
+ is a dict of metadata. Currently contains 'type' with the
+ msrest type and 'key' with the RestAPI encoded key.
+ Value is the current value in this object.
+
+ The string returned will be used to serialize the key.
+ If the return type is a list, this is considered hierarchical
+ result dict.
+
+ See the three examples in this file:
+
+ - attribute_transformer
+ - full_restapi_key_transformer
+ - last_restapi_key_transformer
+
+ If you want XML serialization, you can pass the kwargs is_xml=True.
+
+ :param bool keep_readonly: If you want to serialize the readonly attributes
+ :param function key_transformer: A key transformer function.
+ :returns: A dict JSON compatible object
+ :rtype: dict
+ """
+ serializer = Serializer(self._infer_class_models())
+ return serializer._serialize( # type: ignore # pylint: disable=protected-access
+ self, key_transformer=key_transformer, keep_readonly=keep_readonly, **kwargs
+ )
+
+ @classmethod
+ def _infer_class_models(cls):
+ try:
+ str_models = cls.__module__.rsplit(".", 1)[0]
+ models = sys.modules[str_models]
+ client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
+ if cls.__name__ not in client_models:
+ raise ValueError("Not Autorest generated code")
+ except Exception: # pylint: disable=broad-exception-caught
+ # Assume it's not Autorest generated (tests?). Add ourselves as dependencies.
+ client_models = {cls.__name__: cls}
+ return client_models
+
+ @classmethod
+ def deserialize(cls, data: Any, content_type: Optional[str] = None) -> Self:
+ """Parse a str using the RestAPI syntax and return a model.
+
+ :param str data: A str using RestAPI structure. JSON by default.
+ :param str content_type: JSON by default, set application/xml if XML.
+ :returns: An instance of this model
+ :raises DeserializationError: if something went wrong
+ :rtype: Self
+ """
+ deserializer = Deserializer(cls._infer_class_models())
+ return deserializer(cls.__name__, data, content_type=content_type) # type: ignore
+
+ @classmethod
+ def from_dict(
+ cls,
+ data: Any,
+ key_extractors: Optional[Callable[[str, dict[str, Any], Any], Any]] = None,
+ content_type: Optional[str] = None,
+ ) -> Self:
+ """Parse a dict using given key extractor return a model.
+
+ By default consider key
+ extractors (rest_key_case_insensitive_extractor, attribute_key_case_insensitive_extractor
+ and last_rest_key_case_insensitive_extractor)
+
+ :param dict data: A dict using RestAPI structure
+ :param function key_extractors: A key extractor function.
+ :param str content_type: JSON by default, set application/xml if XML.
+ :returns: An instance of this model
+ :raises DeserializationError: if something went wrong
+ :rtype: Self
+ """
+ deserializer = Deserializer(cls._infer_class_models())
+ deserializer.key_extractors = ( # type: ignore
+ [ # type: ignore
+ attribute_key_case_insensitive_extractor,
+ rest_key_case_insensitive_extractor,
+ last_rest_key_case_insensitive_extractor,
+ ]
+ if key_extractors is None
+ else key_extractors
+ )
+ return deserializer(cls.__name__, data, content_type=content_type) # type: ignore
+
+ @classmethod
+ def _flatten_subtype(cls, key, objects):
+ if "_subtype_map" not in cls.__dict__:
+ return {}
+ result = dict(cls._subtype_map[key])
+ for valuetype in cls._subtype_map[key].values():
+ result |= objects[valuetype]._flatten_subtype(key, objects) # pylint: disable=protected-access
+ return result
+
+ @classmethod
+ def _classify(cls, response, objects):
+ """Check the class _subtype_map for any child classes.
+ We want to ignore any inherited _subtype_maps.
+
+ :param dict response: The initial data
+ :param dict objects: The class objects
+ :returns: The class to be used
+ :rtype: class
+ """
+ for subtype_key in cls.__dict__.get("_subtype_map", {}).keys():
+ subtype_value = None
+
+ if not isinstance(response, ET.Element):
+ rest_api_response_key = cls._get_rest_key_parts(subtype_key)[-1]
+ subtype_value = response.get(rest_api_response_key, None) or response.get(subtype_key, None)
+ else:
+ subtype_value = xml_key_extractor(subtype_key, cls._attribute_map[subtype_key], response)
+ if subtype_value:
+ # Try to match base class. Can be class name only
+ # (bug to fix in Autorest to support x-ms-discriminator-name)
+ if cls.__name__ == subtype_value:
+ return cls
+ flatten_mapping_type = cls._flatten_subtype(subtype_key, objects)
+ try:
+ return objects[flatten_mapping_type[subtype_value]] # type: ignore
+ except KeyError:
+ _LOGGER.warning(
+ "Subtype value %s has no mapping, use base class %s.",
+ subtype_value,
+ cls.__name__,
+ )
+ break
+ else:
+ _LOGGER.warning("Discriminator %s is absent or null, use base class %s.", subtype_key, cls.__name__)
+ break
+ return cls
+
+ @classmethod
+ def _get_rest_key_parts(cls, attr_key):
+ """Get the RestAPI key of this attr, split it and decode part
+ :param str attr_key: Attribute key must be in attribute_map.
+ :returns: A list of RestAPI part
+ :rtype: list
+ """
+ rest_split_key = _FLATTEN.split(cls._attribute_map[attr_key]["key"])
+ return [_decode_attribute_map_key(key_part) for key_part in rest_split_key]
+
+
+def _decode_attribute_map_key(key):
+ """This decode a key in an _attribute_map to the actual key we want to look at
+ inside the received data.
+
+ :param str key: A key string from the generated code
+ :returns: The decoded key
+ :rtype: str
+ """
+ return key.replace("\\.", ".")
+
+
+class Serializer: # pylint: disable=too-many-public-methods
+ """Request object model serializer."""
+
+ basic_types = {str: "str", int: "int", bool: "bool", float: "float"}
+
+ _xml_basic_types_serializers = {"bool": lambda x: str(x).lower()}
+ days = {0: "Mon", 1: "Tue", 2: "Wed", 3: "Thu", 4: "Fri", 5: "Sat", 6: "Sun"}
+ months = {
+ 1: "Jan",
+ 2: "Feb",
+ 3: "Mar",
+ 4: "Apr",
+ 5: "May",
+ 6: "Jun",
+ 7: "Jul",
+ 8: "Aug",
+ 9: "Sep",
+ 10: "Oct",
+ 11: "Nov",
+ 12: "Dec",
+ }
+ validation = {
+ "min_length": lambda x, y: len(x) < y,
+ "max_length": lambda x, y: len(x) > y,
+ "minimum": lambda x, y: x < y,
+ "maximum": lambda x, y: x > y,
+ "minimum_ex": lambda x, y: x <= y,
+ "maximum_ex": lambda x, y: x >= y,
+ "min_items": lambda x, y: len(x) < y,
+ "max_items": lambda x, y: len(x) > y,
+ "pattern": lambda x, y: not re.match(y, x, re.UNICODE),
+ "unique": lambda x, y: len(x) != len(set(x)),
+ "multiple": lambda x, y: x % y != 0,
+ }
+
+ def __init__(self, classes: Optional[Mapping[str, type]] = None) -> None:
+ self.serialize_type = {
+ "iso-8601": Serializer.serialize_iso,
+ "rfc-1123": Serializer.serialize_rfc,
+ "unix-time": Serializer.serialize_unix,
+ "duration": Serializer.serialize_duration,
+ "date": Serializer.serialize_date,
+ "time": Serializer.serialize_time,
+ "decimal": Serializer.serialize_decimal,
+ "long": Serializer.serialize_long,
+ "bytearray": Serializer.serialize_bytearray,
+ "base64": Serializer.serialize_base64,
+ "object": self.serialize_object,
+ "[]": self.serialize_iter,
+ "{}": self.serialize_dict,
+ }
+ self.dependencies: dict[str, type] = dict(classes) if classes else {}
+ self.key_transformer = full_restapi_key_transformer
+ self.client_side_validation = True
+
+ def _serialize( # pylint: disable=too-many-nested-blocks, too-many-branches, too-many-statements, too-many-locals
+ self, target_obj, data_type=None, **kwargs
+ ):
+ """Serialize data into a string according to type.
+
+ :param object target_obj: The data to be serialized.
+ :param str data_type: The type to be serialized from.
+ :rtype: str, dict
+ :raises SerializationError: if serialization fails.
+ :returns: The serialized data.
+ """
+ key_transformer = kwargs.get("key_transformer", self.key_transformer)
+ keep_readonly = kwargs.get("keep_readonly", False)
+ if target_obj is None:
+ return None
+
+ attr_name = None
+ class_name = target_obj.__class__.__name__
+
+ if data_type:
+ return self.serialize_data(target_obj, data_type, **kwargs)
+
+ if not hasattr(target_obj, "_attribute_map"):
+ data_type = type(target_obj).__name__
+ if data_type in self.basic_types.values():
+ return self.serialize_data(target_obj, data_type, **kwargs)
+
+ # Force "is_xml" kwargs if we detect a XML model
+ try:
+ is_xml_model_serialization = kwargs["is_xml"]
+ except KeyError:
+ is_xml_model_serialization = kwargs.setdefault("is_xml", target_obj.is_xml_model())
+
+ serialized = {}
+ if is_xml_model_serialization:
+ serialized = target_obj._create_xml_node() # pylint: disable=protected-access
+ try:
+ attributes = target_obj._attribute_map # pylint: disable=protected-access
+ for attr, attr_desc in attributes.items():
+ attr_name = attr
+ if not keep_readonly and target_obj._validation.get( # pylint: disable=protected-access
+ attr_name, {}
+ ).get("readonly", False):
+ continue
+
+ if attr_name == "additional_properties" and attr_desc["key"] == "":
+ if target_obj.additional_properties is not None:
+ serialized |= target_obj.additional_properties
+ continue
+ try:
+
+ orig_attr = getattr(target_obj, attr)
+ if is_xml_model_serialization:
+ pass # Don't provide "transformer" for XML for now. Keep "orig_attr"
+ else: # JSON
+ keys, orig_attr = key_transformer(attr, attr_desc.copy(), orig_attr)
+ keys = keys if isinstance(keys, list) else [keys]
+
+ kwargs["serialization_ctxt"] = attr_desc
+ new_attr = self.serialize_data(orig_attr, attr_desc["type"], **kwargs)
+
+ if is_xml_model_serialization:
+ xml_desc = attr_desc.get("xml", {})
+ xml_name = xml_desc.get("name", attr_desc["key"])
+ xml_prefix = xml_desc.get("prefix", None)
+ xml_ns = xml_desc.get("ns", None)
+ if xml_desc.get("attr", False):
+ if xml_ns:
+ ET.register_namespace(xml_prefix, xml_ns)
+ xml_name = "{{{}}}{}".format(xml_ns, xml_name)
+ serialized.set(xml_name, new_attr) # type: ignore
+ continue
+ if xml_desc.get("text", False):
+ serialized.text = new_attr # type: ignore
+ continue
+ if isinstance(new_attr, list):
+ serialized.extend(new_attr) # type: ignore
+ elif isinstance(new_attr, ET.Element):
+ # If the down XML has no XML/Name,
+ # we MUST replace the tag with the local tag. But keeping the namespaces.
+ if "name" not in getattr(orig_attr, "_xml_map", {}):
+ splitted_tag = new_attr.tag.split("}")
+ if len(splitted_tag) == 2: # Namespace
+ new_attr.tag = "}".join([splitted_tag[0], xml_name])
+ else:
+ new_attr.tag = xml_name
+ serialized.append(new_attr) # type: ignore
+ else: # That's a basic type
+ # Integrate namespace if necessary
+ local_node = _create_xml_node(xml_name, xml_prefix, xml_ns)
+ local_node.text = str(new_attr)
+ serialized.append(local_node) # type: ignore
+ else: # JSON
+ for k in reversed(keys): # type: ignore
+ new_attr = {k: new_attr}
+
+ _new_attr = new_attr
+ _serialized = serialized
+ for k in keys: # type: ignore
+ if k not in _serialized:
+ _serialized.update(_new_attr) # type: ignore
+ _new_attr = _new_attr[k] # type: ignore
+ _serialized = _serialized[k]
+ except ValueError as err:
+ if isinstance(err, SerializationError):
+ raise
+
+ except (AttributeError, KeyError, TypeError) as err:
+ msg = "Attribute {} in object {} cannot be serialized.\n{}".format(attr_name, class_name, str(target_obj))
+ raise SerializationError(msg) from err
+ return serialized
+
+ def body(self, data, data_type, **kwargs):
+ """Serialize data intended for a request body.
+
+ :param object data: The data to be serialized.
+ :param str data_type: The type to be serialized from.
+ :rtype: dict
+ :raises SerializationError: if serialization fails.
+ :raises ValueError: if data is None
+ :returns: The serialized request body
+ """
+
+ # Just in case this is a dict
+ internal_data_type_str = data_type.strip("[]{}")
+ internal_data_type = self.dependencies.get(internal_data_type_str, None)
+ try:
+ is_xml_model_serialization = kwargs["is_xml"]
+ except KeyError:
+ if internal_data_type and issubclass(internal_data_type, Model):
+ is_xml_model_serialization = kwargs.setdefault("is_xml", internal_data_type.is_xml_model())
+ else:
+ is_xml_model_serialization = False
+ if internal_data_type and not isinstance(internal_data_type, Enum):
+ try:
+ deserializer = Deserializer(self.dependencies)
+ # Since it's on serialization, it's almost sure that format is not JSON REST
+ # We're not able to deal with additional properties for now.
+ deserializer.additional_properties_detection = False
+ if is_xml_model_serialization:
+ deserializer.key_extractors = [ # type: ignore
+ attribute_key_case_insensitive_extractor,
+ ]
+ else:
+ deserializer.key_extractors = [
+ rest_key_case_insensitive_extractor,
+ attribute_key_case_insensitive_extractor,
+ last_rest_key_case_insensitive_extractor,
+ ]
+ data = deserializer._deserialize(data_type, data) # pylint: disable=protected-access
+ except DeserializationError as err:
+ raise SerializationError("Unable to build a model: " + str(err)) from err
+
+ return self._serialize(data, data_type, **kwargs)
+
+ def url(self, name, data, data_type, **kwargs):
+ """Serialize data intended for a URL path.
+
+ :param str name: The name of the URL path parameter.
+ :param object data: The data to be serialized.
+ :param str data_type: The type to be serialized from.
+ :rtype: str
+ :returns: The serialized URL path
+ :raises TypeError: if serialization fails.
+ :raises ValueError: if data is None
+ """
+ try:
+ output = self.serialize_data(data, data_type, **kwargs)
+ if data_type == "bool":
+ output = json.dumps(output)
+
+ if kwargs.get("skip_quote") is True:
+ output = str(output)
+ output = output.replace("{", quote("{")).replace("}", quote("}"))
+ else:
+ output = quote(str(output), safe="")
+ except SerializationError as exc:
+ raise TypeError("{} must be type {}.".format(name, data_type)) from exc
+ return output
+
+ def query(self, name, data, data_type, **kwargs):
+ """Serialize data intended for a URL query.
+
+ :param str name: The name of the query parameter.
+ :param object data: The data to be serialized.
+ :param str data_type: The type to be serialized from.
+ :rtype: str, list
+ :raises TypeError: if serialization fails.
+ :raises ValueError: if data is None
+ :returns: The serialized query parameter
+ """
+ try:
+ # Treat the list aside, since we don't want to encode the div separator
+ if data_type.startswith("["):
+ internal_data_type = data_type[1:-1]
+ do_quote = not kwargs.get("skip_quote", False)
+ return self.serialize_iter(data, internal_data_type, do_quote=do_quote, **kwargs)
+
+ # Not a list, regular serialization
+ output = self.serialize_data(data, data_type, **kwargs)
+ if data_type == "bool":
+ output = json.dumps(output)
+ if kwargs.get("skip_quote") is True:
+ output = str(output)
+ else:
+ output = quote(str(output), safe="")
+ except SerializationError as exc:
+ raise TypeError("{} must be type {}.".format(name, data_type)) from exc
+ return str(output)
+
+ def header(self, name, data, data_type, **kwargs):
+ """Serialize data intended for a request header.
+
+ :param str name: The name of the header.
+ :param object data: The data to be serialized.
+ :param str data_type: The type to be serialized from.
+ :rtype: str
+ :raises TypeError: if serialization fails.
+ :raises ValueError: if data is None
+ :returns: The serialized header
+ """
+ try:
+ if data_type in ["[str]"]:
+ data = ["" if d is None else d for d in data]
+
+ output = self.serialize_data(data, data_type, **kwargs)
+ if data_type == "bool":
+ output = json.dumps(output)
+ except SerializationError as exc:
+ raise TypeError("{} must be type {}.".format(name, data_type)) from exc
+ return str(output)
+
+ def serialize_data(self, data, data_type, **kwargs):
+ """Serialize generic data according to supplied data type.
+
+ :param object data: The data to be serialized.
+ :param str data_type: The type to be serialized from.
+ :raises AttributeError: if required data is None.
+ :raises ValueError: if data is None
+ :raises SerializationError: if serialization fails.
+ :returns: The serialized data.
+ :rtype: str, int, float, bool, dict, list
+ """
+ if data is None:
+ raise ValueError("No value for given attribute")
+
+ try:
+ if data is CoreNull:
+ return None
+ if data_type in self.basic_types.values():
+ return self.serialize_basic(data, data_type, **kwargs)
+
+ if data_type in self.serialize_type:
+ return self.serialize_type[data_type](data, **kwargs)
+
+ # If dependencies is empty, try with current data class
+ # It has to be a subclass of Enum anyway
+ enum_type = self.dependencies.get(data_type, cast(type, data.__class__))
+ if issubclass(enum_type, Enum):
+ return Serializer.serialize_enum(data, enum_obj=enum_type)
+
+ iter_type = data_type[0] + data_type[-1]
+ if iter_type in self.serialize_type:
+ return self.serialize_type[iter_type](data, data_type[1:-1], **kwargs)
+
+ except (ValueError, TypeError) as err:
+ msg = "Unable to serialize value: {!r} as type: {!r}."
+ raise SerializationError(msg.format(data, data_type)) from err
+ return self._serialize(data, **kwargs)
+
+ @classmethod
+ def _get_custom_serializers(cls, data_type, **kwargs): # pylint: disable=inconsistent-return-statements
+ custom_serializer = kwargs.get("basic_types_serializers", {}).get(data_type)
+ if custom_serializer:
+ return custom_serializer
+ if kwargs.get("is_xml", False):
+ return cls._xml_basic_types_serializers.get(data_type)
+
+ @classmethod
+ def serialize_basic(cls, data, data_type, **kwargs):
+ """Serialize basic builting data type.
+ Serializes objects to str, int, float or bool.
+
+ Possible kwargs:
+ - basic_types_serializers dict[str, callable] : If set, use the callable as serializer
+ - is_xml bool : If set, use xml_basic_types_serializers
+
+ :param obj data: Object to be serialized.
+ :param str data_type: Type of object in the iterable.
+ :rtype: str, int, float, bool
+ :return: serialized object
+ :raises TypeError: raise if data_type is not one of str, int, float, bool.
+ """
+ custom_serializer = cls._get_custom_serializers(data_type, **kwargs)
+ if custom_serializer:
+ return custom_serializer(data)
+ if data_type == "str":
+ return cls.serialize_unicode(data)
+ if data_type == "int":
+ return int(data)
+ if data_type == "float":
+ return float(data)
+ if data_type == "bool":
+ return bool(data)
+ raise TypeError("Unknown basic data type: {}".format(data_type))
+
+ @classmethod
+ def serialize_unicode(cls, data):
+ """Special handling for serializing unicode strings in Py2.
+ Encode to UTF-8 if unicode, otherwise handle as a str.
+
+ :param str data: Object to be serialized.
+ :rtype: str
+ :return: serialized object
+ """
+ try: # If I received an enum, return its value
+ return data.value
+ except AttributeError:
+ pass
+
+ try:
+ if isinstance(data, unicode): # type: ignore
+ # Don't change it, JSON and XML ElementTree are totally able
+ # to serialize correctly u'' strings
+ return data
+ except NameError:
+ return str(data)
+ return str(data)
+
+ def serialize_iter(self, data, iter_type, div=None, **kwargs):
+ """Serialize iterable.
+
+ Supported kwargs:
+ - serialization_ctxt dict : The current entry of _attribute_map, or same format.
+ serialization_ctxt['type'] should be same as data_type.
+ - is_xml bool : If set, serialize as XML
+
+ :param list data: Object to be serialized.
+ :param str iter_type: Type of object in the iterable.
+ :param str div: If set, this str will be used to combine the elements
+ in the iterable into a combined string. Default is 'None'.
+ Defaults to False.
+ :rtype: list, str
+ :return: serialized iterable
+ """
+ if isinstance(data, str):
+ raise SerializationError("Refuse str type as a valid iter type.")
+
+ serialization_ctxt = kwargs.get("serialization_ctxt", {})
+ is_xml = kwargs.get("is_xml", False)
+
+ serialized = []
+ for d in data:
+ try:
+ serialized.append(self.serialize_data(d, iter_type, **kwargs))
+ except ValueError as err:
+ if isinstance(err, SerializationError):
+ raise
+ serialized.append(None)
+
+ if kwargs.get("do_quote", False):
+ serialized = ["" if s is None else quote(str(s), safe="") for s in serialized]
+
+ if div:
+ serialized = ["" if s is None else str(s) for s in serialized]
+ serialized = div.join(serialized)
+
+ if "xml" in serialization_ctxt or is_xml:
+ # XML serialization is more complicated
+ xml_desc = serialization_ctxt.get("xml", {})
+ xml_name = xml_desc.get("name")
+ if not xml_name:
+ xml_name = serialization_ctxt["key"]
+
+ # Create a wrap node if necessary (use the fact that Element and list have "append")
+ is_wrapped = xml_desc.get("wrapped", False)
+ node_name = xml_desc.get("itemsName", xml_name)
+ if is_wrapped:
+ final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None))
+ else:
+ final_result = []
+ # All list elements to "local_node"
+ for el in serialized:
+ if isinstance(el, ET.Element):
+ el_node = el
+ else:
+ el_node = _create_xml_node(node_name, xml_desc.get("prefix", None), xml_desc.get("ns", None))
+ if el is not None: # Otherwise it writes "None" :-p
+ el_node.text = str(el)
+ final_result.append(el_node)
+ return final_result
+ return serialized
+
+ def serialize_dict(self, attr, dict_type, **kwargs):
+ """Serialize a dictionary of objects.
+
+ :param dict attr: Object to be serialized.
+ :param str dict_type: Type of object in the dictionary.
+ :rtype: dict
+ :return: serialized dictionary
+ """
+ serialization_ctxt = kwargs.get("serialization_ctxt", {})
+ serialized = {}
+ for key, value in attr.items():
+ try:
+ serialized[self.serialize_unicode(key)] = self.serialize_data(value, dict_type, **kwargs)
+ except ValueError as err:
+ if isinstance(err, SerializationError):
+ raise
+ serialized[self.serialize_unicode(key)] = None
+
+ if "xml" in serialization_ctxt:
+ # XML serialization is more complicated
+ xml_desc = serialization_ctxt["xml"]
+ xml_name = xml_desc["name"]
+
+ final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None))
+ for key, value in serialized.items():
+ ET.SubElement(final_result, key).text = value
+ return final_result
+
+ return serialized
+
+ def serialize_object(self, attr, **kwargs): # pylint: disable=too-many-return-statements
+ """Serialize a generic object.
+ This will be handled as a dictionary. If object passed in is not
+ a basic type (str, int, float, dict, list) it will simply be
+ cast to str.
+
+ :param dict attr: Object to be serialized.
+ :rtype: dict or str
+ :return: serialized object
+ """
+ if attr is None:
+ return None
+ if isinstance(attr, ET.Element):
+ return attr
+ obj_type = type(attr)
+ if obj_type in self.basic_types:
+ return self.serialize_basic(attr, self.basic_types[obj_type], **kwargs)
+ if obj_type is _long_type:
+ return self.serialize_long(attr)
+ if obj_type is str:
+ return self.serialize_unicode(attr)
+ if obj_type is datetime.datetime:
+ return self.serialize_iso(attr)
+ if obj_type is datetime.date:
+ return self.serialize_date(attr)
+ if obj_type is datetime.time:
+ return self.serialize_time(attr)
+ if obj_type is datetime.timedelta:
+ return self.serialize_duration(attr)
+ if obj_type is decimal.Decimal:
+ return self.serialize_decimal(attr)
+
+ # If it's a model or I know this dependency, serialize as a Model
+ if obj_type in self.dependencies.values() or isinstance(attr, Model):
+ return self._serialize(attr)
+
+ if obj_type == dict:
+ serialized = {}
+ for key, value in attr.items():
+ try:
+ serialized[self.serialize_unicode(key)] = self.serialize_object(value, **kwargs)
+ except ValueError:
+ serialized[self.serialize_unicode(key)] = None
+ return serialized
+
+ if obj_type == list:
+ serialized = []
+ for obj in attr:
+ try:
+ serialized.append(self.serialize_object(obj, **kwargs))
+ except ValueError:
+ pass
+ return serialized
+ return str(attr)
+
+ @staticmethod
+ def serialize_enum(attr, enum_obj=None):
+ try:
+ result = attr.value
+ except AttributeError:
+ result = attr
+ try:
+ enum_obj(result) # type: ignore
+ return result
+ except ValueError as exc:
+ for enum_value in enum_obj: # type: ignore
+ if enum_value.value.lower() == str(attr).lower():
+ return enum_value.value
+ error = "{!r} is not valid value for enum {!r}"
+ raise SerializationError(error.format(attr, enum_obj)) from exc
+
+ @staticmethod
+ def serialize_bytearray(attr, **kwargs): # pylint: disable=unused-argument
+ """Serialize bytearray into base-64 string.
+
+ :param str attr: Object to be serialized.
+ :rtype: str
+ :return: serialized base64
+ """
+ return b64encode(attr).decode()
+
+ @staticmethod
+ def serialize_base64(attr, **kwargs): # pylint: disable=unused-argument
+ """Serialize str into base-64 string.
+
+ :param str attr: Object to be serialized.
+ :rtype: str
+ :return: serialized base64
+ """
+ encoded = b64encode(attr).decode("ascii")
+ return encoded.strip("=").replace("+", "-").replace("/", "_")
+
+ @staticmethod
+ def serialize_decimal(attr, **kwargs): # pylint: disable=unused-argument
+ """Serialize Decimal object to float.
+
+ :param decimal attr: Object to be serialized.
+ :rtype: float
+ :return: serialized decimal
+ """
+ return float(attr)
+
+ @staticmethod
+ def serialize_long(attr, **kwargs): # pylint: disable=unused-argument
+ """Serialize long (Py2) or int (Py3).
+
+ :param int attr: Object to be serialized.
+ :rtype: int/long
+ :return: serialized long
+ """
+ return _long_type(attr)
+
+ @staticmethod
+ def serialize_date(attr, **kwargs): # pylint: disable=unused-argument
+ """Serialize Date object into ISO-8601 formatted string.
+
+ :param Date attr: Object to be serialized.
+ :rtype: str
+ :return: serialized date
+ """
+ if isinstance(attr, str):
+ attr = isodate.parse_date(attr)
+ t = "{:04}-{:02}-{:02}".format(attr.year, attr.month, attr.day)
+ return t
+
+ @staticmethod
+ def serialize_time(attr, **kwargs): # pylint: disable=unused-argument
+ """Serialize Time object into ISO-8601 formatted string.
+
+ :param datetime.time attr: Object to be serialized.
+ :rtype: str
+ :return: serialized time
+ """
+ if isinstance(attr, str):
+ attr = isodate.parse_time(attr)
+ t = "{:02}:{:02}:{:02}".format(attr.hour, attr.minute, attr.second)
+ if attr.microsecond:
+ t += ".{:02}".format(attr.microsecond)
+ return t
+
+ @staticmethod
+ def serialize_duration(attr, **kwargs): # pylint: disable=unused-argument
+ """Serialize TimeDelta object into ISO-8601 formatted string.
+
+ :param TimeDelta attr: Object to be serialized.
+ :rtype: str
+ :return: serialized duration
+ """
+ if isinstance(attr, str):
+ attr = isodate.parse_duration(attr)
+ return isodate.duration_isoformat(attr)
+
+ @staticmethod
+ def serialize_rfc(attr, **kwargs): # pylint: disable=unused-argument
+ """Serialize Datetime object into RFC-1123 formatted string.
+
+ :param Datetime attr: Object to be serialized.
+ :rtype: str
+ :raises TypeError: if format invalid.
+ :return: serialized rfc
+ """
+ try:
+ if not attr.tzinfo:
+ _LOGGER.warning("Datetime with no tzinfo will be considered UTC.")
+ utc = attr.utctimetuple()
+ except AttributeError as exc:
+ raise TypeError("RFC1123 object must be valid Datetime object.") from exc
+
+ return "{}, {:02} {} {:04} {:02}:{:02}:{:02} GMT".format(
+ Serializer.days[utc.tm_wday],
+ utc.tm_mday,
+ Serializer.months[utc.tm_mon],
+ utc.tm_year,
+ utc.tm_hour,
+ utc.tm_min,
+ utc.tm_sec,
+ )
+
+ @staticmethod
+ def serialize_iso(attr, **kwargs): # pylint: disable=unused-argument
+ """Serialize Datetime object into ISO-8601 formatted string.
+
+ :param Datetime attr: Object to be serialized.
+ :rtype: str
+ :raises SerializationError: if format invalid.
+ :return: serialized iso
+ """
+ if isinstance(attr, str):
+ attr = isodate.parse_datetime(attr)
+ try:
+ if not attr.tzinfo:
+ _LOGGER.warning("Datetime with no tzinfo will be considered UTC.")
+ utc = attr.utctimetuple()
+ if utc.tm_year > 9999 or utc.tm_year < 1:
+ raise OverflowError("Hit max or min date")
+
+ microseconds = str(attr.microsecond).rjust(6, "0").rstrip("0").ljust(3, "0")
+ if microseconds:
+ microseconds = "." + microseconds
+ date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format(
+ utc.tm_year, utc.tm_mon, utc.tm_mday, utc.tm_hour, utc.tm_min, utc.tm_sec
+ )
+ return date + microseconds + "Z"
+ except (ValueError, OverflowError) as err:
+ msg = "Unable to serialize datetime object."
+ raise SerializationError(msg) from err
+ except AttributeError as err:
+ msg = "ISO-8601 object must be valid Datetime object."
+ raise TypeError(msg) from err
+
+ @staticmethod
+ def serialize_unix(attr, **kwargs): # pylint: disable=unused-argument
+ """Serialize Datetime object into IntTime format.
+ This is represented as seconds.
+
+ :param Datetime attr: Object to be serialized.
+ :rtype: int
+ :raises SerializationError: if format invalid
+ :return: serialied unix
+ """
+ if isinstance(attr, int):
+ return attr
+ try:
+ if not attr.tzinfo:
+ _LOGGER.warning("Datetime with no tzinfo will be considered UTC.")
+ return int(calendar.timegm(attr.utctimetuple()))
+ except AttributeError as exc:
+ raise TypeError("Unix time object must be valid Datetime object.") from exc
+
+
+def rest_key_extractor(attr, attr_desc, data): # pylint: disable=unused-argument
+ key = attr_desc["key"]
+ working_data = data
+
+ while "." in key:
+ # Need the cast, as for some reasons "split" is typed as list[str | Any]
+ dict_keys = cast(list[str], _FLATTEN.split(key))
+ if len(dict_keys) == 1:
+ key = _decode_attribute_map_key(dict_keys[0])
+ break
+ working_key = _decode_attribute_map_key(dict_keys[0])
+ working_data = working_data.get(working_key, data)
+ if working_data is None:
+ # If at any point while following flatten JSON path see None, it means
+ # that all properties under are None as well
+ return None
+ key = ".".join(dict_keys[1:])
+
+ return working_data.get(key)
+
+
+def rest_key_case_insensitive_extractor( # pylint: disable=unused-argument, inconsistent-return-statements
+ attr, attr_desc, data
+):
+ key = attr_desc["key"]
+ working_data = data
+
+ while "." in key:
+ dict_keys = _FLATTEN.split(key)
+ if len(dict_keys) == 1:
+ key = _decode_attribute_map_key(dict_keys[0])
+ break
+ working_key = _decode_attribute_map_key(dict_keys[0])
+ working_data = attribute_key_case_insensitive_extractor(working_key, None, working_data)
+ if working_data is None:
+ # If at any point while following flatten JSON path see None, it means
+ # that all properties under are None as well
+ return None
+ key = ".".join(dict_keys[1:])
+
+ if working_data:
+ return attribute_key_case_insensitive_extractor(key, None, working_data)
+
+
+def last_rest_key_extractor(attr, attr_desc, data): # pylint: disable=unused-argument
+ """Extract the attribute in "data" based on the last part of the JSON path key.
+
+ :param str attr: The attribute to extract
+ :param dict attr_desc: The attribute description
+ :param dict data: The data to extract from
+ :rtype: object
+ :returns: The extracted attribute
+ """
+ key = attr_desc["key"]
+ dict_keys = _FLATTEN.split(key)
+ return attribute_key_extractor(dict_keys[-1], None, data)
+
+
+def last_rest_key_case_insensitive_extractor(attr, attr_desc, data): # pylint: disable=unused-argument
+ """Extract the attribute in "data" based on the last part of the JSON path key.
+
+ This is the case insensitive version of "last_rest_key_extractor"
+ :param str attr: The attribute to extract
+ :param dict attr_desc: The attribute description
+ :param dict data: The data to extract from
+ :rtype: object
+ :returns: The extracted attribute
+ """
+ key = attr_desc["key"]
+ dict_keys = _FLATTEN.split(key)
+ return attribute_key_case_insensitive_extractor(dict_keys[-1], None, data)
+
+
+def attribute_key_extractor(attr, _, data):
+ return data.get(attr)
+
+
+def attribute_key_case_insensitive_extractor(attr, _, data):
+ found_key = None
+ lower_attr = attr.lower()
+ for key in data:
+ if lower_attr == key.lower():
+ found_key = key
+ break
+
+ return data.get(found_key)
+
+
+def _extract_name_from_internal_type(internal_type):
+ """Given an internal type XML description, extract correct XML name with namespace.
+
+ :param dict internal_type: An model type
+ :rtype: tuple
+ :returns: A tuple XML name + namespace dict
+ """
+ internal_type_xml_map = getattr(internal_type, "_xml_map", {})
+ xml_name = internal_type_xml_map.get("name", internal_type.__name__)
+ xml_ns = internal_type_xml_map.get("ns", None)
+ if xml_ns:
+ xml_name = "{{{}}}{}".format(xml_ns, xml_name)
+ return xml_name
+
+
+def xml_key_extractor(attr, attr_desc, data): # pylint: disable=unused-argument,too-many-return-statements
+ if isinstance(data, dict):
+ return None
+
+ # Test if this model is XML ready first
+ if not isinstance(data, ET.Element):
+ return None
+
+ xml_desc = attr_desc.get("xml", {})
+ xml_name = xml_desc.get("name", attr_desc["key"])
+
+ # Look for a children
+ is_iter_type = attr_desc["type"].startswith("[")
+ is_wrapped = xml_desc.get("wrapped", False)
+ internal_type = attr_desc.get("internalType", None)
+ internal_type_xml_map = getattr(internal_type, "_xml_map", {})
+
+ # Integrate namespace if necessary
+ xml_ns = xml_desc.get("ns", internal_type_xml_map.get("ns", None))
+ if xml_ns:
+ xml_name = "{{{}}}{}".format(xml_ns, xml_name)
+
+ # If it's an attribute, that's simple
+ if xml_desc.get("attr", False):
+ return data.get(xml_name)
+
+ # If it's x-ms-text, that's simple too
+ if xml_desc.get("text", False):
+ return data.text
+
+ # Scenario where I take the local name:
+ # - Wrapped node
+ # - Internal type is an enum (considered basic types)
+ # - Internal type has no XML/Name node
+ if is_wrapped or (internal_type and (issubclass(internal_type, Enum) or "name" not in internal_type_xml_map)):
+ children = data.findall(xml_name)
+ # If internal type has a local name and it's not a list, I use that name
+ elif not is_iter_type and internal_type and "name" in internal_type_xml_map:
+ xml_name = _extract_name_from_internal_type(internal_type)
+ children = data.findall(xml_name)
+ # That's an array
+ else:
+ if internal_type: # Complex type, ignore itemsName and use the complex type name
+ items_name = _extract_name_from_internal_type(internal_type)
+ else:
+ items_name = xml_desc.get("itemsName", xml_name)
+ children = data.findall(items_name)
+
+ if len(children) == 0:
+ if is_iter_type:
+ if is_wrapped:
+ return None # is_wrapped no node, we want None
+ return [] # not wrapped, assume empty list
+ return None # Assume it's not there, maybe an optional node.
+
+ # If is_iter_type and not wrapped, return all found children
+ if is_iter_type:
+ if not is_wrapped:
+ return children
+ # Iter and wrapped, should have found one node only (the wrap one)
+ if len(children) != 1:
+ raise DeserializationError(
+ "Tried to deserialize an array not wrapped, and found several nodes '{}'. Maybe you should declare this array as wrapped?".format(
+ xml_name
+ )
+ )
+ return list(children[0]) # Might be empty list and that's ok.
+
+ # Here it's not a itertype, we should have found one element only or empty
+ if len(children) > 1:
+ raise DeserializationError("Find several XML '{}' where it was not expected".format(xml_name))
+ return children[0]
+
+
+class Deserializer:
+ """Response object model deserializer.
+
+ :param dict classes: Class type dictionary for deserializing complex types.
+ :ivar list key_extractors: Ordered list of extractors to be used by this deserializer.
+ """
+
+ basic_types = {str: "str", int: "int", bool: "bool", float: "float"}
+
+ valid_date = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?")
+
+ def __init__(self, classes: Optional[Mapping[str, type]] = None) -> None:
+ self.deserialize_type = {
+ "iso-8601": Deserializer.deserialize_iso,
+ "rfc-1123": Deserializer.deserialize_rfc,
+ "unix-time": Deserializer.deserialize_unix,
+ "duration": Deserializer.deserialize_duration,
+ "date": Deserializer.deserialize_date,
+ "time": Deserializer.deserialize_time,
+ "decimal": Deserializer.deserialize_decimal,
+ "long": Deserializer.deserialize_long,
+ "bytearray": Deserializer.deserialize_bytearray,
+ "base64": Deserializer.deserialize_base64,
+ "object": self.deserialize_object,
+ "[]": self.deserialize_iter,
+ "{}": self.deserialize_dict,
+ }
+ self.deserialize_expected_types = {
+ "duration": (isodate.Duration, datetime.timedelta),
+ "iso-8601": (datetime.datetime),
+ }
+ self.dependencies: dict[str, type] = dict(classes) if classes else {}
+ self.key_extractors = [rest_key_extractor, xml_key_extractor]
+ # Additional properties only works if the "rest_key_extractor" is used to
+ # extract the keys. Making it to work whatever the key extractor is too much
+ # complicated, with no real scenario for now.
+ # So adding a flag to disable additional properties detection. This flag should be
+ # used if your expect the deserialization to NOT come from a JSON REST syntax.
+ # Otherwise, result are unexpected
+ self.additional_properties_detection = True
+
+ def __call__(self, target_obj, response_data, content_type=None):
+ """Call the deserializer to process a REST response.
+
+ :param str target_obj: Target data type to deserialize to.
+ :param requests.Response response_data: REST response object.
+ :param str content_type: Swagger "produces" if available.
+ :raises DeserializationError: if deserialization fails.
+ :return: Deserialized object.
+ :rtype: object
+ """
+ data = self._unpack_content(response_data, content_type)
+ return self._deserialize(target_obj, data)
+
+ def _deserialize(self, target_obj, data): # pylint: disable=inconsistent-return-statements
+ """Call the deserializer on a model.
+
+ Data needs to be already deserialized as JSON or XML ElementTree
+
+ :param str target_obj: Target data type to deserialize to.
+ :param object data: Object to deserialize.
+ :raises DeserializationError: if deserialization fails.
+ :return: Deserialized object.
+ :rtype: object
+ """
+ # This is already a model, go recursive just in case
+ if hasattr(data, "_attribute_map"):
+ constants = [name for name, config in getattr(data, "_validation", {}).items() if config.get("constant")]
+ try:
+ for attr, mapconfig in data._attribute_map.items(): # pylint: disable=protected-access
+ if attr in constants:
+ continue
+ value = getattr(data, attr)
+ if value is None:
+ continue
+ local_type = mapconfig["type"]
+ internal_data_type = local_type.strip("[]{}")
+ if internal_data_type not in self.dependencies or isinstance(internal_data_type, Enum):
+ continue
+ setattr(data, attr, self._deserialize(local_type, value))
+ return data
+ except AttributeError:
+ return
+
+ response, class_name = self._classify_target(target_obj, data)
+
+ if isinstance(response, str):
+ return self.deserialize_data(data, response)
+ if isinstance(response, type) and issubclass(response, Enum):
+ return self.deserialize_enum(data, response)
+
+ if data is None or data is CoreNull:
+ return data
+ try:
+ attributes = response._attribute_map # type: ignore # pylint: disable=protected-access
+ d_attrs = {}
+ for attr, attr_desc in attributes.items():
+ # Check empty string. If it's not empty, someone has a real "additionalProperties"...
+ if attr == "additional_properties" and attr_desc["key"] == "":
+ continue
+ raw_value = None
+ # Enhance attr_desc with some dynamic data
+ attr_desc = attr_desc.copy() # Do a copy, do not change the real one
+ internal_data_type = attr_desc["type"].strip("[]{}")
+ if internal_data_type in self.dependencies:
+ attr_desc["internalType"] = self.dependencies[internal_data_type]
+
+ for key_extractor in self.key_extractors:
+ found_value = key_extractor(attr, attr_desc, data)
+ if found_value is not None:
+ if raw_value is not None and raw_value != found_value:
+ msg = (
+ "Ignoring extracted value '%s' from %s for key '%s'"
+ " (duplicate extraction, follow extractors order)"
+ )
+ _LOGGER.warning(msg, found_value, key_extractor, attr)
+ continue
+ raw_value = found_value
+
+ value = self.deserialize_data(raw_value, attr_desc["type"])
+ d_attrs[attr] = value
+ except (AttributeError, TypeError, KeyError) as err:
+ msg = "Unable to deserialize to object: " + class_name # type: ignore
+ raise DeserializationError(msg) from err
+ additional_properties = self._build_additional_properties(attributes, data)
+ return self._instantiate_model(response, d_attrs, additional_properties)
+
+ def _build_additional_properties(self, attribute_map, data):
+ if not self.additional_properties_detection:
+ return None
+ if "additional_properties" in attribute_map and attribute_map.get("additional_properties", {}).get("key") != "":
+ # Check empty string. If it's not empty, someone has a real "additionalProperties"
+ return None
+ if isinstance(data, ET.Element):
+ data = {el.tag: el.text for el in data}
+
+ known_keys = {
+ _decode_attribute_map_key(_FLATTEN.split(desc["key"])[0])
+ for desc in attribute_map.values()
+ if desc["key"] != ""
+ }
+ present_keys = set(data.keys())
+ missing_keys = present_keys - known_keys
+ return {key: data[key] for key in missing_keys}
+
+ def _classify_target(self, target, data):
+ """Check to see whether the deserialization target object can
+ be classified into a subclass.
+ Once classification has been determined, initialize object.
+
+ :param str target: The target object type to deserialize to.
+ :param str/dict data: The response data to deserialize.
+ :return: The classified target object and its class name.
+ :rtype: tuple
+ """
+ if target is None:
+ return None, None
+
+ if isinstance(target, str):
+ try:
+ target = self.dependencies[target]
+ except KeyError:
+ return target, target
+
+ try:
+ target = target._classify(data, self.dependencies) # type: ignore # pylint: disable=protected-access
+ except AttributeError:
+ pass # Target is not a Model, no classify
+ return target, target.__class__.__name__ # type: ignore
+
+ def failsafe_deserialize(self, target_obj, data, content_type=None):
+ """Ignores any errors encountered in deserialization,
+ and falls back to not deserializing the object. Recommended
+ for use in error deserialization, as we want to return the
+ HttpResponseError to users, and not have them deal with
+ a deserialization error.
+
+ :param str target_obj: The target object type to deserialize to.
+ :param str/dict data: The response data to deserialize.
+ :param str content_type: Swagger "produces" if available.
+ :return: Deserialized object.
+ :rtype: object
+ """
+ try:
+ return self(target_obj, data, content_type=content_type)
+ except: # pylint: disable=bare-except
+ _LOGGER.debug(
+ "Ran into a deserialization error. Ignoring since this is failsafe deserialization", exc_info=True
+ )
+ return None
+
+ @staticmethod
+ def _unpack_content(raw_data, content_type=None):
+ """Extract the correct structure for deserialization.
+
+ If raw_data is a PipelineResponse, try to extract the result of RawDeserializer.
+ if we can't, raise. Your Pipeline should have a RawDeserializer.
+
+ If not a pipeline response and raw_data is bytes or string, use content-type
+ to decode it. If no content-type, try JSON.
+
+ If raw_data is something else, bypass all logic and return it directly.
+
+ :param obj raw_data: Data to be processed.
+ :param str content_type: How to parse if raw_data is a string/bytes.
+ :raises JSONDecodeError: If JSON is requested and parsing is impossible.
+ :raises UnicodeDecodeError: If bytes is not UTF8
+ :rtype: object
+ :return: Unpacked content.
+ """
+ # Assume this is enough to detect a Pipeline Response without importing it
+ context = getattr(raw_data, "context", {})
+ if context:
+ if RawDeserializer.CONTEXT_NAME in context:
+ return context[RawDeserializer.CONTEXT_NAME]
+ raise ValueError("This pipeline didn't have the RawDeserializer policy; can't deserialize")
+
+ # Assume this is enough to recognize universal_http.ClientResponse without importing it
+ if hasattr(raw_data, "body"):
+ return RawDeserializer.deserialize_from_http_generics(raw_data.text(), raw_data.headers)
+
+ # Assume this enough to recognize requests.Response without importing it.
+ if hasattr(raw_data, "_content_consumed"):
+ return RawDeserializer.deserialize_from_http_generics(raw_data.text, raw_data.headers)
+
+ if isinstance(raw_data, (str, bytes)) or hasattr(raw_data, "read"):
+ return RawDeserializer.deserialize_from_text(raw_data, content_type) # type: ignore
+ return raw_data
+
+ def _instantiate_model(self, response, attrs, additional_properties=None):
+ """Instantiate a response model passing in deserialized args.
+
+ :param Response response: The response model class.
+ :param dict attrs: The deserialized response attributes.
+ :param dict additional_properties: Additional properties to be set.
+ :rtype: Response
+ :return: The instantiated response model.
+ """
+ if callable(response):
+ subtype = getattr(response, "_subtype_map", {})
+ try:
+ readonly = [
+ k
+ for k, v in response._validation.items() # pylint: disable=protected-access # type: ignore
+ if v.get("readonly")
+ ]
+ const = [
+ k
+ for k, v in response._validation.items() # pylint: disable=protected-access # type: ignore
+ if v.get("constant")
+ ]
+ kwargs = {k: v for k, v in attrs.items() if k not in subtype and k not in readonly + const}
+ response_obj = response(**kwargs)
+ for attr in readonly:
+ setattr(response_obj, attr, attrs.get(attr))
+ if additional_properties:
+ response_obj.additional_properties = additional_properties # type: ignore
+ return response_obj
+ except TypeError as err:
+ msg = "Unable to deserialize {} into model {}. ".format(kwargs, response) # type: ignore
+ raise DeserializationError(msg + str(err)) from err
+ else:
+ try:
+ for attr, value in attrs.items():
+ setattr(response, attr, value)
+ return response
+ except Exception as exp:
+ msg = "Unable to populate response model. "
+ msg += "Type: {}, Error: {}".format(type(response), exp)
+ raise DeserializationError(msg) from exp
+
+ def deserialize_data(self, data, data_type): # pylint: disable=too-many-return-statements
+ """Process data for deserialization according to data type.
+
+ :param str data: The response string to be deserialized.
+ :param str data_type: The type to deserialize to.
+ :raises DeserializationError: if deserialization fails.
+ :return: Deserialized object.
+ :rtype: object
+ """
+ if data is None:
+ return data
+
+ try:
+ if not data_type:
+ return data
+ if data_type in self.basic_types.values():
+ return self.deserialize_basic(data, data_type)
+ if data_type in self.deserialize_type:
+ if isinstance(data, self.deserialize_expected_types.get(data_type, tuple())):
+ return data
+
+ is_a_text_parsing_type = lambda x: x not in [ # pylint: disable=unnecessary-lambda-assignment
+ "object",
+ "[]",
+ r"{}",
+ ]
+ if isinstance(data, ET.Element) and is_a_text_parsing_type(data_type) and not data.text:
+ return None
+ data_val = self.deserialize_type[data_type](data)
+ return data_val
+
+ iter_type = data_type[0] + data_type[-1]
+ if iter_type in self.deserialize_type:
+ return self.deserialize_type[iter_type](data, data_type[1:-1])
+
+ obj_type = self.dependencies[data_type]
+ if issubclass(obj_type, Enum):
+ if isinstance(data, ET.Element):
+ data = data.text
+ return self.deserialize_enum(data, obj_type)
+
+ except (ValueError, TypeError, AttributeError) as err:
+ msg = "Unable to deserialize response data."
+ msg += " Data: {}, {}".format(data, data_type)
+ raise DeserializationError(msg) from err
+ return self._deserialize(obj_type, data)
+
+ def deserialize_iter(self, attr, iter_type):
+ """Deserialize an iterable.
+
+ :param list attr: Iterable to be deserialized.
+ :param str iter_type: The type of object in the iterable.
+ :return: Deserialized iterable.
+ :rtype: list
+ """
+ if attr is None:
+ return None
+ if isinstance(attr, ET.Element): # If I receive an element here, get the children
+ attr = list(attr)
+ if not isinstance(attr, (list, set)):
+ raise DeserializationError("Cannot deserialize as [{}] an object of type {}".format(iter_type, type(attr)))
+ return [self.deserialize_data(a, iter_type) for a in attr]
+
+ def deserialize_dict(self, attr, dict_type):
+ """Deserialize a dictionary.
+
+ :param dict/list attr: Dictionary to be deserialized. Also accepts
+ a list of key, value pairs.
+ :param str dict_type: The object type of the items in the dictionary.
+ :return: Deserialized dictionary.
+ :rtype: dict
+ """
+ if isinstance(attr, list):
+ return {x["key"]: self.deserialize_data(x["value"], dict_type) for x in attr}
+
+ if isinstance(attr, ET.Element):
+ # Transform value into {"Key": "value"}
+ attr = {el.tag: el.text for el in attr}
+ return {k: self.deserialize_data(v, dict_type) for k, v in attr.items()}
+
+ def deserialize_object(self, attr, **kwargs): # pylint: disable=too-many-return-statements
+ """Deserialize a generic object.
+ This will be handled as a dictionary.
+
+ :param dict attr: Dictionary to be deserialized.
+ :return: Deserialized object.
+ :rtype: dict
+ :raises TypeError: if non-builtin datatype encountered.
+ """
+ if attr is None:
+ return None
+ if isinstance(attr, ET.Element):
+ # Do no recurse on XML, just return the tree as-is
+ return attr
+ if isinstance(attr, str):
+ return self.deserialize_basic(attr, "str")
+ obj_type = type(attr)
+ if obj_type in self.basic_types:
+ return self.deserialize_basic(attr, self.basic_types[obj_type])
+ if obj_type is _long_type:
+ return self.deserialize_long(attr)
+
+ if obj_type == dict:
+ deserialized = {}
+ for key, value in attr.items():
+ try:
+ deserialized[key] = self.deserialize_object(value, **kwargs)
+ except ValueError:
+ deserialized[key] = None
+ return deserialized
+
+ if obj_type == list:
+ deserialized = []
+ for obj in attr:
+ try:
+ deserialized.append(self.deserialize_object(obj, **kwargs))
+ except ValueError:
+ pass
+ return deserialized
+
+ error = "Cannot deserialize generic object with type: "
+ raise TypeError(error + str(obj_type))
+
+ def deserialize_basic(self, attr, data_type): # pylint: disable=too-many-return-statements
+ """Deserialize basic builtin data type from string.
+ Will attempt to convert to str, int, float and bool.
+ This function will also accept '1', '0', 'true' and 'false' as
+ valid bool values.
+
+ :param str attr: response string to be deserialized.
+ :param str data_type: deserialization data type.
+ :return: Deserialized basic type.
+ :rtype: str, int, float or bool
+ :raises TypeError: if string format is not valid or data_type is not one of str, int, float, bool.
+ """
+ # If we're here, data is supposed to be a basic type.
+ # If it's still an XML node, take the text
+ if isinstance(attr, ET.Element):
+ attr = attr.text
+ if not attr:
+ if data_type == "str":
+ # None or '', node is empty string.
+ return ""
+ # None or '', node with a strong type is None.
+ # Don't try to model "empty bool" or "empty int"
+ return None
+
+ if data_type == "bool":
+ if attr in [True, False, 1, 0]:
+ return bool(attr)
+ if isinstance(attr, str):
+ if attr.lower() in ["true", "1"]:
+ return True
+ if attr.lower() in ["false", "0"]:
+ return False
+ raise TypeError("Invalid boolean value: {}".format(attr))
+
+ if data_type == "str":
+ return self.deserialize_unicode(attr)
+ if data_type == "int":
+ return int(attr)
+ if data_type == "float":
+ return float(attr)
+ raise TypeError("Unknown basic data type: {}".format(data_type))
+
+ @staticmethod
+ def deserialize_unicode(data):
+ """Preserve unicode objects in Python 2, otherwise return data
+ as a string.
+
+ :param str data: response string to be deserialized.
+ :return: Deserialized string.
+ :rtype: str or unicode
+ """
+ # We might be here because we have an enum modeled as string,
+ # and we try to deserialize a partial dict with enum inside
+ if isinstance(data, Enum):
+ return data
+
+ # Consider this is real string
+ try:
+ if isinstance(data, unicode): # type: ignore
+ return data
+ except NameError:
+ return str(data)
+ return str(data)
+
+ @staticmethod
+ def deserialize_enum(data, enum_obj):
+ """Deserialize string into enum object.
+
+ If the string is not a valid enum value it will be returned as-is
+ and a warning will be logged.
+
+ :param str data: Response string to be deserialized. If this value is
+ None or invalid it will be returned as-is.
+ :param Enum enum_obj: Enum object to deserialize to.
+ :return: Deserialized enum object.
+ :rtype: Enum
+ """
+ if isinstance(data, enum_obj) or data is None:
+ return data
+ if isinstance(data, Enum):
+ data = data.value
+ if isinstance(data, int):
+ # Workaround. We might consider remove it in the future.
+ try:
+ return list(enum_obj.__members__.values())[data]
+ except IndexError as exc:
+ error = "{!r} is not a valid index for enum {!r}"
+ raise DeserializationError(error.format(data, enum_obj)) from exc
+ try:
+ return enum_obj(str(data))
+ except ValueError:
+ for enum_value in enum_obj:
+ if enum_value.value.lower() == str(data).lower():
+ return enum_value
+ # We don't fail anymore for unknown value, we deserialize as a string
+ _LOGGER.warning("Deserializer is not able to find %s as valid enum in %s", data, enum_obj)
+ return Deserializer.deserialize_unicode(data)
+
+ @staticmethod
+ def deserialize_bytearray(attr):
+ """Deserialize string into bytearray.
+
+ :param str attr: response string to be deserialized.
+ :return: Deserialized bytearray
+ :rtype: bytearray
+ :raises TypeError: if string format invalid.
+ """
+ if isinstance(attr, ET.Element):
+ attr = attr.text
+ return bytearray(b64decode(attr)) # type: ignore
+
+ @staticmethod
+ def deserialize_base64(attr):
+ """Deserialize base64 encoded string into string.
+
+ :param str attr: response string to be deserialized.
+ :return: Deserialized base64 string
+ :rtype: bytearray
+ :raises TypeError: if string format invalid.
+ """
+ if isinstance(attr, ET.Element):
+ attr = attr.text
+ padding = "=" * (3 - (len(attr) + 3) % 4) # type: ignore
+ attr = attr + padding # type: ignore
+ encoded = attr.replace("-", "+").replace("_", "/")
+ return b64decode(encoded)
+
+ @staticmethod
+ def deserialize_decimal(attr):
+ """Deserialize string into Decimal object.
+
+ :param str attr: response string to be deserialized.
+ :return: Deserialized decimal
+ :raises DeserializationError: if string format invalid.
+ :rtype: decimal
+ """
+ if isinstance(attr, ET.Element):
+ attr = attr.text
+ try:
+ return decimal.Decimal(str(attr)) # type: ignore
+ except decimal.DecimalException as err:
+ msg = "Invalid decimal {}".format(attr)
+ raise DeserializationError(msg) from err
+
+ @staticmethod
+ def deserialize_long(attr):
+ """Deserialize string into long (Py2) or int (Py3).
+
+ :param str attr: response string to be deserialized.
+ :return: Deserialized int
+ :rtype: long or int
+ :raises ValueError: if string format invalid.
+ """
+ if isinstance(attr, ET.Element):
+ attr = attr.text
+ return _long_type(attr) # type: ignore
+
+ @staticmethod
+ def deserialize_duration(attr):
+ """Deserialize ISO-8601 formatted string into TimeDelta object.
+
+ :param str attr: response string to be deserialized.
+ :return: Deserialized duration
+ :rtype: TimeDelta
+ :raises DeserializationError: if string format invalid.
+ """
+ if isinstance(attr, ET.Element):
+ attr = attr.text
+ try:
+ duration = isodate.parse_duration(attr)
+ except (ValueError, OverflowError, AttributeError) as err:
+ msg = "Cannot deserialize duration object."
+ raise DeserializationError(msg) from err
+ return duration
+
+ @staticmethod
+ def deserialize_date(attr):
+ """Deserialize ISO-8601 formatted string into Date object.
+
+ :param str attr: response string to be deserialized.
+ :return: Deserialized date
+ :rtype: Date
+ :raises DeserializationError: if string format invalid.
+ """
+ if isinstance(attr, ET.Element):
+ attr = attr.text
+ if re.search(r"[^\W\d_]", attr, re.I + re.U): # type: ignore
+ raise DeserializationError("Date must have only digits and -. Received: %s" % attr)
+ # This must NOT use defaultmonth/defaultday. Using None ensure this raises an exception.
+ return isodate.parse_date(attr, defaultmonth=0, defaultday=0)
+
+ @staticmethod
+ def deserialize_time(attr):
+ """Deserialize ISO-8601 formatted string into time object.
+
+ :param str attr: response string to be deserialized.
+ :return: Deserialized time
+ :rtype: datetime.time
+ :raises DeserializationError: if string format invalid.
+ """
+ if isinstance(attr, ET.Element):
+ attr = attr.text
+ if re.search(r"[^\W\d_]", attr, re.I + re.U): # type: ignore
+ raise DeserializationError("Date must have only digits and -. Received: %s" % attr)
+ return isodate.parse_time(attr)
+
+ @staticmethod
+ def deserialize_rfc(attr):
+ """Deserialize RFC-1123 formatted string into Datetime object.
+
+ :param str attr: response string to be deserialized.
+ :return: Deserialized RFC datetime
+ :rtype: Datetime
+ :raises DeserializationError: if string format invalid.
+ """
+ if isinstance(attr, ET.Element):
+ attr = attr.text
+ try:
+ parsed_date = email.utils.parsedate_tz(attr) # type: ignore
+ date_obj = datetime.datetime(
+ *parsed_date[:6], tzinfo=datetime.timezone(datetime.timedelta(minutes=(parsed_date[9] or 0) / 60))
+ )
+ if not date_obj.tzinfo:
+ date_obj = date_obj.astimezone(tz=TZ_UTC)
+ except ValueError as err:
+ msg = "Cannot deserialize to rfc datetime object."
+ raise DeserializationError(msg) from err
+ return date_obj
+
+ @staticmethod
+ def deserialize_iso(attr):
+ """Deserialize ISO-8601 formatted string into Datetime object.
+
+ :param str attr: response string to be deserialized.
+ :return: Deserialized ISO datetime
+ :rtype: Datetime
+ :raises DeserializationError: if string format invalid.
+ """
+ if isinstance(attr, ET.Element):
+ attr = attr.text
+ try:
+ attr = attr.upper() # type: ignore
+ match = Deserializer.valid_date.match(attr)
+ if not match:
+ raise ValueError("Invalid datetime string: " + attr)
+
+ check_decimal = attr.split(".")
+ if len(check_decimal) > 1:
+ decimal_str = ""
+ for digit in check_decimal[1]:
+ if digit.isdigit():
+ decimal_str += digit
+ else:
+ break
+ if len(decimal_str) > 6:
+ attr = attr.replace(decimal_str, decimal_str[0:6])
+
+ date_obj = isodate.parse_datetime(attr)
+ test_utc = date_obj.utctimetuple()
+ if test_utc.tm_year > 9999 or test_utc.tm_year < 1:
+ raise OverflowError("Hit max or min date")
+ except (ValueError, OverflowError, AttributeError) as err:
+ msg = "Cannot deserialize datetime object."
+ raise DeserializationError(msg) from err
+ return date_obj
+
+ @staticmethod
+ def deserialize_unix(attr):
+ """Serialize Datetime object into IntTime format.
+ This is represented as seconds.
+
+ :param int attr: Object to be serialized.
+ :return: Deserialized datetime
+ :rtype: Datetime
+ :raises DeserializationError: if format invalid
+ """
+ if isinstance(attr, ET.Element):
+ attr = int(attr.text) # type: ignore
+ try:
+ attr = int(attr)
+ date_obj = datetime.datetime.fromtimestamp(attr, TZ_UTC)
+ except ValueError as err:
+ msg = "Cannot deserialize to unix datetime object."
+ raise DeserializationError(msg) from err
+ return date_obj
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_generated/sdk/models/models/__init__.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_generated/sdk/models/models/__init__.py
new file mode 100644
index 000000000000..49bdcf3683e2
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_generated/sdk/models/models/__init__.py
@@ -0,0 +1,858 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+# pylint: disable=wrong-import-position
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+ from ._patch import * # pylint: disable=unused-wildcard-import
+
+
+from ._models import ( # type: ignore
+ A2APreviewTool,
+ A2AToolCall,
+ A2AToolCallOutput,
+ AISearchIndexResource,
+ AgentReference,
+ Annotation,
+ ApiErrorResponse,
+ ApplyPatchCreateFileOperation,
+ ApplyPatchCreateFileOperationParam,
+ ApplyPatchDeleteFileOperation,
+ ApplyPatchDeleteFileOperationParam,
+ ApplyPatchFileOperation,
+ ApplyPatchOperationParam,
+ ApplyPatchToolCallItemParam,
+ ApplyPatchToolCallOutputItemParam,
+ ApplyPatchToolParam,
+ ApplyPatchUpdateFileOperation,
+ ApplyPatchUpdateFileOperationParam,
+ ApproximateLocation,
+ AutoCodeInterpreterToolParam,
+ AzureAISearchTool,
+ AzureAISearchToolCall,
+ AzureAISearchToolCallOutput,
+ AzureAISearchToolResource,
+ AzureFunctionBinding,
+ AzureFunctionDefinition,
+ AzureFunctionDefinitionFunction,
+ AzureFunctionStorageQueue,
+ AzureFunctionTool,
+ AzureFunctionToolCall,
+ AzureFunctionToolCallOutput,
+ BingCustomSearchConfiguration,
+ BingCustomSearchPreviewTool,
+ BingCustomSearchToolCall,
+ BingCustomSearchToolCallOutput,
+ BingCustomSearchToolParameters,
+ BingGroundingSearchConfiguration,
+ BingGroundingSearchToolParameters,
+ BingGroundingTool,
+ BingGroundingToolCall,
+ BingGroundingToolCallOutput,
+ BrowserAutomationPreviewTool,
+ BrowserAutomationToolCall,
+ BrowserAutomationToolCallOutput,
+ BrowserAutomationToolConnectionParameters,
+ BrowserAutomationToolParameters,
+ CaptureStructuredOutputsTool,
+ ChatSummaryMemoryItem,
+ ClickParam,
+ CodeInterpreterOutputImage,
+ CodeInterpreterOutputLogs,
+ CodeInterpreterTool,
+ CompactResource,
+ CompactionSummaryItemParam,
+ ComparisonFilter,
+ CompoundFilter,
+ ComputerAction,
+ ComputerCallOutputItemParam,
+ ComputerCallSafetyCheckParam,
+ ComputerScreenshotContent,
+ ComputerScreenshotImage,
+ ComputerUsePreviewTool,
+ ContainerAutoParam,
+ ContainerFileCitationBody,
+ ContainerNetworkPolicyAllowlistParam,
+ ContainerNetworkPolicyDisabledParam,
+ ContainerNetworkPolicyDomainSecretParam,
+ ContainerNetworkPolicyParam,
+ ContainerReferenceResource,
+ ContainerSkill,
+ ContextManagementParam,
+ ConversationParam_2,
+ ConversationReference,
+ CoordParam,
+ CreateResponse,
+ CustomGrammarFormatParam,
+ CustomTextFormatParam,
+ CustomToolParam,
+ CustomToolParamFormat,
+ DeleteResponseResult,
+ DoubleClickAction,
+ DragParam,
+ Error,
+ FabricDataAgentToolCall,
+ FabricDataAgentToolCallOutput,
+ FabricDataAgentToolParameters,
+ FileCitationBody,
+ FilePath,
+ FileSearchTool,
+ FileSearchToolCallResults,
+ FunctionAndCustomToolCallOutput,
+ FunctionAndCustomToolCallOutputInputFileContent,
+ FunctionAndCustomToolCallOutputInputImageContent,
+ FunctionAndCustomToolCallOutputInputTextContent,
+ FunctionCallOutputItemParam,
+ FunctionShellAction,
+ FunctionShellActionParam,
+ FunctionShellCallEnvironment,
+ FunctionShellCallItemParam,
+ FunctionShellCallItemParamEnvironment,
+ FunctionShellCallItemParamEnvironmentContainerReferenceParam,
+ FunctionShellCallItemParamEnvironmentLocalEnvironmentParam,
+ FunctionShellCallOutputContent,
+ FunctionShellCallOutputContentParam,
+ FunctionShellCallOutputExitOutcome,
+ FunctionShellCallOutputExitOutcomeParam,
+ FunctionShellCallOutputItemParam,
+ FunctionShellCallOutputOutcome,
+ FunctionShellCallOutputOutcomeParam,
+ FunctionShellCallOutputTimeoutOutcome,
+ FunctionShellCallOutputTimeoutOutcomeParam,
+ FunctionShellToolParam,
+ FunctionShellToolParamEnvironment,
+ FunctionShellToolParamEnvironmentContainerReferenceParam,
+ FunctionShellToolParamEnvironmentLocalEnvironmentParam,
+ FunctionTool,
+ FunctionToolCallOutput,
+ FunctionToolCallOutputResource,
+ HybridSearchOptions,
+ ImageGenTool,
+ ImageGenToolInputImageMask,
+ InlineSkillParam,
+ InlineSkillSourceParam,
+ InputFileContent,
+ InputFileContentParam,
+ InputImageContent,
+ InputImageContentParamAutoParam,
+ InputTextContent,
+ InputTextContentParam,
+ Item,
+ ItemCodeInterpreterToolCall,
+ ItemComputerToolCall,
+ ItemCustomToolCall,
+ ItemCustomToolCallOutput,
+ ItemField,
+ ItemFieldApplyPatchToolCall,
+ ItemFieldApplyPatchToolCallOutput,
+ ItemFieldCodeInterpreterToolCall,
+ ItemFieldCompactionBody,
+ ItemFieldComputerToolCall,
+ ItemFieldComputerToolCallOutputResource,
+ ItemFieldCustomToolCall,
+ ItemFieldCustomToolCallOutput,
+ ItemFieldFileSearchToolCall,
+ ItemFieldFunctionShellCall,
+ ItemFieldFunctionShellCallOutput,
+ ItemFieldFunctionToolCall,
+ ItemFieldImageGenToolCall,
+ ItemFieldLocalShellToolCall,
+ ItemFieldLocalShellToolCallOutput,
+ ItemFieldMcpApprovalRequest,
+ ItemFieldMcpApprovalResponseResource,
+ ItemFieldMcpListTools,
+ ItemFieldMcpToolCall,
+ ItemFieldMessage,
+ ItemFieldReasoningItem,
+ ItemFieldWebSearchToolCall,
+ ItemFileSearchToolCall,
+ ItemFunctionToolCall,
+ ItemImageGenToolCall,
+ ItemLocalShellToolCall,
+ ItemLocalShellToolCallOutput,
+ ItemMcpApprovalRequest,
+ ItemMcpListTools,
+ ItemMcpToolCall,
+ ItemMessage,
+ ItemOutputMessage,
+ ItemReasoningItem,
+ ItemReferenceParam,
+ ItemWebSearchToolCall,
+ KeyPressAction,
+ LocalEnvironmentResource,
+ LocalShellExecAction,
+ LocalShellToolParam,
+ LocalSkillParam,
+ LogProb,
+ MCPApprovalResponse,
+ MCPListToolsTool,
+ MCPListToolsToolAnnotations,
+ MCPListToolsToolInputSchema,
+ MCPTool,
+ MCPToolFilter,
+ MCPToolRequireApproval,
+ MemoryItem,
+ MemorySearchItem,
+ MemorySearchOptions,
+ MemorySearchPreviewTool,
+ MemorySearchToolCallItemParam,
+ MemorySearchToolCallItemResource,
+ MessageContent,
+ MessageContentInputFileContent,
+ MessageContentInputImageContent,
+ MessageContentInputTextContent,
+ MessageContentOutputTextContent,
+ MessageContentReasoningTextContent,
+ MessageContentRefusalContent,
+ Metadata,
+ MicrosoftFabricPreviewTool,
+ MoveParam,
+ OAuthConsentRequestOutputItem,
+ OpenApiAnonymousAuthDetails,
+ OpenApiAuthDetails,
+ OpenApiFunctionDefinition,
+ OpenApiFunctionDefinitionFunction,
+ OpenApiManagedAuthDetails,
+ OpenApiManagedSecurityScheme,
+ OpenApiProjectConnectionAuthDetails,
+ OpenApiProjectConnectionSecurityScheme,
+ OpenApiTool,
+ OpenApiToolCall,
+ OpenApiToolCallOutput,
+ OutputContent,
+ OutputContentOutputTextContent,
+ OutputContentReasoningTextContent,
+ OutputContentRefusalContent,
+ OutputItem,
+ OutputItemApplyPatchToolCall,
+ OutputItemApplyPatchToolCallOutput,
+ OutputItemCodeInterpreterToolCall,
+ OutputItemCompactionBody,
+ OutputItemComputerToolCall,
+ OutputItemComputerToolCallOutputResource,
+ OutputItemCustomToolCall,
+ OutputItemCustomToolCallOutput,
+ OutputItemFileSearchToolCall,
+ OutputItemFunctionShellCall,
+ OutputItemFunctionShellCallOutput,
+ OutputItemFunctionToolCall,
+ OutputItemImageGenToolCall,
+ OutputItemLocalShellToolCall,
+ OutputItemLocalShellToolCallOutput,
+ OutputItemMcpApprovalRequest,
+ OutputItemMcpApprovalResponseResource,
+ OutputItemMcpListTools,
+ OutputItemMcpToolCall,
+ OutputItemMessage,
+ OutputItemOutputMessage,
+ OutputItemReasoningItem,
+ OutputItemWebSearchToolCall,
+ OutputMessageContent,
+ OutputMessageContentOutputTextContent,
+ OutputMessageContentRefusalContent,
+ Prompt,
+ RankingOptions,
+ Reasoning,
+ ReasoningTextContent,
+ ResponseAudioDeltaEvent,
+ ResponseAudioDoneEvent,
+ ResponseAudioTranscriptDeltaEvent,
+ ResponseAudioTranscriptDoneEvent,
+ ResponseCodeInterpreterCallCodeDeltaEvent,
+ ResponseCodeInterpreterCallCodeDoneEvent,
+ ResponseCodeInterpreterCallCompletedEvent,
+ ResponseCodeInterpreterCallInProgressEvent,
+ ResponseCodeInterpreterCallInterpretingEvent,
+ ResponseCompletedEvent,
+ ResponseContentPartAddedEvent,
+ ResponseContentPartDoneEvent,
+ ResponseCreatedEvent,
+ ResponseCustomToolCallInputDeltaEvent,
+ ResponseCustomToolCallInputDoneEvent,
+ ResponseErrorEvent,
+ ResponseErrorInfo,
+ ResponseFailedEvent,
+ ResponseFileSearchCallCompletedEvent,
+ ResponseFileSearchCallInProgressEvent,
+ ResponseFileSearchCallSearchingEvent,
+ ResponseFormatJsonSchemaSchema,
+ ResponseFunctionCallArgumentsDeltaEvent,
+ ResponseFunctionCallArgumentsDoneEvent,
+ ResponseImageGenCallCompletedEvent,
+ ResponseImageGenCallGeneratingEvent,
+ ResponseImageGenCallInProgressEvent,
+ ResponseImageGenCallPartialImageEvent,
+ ResponseInProgressEvent,
+ ResponseIncompleteDetails,
+ ResponseIncompleteEvent,
+ ResponseLogProb,
+ ResponseLogProbTopLogprobs,
+ ResponseMCPCallArgumentsDeltaEvent,
+ ResponseMCPCallArgumentsDoneEvent,
+ ResponseMCPCallCompletedEvent,
+ ResponseMCPCallFailedEvent,
+ ResponseMCPCallInProgressEvent,
+ ResponseMCPListToolsCompletedEvent,
+ ResponseMCPListToolsFailedEvent,
+ ResponseMCPListToolsInProgressEvent,
+ ResponseObject,
+ ResponseOutputItemAddedEvent,
+ ResponseOutputItemDoneEvent,
+ ResponseOutputTextAnnotationAddedEvent,
+ ResponsePromptVariables,
+ ResponseQueuedEvent,
+ ResponseReasoningSummaryPartAddedEvent,
+ ResponseReasoningSummaryPartAddedEventPart,
+ ResponseReasoningSummaryPartDoneEvent,
+ ResponseReasoningSummaryPartDoneEventPart,
+ ResponseReasoningSummaryTextDeltaEvent,
+ ResponseReasoningSummaryTextDoneEvent,
+ ResponseReasoningTextDeltaEvent,
+ ResponseReasoningTextDoneEvent,
+ ResponseRefusalDeltaEvent,
+ ResponseRefusalDoneEvent,
+ ResponseStreamEvent,
+ ResponseStreamOptions,
+ ResponseTextDeltaEvent,
+ ResponseTextDoneEvent,
+ ResponseTextParam,
+ ResponseUsage,
+ ResponseUsageInputTokensDetails,
+ ResponseUsageOutputTokensDetails,
+ ResponseWebSearchCallCompletedEvent,
+ ResponseWebSearchCallInProgressEvent,
+ ResponseWebSearchCallSearchingEvent,
+ ScreenshotParam,
+ ScrollParam,
+ SharepointGroundingToolCall,
+ SharepointGroundingToolCallOutput,
+ SharepointGroundingToolParameters,
+ SharepointPreviewTool,
+ SkillReferenceParam,
+ SpecificApplyPatchParam,
+ SpecificFunctionShellParam,
+ StructuredOutputDefinition,
+ StructuredOutputsOutputItem,
+ SummaryTextContent,
+ TextContent,
+ TextResponseFormatConfiguration,
+ TextResponseFormatConfigurationResponseFormatJsonObject,
+ TextResponseFormatConfigurationResponseFormatText,
+ TextResponseFormatJsonSchema,
+ Tool,
+ ToolChoiceAllowed,
+ ToolChoiceCodeInterpreter,
+ ToolChoiceComputerUsePreview,
+ ToolChoiceCustom,
+ ToolChoiceFileSearch,
+ ToolChoiceFunction,
+ ToolChoiceImageGeneration,
+ ToolChoiceMCP,
+ ToolChoiceParam,
+ ToolChoiceWebSearchPreview,
+ ToolChoiceWebSearchPreview20250311,
+ ToolProjectConnection,
+ TopLogProb,
+ TypeParam,
+ UrlCitationBody,
+ UserProfileMemoryItem,
+ VectorStoreFileAttributes,
+ WaitParam,
+ WebSearchActionFind,
+ WebSearchActionOpenPage,
+ WebSearchActionSearch,
+ WebSearchActionSearchSources,
+ WebSearchApproximateLocation,
+ WebSearchConfiguration,
+ WebSearchPreviewTool,
+ WebSearchTool,
+ WebSearchToolFilters,
+ WorkIQPreviewTool,
+ WorkIQPreviewToolParameters,
+ WorkflowActionOutputItem,
+)
+
+from ._enums import ( # type: ignore
+ AnnotationType,
+ ApplyPatchCallOutputStatus,
+ ApplyPatchCallOutputStatusParam,
+ ApplyPatchCallStatus,
+ ApplyPatchCallStatusParam,
+ ApplyPatchFileOperationType,
+ ApplyPatchOperationParamType,
+ AzureAISearchQueryType,
+ ClickButtonType,
+ ComputerActionType,
+ ComputerEnvironment,
+ ContainerMemoryLimit,
+ ContainerNetworkPolicyParamType,
+ ContainerSkillType,
+ CustomToolParamFormatType,
+ DetailEnum,
+ FunctionAndCustomToolCallOutputType,
+ FunctionCallItemStatus,
+ FunctionShellCallEnvironmentType,
+ FunctionShellCallItemParamEnvironmentType,
+ FunctionShellCallItemStatus,
+ FunctionShellCallOutputOutcomeParamType,
+ FunctionShellCallOutputOutcomeType,
+ FunctionShellToolParamEnvironmentType,
+ GrammarSyntax1,
+ ImageDetail,
+ ImageGenActionEnum,
+ IncludeEnum,
+ InputFidelity,
+ ItemFieldType,
+ ItemType,
+ LocalShellCallOutputStatusEnum,
+ LocalShellCallStatus,
+ MCPToolCallStatus,
+ MemoryItemKind,
+ MessageContentType,
+ MessageRole,
+ MessageStatus,
+ ModelIdsCompaction,
+ OpenApiAuthType,
+ OutputContentType,
+ OutputItemType,
+ OutputMessageContentType,
+ PageOrder,
+ RankerVersionType,
+ ResponseErrorCode,
+ ResponseStreamEventType,
+ SearchContextSize,
+ TextResponseFormatConfigurationType,
+ ToolCallStatus,
+ ToolChoiceOptions,
+ ToolChoiceParamType,
+ ToolType,
+)
+from ._patch import __all__ as _patch_all
+from ._patch import *
+from ._patch import patch_sdk as _patch_sdk
+
+__all__ = [
+ "A2APreviewTool",
+ "A2AToolCall",
+ "A2AToolCallOutput",
+ "AISearchIndexResource",
+ "AgentReference",
+ "Annotation",
+ "ApiErrorResponse",
+ "ApplyPatchCreateFileOperation",
+ "ApplyPatchCreateFileOperationParam",
+ "ApplyPatchDeleteFileOperation",
+ "ApplyPatchDeleteFileOperationParam",
+ "ApplyPatchFileOperation",
+ "ApplyPatchOperationParam",
+ "ApplyPatchToolCallItemParam",
+ "ApplyPatchToolCallOutputItemParam",
+ "ApplyPatchToolParam",
+ "ApplyPatchUpdateFileOperation",
+ "ApplyPatchUpdateFileOperationParam",
+ "ApproximateLocation",
+ "AutoCodeInterpreterToolParam",
+ "AzureAISearchTool",
+ "AzureAISearchToolCall",
+ "AzureAISearchToolCallOutput",
+ "AzureAISearchToolResource",
+ "AzureFunctionBinding",
+ "AzureFunctionDefinition",
+ "AzureFunctionDefinitionFunction",
+ "AzureFunctionStorageQueue",
+ "AzureFunctionTool",
+ "AzureFunctionToolCall",
+ "AzureFunctionToolCallOutput",
+ "BingCustomSearchConfiguration",
+ "BingCustomSearchPreviewTool",
+ "BingCustomSearchToolCall",
+ "BingCustomSearchToolCallOutput",
+ "BingCustomSearchToolParameters",
+ "BingGroundingSearchConfiguration",
+ "BingGroundingSearchToolParameters",
+ "BingGroundingTool",
+ "BingGroundingToolCall",
+ "BingGroundingToolCallOutput",
+ "BrowserAutomationPreviewTool",
+ "BrowserAutomationToolCall",
+ "BrowserAutomationToolCallOutput",
+ "BrowserAutomationToolConnectionParameters",
+ "BrowserAutomationToolParameters",
+ "CaptureStructuredOutputsTool",
+ "ChatSummaryMemoryItem",
+ "ClickParam",
+ "CodeInterpreterOutputImage",
+ "CodeInterpreterOutputLogs",
+ "CodeInterpreterTool",
+ "CompactResource",
+ "CompactionSummaryItemParam",
+ "ComparisonFilter",
+ "CompoundFilter",
+ "ComputerAction",
+ "ComputerCallOutputItemParam",
+ "ComputerCallSafetyCheckParam",
+ "ComputerScreenshotContent",
+ "ComputerScreenshotImage",
+ "ComputerUsePreviewTool",
+ "ContainerAutoParam",
+ "ContainerFileCitationBody",
+ "ContainerNetworkPolicyAllowlistParam",
+ "ContainerNetworkPolicyDisabledParam",
+ "ContainerNetworkPolicyDomainSecretParam",
+ "ContainerNetworkPolicyParam",
+ "ContainerReferenceResource",
+ "ContainerSkill",
+ "ContextManagementParam",
+ "ConversationParam_2",
+ "ConversationReference",
+ "CoordParam",
+ "CreateResponse",
+ "CustomGrammarFormatParam",
+ "CustomTextFormatParam",
+ "CustomToolParam",
+ "CustomToolParamFormat",
+ "DeleteResponseResult",
+ "DoubleClickAction",
+ "DragParam",
+ "Error",
+ "FabricDataAgentToolCall",
+ "FabricDataAgentToolCallOutput",
+ "FabricDataAgentToolParameters",
+ "FileCitationBody",
+ "FilePath",
+ "FileSearchTool",
+ "FileSearchToolCallResults",
+ "FunctionAndCustomToolCallOutput",
+ "FunctionAndCustomToolCallOutputInputFileContent",
+ "FunctionAndCustomToolCallOutputInputImageContent",
+ "FunctionAndCustomToolCallOutputInputTextContent",
+ "FunctionCallOutputItemParam",
+ "FunctionShellAction",
+ "FunctionShellActionParam",
+ "FunctionShellCallEnvironment",
+ "FunctionShellCallItemParam",
+ "FunctionShellCallItemParamEnvironment",
+ "FunctionShellCallItemParamEnvironmentContainerReferenceParam",
+ "FunctionShellCallItemParamEnvironmentLocalEnvironmentParam",
+ "FunctionShellCallOutputContent",
+ "FunctionShellCallOutputContentParam",
+ "FunctionShellCallOutputExitOutcome",
+ "FunctionShellCallOutputExitOutcomeParam",
+ "FunctionShellCallOutputItemParam",
+ "FunctionShellCallOutputOutcome",
+ "FunctionShellCallOutputOutcomeParam",
+ "FunctionShellCallOutputTimeoutOutcome",
+ "FunctionShellCallOutputTimeoutOutcomeParam",
+ "FunctionShellToolParam",
+ "FunctionShellToolParamEnvironment",
+ "FunctionShellToolParamEnvironmentContainerReferenceParam",
+ "FunctionShellToolParamEnvironmentLocalEnvironmentParam",
+ "FunctionTool",
+ "FunctionToolCallOutput",
+ "FunctionToolCallOutputResource",
+ "HybridSearchOptions",
+ "ImageGenTool",
+ "ImageGenToolInputImageMask",
+ "InlineSkillParam",
+ "InlineSkillSourceParam",
+ "InputFileContent",
+ "InputFileContentParam",
+ "InputImageContent",
+ "InputImageContentParamAutoParam",
+ "InputTextContent",
+ "InputTextContentParam",
+ "Item",
+ "ItemCodeInterpreterToolCall",
+ "ItemComputerToolCall",
+ "ItemCustomToolCall",
+ "ItemCustomToolCallOutput",
+ "ItemField",
+ "ItemFieldApplyPatchToolCall",
+ "ItemFieldApplyPatchToolCallOutput",
+ "ItemFieldCodeInterpreterToolCall",
+ "ItemFieldCompactionBody",
+ "ItemFieldComputerToolCall",
+ "ItemFieldComputerToolCallOutputResource",
+ "ItemFieldCustomToolCall",
+ "ItemFieldCustomToolCallOutput",
+ "ItemFieldFileSearchToolCall",
+ "ItemFieldFunctionShellCall",
+ "ItemFieldFunctionShellCallOutput",
+ "ItemFieldFunctionToolCall",
+ "ItemFieldImageGenToolCall",
+ "ItemFieldLocalShellToolCall",
+ "ItemFieldLocalShellToolCallOutput",
+ "ItemFieldMcpApprovalRequest",
+ "ItemFieldMcpApprovalResponseResource",
+ "ItemFieldMcpListTools",
+ "ItemFieldMcpToolCall",
+ "ItemFieldMessage",
+ "ItemFieldReasoningItem",
+ "ItemFieldWebSearchToolCall",
+ "ItemFileSearchToolCall",
+ "ItemFunctionToolCall",
+ "ItemImageGenToolCall",
+ "ItemLocalShellToolCall",
+ "ItemLocalShellToolCallOutput",
+ "ItemMcpApprovalRequest",
+ "ItemMcpListTools",
+ "ItemMcpToolCall",
+ "ItemMessage",
+ "ItemOutputMessage",
+ "ItemReasoningItem",
+ "ItemReferenceParam",
+ "ItemWebSearchToolCall",
+ "KeyPressAction",
+ "LocalEnvironmentResource",
+ "LocalShellExecAction",
+ "LocalShellToolParam",
+ "LocalSkillParam",
+ "LogProb",
+ "MCPApprovalResponse",
+ "MCPListToolsTool",
+ "MCPListToolsToolAnnotations",
+ "MCPListToolsToolInputSchema",
+ "MCPTool",
+ "MCPToolFilter",
+ "MCPToolRequireApproval",
+ "MemoryItem",
+ "MemorySearchItem",
+ "MemorySearchOptions",
+ "MemorySearchPreviewTool",
+ "MemorySearchToolCallItemParam",
+ "MemorySearchToolCallItemResource",
+ "MessageContent",
+ "MessageContentInputFileContent",
+ "MessageContentInputImageContent",
+ "MessageContentInputTextContent",
+ "MessageContentOutputTextContent",
+ "MessageContentReasoningTextContent",
+ "MessageContentRefusalContent",
+ "Metadata",
+ "MicrosoftFabricPreviewTool",
+ "MoveParam",
+ "OAuthConsentRequestOutputItem",
+ "OpenApiAnonymousAuthDetails",
+ "OpenApiAuthDetails",
+ "OpenApiFunctionDefinition",
+ "OpenApiFunctionDefinitionFunction",
+ "OpenApiManagedAuthDetails",
+ "OpenApiManagedSecurityScheme",
+ "OpenApiProjectConnectionAuthDetails",
+ "OpenApiProjectConnectionSecurityScheme",
+ "OpenApiTool",
+ "OpenApiToolCall",
+ "OpenApiToolCallOutput",
+ "OutputContent",
+ "OutputContentOutputTextContent",
+ "OutputContentReasoningTextContent",
+ "OutputContentRefusalContent",
+ "OutputItem",
+ "OutputItemApplyPatchToolCall",
+ "OutputItemApplyPatchToolCallOutput",
+ "OutputItemCodeInterpreterToolCall",
+ "OutputItemCompactionBody",
+ "OutputItemComputerToolCall",
+ "OutputItemComputerToolCallOutputResource",
+ "OutputItemCustomToolCall",
+ "OutputItemCustomToolCallOutput",
+ "OutputItemFileSearchToolCall",
+ "OutputItemFunctionShellCall",
+ "OutputItemFunctionShellCallOutput",
+ "OutputItemFunctionToolCall",
+ "OutputItemImageGenToolCall",
+ "OutputItemLocalShellToolCall",
+ "OutputItemLocalShellToolCallOutput",
+ "OutputItemMcpApprovalRequest",
+ "OutputItemMcpApprovalResponseResource",
+ "OutputItemMcpListTools",
+ "OutputItemMcpToolCall",
+ "OutputItemMessage",
+ "OutputItemOutputMessage",
+ "OutputItemReasoningItem",
+ "OutputItemWebSearchToolCall",
+ "OutputMessageContent",
+ "OutputMessageContentOutputTextContent",
+ "OutputMessageContentRefusalContent",
+ "Prompt",
+ "RankingOptions",
+ "Reasoning",
+ "ReasoningTextContent",
+ "ResponseAudioDeltaEvent",
+ "ResponseAudioDoneEvent",
+ "ResponseAudioTranscriptDeltaEvent",
+ "ResponseAudioTranscriptDoneEvent",
+ "ResponseCodeInterpreterCallCodeDeltaEvent",
+ "ResponseCodeInterpreterCallCodeDoneEvent",
+ "ResponseCodeInterpreterCallCompletedEvent",
+ "ResponseCodeInterpreterCallInProgressEvent",
+ "ResponseCodeInterpreterCallInterpretingEvent",
+ "ResponseCompletedEvent",
+ "ResponseContentPartAddedEvent",
+ "ResponseContentPartDoneEvent",
+ "ResponseCreatedEvent",
+ "ResponseCustomToolCallInputDeltaEvent",
+ "ResponseCustomToolCallInputDoneEvent",
+ "ResponseErrorEvent",
+ "ResponseErrorInfo",
+ "ResponseFailedEvent",
+ "ResponseFileSearchCallCompletedEvent",
+ "ResponseFileSearchCallInProgressEvent",
+ "ResponseFileSearchCallSearchingEvent",
+ "ResponseFormatJsonSchemaSchema",
+ "ResponseFunctionCallArgumentsDeltaEvent",
+ "ResponseFunctionCallArgumentsDoneEvent",
+ "ResponseImageGenCallCompletedEvent",
+ "ResponseImageGenCallGeneratingEvent",
+ "ResponseImageGenCallInProgressEvent",
+ "ResponseImageGenCallPartialImageEvent",
+ "ResponseInProgressEvent",
+ "ResponseIncompleteDetails",
+ "ResponseIncompleteEvent",
+ "ResponseLogProb",
+ "ResponseLogProbTopLogprobs",
+ "ResponseMCPCallArgumentsDeltaEvent",
+ "ResponseMCPCallArgumentsDoneEvent",
+ "ResponseMCPCallCompletedEvent",
+ "ResponseMCPCallFailedEvent",
+ "ResponseMCPCallInProgressEvent",
+ "ResponseMCPListToolsCompletedEvent",
+ "ResponseMCPListToolsFailedEvent",
+ "ResponseMCPListToolsInProgressEvent",
+ "ResponseObject",
+ "ResponseOutputItemAddedEvent",
+ "ResponseOutputItemDoneEvent",
+ "ResponseOutputTextAnnotationAddedEvent",
+ "ResponsePromptVariables",
+ "ResponseQueuedEvent",
+ "ResponseReasoningSummaryPartAddedEvent",
+ "ResponseReasoningSummaryPartAddedEventPart",
+ "ResponseReasoningSummaryPartDoneEvent",
+ "ResponseReasoningSummaryPartDoneEventPart",
+ "ResponseReasoningSummaryTextDeltaEvent",
+ "ResponseReasoningSummaryTextDoneEvent",
+ "ResponseReasoningTextDeltaEvent",
+ "ResponseReasoningTextDoneEvent",
+ "ResponseRefusalDeltaEvent",
+ "ResponseRefusalDoneEvent",
+ "ResponseStreamEvent",
+ "ResponseStreamOptions",
+ "ResponseTextDeltaEvent",
+ "ResponseTextDoneEvent",
+ "ResponseTextParam",
+ "ResponseUsage",
+ "ResponseUsageInputTokensDetails",
+ "ResponseUsageOutputTokensDetails",
+ "ResponseWebSearchCallCompletedEvent",
+ "ResponseWebSearchCallInProgressEvent",
+ "ResponseWebSearchCallSearchingEvent",
+ "ScreenshotParam",
+ "ScrollParam",
+ "SharepointGroundingToolCall",
+ "SharepointGroundingToolCallOutput",
+ "SharepointGroundingToolParameters",
+ "SharepointPreviewTool",
+ "SkillReferenceParam",
+ "SpecificApplyPatchParam",
+ "SpecificFunctionShellParam",
+ "StructuredOutputDefinition",
+ "StructuredOutputsOutputItem",
+ "SummaryTextContent",
+ "TextContent",
+ "TextResponseFormatConfiguration",
+ "TextResponseFormatConfigurationResponseFormatJsonObject",
+ "TextResponseFormatConfigurationResponseFormatText",
+ "TextResponseFormatJsonSchema",
+ "Tool",
+ "ToolChoiceAllowed",
+ "ToolChoiceCodeInterpreter",
+ "ToolChoiceComputerUsePreview",
+ "ToolChoiceCustom",
+ "ToolChoiceFileSearch",
+ "ToolChoiceFunction",
+ "ToolChoiceImageGeneration",
+ "ToolChoiceMCP",
+ "ToolChoiceParam",
+ "ToolChoiceWebSearchPreview",
+ "ToolChoiceWebSearchPreview20250311",
+ "ToolProjectConnection",
+ "TopLogProb",
+ "TypeParam",
+ "UrlCitationBody",
+ "UserProfileMemoryItem",
+ "VectorStoreFileAttributes",
+ "WaitParam",
+ "WebSearchActionFind",
+ "WebSearchActionOpenPage",
+ "WebSearchActionSearch",
+ "WebSearchActionSearchSources",
+ "WebSearchApproximateLocation",
+ "WebSearchConfiguration",
+ "WebSearchPreviewTool",
+ "WebSearchTool",
+ "WebSearchToolFilters",
+ "WorkIQPreviewTool",
+ "WorkIQPreviewToolParameters",
+ "WorkflowActionOutputItem",
+ "AnnotationType",
+ "ApplyPatchCallOutputStatus",
+ "ApplyPatchCallOutputStatusParam",
+ "ApplyPatchCallStatus",
+ "ApplyPatchCallStatusParam",
+ "ApplyPatchFileOperationType",
+ "ApplyPatchOperationParamType",
+ "AzureAISearchQueryType",
+ "ClickButtonType",
+ "ComputerActionType",
+ "ComputerEnvironment",
+ "ContainerMemoryLimit",
+ "ContainerNetworkPolicyParamType",
+ "ContainerSkillType",
+ "CustomToolParamFormatType",
+ "DetailEnum",
+ "FunctionAndCustomToolCallOutputType",
+ "FunctionCallItemStatus",
+ "FunctionShellCallEnvironmentType",
+ "FunctionShellCallItemParamEnvironmentType",
+ "FunctionShellCallItemStatus",
+ "FunctionShellCallOutputOutcomeParamType",
+ "FunctionShellCallOutputOutcomeType",
+ "FunctionShellToolParamEnvironmentType",
+ "GrammarSyntax1",
+ "ImageDetail",
+ "ImageGenActionEnum",
+ "IncludeEnum",
+ "InputFidelity",
+ "ItemFieldType",
+ "ItemType",
+ "LocalShellCallOutputStatusEnum",
+ "LocalShellCallStatus",
+ "MCPToolCallStatus",
+ "MemoryItemKind",
+ "MessageContentType",
+ "MessageRole",
+ "MessageStatus",
+ "ModelIdsCompaction",
+ "OpenApiAuthType",
+ "OutputContentType",
+ "OutputItemType",
+ "OutputMessageContentType",
+ "PageOrder",
+ "RankerVersionType",
+ "ResponseErrorCode",
+ "ResponseStreamEventType",
+ "SearchContextSize",
+ "TextResponseFormatConfigurationType",
+ "ToolCallStatus",
+ "ToolChoiceOptions",
+ "ToolChoiceParamType",
+ "ToolType",
+]
+__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore
+_patch_sdk()
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_generated/sdk/models/models/_enums.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_generated/sdk/models/models/_enums.py
new file mode 100644
index 000000000000..4ac334096a58
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_generated/sdk/models/models/_enums.py
@@ -0,0 +1,1226 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from enum import Enum
+from azure.core import CaseInsensitiveEnumMeta
+
+
+class AnnotationType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Type of AnnotationType."""
+
+ FILE_CITATION = "file_citation"
+ """FILE_CITATION."""
+ URL_CITATION = "url_citation"
+ """URL_CITATION."""
+ CONTAINER_FILE_CITATION = "container_file_citation"
+ """CONTAINER_FILE_CITATION."""
+ FILE_PATH = "file_path"
+ """FILE_PATH."""
+
+
+class ApplyPatchCallOutputStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Type of ApplyPatchCallOutputStatus."""
+
+ COMPLETED = "completed"
+ """COMPLETED."""
+ FAILED = "failed"
+ """FAILED."""
+
+
+class ApplyPatchCallOutputStatusParam(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Apply patch call output status."""
+
+ COMPLETED = "completed"
+ """COMPLETED."""
+ FAILED = "failed"
+ """FAILED."""
+
+
+class ApplyPatchCallStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Type of ApplyPatchCallStatus."""
+
+ IN_PROGRESS = "in_progress"
+ """IN_PROGRESS."""
+ COMPLETED = "completed"
+ """COMPLETED."""
+
+
+class ApplyPatchCallStatusParam(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Apply patch call status."""
+
+ IN_PROGRESS = "in_progress"
+ """IN_PROGRESS."""
+ COMPLETED = "completed"
+ """COMPLETED."""
+
+
+class ApplyPatchFileOperationType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Type of ApplyPatchFileOperationType."""
+
+ CREATE_FILE = "create_file"
+ """CREATE_FILE."""
+ DELETE_FILE = "delete_file"
+ """DELETE_FILE."""
+ UPDATE_FILE = "update_file"
+ """UPDATE_FILE."""
+
+
+class ApplyPatchOperationParamType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Type of ApplyPatchOperationParamType."""
+
+ CREATE_FILE = "create_file"
+ """CREATE_FILE."""
+ DELETE_FILE = "delete_file"
+ """DELETE_FILE."""
+ UPDATE_FILE = "update_file"
+ """UPDATE_FILE."""
+
+
+class AzureAISearchQueryType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Available query types for Azure AI Search tool."""
+
+ SIMPLE = "simple"
+ """Query type ``simple``."""
+ SEMANTIC = "semantic"
+ """Query type ``semantic``."""
+ VECTOR = "vector"
+ """Query type ``vector``."""
+ VECTOR_SIMPLE_HYBRID = "vector_simple_hybrid"
+ """Query type ``vector_simple_hybrid``."""
+ VECTOR_SEMANTIC_HYBRID = "vector_semantic_hybrid"
+ """Query type ``vector_semantic_hybrid``."""
+
+
+class ClickButtonType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Type of ClickButtonType."""
+
+ LEFT = "left"
+ """LEFT."""
+ RIGHT = "right"
+ """RIGHT."""
+ WHEEL = "wheel"
+ """WHEEL."""
+ BACK = "back"
+ """BACK."""
+ FORWARD = "forward"
+ """FORWARD."""
+
+
+class ComputerActionType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Type of ComputerActionType."""
+
+ CLICK = "click"
+ """CLICK."""
+ DOUBLE_CLICK = "double_click"
+ """DOUBLE_CLICK."""
+ DRAG = "drag"
+ """DRAG."""
+ KEYPRESS = "keypress"
+ """KEYPRESS."""
+ MOVE = "move"
+ """MOVE."""
+ SCREENSHOT = "screenshot"
+ """SCREENSHOT."""
+ SCROLL = "scroll"
+ """SCROLL."""
+ TYPE = "type"
+ """TYPE."""
+ WAIT = "wait"
+ """WAIT."""
+
+
+class ComputerEnvironment(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Type of ComputerEnvironment."""
+
+ WINDOWS = "windows"
+ """WINDOWS."""
+ MAC = "mac"
+ """MAC."""
+ LINUX = "linux"
+ """LINUX."""
+ UBUNTU = "ubuntu"
+ """UBUNTU."""
+ BROWSER = "browser"
+ """BROWSER."""
+
+
+class ContainerMemoryLimit(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Type of ContainerMemoryLimit."""
+
+ ENUM_1_G = "1g"
+ """1_G."""
+ ENUM_4_G = "4g"
+ """4_G."""
+ ENUM_16_G = "16g"
+ """16_G."""
+ ENUM_64_G = "64g"
+ """64_G."""
+
+
+class ContainerNetworkPolicyParamType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Type of ContainerNetworkPolicyParamType."""
+
+ DISABLED = "disabled"
+ """DISABLED."""
+ ALLOWLIST = "allowlist"
+ """ALLOWLIST."""
+
+
+class ContainerSkillType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Type of ContainerSkillType."""
+
+ SKILL_REFERENCE = "skill_reference"
+ """SKILL_REFERENCE."""
+ INLINE = "inline"
+ """INLINE."""
+
+
+class CustomToolParamFormatType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Type of CustomToolParamFormatType."""
+
+ TEXT = "text"
+ """TEXT."""
+ GRAMMAR = "grammar"
+ """GRAMMAR."""
+
+
+class DetailEnum(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Type of DetailEnum."""
+
+ LOW = "low"
+ """LOW."""
+ HIGH = "high"
+ """HIGH."""
+ AUTO = "auto"
+ """AUTO."""
+
+
+class FunctionAndCustomToolCallOutputType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Type of FunctionAndCustomToolCallOutputType."""
+
+ INPUT_TEXT = "input_text"
+ """INPUT_TEXT."""
+ INPUT_IMAGE = "input_image"
+ """INPUT_IMAGE."""
+ INPUT_FILE = "input_file"
+ """INPUT_FILE."""
+
+
+class FunctionCallItemStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Type of FunctionCallItemStatus."""
+
+ IN_PROGRESS = "in_progress"
+ """IN_PROGRESS."""
+ COMPLETED = "completed"
+ """COMPLETED."""
+ INCOMPLETE = "incomplete"
+ """INCOMPLETE."""
+
+
+class FunctionShellCallEnvironmentType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Type of FunctionShellCallEnvironmentType."""
+
+ LOCAL = "local"
+ """LOCAL."""
+ CONTAINER_REFERENCE = "container_reference"
+ """CONTAINER_REFERENCE."""
+
+
+class FunctionShellCallItemParamEnvironmentType( # pylint: disable=name-too-long
+ str, Enum, metaclass=CaseInsensitiveEnumMeta
+):
+ """Type of FunctionShellCallItemParamEnvironmentType."""
+
+ LOCAL = "local"
+ """LOCAL."""
+ CONTAINER_REFERENCE = "container_reference"
+ """CONTAINER_REFERENCE."""
+
+
+class FunctionShellCallItemStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Shell call status."""
+
+ IN_PROGRESS = "in_progress"
+ """IN_PROGRESS."""
+ COMPLETED = "completed"
+ """COMPLETED."""
+ INCOMPLETE = "incomplete"
+ """INCOMPLETE."""
+
+
+class FunctionShellCallOutputOutcomeParamType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Type of FunctionShellCallOutputOutcomeParamType."""
+
+ TIMEOUT = "timeout"
+ """TIMEOUT."""
+ EXIT = "exit"
+ """EXIT."""
+
+
+class FunctionShellCallOutputOutcomeType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Type of FunctionShellCallOutputOutcomeType."""
+
+ TIMEOUT = "timeout"
+ """TIMEOUT."""
+ EXIT = "exit"
+ """EXIT."""
+
+
+class FunctionShellToolParamEnvironmentType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Type of FunctionShellToolParamEnvironmentType."""
+
+ CONTAINER_AUTO = "container_auto"
+ """CONTAINER_AUTO."""
+ LOCAL = "local"
+ """LOCAL."""
+ CONTAINER_REFERENCE = "container_reference"
+ """CONTAINER_REFERENCE."""
+
+
+class GrammarSyntax1(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Type of GrammarSyntax1."""
+
+ LARK = "lark"
+ """LARK."""
+ REGEX = "regex"
+ """REGEX."""
+
+
+class ImageDetail(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Type of ImageDetail."""
+
+ LOW = "low"
+ """LOW."""
+ HIGH = "high"
+ """HIGH."""
+ AUTO = "auto"
+ """AUTO."""
+
+
+class ImageGenActionEnum(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Type of ImageGenActionEnum."""
+
+ GENERATE = "generate"
+ """GENERATE."""
+ EDIT = "edit"
+ """EDIT."""
+ AUTO = "auto"
+ """AUTO."""
+
+
+class IncludeEnum(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Specify additional output data to include in the model response. Currently supported values
+ are:
+
+ * `web_search_call.action.sources`: Include the sources of the web search tool call.
+ * `code_interpreter_call.outputs`: Includes the outputs of python code execution in code
+ interpreter tool call items.
+ * `computer_call_output.output.image_url`: Include image urls from the computer call output.
+ * `file_search_call.results`: Include the search results of the file search tool call.
+ * `message.input_image.image_url`: Include image urls from the input message.
+ * `message.output_text.logprobs`: Include logprobs with assistant messages.
+ * `reasoning.encrypted_content`: Includes an encrypted version of reasoning tokens in reasoning
+ item outputs. This enables reasoning items to be used in multi-turn conversations when using
+ the Responses API statelessly (like when the `store` parameter is set to `false`, or when an
+ organization is enrolled in the zero data retention program).
+ """
+
+ FILE_SEARCH_CALL_RESULTS = "file_search_call.results"
+ """FILE_SEARCH_CALL_RESULTS."""
+ WEB_SEARCH_CALL_RESULTS = "web_search_call.results"
+ """WEB_SEARCH_CALL_RESULTS."""
+ WEB_SEARCH_CALL_ACTION_SOURCES = "web_search_call.action.sources"
+ """WEB_SEARCH_CALL_ACTION_SOURCES."""
+ MESSAGE_INPUT_IMAGE_IMAGE_URL = "message.input_image.image_url"
+ """MESSAGE_INPUT_IMAGE_IMAGE_URL."""
+ COMPUTER_CALL_OUTPUT_OUTPUT_IMAGE_URL = "computer_call_output.output.image_url"
+ """COMPUTER_CALL_OUTPUT_OUTPUT_IMAGE_URL."""
+ CODE_INTERPRETER_CALL_OUTPUTS = "code_interpreter_call.outputs"
+ """CODE_INTERPRETER_CALL_OUTPUTS."""
+ REASONING_ENCRYPTED_CONTENT = "reasoning.encrypted_content"
+ """REASONING_ENCRYPTED_CONTENT."""
+ MESSAGE_OUTPUT_TEXT_LOGPROBS = "message.output_text.logprobs"
+ """MESSAGE_OUTPUT_TEXT_LOGPROBS."""
+ MEMORY_SEARCH_CALL_RESULTS = "memory_search_call.results"
+ """MEMORY_SEARCH_CALL_RESULTS."""
+
+
+class InputFidelity(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Control how much effort the model will exert to match the style and features, especially facial
+ features, of input images. This parameter is only supported for ``gpt-image-1`` and
+ ``gpt-image-1.5`` and later models, unsupported for ``gpt-image-1-mini``. Supports ``high`` and
+ ``low``. Defaults to ``low``.
+ """
+
+ HIGH = "high"
+ """HIGH."""
+ LOW = "low"
+ """LOW."""
+
+
+class ItemFieldType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Type of ItemFieldType."""
+
+ MESSAGE = "message"
+ """MESSAGE."""
+ FUNCTION_CALL = "function_call"
+ """FUNCTION_CALL."""
+ FUNCTION_CALL_OUTPUT = "function_call_output"
+ """FUNCTION_CALL_OUTPUT."""
+ FILE_SEARCH_CALL = "file_search_call"
+ """FILE_SEARCH_CALL."""
+ WEB_SEARCH_CALL = "web_search_call"
+ """WEB_SEARCH_CALL."""
+ IMAGE_GENERATION_CALL = "image_generation_call"
+ """IMAGE_GENERATION_CALL."""
+ COMPUTER_CALL = "computer_call"
+ """COMPUTER_CALL."""
+ COMPUTER_CALL_OUTPUT = "computer_call_output"
+ """COMPUTER_CALL_OUTPUT."""
+ REASONING = "reasoning"
+ """REASONING."""
+ COMPACTION = "compaction"
+ """COMPACTION."""
+ CODE_INTERPRETER_CALL = "code_interpreter_call"
+ """CODE_INTERPRETER_CALL."""
+ LOCAL_SHELL_CALL = "local_shell_call"
+ """LOCAL_SHELL_CALL."""
+ LOCAL_SHELL_CALL_OUTPUT = "local_shell_call_output"
+ """LOCAL_SHELL_CALL_OUTPUT."""
+ SHELL_CALL = "shell_call"
+ """SHELL_CALL."""
+ SHELL_CALL_OUTPUT = "shell_call_output"
+ """SHELL_CALL_OUTPUT."""
+ APPLY_PATCH_CALL = "apply_patch_call"
+ """APPLY_PATCH_CALL."""
+ APPLY_PATCH_CALL_OUTPUT = "apply_patch_call_output"
+ """APPLY_PATCH_CALL_OUTPUT."""
+ MCP_LIST_TOOLS = "mcp_list_tools"
+ """MCP_LIST_TOOLS."""
+ MCP_APPROVAL_REQUEST = "mcp_approval_request"
+ """MCP_APPROVAL_REQUEST."""
+ MCP_APPROVAL_RESPONSE = "mcp_approval_response"
+ """MCP_APPROVAL_RESPONSE."""
+ MCP_CALL = "mcp_call"
+ """MCP_CALL."""
+ CUSTOM_TOOL_CALL = "custom_tool_call"
+ """CUSTOM_TOOL_CALL."""
+ CUSTOM_TOOL_CALL_OUTPUT = "custom_tool_call_output"
+ """CUSTOM_TOOL_CALL_OUTPUT."""
+
+
+class ItemType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Type of ItemType."""
+
+ MESSAGE = "message"
+ """MESSAGE."""
+ OUTPUT_MESSAGE = "output_message"
+ """OUTPUT_MESSAGE."""
+ FILE_SEARCH_CALL = "file_search_call"
+ """FILE_SEARCH_CALL."""
+ COMPUTER_CALL = "computer_call"
+ """COMPUTER_CALL."""
+ COMPUTER_CALL_OUTPUT = "computer_call_output"
+ """COMPUTER_CALL_OUTPUT."""
+ WEB_SEARCH_CALL = "web_search_call"
+ """WEB_SEARCH_CALL."""
+ FUNCTION_CALL = "function_call"
+ """FUNCTION_CALL."""
+ FUNCTION_CALL_OUTPUT = "function_call_output"
+ """FUNCTION_CALL_OUTPUT."""
+ REASONING = "reasoning"
+ """REASONING."""
+ COMPACTION = "compaction"
+ """COMPACTION."""
+ IMAGE_GENERATION_CALL = "image_generation_call"
+ """IMAGE_GENERATION_CALL."""
+ CODE_INTERPRETER_CALL = "code_interpreter_call"
+ """CODE_INTERPRETER_CALL."""
+ LOCAL_SHELL_CALL = "local_shell_call"
+ """LOCAL_SHELL_CALL."""
+ LOCAL_SHELL_CALL_OUTPUT = "local_shell_call_output"
+ """LOCAL_SHELL_CALL_OUTPUT."""
+ SHELL_CALL = "shell_call"
+ """SHELL_CALL."""
+ SHELL_CALL_OUTPUT = "shell_call_output"
+ """SHELL_CALL_OUTPUT."""
+ APPLY_PATCH_CALL = "apply_patch_call"
+ """APPLY_PATCH_CALL."""
+ APPLY_PATCH_CALL_OUTPUT = "apply_patch_call_output"
+ """APPLY_PATCH_CALL_OUTPUT."""
+ MCP_LIST_TOOLS = "mcp_list_tools"
+ """MCP_LIST_TOOLS."""
+ MCP_APPROVAL_REQUEST = "mcp_approval_request"
+ """MCP_APPROVAL_REQUEST."""
+ MCP_APPROVAL_RESPONSE = "mcp_approval_response"
+ """MCP_APPROVAL_RESPONSE."""
+ MCP_CALL = "mcp_call"
+ """MCP_CALL."""
+ CUSTOM_TOOL_CALL_OUTPUT = "custom_tool_call_output"
+ """CUSTOM_TOOL_CALL_OUTPUT."""
+ CUSTOM_TOOL_CALL = "custom_tool_call"
+ """CUSTOM_TOOL_CALL."""
+ ITEM_REFERENCE = "item_reference"
+ """ITEM_REFERENCE."""
+ STRUCTURED_OUTPUTS = "structured_outputs"
+ """STRUCTURED_OUTPUTS."""
+ OAUTH_CONSENT_REQUEST = "oauth_consent_request"
+ """OAUTH_CONSENT_REQUEST."""
+ MEMORY_SEARCH_CALL = "memory_search_call"
+ """MEMORY_SEARCH_CALL."""
+ WORKFLOW_ACTION = "workflow_action"
+ """WORKFLOW_ACTION."""
+ A2_A_PREVIEW_CALL = "a2a_preview_call"
+ """A2_A_PREVIEW_CALL."""
+ A2_A_PREVIEW_CALL_OUTPUT = "a2a_preview_call_output"
+ """A2_A_PREVIEW_CALL_OUTPUT."""
+ BING_GROUNDING_CALL = "bing_grounding_call"
+ """BING_GROUNDING_CALL."""
+ BING_GROUNDING_CALL_OUTPUT = "bing_grounding_call_output"
+ """BING_GROUNDING_CALL_OUTPUT."""
+ SHAREPOINT_GROUNDING_PREVIEW_CALL = "sharepoint_grounding_preview_call"
+ """SHAREPOINT_GROUNDING_PREVIEW_CALL."""
+ SHAREPOINT_GROUNDING_PREVIEW_CALL_OUTPUT = "sharepoint_grounding_preview_call_output"
+ """SHAREPOINT_GROUNDING_PREVIEW_CALL_OUTPUT."""
+ AZURE_AI_SEARCH_CALL = "azure_ai_search_call"
+ """AZURE_AI_SEARCH_CALL."""
+ AZURE_AI_SEARCH_CALL_OUTPUT = "azure_ai_search_call_output"
+ """AZURE_AI_SEARCH_CALL_OUTPUT."""
+ BING_CUSTOM_SEARCH_PREVIEW_CALL = "bing_custom_search_preview_call"
+ """BING_CUSTOM_SEARCH_PREVIEW_CALL."""
+ BING_CUSTOM_SEARCH_PREVIEW_CALL_OUTPUT = "bing_custom_search_preview_call_output"
+ """BING_CUSTOM_SEARCH_PREVIEW_CALL_OUTPUT."""
+ OPENAPI_CALL = "openapi_call"
+ """OPENAPI_CALL."""
+ OPENAPI_CALL_OUTPUT = "openapi_call_output"
+ """OPENAPI_CALL_OUTPUT."""
+ BROWSER_AUTOMATION_PREVIEW_CALL = "browser_automation_preview_call"
+ """BROWSER_AUTOMATION_PREVIEW_CALL."""
+ BROWSER_AUTOMATION_PREVIEW_CALL_OUTPUT = "browser_automation_preview_call_output"
+ """BROWSER_AUTOMATION_PREVIEW_CALL_OUTPUT."""
+ FABRIC_DATAAGENT_PREVIEW_CALL = "fabric_dataagent_preview_call"
+ """FABRIC_DATAAGENT_PREVIEW_CALL."""
+ FABRIC_DATAAGENT_PREVIEW_CALL_OUTPUT = "fabric_dataagent_preview_call_output"
+ """FABRIC_DATAAGENT_PREVIEW_CALL_OUTPUT."""
+ AZURE_FUNCTION_CALL = "azure_function_call"
+ """AZURE_FUNCTION_CALL."""
+ AZURE_FUNCTION_CALL_OUTPUT = "azure_function_call_output"
+ """AZURE_FUNCTION_CALL_OUTPUT."""
+
+
+class LocalShellCallOutputStatusEnum(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Type of LocalShellCallOutputStatusEnum."""
+
+ IN_PROGRESS = "in_progress"
+ """IN_PROGRESS."""
+ COMPLETED = "completed"
+ """COMPLETED."""
+ INCOMPLETE = "incomplete"
+ """INCOMPLETE."""
+
+
+class LocalShellCallStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Type of LocalShellCallStatus."""
+
+ IN_PROGRESS = "in_progress"
+ """IN_PROGRESS."""
+ COMPLETED = "completed"
+ """COMPLETED."""
+ INCOMPLETE = "incomplete"
+ """INCOMPLETE."""
+
+
+class MCPToolCallStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Type of MCPToolCallStatus."""
+
+ IN_PROGRESS = "in_progress"
+ """IN_PROGRESS."""
+ COMPLETED = "completed"
+ """COMPLETED."""
+ INCOMPLETE = "incomplete"
+ """INCOMPLETE."""
+ CALLING = "calling"
+ """CALLING."""
+ FAILED = "failed"
+ """FAILED."""
+
+
+class MemoryItemKind(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Memory item kind."""
+
+ USER_PROFILE = "user_profile"
+ """User profile information extracted from conversations."""
+ CHAT_SUMMARY = "chat_summary"
+ """Summary of chat conversations."""
+
+
+class MessageContentType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Type of MessageContentType."""
+
+ INPUT_TEXT = "input_text"
+ """INPUT_TEXT."""
+ OUTPUT_TEXT = "output_text"
+ """OUTPUT_TEXT."""
+ TEXT = "text"
+ """TEXT."""
+ SUMMARY_TEXT = "summary_text"
+ """SUMMARY_TEXT."""
+ REASONING_TEXT = "reasoning_text"
+ """REASONING_TEXT."""
+ REFUSAL = "refusal"
+ """REFUSAL."""
+ INPUT_IMAGE = "input_image"
+ """INPUT_IMAGE."""
+ COMPUTER_SCREENSHOT = "computer_screenshot"
+ """COMPUTER_SCREENSHOT."""
+ INPUT_FILE = "input_file"
+ """INPUT_FILE."""
+
+
+class MessageRole(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Type of MessageRole."""
+
+ UNKNOWN = "unknown"
+ """UNKNOWN."""
+ USER = "user"
+ """USER."""
+ ASSISTANT = "assistant"
+ """ASSISTANT."""
+ SYSTEM = "system"
+ """SYSTEM."""
+ CRITIC = "critic"
+ """CRITIC."""
+ DISCRIMINATOR = "discriminator"
+ """DISCRIMINATOR."""
+ DEVELOPER = "developer"
+ """DEVELOPER."""
+ TOOL = "tool"
+ """TOOL."""
+
+
+class MessageStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Type of MessageStatus."""
+
+ IN_PROGRESS = "in_progress"
+ """IN_PROGRESS."""
+ COMPLETED = "completed"
+ """COMPLETED."""
+ INCOMPLETE = "incomplete"
+ """INCOMPLETE."""
+
+
+class ModelIdsCompaction(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Model ID used to generate the response, like ``gpt-5`` or ``o3``. OpenAI offers a wide range of
+ models with different capabilities, performance characteristics, and price points. Refer to the
+ `model guide `_ to browse and compare available models.
+ """
+
+ GPT5_2 = "gpt-5.2"
+ """GPT5_2."""
+ GPT5_2_2025_12_11 = "gpt-5.2-2025-12-11"
+ """GPT5_2_2025_12_11."""
+ GPT5_2_CHAT_LATEST = "gpt-5.2-chat-latest"
+ """GPT5_2_CHAT_LATEST."""
+ GPT5_2_PRO = "gpt-5.2-pro"
+ """GPT5_2_PRO."""
+ GPT5_2_PRO2025_12_11 = "gpt-5.2-pro-2025-12-11"
+ """GPT5_2_PRO2025_12_11."""
+ GPT5_1 = "gpt-5.1"
+ """GPT5_1."""
+ GPT5_1_2025_11_13 = "gpt-5.1-2025-11-13"
+ """GPT5_1_2025_11_13."""
+ GPT5_1_CODEX = "gpt-5.1-codex"
+ """GPT5_1_CODEX."""
+ GPT5_1_MINI = "gpt-5.1-mini"
+ """GPT5_1_MINI."""
+ GPT5_1_CHAT_LATEST = "gpt-5.1-chat-latest"
+ """GPT5_1_CHAT_LATEST."""
+ GPT5 = "gpt-5"
+ """GPT5."""
+ GPT5_MINI = "gpt-5-mini"
+ """GPT5_MINI."""
+ GPT5_NANO = "gpt-5-nano"
+ """GPT5_NANO."""
+ GPT5_2025_08_07 = "gpt-5-2025-08-07"
+ """GPT5_2025_08_07."""
+ GPT5_MINI2025_08_07 = "gpt-5-mini-2025-08-07"
+ """GPT5_MINI2025_08_07."""
+ GPT5_NANO2025_08_07 = "gpt-5-nano-2025-08-07"
+ """GPT5_NANO2025_08_07."""
+ GPT5_CHAT_LATEST = "gpt-5-chat-latest"
+ """GPT5_CHAT_LATEST."""
+ GPT4_1 = "gpt-4.1"
+ """GPT4_1."""
+ GPT4_1_MINI = "gpt-4.1-mini"
+ """GPT4_1_MINI."""
+ GPT4_1_NANO = "gpt-4.1-nano"
+ """GPT4_1_NANO."""
+ GPT4_1_2025_04_14 = "gpt-4.1-2025-04-14"
+ """GPT4_1_2025_04_14."""
+ GPT4_1_MINI2025_04_14 = "gpt-4.1-mini-2025-04-14"
+ """GPT4_1_MINI2025_04_14."""
+ GPT4_1_NANO2025_04_14 = "gpt-4.1-nano-2025-04-14"
+ """GPT4_1_NANO2025_04_14."""
+ O4_MINI = "o4-mini"
+ """O4_MINI."""
+ O4_MINI2025_04_16 = "o4-mini-2025-04-16"
+ """O4_MINI2025_04_16."""
+ O3 = "o3"
+ """O3."""
+ O3_2025_04_16 = "o3-2025-04-16"
+ """O3_2025_04_16."""
+ O3_MINI = "o3-mini"
+ """O3_MINI."""
+ O3_MINI2025_01_31 = "o3-mini-2025-01-31"
+ """O3_MINI2025_01_31."""
+ O1 = "o1"
+ """O1."""
+ O1_2024_12_17 = "o1-2024-12-17"
+ """O1_2024_12_17."""
+ O1_PREVIEW = "o1-preview"
+ """O1_PREVIEW."""
+ O1_PREVIEW2024_09_12 = "o1-preview-2024-09-12"
+ """O1_PREVIEW2024_09_12."""
+ O1_MINI = "o1-mini"
+ """O1_MINI."""
+ O1_MINI2024_09_12 = "o1-mini-2024-09-12"
+ """O1_MINI2024_09_12."""
+ GPT4_O = "gpt-4o"
+ """GPT4_O."""
+ GPT4_O2024_11_20 = "gpt-4o-2024-11-20"
+ """GPT4_O2024_11_20."""
+ GPT4_O2024_08_06 = "gpt-4o-2024-08-06"
+ """GPT4_O2024_08_06."""
+ GPT4_O2024_05_13 = "gpt-4o-2024-05-13"
+ """GPT4_O2024_05_13."""
+ GPT4_O_AUDIO_PREVIEW = "gpt-4o-audio-preview"
+ """GPT4_O_AUDIO_PREVIEW."""
+ GPT4_O_AUDIO_PREVIEW2024_10_01 = "gpt-4o-audio-preview-2024-10-01"
+ """GPT4_O_AUDIO_PREVIEW2024_10_01."""
+ GPT4_O_AUDIO_PREVIEW2024_12_17 = "gpt-4o-audio-preview-2024-12-17"
+ """GPT4_O_AUDIO_PREVIEW2024_12_17."""
+ GPT4_O_AUDIO_PREVIEW2025_06_03 = "gpt-4o-audio-preview-2025-06-03"
+ """GPT4_O_AUDIO_PREVIEW2025_06_03."""
+ GPT4_O_MINI_AUDIO_PREVIEW = "gpt-4o-mini-audio-preview"
+ """GPT4_O_MINI_AUDIO_PREVIEW."""
+ GPT4_O_MINI_AUDIO_PREVIEW2024_12_17 = "gpt-4o-mini-audio-preview-2024-12-17"
+ """GPT4_O_MINI_AUDIO_PREVIEW2024_12_17."""
+ GPT4_O_SEARCH_PREVIEW = "gpt-4o-search-preview"
+ """GPT4_O_SEARCH_PREVIEW."""
+ GPT4_O_MINI_SEARCH_PREVIEW = "gpt-4o-mini-search-preview"
+ """GPT4_O_MINI_SEARCH_PREVIEW."""
+ GPT4_O_SEARCH_PREVIEW2025_03_11 = "gpt-4o-search-preview-2025-03-11"
+ """GPT4_O_SEARCH_PREVIEW2025_03_11."""
+ GPT4_O_MINI_SEARCH_PREVIEW2025_03_11 = "gpt-4o-mini-search-preview-2025-03-11"
+ """GPT4_O_MINI_SEARCH_PREVIEW2025_03_11."""
+ CHATGPT4_O_LATEST = "chatgpt-4o-latest"
+ """CHATGPT4_O_LATEST."""
+ CODEX_MINI_LATEST = "codex-mini-latest"
+ """CODEX_MINI_LATEST."""
+ GPT4_O_MINI = "gpt-4o-mini"
+ """GPT4_O_MINI."""
+ GPT4_O_MINI2024_07_18 = "gpt-4o-mini-2024-07-18"
+ """GPT4_O_MINI2024_07_18."""
+ GPT4_TURBO = "gpt-4-turbo"
+ """GPT4_TURBO."""
+ GPT4_TURBO2024_04_09 = "gpt-4-turbo-2024-04-09"
+ """GPT4_TURBO2024_04_09."""
+ GPT4_0125_PREVIEW = "gpt-4-0125-preview"
+ """GPT4_0125_PREVIEW."""
+ GPT4_TURBO_PREVIEW = "gpt-4-turbo-preview"
+ """GPT4_TURBO_PREVIEW."""
+ GPT4_1106_PREVIEW = "gpt-4-1106-preview"
+ """GPT4_1106_PREVIEW."""
+ GPT4_VISION_PREVIEW = "gpt-4-vision-preview"
+ """GPT4_VISION_PREVIEW."""
+ GPT4 = "gpt-4"
+ """GPT4."""
+ GPT4_0314 = "gpt-4-0314"
+ """GPT4_0314."""
+ GPT4_0613 = "gpt-4-0613"
+ """GPT4_0613."""
+ GPT4_32_K = "gpt-4-32k"
+ """GPT4_32_K."""
+ GPT4_32_K0314 = "gpt-4-32k-0314"
+ """GPT4_32_K0314."""
+ GPT4_32_K0613 = "gpt-4-32k-0613"
+ """GPT4_32_K0613."""
+ GPT3_5_TURBO = "gpt-3.5-turbo"
+ """GPT3_5_TURBO."""
+ GPT3_5_TURBO16_K = "gpt-3.5-turbo-16k"
+ """GPT3_5_TURBO16_K."""
+ GPT3_5_TURBO0301 = "gpt-3.5-turbo-0301"
+ """GPT3_5_TURBO0301."""
+ GPT3_5_TURBO0613 = "gpt-3.5-turbo-0613"
+ """GPT3_5_TURBO0613."""
+ GPT3_5_TURBO1106 = "gpt-3.5-turbo-1106"
+ """GPT3_5_TURBO1106."""
+ GPT3_5_TURBO0125 = "gpt-3.5-turbo-0125"
+ """GPT3_5_TURBO0125."""
+ GPT3_5_TURBO16_K0613 = "gpt-3.5-turbo-16k-0613"
+ """GPT3_5_TURBO16_K0613."""
+ O1_PRO = "o1-pro"
+ """O1_PRO."""
+ O1_PRO2025_03_19 = "o1-pro-2025-03-19"
+ """O1_PRO2025_03_19."""
+ O3_PRO = "o3-pro"
+ """O3_PRO."""
+ O3_PRO2025_06_10 = "o3-pro-2025-06-10"
+ """O3_PRO2025_06_10."""
+ O3_DEEP_RESEARCH = "o3-deep-research"
+ """O3_DEEP_RESEARCH."""
+ O3_DEEP_RESEARCH2025_06_26 = "o3-deep-research-2025-06-26"
+ """O3_DEEP_RESEARCH2025_06_26."""
+ O4_MINI_DEEP_RESEARCH = "o4-mini-deep-research"
+ """O4_MINI_DEEP_RESEARCH."""
+ O4_MINI_DEEP_RESEARCH2025_06_26 = "o4-mini-deep-research-2025-06-26"
+ """O4_MINI_DEEP_RESEARCH2025_06_26."""
+ COMPUTER_USE_PREVIEW = "computer-use-preview"
+ """COMPUTER_USE_PREVIEW."""
+ COMPUTER_USE_PREVIEW2025_03_11 = "computer-use-preview-2025-03-11"
+ """COMPUTER_USE_PREVIEW2025_03_11."""
+ GPT5_CODEX = "gpt-5-codex"
+ """GPT5_CODEX."""
+ GPT5_PRO = "gpt-5-pro"
+ """GPT5_PRO."""
+ GPT5_PRO2025_10_06 = "gpt-5-pro-2025-10-06"
+ """GPT5_PRO2025_10_06."""
+ GPT5_1_CODEX_MAX = "gpt-5.1-codex-max"
+ """GPT5_1_CODEX_MAX."""
+
+
+class OpenApiAuthType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Authentication type for OpenApi endpoint. Allowed types are:
+
+ * Anonymous (no authentication required)
+ * Project Connection (requires project_connection_id to endpoint, as setup in AI Foundry)
+ * Managed_Identity (requires audience for identity based auth).
+ """
+
+ ANONYMOUS = "anonymous"
+ """ANONYMOUS."""
+ PROJECT_CONNECTION = "project_connection"
+ """PROJECT_CONNECTION."""
+ MANAGED_IDENTITY = "managed_identity"
+ """MANAGED_IDENTITY."""
+
+
+class OutputContentType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Type of OutputContentType."""
+
+ OUTPUT_TEXT = "output_text"
+ """OUTPUT_TEXT."""
+ REFUSAL = "refusal"
+ """REFUSAL."""
+ REASONING_TEXT = "reasoning_text"
+ """REASONING_TEXT."""
+
+
+class OutputItemType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Type of OutputItemType."""
+
+ OUTPUT_MESSAGE = "output_message"
+ """OUTPUT_MESSAGE."""
+ FILE_SEARCH_CALL = "file_search_call"
+ """FILE_SEARCH_CALL."""
+ FUNCTION_CALL = "function_call"
+ """FUNCTION_CALL."""
+ WEB_SEARCH_CALL = "web_search_call"
+ """WEB_SEARCH_CALL."""
+ COMPUTER_CALL = "computer_call"
+ """COMPUTER_CALL."""
+ REASONING = "reasoning"
+ """REASONING."""
+ COMPACTION = "compaction"
+ """COMPACTION."""
+ IMAGE_GENERATION_CALL = "image_generation_call"
+ """IMAGE_GENERATION_CALL."""
+ CODE_INTERPRETER_CALL = "code_interpreter_call"
+ """CODE_INTERPRETER_CALL."""
+ LOCAL_SHELL_CALL = "local_shell_call"
+ """LOCAL_SHELL_CALL."""
+ SHELL_CALL = "shell_call"
+ """SHELL_CALL."""
+ SHELL_CALL_OUTPUT = "shell_call_output"
+ """SHELL_CALL_OUTPUT."""
+ APPLY_PATCH_CALL = "apply_patch_call"
+ """APPLY_PATCH_CALL."""
+ APPLY_PATCH_CALL_OUTPUT = "apply_patch_call_output"
+ """APPLY_PATCH_CALL_OUTPUT."""
+ MCP_CALL = "mcp_call"
+ """MCP_CALL."""
+ MCP_LIST_TOOLS = "mcp_list_tools"
+ """MCP_LIST_TOOLS."""
+ MCP_APPROVAL_REQUEST = "mcp_approval_request"
+ """MCP_APPROVAL_REQUEST."""
+ CUSTOM_TOOL_CALL = "custom_tool_call"
+ """CUSTOM_TOOL_CALL."""
+ MESSAGE = "message"
+ """MESSAGE."""
+ COMPUTER_CALL_OUTPUT = "computer_call_output"
+ """COMPUTER_CALL_OUTPUT."""
+ FUNCTION_CALL_OUTPUT = "function_call_output"
+ """FUNCTION_CALL_OUTPUT."""
+ LOCAL_SHELL_CALL_OUTPUT = "local_shell_call_output"
+ """LOCAL_SHELL_CALL_OUTPUT."""
+ MCP_APPROVAL_RESPONSE = "mcp_approval_response"
+ """MCP_APPROVAL_RESPONSE."""
+ CUSTOM_TOOL_CALL_OUTPUT = "custom_tool_call_output"
+ """CUSTOM_TOOL_CALL_OUTPUT."""
+ STRUCTURED_OUTPUTS = "structured_outputs"
+ """STRUCTURED_OUTPUTS."""
+ OAUTH_CONSENT_REQUEST = "oauth_consent_request"
+ """OAUTH_CONSENT_REQUEST."""
+ MEMORY_SEARCH_CALL = "memory_search_call"
+ """MEMORY_SEARCH_CALL."""
+ WORKFLOW_ACTION = "workflow_action"
+ """WORKFLOW_ACTION."""
+ A2_A_PREVIEW_CALL = "a2a_preview_call"
+ """A2_A_PREVIEW_CALL."""
+ A2_A_PREVIEW_CALL_OUTPUT = "a2a_preview_call_output"
+ """A2_A_PREVIEW_CALL_OUTPUT."""
+ BING_GROUNDING_CALL = "bing_grounding_call"
+ """BING_GROUNDING_CALL."""
+ BING_GROUNDING_CALL_OUTPUT = "bing_grounding_call_output"
+ """BING_GROUNDING_CALL_OUTPUT."""
+ SHAREPOINT_GROUNDING_PREVIEW_CALL = "sharepoint_grounding_preview_call"
+ """SHAREPOINT_GROUNDING_PREVIEW_CALL."""
+ SHAREPOINT_GROUNDING_PREVIEW_CALL_OUTPUT = "sharepoint_grounding_preview_call_output"
+ """SHAREPOINT_GROUNDING_PREVIEW_CALL_OUTPUT."""
+ AZURE_AI_SEARCH_CALL = "azure_ai_search_call"
+ """AZURE_AI_SEARCH_CALL."""
+ AZURE_AI_SEARCH_CALL_OUTPUT = "azure_ai_search_call_output"
+ """AZURE_AI_SEARCH_CALL_OUTPUT."""
+ BING_CUSTOM_SEARCH_PREVIEW_CALL = "bing_custom_search_preview_call"
+ """BING_CUSTOM_SEARCH_PREVIEW_CALL."""
+ BING_CUSTOM_SEARCH_PREVIEW_CALL_OUTPUT = "bing_custom_search_preview_call_output"
+ """BING_CUSTOM_SEARCH_PREVIEW_CALL_OUTPUT."""
+ OPENAPI_CALL = "openapi_call"
+ """OPENAPI_CALL."""
+ OPENAPI_CALL_OUTPUT = "openapi_call_output"
+ """OPENAPI_CALL_OUTPUT."""
+ BROWSER_AUTOMATION_PREVIEW_CALL = "browser_automation_preview_call"
+ """BROWSER_AUTOMATION_PREVIEW_CALL."""
+ BROWSER_AUTOMATION_PREVIEW_CALL_OUTPUT = "browser_automation_preview_call_output"
+ """BROWSER_AUTOMATION_PREVIEW_CALL_OUTPUT."""
+ FABRIC_DATAAGENT_PREVIEW_CALL = "fabric_dataagent_preview_call"
+ """FABRIC_DATAAGENT_PREVIEW_CALL."""
+ FABRIC_DATAAGENT_PREVIEW_CALL_OUTPUT = "fabric_dataagent_preview_call_output"
+ """FABRIC_DATAAGENT_PREVIEW_CALL_OUTPUT."""
+ AZURE_FUNCTION_CALL = "azure_function_call"
+ """AZURE_FUNCTION_CALL."""
+ AZURE_FUNCTION_CALL_OUTPUT = "azure_function_call_output"
+ """AZURE_FUNCTION_CALL_OUTPUT."""
+
+
+class OutputMessageContentType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Type of OutputMessageContentType."""
+
+ OUTPUT_TEXT = "output_text"
+ """OUTPUT_TEXT."""
+ REFUSAL = "refusal"
+ """REFUSAL."""
+
+
+class PageOrder(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Type of PageOrder."""
+
+ ASC = "asc"
+ """ASC."""
+ DESC = "desc"
+ """DESC."""
+
+
+class RankerVersionType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Type of RankerVersionType."""
+
+ AUTO = "auto"
+ """AUTO."""
+ DEFAULT2024_11_15 = "default-2024-11-15"
+ """DEFAULT2024_11_15."""
+
+
+class ResponseErrorCode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """The error code for the response."""
+
+ SERVER_ERROR = "server_error"
+ """SERVER_ERROR."""
+ RATE_LIMIT_EXCEEDED = "rate_limit_exceeded"
+ """RATE_LIMIT_EXCEEDED."""
+ INVALID_PROMPT = "invalid_prompt"
+ """INVALID_PROMPT."""
+ VECTOR_STORE_TIMEOUT = "vector_store_timeout"
+ """VECTOR_STORE_TIMEOUT."""
+ INVALID_IMAGE = "invalid_image"
+ """INVALID_IMAGE."""
+ INVALID_IMAGE_FORMAT = "invalid_image_format"
+ """INVALID_IMAGE_FORMAT."""
+ INVALID_BASE64_IMAGE = "invalid_base64_image"
+ """INVALID_BASE64_IMAGE."""
+ INVALID_IMAGE_URL = "invalid_image_url"
+ """INVALID_IMAGE_URL."""
+ IMAGE_TOO_LARGE = "image_too_large"
+ """IMAGE_TOO_LARGE."""
+ IMAGE_TOO_SMALL = "image_too_small"
+ """IMAGE_TOO_SMALL."""
+ IMAGE_PARSE_ERROR = "image_parse_error"
+ """IMAGE_PARSE_ERROR."""
+ IMAGE_CONTENT_POLICY_VIOLATION = "image_content_policy_violation"
+ """IMAGE_CONTENT_POLICY_VIOLATION."""
+ INVALID_IMAGE_MODE = "invalid_image_mode"
+ """INVALID_IMAGE_MODE."""
+ IMAGE_FILE_TOO_LARGE = "image_file_too_large"
+ """IMAGE_FILE_TOO_LARGE."""
+ UNSUPPORTED_IMAGE_MEDIA_TYPE = "unsupported_image_media_type"
+ """UNSUPPORTED_IMAGE_MEDIA_TYPE."""
+ EMPTY_IMAGE_FILE = "empty_image_file"
+ """EMPTY_IMAGE_FILE."""
+ FAILED_TO_DOWNLOAD_IMAGE = "failed_to_download_image"
+ """FAILED_TO_DOWNLOAD_IMAGE."""
+ IMAGE_FILE_NOT_FOUND = "image_file_not_found"
+ """IMAGE_FILE_NOT_FOUND."""
+
+
+class ResponseStreamEventType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Type of ResponseStreamEventType."""
+
+ RESPONSE_AUDIO_DELTA = "response.audio.delta"
+ """RESPONSE_AUDIO_DELTA."""
+ RESPONSE_AUDIO_DONE = "response.audio.done"
+ """RESPONSE_AUDIO_DONE."""
+ RESPONSE_AUDIO_TRANSCRIPT_DELTA = "response.audio.transcript.delta"
+ """RESPONSE_AUDIO_TRANSCRIPT_DELTA."""
+ RESPONSE_AUDIO_TRANSCRIPT_DONE = "response.audio.transcript.done"
+ """RESPONSE_AUDIO_TRANSCRIPT_DONE."""
+ RESPONSE_CODE_INTERPRETER_CALL_CODE_DELTA = "response.code_interpreter_call_code.delta"
+ """RESPONSE_CODE_INTERPRETER_CALL_CODE_DELTA."""
+ RESPONSE_CODE_INTERPRETER_CALL_CODE_DONE = "response.code_interpreter_call_code.done"
+ """RESPONSE_CODE_INTERPRETER_CALL_CODE_DONE."""
+ RESPONSE_CODE_INTERPRETER_CALL_COMPLETED = "response.code_interpreter_call.completed"
+ """RESPONSE_CODE_INTERPRETER_CALL_COMPLETED."""
+ RESPONSE_CODE_INTERPRETER_CALL_IN_PROGRESS = "response.code_interpreter_call.in_progress"
+ """RESPONSE_CODE_INTERPRETER_CALL_IN_PROGRESS."""
+ RESPONSE_CODE_INTERPRETER_CALL_INTERPRETING = "response.code_interpreter_call.interpreting"
+ """RESPONSE_CODE_INTERPRETER_CALL_INTERPRETING."""
+ RESPONSE_COMPLETED = "response.completed"
+ """RESPONSE_COMPLETED."""
+ RESPONSE_CONTENT_PART_ADDED = "response.content_part.added"
+ """RESPONSE_CONTENT_PART_ADDED."""
+ RESPONSE_CONTENT_PART_DONE = "response.content_part.done"
+ """RESPONSE_CONTENT_PART_DONE."""
+ RESPONSE_CREATED = "response.created"
+ """RESPONSE_CREATED."""
+ ERROR = "error"
+ """ERROR."""
+ RESPONSE_FILE_SEARCH_CALL_COMPLETED = "response.file_search_call.completed"
+ """RESPONSE_FILE_SEARCH_CALL_COMPLETED."""
+ RESPONSE_FILE_SEARCH_CALL_IN_PROGRESS = "response.file_search_call.in_progress"
+ """RESPONSE_FILE_SEARCH_CALL_IN_PROGRESS."""
+ RESPONSE_FILE_SEARCH_CALL_SEARCHING = "response.file_search_call.searching"
+ """RESPONSE_FILE_SEARCH_CALL_SEARCHING."""
+ RESPONSE_FUNCTION_CALL_ARGUMENTS_DELTA = "response.function_call_arguments.delta"
+ """RESPONSE_FUNCTION_CALL_ARGUMENTS_DELTA."""
+ RESPONSE_FUNCTION_CALL_ARGUMENTS_DONE = "response.function_call_arguments.done"
+ """RESPONSE_FUNCTION_CALL_ARGUMENTS_DONE."""
+ RESPONSE_IN_PROGRESS = "response.in_progress"
+ """RESPONSE_IN_PROGRESS."""
+ RESPONSE_FAILED = "response.failed"
+ """RESPONSE_FAILED."""
+ RESPONSE_INCOMPLETE = "response.incomplete"
+ """RESPONSE_INCOMPLETE."""
+ RESPONSE_OUTPUT_ITEM_ADDED = "response.output_item.added"
+ """RESPONSE_OUTPUT_ITEM_ADDED."""
+ RESPONSE_OUTPUT_ITEM_DONE = "response.output_item.done"
+ """RESPONSE_OUTPUT_ITEM_DONE."""
+ RESPONSE_REASONING_SUMMARY_PART_ADDED = "response.reasoning_summary_part.added"
+ """RESPONSE_REASONING_SUMMARY_PART_ADDED."""
+ RESPONSE_REASONING_SUMMARY_PART_DONE = "response.reasoning_summary_part.done"
+ """RESPONSE_REASONING_SUMMARY_PART_DONE."""
+ RESPONSE_REASONING_SUMMARY_TEXT_DELTA = "response.reasoning_summary_text.delta"
+ """RESPONSE_REASONING_SUMMARY_TEXT_DELTA."""
+ RESPONSE_REASONING_SUMMARY_TEXT_DONE = "response.reasoning_summary_text.done"
+ """RESPONSE_REASONING_SUMMARY_TEXT_DONE."""
+ RESPONSE_REASONING_TEXT_DELTA = "response.reasoning_text.delta"
+ """RESPONSE_REASONING_TEXT_DELTA."""
+ RESPONSE_REASONING_TEXT_DONE = "response.reasoning_text.done"
+ """RESPONSE_REASONING_TEXT_DONE."""
+ RESPONSE_REFUSAL_DELTA = "response.refusal.delta"
+ """RESPONSE_REFUSAL_DELTA."""
+ RESPONSE_REFUSAL_DONE = "response.refusal.done"
+ """RESPONSE_REFUSAL_DONE."""
+ RESPONSE_OUTPUT_TEXT_DELTA = "response.output_text.delta"
+ """RESPONSE_OUTPUT_TEXT_DELTA."""
+ RESPONSE_OUTPUT_TEXT_DONE = "response.output_text.done"
+ """RESPONSE_OUTPUT_TEXT_DONE."""
+ RESPONSE_WEB_SEARCH_CALL_COMPLETED = "response.web_search_call.completed"
+ """RESPONSE_WEB_SEARCH_CALL_COMPLETED."""
+ RESPONSE_WEB_SEARCH_CALL_IN_PROGRESS = "response.web_search_call.in_progress"
+ """RESPONSE_WEB_SEARCH_CALL_IN_PROGRESS."""
+ RESPONSE_WEB_SEARCH_CALL_SEARCHING = "response.web_search_call.searching"
+ """RESPONSE_WEB_SEARCH_CALL_SEARCHING."""
+ RESPONSE_IMAGE_GENERATION_CALL_COMPLETED = "response.image_generation_call.completed"
+ """RESPONSE_IMAGE_GENERATION_CALL_COMPLETED."""
+ RESPONSE_IMAGE_GENERATION_CALL_GENERATING = "response.image_generation_call.generating"
+ """RESPONSE_IMAGE_GENERATION_CALL_GENERATING."""
+ RESPONSE_IMAGE_GENERATION_CALL_IN_PROGRESS = "response.image_generation_call.in_progress"
+ """RESPONSE_IMAGE_GENERATION_CALL_IN_PROGRESS."""
+ RESPONSE_IMAGE_GENERATION_CALL_PARTIAL_IMAGE = "response.image_generation_call.partial_image"
+ """RESPONSE_IMAGE_GENERATION_CALL_PARTIAL_IMAGE."""
+ RESPONSE_MCP_CALL_ARGUMENTS_DELTA = "response.mcp_call_arguments.delta"
+ """RESPONSE_MCP_CALL_ARGUMENTS_DELTA."""
+ RESPONSE_MCP_CALL_ARGUMENTS_DONE = "response.mcp_call_arguments.done"
+ """RESPONSE_MCP_CALL_ARGUMENTS_DONE."""
+ RESPONSE_MCP_CALL_COMPLETED = "response.mcp_call.completed"
+ """RESPONSE_MCP_CALL_COMPLETED."""
+ RESPONSE_MCP_CALL_FAILED = "response.mcp_call.failed"
+ """RESPONSE_MCP_CALL_FAILED."""
+ RESPONSE_MCP_CALL_IN_PROGRESS = "response.mcp_call.in_progress"
+ """RESPONSE_MCP_CALL_IN_PROGRESS."""
+ RESPONSE_MCP_LIST_TOOLS_COMPLETED = "response.mcp_list_tools.completed"
+ """RESPONSE_MCP_LIST_TOOLS_COMPLETED."""
+ RESPONSE_MCP_LIST_TOOLS_FAILED = "response.mcp_list_tools.failed"
+ """RESPONSE_MCP_LIST_TOOLS_FAILED."""
+ RESPONSE_MCP_LIST_TOOLS_IN_PROGRESS = "response.mcp_list_tools.in_progress"
+ """RESPONSE_MCP_LIST_TOOLS_IN_PROGRESS."""
+ RESPONSE_OUTPUT_TEXT_ANNOTATION_ADDED = "response.output_text.annotation.added"
+ """RESPONSE_OUTPUT_TEXT_ANNOTATION_ADDED."""
+ RESPONSE_QUEUED = "response.queued"
+ """RESPONSE_QUEUED."""
+ RESPONSE_CUSTOM_TOOL_CALL_INPUT_DELTA = "response.custom_tool_call_input.delta"
+ """RESPONSE_CUSTOM_TOOL_CALL_INPUT_DELTA."""
+ RESPONSE_CUSTOM_TOOL_CALL_INPUT_DONE = "response.custom_tool_call_input.done"
+ """RESPONSE_CUSTOM_TOOL_CALL_INPUT_DONE."""
+
+
+class SearchContextSize(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Type of SearchContextSize."""
+
+ LOW = "low"
+ """LOW."""
+ MEDIUM = "medium"
+ """MEDIUM."""
+ HIGH = "high"
+ """HIGH."""
+
+
+class TextResponseFormatConfigurationType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Type of TextResponseFormatConfigurationType."""
+
+ TEXT = "text"
+ """TEXT."""
+ JSON_SCHEMA = "json_schema"
+ """JSON_SCHEMA."""
+ JSON_OBJECT = "json_object"
+ """JSON_OBJECT."""
+
+
+class ToolCallStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """The status of a tool call."""
+
+ IN_PROGRESS = "in_progress"
+ """IN_PROGRESS."""
+ COMPLETED = "completed"
+ """COMPLETED."""
+ INCOMPLETE = "incomplete"
+ """INCOMPLETE."""
+ FAILED = "failed"
+ """FAILED."""
+
+
+class ToolChoiceOptions(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Tool choice mode."""
+
+ NONE = "none"
+ """NONE."""
+ AUTO = "auto"
+ """AUTO."""
+ REQUIRED = "required"
+ """REQUIRED."""
+
+
+class ToolChoiceParamType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Type of ToolChoiceParamType."""
+
+ ALLOWED_TOOLS = "allowed_tools"
+ """ALLOWED_TOOLS."""
+ FUNCTION = "function"
+ """FUNCTION."""
+ MCP = "mcp"
+ """MCP."""
+ CUSTOM = "custom"
+ """CUSTOM."""
+ APPLY_PATCH = "apply_patch"
+ """APPLY_PATCH."""
+ SHELL = "shell"
+ """SHELL."""
+ FILE_SEARCH = "file_search"
+ """FILE_SEARCH."""
+ WEB_SEARCH_PREVIEW = "web_search_preview"
+ """WEB_SEARCH_PREVIEW."""
+ COMPUTER_USE_PREVIEW = "computer_use_preview"
+ """COMPUTER_USE_PREVIEW."""
+ WEB_SEARCH_PREVIEW2025_03_11 = "web_search_preview_2025_03_11"
+ """WEB_SEARCH_PREVIEW2025_03_11."""
+ IMAGE_GENERATION = "image_generation"
+ """IMAGE_GENERATION."""
+ CODE_INTERPRETER = "code_interpreter"
+ """CODE_INTERPRETER."""
+
+
+class ToolType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Type of ToolType."""
+
+ FUNCTION = "function"
+ """FUNCTION."""
+ FILE_SEARCH = "file_search"
+ """FILE_SEARCH."""
+ COMPUTER_USE_PREVIEW = "computer_use_preview"
+ """COMPUTER_USE_PREVIEW."""
+ WEB_SEARCH = "web_search"
+ """WEB_SEARCH."""
+ MCP = "mcp"
+ """MCP."""
+ CODE_INTERPRETER = "code_interpreter"
+ """CODE_INTERPRETER."""
+ IMAGE_GENERATION = "image_generation"
+ """IMAGE_GENERATION."""
+ LOCAL_SHELL = "local_shell"
+ """LOCAL_SHELL."""
+ SHELL = "shell"
+ """SHELL."""
+ CUSTOM = "custom"
+ """CUSTOM."""
+ WEB_SEARCH_PREVIEW = "web_search_preview"
+ """WEB_SEARCH_PREVIEW."""
+ APPLY_PATCH = "apply_patch"
+ """APPLY_PATCH."""
+ A2_A_PREVIEW = "a2a_preview"
+ """A2_A_PREVIEW."""
+ BING_CUSTOM_SEARCH_PREVIEW = "bing_custom_search_preview"
+ """BING_CUSTOM_SEARCH_PREVIEW."""
+ BROWSER_AUTOMATION_PREVIEW = "browser_automation_preview"
+ """BROWSER_AUTOMATION_PREVIEW."""
+ FABRIC_DATAAGENT_PREVIEW = "fabric_dataagent_preview"
+ """FABRIC_DATAAGENT_PREVIEW."""
+ SHAREPOINT_GROUNDING_PREVIEW = "sharepoint_grounding_preview"
+ """SHAREPOINT_GROUNDING_PREVIEW."""
+ MEMORY_SEARCH_PREVIEW = "memory_search_preview"
+ """MEMORY_SEARCH_PREVIEW."""
+ WORK_IQ_PREVIEW = "work_iq_preview"
+ """WORK_IQ_PREVIEW."""
+ AZURE_AI_SEARCH = "azure_ai_search"
+ """AZURE_AI_SEARCH."""
+ AZURE_FUNCTION = "azure_function"
+ """AZURE_FUNCTION."""
+ BING_GROUNDING = "bing_grounding"
+ """BING_GROUNDING."""
+ CAPTURE_STRUCTURED_OUTPUTS = "capture_structured_outputs"
+ """CAPTURE_STRUCTURED_OUTPUTS."""
+ OPENAPI = "openapi"
+ """OPENAPI."""
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_generated/sdk/models/models/_models.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_generated/sdk/models/models/_models.py
new file mode 100644
index 000000000000..7e15ca44d5eb
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_generated/sdk/models/models/_models.py
@@ -0,0 +1,17025 @@
+# pylint: disable=line-too-long,useless-suppression,too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+# pylint: disable=useless-super-delegation
+
+import datetime
+from typing import Any, Literal, Mapping, Optional, TYPE_CHECKING, Union, overload
+
+from .._utils.model_base import Model as _Model, rest_discriminator, rest_field
+from ._enums import (
+ AnnotationType,
+ ApplyPatchFileOperationType,
+ ApplyPatchOperationParamType,
+ ComputerActionType,
+ ContainerNetworkPolicyParamType,
+ ContainerSkillType,
+ CustomToolParamFormatType,
+ FunctionAndCustomToolCallOutputType,
+ FunctionShellCallEnvironmentType,
+ FunctionShellCallItemParamEnvironmentType,
+ FunctionShellCallOutputOutcomeParamType,
+ FunctionShellCallOutputOutcomeType,
+ FunctionShellToolParamEnvironmentType,
+ ItemFieldType,
+ ItemType,
+ MemoryItemKind,
+ MessageContentType,
+ OpenApiAuthType,
+ OutputContentType,
+ OutputItemType,
+ OutputMessageContentType,
+ ResponseStreamEventType,
+ TextResponseFormatConfigurationType,
+ ToolChoiceParamType,
+ ToolType,
+)
+
+if TYPE_CHECKING:
+ from .. import _types, models as _models
+
+
+class Tool(_Model):
+ """A tool that can be used to generate a response.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ A2APreviewTool, ApplyPatchToolParam, AzureAISearchTool, AzureFunctionTool,
+ BingCustomSearchPreviewTool, BingGroundingTool, BrowserAutomationPreviewTool,
+ CaptureStructuredOutputsTool, CodeInterpreterTool, ComputerUsePreviewTool, CustomToolParam,
+ MicrosoftFabricPreviewTool, FileSearchTool, FunctionTool, ImageGenTool, LocalShellToolParam,
+ MCPTool, MemorySearchPreviewTool, OpenApiTool, SharepointPreviewTool, FunctionShellToolParam,
+ WebSearchTool, WebSearchPreviewTool, WorkIQPreviewTool
+
+ :ivar type: Required. Known values are: "function", "file_search", "computer_use_preview",
+ "web_search", "mcp", "code_interpreter", "image_generation", "local_shell", "shell", "custom",
+ "web_search_preview", "apply_patch", "a2a_preview", "bing_custom_search_preview",
+ "browser_automation_preview", "fabric_dataagent_preview", "sharepoint_grounding_preview",
+ "memory_search_preview", "work_iq_preview", "azure_ai_search", "azure_function",
+ "bing_grounding", "capture_structured_outputs", and "openapi".
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.ToolType
+ """
+
+ __mapping__: dict[str, _Model] = {}
+ type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])
+ """Required. Known values are: \"function\", \"file_search\", \"computer_use_preview\",
+ \"web_search\", \"mcp\", \"code_interpreter\", \"image_generation\", \"local_shell\",
+ \"shell\", \"custom\", \"web_search_preview\", \"apply_patch\", \"a2a_preview\",
+ \"bing_custom_search_preview\", \"browser_automation_preview\", \"fabric_dataagent_preview\",
+ \"sharepoint_grounding_preview\", \"memory_search_preview\", \"work_iq_preview\",
+ \"azure_ai_search\", \"azure_function\", \"bing_grounding\", \"capture_structured_outputs\",
+ and \"openapi\"."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ type: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class A2APreviewTool(Tool, discriminator="a2a_preview"):
+ """An agent implementing the A2A protocol.
+
+ :ivar type: The type of the tool. Always ``"a2a_preview``. Required. A2_A_PREVIEW.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.A2_A_PREVIEW
+ :ivar name: Optional user-defined name for this tool or configuration.
+ :vartype name: str
+ :ivar description: Optional user-defined description for this tool or configuration.
+ :vartype description: str
+ :ivar base_url: Base URL of the agent.
+ :vartype base_url: str
+ :ivar agent_card_path: The path to the agent card relative to the ``base_url``. If not
+ provided, defaults to ``/.well-known/agent-card.json``.
+ :vartype agent_card_path: str
+ :ivar project_connection_id: The connection ID in the project for the A2A server. The
+ connection stores authentication and other connection details needed to connect to the A2A
+ server.
+ :vartype project_connection_id: str
+ """
+
+ type: Literal[ToolType.A2_A_PREVIEW] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the tool. Always ``\"a2a_preview``. Required. A2_A_PREVIEW."""
+ name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined name for this tool or configuration."""
+ description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined description for this tool or configuration."""
+ base_url: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Base URL of the agent."""
+ agent_card_path: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The path to the agent card relative to the ``base_url``. If not provided, defaults to
+ ``/.well-known/agent-card.json``."""
+ project_connection_id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The connection ID in the project for the A2A server. The connection stores authentication and
+ other connection details needed to connect to the A2A server."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ base_url: Optional[str] = None,
+ agent_card_path: Optional[str] = None,
+ project_connection_id: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ToolType.A2_A_PREVIEW # type: ignore
+
+
+class OutputItem(_Model):
+ """OutputItem.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ A2AToolCall, A2AToolCallOutput, OutputItemApplyPatchToolCall,
+ OutputItemApplyPatchToolCallOutput, AzureAISearchToolCall, AzureAISearchToolCallOutput,
+ AzureFunctionToolCall, AzureFunctionToolCallOutput, BingCustomSearchToolCall,
+ BingCustomSearchToolCallOutput, BingGroundingToolCall, BingGroundingToolCallOutput,
+ BrowserAutomationToolCall, BrowserAutomationToolCallOutput, OutputItemCodeInterpreterToolCall,
+ OutputItemCompactionBody, OutputItemComputerToolCall, OutputItemComputerToolCallOutputResource,
+ OutputItemCustomToolCall, OutputItemCustomToolCallOutput, FabricDataAgentToolCall,
+ FabricDataAgentToolCallOutput, OutputItemFileSearchToolCall, OutputItemFunctionToolCall,
+ FunctionToolCallOutputResource, OutputItemImageGenToolCall, OutputItemLocalShellToolCall,
+ OutputItemLocalShellToolCallOutput, OutputItemMcpApprovalRequest,
+ OutputItemMcpApprovalResponseResource, OutputItemMcpToolCall, OutputItemMcpListTools,
+ MemorySearchToolCallItemResource, OutputItemMessage, OAuthConsentRequestOutputItem,
+ OpenApiToolCall, OpenApiToolCallOutput, OutputItemOutputMessage, OutputItemReasoningItem,
+ SharepointGroundingToolCall, SharepointGroundingToolCallOutput, OutputItemFunctionShellCall,
+ OutputItemFunctionShellCallOutput, StructuredOutputsOutputItem, OutputItemWebSearchToolCall,
+ WorkflowActionOutputItem
+
+ :ivar type: Required. Known values are: "output_message", "file_search_call", "function_call",
+ "web_search_call", "computer_call", "reasoning", "compaction", "image_generation_call",
+ "code_interpreter_call", "local_shell_call", "shell_call", "shell_call_output",
+ "apply_patch_call", "apply_patch_call_output", "mcp_call", "mcp_list_tools",
+ "mcp_approval_request", "custom_tool_call", "message", "computer_call_output",
+ "function_call_output", "local_shell_call_output", "mcp_approval_response",
+ "custom_tool_call_output", "structured_outputs", "oauth_consent_request", "memory_search_call",
+ "workflow_action", "a2a_preview_call", "a2a_preview_call_output", "bing_grounding_call",
+ "bing_grounding_call_output", "sharepoint_grounding_preview_call",
+ "sharepoint_grounding_preview_call_output", "azure_ai_search_call",
+ "azure_ai_search_call_output", "bing_custom_search_preview_call",
+ "bing_custom_search_preview_call_output", "openapi_call", "openapi_call_output",
+ "browser_automation_preview_call", "browser_automation_preview_call_output",
+ "fabric_dataagent_preview_call", "fabric_dataagent_preview_call_output", "azure_function_call",
+ and "azure_function_call_output".
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.OutputItemType
+ :ivar agent_reference: The agent that created the item.
+ :vartype agent_reference: ~azure.ai.agentserver.responses.models.models.AgentReference
+ :ivar response_id: The response on which the item is created.
+ :vartype response_id: str
+ """
+
+ __mapping__: dict[str, _Model] = {}
+ type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])
+ """Required. Known values are: \"output_message\", \"file_search_call\", \"function_call\",
+ \"web_search_call\", \"computer_call\", \"reasoning\", \"compaction\",
+ \"image_generation_call\", \"code_interpreter_call\", \"local_shell_call\", \"shell_call\",
+ \"shell_call_output\", \"apply_patch_call\", \"apply_patch_call_output\", \"mcp_call\",
+ \"mcp_list_tools\", \"mcp_approval_request\", \"custom_tool_call\", \"message\",
+ \"computer_call_output\", \"function_call_output\", \"local_shell_call_output\",
+ \"mcp_approval_response\", \"custom_tool_call_output\", \"structured_outputs\",
+ \"oauth_consent_request\", \"memory_search_call\", \"workflow_action\", \"a2a_preview_call\",
+ \"a2a_preview_call_output\", \"bing_grounding_call\", \"bing_grounding_call_output\",
+ \"sharepoint_grounding_preview_call\", \"sharepoint_grounding_preview_call_output\",
+ \"azure_ai_search_call\", \"azure_ai_search_call_output\", \"bing_custom_search_preview_call\",
+ \"bing_custom_search_preview_call_output\", \"openapi_call\", \"openapi_call_output\",
+ \"browser_automation_preview_call\", \"browser_automation_preview_call_output\",
+ \"fabric_dataagent_preview_call\", \"fabric_dataagent_preview_call_output\",
+ \"azure_function_call\", and \"azure_function_call_output\"."""
+ agent_reference: Optional["_models.AgentReference"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The agent that created the item."""
+ response_id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The response on which the item is created."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ type: str,
+ agent_reference: Optional["_models.AgentReference"] = None,
+ response_id: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class A2AToolCall(OutputItem, discriminator="a2a_preview_call"):
+ """An A2A (Agent-to-Agent) tool call.
+
+ :ivar agent_reference: The agent that created the item.
+ :vartype agent_reference: ~azure.ai.agentserver.responses.models.models.AgentReference
+ :ivar response_id: The response on which the item is created.
+ :vartype response_id: str
+ :ivar type: Required. A2_A_PREVIEW_CALL.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.A2_A_PREVIEW_CALL
+ :ivar call_id: The unique ID of the tool call generated by the model. Required.
+ :vartype call_id: str
+ :ivar name: The name of the A2A agent card being called. Required.
+ :vartype name: str
+ :ivar arguments: A JSON string of the arguments to pass to the tool. Required.
+ :vartype arguments: str
+ :ivar status: The status of the tool call. Required. Known values are: "in_progress",
+ "completed", "incomplete", and "failed".
+ :vartype status: str or ~azure.ai.agentserver.responses.models.models.ToolCallStatus
+ :ivar id: Required.
+ :vartype id: str
+ """
+
+ type: Literal[OutputItemType.A2_A_PREVIEW_CALL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Required. A2_A_PREVIEW_CALL."""
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the tool call generated by the model. Required."""
+ name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The name of the A2A agent card being called. Required."""
+ arguments: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """A JSON string of the arguments to pass to the tool. Required."""
+ status: Union[str, "_models.ToolCallStatus"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the tool call. Required. Known values are: \"in_progress\", \"completed\",
+ \"incomplete\", and \"failed\"."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ call_id: str,
+ name: str,
+ arguments: str,
+ status: Union[str, "_models.ToolCallStatus"],
+ id: str, # pylint: disable=redefined-builtin
+ agent_reference: Optional["_models.AgentReference"] = None,
+ response_id: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputItemType.A2_A_PREVIEW_CALL # type: ignore
+
+
+class A2AToolCallOutput(OutputItem, discriminator="a2a_preview_call_output"):
+ """The output of an A2A (Agent-to-Agent) tool call.
+
+ :ivar agent_reference: The agent that created the item.
+ :vartype agent_reference: ~azure.ai.agentserver.responses.models.models.AgentReference
+ :ivar response_id: The response on which the item is created.
+ :vartype response_id: str
+ :ivar type: Required. A2_A_PREVIEW_CALL_OUTPUT.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.A2_A_PREVIEW_CALL_OUTPUT
+ :ivar call_id: The unique ID of the tool call generated by the model. Required.
+ :vartype call_id: str
+ :ivar name: The name of the A2A agent card that was called. Required.
+ :vartype name: str
+ :ivar output: The output from the A2A tool call. Is one of the following types: {str: Any},
+ str, [Any]
+ :vartype output: dict[str, any] or str or list[any]
+ :ivar status: The status of the tool call. Required. Known values are: "in_progress",
+ "completed", "incomplete", and "failed".
+ :vartype status: str or ~azure.ai.agentserver.responses.models.models.ToolCallStatus
+ :ivar id: Required.
+ :vartype id: str
+ """
+
+ type: Literal[OutputItemType.A2_A_PREVIEW_CALL_OUTPUT] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Required. A2_A_PREVIEW_CALL_OUTPUT."""
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the tool call generated by the model. Required."""
+ name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The name of the A2A agent card that was called. Required."""
+ output: Optional["_types.ToolCallOutputContent"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The output from the A2A tool call. Is one of the following types: {str: Any}, str, [Any]"""
+ status: Union[str, "_models.ToolCallStatus"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the tool call. Required. Known values are: \"in_progress\", \"completed\",
+ \"incomplete\", and \"failed\"."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ call_id: str,
+ name: str,
+ status: Union[str, "_models.ToolCallStatus"],
+ id: str, # pylint: disable=redefined-builtin
+ agent_reference: Optional["_models.AgentReference"] = None,
+ response_id: Optional[str] = None,
+ output: Optional["_types.ToolCallOutputContent"] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputItemType.A2_A_PREVIEW_CALL_OUTPUT # type: ignore
+
+
+class AgentReference(_Model):
+ """AgentReference.
+
+ :ivar type: Required. Default value is "agent_reference".
+ :vartype type: str
+ :ivar name: The name of the agent. Required.
+ :vartype name: str
+ :ivar version: The version identifier of the agent.
+ :vartype version: str
+ """
+
+ type: Literal["agent_reference"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required. Default value is \"agent_reference\"."""
+ name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The name of the agent. Required."""
+ version: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The version identifier of the agent."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ name: str,
+ version: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type: Literal["agent_reference"] = "agent_reference"
+
+
+class AISearchIndexResource(_Model):
+ """A AI Search Index resource.
+
+ :ivar project_connection_id: An index connection ID in an IndexResource attached to this agent.
+ :vartype project_connection_id: str
+ :ivar index_name: The name of an index in an IndexResource attached to this agent.
+ :vartype index_name: str
+ :ivar name: Optional user-defined name for this tool or configuration.
+ :vartype name: str
+ :ivar description: Optional user-defined description for this tool or configuration.
+ :vartype description: str
+ :ivar query_type: Type of query in an AIIndexResource attached to this agent. Known values are:
+ "simple", "semantic", "vector", "vector_simple_hybrid", and "vector_semantic_hybrid".
+ :vartype query_type: str or
+ ~azure.ai.agentserver.responses.models.models.AzureAISearchQueryType
+ :ivar top_k: Number of documents to retrieve from search and present to the model.
+ :vartype top_k: int
+ :ivar filter: filter string for search resource. `Learn more here
+ `_.
+ :vartype filter: str
+ :ivar index_asset_id: Index asset id for search resource.
+ :vartype index_asset_id: str
+ """
+
+ project_connection_id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """An index connection ID in an IndexResource attached to this agent."""
+ index_name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The name of an index in an IndexResource attached to this agent."""
+ name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined name for this tool or configuration."""
+ description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined description for this tool or configuration."""
+ query_type: Optional[Union[str, "_models.AzureAISearchQueryType"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Type of query in an AIIndexResource attached to this agent. Known values are: \"simple\",
+ \"semantic\", \"vector\", \"vector_simple_hybrid\", and \"vector_semantic_hybrid\"."""
+ top_k: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Number of documents to retrieve from search and present to the model."""
+ filter: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """filter string for search resource. `Learn more here
+ `_."""
+ index_asset_id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Index asset id for search resource."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ project_connection_id: Optional[str] = None,
+ index_name: Optional[str] = None,
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ query_type: Optional[Union[str, "_models.AzureAISearchQueryType"]] = None,
+ top_k: Optional[int] = None,
+ filter: Optional[str] = None, # pylint: disable=redefined-builtin
+ index_asset_id: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class Annotation(_Model):
+ """An annotation that applies to a span of output text.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ ContainerFileCitationBody, FileCitationBody, FilePath, UrlCitationBody
+
+ :ivar type: Required. Known values are: "file_citation", "url_citation",
+ "container_file_citation", and "file_path".
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.AnnotationType
+ """
+
+ __mapping__: dict[str, _Model] = {}
+ type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])
+ """Required. Known values are: \"file_citation\", \"url_citation\", \"container_file_citation\",
+ and \"file_path\"."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ type: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class ApiErrorResponse(_Model):
+ """Error response for API failures.
+
+ :ivar error: Required.
+ :vartype error: ~azure.ai.agentserver.responses.models.models.Error
+ """
+
+ error: "_models.Error" = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ error: "_models.Error",
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class ApplyPatchFileOperation(_Model):
+ """Apply patch operation.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ ApplyPatchCreateFileOperation, ApplyPatchDeleteFileOperation, ApplyPatchUpdateFileOperation
+
+ :ivar type: Required. Known values are: "create_file", "delete_file", and "update_file".
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.ApplyPatchFileOperationType
+ """
+
+ __mapping__: dict[str, _Model] = {}
+ type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])
+ """Required. Known values are: \"create_file\", \"delete_file\", and \"update_file\"."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ type: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class ApplyPatchCreateFileOperation(ApplyPatchFileOperation, discriminator="create_file"):
+ """Apply patch create file operation.
+
+ :ivar type: Create a new file with the provided diff. Required. CREATE_FILE.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.CREATE_FILE
+ :ivar path: Path of the file to create. Required.
+ :vartype path: str
+ :ivar diff: Diff to apply. Required.
+ :vartype diff: str
+ """
+
+ type: Literal[ApplyPatchFileOperationType.CREATE_FILE] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Create a new file with the provided diff. Required. CREATE_FILE."""
+ path: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Path of the file to create. Required."""
+ diff: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Diff to apply. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ path: str,
+ diff: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ApplyPatchFileOperationType.CREATE_FILE # type: ignore
+
+
+class ApplyPatchOperationParam(_Model):
+ """Apply patch operation.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ ApplyPatchCreateFileOperationParam, ApplyPatchDeleteFileOperationParam,
+ ApplyPatchUpdateFileOperationParam
+
+ :ivar type: Required. Known values are: "create_file", "delete_file", and "update_file".
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.ApplyPatchOperationParamType
+ """
+
+ __mapping__: dict[str, _Model] = {}
+ type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])
+ """Required. Known values are: \"create_file\", \"delete_file\", and \"update_file\"."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ type: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class ApplyPatchCreateFileOperationParam(ApplyPatchOperationParam, discriminator="create_file"):
+ """Apply patch create file operation.
+
+ :ivar type: The operation type. Always ``create_file``. Required. CREATE_FILE.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.CREATE_FILE
+ :ivar path: Path of the file to create relative to the workspace root. Required.
+ :vartype path: str
+ :ivar diff: Unified diff content to apply when creating the file. Required.
+ :vartype diff: str
+ """
+
+ type: Literal[ApplyPatchOperationParamType.CREATE_FILE] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The operation type. Always ``create_file``. Required. CREATE_FILE."""
+ path: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Path of the file to create relative to the workspace root. Required."""
+ diff: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Unified diff content to apply when creating the file. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ path: str,
+ diff: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ApplyPatchOperationParamType.CREATE_FILE # type: ignore
+
+
+class ApplyPatchDeleteFileOperation(ApplyPatchFileOperation, discriminator="delete_file"):
+ """Apply patch delete file operation.
+
+ :ivar type: Delete the specified file. Required. DELETE_FILE.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.DELETE_FILE
+ :ivar path: Path of the file to delete. Required.
+ :vartype path: str
+ """
+
+ type: Literal[ApplyPatchFileOperationType.DELETE_FILE] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Delete the specified file. Required. DELETE_FILE."""
+ path: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Path of the file to delete. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ path: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ApplyPatchFileOperationType.DELETE_FILE # type: ignore
+
+
+class ApplyPatchDeleteFileOperationParam(ApplyPatchOperationParam, discriminator="delete_file"):
+ """Apply patch delete file operation.
+
+ :ivar type: The operation type. Always ``delete_file``. Required. DELETE_FILE.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.DELETE_FILE
+ :ivar path: Path of the file to delete relative to the workspace root. Required.
+ :vartype path: str
+ """
+
+ type: Literal[ApplyPatchOperationParamType.DELETE_FILE] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The operation type. Always ``delete_file``. Required. DELETE_FILE."""
+ path: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Path of the file to delete relative to the workspace root. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ path: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ApplyPatchOperationParamType.DELETE_FILE # type: ignore
+
+
+class Item(_Model):
+ """Content item used to generate a response.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ ApplyPatchToolCallItemParam, ApplyPatchToolCallOutputItemParam, ItemCodeInterpreterToolCall,
+ CompactionSummaryItemParam, ItemComputerToolCall, ComputerCallOutputItemParam,
+ ItemCustomToolCall, ItemCustomToolCallOutput, ItemFileSearchToolCall, ItemFunctionToolCall,
+ FunctionCallOutputItemParam, ItemImageGenToolCall, ItemReferenceParam, ItemLocalShellToolCall,
+ ItemLocalShellToolCallOutput, ItemMcpApprovalRequest, MCPApprovalResponse, ItemMcpToolCall,
+ ItemMcpListTools, MemorySearchToolCallItemParam, ItemMessage, ItemOutputMessage,
+ ItemReasoningItem, FunctionShellCallItemParam, FunctionShellCallOutputItemParam,
+ ItemWebSearchToolCall
+
+ :ivar type: Required. Known values are: "message", "output_message", "file_search_call",
+ "computer_call", "computer_call_output", "web_search_call", "function_call",
+ "function_call_output", "reasoning", "compaction", "image_generation_call",
+ "code_interpreter_call", "local_shell_call", "local_shell_call_output", "shell_call",
+ "shell_call_output", "apply_patch_call", "apply_patch_call_output", "mcp_list_tools",
+ "mcp_approval_request", "mcp_approval_response", "mcp_call", "custom_tool_call_output",
+ "custom_tool_call", "item_reference", "structured_outputs", "oauth_consent_request",
+ "memory_search_call", "workflow_action", "a2a_preview_call", "a2a_preview_call_output",
+ "bing_grounding_call", "bing_grounding_call_output", "sharepoint_grounding_preview_call",
+ "sharepoint_grounding_preview_call_output", "azure_ai_search_call",
+ "azure_ai_search_call_output", "bing_custom_search_preview_call",
+ "bing_custom_search_preview_call_output", "openapi_call", "openapi_call_output",
+ "browser_automation_preview_call", "browser_automation_preview_call_output",
+ "fabric_dataagent_preview_call", "fabric_dataagent_preview_call_output", "azure_function_call",
+ and "azure_function_call_output".
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.ItemType
+ """
+
+ __mapping__: dict[str, _Model] = {}
+ type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])
+ """Required. Known values are: \"message\", \"output_message\", \"file_search_call\",
+ \"computer_call\", \"computer_call_output\", \"web_search_call\", \"function_call\",
+ \"function_call_output\", \"reasoning\", \"compaction\", \"image_generation_call\",
+ \"code_interpreter_call\", \"local_shell_call\", \"local_shell_call_output\", \"shell_call\",
+ \"shell_call_output\", \"apply_patch_call\", \"apply_patch_call_output\", \"mcp_list_tools\",
+ \"mcp_approval_request\", \"mcp_approval_response\", \"mcp_call\", \"custom_tool_call_output\",
+ \"custom_tool_call\", \"item_reference\", \"structured_outputs\", \"oauth_consent_request\",
+ \"memory_search_call\", \"workflow_action\", \"a2a_preview_call\", \"a2a_preview_call_output\",
+ \"bing_grounding_call\", \"bing_grounding_call_output\", \"sharepoint_grounding_preview_call\",
+ \"sharepoint_grounding_preview_call_output\", \"azure_ai_search_call\",
+ \"azure_ai_search_call_output\", \"bing_custom_search_preview_call\",
+ \"bing_custom_search_preview_call_output\", \"openapi_call\", \"openapi_call_output\",
+ \"browser_automation_preview_call\", \"browser_automation_preview_call_output\",
+ \"fabric_dataagent_preview_call\", \"fabric_dataagent_preview_call_output\",
+ \"azure_function_call\", and \"azure_function_call_output\"."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ type: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class ApplyPatchToolCallItemParam(Item, discriminator="apply_patch_call"):
+ """Apply patch tool call.
+
+ :ivar type: The type of the item. Always ``apply_patch_call``. Required. APPLY_PATCH_CALL.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.APPLY_PATCH_CALL
+ :ivar id:
+ :vartype id: str
+ :ivar call_id: The unique ID of the apply patch tool call generated by the model. Required.
+ :vartype call_id: str
+ :ivar status: The status of the apply patch tool call. One of ``in_progress`` or ``completed``.
+ Required. Known values are: "in_progress" and "completed".
+ :vartype status: str or ~azure.ai.agentserver.responses.models.models.ApplyPatchCallStatusParam
+ :ivar operation: The specific create, delete, or update instruction for the apply_patch tool
+ call. Required.
+ :vartype operation: ~azure.ai.agentserver.responses.models.models.ApplyPatchOperationParam
+ """
+
+ type: Literal[ItemType.APPLY_PATCH_CALL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the item. Always ``apply_patch_call``. Required. APPLY_PATCH_CALL."""
+ id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the apply patch tool call generated by the model. Required."""
+ status: Union[str, "_models.ApplyPatchCallStatusParam"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the apply patch tool call. One of ``in_progress`` or ``completed``. Required.
+ Known values are: \"in_progress\" and \"completed\"."""
+ operation: "_models.ApplyPatchOperationParam" = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The specific create, delete, or update instruction for the apply_patch tool call. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ call_id: str,
+ status: Union[str, "_models.ApplyPatchCallStatusParam"],
+ operation: "_models.ApplyPatchOperationParam",
+ id: Optional[str] = None, # pylint: disable=redefined-builtin
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ItemType.APPLY_PATCH_CALL # type: ignore
+
+
+class ApplyPatchToolCallOutputItemParam(Item, discriminator="apply_patch_call_output"):
+ """Apply patch tool call output.
+
+ :ivar type: The type of the item. Always ``apply_patch_call_output``. Required.
+ APPLY_PATCH_CALL_OUTPUT.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.APPLY_PATCH_CALL_OUTPUT
+ :ivar id:
+ :vartype id: str
+ :ivar call_id: The unique ID of the apply patch tool call generated by the model. Required.
+ :vartype call_id: str
+ :ivar status: The status of the apply patch tool call output. One of ``completed`` or
+ ``failed``. Required. Known values are: "completed" and "failed".
+ :vartype status: str or
+ ~azure.ai.agentserver.responses.models.models.ApplyPatchCallOutputStatusParam
+ :ivar output:
+ :vartype output: str
+ """
+
+ type: Literal[ItemType.APPLY_PATCH_CALL_OUTPUT] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the item. Always ``apply_patch_call_output``. Required. APPLY_PATCH_CALL_OUTPUT."""
+ id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the apply patch tool call generated by the model. Required."""
+ status: Union[str, "_models.ApplyPatchCallOutputStatusParam"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the apply patch tool call output. One of ``completed`` or ``failed``. Required.
+ Known values are: \"completed\" and \"failed\"."""
+ output: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+
+ @overload
+ def __init__(
+ self,
+ *,
+ call_id: str,
+ status: Union[str, "_models.ApplyPatchCallOutputStatusParam"],
+ id: Optional[str] = None, # pylint: disable=redefined-builtin
+ output: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ItemType.APPLY_PATCH_CALL_OUTPUT # type: ignore
+
+
+class ApplyPatchToolParam(Tool, discriminator="apply_patch"):
+ """Apply patch tool.
+
+ :ivar type: The type of the tool. Always ``apply_patch``. Required. APPLY_PATCH.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.APPLY_PATCH
+ """
+
+ type: Literal[ToolType.APPLY_PATCH] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the tool. Always ``apply_patch``. Required. APPLY_PATCH."""
+
+ @overload
+ def __init__(
+ self,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ToolType.APPLY_PATCH # type: ignore
+
+
+class ApplyPatchUpdateFileOperation(ApplyPatchFileOperation, discriminator="update_file"):
+ """Apply patch update file operation.
+
+ :ivar type: Update an existing file with the provided diff. Required. UPDATE_FILE.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.UPDATE_FILE
+ :ivar path: Path of the file to update. Required.
+ :vartype path: str
+ :ivar diff: Diff to apply. Required.
+ :vartype diff: str
+ """
+
+ type: Literal[ApplyPatchFileOperationType.UPDATE_FILE] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Update an existing file with the provided diff. Required. UPDATE_FILE."""
+ path: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Path of the file to update. Required."""
+ diff: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Diff to apply. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ path: str,
+ diff: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ApplyPatchFileOperationType.UPDATE_FILE # type: ignore
+
+
+class ApplyPatchUpdateFileOperationParam(ApplyPatchOperationParam, discriminator="update_file"):
+ """Apply patch update file operation.
+
+ :ivar type: The operation type. Always ``update_file``. Required. UPDATE_FILE.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.UPDATE_FILE
+ :ivar path: Path of the file to update relative to the workspace root. Required.
+ :vartype path: str
+ :ivar diff: Unified diff content to apply to the existing file. Required.
+ :vartype diff: str
+ """
+
+ type: Literal[ApplyPatchOperationParamType.UPDATE_FILE] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The operation type. Always ``update_file``. Required. UPDATE_FILE."""
+ path: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Path of the file to update relative to the workspace root. Required."""
+ diff: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Unified diff content to apply to the existing file. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ path: str,
+ diff: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ApplyPatchOperationParamType.UPDATE_FILE # type: ignore
+
+
+class ApproximateLocation(_Model):
+ """ApproximateLocation.
+
+ :ivar type: The type of location approximation. Always ``approximate``. Required. Default value
+ is "approximate".
+ :vartype type: str
+ :ivar country:
+ :vartype country: str
+ :ivar region:
+ :vartype region: str
+ :ivar city:
+ :vartype city: str
+ :ivar timezone:
+ :vartype timezone: str
+ """
+
+ type: Literal["approximate"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The type of location approximation. Always ``approximate``. Required. Default value is
+ \"approximate\"."""
+ country: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ region: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ city: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ timezone: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+
+ @overload
+ def __init__(
+ self,
+ *,
+ country: Optional[str] = None,
+ region: Optional[str] = None,
+ city: Optional[str] = None,
+ timezone: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type: Literal["approximate"] = "approximate"
+
+
+class AutoCodeInterpreterToolParam(_Model):
+ """Automatic Code Interpreter Tool Parameters.
+
+ :ivar type: Always ``auto``. Required. Default value is "auto".
+ :vartype type: str
+ :ivar file_ids: An optional list of uploaded files to make available to your code.
+ :vartype file_ids: list[str]
+ :ivar memory_limit: Known values are: "1g", "4g", "16g", and "64g".
+ :vartype memory_limit: str or
+ ~azure.ai.agentserver.responses.models.models.ContainerMemoryLimit
+ :ivar network_policy:
+ :vartype network_policy:
+ ~azure.ai.agentserver.responses.models.models.ContainerNetworkPolicyParam
+ """
+
+ type: Literal["auto"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Always ``auto``. Required. Default value is \"auto\"."""
+ file_ids: Optional[list[str]] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """An optional list of uploaded files to make available to your code."""
+ memory_limit: Optional[Union[str, "_models.ContainerMemoryLimit"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Known values are: \"1g\", \"4g\", \"16g\", and \"64g\"."""
+ network_policy: Optional["_models.ContainerNetworkPolicyParam"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+
+ @overload
+ def __init__(
+ self,
+ *,
+ file_ids: Optional[list[str]] = None,
+ memory_limit: Optional[Union[str, "_models.ContainerMemoryLimit"]] = None,
+ network_policy: Optional["_models.ContainerNetworkPolicyParam"] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type: Literal["auto"] = "auto"
+
+
+class AzureAISearchTool(Tool, discriminator="azure_ai_search"):
+ """The input definition information for an Azure AI search tool as used to configure an agent.
+
+ :ivar type: The object type, which is always 'azure_ai_search'. Required. AZURE_AI_SEARCH.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.AZURE_AI_SEARCH
+ :ivar name: Optional user-defined name for this tool or configuration.
+ :vartype name: str
+ :ivar description: Optional user-defined description for this tool or configuration.
+ :vartype description: str
+ :ivar azure_ai_search: The azure ai search index resource. Required.
+ :vartype azure_ai_search:
+ ~azure.ai.agentserver.responses.models.models.AzureAISearchToolResource
+ """
+
+ type: Literal[ToolType.AZURE_AI_SEARCH] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The object type, which is always 'azure_ai_search'. Required. AZURE_AI_SEARCH."""
+ name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined name for this tool or configuration."""
+ description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined description for this tool or configuration."""
+ azure_ai_search: "_models.AzureAISearchToolResource" = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The azure ai search index resource. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ azure_ai_search: "_models.AzureAISearchToolResource",
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ToolType.AZURE_AI_SEARCH # type: ignore
+
+
+class AzureAISearchToolCall(OutputItem, discriminator="azure_ai_search_call"):
+ """An Azure AI Search tool call.
+
+ :ivar agent_reference: The agent that created the item.
+ :vartype agent_reference: ~azure.ai.agentserver.responses.models.models.AgentReference
+ :ivar response_id: The response on which the item is created.
+ :vartype response_id: str
+ :ivar type: Required. AZURE_AI_SEARCH_CALL.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.AZURE_AI_SEARCH_CALL
+ :ivar call_id: The unique ID of the tool call generated by the model. Required.
+ :vartype call_id: str
+ :ivar arguments: A JSON string of the arguments to pass to the tool. Required.
+ :vartype arguments: str
+ :ivar status: The status of the tool call. Required. Known values are: "in_progress",
+ "completed", "incomplete", and "failed".
+ :vartype status: str or ~azure.ai.agentserver.responses.models.models.ToolCallStatus
+ :ivar id: Required.
+ :vartype id: str
+ """
+
+ type: Literal[OutputItemType.AZURE_AI_SEARCH_CALL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Required. AZURE_AI_SEARCH_CALL."""
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the tool call generated by the model. Required."""
+ arguments: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """A JSON string of the arguments to pass to the tool. Required."""
+ status: Union[str, "_models.ToolCallStatus"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the tool call. Required. Known values are: \"in_progress\", \"completed\",
+ \"incomplete\", and \"failed\"."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ call_id: str,
+ arguments: str,
+ status: Union[str, "_models.ToolCallStatus"],
+ id: str, # pylint: disable=redefined-builtin
+ agent_reference: Optional["_models.AgentReference"] = None,
+ response_id: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputItemType.AZURE_AI_SEARCH_CALL # type: ignore
+
+
+class AzureAISearchToolCallOutput(OutputItem, discriminator="azure_ai_search_call_output"):
+ """The output of an Azure AI Search tool call.
+
+ :ivar agent_reference: The agent that created the item.
+ :vartype agent_reference: ~azure.ai.agentserver.responses.models.models.AgentReference
+ :ivar response_id: The response on which the item is created.
+ :vartype response_id: str
+ :ivar type: Required. AZURE_AI_SEARCH_CALL_OUTPUT.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.AZURE_AI_SEARCH_CALL_OUTPUT
+ :ivar call_id: The unique ID of the tool call generated by the model. Required.
+ :vartype call_id: str
+ :ivar output: The output from the Azure AI Search tool call. Is one of the following types:
+ {str: Any}, str, [Any]
+ :vartype output: dict[str, any] or str or list[any]
+ :ivar status: The status of the tool call. Required. Known values are: "in_progress",
+ "completed", "incomplete", and "failed".
+ :vartype status: str or ~azure.ai.agentserver.responses.models.models.ToolCallStatus
+ :ivar id: Required.
+ :vartype id: str
+ """
+
+ type: Literal[OutputItemType.AZURE_AI_SEARCH_CALL_OUTPUT] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Required. AZURE_AI_SEARCH_CALL_OUTPUT."""
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the tool call generated by the model. Required."""
+ output: Optional["_types.ToolCallOutputContent"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The output from the Azure AI Search tool call. Is one of the following types: {str: Any}, str,
+ [Any]"""
+ status: Union[str, "_models.ToolCallStatus"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the tool call. Required. Known values are: \"in_progress\", \"completed\",
+ \"incomplete\", and \"failed\"."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ call_id: str,
+ status: Union[str, "_models.ToolCallStatus"],
+ id: str, # pylint: disable=redefined-builtin
+ agent_reference: Optional["_models.AgentReference"] = None,
+ response_id: Optional[str] = None,
+ output: Optional["_types.ToolCallOutputContent"] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputItemType.AZURE_AI_SEARCH_CALL_OUTPUT # type: ignore
+
+
+class AzureAISearchToolResource(_Model):
+ """A set of index resources used by the ``azure_ai_search`` tool.
+
+ :ivar name: Optional user-defined name for this tool or configuration.
+ :vartype name: str
+ :ivar description: Optional user-defined description for this tool or configuration.
+ :vartype description: str
+ :ivar indexes: The indices attached to this agent. There can be a maximum of 1 index resource
+ attached to the agent. Required.
+ :vartype indexes: list[~azure.ai.agentserver.responses.models.models.AISearchIndexResource]
+ """
+
+ name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined name for this tool or configuration."""
+ description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined description for this tool or configuration."""
+ indexes: list["_models.AISearchIndexResource"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The indices attached to this agent. There can be a maximum of 1 index resource attached to the
+ agent. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ indexes: list["_models.AISearchIndexResource"],
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class AzureFunctionBinding(_Model):
+ """The structure for keeping storage queue name and URI.
+
+ :ivar type: The type of binding, which is always 'storage_queue'. Required. Default value is
+ "storage_queue".
+ :vartype type: str
+ :ivar storage_queue: Storage queue. Required.
+ :vartype storage_queue: ~azure.ai.agentserver.responses.models.models.AzureFunctionStorageQueue
+ """
+
+ type: Literal["storage_queue"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The type of binding, which is always 'storage_queue'. Required. Default value is
+ \"storage_queue\"."""
+ storage_queue: "_models.AzureFunctionStorageQueue" = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Storage queue. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ storage_queue: "_models.AzureFunctionStorageQueue",
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type: Literal["storage_queue"] = "storage_queue"
+
+
+class AzureFunctionDefinition(_Model):
+ """The definition of Azure function.
+
+ :ivar function: The definition of azure function and its parameters. Required.
+ :vartype function:
+ ~azure.ai.agentserver.responses.models.models.AzureFunctionDefinitionFunction
+ :ivar input_binding: Input storage queue. The queue storage trigger runs a function as messages
+ are added to it. Required.
+ :vartype input_binding: ~azure.ai.agentserver.responses.models.models.AzureFunctionBinding
+ :ivar output_binding: Output storage queue. The function writes output to this queue when the
+ input items are processed. Required.
+ :vartype output_binding: ~azure.ai.agentserver.responses.models.models.AzureFunctionBinding
+ """
+
+ function: "_models.AzureFunctionDefinitionFunction" = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The definition of azure function and its parameters. Required."""
+ input_binding: "_models.AzureFunctionBinding" = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Input storage queue. The queue storage trigger runs a function as messages are added to it.
+ Required."""
+ output_binding: "_models.AzureFunctionBinding" = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Output storage queue. The function writes output to this queue when the input items are
+ processed. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ function: "_models.AzureFunctionDefinitionFunction",
+ input_binding: "_models.AzureFunctionBinding",
+ output_binding: "_models.AzureFunctionBinding",
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class AzureFunctionDefinitionFunction(_Model):
+ """AzureFunctionDefinitionFunction.
+
+ :ivar name: The name of the function to be called. Required.
+ :vartype name: str
+ :ivar description: A description of what the function does, used by the model to choose when
+ and how to call the function.
+ :vartype description: str
+ :ivar parameters: The parameters the functions accepts, described as a JSON Schema object.
+ Required.
+ :vartype parameters: dict[str, any]
+ """
+
+ name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The name of the function to be called. Required."""
+ description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """A description of what the function does, used by the model to choose when and how to call the
+ function."""
+ parameters: dict[str, Any] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The parameters the functions accepts, described as a JSON Schema object. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ name: str,
+ parameters: dict[str, Any],
+ description: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class AzureFunctionStorageQueue(_Model):
+ """The structure for keeping storage queue name and URI.
+
+ :ivar queue_service_endpoint: URI to the Azure Storage Queue service allowing you to manipulate
+ a queue. Required.
+ :vartype queue_service_endpoint: str
+ :ivar queue_name: The name of an Azure function storage queue. Required.
+ :vartype queue_name: str
+ """
+
+ queue_service_endpoint: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """URI to the Azure Storage Queue service allowing you to manipulate a queue. Required."""
+ queue_name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The name of an Azure function storage queue. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ queue_service_endpoint: str,
+ queue_name: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class AzureFunctionTool(Tool, discriminator="azure_function"):
+ """The input definition information for an Azure Function Tool, as used to configure an Agent.
+
+ :ivar type: The object type, which is always 'browser_automation'. Required. AZURE_FUNCTION.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.AZURE_FUNCTION
+ :ivar azure_function: The Azure Function Tool definition. Required.
+ :vartype azure_function: ~azure.ai.agentserver.responses.models.models.AzureFunctionDefinition
+ """
+
+ type: Literal[ToolType.AZURE_FUNCTION] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The object type, which is always 'browser_automation'. Required. AZURE_FUNCTION."""
+ azure_function: "_models.AzureFunctionDefinition" = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The Azure Function Tool definition. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ azure_function: "_models.AzureFunctionDefinition",
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ToolType.AZURE_FUNCTION # type: ignore
+
+
+class AzureFunctionToolCall(OutputItem, discriminator="azure_function_call"):
+ """An Azure Function tool call.
+
+ :ivar agent_reference: The agent that created the item.
+ :vartype agent_reference: ~azure.ai.agentserver.responses.models.models.AgentReference
+ :ivar response_id: The response on which the item is created.
+ :vartype response_id: str
+ :ivar type: Required. AZURE_FUNCTION_CALL.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.AZURE_FUNCTION_CALL
+ :ivar call_id: The unique ID of the tool call generated by the model. Required.
+ :vartype call_id: str
+ :ivar name: The name of the Azure Function being called. Required.
+ :vartype name: str
+ :ivar arguments: A JSON string of the arguments to pass to the tool. Required.
+ :vartype arguments: str
+ :ivar status: The status of the tool call. Required. Known values are: "in_progress",
+ "completed", "incomplete", and "failed".
+ :vartype status: str or ~azure.ai.agentserver.responses.models.models.ToolCallStatus
+ :ivar id: Required.
+ :vartype id: str
+ """
+
+ type: Literal[OutputItemType.AZURE_FUNCTION_CALL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Required. AZURE_FUNCTION_CALL."""
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the tool call generated by the model. Required."""
+ name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The name of the Azure Function being called. Required."""
+ arguments: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """A JSON string of the arguments to pass to the tool. Required."""
+ status: Union[str, "_models.ToolCallStatus"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the tool call. Required. Known values are: \"in_progress\", \"completed\",
+ \"incomplete\", and \"failed\"."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ call_id: str,
+ name: str,
+ arguments: str,
+ status: Union[str, "_models.ToolCallStatus"],
+ id: str, # pylint: disable=redefined-builtin
+ agent_reference: Optional["_models.AgentReference"] = None,
+ response_id: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputItemType.AZURE_FUNCTION_CALL # type: ignore
+
+
+class AzureFunctionToolCallOutput(OutputItem, discriminator="azure_function_call_output"):
+ """The output of an Azure Function tool call.
+
+ :ivar agent_reference: The agent that created the item.
+ :vartype agent_reference: ~azure.ai.agentserver.responses.models.models.AgentReference
+ :ivar response_id: The response on which the item is created.
+ :vartype response_id: str
+ :ivar type: Required. AZURE_FUNCTION_CALL_OUTPUT.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.AZURE_FUNCTION_CALL_OUTPUT
+ :ivar call_id: The unique ID of the tool call generated by the model. Required.
+ :vartype call_id: str
+ :ivar name: The name of the Azure Function that was called. Required.
+ :vartype name: str
+ :ivar output: The output from the Azure Function tool call. Is one of the following types:
+ {str: Any}, str, [Any]
+ :vartype output: dict[str, any] or str or list[any]
+ :ivar status: The status of the tool call. Required. Known values are: "in_progress",
+ "completed", "incomplete", and "failed".
+ :vartype status: str or ~azure.ai.agentserver.responses.models.models.ToolCallStatus
+ :ivar id: Required.
+ :vartype id: str
+ """
+
+ type: Literal[OutputItemType.AZURE_FUNCTION_CALL_OUTPUT] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Required. AZURE_FUNCTION_CALL_OUTPUT."""
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the tool call generated by the model. Required."""
+ name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The name of the Azure Function that was called. Required."""
+ output: Optional["_types.ToolCallOutputContent"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The output from the Azure Function tool call. Is one of the following types: {str: Any}, str,
+ [Any]"""
+ status: Union[str, "_models.ToolCallStatus"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the tool call. Required. Known values are: \"in_progress\", \"completed\",
+ \"incomplete\", and \"failed\"."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ call_id: str,
+ name: str,
+ status: Union[str, "_models.ToolCallStatus"],
+ id: str, # pylint: disable=redefined-builtin
+ agent_reference: Optional["_models.AgentReference"] = None,
+ response_id: Optional[str] = None,
+ output: Optional["_types.ToolCallOutputContent"] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputItemType.AZURE_FUNCTION_CALL_OUTPUT # type: ignore
+
+
+class BingCustomSearchConfiguration(_Model):
+ """A bing custom search configuration.
+
+ :ivar name: Optional user-defined name for this tool or configuration.
+ :vartype name: str
+ :ivar description: Optional user-defined description for this tool or configuration.
+ :vartype description: str
+ :ivar project_connection_id: Project connection id for grounding with bing search. Required.
+ :vartype project_connection_id: str
+ :ivar instance_name: Name of the custom configuration instance given to config. Required.
+ :vartype instance_name: str
+ :ivar market: The market where the results come from.
+ :vartype market: str
+ :ivar set_lang: The language to use for user interface strings when calling Bing API.
+ :vartype set_lang: str
+ :ivar count: The number of search results to return in the bing api response.
+ :vartype count: int
+ :ivar freshness: Filter search results by a specific time range. See `accepted values here
+ `_.
+ :vartype freshness: str
+ """
+
+ name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined name for this tool or configuration."""
+ description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined description for this tool or configuration."""
+ project_connection_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Project connection id for grounding with bing search. Required."""
+ instance_name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Name of the custom configuration instance given to config. Required."""
+ market: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The market where the results come from."""
+ set_lang: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The language to use for user interface strings when calling Bing API."""
+ count: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The number of search results to return in the bing api response."""
+ freshness: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Filter search results by a specific time range. See `accepted values here
+ `_."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ project_connection_id: str,
+ instance_name: str,
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ market: Optional[str] = None,
+ set_lang: Optional[str] = None,
+ count: Optional[int] = None,
+ freshness: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class BingCustomSearchPreviewTool(Tool, discriminator="bing_custom_search_preview"):
+ """The input definition information for a Bing custom search tool as used to configure an agent.
+
+ :ivar type: The object type, which is always 'bing_custom_search_preview'. Required.
+ BING_CUSTOM_SEARCH_PREVIEW.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.BING_CUSTOM_SEARCH_PREVIEW
+ :ivar name: Optional user-defined name for this tool or configuration.
+ :vartype name: str
+ :ivar description: Optional user-defined description for this tool or configuration.
+ :vartype description: str
+ :ivar bing_custom_search_preview: The bing custom search tool parameters. Required.
+ :vartype bing_custom_search_preview:
+ ~azure.ai.agentserver.responses.models.models.BingCustomSearchToolParameters
+ """
+
+ type: Literal[ToolType.BING_CUSTOM_SEARCH_PREVIEW] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The object type, which is always 'bing_custom_search_preview'. Required.
+ BING_CUSTOM_SEARCH_PREVIEW."""
+ name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined name for this tool or configuration."""
+ description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined description for this tool or configuration."""
+ bing_custom_search_preview: "_models.BingCustomSearchToolParameters" = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The bing custom search tool parameters. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ bing_custom_search_preview: "_models.BingCustomSearchToolParameters",
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ToolType.BING_CUSTOM_SEARCH_PREVIEW # type: ignore
+
+
+class BingCustomSearchToolCall(OutputItem, discriminator="bing_custom_search_preview_call"):
+ """A Bing custom search tool call.
+
+ :ivar agent_reference: The agent that created the item.
+ :vartype agent_reference: ~azure.ai.agentserver.responses.models.models.AgentReference
+ :ivar response_id: The response on which the item is created.
+ :vartype response_id: str
+ :ivar type: Required. BING_CUSTOM_SEARCH_PREVIEW_CALL.
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.BING_CUSTOM_SEARCH_PREVIEW_CALL
+ :ivar call_id: The unique ID of the tool call generated by the model. Required.
+ :vartype call_id: str
+ :ivar arguments: A JSON string of the arguments to pass to the tool. Required.
+ :vartype arguments: str
+ :ivar status: The status of the tool call. Required. Known values are: "in_progress",
+ "completed", "incomplete", and "failed".
+ :vartype status: str or ~azure.ai.agentserver.responses.models.models.ToolCallStatus
+ :ivar id: Required.
+ :vartype id: str
+ """
+
+ type: Literal[OutputItemType.BING_CUSTOM_SEARCH_PREVIEW_CALL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Required. BING_CUSTOM_SEARCH_PREVIEW_CALL."""
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the tool call generated by the model. Required."""
+ arguments: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """A JSON string of the arguments to pass to the tool. Required."""
+ status: Union[str, "_models.ToolCallStatus"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the tool call. Required. Known values are: \"in_progress\", \"completed\",
+ \"incomplete\", and \"failed\"."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ call_id: str,
+ arguments: str,
+ status: Union[str, "_models.ToolCallStatus"],
+ id: str, # pylint: disable=redefined-builtin
+ agent_reference: Optional["_models.AgentReference"] = None,
+ response_id: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputItemType.BING_CUSTOM_SEARCH_PREVIEW_CALL # type: ignore
+
+
+class BingCustomSearchToolCallOutput(OutputItem, discriminator="bing_custom_search_preview_call_output"):
+ """The output of a Bing custom search tool call.
+
+ :ivar agent_reference: The agent that created the item.
+ :vartype agent_reference: ~azure.ai.agentserver.responses.models.models.AgentReference
+ :ivar response_id: The response on which the item is created.
+ :vartype response_id: str
+ :ivar type: Required. BING_CUSTOM_SEARCH_PREVIEW_CALL_OUTPUT.
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.BING_CUSTOM_SEARCH_PREVIEW_CALL_OUTPUT
+ :ivar call_id: The unique ID of the tool call generated by the model. Required.
+ :vartype call_id: str
+ :ivar output: The output from the Bing custom search tool call. Is one of the following types:
+ {str: Any}, str, [Any]
+ :vartype output: dict[str, any] or str or list[any]
+ :ivar status: The status of the tool call. Required. Known values are: "in_progress",
+ "completed", "incomplete", and "failed".
+ :vartype status: str or ~azure.ai.agentserver.responses.models.models.ToolCallStatus
+ :ivar id: Required.
+ :vartype id: str
+ """
+
+ type: Literal[OutputItemType.BING_CUSTOM_SEARCH_PREVIEW_CALL_OUTPUT] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Required. BING_CUSTOM_SEARCH_PREVIEW_CALL_OUTPUT."""
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the tool call generated by the model. Required."""
+ output: Optional["_types.ToolCallOutputContent"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The output from the Bing custom search tool call. Is one of the following types: {str: Any},
+ str, [Any]"""
+ status: Union[str, "_models.ToolCallStatus"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the tool call. Required. Known values are: \"in_progress\", \"completed\",
+ \"incomplete\", and \"failed\"."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ call_id: str,
+ status: Union[str, "_models.ToolCallStatus"],
+ id: str, # pylint: disable=redefined-builtin
+ agent_reference: Optional["_models.AgentReference"] = None,
+ response_id: Optional[str] = None,
+ output: Optional["_types.ToolCallOutputContent"] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputItemType.BING_CUSTOM_SEARCH_PREVIEW_CALL_OUTPUT # type: ignore
+
+
+class BingCustomSearchToolParameters(_Model):
+ """The bing custom search tool parameters.
+
+ :ivar name: Optional user-defined name for this tool or configuration.
+ :vartype name: str
+ :ivar description: Optional user-defined description for this tool or configuration.
+ :vartype description: str
+ :ivar search_configurations: The project connections attached to this tool. There can be a
+ maximum of 1 connection resource attached to the tool. Required.
+ :vartype search_configurations:
+ list[~azure.ai.agentserver.responses.models.models.BingCustomSearchConfiguration]
+ """
+
+ name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined name for this tool or configuration."""
+ description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined description for this tool or configuration."""
+ search_configurations: list["_models.BingCustomSearchConfiguration"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The project connections attached to this tool. There can be a maximum of 1 connection resource
+ attached to the tool. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ search_configurations: list["_models.BingCustomSearchConfiguration"],
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class BingGroundingSearchConfiguration(_Model):
+ """Search configuration for Bing Grounding.
+
+ :ivar name: Optional user-defined name for this tool or configuration.
+ :vartype name: str
+ :ivar description: Optional user-defined description for this tool or configuration.
+ :vartype description: str
+ :ivar project_connection_id: Project connection id for grounding with bing search. Required.
+ :vartype project_connection_id: str
+ :ivar market: The market where the results come from.
+ :vartype market: str
+ :ivar set_lang: The language to use for user interface strings when calling Bing API.
+ :vartype set_lang: str
+ :ivar count: The number of search results to return in the bing api response.
+ :vartype count: int
+ :ivar freshness: Filter search results by a specific time range. See `accepted values here
+ `_.
+ :vartype freshness: str
+ """
+
+ name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined name for this tool or configuration."""
+ description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined description for this tool or configuration."""
+ project_connection_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Project connection id for grounding with bing search. Required."""
+ market: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The market where the results come from."""
+ set_lang: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The language to use for user interface strings when calling Bing API."""
+ count: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The number of search results to return in the bing api response."""
+ freshness: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Filter search results by a specific time range. See `accepted values here
+ `_."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ project_connection_id: str,
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ market: Optional[str] = None,
+ set_lang: Optional[str] = None,
+ count: Optional[int] = None,
+ freshness: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class BingGroundingSearchToolParameters(_Model):
+ """The bing grounding search tool parameters.
+
+ :ivar name: Optional user-defined name for this tool or configuration.
+ :vartype name: str
+ :ivar description: Optional user-defined description for this tool or configuration.
+ :vartype description: str
+ :ivar search_configurations: The search configurations attached to this tool. There can be a
+ maximum of 1 search configuration resource attached to the tool. Required.
+ :vartype search_configurations:
+ list[~azure.ai.agentserver.responses.models.models.BingGroundingSearchConfiguration]
+ """
+
+ name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined name for this tool or configuration."""
+ description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined description for this tool or configuration."""
+ search_configurations: list["_models.BingGroundingSearchConfiguration"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The search configurations attached to this tool. There can be a maximum of 1 search
+ configuration resource attached to the tool. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ search_configurations: list["_models.BingGroundingSearchConfiguration"],
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class BingGroundingTool(Tool, discriminator="bing_grounding"):
+ """The input definition information for a bing grounding search tool as used to configure an
+ agent.
+
+ :ivar type: The object type, which is always 'bing_grounding'. Required. BING_GROUNDING.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.BING_GROUNDING
+ :ivar name: Optional user-defined name for this tool or configuration.
+ :vartype name: str
+ :ivar description: Optional user-defined description for this tool or configuration.
+ :vartype description: str
+ :ivar bing_grounding: The bing grounding search tool parameters. Required.
+ :vartype bing_grounding:
+ ~azure.ai.agentserver.responses.models.models.BingGroundingSearchToolParameters
+ """
+
+ type: Literal[ToolType.BING_GROUNDING] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The object type, which is always 'bing_grounding'. Required. BING_GROUNDING."""
+ name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined name for this tool or configuration."""
+ description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined description for this tool or configuration."""
+ bing_grounding: "_models.BingGroundingSearchToolParameters" = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The bing grounding search tool parameters. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ bing_grounding: "_models.BingGroundingSearchToolParameters",
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ToolType.BING_GROUNDING # type: ignore
+
+
+class BingGroundingToolCall(OutputItem, discriminator="bing_grounding_call"):
+ """A Bing grounding tool call.
+
+ :ivar agent_reference: The agent that created the item.
+ :vartype agent_reference: ~azure.ai.agentserver.responses.models.models.AgentReference
+ :ivar response_id: The response on which the item is created.
+ :vartype response_id: str
+ :ivar type: Required. BING_GROUNDING_CALL.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.BING_GROUNDING_CALL
+ :ivar call_id: The unique ID of the tool call generated by the model. Required.
+ :vartype call_id: str
+ :ivar arguments: A JSON string of the arguments to pass to the tool. Required.
+ :vartype arguments: str
+ :ivar status: The status of the tool call. Required. Known values are: "in_progress",
+ "completed", "incomplete", and "failed".
+ :vartype status: str or ~azure.ai.agentserver.responses.models.models.ToolCallStatus
+ :ivar id: Required.
+ :vartype id: str
+ """
+
+ type: Literal[OutputItemType.BING_GROUNDING_CALL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Required. BING_GROUNDING_CALL."""
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the tool call generated by the model. Required."""
+ arguments: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """A JSON string of the arguments to pass to the tool. Required."""
+ status: Union[str, "_models.ToolCallStatus"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the tool call. Required. Known values are: \"in_progress\", \"completed\",
+ \"incomplete\", and \"failed\"."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ call_id: str,
+ arguments: str,
+ status: Union[str, "_models.ToolCallStatus"],
+ id: str, # pylint: disable=redefined-builtin
+ agent_reference: Optional["_models.AgentReference"] = None,
+ response_id: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputItemType.BING_GROUNDING_CALL # type: ignore
+
+
+class BingGroundingToolCallOutput(OutputItem, discriminator="bing_grounding_call_output"):
+ """The output of a Bing grounding tool call.
+
+ :ivar agent_reference: The agent that created the item.
+ :vartype agent_reference: ~azure.ai.agentserver.responses.models.models.AgentReference
+ :ivar response_id: The response on which the item is created.
+ :vartype response_id: str
+ :ivar type: Required. BING_GROUNDING_CALL_OUTPUT.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.BING_GROUNDING_CALL_OUTPUT
+ :ivar call_id: The unique ID of the tool call generated by the model. Required.
+ :vartype call_id: str
+ :ivar output: The output from the Bing grounding tool call. Is one of the following types:
+ {str: Any}, str, [Any]
+ :vartype output: dict[str, any] or str or list[any]
+ :ivar status: The status of the tool call. Required. Known values are: "in_progress",
+ "completed", "incomplete", and "failed".
+ :vartype status: str or ~azure.ai.agentserver.responses.models.models.ToolCallStatus
+ :ivar id: Required.
+ :vartype id: str
+ """
+
+ type: Literal[OutputItemType.BING_GROUNDING_CALL_OUTPUT] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Required. BING_GROUNDING_CALL_OUTPUT."""
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the tool call generated by the model. Required."""
+ output: Optional["_types.ToolCallOutputContent"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The output from the Bing grounding tool call. Is one of the following types: {str: Any}, str,
+ [Any]"""
+ status: Union[str, "_models.ToolCallStatus"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the tool call. Required. Known values are: \"in_progress\", \"completed\",
+ \"incomplete\", and \"failed\"."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ call_id: str,
+ status: Union[str, "_models.ToolCallStatus"],
+ id: str, # pylint: disable=redefined-builtin
+ agent_reference: Optional["_models.AgentReference"] = None,
+ response_id: Optional[str] = None,
+ output: Optional["_types.ToolCallOutputContent"] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputItemType.BING_GROUNDING_CALL_OUTPUT # type: ignore
+
+
+class BrowserAutomationPreviewTool(Tool, discriminator="browser_automation_preview"):
+ """The input definition information for a Browser Automation Tool, as used to configure an Agent.
+
+ :ivar type: The object type, which is always 'browser_automation_preview'. Required.
+ BROWSER_AUTOMATION_PREVIEW.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.BROWSER_AUTOMATION_PREVIEW
+ :ivar name: Optional user-defined name for this tool or configuration.
+ :vartype name: str
+ :ivar description: Optional user-defined description for this tool or configuration.
+ :vartype description: str
+ :ivar browser_automation_preview: The Browser Automation Tool parameters. Required.
+ :vartype browser_automation_preview:
+ ~azure.ai.agentserver.responses.models.models.BrowserAutomationToolParameters
+ """
+
+ type: Literal[ToolType.BROWSER_AUTOMATION_PREVIEW] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The object type, which is always 'browser_automation_preview'. Required.
+ BROWSER_AUTOMATION_PREVIEW."""
+ name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined name for this tool or configuration."""
+ description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined description for this tool or configuration."""
+ browser_automation_preview: "_models.BrowserAutomationToolParameters" = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The Browser Automation Tool parameters. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ browser_automation_preview: "_models.BrowserAutomationToolParameters",
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ToolType.BROWSER_AUTOMATION_PREVIEW # type: ignore
+
+
+class BrowserAutomationToolCall(OutputItem, discriminator="browser_automation_preview_call"):
+ """A browser automation tool call.
+
+ :ivar agent_reference: The agent that created the item.
+ :vartype agent_reference: ~azure.ai.agentserver.responses.models.models.AgentReference
+ :ivar response_id: The response on which the item is created.
+ :vartype response_id: str
+ :ivar type: Required. BROWSER_AUTOMATION_PREVIEW_CALL.
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.BROWSER_AUTOMATION_PREVIEW_CALL
+ :ivar call_id: The unique ID of the tool call generated by the model. Required.
+ :vartype call_id: str
+ :ivar arguments: A JSON string of the arguments to pass to the tool. Required.
+ :vartype arguments: str
+ :ivar status: The status of the tool call. Required. Known values are: "in_progress",
+ "completed", "incomplete", and "failed".
+ :vartype status: str or ~azure.ai.agentserver.responses.models.models.ToolCallStatus
+ :ivar id: Required.
+ :vartype id: str
+ """
+
+ type: Literal[OutputItemType.BROWSER_AUTOMATION_PREVIEW_CALL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Required. BROWSER_AUTOMATION_PREVIEW_CALL."""
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the tool call generated by the model. Required."""
+ arguments: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """A JSON string of the arguments to pass to the tool. Required."""
+ status: Union[str, "_models.ToolCallStatus"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the tool call. Required. Known values are: \"in_progress\", \"completed\",
+ \"incomplete\", and \"failed\"."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ call_id: str,
+ arguments: str,
+ status: Union[str, "_models.ToolCallStatus"],
+ id: str, # pylint: disable=redefined-builtin
+ agent_reference: Optional["_models.AgentReference"] = None,
+ response_id: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputItemType.BROWSER_AUTOMATION_PREVIEW_CALL # type: ignore
+
+
+class BrowserAutomationToolCallOutput(OutputItem, discriminator="browser_automation_preview_call_output"):
+ """The output of a browser automation tool call.
+
+ :ivar agent_reference: The agent that created the item.
+ :vartype agent_reference: ~azure.ai.agentserver.responses.models.models.AgentReference
+ :ivar response_id: The response on which the item is created.
+ :vartype response_id: str
+ :ivar type: Required. BROWSER_AUTOMATION_PREVIEW_CALL_OUTPUT.
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.BROWSER_AUTOMATION_PREVIEW_CALL_OUTPUT
+ :ivar call_id: The unique ID of the tool call generated by the model. Required.
+ :vartype call_id: str
+ :ivar output: The output from the browser automation tool call. Is one of the following types:
+ {str: Any}, str, [Any]
+ :vartype output: dict[str, any] or str or list[any]
+ :ivar status: The status of the tool call. Required. Known values are: "in_progress",
+ "completed", "incomplete", and "failed".
+ :vartype status: str or ~azure.ai.agentserver.responses.models.models.ToolCallStatus
+ :ivar id: Required.
+ :vartype id: str
+ """
+
+ type: Literal[OutputItemType.BROWSER_AUTOMATION_PREVIEW_CALL_OUTPUT] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Required. BROWSER_AUTOMATION_PREVIEW_CALL_OUTPUT."""
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the tool call generated by the model. Required."""
+ output: Optional["_types.ToolCallOutputContent"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The output from the browser automation tool call. Is one of the following types: {str: Any},
+ str, [Any]"""
+ status: Union[str, "_models.ToolCallStatus"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the tool call. Required. Known values are: \"in_progress\", \"completed\",
+ \"incomplete\", and \"failed\"."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ call_id: str,
+ status: Union[str, "_models.ToolCallStatus"],
+ id: str, # pylint: disable=redefined-builtin
+ agent_reference: Optional["_models.AgentReference"] = None,
+ response_id: Optional[str] = None,
+ output: Optional["_types.ToolCallOutputContent"] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputItemType.BROWSER_AUTOMATION_PREVIEW_CALL_OUTPUT # type: ignore
+
+
+class BrowserAutomationToolConnectionParameters(_Model): # pylint: disable=name-too-long
+ """Definition of input parameters for the connection used by the Browser Automation Tool.
+
+ :ivar name: Optional user-defined name for this tool or configuration.
+ :vartype name: str
+ :ivar description: Optional user-defined description for this tool or configuration.
+ :vartype description: str
+ :ivar project_connection_id: The ID of the project connection to your Azure Playwright
+ resource. Required.
+ :vartype project_connection_id: str
+ """
+
+ name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined name for this tool or configuration."""
+ description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined description for this tool or configuration."""
+ project_connection_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the project connection to your Azure Playwright resource. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ project_connection_id: str,
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class BrowserAutomationToolParameters(_Model):
+ """Definition of input parameters for the Browser Automation Tool.
+
+ :ivar name: Optional user-defined name for this tool or configuration.
+ :vartype name: str
+ :ivar description: Optional user-defined description for this tool or configuration.
+ :vartype description: str
+ :ivar connection: The project connection parameters associated with the Browser Automation
+ Tool. Required.
+ :vartype connection:
+ ~azure.ai.agentserver.responses.models.models.BrowserAutomationToolConnectionParameters
+ """
+
+ name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined name for this tool or configuration."""
+ description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined description for this tool or configuration."""
+ connection: "_models.BrowserAutomationToolConnectionParameters" = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The project connection parameters associated with the Browser Automation Tool. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ connection: "_models.BrowserAutomationToolConnectionParameters",
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class CaptureStructuredOutputsTool(Tool, discriminator="capture_structured_outputs"):
+ """A tool for capturing structured outputs.
+
+ :ivar type: The type of the tool. Always ``capture_structured_outputs``. Required.
+ CAPTURE_STRUCTURED_OUTPUTS.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.CAPTURE_STRUCTURED_OUTPUTS
+ :ivar outputs: The structured outputs to capture from the model. Required.
+ :vartype outputs: ~azure.ai.agentserver.responses.models.models.StructuredOutputDefinition
+ """
+
+ type: Literal[ToolType.CAPTURE_STRUCTURED_OUTPUTS] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the tool. Always ``capture_structured_outputs``. Required.
+ CAPTURE_STRUCTURED_OUTPUTS."""
+ outputs: "_models.StructuredOutputDefinition" = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The structured outputs to capture from the model. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ outputs: "_models.StructuredOutputDefinition",
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ToolType.CAPTURE_STRUCTURED_OUTPUTS # type: ignore
+
+
+class MemoryItem(_Model):
+ """A single memory item stored in the memory store, containing content and metadata.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ ChatSummaryMemoryItem, UserProfileMemoryItem
+
+ :ivar memory_id: The unique ID of the memory item. Required.
+ :vartype memory_id: str
+ :ivar updated_at: The last update time of the memory item. Required.
+ :vartype updated_at: ~datetime.datetime
+ :ivar scope: The namespace that logically groups and isolates memories, such as a user ID.
+ Required.
+ :vartype scope: str
+ :ivar content: The content of the memory. Required.
+ :vartype content: str
+ :ivar kind: The kind of the memory item. Required. Known values are: "user_profile" and
+ "chat_summary".
+ :vartype kind: str or ~azure.ai.agentserver.responses.models.models.MemoryItemKind
+ """
+
+ __mapping__: dict[str, _Model] = {}
+ memory_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the memory item. Required."""
+ updated_at: datetime.datetime = rest_field(
+ visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp"
+ )
+ """The last update time of the memory item. Required."""
+ scope: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The namespace that logically groups and isolates memories, such as a user ID. Required."""
+ content: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The content of the memory. Required."""
+ kind: str = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"])
+ """The kind of the memory item. Required. Known values are: \"user_profile\" and \"chat_summary\"."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ memory_id: str,
+ updated_at: datetime.datetime,
+ scope: str,
+ content: str,
+ kind: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class ChatSummaryMemoryItem(MemoryItem, discriminator="chat_summary"):
+ """A memory item containing a summary extracted from conversations.
+
+ :ivar memory_id: The unique ID of the memory item. Required.
+ :vartype memory_id: str
+ :ivar updated_at: The last update time of the memory item. Required.
+ :vartype updated_at: ~datetime.datetime
+ :ivar scope: The namespace that logically groups and isolates memories, such as a user ID.
+ Required.
+ :vartype scope: str
+ :ivar content: The content of the memory. Required.
+ :vartype content: str
+ :ivar kind: The kind of the memory item. Required. Summary of chat conversations.
+ :vartype kind: str or ~azure.ai.agentserver.responses.models.models.CHAT_SUMMARY
+ """
+
+ kind: Literal[MemoryItemKind.CHAT_SUMMARY] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The kind of the memory item. Required. Summary of chat conversations."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ memory_id: str,
+ updated_at: datetime.datetime,
+ scope: str,
+ content: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.kind = MemoryItemKind.CHAT_SUMMARY # type: ignore
+
+
+class ComputerAction(_Model):
+ """ComputerAction.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ ClickParam, DoubleClickAction, DragParam, KeyPressAction, MoveParam, ScreenshotParam,
+ ScrollParam, TypeParam, WaitParam
+
+ :ivar type: Required. Known values are: "click", "double_click", "drag", "keypress", "move",
+ "screenshot", "scroll", "type", and "wait".
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.ComputerActionType
+ """
+
+ __mapping__: dict[str, _Model] = {}
+ type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])
+ """Required. Known values are: \"click\", \"double_click\", \"drag\", \"keypress\", \"move\",
+ \"screenshot\", \"scroll\", \"type\", and \"wait\"."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ type: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class ClickParam(ComputerAction, discriminator="click"):
+ """Click.
+
+ :ivar type: Specifies the event type. For a click action, this property is always ``click``.
+ Required. CLICK.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.CLICK
+ :ivar button: Indicates which mouse button was pressed during the click. One of ``left``,
+ ``right``, ``wheel``, ``back``, or ``forward``. Required. Known values are: "left", "right",
+ "wheel", "back", and "forward".
+ :vartype button: str or ~azure.ai.agentserver.responses.models.models.ClickButtonType
+ :ivar x: The x-coordinate where the click occurred. Required.
+ :vartype x: int
+ :ivar y: The y-coordinate where the click occurred. Required.
+ :vartype y: int
+ """
+
+ type: Literal[ComputerActionType.CLICK] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Specifies the event type. For a click action, this property is always ``click``. Required.
+ CLICK."""
+ button: Union[str, "_models.ClickButtonType"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Indicates which mouse button was pressed during the click. One of ``left``, ``right``,
+ ``wheel``, ``back``, or ``forward``. Required. Known values are: \"left\", \"right\",
+ \"wheel\", \"back\", and \"forward\"."""
+ x: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The x-coordinate where the click occurred. Required."""
+ y: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The y-coordinate where the click occurred. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ button: Union[str, "_models.ClickButtonType"],
+ x: int,
+ y: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ComputerActionType.CLICK # type: ignore
+
+
+class CodeInterpreterOutputImage(_Model):
+ """Code interpreter output image.
+
+ :ivar type: The type of the output. Always ``image``. Required. Default value is "image".
+ :vartype type: str
+ :ivar url: The URL of the image output from the code interpreter. Required.
+ :vartype url: str
+ """
+
+ type: Literal["image"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The type of the output. Always ``image``. Required. Default value is \"image\"."""
+ url: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The URL of the image output from the code interpreter. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ url: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type: Literal["image"] = "image"
+
+
+class CodeInterpreterOutputLogs(_Model):
+ """Code interpreter output logs.
+
+ :ivar type: The type of the output. Always ``logs``. Required. Default value is "logs".
+ :vartype type: str
+ :ivar logs: The logs output from the code interpreter. Required.
+ :vartype logs: str
+ """
+
+ type: Literal["logs"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The type of the output. Always ``logs``. Required. Default value is \"logs\"."""
+ logs: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The logs output from the code interpreter. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ logs: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type: Literal["logs"] = "logs"
+
+
+class CodeInterpreterTool(Tool, discriminator="code_interpreter"):
+ """Code interpreter.
+
+ :ivar type: The type of the code interpreter tool. Always ``code_interpreter``. Required.
+ CODE_INTERPRETER.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.CODE_INTERPRETER
+ :ivar name: Optional user-defined name for this tool or configuration.
+ :vartype name: str
+ :ivar description: Optional user-defined description for this tool or configuration.
+ :vartype description: str
+ :ivar container: The code interpreter container. Can be a container ID or an object that
+ specifies uploaded file IDs to make available to your code, along with an optional
+ ``memory_limit`` setting. If not provided, the service assumes auto. Is either a str type or a
+ AutoCodeInterpreterToolParam type.
+ :vartype container: str or
+ ~azure.ai.agentserver.responses.models.models.AutoCodeInterpreterToolParam
+ """
+
+ type: Literal[ToolType.CODE_INTERPRETER] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the code interpreter tool. Always ``code_interpreter``. Required. CODE_INTERPRETER."""
+ name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined name for this tool or configuration."""
+ description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined description for this tool or configuration."""
+ container: Optional[Union[str, "_models.AutoCodeInterpreterToolParam"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The code interpreter container. Can be a container ID or an object that specifies uploaded file
+ IDs to make available to your code, along with an optional ``memory_limit`` setting. If not
+ provided, the service assumes auto. Is either a str type or a AutoCodeInterpreterToolParam
+ type."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ container: Optional[Union[str, "_models.AutoCodeInterpreterToolParam"]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ToolType.CODE_INTERPRETER # type: ignore
+
+
+class CompactionSummaryItemParam(Item, discriminator="compaction"):
+ """Compaction item.
+
+ :ivar id:
+ :vartype id: str
+ :ivar type: The type of the item. Always ``compaction``. Required. COMPACTION.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.COMPACTION
+ :ivar encrypted_content: The encrypted content of the compaction summary. Required.
+ :vartype encrypted_content: str
+ """
+
+ id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ type: Literal[ItemType.COMPACTION] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the item. Always ``compaction``. Required. COMPACTION."""
+ encrypted_content: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The encrypted content of the compaction summary. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ encrypted_content: str,
+ id: Optional[str] = None, # pylint: disable=redefined-builtin
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ItemType.COMPACTION # type: ignore
+
+
+class CompactResource(_Model):
+ """The compacted response object.
+
+ :ivar id: The unique identifier for the compacted response. Required.
+ :vartype id: str
+ :ivar object: The object type. Always ``response.compaction``. Required. Default value is
+ "response.compaction".
+ :vartype object: str
+ :ivar output: The compacted list of output items. Required.
+ :vartype output: list[~azure.ai.agentserver.responses.models.models.ItemField]
+ :ivar created_at: Unix timestamp (in seconds) when the compacted conversation was created.
+ Required.
+ :vartype created_at: ~datetime.datetime
+ :ivar usage: Token accounting for the compaction pass, including cached, reasoning, and total
+ tokens. Required.
+ :vartype usage: ~azure.ai.agentserver.responses.models.models.ResponseUsage
+ """
+
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique identifier for the compacted response. Required."""
+ object: Literal["response.compaction"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The object type. Always ``response.compaction``. Required. Default value is
+ \"response.compaction\"."""
+ output: list["_models.ItemField"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The compacted list of output items. Required."""
+ created_at: datetime.datetime = rest_field(
+ visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp"
+ )
+ """Unix timestamp (in seconds) when the compacted conversation was created. Required."""
+ usage: "_models.ResponseUsage" = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Token accounting for the compaction pass, including cached, reasoning, and total tokens.
+ Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ output: list["_models.ItemField"],
+ created_at: datetime.datetime,
+ usage: "_models.ResponseUsage",
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.object: Literal["response.compaction"] = "response.compaction"
+
+
+class ComparisonFilter(_Model):
+ """Comparison Filter.
+
+ :ivar type: Specifies the comparison operator: ``eq``, ``ne``, ``gt``, ``gte``, ``lt``,
+ ``lte``, ``in``, ``nin``.
+
+ * `eq`: equals
+ * `ne`: not equal
+ * `gt`: greater than
+ * `gte`: greater than or equal
+ * `lt`: less than
+ * `lte`: less than or equal
+ * `in`: in
+ * `nin`: not in. Required. Is one of the following types: Literal["eq"], Literal["ne"],
+ Literal["gt"], Literal["gte"], Literal["lt"], Literal["lte"]
+ :vartype type: str or str or str or str or str or str
+ :ivar key: The key to compare against the value. Required.
+ :vartype key: str
+ :ivar value: The value to compare against the attribute key; supports string, number, or
+ boolean types. Required. Is one of the following types: str, int, bool, [Union[str, int]]
+ :vartype value: str or int or bool or list[str or int]
+ """
+
+ type: Literal["eq", "ne", "gt", "gte", "lt", "lte"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Specifies the comparison operator: ``eq``, ``ne``, ``gt``, ``gte``, ``lt``, ``lte``, ``in``,
+ ``nin``.
+
+ * `eq`: equals
+ * `ne`: not equal
+ * `gt`: greater than
+ * `gte`: greater than or equal
+ * `lt`: less than
+ * `lte`: less than or equal
+ * `in`: in
+ * `nin`: not in. Required. Is one of the following types: Literal[\"eq\"],
+ Literal[\"ne\"], Literal[\"gt\"], Literal[\"gte\"], Literal[\"lt\"], Literal[\"lte\"]"""
+ key: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The key to compare against the value. Required."""
+ value: Union[str, int, bool, list[Union[str, int]]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The value to compare against the attribute key; supports string, number, or boolean types.
+ Required. Is one of the following types: str, int, bool, [Union[str, int]]"""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ type: Literal["eq", "ne", "gt", "gte", "lt", "lte"],
+ key: str,
+ value: Union[str, int, bool, list[Union[str, int]]],
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class CompoundFilter(_Model):
+ """Compound Filter.
+
+ :ivar type: Type of operation: ``and`` or ``or``. Required. Is either a Literal["and"] type or
+ a Literal["or"] type.
+ :vartype type: str or str
+ :ivar filters: Array of filters to combine. Items can be ``ComparisonFilter`` or
+ ``CompoundFilter``. Required.
+ :vartype filters: list[~azure.ai.agentserver.responses.models.models.ComparisonFilter or any]
+ """
+
+ type: Literal["and", "or"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Type of operation: ``and`` or ``or``. Required. Is either a Literal[\"and\"] type or a
+ Literal[\"or\"] type."""
+ filters: list[Union["_models.ComparisonFilter", Any]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Array of filters to combine. Items can be ``ComparisonFilter`` or ``CompoundFilter``. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ type: Literal["and", "or"],
+ filters: list[Union["_models.ComparisonFilter", Any]],
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class ComputerCallOutputItemParam(Item, discriminator="computer_call_output"):
+ """Computer tool call output.
+
+ :ivar id:
+ :vartype id: str
+ :ivar call_id: The ID of the computer tool call that produced the output. Required.
+ :vartype call_id: str
+ :ivar type: The type of the computer tool call output. Always ``computer_call_output``.
+ Required. COMPUTER_CALL_OUTPUT.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.COMPUTER_CALL_OUTPUT
+ :ivar output: Required.
+ :vartype output: ~azure.ai.agentserver.responses.models.models.ComputerScreenshotImage
+ :ivar acknowledged_safety_checks:
+ :vartype acknowledged_safety_checks:
+ list[~azure.ai.agentserver.responses.models.models.ComputerCallSafetyCheckParam]
+ :ivar status: Known values are: "in_progress", "completed", and "incomplete".
+ :vartype status: str or ~azure.ai.agentserver.responses.models.models.FunctionCallItemStatus
+ """
+
+ id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the computer tool call that produced the output. Required."""
+ type: Literal[ItemType.COMPUTER_CALL_OUTPUT] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the computer tool call output. Always ``computer_call_output``. Required.
+ COMPUTER_CALL_OUTPUT."""
+ output: "_models.ComputerScreenshotImage" = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+ acknowledged_safety_checks: Optional[list["_models.ComputerCallSafetyCheckParam"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ status: Optional[Union[str, "_models.FunctionCallItemStatus"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Known values are: \"in_progress\", \"completed\", and \"incomplete\"."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ call_id: str,
+ output: "_models.ComputerScreenshotImage",
+ id: Optional[str] = None, # pylint: disable=redefined-builtin
+ acknowledged_safety_checks: Optional[list["_models.ComputerCallSafetyCheckParam"]] = None,
+ status: Optional[Union[str, "_models.FunctionCallItemStatus"]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ItemType.COMPUTER_CALL_OUTPUT # type: ignore
+
+
+class ComputerCallSafetyCheckParam(_Model):
+ """A pending safety check for the computer call.
+
+ :ivar id: The ID of the pending safety check. Required.
+ :vartype id: str
+ :ivar code:
+ :vartype code: str
+ :ivar message:
+ :vartype message: str
+ """
+
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the pending safety check. Required."""
+ code: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ message: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ code: Optional[str] = None,
+ message: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class MessageContent(_Model):
+ """A content part that makes up an input or output item.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ ComputerScreenshotContent, MessageContentInputFileContent, MessageContentInputImageContent,
+ MessageContentInputTextContent, MessageContentOutputTextContent,
+ MessageContentReasoningTextContent, MessageContentRefusalContent, SummaryTextContent,
+ TextContent
+
+ :ivar type: Required. Known values are: "input_text", "output_text", "text", "summary_text",
+ "reasoning_text", "refusal", "input_image", "computer_screenshot", and "input_file".
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.MessageContentType
+ """
+
+ __mapping__: dict[str, _Model] = {}
+ type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])
+ """Required. Known values are: \"input_text\", \"output_text\", \"text\", \"summary_text\",
+ \"reasoning_text\", \"refusal\", \"input_image\", \"computer_screenshot\", and \"input_file\"."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ type: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class ComputerScreenshotContent(MessageContent, discriminator="computer_screenshot"):
+ """Computer screenshot.
+
+ :ivar type: Specifies the event type. For a computer screenshot, this property is always set to
+ ``computer_screenshot``. Required. COMPUTER_SCREENSHOT.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.COMPUTER_SCREENSHOT
+ :ivar image_url: Required.
+ :vartype image_url: str
+ :ivar file_id: Required.
+ :vartype file_id: str
+ """
+
+ type: Literal[MessageContentType.COMPUTER_SCREENSHOT] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Specifies the event type. For a computer screenshot, this property is always set to
+ ``computer_screenshot``. Required. COMPUTER_SCREENSHOT."""
+ image_url: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+ file_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ image_url: str,
+ file_id: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = MessageContentType.COMPUTER_SCREENSHOT # type: ignore
+
+
+class ComputerScreenshotImage(_Model):
+ """A computer screenshot image used with the computer use tool.
+
+ :ivar type: Specifies the event type. For a computer screenshot, this property is always set to
+ ``computer_screenshot``. Required. Default value is "computer_screenshot".
+ :vartype type: str
+ :ivar image_url: The URL of the screenshot image.
+ :vartype image_url: str
+ :ivar file_id: The identifier of an uploaded file that contains the screenshot.
+ :vartype file_id: str
+ """
+
+ type: Literal["computer_screenshot"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Specifies the event type. For a computer screenshot, this property is always set to
+ ``computer_screenshot``. Required. Default value is \"computer_screenshot\"."""
+ image_url: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The URL of the screenshot image."""
+ file_id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The identifier of an uploaded file that contains the screenshot."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ image_url: Optional[str] = None,
+ file_id: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type: Literal["computer_screenshot"] = "computer_screenshot"
+
+
+class ComputerUsePreviewTool(Tool, discriminator="computer_use_preview"):
+ """Computer use preview.
+
+ :ivar type: The type of the computer use tool. Always ``computer_use_preview``. Required.
+ COMPUTER_USE_PREVIEW.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.COMPUTER_USE_PREVIEW
+ :ivar environment: The type of computer environment to control. Required. Known values are:
+ "windows", "mac", "linux", "ubuntu", and "browser".
+ :vartype environment: str or ~azure.ai.agentserver.responses.models.models.ComputerEnvironment
+ :ivar display_width: The width of the computer display. Required.
+ :vartype display_width: int
+ :ivar display_height: The height of the computer display. Required.
+ :vartype display_height: int
+ """
+
+ type: Literal[ToolType.COMPUTER_USE_PREVIEW] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the computer use tool. Always ``computer_use_preview``. Required.
+ COMPUTER_USE_PREVIEW."""
+ environment: Union[str, "_models.ComputerEnvironment"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The type of computer environment to control. Required. Known values are: \"windows\", \"mac\",
+ \"linux\", \"ubuntu\", and \"browser\"."""
+ display_width: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The width of the computer display. Required."""
+ display_height: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The height of the computer display. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ environment: Union[str, "_models.ComputerEnvironment"],
+ display_width: int,
+ display_height: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ToolType.COMPUTER_USE_PREVIEW # type: ignore
+
+
+class FunctionShellToolParamEnvironment(_Model):
+ """FunctionShellToolParamEnvironment.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ ContainerAutoParam, FunctionShellToolParamEnvironmentContainerReferenceParam,
+ FunctionShellToolParamEnvironmentLocalEnvironmentParam
+
+ :ivar type: Required. Known values are: "container_auto", "local", and "container_reference".
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.FunctionShellToolParamEnvironmentType
+ """
+
+ __mapping__: dict[str, _Model] = {}
+ type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])
+ """Required. Known values are: \"container_auto\", \"local\", and \"container_reference\"."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ type: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class ContainerAutoParam(FunctionShellToolParamEnvironment, discriminator="container_auto"):
+ """ContainerAutoParam.
+
+ :ivar type: Automatically creates a container for this request. Required. CONTAINER_AUTO.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.CONTAINER_AUTO
+ :ivar file_ids: An optional list of uploaded files to make available to your code.
+ :vartype file_ids: list[str]
+ :ivar memory_limit: Known values are: "1g", "4g", "16g", and "64g".
+ :vartype memory_limit: str or
+ ~azure.ai.agentserver.responses.models.models.ContainerMemoryLimit
+ :ivar skills: An optional list of skills referenced by id or inline data.
+ :vartype skills: list[~azure.ai.agentserver.responses.models.models.ContainerSkill]
+ :ivar network_policy:
+ :vartype network_policy:
+ ~azure.ai.agentserver.responses.models.models.ContainerNetworkPolicyParam
+ """
+
+ type: Literal[FunctionShellToolParamEnvironmentType.CONTAINER_AUTO] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Automatically creates a container for this request. Required. CONTAINER_AUTO."""
+ file_ids: Optional[list[str]] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """An optional list of uploaded files to make available to your code."""
+ memory_limit: Optional[Union[str, "_models.ContainerMemoryLimit"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Known values are: \"1g\", \"4g\", \"16g\", and \"64g\"."""
+ skills: Optional[list["_models.ContainerSkill"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """An optional list of skills referenced by id or inline data."""
+ network_policy: Optional["_models.ContainerNetworkPolicyParam"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+
+ @overload
+ def __init__(
+ self,
+ *,
+ file_ids: Optional[list[str]] = None,
+ memory_limit: Optional[Union[str, "_models.ContainerMemoryLimit"]] = None,
+ skills: Optional[list["_models.ContainerSkill"]] = None,
+ network_policy: Optional["_models.ContainerNetworkPolicyParam"] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = FunctionShellToolParamEnvironmentType.CONTAINER_AUTO # type: ignore
+
+
+class ContainerFileCitationBody(Annotation, discriminator="container_file_citation"):
+ """Container file citation.
+
+ :ivar type: The type of the container file citation. Always ``container_file_citation``.
+ Required. CONTAINER_FILE_CITATION.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.CONTAINER_FILE_CITATION
+ :ivar container_id: The ID of the container file. Required.
+ :vartype container_id: str
+ :ivar file_id: The ID of the file. Required.
+ :vartype file_id: str
+ :ivar start_index: The index of the first character of the container file citation in the
+ message. Required.
+ :vartype start_index: int
+ :ivar end_index: The index of the last character of the container file citation in the message.
+ Required.
+ :vartype end_index: int
+ :ivar filename: The filename of the container file cited. Required.
+ :vartype filename: str
+ """
+
+ type: Literal[AnnotationType.CONTAINER_FILE_CITATION] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the container file citation. Always ``container_file_citation``. Required.
+ CONTAINER_FILE_CITATION."""
+ container_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the container file. Required."""
+ file_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the file. Required."""
+ start_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the first character of the container file citation in the message. Required."""
+ end_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the last character of the container file citation in the message. Required."""
+ filename: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The filename of the container file cited. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ container_id: str,
+ file_id: str,
+ start_index: int,
+ end_index: int,
+ filename: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = AnnotationType.CONTAINER_FILE_CITATION # type: ignore
+
+
+class ContainerNetworkPolicyParam(_Model):
+ """Network access policy for the container.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ ContainerNetworkPolicyAllowlistParam, ContainerNetworkPolicyDisabledParam
+
+ :ivar type: Required. Known values are: "disabled" and "allowlist".
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.ContainerNetworkPolicyParamType
+ """
+
+ __mapping__: dict[str, _Model] = {}
+ type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])
+ """Required. Known values are: \"disabled\" and \"allowlist\"."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ type: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class ContainerNetworkPolicyAllowlistParam(ContainerNetworkPolicyParam, discriminator="allowlist"):
+ """ContainerNetworkPolicyAllowlistParam.
+
+ :ivar type: Allow outbound network access only to specified domains. Always ``allowlist``.
+ Required. ALLOWLIST.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.ALLOWLIST
+ :ivar allowed_domains: A list of allowed domains when type is ``allowlist``. Required.
+ :vartype allowed_domains: list[str]
+ :ivar domain_secrets: Optional domain-scoped secrets for allowlisted domains.
+ :vartype domain_secrets:
+ list[~azure.ai.agentserver.responses.models.models.ContainerNetworkPolicyDomainSecretParam]
+ """
+
+ type: Literal[ContainerNetworkPolicyParamType.ALLOWLIST] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Allow outbound network access only to specified domains. Always ``allowlist``. Required.
+ ALLOWLIST."""
+ allowed_domains: list[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """A list of allowed domains when type is ``allowlist``. Required."""
+ domain_secrets: Optional[list["_models.ContainerNetworkPolicyDomainSecretParam"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Optional domain-scoped secrets for allowlisted domains."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ allowed_domains: list[str],
+ domain_secrets: Optional[list["_models.ContainerNetworkPolicyDomainSecretParam"]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ContainerNetworkPolicyParamType.ALLOWLIST # type: ignore
+
+
+class ContainerNetworkPolicyDisabledParam(ContainerNetworkPolicyParam, discriminator="disabled"):
+ """ContainerNetworkPolicyDisabledParam.
+
+ :ivar type: Disable outbound network access. Always ``disabled``. Required. DISABLED.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.DISABLED
+ """
+
+ type: Literal[ContainerNetworkPolicyParamType.DISABLED] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Disable outbound network access. Always ``disabled``. Required. DISABLED."""
+
+ @overload
+ def __init__(
+ self,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ContainerNetworkPolicyParamType.DISABLED # type: ignore
+
+
+class ContainerNetworkPolicyDomainSecretParam(_Model):
+ """ContainerNetworkPolicyDomainSecretParam.
+
+ :ivar domain: The domain associated with the secret. Required.
+ :vartype domain: str
+ :ivar name: The name of the secret to inject for the domain. Required.
+ :vartype name: str
+ :ivar value: The secret value to inject for the domain. Required.
+ :vartype value: str
+ """
+
+ domain: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The domain associated with the secret. Required."""
+ name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The name of the secret to inject for the domain. Required."""
+ value: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The secret value to inject for the domain. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ domain: str,
+ name: str,
+ value: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class FunctionShellCallEnvironment(_Model):
+ """FunctionShellCallEnvironment.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ ContainerReferenceResource, LocalEnvironmentResource
+
+ :ivar type: Required. Known values are: "local" and "container_reference".
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.FunctionShellCallEnvironmentType
+ """
+
+ __mapping__: dict[str, _Model] = {}
+ type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])
+ """Required. Known values are: \"local\" and \"container_reference\"."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ type: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class ContainerReferenceResource(FunctionShellCallEnvironment, discriminator="container_reference"):
+ """Container Reference.
+
+ :ivar type: The environment type. Always ``container_reference``. Required.
+ CONTAINER_REFERENCE.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.CONTAINER_REFERENCE
+ :ivar container_id: Required.
+ :vartype container_id: str
+ """
+
+ type: Literal[FunctionShellCallEnvironmentType.CONTAINER_REFERENCE] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The environment type. Always ``container_reference``. Required. CONTAINER_REFERENCE."""
+ container_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ container_id: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = FunctionShellCallEnvironmentType.CONTAINER_REFERENCE # type: ignore
+
+
+class ContainerSkill(_Model):
+ """ContainerSkill.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ InlineSkillParam, SkillReferenceParam
+
+ :ivar type: Required. Known values are: "skill_reference" and "inline".
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.ContainerSkillType
+ """
+
+ __mapping__: dict[str, _Model] = {}
+ type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])
+ """Required. Known values are: \"skill_reference\" and \"inline\"."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ type: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class ContextManagementParam(_Model):
+ """ContextManagementParam.
+
+ :ivar type: The context management entry type. Currently only 'compaction' is supported.
+ Required.
+ :vartype type: str
+ :ivar compact_threshold:
+ :vartype compact_threshold: int
+ """
+
+ type: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The context management entry type. Currently only 'compaction' is supported. Required."""
+ compact_threshold: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+
+ @overload
+ def __init__(
+ self,
+ *,
+ type: str,
+ compact_threshold: Optional[int] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class ConversationParam_2(_Model):
+ """Conversation object.
+
+ :ivar id: The unique ID of the conversation. Required.
+ :vartype id: str
+ """
+
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the conversation. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class ConversationReference(_Model):
+ """Conversation.
+
+ :ivar id: The unique ID of the conversation that this response was associated with. Required.
+ :vartype id: str
+ """
+
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the conversation that this response was associated with. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class CoordParam(_Model):
+ """Coordinate.
+
+ :ivar x: The x-coordinate. Required.
+ :vartype x: int
+ :ivar y: The y-coordinate. Required.
+ :vartype y: int
+ """
+
+ x: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The x-coordinate. Required."""
+ y: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The y-coordinate. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ x: int,
+ y: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class CreateResponse(_Model):
+ """CreateResponse.
+
+ :ivar metadata:
+ :vartype metadata: ~azure.ai.agentserver.responses.models.models.Metadata
+ :ivar top_logprobs:
+ :vartype top_logprobs: int
+ :ivar temperature:
+ :vartype temperature: int
+ :ivar top_p:
+ :vartype top_p: int
+ :ivar user: This field is being replaced by ``safety_identifier`` and ``prompt_cache_key``. Use
+ ``prompt_cache_key`` instead to maintain caching optimizations. A stable identifier for your
+ end-users. Used to boost cache hit rates by better bucketing similar requests and to help
+ OpenAI detect and prevent abuse. `Learn more
+ `_.
+ :vartype user: str
+ :ivar safety_identifier: A stable identifier used to help detect users of your application that
+ may be violating OpenAI's usage policies. The IDs should be a string that uniquely identifies
+ each user. We recommend hashing their username or email address, in order to avoid sending us
+ any identifying information. `Learn more
+ `_.
+ :vartype safety_identifier: str
+ :ivar prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your
+ cache hit rates. Replaces the ``user`` field. `Learn more `_.
+ :vartype prompt_cache_key: str
+ :ivar service_tier: Is one of the following types: Literal["auto"], Literal["default"],
+ Literal["flex"], Literal["scale"], Literal["priority"]
+ :vartype service_tier: str or str or str or str or str
+ :ivar prompt_cache_retention: Is either a Literal["in-memory"] type or a Literal["24h"] type.
+ :vartype prompt_cache_retention: str or str
+ :ivar previous_response_id:
+ :vartype previous_response_id: str
+ :ivar model: The model deployment to use for the creation of this response.
+ :vartype model: str
+ :ivar reasoning:
+ :vartype reasoning: ~azure.ai.agentserver.responses.models.models.Reasoning
+ :ivar background:
+ :vartype background: bool
+ :ivar max_output_tokens:
+ :vartype max_output_tokens: int
+ :ivar max_tool_calls:
+ :vartype max_tool_calls: int
+ :ivar text:
+ :vartype text: ~azure.ai.agentserver.responses.models.models.ResponseTextParam
+ :ivar tools:
+ :vartype tools: list[~azure.ai.agentserver.responses.models.models.Tool]
+ :ivar tool_choice: Is either a Union[str, "_models.ToolChoiceOptions"] type or a
+ ToolChoiceParam type.
+ :vartype tool_choice: str or ~azure.ai.agentserver.responses.models.models.ToolChoiceOptions or
+ ~azure.ai.agentserver.responses.models.models.ToolChoiceParam
+ :ivar prompt:
+ :vartype prompt: ~azure.ai.agentserver.responses.models.models.Prompt
+ :ivar truncation: Is either a Literal["auto"] type or a Literal["disabled"] type.
+ :vartype truncation: str or str
+ :ivar input: Is either a str type or a [Item] type.
+ :vartype input: str or list[~azure.ai.agentserver.responses.models.models.Item]
+ :ivar include:
+ :vartype include: list[str or ~azure.ai.agentserver.responses.models.models.IncludeEnum]
+ :ivar parallel_tool_calls:
+ :vartype parallel_tool_calls: bool
+ :ivar store:
+ :vartype store: bool
+ :ivar instructions:
+ :vartype instructions: str
+ :ivar stream:
+ :vartype stream: bool
+ :ivar stream_options:
+ :vartype stream_options: ~azure.ai.agentserver.responses.models.models.ResponseStreamOptions
+ :ivar conversation: Is either a str type or a ConversationParam_2 type.
+ :vartype conversation: str or ~azure.ai.agentserver.responses.models.models.ConversationParam_2
+ :ivar context_management: Context management configuration for this request.
+ :vartype context_management:
+ list[~azure.ai.agentserver.responses.models.models.ContextManagementParam]
+ :ivar agent_reference: The agent to use for generating the response.
+ :vartype agent_reference: ~azure.ai.agentserver.responses.models.models.AgentReference
+ :ivar structured_inputs: The structured inputs to the response that can participate in prompt
+ template substitution or tool argument bindings.
+ :vartype structured_inputs: dict[str, any]
+ """
+
+ metadata: Optional["_models.Metadata"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ top_logprobs: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ temperature: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ top_p: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ user: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """This field is being replaced by ``safety_identifier`` and ``prompt_cache_key``. Use
+ ``prompt_cache_key`` instead to maintain caching optimizations. A stable identifier for your
+ end-users. Used to boost cache hit rates by better bucketing similar requests and to help
+ OpenAI detect and prevent abuse. `Learn more
+ `_."""
+ safety_identifier: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """A stable identifier used to help detect users of your application that may be violating
+ OpenAI's usage policies. The IDs should be a string that uniquely identifies each user. We
+ recommend hashing their username or email address, in order to avoid sending us any identifying
+ information. `Learn more `_."""
+ prompt_cache_key: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Used by OpenAI to cache responses for similar requests to optimize your cache hit rates.
+ Replaces the ``user`` field. `Learn more `_."""
+ service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Is one of the following types: Literal[\"auto\"], Literal[\"default\"], Literal[\"flex\"],
+ Literal[\"scale\"], Literal[\"priority\"]"""
+ prompt_cache_retention: Optional[Literal["in-memory", "24h"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Is either a Literal[\"in-memory\"] type or a Literal[\"24h\"] type."""
+ previous_response_id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ model: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The model deployment to use for the creation of this response."""
+ reasoning: Optional["_models.Reasoning"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ background: Optional[bool] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ max_output_tokens: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ max_tool_calls: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ text: Optional["_models.ResponseTextParam"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ tools: Optional[list["_models.Tool"]] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ tool_choice: Optional[Union[str, "_models.ToolChoiceOptions", "_models.ToolChoiceParam"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Is either a Union[str, \"_models.ToolChoiceOptions\"] type or a ToolChoiceParam type."""
+ prompt: Optional["_models.Prompt"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ truncation: Optional[Literal["auto", "disabled"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Is either a Literal[\"auto\"] type or a Literal[\"disabled\"] type."""
+ input: Optional["_types.InputParam"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Is either a str type or a [Item] type."""
+ include: Optional[list[Union[str, "_models.IncludeEnum"]]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ parallel_tool_calls: Optional[bool] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ store: Optional[bool] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ instructions: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ stream: Optional[bool] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ stream_options: Optional["_models.ResponseStreamOptions"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ conversation: Optional["_types.ConversationParam"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Is either a str type or a ConversationParam_2 type."""
+ context_management: Optional[list["_models.ContextManagementParam"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Context management configuration for this request."""
+ agent_reference: Optional["_models.AgentReference"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The agent to use for generating the response."""
+ structured_inputs: Optional[dict[str, Any]] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The structured inputs to the response that can participate in prompt template substitution or
+ tool argument bindings."""
+
+ @overload
+ def __init__( # pylint: disable=too-many-locals
+ self,
+ *,
+ metadata: Optional["_models.Metadata"] = None,
+ top_logprobs: Optional[int] = None,
+ temperature: Optional[int] = None,
+ top_p: Optional[int] = None,
+ user: Optional[str] = None,
+ safety_identifier: Optional[str] = None,
+ prompt_cache_key: Optional[str] = None,
+ service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] = None,
+ prompt_cache_retention: Optional[Literal["in-memory", "24h"]] = None,
+ previous_response_id: Optional[str] = None,
+ model: Optional[str] = None,
+ reasoning: Optional["_models.Reasoning"] = None,
+ background: Optional[bool] = None,
+ max_output_tokens: Optional[int] = None,
+ max_tool_calls: Optional[int] = None,
+ text: Optional["_models.ResponseTextParam"] = None,
+ tools: Optional[list["_models.Tool"]] = None,
+ tool_choice: Optional[Union[str, "_models.ToolChoiceOptions", "_models.ToolChoiceParam"]] = None,
+ prompt: Optional["_models.Prompt"] = None,
+ truncation: Optional[Literal["auto", "disabled"]] = None,
+ input: Optional["_types.InputParam"] = None,
+ include: Optional[list[Union[str, "_models.IncludeEnum"]]] = None,
+ parallel_tool_calls: Optional[bool] = None,
+ store: Optional[bool] = None,
+ instructions: Optional[str] = None,
+ stream: Optional[bool] = None,
+ stream_options: Optional["_models.ResponseStreamOptions"] = None,
+ conversation: Optional["_types.ConversationParam"] = None,
+ context_management: Optional[list["_models.ContextManagementParam"]] = None,
+ agent_reference: Optional["_models.AgentReference"] = None,
+ structured_inputs: Optional[dict[str, Any]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class CustomToolParamFormat(_Model):
+ """The input format for the custom tool. Default is unconstrained text.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ CustomGrammarFormatParam, CustomTextFormatParam
+
+ :ivar type: Required. Known values are: "text" and "grammar".
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.CustomToolParamFormatType
+ """
+
+ __mapping__: dict[str, _Model] = {}
+ type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])
+ """Required. Known values are: \"text\" and \"grammar\"."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ type: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class CustomGrammarFormatParam(CustomToolParamFormat, discriminator="grammar"):
+ """Grammar format.
+
+ :ivar type: Grammar format. Always ``grammar``. Required. GRAMMAR.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.GRAMMAR
+ :ivar syntax: The syntax of the grammar definition. One of ``lark`` or ``regex``. Required.
+ Known values are: "lark" and "regex".
+ :vartype syntax: str or ~azure.ai.agentserver.responses.models.models.GrammarSyntax1
+ :ivar definition: The grammar definition. Required.
+ :vartype definition: str
+ """
+
+ type: Literal[CustomToolParamFormatType.GRAMMAR] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Grammar format. Always ``grammar``. Required. GRAMMAR."""
+ syntax: Union[str, "_models.GrammarSyntax1"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The syntax of the grammar definition. One of ``lark`` or ``regex``. Required. Known values are:
+ \"lark\" and \"regex\"."""
+ definition: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The grammar definition. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ syntax: Union[str, "_models.GrammarSyntax1"],
+ definition: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = CustomToolParamFormatType.GRAMMAR # type: ignore
+
+
+class CustomTextFormatParam(CustomToolParamFormat, discriminator="text"):
+ """Text format.
+
+ :ivar type: Unconstrained text format. Always ``text``. Required. TEXT.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.TEXT
+ """
+
+ type: Literal[CustomToolParamFormatType.TEXT] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Unconstrained text format. Always ``text``. Required. TEXT."""
+
+ @overload
+ def __init__(
+ self,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = CustomToolParamFormatType.TEXT # type: ignore
+
+
+class CustomToolParam(Tool, discriminator="custom"):
+ """Custom tool.
+
+ :ivar type: The type of the custom tool. Always ``custom``. Required. CUSTOM.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.CUSTOM
+ :ivar name: The name of the custom tool, used to identify it in tool calls. Required.
+ :vartype name: str
+ :ivar description: Optional description of the custom tool, used to provide more context.
+ :vartype description: str
+ :ivar format: The input format for the custom tool. Default is unconstrained text.
+ :vartype format: ~azure.ai.agentserver.responses.models.models.CustomToolParamFormat
+ """
+
+ type: Literal[ToolType.CUSTOM] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the custom tool. Always ``custom``. Required. CUSTOM."""
+ name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The name of the custom tool, used to identify it in tool calls. Required."""
+ description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional description of the custom tool, used to provide more context."""
+ format: Optional["_models.CustomToolParamFormat"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The input format for the custom tool. Default is unconstrained text."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ name: str,
+ description: Optional[str] = None,
+ format: Optional["_models.CustomToolParamFormat"] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ToolType.CUSTOM # type: ignore
+
+
+class DeleteResponseResult(_Model):
+ """The result of a delete response operation.
+
+ :ivar id: The operation ID. Required.
+ :vartype id: str
+ :ivar deleted: Always return true. Required. Default value is True.
+ :vartype deleted: bool
+ :ivar object: Required. Default value is "response".
+ :vartype object: str
+ """
+
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The operation ID. Required."""
+ deleted: Literal[True] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Always return true. Required. Default value is True."""
+ object: Literal["response"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required. Default value is \"response\"."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.deleted: Literal[True] = True
+ self.object: Literal["response"] = "response"
+
+
+class DoubleClickAction(ComputerAction, discriminator="double_click"):
+ """DoubleClick.
+
+ :ivar type: Specifies the event type. For a double click action, this property is always set to
+ ``double_click``. Required. DOUBLE_CLICK.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.DOUBLE_CLICK
+ :ivar x: The x-coordinate where the double click occurred. Required.
+ :vartype x: int
+ :ivar y: The y-coordinate where the double click occurred. Required.
+ :vartype y: int
+ """
+
+ type: Literal[ComputerActionType.DOUBLE_CLICK] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Specifies the event type. For a double click action, this property is always set to
+ ``double_click``. Required. DOUBLE_CLICK."""
+ x: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The x-coordinate where the double click occurred. Required."""
+ y: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The y-coordinate where the double click occurred. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ x: int,
+ y: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ComputerActionType.DOUBLE_CLICK # type: ignore
+
+
+class DragParam(ComputerAction, discriminator="drag"):
+ """Drag.
+
+ :ivar type: Specifies the event type. For a drag action, this property is always set to
+ ``drag``. Required. DRAG.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.DRAG
+ :ivar path: An array of coordinates representing the path of the drag action. Coordinates will
+ appear as an array of objects, eg
+
+ .. code-block::
+
+ [
+ { x: 100, y: 200 },
+ { x: 200, y: 300 }
+ ]. Required.
+ :vartype path: list[~azure.ai.agentserver.responses.models.models.CoordParam]
+ """
+
+ type: Literal[ComputerActionType.DRAG] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Specifies the event type. For a drag action, this property is always set to ``drag``. Required.
+ DRAG."""
+ path: list["_models.CoordParam"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """An array of coordinates representing the path of the drag action. Coordinates will appear as an
+ array of objects, eg
+
+ .. code-block::
+
+ [
+ { x: 100, y: 200 },
+ { x: 200, y: 300 }
+ ]. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ path: list["_models.CoordParam"],
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ComputerActionType.DRAG # type: ignore
+
+
+class Error(_Model):
+ """Error.
+
+ :ivar code: Required.
+ :vartype code: str
+ :ivar message: Required.
+ :vartype message: str
+ :ivar param:
+ :vartype param: str
+ :ivar type:
+ :vartype type: str
+ :ivar details:
+ :vartype details: list[~azure.ai.agentserver.responses.models.models.Error]
+ :ivar additional_info:
+ :vartype additional_info: dict[str, any]
+ :ivar debug_info:
+ :vartype debug_info: dict[str, any]
+ """
+
+ code: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+ message: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+ param: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ type: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ details: Optional[list["_models.Error"]] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ additional_info: Optional[dict[str, Any]] = rest_field(
+ name="additionalInfo", visibility=["read", "create", "update", "delete", "query"]
+ )
+ debug_info: Optional[dict[str, Any]] = rest_field(
+ name="debugInfo", visibility=["read", "create", "update", "delete", "query"]
+ )
+
+ @overload
+ def __init__(
+ self,
+ *,
+ code: str,
+ message: str,
+ param: Optional[str] = None,
+ type: Optional[str] = None,
+ details: Optional[list["_models.Error"]] = None,
+ additional_info: Optional[dict[str, Any]] = None,
+ debug_info: Optional[dict[str, Any]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class FabricDataAgentToolCall(OutputItem, discriminator="fabric_dataagent_preview_call"):
+ """A Fabric data agent tool call.
+
+ :ivar agent_reference: The agent that created the item.
+ :vartype agent_reference: ~azure.ai.agentserver.responses.models.models.AgentReference
+ :ivar response_id: The response on which the item is created.
+ :vartype response_id: str
+ :ivar type: Required. FABRIC_DATAAGENT_PREVIEW_CALL.
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.FABRIC_DATAAGENT_PREVIEW_CALL
+ :ivar call_id: The unique ID of the tool call generated by the model. Required.
+ :vartype call_id: str
+ :ivar arguments: A JSON string of the arguments to pass to the tool. Required.
+ :vartype arguments: str
+ :ivar status: The status of the tool call. Required. Known values are: "in_progress",
+ "completed", "incomplete", and "failed".
+ :vartype status: str or ~azure.ai.agentserver.responses.models.models.ToolCallStatus
+ :ivar id: Required.
+ :vartype id: str
+ """
+
+ type: Literal[OutputItemType.FABRIC_DATAAGENT_PREVIEW_CALL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Required. FABRIC_DATAAGENT_PREVIEW_CALL."""
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the tool call generated by the model. Required."""
+ arguments: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """A JSON string of the arguments to pass to the tool. Required."""
+ status: Union[str, "_models.ToolCallStatus"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the tool call. Required. Known values are: \"in_progress\", \"completed\",
+ \"incomplete\", and \"failed\"."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ call_id: str,
+ arguments: str,
+ status: Union[str, "_models.ToolCallStatus"],
+ id: str, # pylint: disable=redefined-builtin
+ agent_reference: Optional["_models.AgentReference"] = None,
+ response_id: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputItemType.FABRIC_DATAAGENT_PREVIEW_CALL # type: ignore
+
+
+class FabricDataAgentToolCallOutput(OutputItem, discriminator="fabric_dataagent_preview_call_output"):
+ """The output of a Fabric data agent tool call.
+
+ :ivar agent_reference: The agent that created the item.
+ :vartype agent_reference: ~azure.ai.agentserver.responses.models.models.AgentReference
+ :ivar response_id: The response on which the item is created.
+ :vartype response_id: str
+ :ivar type: Required. FABRIC_DATAAGENT_PREVIEW_CALL_OUTPUT.
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.FABRIC_DATAAGENT_PREVIEW_CALL_OUTPUT
+ :ivar call_id: The unique ID of the tool call generated by the model. Required.
+ :vartype call_id: str
+ :ivar output: The output from the Fabric data agent tool call. Is one of the following types:
+ {str: Any}, str, [Any]
+ :vartype output: dict[str, any] or str or list[any]
+ :ivar status: The status of the tool call. Required. Known values are: "in_progress",
+ "completed", "incomplete", and "failed".
+ :vartype status: str or ~azure.ai.agentserver.responses.models.models.ToolCallStatus
+ :ivar id: Required.
+ :vartype id: str
+ """
+
+ type: Literal[OutputItemType.FABRIC_DATAAGENT_PREVIEW_CALL_OUTPUT] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Required. FABRIC_DATAAGENT_PREVIEW_CALL_OUTPUT."""
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the tool call generated by the model. Required."""
+ output: Optional["_types.ToolCallOutputContent"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The output from the Fabric data agent tool call. Is one of the following types: {str: Any},
+ str, [Any]"""
+ status: Union[str, "_models.ToolCallStatus"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the tool call. Required. Known values are: \"in_progress\", \"completed\",
+ \"incomplete\", and \"failed\"."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ call_id: str,
+ status: Union[str, "_models.ToolCallStatus"],
+ id: str, # pylint: disable=redefined-builtin
+ agent_reference: Optional["_models.AgentReference"] = None,
+ response_id: Optional[str] = None,
+ output: Optional["_types.ToolCallOutputContent"] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputItemType.FABRIC_DATAAGENT_PREVIEW_CALL_OUTPUT # type: ignore
+
+
+class FabricDataAgentToolParameters(_Model):
+ """The fabric data agent tool parameters.
+
+ :ivar name: Optional user-defined name for this tool or configuration.
+ :vartype name: str
+ :ivar description: Optional user-defined description for this tool or configuration.
+ :vartype description: str
+ :ivar project_connections: The project connections attached to this tool. There can be a
+ maximum of 1 connection resource attached to the tool.
+ :vartype project_connections:
+ list[~azure.ai.agentserver.responses.models.models.ToolProjectConnection]
+ """
+
+ name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined name for this tool or configuration."""
+ description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined description for this tool or configuration."""
+ project_connections: Optional[list["_models.ToolProjectConnection"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The project connections attached to this tool. There can be a maximum of 1 connection resource
+ attached to the tool."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ project_connections: Optional[list["_models.ToolProjectConnection"]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class FileCitationBody(Annotation, discriminator="file_citation"):
+ """File citation.
+
+ :ivar type: The type of the file citation. Always ``file_citation``. Required. FILE_CITATION.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.FILE_CITATION
+ :ivar file_id: The ID of the file. Required.
+ :vartype file_id: str
+ :ivar index: The index of the file in the list of files. Required.
+ :vartype index: int
+ :ivar filename: The filename of the file cited. Required.
+ :vartype filename: str
+ """
+
+ type: Literal[AnnotationType.FILE_CITATION] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the file citation. Always ``file_citation``. Required. FILE_CITATION."""
+ file_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the file. Required."""
+ index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the file in the list of files. Required."""
+ filename: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The filename of the file cited. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ file_id: str,
+ index: int,
+ filename: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = AnnotationType.FILE_CITATION # type: ignore
+
+
+class FilePath(Annotation, discriminator="file_path"):
+ """File path.
+
+ :ivar type: The type of the file path. Always ``file_path``. Required. FILE_PATH.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.FILE_PATH
+ :ivar file_id: The ID of the file. Required.
+ :vartype file_id: str
+ :ivar index: The index of the file in the list of files. Required.
+ :vartype index: int
+ """
+
+ type: Literal[AnnotationType.FILE_PATH] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the file path. Always ``file_path``. Required. FILE_PATH."""
+ file_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the file. Required."""
+ index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the file in the list of files. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ file_id: str,
+ index: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = AnnotationType.FILE_PATH # type: ignore
+
+
+class FileSearchTool(Tool, discriminator="file_search"):
+ """File search.
+
+ :ivar type: The type of the file search tool. Always ``file_search``. Required. FILE_SEARCH.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.FILE_SEARCH
+ :ivar vector_store_ids: The IDs of the vector stores to search. Required.
+ :vartype vector_store_ids: list[str]
+ :ivar max_num_results: The maximum number of results to return. This number should be between 1
+ and 50 inclusive.
+ :vartype max_num_results: int
+ :ivar ranking_options: Ranking options for search.
+ :vartype ranking_options: ~azure.ai.agentserver.responses.models.models.RankingOptions
+ :ivar filters: Is either a ComparisonFilter type or a CompoundFilter type.
+ :vartype filters: ~azure.ai.agentserver.responses.models.models.ComparisonFilter or
+ ~azure.ai.agentserver.responses.models.models.CompoundFilter
+ :ivar name: Optional user-defined name for this tool or configuration.
+ :vartype name: str
+ :ivar description: Optional user-defined description for this tool or configuration.
+ :vartype description: str
+ """
+
+ type: Literal[ToolType.FILE_SEARCH] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the file search tool. Always ``file_search``. Required. FILE_SEARCH."""
+ vector_store_ids: list[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The IDs of the vector stores to search. Required."""
+ max_num_results: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The maximum number of results to return. This number should be between 1 and 50 inclusive."""
+ ranking_options: Optional["_models.RankingOptions"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Ranking options for search."""
+ filters: Optional["_types.Filters"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Is either a ComparisonFilter type or a CompoundFilter type."""
+ name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined name for this tool or configuration."""
+ description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined description for this tool or configuration."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ vector_store_ids: list[str],
+ max_num_results: Optional[int] = None,
+ ranking_options: Optional["_models.RankingOptions"] = None,
+ filters: Optional["_types.Filters"] = None,
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ToolType.FILE_SEARCH # type: ignore
+
+
+class FileSearchToolCallResults(_Model):
+ """FileSearchToolCallResults.
+
+ :ivar file_id:
+ :vartype file_id: str
+ :ivar text:
+ :vartype text: str
+ :ivar filename:
+ :vartype filename: str
+ :ivar attributes:
+ :vartype attributes: ~azure.ai.agentserver.responses.models.models.VectorStoreFileAttributes
+ :ivar score:
+ :vartype score: float
+ """
+
+ file_id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ text: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ filename: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ attributes: Optional["_models.VectorStoreFileAttributes"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ score: Optional[float] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+
+ @overload
+ def __init__(
+ self,
+ *,
+ file_id: Optional[str] = None,
+ text: Optional[str] = None,
+ filename: Optional[str] = None,
+ attributes: Optional["_models.VectorStoreFileAttributes"] = None,
+ score: Optional[float] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class FunctionAndCustomToolCallOutput(_Model):
+ """FunctionAndCustomToolCallOutput.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ FunctionAndCustomToolCallOutputInputFileContent,
+ FunctionAndCustomToolCallOutputInputImageContent,
+ FunctionAndCustomToolCallOutputInputTextContent
+
+ :ivar type: Required. Known values are: "input_text", "input_image", and "input_file".
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.FunctionAndCustomToolCallOutputType
+ """
+
+ __mapping__: dict[str, _Model] = {}
+ type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])
+ """Required. Known values are: \"input_text\", \"input_image\", and \"input_file\"."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ type: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class FunctionAndCustomToolCallOutputInputFileContent(
+ FunctionAndCustomToolCallOutput, discriminator="input_file"
+): # pylint: disable=name-too-long
+ """Input file.
+
+ :ivar type: The type of the input item. Always ``input_file``. Required. INPUT_FILE.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.INPUT_FILE
+ :ivar file_id:
+ :vartype file_id: str
+ :ivar filename: The name of the file to be sent to the model.
+ :vartype filename: str
+ :ivar file_url: The URL of the file to be sent to the model.
+ :vartype file_url: str
+ :ivar file_data: The content of the file to be sent to the model.
+ :vartype file_data: str
+ """
+
+ type: Literal[FunctionAndCustomToolCallOutputType.INPUT_FILE] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the input item. Always ``input_file``. Required. INPUT_FILE."""
+ file_id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ filename: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The name of the file to be sent to the model."""
+ file_url: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The URL of the file to be sent to the model."""
+ file_data: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The content of the file to be sent to the model."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ file_id: Optional[str] = None,
+ filename: Optional[str] = None,
+ file_url: Optional[str] = None,
+ file_data: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = FunctionAndCustomToolCallOutputType.INPUT_FILE # type: ignore
+
+
+class FunctionAndCustomToolCallOutputInputImageContent(
+ FunctionAndCustomToolCallOutput, discriminator="input_image"
+): # pylint: disable=name-too-long
+ """Input image.
+
+ :ivar type: The type of the input item. Always ``input_image``. Required. INPUT_IMAGE.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.INPUT_IMAGE
+ :ivar image_url:
+ :vartype image_url: str
+ :ivar file_id:
+ :vartype file_id: str
+ :ivar detail: The detail level of the image to be sent to the model. One of ``high``, ``low``,
+ or ``auto``. Defaults to ``auto``. Required. Known values are: "low", "high", and "auto".
+ :vartype detail: str or ~azure.ai.agentserver.responses.models.models.ImageDetail
+ """
+
+ type: Literal[FunctionAndCustomToolCallOutputType.INPUT_IMAGE] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the input item. Always ``input_image``. Required. INPUT_IMAGE."""
+ image_url: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ file_id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ detail: Union[str, "_models.ImageDetail"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The detail level of the image to be sent to the model. One of ``high``, ``low``, or ``auto``.
+ Defaults to ``auto``. Required. Known values are: \"low\", \"high\", and \"auto\"."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ detail: Union[str, "_models.ImageDetail"],
+ image_url: Optional[str] = None,
+ file_id: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = FunctionAndCustomToolCallOutputType.INPUT_IMAGE # type: ignore
+
+
+class FunctionAndCustomToolCallOutputInputTextContent(
+ FunctionAndCustomToolCallOutput, discriminator="input_text"
+): # pylint: disable=name-too-long
+ """Input text.
+
+ :ivar type: The type of the input item. Always ``input_text``. Required. INPUT_TEXT.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.INPUT_TEXT
+ :ivar text: The text input to the model. Required.
+ :vartype text: str
+ """
+
+ type: Literal[FunctionAndCustomToolCallOutputType.INPUT_TEXT] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the input item. Always ``input_text``. Required. INPUT_TEXT."""
+ text: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The text input to the model. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ text: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = FunctionAndCustomToolCallOutputType.INPUT_TEXT # type: ignore
+
+
+class FunctionCallOutputItemParam(Item, discriminator="function_call_output"):
+ """Function tool call output.
+
+ :ivar id:
+ :vartype id: str
+ :ivar call_id: The unique ID of the function tool call generated by the model. Required.
+ :vartype call_id: str
+ :ivar type: The type of the function tool call output. Always ``function_call_output``.
+ Required. FUNCTION_CALL_OUTPUT.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.FUNCTION_CALL_OUTPUT
+ :ivar output: Text, image, or file output of the function tool call. Required. Is either a str
+ type or a [Union["_models.InputTextContentParam", "_models.InputImageContentParamAutoParam",
+ "_models.InputFileContentParam"]] type.
+ :vartype output: str or
+ list[~azure.ai.agentserver.responses.models.models.InputTextContentParam or
+ ~azure.ai.agentserver.responses.models.models.InputImageContentParamAutoParam or
+ ~azure.ai.agentserver.responses.models.models.InputFileContentParam]
+ :ivar status: Known values are: "in_progress", "completed", and "incomplete".
+ :vartype status: str or ~azure.ai.agentserver.responses.models.models.FunctionCallItemStatus
+ """
+
+ id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the function tool call generated by the model. Required."""
+ type: Literal[ItemType.FUNCTION_CALL_OUTPUT] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the function tool call output. Always ``function_call_output``. Required.
+ FUNCTION_CALL_OUTPUT."""
+ output: Union[
+ str,
+ list[
+ Union[
+ "_models.InputTextContentParam",
+ "_models.InputImageContentParamAutoParam",
+ "_models.InputFileContentParam",
+ ]
+ ],
+ ] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Text, image, or file output of the function tool call. Required. Is either a str type or a
+ [Union[\"_models.InputTextContentParam\", \"_models.InputImageContentParamAutoParam\",
+ \"_models.InputFileContentParam\"]] type."""
+ status: Optional[Union[str, "_models.FunctionCallItemStatus"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Known values are: \"in_progress\", \"completed\", and \"incomplete\"."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ call_id: str,
+ output: Union[
+ str,
+ list[
+ Union[
+ "_models.InputTextContentParam",
+ "_models.InputImageContentParamAutoParam",
+ "_models.InputFileContentParam",
+ ]
+ ],
+ ],
+ id: Optional[str] = None, # pylint: disable=redefined-builtin
+ status: Optional[Union[str, "_models.FunctionCallItemStatus"]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ItemType.FUNCTION_CALL_OUTPUT # type: ignore
+
+
+class FunctionShellAction(_Model):
+ """Shell exec action.
+
+ :ivar commands: Required.
+ :vartype commands: list[str]
+ :ivar timeout_ms: Required.
+ :vartype timeout_ms: int
+ :ivar max_output_length: Required.
+ :vartype max_output_length: int
+ """
+
+ commands: list[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+ timeout_ms: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+ max_output_length: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ commands: list[str],
+ timeout_ms: int,
+ max_output_length: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class FunctionShellActionParam(_Model):
+ """Shell action.
+
+ :ivar commands: Ordered shell commands for the execution environment to run. Required.
+ :vartype commands: list[str]
+ :ivar timeout_ms:
+ :vartype timeout_ms: int
+ :ivar max_output_length:
+ :vartype max_output_length: int
+ """
+
+ commands: list[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Ordered shell commands for the execution environment to run. Required."""
+ timeout_ms: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ max_output_length: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+
+ @overload
+ def __init__(
+ self,
+ *,
+ commands: list[str],
+ timeout_ms: Optional[int] = None,
+ max_output_length: Optional[int] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class FunctionShellCallItemParam(Item, discriminator="shell_call"):
+ """Shell tool call.
+
+ :ivar id:
+ :vartype id: str
+ :ivar call_id: The unique ID of the shell tool call generated by the model. Required.
+ :vartype call_id: str
+ :ivar type: The type of the item. Always ``shell_call``. Required. SHELL_CALL.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.SHELL_CALL
+ :ivar action: The shell commands and limits that describe how to run the tool call. Required.
+ :vartype action: ~azure.ai.agentserver.responses.models.models.FunctionShellActionParam
+ :ivar status: Known values are: "in_progress", "completed", and "incomplete".
+ :vartype status: str or
+ ~azure.ai.agentserver.responses.models.models.FunctionShellCallItemStatus
+ :ivar environment:
+ :vartype environment:
+ ~azure.ai.agentserver.responses.models.models.FunctionShellCallItemParamEnvironment
+ """
+
+ id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the shell tool call generated by the model. Required."""
+ type: Literal[ItemType.SHELL_CALL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the item. Always ``shell_call``. Required. SHELL_CALL."""
+ action: "_models.FunctionShellActionParam" = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The shell commands and limits that describe how to run the tool call. Required."""
+ status: Optional[Union[str, "_models.FunctionShellCallItemStatus"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Known values are: \"in_progress\", \"completed\", and \"incomplete\"."""
+ environment: Optional["_models.FunctionShellCallItemParamEnvironment"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+
+ @overload
+ def __init__(
+ self,
+ *,
+ call_id: str,
+ action: "_models.FunctionShellActionParam",
+ id: Optional[str] = None, # pylint: disable=redefined-builtin
+ status: Optional[Union[str, "_models.FunctionShellCallItemStatus"]] = None,
+ environment: Optional["_models.FunctionShellCallItemParamEnvironment"] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ItemType.SHELL_CALL # type: ignore
+
+
+class FunctionShellCallItemParamEnvironment(_Model):
+ """The environment to execute the shell commands in.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ FunctionShellCallItemParamEnvironmentContainerReferenceParam,
+ FunctionShellCallItemParamEnvironmentLocalEnvironmentParam
+
+ :ivar type: Required. Known values are: "local" and "container_reference".
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.FunctionShellCallItemParamEnvironmentType
+ """
+
+ __mapping__: dict[str, _Model] = {}
+ type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])
+ """Required. Known values are: \"local\" and \"container_reference\"."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ type: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class FunctionShellCallItemParamEnvironmentContainerReferenceParam(
+ FunctionShellCallItemParamEnvironment, discriminator="container_reference"
+): # pylint: disable=name-too-long
+ """FunctionShellCallItemParamEnvironmentContainerReferenceParam.
+
+ :ivar type: References a container created with the /v1/containers endpoint. Required.
+ CONTAINER_REFERENCE.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.CONTAINER_REFERENCE
+ :ivar container_id: The ID of the referenced container. Required.
+ :vartype container_id: str
+ """
+
+ type: Literal[FunctionShellCallItemParamEnvironmentType.CONTAINER_REFERENCE] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """References a container created with the /v1/containers endpoint. Required. CONTAINER_REFERENCE."""
+ container_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the referenced container. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ container_id: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = FunctionShellCallItemParamEnvironmentType.CONTAINER_REFERENCE # type: ignore
+
+
+class FunctionShellCallItemParamEnvironmentLocalEnvironmentParam(
+ FunctionShellCallItemParamEnvironment, discriminator="local"
+): # pylint: disable=name-too-long
+ """FunctionShellCallItemParamEnvironmentLocalEnvironmentParam.
+
+ :ivar type: Use a local computer environment. Required. LOCAL.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.LOCAL
+ :ivar skills: An optional list of skills.
+ :vartype skills: list[~azure.ai.agentserver.responses.models.models.LocalSkillParam]
+ """
+
+ type: Literal[FunctionShellCallItemParamEnvironmentType.LOCAL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Use a local computer environment. Required. LOCAL."""
+ skills: Optional[list["_models.LocalSkillParam"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """An optional list of skills."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ skills: Optional[list["_models.LocalSkillParam"]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = FunctionShellCallItemParamEnvironmentType.LOCAL # type: ignore
+
+
+class FunctionShellCallOutputContent(_Model):
+ """Shell call output content.
+
+ :ivar stdout: The standard output that was captured. Required.
+ :vartype stdout: str
+ :ivar stderr: The standard error output that was captured. Required.
+ :vartype stderr: str
+ :ivar outcome: Shell call outcome. Required.
+ :vartype outcome: ~azure.ai.agentserver.responses.models.models.FunctionShellCallOutputOutcome
+ :ivar created_by: The identifier of the actor that created the item.
+ :vartype created_by: str
+ """
+
+ stdout: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The standard output that was captured. Required."""
+ stderr: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The standard error output that was captured. Required."""
+ outcome: "_models.FunctionShellCallOutputOutcome" = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Shell call outcome. Required."""
+ created_by: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The identifier of the actor that created the item."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ stdout: str,
+ stderr: str,
+ outcome: "_models.FunctionShellCallOutputOutcome",
+ created_by: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class FunctionShellCallOutputContentParam(_Model):
+ """Shell output content.
+
+ :ivar stdout: Captured stdout output for the shell call. Required.
+ :vartype stdout: str
+ :ivar stderr: Captured stderr output for the shell call. Required.
+ :vartype stderr: str
+ :ivar outcome: The exit or timeout outcome associated with this shell call. Required.
+ :vartype outcome:
+ ~azure.ai.agentserver.responses.models.models.FunctionShellCallOutputOutcomeParam
+ """
+
+ stdout: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Captured stdout output for the shell call. Required."""
+ stderr: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Captured stderr output for the shell call. Required."""
+ outcome: "_models.FunctionShellCallOutputOutcomeParam" = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The exit or timeout outcome associated with this shell call. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ stdout: str,
+ stderr: str,
+ outcome: "_models.FunctionShellCallOutputOutcomeParam",
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class FunctionShellCallOutputOutcome(_Model):
+ """Shell call outcome.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ FunctionShellCallOutputExitOutcome, FunctionShellCallOutputTimeoutOutcome
+
+ :ivar type: Required. Known values are: "timeout" and "exit".
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.FunctionShellCallOutputOutcomeType
+ """
+
+ __mapping__: dict[str, _Model] = {}
+ type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])
+ """Required. Known values are: \"timeout\" and \"exit\"."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ type: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class FunctionShellCallOutputExitOutcome(FunctionShellCallOutputOutcome, discriminator="exit"):
+ """Shell call exit outcome.
+
+ :ivar type: The outcome type. Always ``exit``. Required. EXIT.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.EXIT
+ :ivar exit_code: Exit code from the shell process. Required.
+ :vartype exit_code: int
+ """
+
+ type: Literal[FunctionShellCallOutputOutcomeType.EXIT] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The outcome type. Always ``exit``. Required. EXIT."""
+ exit_code: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Exit code from the shell process. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ exit_code: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = FunctionShellCallOutputOutcomeType.EXIT # type: ignore
+
+
+class FunctionShellCallOutputOutcomeParam(_Model):
+ """Shell call outcome.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ FunctionShellCallOutputExitOutcomeParam, FunctionShellCallOutputTimeoutOutcomeParam
+
+ :ivar type: Required. Known values are: "timeout" and "exit".
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.FunctionShellCallOutputOutcomeParamType
+ """
+
+ __mapping__: dict[str, _Model] = {}
+ type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])
+ """Required. Known values are: \"timeout\" and \"exit\"."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ type: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class FunctionShellCallOutputExitOutcomeParam(FunctionShellCallOutputOutcomeParam, discriminator="exit"):
+ """Shell call exit outcome.
+
+ :ivar type: The outcome type. Always ``exit``. Required. EXIT.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.EXIT
+ :ivar exit_code: The exit code returned by the shell process. Required.
+ :vartype exit_code: int
+ """
+
+ type: Literal[FunctionShellCallOutputOutcomeParamType.EXIT] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The outcome type. Always ``exit``. Required. EXIT."""
+ exit_code: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The exit code returned by the shell process. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ exit_code: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = FunctionShellCallOutputOutcomeParamType.EXIT # type: ignore
+
+
+class FunctionShellCallOutputItemParam(Item, discriminator="shell_call_output"):
+ """Shell tool call output.
+
+ :ivar id:
+ :vartype id: str
+ :ivar call_id: The unique ID of the shell tool call generated by the model. Required.
+ :vartype call_id: str
+ :ivar type: The type of the item. Always ``shell_call_output``. Required. SHELL_CALL_OUTPUT.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.SHELL_CALL_OUTPUT
+ :ivar output: Captured chunks of stdout and stderr output, along with their associated
+ outcomes. Required.
+ :vartype output:
+ list[~azure.ai.agentserver.responses.models.models.FunctionShellCallOutputContentParam]
+ :ivar status: Known values are: "in_progress", "completed", and "incomplete".
+ :vartype status: str or
+ ~azure.ai.agentserver.responses.models.models.FunctionShellCallItemStatus
+ :ivar max_output_length:
+ :vartype max_output_length: int
+ """
+
+ id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the shell tool call generated by the model. Required."""
+ type: Literal[ItemType.SHELL_CALL_OUTPUT] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the item. Always ``shell_call_output``. Required. SHELL_CALL_OUTPUT."""
+ output: list["_models.FunctionShellCallOutputContentParam"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Captured chunks of stdout and stderr output, along with their associated outcomes. Required."""
+ status: Optional[Union[str, "_models.FunctionShellCallItemStatus"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Known values are: \"in_progress\", \"completed\", and \"incomplete\"."""
+ max_output_length: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+
+ @overload
+ def __init__(
+ self,
+ *,
+ call_id: str,
+ output: list["_models.FunctionShellCallOutputContentParam"],
+ id: Optional[str] = None, # pylint: disable=redefined-builtin
+ status: Optional[Union[str, "_models.FunctionShellCallItemStatus"]] = None,
+ max_output_length: Optional[int] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ItemType.SHELL_CALL_OUTPUT # type: ignore
+
+
+class FunctionShellCallOutputTimeoutOutcome(FunctionShellCallOutputOutcome, discriminator="timeout"):
+ """Shell call timeout outcome.
+
+ :ivar type: The outcome type. Always ``timeout``. Required. TIMEOUT.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.TIMEOUT
+ """
+
+ type: Literal[FunctionShellCallOutputOutcomeType.TIMEOUT] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The outcome type. Always ``timeout``. Required. TIMEOUT."""
+
+ @overload
+ def __init__(
+ self,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = FunctionShellCallOutputOutcomeType.TIMEOUT # type: ignore
+
+
+class FunctionShellCallOutputTimeoutOutcomeParam(
+ FunctionShellCallOutputOutcomeParam, discriminator="timeout"
+): # pylint: disable=name-too-long
+ """Shell call timeout outcome.
+
+ :ivar type: The outcome type. Always ``timeout``. Required. TIMEOUT.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.TIMEOUT
+ """
+
+ type: Literal[FunctionShellCallOutputOutcomeParamType.TIMEOUT] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The outcome type. Always ``timeout``. Required. TIMEOUT."""
+
+ @overload
+ def __init__(
+ self,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = FunctionShellCallOutputOutcomeParamType.TIMEOUT # type: ignore
+
+
+class FunctionShellToolParam(Tool, discriminator="shell"):
+ """Shell tool.
+
+ :ivar type: The type of the shell tool. Always ``shell``. Required. SHELL.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.SHELL
+ :ivar environment:
+ :vartype environment:
+ ~azure.ai.agentserver.responses.models.models.FunctionShellToolParamEnvironment
+ :ivar name: Optional user-defined name for this tool or configuration.
+ :vartype name: str
+ :ivar description: Optional user-defined description for this tool or configuration.
+ :vartype description: str
+ """
+
+ type: Literal[ToolType.SHELL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the shell tool. Always ``shell``. Required. SHELL."""
+ environment: Optional["_models.FunctionShellToolParamEnvironment"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined name for this tool or configuration."""
+ description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined description for this tool or configuration."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ environment: Optional["_models.FunctionShellToolParamEnvironment"] = None,
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ToolType.SHELL # type: ignore
+
+
+class FunctionShellToolParamEnvironmentContainerReferenceParam(
+ FunctionShellToolParamEnvironment, discriminator="container_reference"
+): # pylint: disable=name-too-long
+ """FunctionShellToolParamEnvironmentContainerReferenceParam.
+
+ :ivar type: References a container created with the /v1/containers endpoint. Required.
+ CONTAINER_REFERENCE.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.CONTAINER_REFERENCE
+ :ivar container_id: The ID of the referenced container. Required.
+ :vartype container_id: str
+ """
+
+ type: Literal[FunctionShellToolParamEnvironmentType.CONTAINER_REFERENCE] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """References a container created with the /v1/containers endpoint. Required. CONTAINER_REFERENCE."""
+ container_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the referenced container. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ container_id: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = FunctionShellToolParamEnvironmentType.CONTAINER_REFERENCE # type: ignore
+
+
+class FunctionShellToolParamEnvironmentLocalEnvironmentParam(
+ FunctionShellToolParamEnvironment, discriminator="local"
+): # pylint: disable=name-too-long
+ """FunctionShellToolParamEnvironmentLocalEnvironmentParam.
+
+ :ivar type: Use a local computer environment. Required. LOCAL.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.LOCAL
+ :ivar skills: An optional list of skills.
+ :vartype skills: list[~azure.ai.agentserver.responses.models.models.LocalSkillParam]
+ """
+
+ type: Literal[FunctionShellToolParamEnvironmentType.LOCAL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Use a local computer environment. Required. LOCAL."""
+ skills: Optional[list["_models.LocalSkillParam"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """An optional list of skills."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ skills: Optional[list["_models.LocalSkillParam"]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = FunctionShellToolParamEnvironmentType.LOCAL # type: ignore
+
+
+class FunctionTool(Tool, discriminator="function"):
+ """Function.
+
+ :ivar type: The type of the function tool. Always ``function``. Required. FUNCTION.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.FUNCTION
+ :ivar name: The name of the function to call. Required.
+ :vartype name: str
+ :ivar description:
+ :vartype description: str
+ :ivar parameters: Required.
+ :vartype parameters: dict[str, any]
+ :ivar strict: Required.
+ :vartype strict: bool
+ """
+
+ type: Literal[ToolType.FUNCTION] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the function tool. Always ``function``. Required. FUNCTION."""
+ name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The name of the function to call. Required."""
+ description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ parameters: dict[str, Any] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+ strict: bool = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ name: str,
+ parameters: dict[str, Any],
+ strict: bool,
+ description: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ToolType.FUNCTION # type: ignore
+
+
+class ItemField(_Model):
+ """An item representing a message, tool call, tool output, reasoning, or other response element.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ ItemFieldApplyPatchToolCall, ItemFieldApplyPatchToolCallOutput,
+ ItemFieldCodeInterpreterToolCall, ItemFieldCompactionBody, ItemFieldComputerToolCall,
+ ItemFieldComputerToolCallOutputResource, ItemFieldCustomToolCall,
+ ItemFieldCustomToolCallOutput, ItemFieldFileSearchToolCall, ItemFieldFunctionToolCall,
+ FunctionToolCallOutput, ItemFieldImageGenToolCall, ItemFieldLocalShellToolCall,
+ ItemFieldLocalShellToolCallOutput, ItemFieldMcpApprovalRequest,
+ ItemFieldMcpApprovalResponseResource, ItemFieldMcpToolCall, ItemFieldMcpListTools,
+ ItemFieldMessage, ItemFieldReasoningItem, ItemFieldFunctionShellCall,
+ ItemFieldFunctionShellCallOutput, ItemFieldWebSearchToolCall
+
+ :ivar type: Required. Known values are: "message", "function_call", "function_call_output",
+ "file_search_call", "web_search_call", "image_generation_call", "computer_call",
+ "computer_call_output", "reasoning", "compaction", "code_interpreter_call", "local_shell_call",
+ "local_shell_call_output", "shell_call", "shell_call_output", "apply_patch_call",
+ "apply_patch_call_output", "mcp_list_tools", "mcp_approval_request", "mcp_approval_response",
+ "mcp_call", "custom_tool_call", and "custom_tool_call_output".
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.ItemFieldType
+ """
+
+ __mapping__: dict[str, _Model] = {}
+ type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])
+ """Required. Known values are: \"message\", \"function_call\", \"function_call_output\",
+ \"file_search_call\", \"web_search_call\", \"image_generation_call\", \"computer_call\",
+ \"computer_call_output\", \"reasoning\", \"compaction\", \"code_interpreter_call\",
+ \"local_shell_call\", \"local_shell_call_output\", \"shell_call\", \"shell_call_output\",
+ \"apply_patch_call\", \"apply_patch_call_output\", \"mcp_list_tools\",
+ \"mcp_approval_request\", \"mcp_approval_response\", \"mcp_call\", \"custom_tool_call\", and
+ \"custom_tool_call_output\"."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ type: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class FunctionToolCallOutput(ItemField, discriminator="function_call_output"):
+ """Function tool call output.
+
+ :ivar id: The unique ID of the function tool call output. Populated when this item is returned
+ via API.
+ :vartype id: str
+ :ivar type: The type of the function tool call output. Always ``function_call_output``.
+ Required. FUNCTION_CALL_OUTPUT.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.FUNCTION_CALL_OUTPUT
+ :ivar call_id: The unique ID of the function tool call generated by the model. Required.
+ :vartype call_id: str
+ :ivar output: The output from the function call generated by your code. Can be a string or an
+ list of output content. Required. Is either a str type or a [FunctionAndCustomToolCallOutput]
+ type.
+ :vartype output: str or
+ list[~azure.ai.agentserver.responses.models.models.FunctionAndCustomToolCallOutput]
+ :ivar status: The status of the item. One of ``in_progress``, ``completed``, or ``incomplete``.
+ Populated when items are returned via API. Is one of the following types:
+ Literal["in_progress"], Literal["completed"], Literal["incomplete"]
+ :vartype status: str or str or str
+ """
+
+ id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the function tool call output. Populated when this item is returned via API."""
+ type: Literal[ItemFieldType.FUNCTION_CALL_OUTPUT] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the function tool call output. Always ``function_call_output``. Required.
+ FUNCTION_CALL_OUTPUT."""
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the function tool call generated by the model. Required."""
+ output: Union[str, list["_models.FunctionAndCustomToolCallOutput"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The output from the function call generated by your code. Can be a string or an list of output
+ content. Required. Is either a str type or a [FunctionAndCustomToolCallOutput] type."""
+ status: Optional[Literal["in_progress", "completed", "incomplete"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the item. One of ``in_progress``, ``completed``, or ``incomplete``. Populated
+ when items are returned via API. Is one of the following types: Literal[\"in_progress\"],
+ Literal[\"completed\"], Literal[\"incomplete\"]"""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ call_id: str,
+ output: Union[str, list["_models.FunctionAndCustomToolCallOutput"]],
+ id: Optional[str] = None, # pylint: disable=redefined-builtin
+ status: Optional[Literal["in_progress", "completed", "incomplete"]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ItemFieldType.FUNCTION_CALL_OUTPUT # type: ignore
+
+
+class FunctionToolCallOutputResource(OutputItem, discriminator="function_call_output"):
+ """FunctionToolCallOutputResource.
+
+ :ivar agent_reference: The agent that created the item.
+ :vartype agent_reference: ~azure.ai.agentserver.responses.models.models.AgentReference
+ :ivar response_id: The response on which the item is created.
+ :vartype response_id: str
+ :ivar id: The unique ID of the function tool call output. Populated when this item is returned
+ via API.
+ :vartype id: str
+ :ivar type: The type of the function tool call output. Always ``function_call_output``.
+ Required. FUNCTION_CALL_OUTPUT.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.FUNCTION_CALL_OUTPUT
+ :ivar call_id: The unique ID of the function tool call generated by the model. Required.
+ :vartype call_id: str
+ :ivar output: The output from the function call generated by your code. Can be a string or an
+ list of output content. Required. Is either a str type or a [FunctionAndCustomToolCallOutput]
+ type.
+ :vartype output: str or
+ list[~azure.ai.agentserver.responses.models.models.FunctionAndCustomToolCallOutput]
+ :ivar status: The status of the item. One of ``in_progress``, ``completed``, or ``incomplete``.
+ Populated when items are returned via API. Is one of the following types:
+ Literal["in_progress"], Literal["completed"], Literal["incomplete"]
+ :vartype status: str or str or str
+ """
+
+ id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the function tool call output. Populated when this item is returned via API."""
+ type: Literal[OutputItemType.FUNCTION_CALL_OUTPUT] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the function tool call output. Always ``function_call_output``. Required.
+ FUNCTION_CALL_OUTPUT."""
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the function tool call generated by the model. Required."""
+ output: Union[str, list["_models.FunctionAndCustomToolCallOutput"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The output from the function call generated by your code. Can be a string or an list of output
+ content. Required. Is either a str type or a [FunctionAndCustomToolCallOutput] type."""
+ status: Optional[Literal["in_progress", "completed", "incomplete"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the item. One of ``in_progress``, ``completed``, or ``incomplete``. Populated
+ when items are returned via API. Is one of the following types: Literal[\"in_progress\"],
+ Literal[\"completed\"], Literal[\"incomplete\"]"""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ call_id: str,
+ output: Union[str, list["_models.FunctionAndCustomToolCallOutput"]],
+ agent_reference: Optional["_models.AgentReference"] = None,
+ response_id: Optional[str] = None,
+ id: Optional[str] = None, # pylint: disable=redefined-builtin
+ status: Optional[Literal["in_progress", "completed", "incomplete"]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputItemType.FUNCTION_CALL_OUTPUT # type: ignore
+
+
+class HybridSearchOptions(_Model):
+ """HybridSearchOptions.
+
+ :ivar embedding_weight: The weight of the embedding in the reciprocal ranking fusion. Required.
+ :vartype embedding_weight: int
+ :ivar text_weight: The weight of the text in the reciprocal ranking fusion. Required.
+ :vartype text_weight: int
+ """
+
+ embedding_weight: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The weight of the embedding in the reciprocal ranking fusion. Required."""
+ text_weight: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The weight of the text in the reciprocal ranking fusion. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ embedding_weight: int,
+ text_weight: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class ImageGenTool(Tool, discriminator="image_generation"):
+ """Image generation tool.
+
+ :ivar type: The type of the image generation tool. Always ``image_generation``. Required.
+ IMAGE_GENERATION.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.IMAGE_GENERATION
+ :ivar model: Is one of the following types: Literal["gpt-image-1"],
+ Literal["gpt-image-1-mini"], Literal["gpt-image-1.5"], str
+ :vartype model: str or str or str or str
+ :ivar quality: The quality of the generated image. One of ``low``, ``medium``, ``high``, or
+ ``auto``. Default: ``auto``. Is one of the following types: Literal["low"], Literal["medium"],
+ Literal["high"], Literal["auto"]
+ :vartype quality: str or str or str or str
+ :ivar size: The size of the generated image. One of ``1024x1024``, ``1024x1536``,
+ ``1536x1024``, or ``auto``. Default: ``auto``. Is one of the following types:
+ Literal["1024x1024"], Literal["1024x1536"], Literal["1536x1024"], Literal["auto"]
+ :vartype size: str or str or str or str
+ :ivar output_format: The output format of the generated image. One of ``png``, ``webp``, or
+ ``jpeg``. Default: ``png``. Is one of the following types: Literal["png"], Literal["webp"],
+ Literal["jpeg"]
+ :vartype output_format: str or str or str
+ :ivar output_compression: Compression level for the output image. Default: 100.
+ :vartype output_compression: int
+ :ivar moderation: Moderation level for the generated image. Default: ``auto``. Is either a
+ Literal["auto"] type or a Literal["low"] type.
+ :vartype moderation: str or str
+ :ivar background: Background type for the generated image. One of ``transparent``, ``opaque``,
+ or ``auto``. Default: ``auto``. Is one of the following types: Literal["transparent"],
+ Literal["opaque"], Literal["auto"]
+ :vartype background: str or str or str
+ :ivar input_fidelity: Known values are: "high" and "low".
+ :vartype input_fidelity: str or ~azure.ai.agentserver.responses.models.models.InputFidelity
+ :ivar input_image_mask: Optional mask for inpainting. Contains ``image_url`` (string, optional)
+ and ``file_id`` (string, optional).
+ :vartype input_image_mask:
+ ~azure.ai.agentserver.responses.models.models.ImageGenToolInputImageMask
+ :ivar partial_images: Number of partial images to generate in streaming mode, from 0 (default
+ value) to 3.
+ :vartype partial_images: int
+ :ivar action: Whether to generate a new image or edit an existing image. Default: ``auto``.
+ Known values are: "generate", "edit", and "auto".
+ :vartype action: str or ~azure.ai.agentserver.responses.models.models.ImageGenActionEnum
+ :ivar name: Optional user-defined name for this tool or configuration.
+ :vartype name: str
+ :ivar description: Optional user-defined description for this tool or configuration.
+ :vartype description: str
+ """
+
+ type: Literal[ToolType.IMAGE_GENERATION] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the image generation tool. Always ``image_generation``. Required. IMAGE_GENERATION."""
+ model: Optional[Union[Literal["gpt-image-1"], Literal["gpt-image-1-mini"], Literal["gpt-image-1.5"], str]] = (
+ rest_field(visibility=["read", "create", "update", "delete", "query"])
+ )
+ """Is one of the following types: Literal[\"gpt-image-1\"], Literal[\"gpt-image-1-mini\"],
+ Literal[\"gpt-image-1.5\"], str"""
+ quality: Optional[Literal["low", "medium", "high", "auto"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The quality of the generated image. One of ``low``, ``medium``, ``high``, or ``auto``. Default:
+ ``auto``. Is one of the following types: Literal[\"low\"], Literal[\"medium\"],
+ Literal[\"high\"], Literal[\"auto\"]"""
+ size: Optional[Literal["1024x1024", "1024x1536", "1536x1024", "auto"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The size of the generated image. One of ``1024x1024``, ``1024x1536``, ``1536x1024``, or
+ ``auto``. Default: ``auto``. Is one of the following types: Literal[\"1024x1024\"],
+ Literal[\"1024x1536\"], Literal[\"1536x1024\"], Literal[\"auto\"]"""
+ output_format: Optional[Literal["png", "webp", "jpeg"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The output format of the generated image. One of ``png``, ``webp``, or ``jpeg``. Default:
+ ``png``. Is one of the following types: Literal[\"png\"], Literal[\"webp\"], Literal[\"jpeg\"]"""
+ output_compression: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Compression level for the output image. Default: 100."""
+ moderation: Optional[Literal["auto", "low"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Moderation level for the generated image. Default: ``auto``. Is either a Literal[\"auto\"] type
+ or a Literal[\"low\"] type."""
+ background: Optional[Literal["transparent", "opaque", "auto"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Background type for the generated image. One of ``transparent``, ``opaque``, or ``auto``.
+ Default: ``auto``. Is one of the following types: Literal[\"transparent\"],
+ Literal[\"opaque\"], Literal[\"auto\"]"""
+ input_fidelity: Optional[Union[str, "_models.InputFidelity"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Known values are: \"high\" and \"low\"."""
+ input_image_mask: Optional["_models.ImageGenToolInputImageMask"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Optional mask for inpainting. Contains ``image_url`` (string, optional) and ``file_id``
+ (string, optional)."""
+ partial_images: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Number of partial images to generate in streaming mode, from 0 (default value) to 3."""
+ action: Optional[Union[str, "_models.ImageGenActionEnum"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Whether to generate a new image or edit an existing image. Default: ``auto``. Known values are:
+ \"generate\", \"edit\", and \"auto\"."""
+ name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined name for this tool or configuration."""
+ description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined description for this tool or configuration."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ model: Optional[
+ Union[Literal["gpt-image-1"], Literal["gpt-image-1-mini"], Literal["gpt-image-1.5"], str]
+ ] = None,
+ quality: Optional[Literal["low", "medium", "high", "auto"]] = None,
+ size: Optional[Literal["1024x1024", "1024x1536", "1536x1024", "auto"]] = None,
+ output_format: Optional[Literal["png", "webp", "jpeg"]] = None,
+ output_compression: Optional[int] = None,
+ moderation: Optional[Literal["auto", "low"]] = None,
+ background: Optional[Literal["transparent", "opaque", "auto"]] = None,
+ input_fidelity: Optional[Union[str, "_models.InputFidelity"]] = None,
+ input_image_mask: Optional["_models.ImageGenToolInputImageMask"] = None,
+ partial_images: Optional[int] = None,
+ action: Optional[Union[str, "_models.ImageGenActionEnum"]] = None,
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ToolType.IMAGE_GENERATION # type: ignore
+
+
+class ImageGenToolInputImageMask(_Model):
+ """ImageGenToolInputImageMask.
+
+ :ivar image_url:
+ :vartype image_url: str
+ :ivar file_id:
+ :vartype file_id: str
+ """
+
+ image_url: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ file_id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+
+ @overload
+ def __init__(
+ self,
+ *,
+ image_url: Optional[str] = None,
+ file_id: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class InlineSkillParam(ContainerSkill, discriminator="inline"):
+ """InlineSkillParam.
+
+ :ivar type: Defines an inline skill for this request. Required. INLINE.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.INLINE
+ :ivar name: The name of the skill. Required.
+ :vartype name: str
+ :ivar description: The description of the skill. Required.
+ :vartype description: str
+ :ivar source: Inline skill payload. Required.
+ :vartype source: ~azure.ai.agentserver.responses.models.models.InlineSkillSourceParam
+ """
+
+ type: Literal[ContainerSkillType.INLINE] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Defines an inline skill for this request. Required. INLINE."""
+ name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The name of the skill. Required."""
+ description: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The description of the skill. Required."""
+ source: "_models.InlineSkillSourceParam" = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Inline skill payload. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ name: str,
+ description: str,
+ source: "_models.InlineSkillSourceParam",
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ContainerSkillType.INLINE # type: ignore
+
+
+class InlineSkillSourceParam(_Model):
+ """Inline skill payload.
+
+ :ivar type: The type of the inline skill source. Must be ``base64``. Required. Default value is
+ "base64".
+ :vartype type: str
+ :ivar media_type: The media type of the inline skill payload. Must be ``application/zip``.
+ Required. Default value is "application/zip".
+ :vartype media_type: str
+ :ivar data: Base64-encoded skill zip bundle. Required.
+ :vartype data: str
+ """
+
+ type: Literal["base64"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The type of the inline skill source. Must be ``base64``. Required. Default value is \"base64\"."""
+ media_type: Literal["application/zip"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The media type of the inline skill payload. Must be ``application/zip``. Required. Default
+ value is \"application/zip\"."""
+ data: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Base64-encoded skill zip bundle. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ data: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type: Literal["base64"] = "base64"
+ self.media_type: Literal["application/zip"] = "application/zip"
+
+
+class InputFileContent(_Model):
+ """Input file.
+
+ :ivar type: The type of the input item. Always ``input_file``. Required. Default value is
+ "input_file".
+ :vartype type: str
+ :ivar file_id:
+ :vartype file_id: str
+ :ivar filename: The name of the file to be sent to the model.
+ :vartype filename: str
+ :ivar file_url: The URL of the file to be sent to the model.
+ :vartype file_url: str
+ :ivar file_data: The content of the file to be sent to the model.
+ :vartype file_data: str
+ """
+
+ type: Literal["input_file"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The type of the input item. Always ``input_file``. Required. Default value is \"input_file\"."""
+ file_id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ filename: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The name of the file to be sent to the model."""
+ file_url: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The URL of the file to be sent to the model."""
+ file_data: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The content of the file to be sent to the model."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ file_id: Optional[str] = None,
+ filename: Optional[str] = None,
+ file_url: Optional[str] = None,
+ file_data: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type: Literal["input_file"] = "input_file"
+
+
+class InputFileContentParam(_Model):
+ """Input file.
+
+ :ivar type: The type of the input item. Always ``input_file``. Required. Default value is
+ "input_file".
+ :vartype type: str
+ :ivar file_id:
+ :vartype file_id: str
+ :ivar filename:
+ :vartype filename: str
+ :ivar file_data:
+ :vartype file_data: str
+ :ivar file_url:
+ :vartype file_url: str
+ """
+
+ type: Literal["input_file"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The type of the input item. Always ``input_file``. Required. Default value is \"input_file\"."""
+ file_id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ filename: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ file_data: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ file_url: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+
+ @overload
+ def __init__(
+ self,
+ *,
+ file_id: Optional[str] = None,
+ filename: Optional[str] = None,
+ file_data: Optional[str] = None,
+ file_url: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type: Literal["input_file"] = "input_file"
+
+
+class InputImageContent(_Model):
+ """Input image.
+
+ :ivar type: The type of the input item. Always ``input_image``. Required. Default value is
+ "input_image".
+ :vartype type: str
+ :ivar image_url:
+ :vartype image_url: str
+ :ivar file_id:
+ :vartype file_id: str
+ :ivar detail: The detail level of the image to be sent to the model. One of ``high``, ``low``,
+ or ``auto``. Defaults to ``auto``. Required. Known values are: "low", "high", and "auto".
+ :vartype detail: str or ~azure.ai.agentserver.responses.models.models.ImageDetail
+ """
+
+ type: Literal["input_image"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The type of the input item. Always ``input_image``. Required. Default value is \"input_image\"."""
+ image_url: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ file_id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ detail: Union[str, "_models.ImageDetail"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The detail level of the image to be sent to the model. One of ``high``, ``low``, or ``auto``.
+ Defaults to ``auto``. Required. Known values are: \"low\", \"high\", and \"auto\"."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ detail: Union[str, "_models.ImageDetail"],
+ image_url: Optional[str] = None,
+ file_id: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type: Literal["input_image"] = "input_image"
+
+
+class InputImageContentParamAutoParam(_Model):
+ """Input image.
+
+ :ivar type: The type of the input item. Always ``input_image``. Required. Default value is
+ "input_image".
+ :vartype type: str
+ :ivar image_url:
+ :vartype image_url: str
+ :ivar file_id:
+ :vartype file_id: str
+ :ivar detail: Known values are: "low", "high", and "auto".
+ :vartype detail: str or ~azure.ai.agentserver.responses.models.models.DetailEnum
+ """
+
+ type: Literal["input_image"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The type of the input item. Always ``input_image``. Required. Default value is \"input_image\"."""
+ image_url: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ file_id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ detail: Optional[Union[str, "_models.DetailEnum"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Known values are: \"low\", \"high\", and \"auto\"."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ image_url: Optional[str] = None,
+ file_id: Optional[str] = None,
+ detail: Optional[Union[str, "_models.DetailEnum"]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type: Literal["input_image"] = "input_image"
+
+
+class InputTextContent(_Model):
+ """Input text.
+
+ :ivar type: The type of the input item. Always ``input_text``. Required. Default value is
+ "input_text".
+ :vartype type: str
+ :ivar text: The text input to the model. Required.
+ :vartype text: str
+ """
+
+ type: Literal["input_text"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The type of the input item. Always ``input_text``. Required. Default value is \"input_text\"."""
+ text: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The text input to the model. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ text: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type: Literal["input_text"] = "input_text"
+
+
+class InputTextContentParam(_Model):
+ """Input text.
+
+ :ivar type: The type of the input item. Always ``input_text``. Required. Default value is
+ "input_text".
+ :vartype type: str
+ :ivar text: The text input to the model. Required.
+ :vartype text: str
+ """
+
+ type: Literal["input_text"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The type of the input item. Always ``input_text``. Required. Default value is \"input_text\"."""
+ text: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The text input to the model. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ text: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type: Literal["input_text"] = "input_text"
+
+
+class ItemCodeInterpreterToolCall(Item, discriminator="code_interpreter_call"):
+ """Code interpreter tool call.
+
+ :ivar type: The type of the code interpreter tool call. Always ``code_interpreter_call``.
+ Required. CODE_INTERPRETER_CALL.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.CODE_INTERPRETER_CALL
+ :ivar id: The unique ID of the code interpreter tool call. Required.
+ :vartype id: str
+ :ivar status: The status of the code interpreter tool call. Valid values are ``in_progress``,
+ ``completed``, ``incomplete``, ``interpreting``, and ``failed``. Required. Is one of the
+ following types: Literal["in_progress"], Literal["completed"], Literal["incomplete"],
+ Literal["interpreting"], Literal["failed"]
+ :vartype status: str or str or str or str or str
+ :ivar container_id: The ID of the container used to run the code. Required.
+ :vartype container_id: str
+ :ivar code: Required.
+ :vartype code: str
+ :ivar outputs: Required.
+ :vartype outputs: list[~azure.ai.agentserver.responses.models.models.CodeInterpreterOutputLogs
+ or ~azure.ai.agentserver.responses.models.models.CodeInterpreterOutputImage]
+ """
+
+ type: Literal[ItemType.CODE_INTERPRETER_CALL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the code interpreter tool call. Always ``code_interpreter_call``. Required.
+ CODE_INTERPRETER_CALL."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the code interpreter tool call. Required."""
+ status: Literal["in_progress", "completed", "incomplete", "interpreting", "failed"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the code interpreter tool call. Valid values are ``in_progress``, ``completed``,
+ ``incomplete``, ``interpreting``, and ``failed``. Required. Is one of the following types:
+ Literal[\"in_progress\"], Literal[\"completed\"], Literal[\"incomplete\"],
+ Literal[\"interpreting\"], Literal[\"failed\"]"""
+ container_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the container used to run the code. Required."""
+ code: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+ outputs: list[Union["_models.CodeInterpreterOutputLogs", "_models.CodeInterpreterOutputImage"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ status: Literal["in_progress", "completed", "incomplete", "interpreting", "failed"],
+ container_id: str,
+ code: str,
+ outputs: list[Union["_models.CodeInterpreterOutputLogs", "_models.CodeInterpreterOutputImage"]],
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ItemType.CODE_INTERPRETER_CALL # type: ignore
+
+
+class ItemComputerToolCall(Item, discriminator="computer_call"):
+ """Computer tool call.
+
+ :ivar type: The type of the computer call. Always ``computer_call``. Required. COMPUTER_CALL.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.COMPUTER_CALL
+ :ivar id: The unique ID of the computer call. Required.
+ :vartype id: str
+ :ivar call_id: An identifier used when responding to the tool call with output. Required.
+ :vartype call_id: str
+ :ivar action: Required.
+ :vartype action: ~azure.ai.agentserver.responses.models.models.ComputerAction
+ :ivar pending_safety_checks: The pending safety checks for the computer call. Required.
+ :vartype pending_safety_checks:
+ list[~azure.ai.agentserver.responses.models.models.ComputerCallSafetyCheckParam]
+ :ivar status: The status of the item. One of ``in_progress``, ``completed``, or ``incomplete``.
+ Populated when items are returned via API. Required. Is one of the following types:
+ Literal["in_progress"], Literal["completed"], Literal["incomplete"]
+ :vartype status: str or str or str
+ """
+
+ type: Literal[ItemType.COMPUTER_CALL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the computer call. Always ``computer_call``. Required. COMPUTER_CALL."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the computer call. Required."""
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """An identifier used when responding to the tool call with output. Required."""
+ action: "_models.ComputerAction" = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+ pending_safety_checks: list["_models.ComputerCallSafetyCheckParam"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The pending safety checks for the computer call. Required."""
+ status: Literal["in_progress", "completed", "incomplete"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the item. One of ``in_progress``, ``completed``, or ``incomplete``. Populated
+ when items are returned via API. Required. Is one of the following types:
+ Literal[\"in_progress\"], Literal[\"completed\"], Literal[\"incomplete\"]"""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ call_id: str,
+ action: "_models.ComputerAction",
+ pending_safety_checks: list["_models.ComputerCallSafetyCheckParam"],
+ status: Literal["in_progress", "completed", "incomplete"],
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ItemType.COMPUTER_CALL # type: ignore
+
+
+class ItemCustomToolCall(Item, discriminator="custom_tool_call"):
+ """Custom tool call.
+
+ :ivar type: The type of the custom tool call. Always ``custom_tool_call``. Required.
+ CUSTOM_TOOL_CALL.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.CUSTOM_TOOL_CALL
+ :ivar id: The unique ID of the custom tool call in the OpenAI platform.
+ :vartype id: str
+ :ivar call_id: An identifier used to map this custom tool call to a tool call output. Required.
+ :vartype call_id: str
+ :ivar name: The name of the custom tool being called. Required.
+ :vartype name: str
+ :ivar input: The input for the custom tool call generated by the model. Required.
+ :vartype input: str
+ """
+
+ type: Literal[ItemType.CUSTOM_TOOL_CALL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the custom tool call. Always ``custom_tool_call``. Required. CUSTOM_TOOL_CALL."""
+ id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the custom tool call in the OpenAI platform."""
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """An identifier used to map this custom tool call to a tool call output. Required."""
+ name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The name of the custom tool being called. Required."""
+ input: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The input for the custom tool call generated by the model. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ call_id: str,
+ name: str,
+ input: str,
+ id: Optional[str] = None, # pylint: disable=redefined-builtin
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ItemType.CUSTOM_TOOL_CALL # type: ignore
+
+
+class ItemCustomToolCallOutput(Item, discriminator="custom_tool_call_output"):
+ """Custom tool call output.
+
+ :ivar type: The type of the custom tool call output. Always ``custom_tool_call_output``.
+ Required. CUSTOM_TOOL_CALL_OUTPUT.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.CUSTOM_TOOL_CALL_OUTPUT
+ :ivar id: The unique ID of the custom tool call output in the OpenAI platform.
+ :vartype id: str
+ :ivar call_id: The call ID, used to map this custom tool call output to a custom tool call.
+ Required.
+ :vartype call_id: str
+ :ivar output: The output from the custom tool call generated by your code. Can be a string or
+ an list of output content. Required. Is either a str type or a
+ [FunctionAndCustomToolCallOutput] type.
+ :vartype output: str or
+ list[~azure.ai.agentserver.responses.models.models.FunctionAndCustomToolCallOutput]
+ """
+
+ type: Literal[ItemType.CUSTOM_TOOL_CALL_OUTPUT] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the custom tool call output. Always ``custom_tool_call_output``. Required.
+ CUSTOM_TOOL_CALL_OUTPUT."""
+ id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the custom tool call output in the OpenAI platform."""
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The call ID, used to map this custom tool call output to a custom tool call. Required."""
+ output: Union[str, list["_models.FunctionAndCustomToolCallOutput"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The output from the custom tool call generated by your code. Can be a string or an list of
+ output content. Required. Is either a str type or a [FunctionAndCustomToolCallOutput] type."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ call_id: str,
+ output: Union[str, list["_models.FunctionAndCustomToolCallOutput"]],
+ id: Optional[str] = None, # pylint: disable=redefined-builtin
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ItemType.CUSTOM_TOOL_CALL_OUTPUT # type: ignore
+
+
+class ItemFieldApplyPatchToolCall(ItemField, discriminator="apply_patch_call"):
+ """Apply patch tool call.
+
+ :ivar type: The type of the item. Always ``apply_patch_call``. Required. APPLY_PATCH_CALL.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.APPLY_PATCH_CALL
+ :ivar id: The unique ID of the apply patch tool call. Populated when this item is returned via
+ API. Required.
+ :vartype id: str
+ :ivar call_id: The unique ID of the apply patch tool call generated by the model. Required.
+ :vartype call_id: str
+ :ivar status: The status of the apply patch tool call. One of ``in_progress`` or ``completed``.
+ Required. Known values are: "in_progress" and "completed".
+ :vartype status: str or ~azure.ai.agentserver.responses.models.models.ApplyPatchCallStatus
+ :ivar operation: Apply patch operation. Required.
+ :vartype operation: ~azure.ai.agentserver.responses.models.models.ApplyPatchFileOperation
+ :ivar created_by: The ID of the entity that created this tool call.
+ :vartype created_by: str
+ """
+
+ type: Literal[ItemFieldType.APPLY_PATCH_CALL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the item. Always ``apply_patch_call``. Required. APPLY_PATCH_CALL."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the apply patch tool call. Populated when this item is returned via API.
+ Required."""
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the apply patch tool call generated by the model. Required."""
+ status: Union[str, "_models.ApplyPatchCallStatus"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the apply patch tool call. One of ``in_progress`` or ``completed``. Required.
+ Known values are: \"in_progress\" and \"completed\"."""
+ operation: "_models.ApplyPatchFileOperation" = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Apply patch operation. Required."""
+ created_by: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the entity that created this tool call."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ call_id: str,
+ status: Union[str, "_models.ApplyPatchCallStatus"],
+ operation: "_models.ApplyPatchFileOperation",
+ created_by: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ItemFieldType.APPLY_PATCH_CALL # type: ignore
+
+
+class ItemFieldApplyPatchToolCallOutput(ItemField, discriminator="apply_patch_call_output"):
+ """Apply patch tool call output.
+
+ :ivar type: The type of the item. Always ``apply_patch_call_output``. Required.
+ APPLY_PATCH_CALL_OUTPUT.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.APPLY_PATCH_CALL_OUTPUT
+ :ivar id: The unique ID of the apply patch tool call output. Populated when this item is
+ returned via API. Required.
+ :vartype id: str
+ :ivar call_id: The unique ID of the apply patch tool call generated by the model. Required.
+ :vartype call_id: str
+ :ivar status: The status of the apply patch tool call output. One of ``completed`` or
+ ``failed``. Required. Known values are: "completed" and "failed".
+ :vartype status: str or
+ ~azure.ai.agentserver.responses.models.models.ApplyPatchCallOutputStatus
+ :ivar output:
+ :vartype output: str
+ :ivar created_by: The ID of the entity that created this tool call output.
+ :vartype created_by: str
+ """
+
+ type: Literal[ItemFieldType.APPLY_PATCH_CALL_OUTPUT] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the item. Always ``apply_patch_call_output``. Required. APPLY_PATCH_CALL_OUTPUT."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the apply patch tool call output. Populated when this item is returned via
+ API. Required."""
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the apply patch tool call generated by the model. Required."""
+ status: Union[str, "_models.ApplyPatchCallOutputStatus"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the apply patch tool call output. One of ``completed`` or ``failed``. Required.
+ Known values are: \"completed\" and \"failed\"."""
+ output: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ created_by: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the entity that created this tool call output."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ call_id: str,
+ status: Union[str, "_models.ApplyPatchCallOutputStatus"],
+ output: Optional[str] = None,
+ created_by: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ItemFieldType.APPLY_PATCH_CALL_OUTPUT # type: ignore
+
+
+class ItemFieldCodeInterpreterToolCall(ItemField, discriminator="code_interpreter_call"):
+ """Code interpreter tool call.
+
+ :ivar type: The type of the code interpreter tool call. Always ``code_interpreter_call``.
+ Required. CODE_INTERPRETER_CALL.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.CODE_INTERPRETER_CALL
+ :ivar id: The unique ID of the code interpreter tool call. Required.
+ :vartype id: str
+ :ivar status: The status of the code interpreter tool call. Valid values are ``in_progress``,
+ ``completed``, ``incomplete``, ``interpreting``, and ``failed``. Required. Is one of the
+ following types: Literal["in_progress"], Literal["completed"], Literal["incomplete"],
+ Literal["interpreting"], Literal["failed"]
+ :vartype status: str or str or str or str or str
+ :ivar container_id: The ID of the container used to run the code. Required.
+ :vartype container_id: str
+ :ivar code: Required.
+ :vartype code: str
+ :ivar outputs: Required.
+ :vartype outputs: list[~azure.ai.agentserver.responses.models.models.CodeInterpreterOutputLogs
+ or ~azure.ai.agentserver.responses.models.models.CodeInterpreterOutputImage]
+ """
+
+ type: Literal[ItemFieldType.CODE_INTERPRETER_CALL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the code interpreter tool call. Always ``code_interpreter_call``. Required.
+ CODE_INTERPRETER_CALL."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the code interpreter tool call. Required."""
+ status: Literal["in_progress", "completed", "incomplete", "interpreting", "failed"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the code interpreter tool call. Valid values are ``in_progress``, ``completed``,
+ ``incomplete``, ``interpreting``, and ``failed``. Required. Is one of the following types:
+ Literal[\"in_progress\"], Literal[\"completed\"], Literal[\"incomplete\"],
+ Literal[\"interpreting\"], Literal[\"failed\"]"""
+ container_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the container used to run the code. Required."""
+ code: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+ outputs: list[Union["_models.CodeInterpreterOutputLogs", "_models.CodeInterpreterOutputImage"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ status: Literal["in_progress", "completed", "incomplete", "interpreting", "failed"],
+ container_id: str,
+ code: str,
+ outputs: list[Union["_models.CodeInterpreterOutputLogs", "_models.CodeInterpreterOutputImage"]],
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ItemFieldType.CODE_INTERPRETER_CALL # type: ignore
+
+
+class ItemFieldCompactionBody(ItemField, discriminator="compaction"):
+ """Compaction item.
+
+ :ivar type: The type of the item. Always ``compaction``. Required. COMPACTION.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.COMPACTION
+ :ivar id: The unique ID of the compaction item. Required.
+ :vartype id: str
+ :ivar encrypted_content: The encrypted content that was produced by compaction. Required.
+ :vartype encrypted_content: str
+ :ivar created_by: The identifier of the actor that created the item.
+ :vartype created_by: str
+ """
+
+ type: Literal[ItemFieldType.COMPACTION] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the item. Always ``compaction``. Required. COMPACTION."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the compaction item. Required."""
+ encrypted_content: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The encrypted content that was produced by compaction. Required."""
+ created_by: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The identifier of the actor that created the item."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ encrypted_content: str,
+ created_by: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ItemFieldType.COMPACTION # type: ignore
+
+
+class ItemFieldComputerToolCall(ItemField, discriminator="computer_call"):
+ """Computer tool call.
+
+ :ivar type: The type of the computer call. Always ``computer_call``. Required. COMPUTER_CALL.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.COMPUTER_CALL
+ :ivar id: The unique ID of the computer call. Required.
+ :vartype id: str
+ :ivar call_id: An identifier used when responding to the tool call with output. Required.
+ :vartype call_id: str
+ :ivar action: Required.
+ :vartype action: ~azure.ai.agentserver.responses.models.models.ComputerAction
+ :ivar pending_safety_checks: The pending safety checks for the computer call. Required.
+ :vartype pending_safety_checks:
+ list[~azure.ai.agentserver.responses.models.models.ComputerCallSafetyCheckParam]
+ :ivar status: The status of the item. One of ``in_progress``, ``completed``, or ``incomplete``.
+ Populated when items are returned via API. Required. Is one of the following types:
+ Literal["in_progress"], Literal["completed"], Literal["incomplete"]
+ :vartype status: str or str or str
+ """
+
+ type: Literal[ItemFieldType.COMPUTER_CALL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the computer call. Always ``computer_call``. Required. COMPUTER_CALL."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the computer call. Required."""
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """An identifier used when responding to the tool call with output. Required."""
+ action: "_models.ComputerAction" = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+ pending_safety_checks: list["_models.ComputerCallSafetyCheckParam"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The pending safety checks for the computer call. Required."""
+ status: Literal["in_progress", "completed", "incomplete"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the item. One of ``in_progress``, ``completed``, or ``incomplete``. Populated
+ when items are returned via API. Required. Is one of the following types:
+ Literal[\"in_progress\"], Literal[\"completed\"], Literal[\"incomplete\"]"""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ call_id: str,
+ action: "_models.ComputerAction",
+ pending_safety_checks: list["_models.ComputerCallSafetyCheckParam"],
+ status: Literal["in_progress", "completed", "incomplete"],
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ItemFieldType.COMPUTER_CALL # type: ignore
+
+
+class ItemFieldComputerToolCallOutputResource(ItemField, discriminator="computer_call_output"):
+ """ItemFieldComputerToolCallOutputResource.
+
+ :ivar type: The type of the computer tool call output. Always ``computer_call_output``.
+ Required. COMPUTER_CALL_OUTPUT.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.COMPUTER_CALL_OUTPUT
+ :ivar id: The ID of the computer tool call output.
+ :vartype id: str
+ :ivar call_id: The ID of the computer tool call that produced the output. Required.
+ :vartype call_id: str
+ :ivar acknowledged_safety_checks: The safety checks reported by the API that have been
+ acknowledged by the developer.
+ :vartype acknowledged_safety_checks:
+ list[~azure.ai.agentserver.responses.models.models.ComputerCallSafetyCheckParam]
+ :ivar output: Required.
+ :vartype output: ~azure.ai.agentserver.responses.models.models.ComputerScreenshotImage
+ :ivar status: The status of the message input. One of ``in_progress``, ``completed``, or
+ ``incomplete``. Populated when input items are returned via API. Is one of the following types:
+ Literal["in_progress"], Literal["completed"], Literal["incomplete"]
+ :vartype status: str or str or str
+ """
+
+ type: Literal[ItemFieldType.COMPUTER_CALL_OUTPUT] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the computer tool call output. Always ``computer_call_output``. Required.
+ COMPUTER_CALL_OUTPUT."""
+ id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the computer tool call output."""
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the computer tool call that produced the output. Required."""
+ acknowledged_safety_checks: Optional[list["_models.ComputerCallSafetyCheckParam"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The safety checks reported by the API that have been acknowledged by the developer."""
+ output: "_models.ComputerScreenshotImage" = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+ status: Optional[Literal["in_progress", "completed", "incomplete"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the message input. One of ``in_progress``, ``completed``, or ``incomplete``.
+ Populated when input items are returned via API. Is one of the following types:
+ Literal[\"in_progress\"], Literal[\"completed\"], Literal[\"incomplete\"]"""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ call_id: str,
+ output: "_models.ComputerScreenshotImage",
+ id: Optional[str] = None, # pylint: disable=redefined-builtin
+ acknowledged_safety_checks: Optional[list["_models.ComputerCallSafetyCheckParam"]] = None,
+ status: Optional[Literal["in_progress", "completed", "incomplete"]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ItemFieldType.COMPUTER_CALL_OUTPUT # type: ignore
+
+
+class ItemFieldCustomToolCall(ItemField, discriminator="custom_tool_call"):
+ """Custom tool call.
+
+ :ivar type: The type of the custom tool call. Always ``custom_tool_call``. Required.
+ CUSTOM_TOOL_CALL.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.CUSTOM_TOOL_CALL
+ :ivar id: The unique ID of the custom tool call in the OpenAI platform.
+ :vartype id: str
+ :ivar call_id: An identifier used to map this custom tool call to a tool call output. Required.
+ :vartype call_id: str
+ :ivar name: The name of the custom tool being called. Required.
+ :vartype name: str
+ :ivar input: The input for the custom tool call generated by the model. Required.
+ :vartype input: str
+ """
+
+ type: Literal[ItemFieldType.CUSTOM_TOOL_CALL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the custom tool call. Always ``custom_tool_call``. Required. CUSTOM_TOOL_CALL."""
+ id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the custom tool call in the OpenAI platform."""
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """An identifier used to map this custom tool call to a tool call output. Required."""
+ name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The name of the custom tool being called. Required."""
+ input: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The input for the custom tool call generated by the model. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ call_id: str,
+ name: str,
+ input: str,
+ id: Optional[str] = None, # pylint: disable=redefined-builtin
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ItemFieldType.CUSTOM_TOOL_CALL # type: ignore
+
+
+class ItemFieldCustomToolCallOutput(ItemField, discriminator="custom_tool_call_output"):
+ """Custom tool call output.
+
+ :ivar type: The type of the custom tool call output. Always ``custom_tool_call_output``.
+ Required. CUSTOM_TOOL_CALL_OUTPUT.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.CUSTOM_TOOL_CALL_OUTPUT
+ :ivar id: The unique ID of the custom tool call output in the OpenAI platform.
+ :vartype id: str
+ :ivar call_id: The call ID, used to map this custom tool call output to a custom tool call.
+ Required.
+ :vartype call_id: str
+ :ivar output: The output from the custom tool call generated by your code. Can be a string or
+ an list of output content. Required. Is either a str type or a
+ [FunctionAndCustomToolCallOutput] type.
+ :vartype output: str or
+ list[~azure.ai.agentserver.responses.models.models.FunctionAndCustomToolCallOutput]
+ """
+
+ type: Literal[ItemFieldType.CUSTOM_TOOL_CALL_OUTPUT] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the custom tool call output. Always ``custom_tool_call_output``. Required.
+ CUSTOM_TOOL_CALL_OUTPUT."""
+ id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the custom tool call output in the OpenAI platform."""
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The call ID, used to map this custom tool call output to a custom tool call. Required."""
+ output: Union[str, list["_models.FunctionAndCustomToolCallOutput"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The output from the custom tool call generated by your code. Can be a string or an list of
+ output content. Required. Is either a str type or a [FunctionAndCustomToolCallOutput] type."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ call_id: str,
+ output: Union[str, list["_models.FunctionAndCustomToolCallOutput"]],
+ id: Optional[str] = None, # pylint: disable=redefined-builtin
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ItemFieldType.CUSTOM_TOOL_CALL_OUTPUT # type: ignore
+
+
+class ItemFieldFileSearchToolCall(ItemField, discriminator="file_search_call"):
+ """File search tool call.
+
+ :ivar id: The unique ID of the file search tool call. Required.
+ :vartype id: str
+ :ivar type: The type of the file search tool call. Always ``file_search_call``. Required.
+ FILE_SEARCH_CALL.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.FILE_SEARCH_CALL
+ :ivar status: The status of the file search tool call. One of ``in_progress``, ``searching``,
+ ``incomplete`` or ``failed``,. Required. Is one of the following types: Literal["in_progress"],
+ Literal["searching"], Literal["completed"], Literal["incomplete"], Literal["failed"]
+ :vartype status: str or str or str or str or str
+ :ivar queries: The queries used to search for files. Required.
+ :vartype queries: list[str]
+ :ivar results:
+ :vartype results: list[~azure.ai.agentserver.responses.models.models.FileSearchToolCallResults]
+ """
+
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the file search tool call. Required."""
+ type: Literal[ItemFieldType.FILE_SEARCH_CALL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the file search tool call. Always ``file_search_call``. Required. FILE_SEARCH_CALL."""
+ status: Literal["in_progress", "searching", "completed", "incomplete", "failed"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the file search tool call. One of ``in_progress``, ``searching``, ``incomplete``
+ or ``failed``,. Required. Is one of the following types: Literal[\"in_progress\"],
+ Literal[\"searching\"], Literal[\"completed\"], Literal[\"incomplete\"], Literal[\"failed\"]"""
+ queries: list[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The queries used to search for files. Required."""
+ results: Optional[list["_models.FileSearchToolCallResults"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ status: Literal["in_progress", "searching", "completed", "incomplete", "failed"],
+ queries: list[str],
+ results: Optional[list["_models.FileSearchToolCallResults"]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ItemFieldType.FILE_SEARCH_CALL # type: ignore
+
+
+class ItemFieldFunctionShellCall(ItemField, discriminator="shell_call"):
+ """Shell tool call.
+
+ :ivar type: The type of the item. Always ``shell_call``. Required. SHELL_CALL.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.SHELL_CALL
+ :ivar id: The unique ID of the shell tool call. Populated when this item is returned via API.
+ Required.
+ :vartype id: str
+ :ivar call_id: The unique ID of the shell tool call generated by the model. Required.
+ :vartype call_id: str
+ :ivar action: The shell commands and limits that describe how to run the tool call. Required.
+ :vartype action: ~azure.ai.agentserver.responses.models.models.FunctionShellAction
+ :ivar status: The status of the shell call. One of ``in_progress``, ``completed``, or
+ ``incomplete``. Required. Known values are: "in_progress", "completed", and "incomplete".
+ :vartype status: str or ~azure.ai.agentserver.responses.models.models.LocalShellCallStatus
+ :ivar environment: Required.
+ :vartype environment:
+ ~azure.ai.agentserver.responses.models.models.FunctionShellCallEnvironment
+ :ivar created_by: The ID of the entity that created this tool call.
+ :vartype created_by: str
+ """
+
+ type: Literal[ItemFieldType.SHELL_CALL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the item. Always ``shell_call``. Required. SHELL_CALL."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the shell tool call. Populated when this item is returned via API. Required."""
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the shell tool call generated by the model. Required."""
+ action: "_models.FunctionShellAction" = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The shell commands and limits that describe how to run the tool call. Required."""
+ status: Union[str, "_models.LocalShellCallStatus"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the shell call. One of ``in_progress``, ``completed``, or ``incomplete``.
+ Required. Known values are: \"in_progress\", \"completed\", and \"incomplete\"."""
+ environment: "_models.FunctionShellCallEnvironment" = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Required."""
+ created_by: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the entity that created this tool call."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ call_id: str,
+ action: "_models.FunctionShellAction",
+ status: Union[str, "_models.LocalShellCallStatus"],
+ environment: "_models.FunctionShellCallEnvironment",
+ created_by: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ItemFieldType.SHELL_CALL # type: ignore
+
+
+class ItemFieldFunctionShellCallOutput(ItemField, discriminator="shell_call_output"):
+ """Shell call output.
+
+ :ivar type: The type of the shell call output. Always ``shell_call_output``. Required.
+ SHELL_CALL_OUTPUT.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.SHELL_CALL_OUTPUT
+ :ivar id: The unique ID of the shell call output. Populated when this item is returned via API.
+ Required.
+ :vartype id: str
+ :ivar call_id: The unique ID of the shell tool call generated by the model. Required.
+ :vartype call_id: str
+ :ivar status: The status of the shell call output. One of ``in_progress``, ``completed``, or
+ ``incomplete``. Required. Known values are: "in_progress", "completed", and "incomplete".
+ :vartype status: str or
+ ~azure.ai.agentserver.responses.models.models.LocalShellCallOutputStatusEnum
+ :ivar output: An array of shell call output contents. Required.
+ :vartype output:
+ list[~azure.ai.agentserver.responses.models.models.FunctionShellCallOutputContent]
+ :ivar max_output_length: Required.
+ :vartype max_output_length: int
+ :ivar created_by: The identifier of the actor that created the item.
+ :vartype created_by: str
+ """
+
+ type: Literal[ItemFieldType.SHELL_CALL_OUTPUT] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the shell call output. Always ``shell_call_output``. Required. SHELL_CALL_OUTPUT."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the shell call output. Populated when this item is returned via API. Required."""
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the shell tool call generated by the model. Required."""
+ status: Union[str, "_models.LocalShellCallOutputStatusEnum"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the shell call output. One of ``in_progress``, ``completed``, or ``incomplete``.
+ Required. Known values are: \"in_progress\", \"completed\", and \"incomplete\"."""
+ output: list["_models.FunctionShellCallOutputContent"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """An array of shell call output contents. Required."""
+ max_output_length: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+ created_by: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The identifier of the actor that created the item."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ call_id: str,
+ status: Union[str, "_models.LocalShellCallOutputStatusEnum"],
+ output: list["_models.FunctionShellCallOutputContent"],
+ max_output_length: int,
+ created_by: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ItemFieldType.SHELL_CALL_OUTPUT # type: ignore
+
+
+class ItemFieldFunctionToolCall(ItemField, discriminator="function_call"):
+ """Function tool call.
+
+ :ivar id: The unique ID of the function tool call.
+ :vartype id: str
+ :ivar type: The type of the function tool call. Always ``function_call``. Required.
+ FUNCTION_CALL.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.FUNCTION_CALL
+ :ivar call_id: The unique ID of the function tool call generated by the model. Required.
+ :vartype call_id: str
+ :ivar name: The name of the function to run. Required.
+ :vartype name: str
+ :ivar arguments: A JSON string of the arguments to pass to the function. Required.
+ :vartype arguments: str
+ :ivar status: The status of the item. One of ``in_progress``, ``completed``, or ``incomplete``.
+ Populated when items are returned via API. Is one of the following types:
+ Literal["in_progress"], Literal["completed"], Literal["incomplete"]
+ :vartype status: str or str or str
+ """
+
+ id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the function tool call."""
+ type: Literal[ItemFieldType.FUNCTION_CALL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the function tool call. Always ``function_call``. Required. FUNCTION_CALL."""
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the function tool call generated by the model. Required."""
+ name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The name of the function to run. Required."""
+ arguments: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """A JSON string of the arguments to pass to the function. Required."""
+ status: Optional[Literal["in_progress", "completed", "incomplete"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the item. One of ``in_progress``, ``completed``, or ``incomplete``. Populated
+ when items are returned via API. Is one of the following types: Literal[\"in_progress\"],
+ Literal[\"completed\"], Literal[\"incomplete\"]"""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ call_id: str,
+ name: str,
+ arguments: str,
+ id: Optional[str] = None, # pylint: disable=redefined-builtin
+ status: Optional[Literal["in_progress", "completed", "incomplete"]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ItemFieldType.FUNCTION_CALL # type: ignore
+
+
+class ItemFieldImageGenToolCall(ItemField, discriminator="image_generation_call"):
+ """Image generation call.
+
+ :ivar type: The type of the image generation call. Always ``image_generation_call``. Required.
+ IMAGE_GENERATION_CALL.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.IMAGE_GENERATION_CALL
+ :ivar id: The unique ID of the image generation call. Required.
+ :vartype id: str
+ :ivar status: The status of the image generation call. Required. Is one of the following types:
+ Literal["in_progress"], Literal["completed"], Literal["generating"], Literal["failed"]
+ :vartype status: str or str or str or str
+ :ivar result: Required.
+ :vartype result: str
+ """
+
+ type: Literal[ItemFieldType.IMAGE_GENERATION_CALL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the image generation call. Always ``image_generation_call``. Required.
+ IMAGE_GENERATION_CALL."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the image generation call. Required."""
+ status: Literal["in_progress", "completed", "generating", "failed"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the image generation call. Required. Is one of the following types:
+ Literal[\"in_progress\"], Literal[\"completed\"], Literal[\"generating\"], Literal[\"failed\"]"""
+ result: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ status: Literal["in_progress", "completed", "generating", "failed"],
+ result: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ItemFieldType.IMAGE_GENERATION_CALL # type: ignore
+
+
+class ItemFieldLocalShellToolCall(ItemField, discriminator="local_shell_call"):
+ """Local shell call.
+
+ :ivar type: The type of the local shell call. Always ``local_shell_call``. Required.
+ LOCAL_SHELL_CALL.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.LOCAL_SHELL_CALL
+ :ivar id: The unique ID of the local shell call. Required.
+ :vartype id: str
+ :ivar call_id: The unique ID of the local shell tool call generated by the model. Required.
+ :vartype call_id: str
+ :ivar action: Required.
+ :vartype action: ~azure.ai.agentserver.responses.models.models.LocalShellExecAction
+ :ivar status: The status of the local shell call. Required. Is one of the following types:
+ Literal["in_progress"], Literal["completed"], Literal["incomplete"]
+ :vartype status: str or str or str
+ """
+
+ type: Literal[ItemFieldType.LOCAL_SHELL_CALL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the local shell call. Always ``local_shell_call``. Required. LOCAL_SHELL_CALL."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the local shell call. Required."""
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the local shell tool call generated by the model. Required."""
+ action: "_models.LocalShellExecAction" = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+ status: Literal["in_progress", "completed", "incomplete"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the local shell call. Required. Is one of the following types:
+ Literal[\"in_progress\"], Literal[\"completed\"], Literal[\"incomplete\"]"""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ call_id: str,
+ action: "_models.LocalShellExecAction",
+ status: Literal["in_progress", "completed", "incomplete"],
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ItemFieldType.LOCAL_SHELL_CALL # type: ignore
+
+
+class ItemFieldLocalShellToolCallOutput(ItemField, discriminator="local_shell_call_output"):
+ """Local shell call output.
+
+ :ivar type: The type of the local shell tool call output. Always ``local_shell_call_output``.
+ Required. LOCAL_SHELL_CALL_OUTPUT.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.LOCAL_SHELL_CALL_OUTPUT
+ :ivar id: The unique ID of the local shell tool call generated by the model. Required.
+ :vartype id: str
+ :ivar output: A JSON string of the output of the local shell tool call. Required.
+ :vartype output: str
+ :ivar status: Is one of the following types: Literal["in_progress"], Literal["completed"],
+ Literal["incomplete"]
+ :vartype status: str or str or str
+ """
+
+ type: Literal[ItemFieldType.LOCAL_SHELL_CALL_OUTPUT] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the local shell tool call output. Always ``local_shell_call_output``. Required.
+ LOCAL_SHELL_CALL_OUTPUT."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the local shell tool call generated by the model. Required."""
+ output: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """A JSON string of the output of the local shell tool call. Required."""
+ status: Optional[Literal["in_progress", "completed", "incomplete"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Is one of the following types: Literal[\"in_progress\"], Literal[\"completed\"],
+ Literal[\"incomplete\"]"""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ output: str,
+ status: Optional[Literal["in_progress", "completed", "incomplete"]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ItemFieldType.LOCAL_SHELL_CALL_OUTPUT # type: ignore
+
+
+class ItemFieldMcpApprovalRequest(ItemField, discriminator="mcp_approval_request"):
+ """MCP approval request.
+
+ :ivar type: The type of the item. Always ``mcp_approval_request``. Required.
+ MCP_APPROVAL_REQUEST.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.MCP_APPROVAL_REQUEST
+ :ivar id: The unique ID of the approval request. Required.
+ :vartype id: str
+ :ivar server_label: The label of the MCP server making the request. Required.
+ :vartype server_label: str
+ :ivar name: The name of the tool to run. Required.
+ :vartype name: str
+ :ivar arguments: A JSON string of arguments for the tool. Required.
+ :vartype arguments: str
+ """
+
+ type: Literal[ItemFieldType.MCP_APPROVAL_REQUEST] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the item. Always ``mcp_approval_request``. Required. MCP_APPROVAL_REQUEST."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the approval request. Required."""
+ server_label: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The label of the MCP server making the request. Required."""
+ name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The name of the tool to run. Required."""
+ arguments: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """A JSON string of arguments for the tool. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ server_label: str,
+ name: str,
+ arguments: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ItemFieldType.MCP_APPROVAL_REQUEST # type: ignore
+
+
+class ItemFieldMcpApprovalResponseResource(ItemField, discriminator="mcp_approval_response"):
+ """MCP approval response.
+
+ :ivar type: The type of the item. Always ``mcp_approval_response``. Required.
+ MCP_APPROVAL_RESPONSE.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.MCP_APPROVAL_RESPONSE
+ :ivar id: The unique ID of the approval response. Required.
+ :vartype id: str
+ :ivar approval_request_id: The ID of the approval request being answered. Required.
+ :vartype approval_request_id: str
+ :ivar approve: Whether the request was approved. Required.
+ :vartype approve: bool
+ :ivar reason:
+ :vartype reason: str
+ """
+
+ type: Literal[ItemFieldType.MCP_APPROVAL_RESPONSE] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the item. Always ``mcp_approval_response``. Required. MCP_APPROVAL_RESPONSE."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the approval response. Required."""
+ approval_request_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the approval request being answered. Required."""
+ approve: bool = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Whether the request was approved. Required."""
+ reason: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ approval_request_id: str,
+ approve: bool,
+ reason: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ItemFieldType.MCP_APPROVAL_RESPONSE # type: ignore
+
+
+class ItemFieldMcpListTools(ItemField, discriminator="mcp_list_tools"):
+ """MCP list tools.
+
+ :ivar type: The type of the item. Always ``mcp_list_tools``. Required. MCP_LIST_TOOLS.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.MCP_LIST_TOOLS
+ :ivar id: The unique ID of the list. Required.
+ :vartype id: str
+ :ivar server_label: The label of the MCP server. Required.
+ :vartype server_label: str
+ :ivar tools: The tools available on the server. Required.
+ :vartype tools: list[~azure.ai.agentserver.responses.models.models.MCPListToolsTool]
+ :ivar error:
+ :vartype error: str
+ """
+
+ type: Literal[ItemFieldType.MCP_LIST_TOOLS] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the item. Always ``mcp_list_tools``. Required. MCP_LIST_TOOLS."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the list. Required."""
+ server_label: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The label of the MCP server. Required."""
+ tools: list["_models.MCPListToolsTool"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The tools available on the server. Required."""
+ error: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ server_label: str,
+ tools: list["_models.MCPListToolsTool"],
+ error: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ItemFieldType.MCP_LIST_TOOLS # type: ignore
+
+
+class ItemFieldMcpToolCall(ItemField, discriminator="mcp_call"):
+ """MCP tool call.
+
+ :ivar type: The type of the item. Always ``mcp_call``. Required. MCP_CALL.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.MCP_CALL
+ :ivar id: The unique ID of the tool call. Required.
+ :vartype id: str
+ :ivar server_label: The label of the MCP server running the tool. Required.
+ :vartype server_label: str
+ :ivar name: The name of the tool that was run. Required.
+ :vartype name: str
+ :ivar arguments: A JSON string of the arguments passed to the tool. Required.
+ :vartype arguments: str
+ :ivar output:
+ :vartype output: str
+ :ivar error:
+ :vartype error: dict[str, any]
+ :ivar status: The status of the tool call. One of ``in_progress``, ``completed``,
+ ``incomplete``, ``calling``, or ``failed``. Known values are: "in_progress", "completed",
+ "incomplete", "calling", and "failed".
+ :vartype status: str or ~azure.ai.agentserver.responses.models.models.MCPToolCallStatus
+ :ivar approval_request_id:
+ :vartype approval_request_id: str
+ """
+
+ type: Literal[ItemFieldType.MCP_CALL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the item. Always ``mcp_call``. Required. MCP_CALL."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the tool call. Required."""
+ server_label: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The label of the MCP server running the tool. Required."""
+ name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The name of the tool that was run. Required."""
+ arguments: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """A JSON string of the arguments passed to the tool. Required."""
+ output: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ error: Optional[dict[str, Any]] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ status: Optional[Union[str, "_models.MCPToolCallStatus"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the tool call. One of ``in_progress``, ``completed``, ``incomplete``,
+ ``calling``, or ``failed``. Known values are: \"in_progress\", \"completed\", \"incomplete\",
+ \"calling\", and \"failed\"."""
+ approval_request_id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ server_label: str,
+ name: str,
+ arguments: str,
+ output: Optional[str] = None,
+ error: Optional[dict[str, Any]] = None,
+ status: Optional[Union[str, "_models.MCPToolCallStatus"]] = None,
+ approval_request_id: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ItemFieldType.MCP_CALL # type: ignore
+
+
+class ItemFieldMessage(ItemField, discriminator="message"):
+ """Message.
+
+ :ivar type: The type of the message. Always set to ``message``. Required. MESSAGE.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.MESSAGE
+ :ivar id: The unique ID of the message. Required.
+ :vartype id: str
+ :ivar status: The status of item. One of ``in_progress``, ``completed``, or ``incomplete``.
+ Populated when items are returned via API. Required. Known values are: "in_progress",
+ "completed", and "incomplete".
+ :vartype status: str or ~azure.ai.agentserver.responses.models.models.MessageStatus
+ :ivar role: The role of the message. One of ``unknown``, ``user``, ``assistant``, ``system``,
+ ``critic``, ``discriminator``, ``developer``, or ``tool``. Required. Known values are:
+ "unknown", "user", "assistant", "system", "critic", "discriminator", "developer", and "tool".
+ :vartype role: str or ~azure.ai.agentserver.responses.models.models.MessageRole
+ :ivar content: The content of the message. Required.
+ :vartype content: list[~azure.ai.agentserver.responses.models.models.MessageContent]
+ """
+
+ type: Literal[ItemFieldType.MESSAGE] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the message. Always set to ``message``. Required. MESSAGE."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the message. Required."""
+ status: Union[str, "_models.MessageStatus"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The status of item. One of ``in_progress``, ``completed``, or ``incomplete``. Populated when
+ items are returned via API. Required. Known values are: \"in_progress\", \"completed\", and
+ \"incomplete\"."""
+ role: Union[str, "_models.MessageRole"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The role of the message. One of ``unknown``, ``user``, ``assistant``, ``system``, ``critic``,
+ ``discriminator``, ``developer``, or ``tool``. Required. Known values are: \"unknown\",
+ \"user\", \"assistant\", \"system\", \"critic\", \"discriminator\", \"developer\", and
+ \"tool\"."""
+ content: list["_models.MessageContent"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The content of the message. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ status: Union[str, "_models.MessageStatus"],
+ role: Union[str, "_models.MessageRole"],
+ content: list["_models.MessageContent"],
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ItemFieldType.MESSAGE # type: ignore
+
+
+class ItemFieldReasoningItem(ItemField, discriminator="reasoning"):
+ """Reasoning.
+
+ :ivar type: The type of the object. Always ``reasoning``. Required. REASONING.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.REASONING
+ :ivar id: The unique identifier of the reasoning content. Required.
+ :vartype id: str
+ :ivar encrypted_content:
+ :vartype encrypted_content: str
+ :ivar summary: Reasoning summary content. Required.
+ :vartype summary: list[~azure.ai.agentserver.responses.models.models.SummaryTextContent]
+ :ivar content: Reasoning text content.
+ :vartype content: list[~azure.ai.agentserver.responses.models.models.ReasoningTextContent]
+ :ivar status: The status of the item. One of ``in_progress``, ``completed``, or ``incomplete``.
+ Populated when items are returned via API. Is one of the following types:
+ Literal["in_progress"], Literal["completed"], Literal["incomplete"]
+ :vartype status: str or str or str
+ """
+
+ type: Literal[ItemFieldType.REASONING] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the object. Always ``reasoning``. Required. REASONING."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique identifier of the reasoning content. Required."""
+ encrypted_content: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ summary: list["_models.SummaryTextContent"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Reasoning summary content. Required."""
+ content: Optional[list["_models.ReasoningTextContent"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Reasoning text content."""
+ status: Optional[Literal["in_progress", "completed", "incomplete"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the item. One of ``in_progress``, ``completed``, or ``incomplete``. Populated
+ when items are returned via API. Is one of the following types: Literal[\"in_progress\"],
+ Literal[\"completed\"], Literal[\"incomplete\"]"""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ summary: list["_models.SummaryTextContent"],
+ encrypted_content: Optional[str] = None,
+ content: Optional[list["_models.ReasoningTextContent"]] = None,
+ status: Optional[Literal["in_progress", "completed", "incomplete"]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ItemFieldType.REASONING # type: ignore
+
+
+class ItemFieldWebSearchToolCall(ItemField, discriminator="web_search_call"):
+ """Web search tool call.
+
+ :ivar id: The unique ID of the web search tool call. Required.
+ :vartype id: str
+ :ivar type: The type of the web search tool call. Always ``web_search_call``. Required.
+ WEB_SEARCH_CALL.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.WEB_SEARCH_CALL
+ :ivar status: The status of the web search tool call. Required. Is one of the following types:
+ Literal["in_progress"], Literal["searching"], Literal["completed"], Literal["failed"]
+ :vartype status: str or str or str or str
+ :ivar action: An object describing the specific action taken in this web search call. Includes
+ details on how the model used the web (search, open_page, find_in_page). Required. Is one of
+ the following types: WebSearchActionSearch, WebSearchActionOpenPage, WebSearchActionFind
+ :vartype action: ~azure.ai.agentserver.responses.models.models.WebSearchActionSearch or
+ ~azure.ai.agentserver.responses.models.models.WebSearchActionOpenPage or
+ ~azure.ai.agentserver.responses.models.models.WebSearchActionFind
+ """
+
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the web search tool call. Required."""
+ type: Literal[ItemFieldType.WEB_SEARCH_CALL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the web search tool call. Always ``web_search_call``. Required. WEB_SEARCH_CALL."""
+ status: Literal["in_progress", "searching", "completed", "failed"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the web search tool call. Required. Is one of the following types:
+ Literal[\"in_progress\"], Literal[\"searching\"], Literal[\"completed\"], Literal[\"failed\"]"""
+ action: Union["_models.WebSearchActionSearch", "_models.WebSearchActionOpenPage", "_models.WebSearchActionFind"] = (
+ rest_field(visibility=["read", "create", "update", "delete", "query"])
+ )
+ """An object describing the specific action taken in this web search call. Includes details on how
+ the model used the web (search, open_page, find_in_page). Required. Is one of the following
+ types: WebSearchActionSearch, WebSearchActionOpenPage, WebSearchActionFind"""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ status: Literal["in_progress", "searching", "completed", "failed"],
+ action: Union[
+ "_models.WebSearchActionSearch", "_models.WebSearchActionOpenPage", "_models.WebSearchActionFind"
+ ],
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ItemFieldType.WEB_SEARCH_CALL # type: ignore
+
+
+class ItemFileSearchToolCall(Item, discriminator="file_search_call"):
+ """File search tool call.
+
+ :ivar id: The unique ID of the file search tool call. Required.
+ :vartype id: str
+ :ivar type: The type of the file search tool call. Always ``file_search_call``. Required.
+ FILE_SEARCH_CALL.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.FILE_SEARCH_CALL
+ :ivar status: The status of the file search tool call. One of ``in_progress``, ``searching``,
+ ``incomplete`` or ``failed``,. Required. Is one of the following types: Literal["in_progress"],
+ Literal["searching"], Literal["completed"], Literal["incomplete"], Literal["failed"]
+ :vartype status: str or str or str or str or str
+ :ivar queries: The queries used to search for files. Required.
+ :vartype queries: list[str]
+ :ivar results:
+ :vartype results: list[~azure.ai.agentserver.responses.models.models.FileSearchToolCallResults]
+ """
+
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the file search tool call. Required."""
+ type: Literal[ItemType.FILE_SEARCH_CALL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the file search tool call. Always ``file_search_call``. Required. FILE_SEARCH_CALL."""
+ status: Literal["in_progress", "searching", "completed", "incomplete", "failed"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the file search tool call. One of ``in_progress``, ``searching``, ``incomplete``
+ or ``failed``,. Required. Is one of the following types: Literal[\"in_progress\"],
+ Literal[\"searching\"], Literal[\"completed\"], Literal[\"incomplete\"], Literal[\"failed\"]"""
+ queries: list[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The queries used to search for files. Required."""
+ results: Optional[list["_models.FileSearchToolCallResults"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ status: Literal["in_progress", "searching", "completed", "incomplete", "failed"],
+ queries: list[str],
+ results: Optional[list["_models.FileSearchToolCallResults"]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ItemType.FILE_SEARCH_CALL # type: ignore
+
+
+class ItemFunctionToolCall(Item, discriminator="function_call"):
+ """Function tool call.
+
+ :ivar id: The unique ID of the function tool call.
+ :vartype id: str
+ :ivar type: The type of the function tool call. Always ``function_call``. Required.
+ FUNCTION_CALL.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.FUNCTION_CALL
+ :ivar call_id: The unique ID of the function tool call generated by the model. Required.
+ :vartype call_id: str
+ :ivar name: The name of the function to run. Required.
+ :vartype name: str
+ :ivar arguments: A JSON string of the arguments to pass to the function. Required.
+ :vartype arguments: str
+ :ivar status: The status of the item. One of ``in_progress``, ``completed``, or ``incomplete``.
+ Populated when items are returned via API. Is one of the following types:
+ Literal["in_progress"], Literal["completed"], Literal["incomplete"]
+ :vartype status: str or str or str
+ """
+
+ id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the function tool call."""
+ type: Literal[ItemType.FUNCTION_CALL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the function tool call. Always ``function_call``. Required. FUNCTION_CALL."""
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the function tool call generated by the model. Required."""
+ name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The name of the function to run. Required."""
+ arguments: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """A JSON string of the arguments to pass to the function. Required."""
+ status: Optional[Literal["in_progress", "completed", "incomplete"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the item. One of ``in_progress``, ``completed``, or ``incomplete``. Populated
+ when items are returned via API. Is one of the following types: Literal[\"in_progress\"],
+ Literal[\"completed\"], Literal[\"incomplete\"]"""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ call_id: str,
+ name: str,
+ arguments: str,
+ id: Optional[str] = None, # pylint: disable=redefined-builtin
+ status: Optional[Literal["in_progress", "completed", "incomplete"]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ItemType.FUNCTION_CALL # type: ignore
+
+
+class ItemImageGenToolCall(Item, discriminator="image_generation_call"):
+ """Image generation call.
+
+ :ivar type: The type of the image generation call. Always ``image_generation_call``. Required.
+ IMAGE_GENERATION_CALL.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.IMAGE_GENERATION_CALL
+ :ivar id: The unique ID of the image generation call. Required.
+ :vartype id: str
+ :ivar status: The status of the image generation call. Required. Is one of the following types:
+ Literal["in_progress"], Literal["completed"], Literal["generating"], Literal["failed"]
+ :vartype status: str or str or str or str
+ :ivar result: Required.
+ :vartype result: str
+ """
+
+ type: Literal[ItemType.IMAGE_GENERATION_CALL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the image generation call. Always ``image_generation_call``. Required.
+ IMAGE_GENERATION_CALL."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the image generation call. Required."""
+ status: Literal["in_progress", "completed", "generating", "failed"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the image generation call. Required. Is one of the following types:
+ Literal[\"in_progress\"], Literal[\"completed\"], Literal[\"generating\"], Literal[\"failed\"]"""
+ result: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ status: Literal["in_progress", "completed", "generating", "failed"],
+ result: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ItemType.IMAGE_GENERATION_CALL # type: ignore
+
+
+class ItemLocalShellToolCall(Item, discriminator="local_shell_call"):
+ """Local shell call.
+
+ :ivar type: The type of the local shell call. Always ``local_shell_call``. Required.
+ LOCAL_SHELL_CALL.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.LOCAL_SHELL_CALL
+ :ivar id: The unique ID of the local shell call. Required.
+ :vartype id: str
+ :ivar call_id: The unique ID of the local shell tool call generated by the model. Required.
+ :vartype call_id: str
+ :ivar action: Required.
+ :vartype action: ~azure.ai.agentserver.responses.models.models.LocalShellExecAction
+ :ivar status: The status of the local shell call. Required. Is one of the following types:
+ Literal["in_progress"], Literal["completed"], Literal["incomplete"]
+ :vartype status: str or str or str
+ """
+
+ type: Literal[ItemType.LOCAL_SHELL_CALL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the local shell call. Always ``local_shell_call``. Required. LOCAL_SHELL_CALL."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the local shell call. Required."""
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the local shell tool call generated by the model. Required."""
+ action: "_models.LocalShellExecAction" = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+ status: Literal["in_progress", "completed", "incomplete"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the local shell call. Required. Is one of the following types:
+ Literal[\"in_progress\"], Literal[\"completed\"], Literal[\"incomplete\"]"""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ call_id: str,
+ action: "_models.LocalShellExecAction",
+ status: Literal["in_progress", "completed", "incomplete"],
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ItemType.LOCAL_SHELL_CALL # type: ignore
+
+
+class ItemLocalShellToolCallOutput(Item, discriminator="local_shell_call_output"):
+ """Local shell call output.
+
+ :ivar type: The type of the local shell tool call output. Always ``local_shell_call_output``.
+ Required. LOCAL_SHELL_CALL_OUTPUT.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.LOCAL_SHELL_CALL_OUTPUT
+ :ivar id: The unique ID of the local shell tool call generated by the model. Required.
+ :vartype id: str
+ :ivar output: A JSON string of the output of the local shell tool call. Required.
+ :vartype output: str
+ :ivar status: Is one of the following types: Literal["in_progress"], Literal["completed"],
+ Literal["incomplete"]
+ :vartype status: str or str or str
+ """
+
+ type: Literal[ItemType.LOCAL_SHELL_CALL_OUTPUT] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the local shell tool call output. Always ``local_shell_call_output``. Required.
+ LOCAL_SHELL_CALL_OUTPUT."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the local shell tool call generated by the model. Required."""
+ output: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """A JSON string of the output of the local shell tool call. Required."""
+ status: Optional[Literal["in_progress", "completed", "incomplete"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Is one of the following types: Literal[\"in_progress\"], Literal[\"completed\"],
+ Literal[\"incomplete\"]"""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ output: str,
+ status: Optional[Literal["in_progress", "completed", "incomplete"]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ItemType.LOCAL_SHELL_CALL_OUTPUT # type: ignore
+
+
+class ItemMcpApprovalRequest(Item, discriminator="mcp_approval_request"):
+ """MCP approval request.
+
+ :ivar type: The type of the item. Always ``mcp_approval_request``. Required.
+ MCP_APPROVAL_REQUEST.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.MCP_APPROVAL_REQUEST
+ :ivar id: The unique ID of the approval request. Required.
+ :vartype id: str
+ :ivar server_label: The label of the MCP server making the request. Required.
+ :vartype server_label: str
+ :ivar name: The name of the tool to run. Required.
+ :vartype name: str
+ :ivar arguments: A JSON string of arguments for the tool. Required.
+ :vartype arguments: str
+ """
+
+ type: Literal[ItemType.MCP_APPROVAL_REQUEST] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the item. Always ``mcp_approval_request``. Required. MCP_APPROVAL_REQUEST."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the approval request. Required."""
+ server_label: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The label of the MCP server making the request. Required."""
+ name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The name of the tool to run. Required."""
+ arguments: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """A JSON string of arguments for the tool. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ server_label: str,
+ name: str,
+ arguments: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ItemType.MCP_APPROVAL_REQUEST # type: ignore
+
+
+class ItemMcpListTools(Item, discriminator="mcp_list_tools"):
+ """MCP list tools.
+
+ :ivar type: The type of the item. Always ``mcp_list_tools``. Required. MCP_LIST_TOOLS.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.MCP_LIST_TOOLS
+ :ivar id: The unique ID of the list. Required.
+ :vartype id: str
+ :ivar server_label: The label of the MCP server. Required.
+ :vartype server_label: str
+ :ivar tools: The tools available on the server. Required.
+ :vartype tools: list[~azure.ai.agentserver.responses.models.models.MCPListToolsTool]
+ :ivar error:
+ :vartype error: str
+ """
+
+ type: Literal[ItemType.MCP_LIST_TOOLS] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the item. Always ``mcp_list_tools``. Required. MCP_LIST_TOOLS."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the list. Required."""
+ server_label: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The label of the MCP server. Required."""
+ tools: list["_models.MCPListToolsTool"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The tools available on the server. Required."""
+ error: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ server_label: str,
+ tools: list["_models.MCPListToolsTool"],
+ error: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ItemType.MCP_LIST_TOOLS # type: ignore
+
+
+class ItemMcpToolCall(Item, discriminator="mcp_call"):
+ """MCP tool call.
+
+ :ivar type: The type of the item. Always ``mcp_call``. Required. MCP_CALL.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.MCP_CALL
+ :ivar id: The unique ID of the tool call. Required.
+ :vartype id: str
+ :ivar server_label: The label of the MCP server running the tool. Required.
+ :vartype server_label: str
+ :ivar name: The name of the tool that was run. Required.
+ :vartype name: str
+ :ivar arguments: A JSON string of the arguments passed to the tool. Required.
+ :vartype arguments: str
+ :ivar output:
+ :vartype output: str
+ :ivar error:
+ :vartype error: dict[str, any]
+ :ivar status: The status of the tool call. One of ``in_progress``, ``completed``,
+ ``incomplete``, ``calling``, or ``failed``. Known values are: "in_progress", "completed",
+ "incomplete", "calling", and "failed".
+ :vartype status: str or ~azure.ai.agentserver.responses.models.models.MCPToolCallStatus
+ :ivar approval_request_id:
+ :vartype approval_request_id: str
+ """
+
+ type: Literal[ItemType.MCP_CALL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the item. Always ``mcp_call``. Required. MCP_CALL."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the tool call. Required."""
+ server_label: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The label of the MCP server running the tool. Required."""
+ name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The name of the tool that was run. Required."""
+ arguments: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """A JSON string of the arguments passed to the tool. Required."""
+ output: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ error: Optional[dict[str, Any]] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ status: Optional[Union[str, "_models.MCPToolCallStatus"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the tool call. One of ``in_progress``, ``completed``, ``incomplete``,
+ ``calling``, or ``failed``. Known values are: \"in_progress\", \"completed\", \"incomplete\",
+ \"calling\", and \"failed\"."""
+ approval_request_id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ server_label: str,
+ name: str,
+ arguments: str,
+ output: Optional[str] = None,
+ error: Optional[dict[str, Any]] = None,
+ status: Optional[Union[str, "_models.MCPToolCallStatus"]] = None,
+ approval_request_id: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ItemType.MCP_CALL # type: ignore
+
+
+class ItemMessage(Item, discriminator="message"):
+ """Message.
+
+ :ivar type: The type of the message. Always set to ``message``. Required. MESSAGE.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.MESSAGE
+ :ivar role: The role of the message. One of ``unknown``, ``user``, ``assistant``, ``system``,
+ ``critic``, ``discriminator``, ``developer``, or ``tool``. Required. Known values are:
+ "unknown", "user", "assistant", "system", "critic", "discriminator", "developer", and "tool".
+ :vartype role: str or ~azure.ai.agentserver.responses.models.models.MessageRole
+ :ivar content: Required. Is either a str type or a [MessageContent] type.
+ :vartype content: str or list[~azure.ai.agentserver.responses.models.models.MessageContent]
+ """
+
+ type: Literal[ItemType.MESSAGE] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the message. Always set to ``message``. Required. MESSAGE."""
+ role: Union[str, "_models.MessageRole"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The role of the message. One of ``unknown``, ``user``, ``assistant``, ``system``, ``critic``,
+ ``discriminator``, ``developer``, or ``tool``. Required. Known values are: \"unknown\",
+ \"user\", \"assistant\", \"system\", \"critic\", \"discriminator\", \"developer\", and
+ \"tool\"."""
+ content: Union[str, list["_models.MessageContent"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Required. Is either a str type or a [MessageContent] type."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ role: Union[str, "_models.MessageRole"],
+ content: Union[str, list["_models.MessageContent"]],
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ItemType.MESSAGE # type: ignore
+
+
+class ItemOutputMessage(Item, discriminator="output_message"):
+ """Output message.
+
+ :ivar id: The unique ID of the output message. Required.
+ :vartype id: str
+ :ivar type: The type of the output message. Always ``message``. Required. OUTPUT_MESSAGE.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.OUTPUT_MESSAGE
+ :ivar role: The role of the output message. Always ``assistant``. Required. Default value is
+ "assistant".
+ :vartype role: str
+ :ivar content: The content of the output message. Required.
+ :vartype content: list[~azure.ai.agentserver.responses.models.models.OutputMessageContent]
+ :ivar status: The status of the message input. One of ``in_progress``, ``completed``, or
+ ``incomplete``. Populated when input items are returned via API. Required. Is one of the
+ following types: Literal["in_progress"], Literal["completed"], Literal["incomplete"]
+ :vartype status: str or str or str
+ """
+
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the output message. Required."""
+ type: Literal[ItemType.OUTPUT_MESSAGE] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the output message. Always ``message``. Required. OUTPUT_MESSAGE."""
+ role: Literal["assistant"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The role of the output message. Always ``assistant``. Required. Default value is \"assistant\"."""
+ content: list["_models.OutputMessageContent"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The content of the output message. Required."""
+ status: Literal["in_progress", "completed", "incomplete"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the message input. One of ``in_progress``, ``completed``, or ``incomplete``.
+ Populated when input items are returned via API. Required. Is one of the following types:
+ Literal[\"in_progress\"], Literal[\"completed\"], Literal[\"incomplete\"]"""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ content: list["_models.OutputMessageContent"],
+ status: Literal["in_progress", "completed", "incomplete"],
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ItemType.OUTPUT_MESSAGE # type: ignore
+ self.role: Literal["assistant"] = "assistant"
+
+
+class ItemReasoningItem(Item, discriminator="reasoning"):
+ """Reasoning.
+
+ :ivar type: The type of the object. Always ``reasoning``. Required. REASONING.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.REASONING
+ :ivar id: The unique identifier of the reasoning content. Required.
+ :vartype id: str
+ :ivar encrypted_content:
+ :vartype encrypted_content: str
+ :ivar summary: Reasoning summary content. Required.
+ :vartype summary: list[~azure.ai.agentserver.responses.models.models.SummaryTextContent]
+ :ivar content: Reasoning text content.
+ :vartype content: list[~azure.ai.agentserver.responses.models.models.ReasoningTextContent]
+ :ivar status: The status of the item. One of ``in_progress``, ``completed``, or ``incomplete``.
+ Populated when items are returned via API. Is one of the following types:
+ Literal["in_progress"], Literal["completed"], Literal["incomplete"]
+ :vartype status: str or str or str
+ """
+
+ type: Literal[ItemType.REASONING] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the object. Always ``reasoning``. Required. REASONING."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique identifier of the reasoning content. Required."""
+ encrypted_content: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ summary: list["_models.SummaryTextContent"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Reasoning summary content. Required."""
+ content: Optional[list["_models.ReasoningTextContent"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Reasoning text content."""
+ status: Optional[Literal["in_progress", "completed", "incomplete"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the item. One of ``in_progress``, ``completed``, or ``incomplete``. Populated
+ when items are returned via API. Is one of the following types: Literal[\"in_progress\"],
+ Literal[\"completed\"], Literal[\"incomplete\"]"""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ summary: list["_models.SummaryTextContent"],
+ encrypted_content: Optional[str] = None,
+ content: Optional[list["_models.ReasoningTextContent"]] = None,
+ status: Optional[Literal["in_progress", "completed", "incomplete"]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ItemType.REASONING # type: ignore
+
+
+class ItemReferenceParam(Item, discriminator="item_reference"):
+ """Item reference.
+
+ :ivar type: The type of item to reference. Always ``item_reference``. Required. ITEM_REFERENCE.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.ITEM_REFERENCE
+ :ivar id: The ID of the item to reference. Required.
+ :vartype id: str
+ """
+
+ type: Literal[ItemType.ITEM_REFERENCE] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of item to reference. Always ``item_reference``. Required. ITEM_REFERENCE."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the item to reference. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ItemType.ITEM_REFERENCE # type: ignore
+
+
+class ItemWebSearchToolCall(Item, discriminator="web_search_call"):
+ """Web search tool call.
+
+ :ivar id: The unique ID of the web search tool call. Required.
+ :vartype id: str
+ :ivar type: The type of the web search tool call. Always ``web_search_call``. Required.
+ WEB_SEARCH_CALL.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.WEB_SEARCH_CALL
+ :ivar status: The status of the web search tool call. Required. Is one of the following types:
+ Literal["in_progress"], Literal["searching"], Literal["completed"], Literal["failed"]
+ :vartype status: str or str or str or str
+ :ivar action: An object describing the specific action taken in this web search call. Includes
+ details on how the model used the web (search, open_page, find_in_page). Required. Is one of
+ the following types: WebSearchActionSearch, WebSearchActionOpenPage, WebSearchActionFind
+ :vartype action: ~azure.ai.agentserver.responses.models.models.WebSearchActionSearch or
+ ~azure.ai.agentserver.responses.models.models.WebSearchActionOpenPage or
+ ~azure.ai.agentserver.responses.models.models.WebSearchActionFind
+ """
+
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the web search tool call. Required."""
+ type: Literal[ItemType.WEB_SEARCH_CALL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the web search tool call. Always ``web_search_call``. Required. WEB_SEARCH_CALL."""
+ status: Literal["in_progress", "searching", "completed", "failed"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the web search tool call. Required. Is one of the following types:
+ Literal[\"in_progress\"], Literal[\"searching\"], Literal[\"completed\"], Literal[\"failed\"]"""
+ action: Union["_models.WebSearchActionSearch", "_models.WebSearchActionOpenPage", "_models.WebSearchActionFind"] = (
+ rest_field(visibility=["read", "create", "update", "delete", "query"])
+ )
+ """An object describing the specific action taken in this web search call. Includes details on how
+ the model used the web (search, open_page, find_in_page). Required. Is one of the following
+ types: WebSearchActionSearch, WebSearchActionOpenPage, WebSearchActionFind"""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ status: Literal["in_progress", "searching", "completed", "failed"],
+ action: Union[
+ "_models.WebSearchActionSearch", "_models.WebSearchActionOpenPage", "_models.WebSearchActionFind"
+ ],
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ItemType.WEB_SEARCH_CALL # type: ignore
+
+
+class KeyPressAction(ComputerAction, discriminator="keypress"):
+ """KeyPress.
+
+ :ivar type: Specifies the event type. For a keypress action, this property is always set to
+ ``keypress``. Required. KEYPRESS.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.KEYPRESS
+ :ivar keys_property: The combination of keys the model is requesting to be pressed. This is an
+ array of strings, each representing a key. Required.
+ :vartype keys_property: list[str]
+ """
+
+ type: Literal[ComputerActionType.KEYPRESS] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Specifies the event type. For a keypress action, this property is always set to ``keypress``.
+ Required. KEYPRESS."""
+ keys_property: list[str] = rest_field(
+ name="keys", visibility=["read", "create", "update", "delete", "query"], original_tsp_name="keys"
+ )
+ """The combination of keys the model is requesting to be pressed. This is an array of strings,
+ each representing a key. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ keys_property: list[str],
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ComputerActionType.KEYPRESS # type: ignore
+
+
+class LocalEnvironmentResource(FunctionShellCallEnvironment, discriminator="local"):
+ """Local Environment.
+
+ :ivar type: The environment type. Always ``local``. Required. LOCAL.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.LOCAL
+ """
+
+ type: Literal[FunctionShellCallEnvironmentType.LOCAL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The environment type. Always ``local``. Required. LOCAL."""
+
+ @overload
+ def __init__(
+ self,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = FunctionShellCallEnvironmentType.LOCAL # type: ignore
+
+
+class LocalShellExecAction(_Model):
+ """Local shell exec action.
+
+ :ivar type: The type of the local shell action. Always ``exec``. Required. Default value is
+ "exec".
+ :vartype type: str
+ :ivar command: The command to run. Required.
+ :vartype command: list[str]
+ :ivar timeout_ms:
+ :vartype timeout_ms: int
+ :ivar working_directory:
+ :vartype working_directory: str
+ :ivar env: Environment variables to set for the command. Required.
+ :vartype env: dict[str, str]
+ :ivar user:
+ :vartype user: str
+ """
+
+ type: Literal["exec"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The type of the local shell action. Always ``exec``. Required. Default value is \"exec\"."""
+ command: list[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The command to run. Required."""
+ timeout_ms: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ working_directory: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ env: dict[str, str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Environment variables to set for the command. Required."""
+ user: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+
+ @overload
+ def __init__(
+ self,
+ *,
+ command: list[str],
+ env: dict[str, str],
+ timeout_ms: Optional[int] = None,
+ working_directory: Optional[str] = None,
+ user: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type: Literal["exec"] = "exec"
+
+
+class LocalShellToolParam(Tool, discriminator="local_shell"):
+ """Local shell tool.
+
+ :ivar type: The type of the local shell tool. Always ``local_shell``. Required. LOCAL_SHELL.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.LOCAL_SHELL
+ :ivar name: Optional user-defined name for this tool or configuration.
+ :vartype name: str
+ :ivar description: Optional user-defined description for this tool or configuration.
+ :vartype description: str
+ """
+
+ type: Literal[ToolType.LOCAL_SHELL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the local shell tool. Always ``local_shell``. Required. LOCAL_SHELL."""
+ name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined name for this tool or configuration."""
+ description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined description for this tool or configuration."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ToolType.LOCAL_SHELL # type: ignore
+
+
+class LocalSkillParam(_Model):
+ """LocalSkillParam.
+
+ :ivar name: The name of the skill. Required.
+ :vartype name: str
+ :ivar description: The description of the skill. Required.
+ :vartype description: str
+ :ivar path: The path to the directory containing the skill. Required.
+ :vartype path: str
+ """
+
+ name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The name of the skill. Required."""
+ description: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The description of the skill. Required."""
+ path: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The path to the directory containing the skill. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ name: str,
+ description: str,
+ path: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class LogProb(_Model):
+ """Log probability.
+
+ :ivar token: Required.
+ :vartype token: str
+ :ivar logprob: Required.
+ :vartype logprob: int
+ :ivar bytes: Required.
+ :vartype bytes: list[int]
+ :ivar top_logprobs: Required.
+ :vartype top_logprobs: list[~azure.ai.agentserver.responses.models.models.TopLogProb]
+ """
+
+ token: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+ logprob: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+ bytes: list[int] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+ top_logprobs: list["_models.TopLogProb"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ token: str,
+ logprob: int,
+ bytes: list[int],
+ top_logprobs: list["_models.TopLogProb"],
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class MCPApprovalResponse(Item, discriminator="mcp_approval_response"):
+ """MCP approval response.
+
+ :ivar type: The type of the item. Always ``mcp_approval_response``. Required.
+ MCP_APPROVAL_RESPONSE.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.MCP_APPROVAL_RESPONSE
+ :ivar id:
+ :vartype id: str
+ :ivar approval_request_id: The ID of the approval request being answered. Required.
+ :vartype approval_request_id: str
+ :ivar approve: Whether the request was approved. Required.
+ :vartype approve: bool
+ :ivar reason:
+ :vartype reason: str
+ """
+
+ type: Literal[ItemType.MCP_APPROVAL_RESPONSE] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the item. Always ``mcp_approval_response``. Required. MCP_APPROVAL_RESPONSE."""
+ id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ approval_request_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the approval request being answered. Required."""
+ approve: bool = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Whether the request was approved. Required."""
+ reason: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+
+ @overload
+ def __init__(
+ self,
+ *,
+ approval_request_id: str,
+ approve: bool,
+ id: Optional[str] = None, # pylint: disable=redefined-builtin
+ reason: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ItemType.MCP_APPROVAL_RESPONSE # type: ignore
+
+
+class MCPListToolsTool(_Model):
+ """MCP list tools tool.
+
+ :ivar name: The name of the tool. Required.
+ :vartype name: str
+ :ivar description:
+ :vartype description: str
+ :ivar input_schema: The JSON schema describing the tool's input. Required.
+ :vartype input_schema:
+ ~azure.ai.agentserver.responses.models.models.MCPListToolsToolInputSchema
+ :ivar annotations:
+ :vartype annotations: ~azure.ai.agentserver.responses.models.models.MCPListToolsToolAnnotations
+ """
+
+ name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The name of the tool. Required."""
+ description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ input_schema: "_models.MCPListToolsToolInputSchema" = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The JSON schema describing the tool's input. Required."""
+ annotations: Optional["_models.MCPListToolsToolAnnotations"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+
+ @overload
+ def __init__(
+ self,
+ *,
+ name: str,
+ input_schema: "_models.MCPListToolsToolInputSchema",
+ description: Optional[str] = None,
+ annotations: Optional["_models.MCPListToolsToolAnnotations"] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class MCPListToolsToolAnnotations(_Model):
+ """MCPListToolsToolAnnotations."""
+
+
+class MCPListToolsToolInputSchema(_Model):
+ """MCPListToolsToolInputSchema."""
+
+
+class MCPTool(Tool, discriminator="mcp"):
+ """MCP tool.
+
+ :ivar type: The type of the MCP tool. Always ``mcp``. Required. MCP.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.MCP
+ :ivar server_label: A label for this MCP server, used to identify it in tool calls. Required.
+ :vartype server_label: str
+ :ivar server_url: The URL for the MCP server. One of ``server_url`` or ``connector_id`` must be
+ provided.
+ :vartype server_url: str
+ :ivar connector_id: Identifier for service connectors, like those available in ChatGPT. One of
+ ``server_url`` or ``connector_id`` must be provided. Learn more about service connectors `here
+ `_. Currently supported ``connector_id`` values are:
+
+ * Dropbox: `connector_dropbox`
+ * Gmail: `connector_gmail`
+ * Google Calendar: `connector_googlecalendar`
+ * Google Drive: `connector_googledrive`
+ * Microsoft Teams: `connector_microsoftteams`
+ * Outlook Calendar: `connector_outlookcalendar`
+ * Outlook Email: `connector_outlookemail`
+ * SharePoint: `connector_sharepoint`. Is one of the following types:
+ Literal["connector_dropbox"], Literal["connector_gmail"], Literal["connector_googlecalendar"],
+ Literal["connector_googledrive"], Literal["connector_microsoftteams"],
+ Literal["connector_outlookcalendar"], Literal["connector_outlookemail"],
+ Literal["connector_sharepoint"]
+ :vartype connector_id: str or str or str or str or str or str or str or str
+ :ivar authorization: An OAuth access token that can be used with a remote MCP server, either
+ with a custom MCP server URL or a service connector. Your application must handle the OAuth
+ authorization flow and provide the token here.
+ :vartype authorization: str
+ :ivar server_description: Optional description of the MCP server, used to provide more context.
+ :vartype server_description: str
+ :ivar headers:
+ :vartype headers: dict[str, str]
+ :ivar allowed_tools: Is either a [str] type or a MCPToolFilter type.
+ :vartype allowed_tools: list[str] or
+ ~azure.ai.agentserver.responses.models.models.MCPToolFilter
+ :ivar require_approval: Is one of the following types: MCPToolRequireApproval,
+ Literal["always"], Literal["never"]
+ :vartype require_approval: ~azure.ai.agentserver.responses.models.models.MCPToolRequireApproval
+ or str or str
+ :ivar project_connection_id: The connection ID in the project for the MCP server. The
+ connection stores authentication and other connection details needed to connect to the MCP
+ server.
+ :vartype project_connection_id: str
+ """
+
+ type: Literal[ToolType.MCP] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the MCP tool. Always ``mcp``. Required. MCP."""
+ server_label: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """A label for this MCP server, used to identify it in tool calls. Required."""
+ server_url: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The URL for the MCP server. One of ``server_url`` or ``connector_id`` must be provided."""
+ connector_id: Optional[
+ Literal[
+ "connector_dropbox",
+ "connector_gmail",
+ "connector_googlecalendar",
+ "connector_googledrive",
+ "connector_microsoftteams",
+ "connector_outlookcalendar",
+ "connector_outlookemail",
+ "connector_sharepoint",
+ ]
+ ] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Identifier for service connectors, like those available in ChatGPT. One of ``server_url`` or
+ ``connector_id`` must be provided. Learn more about service connectors `here
+ `_. Currently supported ``connector_id`` values are:
+
+ * Dropbox: `connector_dropbox`
+ * Gmail: `connector_gmail`
+ * Google Calendar: `connector_googlecalendar`
+ * Google Drive: `connector_googledrive`
+ * Microsoft Teams: `connector_microsoftteams`
+ * Outlook Calendar: `connector_outlookcalendar`
+ * Outlook Email: `connector_outlookemail`
+ * SharePoint: `connector_sharepoint`. Is one of the following types:
+ Literal[\"connector_dropbox\"], Literal[\"connector_gmail\"],
+ Literal[\"connector_googlecalendar\"], Literal[\"connector_googledrive\"],
+ Literal[\"connector_microsoftteams\"], Literal[\"connector_outlookcalendar\"],
+ Literal[\"connector_outlookemail\"], Literal[\"connector_sharepoint\"]"""
+ authorization: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """An OAuth access token that can be used with a remote MCP server, either with a custom MCP
+ server URL or a service connector. Your application must handle the OAuth authorization flow
+ and provide the token here."""
+ server_description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional description of the MCP server, used to provide more context."""
+ headers: Optional[dict[str, str]] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ allowed_tools: Optional[Union[list[str], "_models.MCPToolFilter"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Is either a [str] type or a MCPToolFilter type."""
+ require_approval: Optional[Union["_models.MCPToolRequireApproval", Literal["always"], Literal["never"]]] = (
+ rest_field(visibility=["read", "create", "update", "delete", "query"])
+ )
+ """Is one of the following types: MCPToolRequireApproval, Literal[\"always\"], Literal[\"never\"]"""
+ project_connection_id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The connection ID in the project for the MCP server. The connection stores authentication and
+ other connection details needed to connect to the MCP server."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ server_label: str,
+ server_url: Optional[str] = None,
+ connector_id: Optional[
+ Literal[
+ "connector_dropbox",
+ "connector_gmail",
+ "connector_googlecalendar",
+ "connector_googledrive",
+ "connector_microsoftteams",
+ "connector_outlookcalendar",
+ "connector_outlookemail",
+ "connector_sharepoint",
+ ]
+ ] = None,
+ authorization: Optional[str] = None,
+ server_description: Optional[str] = None,
+ headers: Optional[dict[str, str]] = None,
+ allowed_tools: Optional[Union[list[str], "_models.MCPToolFilter"]] = None,
+ require_approval: Optional[Union["_models.MCPToolRequireApproval", Literal["always"], Literal["never"]]] = None,
+ project_connection_id: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ToolType.MCP # type: ignore
+
+
+class MCPToolFilter(_Model):
+ """MCP tool filter.
+
+ :ivar tool_names: MCP allowed tools.
+ :vartype tool_names: list[str]
+ :ivar read_only: Indicates whether or not a tool modifies data or is read-only. If an MCP
+ server is `annotated with `readOnlyHint`
+ `_,
+ it will match this filter.
+ :vartype read_only: bool
+ """
+
+ tool_names: Optional[list[str]] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """MCP allowed tools."""
+ read_only: Optional[bool] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Indicates whether or not a tool modifies data or is read-only. If an MCP server is `annotated
+ with `readOnlyHint`
+ `_,
+ it will match this filter."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ tool_names: Optional[list[str]] = None,
+ read_only: Optional[bool] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class MCPToolRequireApproval(_Model):
+ """MCPToolRequireApproval.
+
+ :ivar always:
+ :vartype always: ~azure.ai.agentserver.responses.models.models.MCPToolFilter
+ :ivar never:
+ :vartype never: ~azure.ai.agentserver.responses.models.models.MCPToolFilter
+ """
+
+ always: Optional["_models.MCPToolFilter"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ never: Optional["_models.MCPToolFilter"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+
+ @overload
+ def __init__(
+ self,
+ *,
+ always: Optional["_models.MCPToolFilter"] = None,
+ never: Optional["_models.MCPToolFilter"] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class MemorySearchItem(_Model):
+ """A retrieved memory item from memory search.
+
+ :ivar memory_item: Retrieved memory item. Required.
+ :vartype memory_item: ~azure.ai.agentserver.responses.models.models.MemoryItem
+ """
+
+ memory_item: "_models.MemoryItem" = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Retrieved memory item. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ memory_item: "_models.MemoryItem",
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class MemorySearchOptions(_Model):
+ """Memory search options.
+
+ :ivar max_memories: Maximum number of memory items to return.
+ :vartype max_memories: int
+ """
+
+ max_memories: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Maximum number of memory items to return."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ max_memories: Optional[int] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class MemorySearchPreviewTool(Tool, discriminator="memory_search_preview"):
+ """A tool for integrating memories into the agent.
+
+ :ivar type: The type of the tool. Always ``memory_search_preview``. Required.
+ MEMORY_SEARCH_PREVIEW.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.MEMORY_SEARCH_PREVIEW
+ :ivar name: Optional user-defined name for this tool or configuration.
+ :vartype name: str
+ :ivar description: Optional user-defined description for this tool or configuration.
+ :vartype description: str
+ :ivar memory_store_name: The name of the memory store to use. Required.
+ :vartype memory_store_name: str
+ :ivar scope: The namespace used to group and isolate memories, such as a user ID. Limits which
+ memories can be retrieved or updated. Use special variable ``{{$userId}}`` to scope memories to
+ the current signed-in user. Required.
+ :vartype scope: str
+ :ivar search_options: Options for searching the memory store.
+ :vartype search_options: ~azure.ai.agentserver.responses.models.models.MemorySearchOptions
+ :ivar update_delay: Time to wait before updating memories after inactivity (seconds). Default
+ 300.
+ :vartype update_delay: int
+ """
+
+ type: Literal[ToolType.MEMORY_SEARCH_PREVIEW] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the tool. Always ``memory_search_preview``. Required. MEMORY_SEARCH_PREVIEW."""
+ name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined name for this tool or configuration."""
+ description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined description for this tool or configuration."""
+ memory_store_name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The name of the memory store to use. Required."""
+ scope: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The namespace used to group and isolate memories, such as a user ID. Limits which memories can
+ be retrieved or updated. Use special variable ``{{$userId}}`` to scope memories to the current
+ signed-in user. Required."""
+ search_options: Optional["_models.MemorySearchOptions"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Options for searching the memory store."""
+ update_delay: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Time to wait before updating memories after inactivity (seconds). Default 300."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ memory_store_name: str,
+ scope: str,
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ search_options: Optional["_models.MemorySearchOptions"] = None,
+ update_delay: Optional[int] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ToolType.MEMORY_SEARCH_PREVIEW # type: ignore
+
+
+class MemorySearchToolCallItemParam(Item, discriminator="memory_search_call"):
+ """MemorySearchToolCallItemParam.
+
+ :ivar type: Required. MEMORY_SEARCH_CALL.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.MEMORY_SEARCH_CALL
+ :ivar results: The results returned from the memory search.
+ :vartype results: list[~azure.ai.agentserver.responses.models.models.MemorySearchItem]
+ """
+
+ type: Literal[ItemType.MEMORY_SEARCH_CALL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Required. MEMORY_SEARCH_CALL."""
+ results: Optional[list["_models.MemorySearchItem"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The results returned from the memory search."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ results: Optional[list["_models.MemorySearchItem"]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ItemType.MEMORY_SEARCH_CALL # type: ignore
+
+
+class MemorySearchToolCallItemResource(OutputItem, discriminator="memory_search_call"):
+ """MemorySearchToolCallItemResource.
+
+ :ivar agent_reference: The agent that created the item.
+ :vartype agent_reference: ~azure.ai.agentserver.responses.models.models.AgentReference
+ :ivar response_id: The response on which the item is created.
+ :vartype response_id: str
+ :ivar type: Required. MEMORY_SEARCH_CALL.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.MEMORY_SEARCH_CALL
+ :ivar status: The status of the memory search tool call. One of ``in_progress``, ``searching``,
+ ``completed``, ``incomplete`` or ``failed``,. Required. Is one of the following types:
+ Literal["in_progress"], Literal["searching"], Literal["completed"], Literal["incomplete"],
+ Literal["failed"]
+ :vartype status: str or str or str or str or str
+ :ivar results: The results returned from the memory search.
+ :vartype results: list[~azure.ai.agentserver.responses.models.models.MemorySearchItem]
+ :ivar id: Required.
+ :vartype id: str
+ """
+
+ type: Literal[OutputItemType.MEMORY_SEARCH_CALL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Required. MEMORY_SEARCH_CALL."""
+ status: Literal["in_progress", "searching", "completed", "incomplete", "failed"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the memory search tool call. One of ``in_progress``, ``searching``,
+ ``completed``, ``incomplete`` or ``failed``,. Required. Is one of the following types:
+ Literal[\"in_progress\"], Literal[\"searching\"], Literal[\"completed\"],
+ Literal[\"incomplete\"], Literal[\"failed\"]"""
+ results: Optional[list["_models.MemorySearchItem"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The results returned from the memory search."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ status: Literal["in_progress", "searching", "completed", "incomplete", "failed"],
+ id: str, # pylint: disable=redefined-builtin
+ agent_reference: Optional["_models.AgentReference"] = None,
+ response_id: Optional[str] = None,
+ results: Optional[list["_models.MemorySearchItem"]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputItemType.MEMORY_SEARCH_CALL # type: ignore
+
+
+class MessageContentInputFileContent(MessageContent, discriminator="input_file"):
+ """Input file.
+
+ :ivar type: The type of the input item. Always ``input_file``. Required. INPUT_FILE.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.INPUT_FILE
+ :ivar file_id:
+ :vartype file_id: str
+ :ivar filename: The name of the file to be sent to the model.
+ :vartype filename: str
+ :ivar file_url: The URL of the file to be sent to the model.
+ :vartype file_url: str
+ :ivar file_data: The content of the file to be sent to the model.
+ :vartype file_data: str
+ """
+
+ type: Literal[MessageContentType.INPUT_FILE] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the input item. Always ``input_file``. Required. INPUT_FILE."""
+ file_id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ filename: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The name of the file to be sent to the model."""
+ file_url: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The URL of the file to be sent to the model."""
+ file_data: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The content of the file to be sent to the model."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ file_id: Optional[str] = None,
+ filename: Optional[str] = None,
+ file_url: Optional[str] = None,
+ file_data: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = MessageContentType.INPUT_FILE # type: ignore
+
+
+class MessageContentInputImageContent(MessageContent, discriminator="input_image"):
+ """Input image.
+
+ :ivar type: The type of the input item. Always ``input_image``. Required. INPUT_IMAGE.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.INPUT_IMAGE
+ :ivar image_url:
+ :vartype image_url: str
+ :ivar file_id:
+ :vartype file_id: str
+ :ivar detail: The detail level of the image to be sent to the model. One of ``high``, ``low``,
+ or ``auto``. Defaults to ``auto``. Required. Known values are: "low", "high", and "auto".
+ :vartype detail: str or ~azure.ai.agentserver.responses.models.models.ImageDetail
+ """
+
+ type: Literal[MessageContentType.INPUT_IMAGE] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the input item. Always ``input_image``. Required. INPUT_IMAGE."""
+ image_url: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ file_id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ detail: Union[str, "_models.ImageDetail"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The detail level of the image to be sent to the model. One of ``high``, ``low``, or ``auto``.
+ Defaults to ``auto``. Required. Known values are: \"low\", \"high\", and \"auto\"."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ detail: Union[str, "_models.ImageDetail"],
+ image_url: Optional[str] = None,
+ file_id: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = MessageContentType.INPUT_IMAGE # type: ignore
+
+
+class MessageContentInputTextContent(MessageContent, discriminator="input_text"):
+ """Input text.
+
+ :ivar type: The type of the input item. Always ``input_text``. Required. INPUT_TEXT.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.INPUT_TEXT
+ :ivar text: The text input to the model. Required.
+ :vartype text: str
+ """
+
+ type: Literal[MessageContentType.INPUT_TEXT] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the input item. Always ``input_text``. Required. INPUT_TEXT."""
+ text: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The text input to the model. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ text: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = MessageContentType.INPUT_TEXT # type: ignore
+
+
+class MessageContentOutputTextContent(MessageContent, discriminator="output_text"):
+ """Output text.
+
+ :ivar type: The type of the output text. Always ``output_text``. Required. OUTPUT_TEXT.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.OUTPUT_TEXT
+ :ivar text: The text output from the model. Required.
+ :vartype text: str
+ :ivar annotations: The annotations of the text output. Required.
+ :vartype annotations: list[~azure.ai.agentserver.responses.models.models.Annotation]
+ :ivar logprobs: Required.
+ :vartype logprobs: list[~azure.ai.agentserver.responses.models.models.LogProb]
+ """
+
+ type: Literal[MessageContentType.OUTPUT_TEXT] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the output text. Always ``output_text``. Required. OUTPUT_TEXT."""
+ text: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The text output from the model. Required."""
+ annotations: list["_models.Annotation"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The annotations of the text output. Required."""
+ logprobs: list["_models.LogProb"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ text: str,
+ annotations: list["_models.Annotation"],
+ logprobs: list["_models.LogProb"],
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = MessageContentType.OUTPUT_TEXT # type: ignore
+
+
+class MessageContentReasoningTextContent(MessageContent, discriminator="reasoning_text"):
+ """Reasoning text.
+
+ :ivar type: The type of the reasoning text. Always ``reasoning_text``. Required.
+ REASONING_TEXT.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.REASONING_TEXT
+ :ivar text: The reasoning text from the model. Required.
+ :vartype text: str
+ """
+
+ type: Literal[MessageContentType.REASONING_TEXT] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the reasoning text. Always ``reasoning_text``. Required. REASONING_TEXT."""
+ text: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The reasoning text from the model. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ text: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = MessageContentType.REASONING_TEXT # type: ignore
+
+
+class MessageContentRefusalContent(MessageContent, discriminator="refusal"):
+ """Refusal.
+
+ :ivar type: The type of the refusal. Always ``refusal``. Required. REFUSAL.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.REFUSAL
+ :ivar refusal: The refusal explanation from the model. Required.
+ :vartype refusal: str
+ """
+
+ type: Literal[MessageContentType.REFUSAL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the refusal. Always ``refusal``. Required. REFUSAL."""
+ refusal: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The refusal explanation from the model. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ refusal: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = MessageContentType.REFUSAL # type: ignore
+
+
+class Metadata(_Model):
+ """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing
+ additional information about the object in a structured format, and querying for objects via
+ API or the dashboard. Keys are strings with a maximum length of 64 characters. Values are
+ strings with a maximum length of 512 characters.
+
+ """
+
+
+class MicrosoftFabricPreviewTool(Tool, discriminator="fabric_dataagent_preview"):
+ """The input definition information for a Microsoft Fabric tool as used to configure an agent.
+
+ :ivar type: The object type, which is always 'fabric_dataagent_preview'. Required.
+ FABRIC_DATAAGENT_PREVIEW.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.FABRIC_DATAAGENT_PREVIEW
+ :ivar name: Optional user-defined name for this tool or configuration.
+ :vartype name: str
+ :ivar description: Optional user-defined description for this tool or configuration.
+ :vartype description: str
+ :ivar fabric_dataagent_preview: The fabric data agent tool parameters. Required.
+ :vartype fabric_dataagent_preview:
+ ~azure.ai.agentserver.responses.models.models.FabricDataAgentToolParameters
+ """
+
+ type: Literal[ToolType.FABRIC_DATAAGENT_PREVIEW] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The object type, which is always 'fabric_dataagent_preview'. Required.
+ FABRIC_DATAAGENT_PREVIEW."""
+ name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined name for this tool or configuration."""
+ description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined description for this tool or configuration."""
+ fabric_dataagent_preview: "_models.FabricDataAgentToolParameters" = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The fabric data agent tool parameters. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ fabric_dataagent_preview: "_models.FabricDataAgentToolParameters",
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ToolType.FABRIC_DATAAGENT_PREVIEW # type: ignore
+
+
+class MoveParam(ComputerAction, discriminator="move"):
+ """Move.
+
+ :ivar type: Specifies the event type. For a move action, this property is always set to
+ ``move``. Required. MOVE.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.MOVE
+ :ivar x: The x-coordinate to move to. Required.
+ :vartype x: int
+ :ivar y: The y-coordinate to move to. Required.
+ :vartype y: int
+ """
+
+ type: Literal[ComputerActionType.MOVE] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Specifies the event type. For a move action, this property is always set to ``move``. Required.
+ MOVE."""
+ x: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The x-coordinate to move to. Required."""
+ y: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The y-coordinate to move to. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ x: int,
+ y: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ComputerActionType.MOVE # type: ignore
+
+
+class OAuthConsentRequestOutputItem(OutputItem, discriminator="oauth_consent_request"):
+ """Request from the service for the user to perform OAuth consent.
+
+ :ivar agent_reference: The agent that created the item.
+ :vartype agent_reference: ~azure.ai.agentserver.responses.models.models.AgentReference
+ :ivar response_id: The response on which the item is created.
+ :vartype response_id: str
+ :ivar id: Required.
+ :vartype id: str
+ :ivar type: Required. OAUTH_CONSENT_REQUEST.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.OAUTH_CONSENT_REQUEST
+ :ivar consent_link: The link the user can use to perform OAuth consent. Required.
+ :vartype consent_link: str
+ :ivar server_label: The server label for the OAuth consent request. Required.
+ :vartype server_label: str
+ """
+
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+ type: Literal[OutputItemType.OAUTH_CONSENT_REQUEST] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Required. OAUTH_CONSENT_REQUEST."""
+ consent_link: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The link the user can use to perform OAuth consent. Required."""
+ server_label: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The server label for the OAuth consent request. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ consent_link: str,
+ server_label: str,
+ agent_reference: Optional["_models.AgentReference"] = None,
+ response_id: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputItemType.OAUTH_CONSENT_REQUEST # type: ignore
+
+
+class OpenApiAuthDetails(_Model):
+ """authentication details for OpenApiFunctionDefinition.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ OpenApiAnonymousAuthDetails, OpenApiManagedAuthDetails, OpenApiProjectConnectionAuthDetails
+
+ :ivar type: The type of authentication, must be anonymous/project_connection/managed_identity.
+ Required. Known values are: "anonymous", "project_connection", and "managed_identity".
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.OpenApiAuthType
+ """
+
+ __mapping__: dict[str, _Model] = {}
+ type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])
+ """The type of authentication, must be anonymous/project_connection/managed_identity. Required.
+ Known values are: \"anonymous\", \"project_connection\", and \"managed_identity\"."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ type: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class OpenApiAnonymousAuthDetails(OpenApiAuthDetails, discriminator="anonymous"):
+ """Security details for OpenApi anonymous authentication.
+
+ :ivar type: The object type, which is always 'anonymous'. Required. ANONYMOUS.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.ANONYMOUS
+ """
+
+ type: Literal[OpenApiAuthType.ANONYMOUS] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The object type, which is always 'anonymous'. Required. ANONYMOUS."""
+
+ @overload
+ def __init__(
+ self,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OpenApiAuthType.ANONYMOUS # type: ignore
+
+
+class OpenApiFunctionDefinition(_Model):
+ """The input definition information for an openapi function.
+
+ :ivar name: The name of the function to be called. Required.
+ :vartype name: str
+ :ivar description: A description of what the function does, used by the model to choose when
+ and how to call the function.
+ :vartype description: str
+ :ivar spec: The openapi function shape, described as a JSON Schema object. Required.
+ :vartype spec: dict[str, any]
+ :ivar auth: Open API authentication details. Required.
+ :vartype auth: ~azure.ai.agentserver.responses.models.models.OpenApiAuthDetails
+ :ivar default_params: List of OpenAPI spec parameters that will use user-provided defaults.
+ :vartype default_params: list[str]
+ :ivar functions: List of function definitions used by OpenApi tool.
+ :vartype functions:
+ list[~azure.ai.agentserver.responses.models.models.OpenApiFunctionDefinitionFunction]
+ """
+
+ name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The name of the function to be called. Required."""
+ description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """A description of what the function does, used by the model to choose when and how to call the
+ function."""
+ spec: dict[str, Any] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The openapi function shape, described as a JSON Schema object. Required."""
+ auth: "_models.OpenApiAuthDetails" = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Open API authentication details. Required."""
+ default_params: Optional[list[str]] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """List of OpenAPI spec parameters that will use user-provided defaults."""
+ functions: Optional[list["_models.OpenApiFunctionDefinitionFunction"]] = rest_field(visibility=["read"])
+ """List of function definitions used by OpenApi tool."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ name: str,
+ spec: dict[str, Any],
+ auth: "_models.OpenApiAuthDetails",
+ description: Optional[str] = None,
+ default_params: Optional[list[str]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class OpenApiFunctionDefinitionFunction(_Model):
+ """OpenApiFunctionDefinitionFunction.
+
+ :ivar name: The name of the function to be called. Required.
+ :vartype name: str
+ :ivar description: A description of what the function does, used by the model to choose when
+ and how to call the function.
+ :vartype description: str
+ :ivar parameters: The parameters the functions accepts, described as a JSON Schema object.
+ Required.
+ :vartype parameters: dict[str, any]
+ """
+
+ name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The name of the function to be called. Required."""
+ description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """A description of what the function does, used by the model to choose when and how to call the
+ function."""
+ parameters: dict[str, Any] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The parameters the functions accepts, described as a JSON Schema object. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ name: str,
+ parameters: dict[str, Any],
+ description: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class OpenApiManagedAuthDetails(OpenApiAuthDetails, discriminator="managed_identity"):
+ """Security details for OpenApi managed_identity authentication.
+
+ :ivar type: The object type, which is always 'managed_identity'. Required. MANAGED_IDENTITY.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.MANAGED_IDENTITY
+ :ivar security_scheme: Connection auth security details. Required.
+ :vartype security_scheme:
+ ~azure.ai.agentserver.responses.models.models.OpenApiManagedSecurityScheme
+ """
+
+ type: Literal[OpenApiAuthType.MANAGED_IDENTITY] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The object type, which is always 'managed_identity'. Required. MANAGED_IDENTITY."""
+ security_scheme: "_models.OpenApiManagedSecurityScheme" = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Connection auth security details. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ security_scheme: "_models.OpenApiManagedSecurityScheme",
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OpenApiAuthType.MANAGED_IDENTITY # type: ignore
+
+
+class OpenApiManagedSecurityScheme(_Model):
+ """Security scheme for OpenApi managed_identity authentication.
+
+ :ivar audience: Authentication scope for managed_identity auth type. Required.
+ :vartype audience: str
+ """
+
+ audience: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Authentication scope for managed_identity auth type. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ audience: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class OpenApiProjectConnectionAuthDetails(OpenApiAuthDetails, discriminator="project_connection"):
+ """Security details for OpenApi project connection authentication.
+
+ :ivar type: The object type, which is always 'project_connection'. Required.
+ PROJECT_CONNECTION.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.PROJECT_CONNECTION
+ :ivar security_scheme: Project connection auth security details. Required.
+ :vartype security_scheme:
+ ~azure.ai.agentserver.responses.models.models.OpenApiProjectConnectionSecurityScheme
+ """
+
+ type: Literal[OpenApiAuthType.PROJECT_CONNECTION] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The object type, which is always 'project_connection'. Required. PROJECT_CONNECTION."""
+ security_scheme: "_models.OpenApiProjectConnectionSecurityScheme" = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Project connection auth security details. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ security_scheme: "_models.OpenApiProjectConnectionSecurityScheme",
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OpenApiAuthType.PROJECT_CONNECTION # type: ignore
+
+
+class OpenApiProjectConnectionSecurityScheme(_Model):
+ """Security scheme for OpenApi managed_identity authentication.
+
+ :ivar project_connection_id: Project connection id for Project Connection auth type. Required.
+ :vartype project_connection_id: str
+ """
+
+ project_connection_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Project connection id for Project Connection auth type. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ project_connection_id: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class OpenApiTool(Tool, discriminator="openapi"):
+ """The input definition information for an OpenAPI tool as used to configure an agent.
+
+ :ivar type: The object type, which is always 'openapi'. Required. OPENAPI.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.OPENAPI
+ :ivar openapi: The openapi function definition. Required.
+ :vartype openapi: ~azure.ai.agentserver.responses.models.models.OpenApiFunctionDefinition
+ """
+
+ type: Literal[ToolType.OPENAPI] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The object type, which is always 'openapi'. Required. OPENAPI."""
+ openapi: "_models.OpenApiFunctionDefinition" = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The openapi function definition. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ openapi: "_models.OpenApiFunctionDefinition",
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ToolType.OPENAPI # type: ignore
+
+
+class OpenApiToolCall(OutputItem, discriminator="openapi_call"):
+ """An OpenAPI tool call.
+
+ :ivar agent_reference: The agent that created the item.
+ :vartype agent_reference: ~azure.ai.agentserver.responses.models.models.AgentReference
+ :ivar response_id: The response on which the item is created.
+ :vartype response_id: str
+ :ivar type: Required. OPENAPI_CALL.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.OPENAPI_CALL
+ :ivar call_id: The unique ID of the tool call generated by the model. Required.
+ :vartype call_id: str
+ :ivar name: The name of the OpenAPI operation being called. Required.
+ :vartype name: str
+ :ivar arguments: A JSON string of the arguments to pass to the tool. Required.
+ :vartype arguments: str
+ :ivar status: The status of the tool call. Required. Known values are: "in_progress",
+ "completed", "incomplete", and "failed".
+ :vartype status: str or ~azure.ai.agentserver.responses.models.models.ToolCallStatus
+ :ivar id: Required.
+ :vartype id: str
+ """
+
+ type: Literal[OutputItemType.OPENAPI_CALL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Required. OPENAPI_CALL."""
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the tool call generated by the model. Required."""
+ name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The name of the OpenAPI operation being called. Required."""
+ arguments: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """A JSON string of the arguments to pass to the tool. Required."""
+ status: Union[str, "_models.ToolCallStatus"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the tool call. Required. Known values are: \"in_progress\", \"completed\",
+ \"incomplete\", and \"failed\"."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ call_id: str,
+ name: str,
+ arguments: str,
+ status: Union[str, "_models.ToolCallStatus"],
+ id: str, # pylint: disable=redefined-builtin
+ agent_reference: Optional["_models.AgentReference"] = None,
+ response_id: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputItemType.OPENAPI_CALL # type: ignore
+
+
+class OpenApiToolCallOutput(OutputItem, discriminator="openapi_call_output"):
+ """The output of an OpenAPI tool call.
+
+ :ivar agent_reference: The agent that created the item.
+ :vartype agent_reference: ~azure.ai.agentserver.responses.models.models.AgentReference
+ :ivar response_id: The response on which the item is created.
+ :vartype response_id: str
+ :ivar type: Required. OPENAPI_CALL_OUTPUT.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.OPENAPI_CALL_OUTPUT
+ :ivar call_id: The unique ID of the tool call generated by the model. Required.
+ :vartype call_id: str
+ :ivar name: The name of the OpenAPI operation that was called. Required.
+ :vartype name: str
+ :ivar output: The output from the OpenAPI tool call. Is one of the following types: {str: Any},
+ str, [Any]
+ :vartype output: dict[str, any] or str or list[any]
+ :ivar status: The status of the tool call. Required. Known values are: "in_progress",
+ "completed", "incomplete", and "failed".
+ :vartype status: str or ~azure.ai.agentserver.responses.models.models.ToolCallStatus
+ :ivar id: Required.
+ :vartype id: str
+ """
+
+ type: Literal[OutputItemType.OPENAPI_CALL_OUTPUT] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Required. OPENAPI_CALL_OUTPUT."""
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the tool call generated by the model. Required."""
+ name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The name of the OpenAPI operation that was called. Required."""
+ output: Optional["_types.ToolCallOutputContent"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The output from the OpenAPI tool call. Is one of the following types: {str: Any}, str, [Any]"""
+ status: Union[str, "_models.ToolCallStatus"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the tool call. Required. Known values are: \"in_progress\", \"completed\",
+ \"incomplete\", and \"failed\"."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ call_id: str,
+ name: str,
+ status: Union[str, "_models.ToolCallStatus"],
+ id: str, # pylint: disable=redefined-builtin
+ agent_reference: Optional["_models.AgentReference"] = None,
+ response_id: Optional[str] = None,
+ output: Optional["_types.ToolCallOutputContent"] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputItemType.OPENAPI_CALL_OUTPUT # type: ignore
+
+
+class OutputContent(_Model):
+ """OutputContent.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ OutputContentOutputTextContent, OutputContentReasoningTextContent, OutputContentRefusalContent
+
+ :ivar type: Required. Known values are: "output_text", "refusal", and "reasoning_text".
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.OutputContentType
+ """
+
+ __mapping__: dict[str, _Model] = {}
+ type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])
+ """Required. Known values are: \"output_text\", \"refusal\", and \"reasoning_text\"."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ type: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class OutputContentOutputTextContent(OutputContent, discriminator="output_text"):
+ """Output text.
+
+ :ivar type: The type of the output text. Always ``output_text``. Required. OUTPUT_TEXT.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.OUTPUT_TEXT
+ :ivar text: The text output from the model. Required.
+ :vartype text: str
+ :ivar annotations: The annotations of the text output. Required.
+ :vartype annotations: list[~azure.ai.agentserver.responses.models.models.Annotation]
+ :ivar logprobs: Required.
+ :vartype logprobs: list[~azure.ai.agentserver.responses.models.models.LogProb]
+ """
+
+ type: Literal[OutputContentType.OUTPUT_TEXT] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the output text. Always ``output_text``. Required. OUTPUT_TEXT."""
+ text: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The text output from the model. Required."""
+ annotations: list["_models.Annotation"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The annotations of the text output. Required."""
+ logprobs: list["_models.LogProb"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ text: str,
+ annotations: list["_models.Annotation"],
+ logprobs: list["_models.LogProb"],
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputContentType.OUTPUT_TEXT # type: ignore
+
+
+class OutputContentReasoningTextContent(OutputContent, discriminator="reasoning_text"):
+ """Reasoning text.
+
+ :ivar type: The type of the reasoning text. Always ``reasoning_text``. Required.
+ REASONING_TEXT.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.REASONING_TEXT
+ :ivar text: The reasoning text from the model. Required.
+ :vartype text: str
+ """
+
+ type: Literal[OutputContentType.REASONING_TEXT] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the reasoning text. Always ``reasoning_text``. Required. REASONING_TEXT."""
+ text: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The reasoning text from the model. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ text: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputContentType.REASONING_TEXT # type: ignore
+
+
+class OutputContentRefusalContent(OutputContent, discriminator="refusal"):
+ """Refusal.
+
+ :ivar type: The type of the refusal. Always ``refusal``. Required. REFUSAL.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.REFUSAL
+ :ivar refusal: The refusal explanation from the model. Required.
+ :vartype refusal: str
+ """
+
+ type: Literal[OutputContentType.REFUSAL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the refusal. Always ``refusal``. Required. REFUSAL."""
+ refusal: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The refusal explanation from the model. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ refusal: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputContentType.REFUSAL # type: ignore
+
+
+class OutputItemApplyPatchToolCall(OutputItem, discriminator="apply_patch_call"):
+ """Apply patch tool call.
+
+ :ivar agent_reference: The agent that created the item.
+ :vartype agent_reference: ~azure.ai.agentserver.responses.models.models.AgentReference
+ :ivar response_id: The response on which the item is created.
+ :vartype response_id: str
+ :ivar type: The type of the item. Always ``apply_patch_call``. Required. APPLY_PATCH_CALL.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.APPLY_PATCH_CALL
+ :ivar id: The unique ID of the apply patch tool call. Populated when this item is returned via
+ API. Required.
+ :vartype id: str
+ :ivar call_id: The unique ID of the apply patch tool call generated by the model. Required.
+ :vartype call_id: str
+ :ivar status: The status of the apply patch tool call. One of ``in_progress`` or ``completed``.
+ Required. Known values are: "in_progress" and "completed".
+ :vartype status: str or ~azure.ai.agentserver.responses.models.models.ApplyPatchCallStatus
+ :ivar operation: Apply patch operation. Required.
+ :vartype operation: ~azure.ai.agentserver.responses.models.models.ApplyPatchFileOperation
+ """
+
+ type: Literal[OutputItemType.APPLY_PATCH_CALL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the item. Always ``apply_patch_call``. Required. APPLY_PATCH_CALL."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the apply patch tool call. Populated when this item is returned via API.
+ Required."""
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the apply patch tool call generated by the model. Required."""
+ status: Union[str, "_models.ApplyPatchCallStatus"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the apply patch tool call. One of ``in_progress`` or ``completed``. Required.
+ Known values are: \"in_progress\" and \"completed\"."""
+ operation: "_models.ApplyPatchFileOperation" = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Apply patch operation. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ call_id: str,
+ status: Union[str, "_models.ApplyPatchCallStatus"],
+ operation: "_models.ApplyPatchFileOperation",
+ agent_reference: Optional["_models.AgentReference"] = None,
+ response_id: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputItemType.APPLY_PATCH_CALL # type: ignore
+
+
+class OutputItemApplyPatchToolCallOutput(OutputItem, discriminator="apply_patch_call_output"):
+ """Apply patch tool call output.
+
+ :ivar agent_reference: The agent that created the item.
+ :vartype agent_reference: ~azure.ai.agentserver.responses.models.models.AgentReference
+ :ivar response_id: The response on which the item is created.
+ :vartype response_id: str
+ :ivar type: The type of the item. Always ``apply_patch_call_output``. Required.
+ APPLY_PATCH_CALL_OUTPUT.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.APPLY_PATCH_CALL_OUTPUT
+ :ivar id: The unique ID of the apply patch tool call output. Populated when this item is
+ returned via API. Required.
+ :vartype id: str
+ :ivar call_id: The unique ID of the apply patch tool call generated by the model. Required.
+ :vartype call_id: str
+ :ivar status: The status of the apply patch tool call output. One of ``completed`` or
+ ``failed``. Required. Known values are: "completed" and "failed".
+ :vartype status: str or
+ ~azure.ai.agentserver.responses.models.models.ApplyPatchCallOutputStatus
+ :ivar output:
+ :vartype output: str
+ """
+
+ type: Literal[OutputItemType.APPLY_PATCH_CALL_OUTPUT] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the item. Always ``apply_patch_call_output``. Required. APPLY_PATCH_CALL_OUTPUT."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the apply patch tool call output. Populated when this item is returned via
+ API. Required."""
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the apply patch tool call generated by the model. Required."""
+ status: Union[str, "_models.ApplyPatchCallOutputStatus"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the apply patch tool call output. One of ``completed`` or ``failed``. Required.
+ Known values are: \"completed\" and \"failed\"."""
+ output: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ call_id: str,
+ status: Union[str, "_models.ApplyPatchCallOutputStatus"],
+ agent_reference: Optional["_models.AgentReference"] = None,
+ response_id: Optional[str] = None,
+ output: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputItemType.APPLY_PATCH_CALL_OUTPUT # type: ignore
+
+
+class OutputItemCodeInterpreterToolCall(OutputItem, discriminator="code_interpreter_call"):
+ """Code interpreter tool call.
+
+ :ivar agent_reference: The agent that created the item.
+ :vartype agent_reference: ~azure.ai.agentserver.responses.models.models.AgentReference
+ :ivar response_id: The response on which the item is created.
+ :vartype response_id: str
+ :ivar type: The type of the code interpreter tool call. Always ``code_interpreter_call``.
+ Required. CODE_INTERPRETER_CALL.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.CODE_INTERPRETER_CALL
+ :ivar id: The unique ID of the code interpreter tool call. Required.
+ :vartype id: str
+ :ivar status: The status of the code interpreter tool call. Valid values are ``in_progress``,
+ ``completed``, ``incomplete``, ``interpreting``, and ``failed``. Required. Is one of the
+ following types: Literal["in_progress"], Literal["completed"], Literal["incomplete"],
+ Literal["interpreting"], Literal["failed"]
+ :vartype status: str or str or str or str or str
+ :ivar container_id: The ID of the container used to run the code. Required.
+ :vartype container_id: str
+ :ivar code: Required.
+ :vartype code: str
+ :ivar outputs: Required.
+ :vartype outputs: list[~azure.ai.agentserver.responses.models.models.CodeInterpreterOutputLogs
+ or ~azure.ai.agentserver.responses.models.models.CodeInterpreterOutputImage]
+ """
+
+ type: Literal[OutputItemType.CODE_INTERPRETER_CALL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the code interpreter tool call. Always ``code_interpreter_call``. Required.
+ CODE_INTERPRETER_CALL."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the code interpreter tool call. Required."""
+ status: Literal["in_progress", "completed", "incomplete", "interpreting", "failed"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the code interpreter tool call. Valid values are ``in_progress``, ``completed``,
+ ``incomplete``, ``interpreting``, and ``failed``. Required. Is one of the following types:
+ Literal[\"in_progress\"], Literal[\"completed\"], Literal[\"incomplete\"],
+ Literal[\"interpreting\"], Literal[\"failed\"]"""
+ container_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the container used to run the code. Required."""
+ code: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+ outputs: list[Union["_models.CodeInterpreterOutputLogs", "_models.CodeInterpreterOutputImage"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ status: Literal["in_progress", "completed", "incomplete", "interpreting", "failed"],
+ container_id: str,
+ code: str,
+ outputs: list[Union["_models.CodeInterpreterOutputLogs", "_models.CodeInterpreterOutputImage"]],
+ agent_reference: Optional["_models.AgentReference"] = None,
+ response_id: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputItemType.CODE_INTERPRETER_CALL # type: ignore
+
+
+class OutputItemCompactionBody(OutputItem, discriminator="compaction"):
+ """Compaction item.
+
+ :ivar agent_reference: The agent that created the item.
+ :vartype agent_reference: ~azure.ai.agentserver.responses.models.models.AgentReference
+ :ivar response_id: The response on which the item is created.
+ :vartype response_id: str
+ :ivar type: The type of the item. Always ``compaction``. Required. COMPACTION.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.COMPACTION
+ :ivar id: The unique ID of the compaction item. Required.
+ :vartype id: str
+ :ivar encrypted_content: The encrypted content that was produced by compaction. Required.
+ :vartype encrypted_content: str
+ """
+
+ type: Literal[OutputItemType.COMPACTION] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the item. Always ``compaction``. Required. COMPACTION."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the compaction item. Required."""
+ encrypted_content: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The encrypted content that was produced by compaction. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ encrypted_content: str,
+ agent_reference: Optional["_models.AgentReference"] = None,
+ response_id: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputItemType.COMPACTION # type: ignore
+
+
+class OutputItemComputerToolCall(OutputItem, discriminator="computer_call"):
+ """Computer tool call.
+
+ :ivar agent_reference: The agent that created the item.
+ :vartype agent_reference: ~azure.ai.agentserver.responses.models.models.AgentReference
+ :ivar response_id: The response on which the item is created.
+ :vartype response_id: str
+ :ivar type: The type of the computer call. Always ``computer_call``. Required. COMPUTER_CALL.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.COMPUTER_CALL
+ :ivar id: The unique ID of the computer call. Required.
+ :vartype id: str
+ :ivar call_id: An identifier used when responding to the tool call with output. Required.
+ :vartype call_id: str
+ :ivar action: Required.
+ :vartype action: ~azure.ai.agentserver.responses.models.models.ComputerAction
+ :ivar pending_safety_checks: The pending safety checks for the computer call. Required.
+ :vartype pending_safety_checks:
+ list[~azure.ai.agentserver.responses.models.models.ComputerCallSafetyCheckParam]
+ :ivar status: The status of the item. One of ``in_progress``, ``completed``, or ``incomplete``.
+ Populated when items are returned via API. Required. Is one of the following types:
+ Literal["in_progress"], Literal["completed"], Literal["incomplete"]
+ :vartype status: str or str or str
+ """
+
+ type: Literal[OutputItemType.COMPUTER_CALL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the computer call. Always ``computer_call``. Required. COMPUTER_CALL."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the computer call. Required."""
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """An identifier used when responding to the tool call with output. Required."""
+ action: "_models.ComputerAction" = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+ pending_safety_checks: list["_models.ComputerCallSafetyCheckParam"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The pending safety checks for the computer call. Required."""
+ status: Literal["in_progress", "completed", "incomplete"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the item. One of ``in_progress``, ``completed``, or ``incomplete``. Populated
+ when items are returned via API. Required. Is one of the following types:
+ Literal[\"in_progress\"], Literal[\"completed\"], Literal[\"incomplete\"]"""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ call_id: str,
+ action: "_models.ComputerAction",
+ pending_safety_checks: list["_models.ComputerCallSafetyCheckParam"],
+ status: Literal["in_progress", "completed", "incomplete"],
+ agent_reference: Optional["_models.AgentReference"] = None,
+ response_id: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputItemType.COMPUTER_CALL # type: ignore
+
+
+class OutputItemComputerToolCallOutputResource(OutputItem, discriminator="computer_call_output"):
+ """OutputItemComputerToolCallOutputResource.
+
+ :ivar agent_reference: The agent that created the item.
+ :vartype agent_reference: ~azure.ai.agentserver.responses.models.models.AgentReference
+ :ivar response_id: The response on which the item is created.
+ :vartype response_id: str
+ :ivar type: The type of the computer tool call output. Always ``computer_call_output``.
+ Required. COMPUTER_CALL_OUTPUT.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.COMPUTER_CALL_OUTPUT
+ :ivar id: The ID of the computer tool call output.
+ :vartype id: str
+ :ivar call_id: The ID of the computer tool call that produced the output. Required.
+ :vartype call_id: str
+ :ivar acknowledged_safety_checks: The safety checks reported by the API that have been
+ acknowledged by the developer.
+ :vartype acknowledged_safety_checks:
+ list[~azure.ai.agentserver.responses.models.models.ComputerCallSafetyCheckParam]
+ :ivar output: Required.
+ :vartype output: ~azure.ai.agentserver.responses.models.models.ComputerScreenshotImage
+ :ivar status: The status of the message input. One of ``in_progress``, ``completed``, or
+ ``incomplete``. Populated when input items are returned via API. Is one of the following types:
+ Literal["in_progress"], Literal["completed"], Literal["incomplete"]
+ :vartype status: str or str or str
+ """
+
+ type: Literal[OutputItemType.COMPUTER_CALL_OUTPUT] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the computer tool call output. Always ``computer_call_output``. Required.
+ COMPUTER_CALL_OUTPUT."""
+ id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the computer tool call output."""
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the computer tool call that produced the output. Required."""
+ acknowledged_safety_checks: Optional[list["_models.ComputerCallSafetyCheckParam"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The safety checks reported by the API that have been acknowledged by the developer."""
+ output: "_models.ComputerScreenshotImage" = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+ status: Optional[Literal["in_progress", "completed", "incomplete"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the message input. One of ``in_progress``, ``completed``, or ``incomplete``.
+ Populated when input items are returned via API. Is one of the following types:
+ Literal[\"in_progress\"], Literal[\"completed\"], Literal[\"incomplete\"]"""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ call_id: str,
+ output: "_models.ComputerScreenshotImage",
+ agent_reference: Optional["_models.AgentReference"] = None,
+ response_id: Optional[str] = None,
+ id: Optional[str] = None, # pylint: disable=redefined-builtin
+ acknowledged_safety_checks: Optional[list["_models.ComputerCallSafetyCheckParam"]] = None,
+ status: Optional[Literal["in_progress", "completed", "incomplete"]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputItemType.COMPUTER_CALL_OUTPUT # type: ignore
+
+
+class OutputItemCustomToolCall(OutputItem, discriminator="custom_tool_call"):
+ """Custom tool call.
+
+ :ivar agent_reference: The agent that created the item.
+ :vartype agent_reference: ~azure.ai.agentserver.responses.models.models.AgentReference
+ :ivar response_id: The response on which the item is created.
+ :vartype response_id: str
+ :ivar type: The type of the custom tool call. Always ``custom_tool_call``. Required.
+ CUSTOM_TOOL_CALL.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.CUSTOM_TOOL_CALL
+ :ivar id: The unique ID of the custom tool call in the OpenAI platform.
+ :vartype id: str
+ :ivar call_id: An identifier used to map this custom tool call to a tool call output. Required.
+ :vartype call_id: str
+ :ivar name: The name of the custom tool being called. Required.
+ :vartype name: str
+ :ivar input: The input for the custom tool call generated by the model. Required.
+ :vartype input: str
+ """
+
+ type: Literal[OutputItemType.CUSTOM_TOOL_CALL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the custom tool call. Always ``custom_tool_call``. Required. CUSTOM_TOOL_CALL."""
+ id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the custom tool call in the OpenAI platform."""
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """An identifier used to map this custom tool call to a tool call output. Required."""
+ name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The name of the custom tool being called. Required."""
+ input: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The input for the custom tool call generated by the model. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ call_id: str,
+ name: str,
+ input: str,
+ agent_reference: Optional["_models.AgentReference"] = None,
+ response_id: Optional[str] = None,
+ id: Optional[str] = None, # pylint: disable=redefined-builtin
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputItemType.CUSTOM_TOOL_CALL # type: ignore
+
+
+class OutputItemCustomToolCallOutput(OutputItem, discriminator="custom_tool_call_output"):
+ """Custom tool call output.
+
+ :ivar agent_reference: The agent that created the item.
+ :vartype agent_reference: ~azure.ai.agentserver.responses.models.models.AgentReference
+ :ivar response_id: The response on which the item is created.
+ :vartype response_id: str
+ :ivar type: The type of the custom tool call output. Always ``custom_tool_call_output``.
+ Required. CUSTOM_TOOL_CALL_OUTPUT.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.CUSTOM_TOOL_CALL_OUTPUT
+ :ivar id: The unique ID of the custom tool call output in the OpenAI platform.
+ :vartype id: str
+ :ivar call_id: The call ID, used to map this custom tool call output to a custom tool call.
+ Required.
+ :vartype call_id: str
+ :ivar output: The output from the custom tool call generated by your code. Can be a string or
+ an list of output content. Required. Is either a str type or a
+ [FunctionAndCustomToolCallOutput] type.
+ :vartype output: str or
+ list[~azure.ai.agentserver.responses.models.models.FunctionAndCustomToolCallOutput]
+ """
+
+ type: Literal[OutputItemType.CUSTOM_TOOL_CALL_OUTPUT] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the custom tool call output. Always ``custom_tool_call_output``. Required.
+ CUSTOM_TOOL_CALL_OUTPUT."""
+ id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the custom tool call output in the OpenAI platform."""
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The call ID, used to map this custom tool call output to a custom tool call. Required."""
+ output: Union[str, list["_models.FunctionAndCustomToolCallOutput"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The output from the custom tool call generated by your code. Can be a string or an list of
+ output content. Required. Is either a str type or a [FunctionAndCustomToolCallOutput] type."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ call_id: str,
+ output: Union[str, list["_models.FunctionAndCustomToolCallOutput"]],
+ agent_reference: Optional["_models.AgentReference"] = None,
+ response_id: Optional[str] = None,
+ id: Optional[str] = None, # pylint: disable=redefined-builtin
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputItemType.CUSTOM_TOOL_CALL_OUTPUT # type: ignore
+
+
+class OutputItemFileSearchToolCall(OutputItem, discriminator="file_search_call"):
+ """File search tool call.
+
+ :ivar agent_reference: The agent that created the item.
+ :vartype agent_reference: ~azure.ai.agentserver.responses.models.models.AgentReference
+ :ivar response_id: The response on which the item is created.
+ :vartype response_id: str
+ :ivar id: The unique ID of the file search tool call. Required.
+ :vartype id: str
+ :ivar type: The type of the file search tool call. Always ``file_search_call``. Required.
+ FILE_SEARCH_CALL.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.FILE_SEARCH_CALL
+ :ivar status: The status of the file search tool call. One of ``in_progress``, ``searching``,
+ ``incomplete`` or ``failed``,. Required. Is one of the following types: Literal["in_progress"],
+ Literal["searching"], Literal["completed"], Literal["incomplete"], Literal["failed"]
+ :vartype status: str or str or str or str or str
+ :ivar queries: The queries used to search for files. Required.
+ :vartype queries: list[str]
+ :ivar results:
+ :vartype results: list[~azure.ai.agentserver.responses.models.models.FileSearchToolCallResults]
+ """
+
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the file search tool call. Required."""
+ type: Literal[OutputItemType.FILE_SEARCH_CALL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the file search tool call. Always ``file_search_call``. Required. FILE_SEARCH_CALL."""
+ status: Literal["in_progress", "searching", "completed", "incomplete", "failed"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the file search tool call. One of ``in_progress``, ``searching``, ``incomplete``
+ or ``failed``,. Required. Is one of the following types: Literal[\"in_progress\"],
+ Literal[\"searching\"], Literal[\"completed\"], Literal[\"incomplete\"], Literal[\"failed\"]"""
+ queries: list[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The queries used to search for files. Required."""
+ results: Optional[list["_models.FileSearchToolCallResults"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ status: Literal["in_progress", "searching", "completed", "incomplete", "failed"],
+ queries: list[str],
+ agent_reference: Optional["_models.AgentReference"] = None,
+ response_id: Optional[str] = None,
+ results: Optional[list["_models.FileSearchToolCallResults"]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputItemType.FILE_SEARCH_CALL # type: ignore
+
+
+class OutputItemFunctionShellCall(OutputItem, discriminator="shell_call"):
+ """Shell tool call.
+
+ :ivar agent_reference: The agent that created the item.
+ :vartype agent_reference: ~azure.ai.agentserver.responses.models.models.AgentReference
+ :ivar response_id: The response on which the item is created.
+ :vartype response_id: str
+ :ivar type: The type of the item. Always ``shell_call``. Required. SHELL_CALL.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.SHELL_CALL
+ :ivar id: The unique ID of the shell tool call. Populated when this item is returned via API.
+ Required.
+ :vartype id: str
+ :ivar call_id: The unique ID of the shell tool call generated by the model. Required.
+ :vartype call_id: str
+ :ivar action: The shell commands and limits that describe how to run the tool call. Required.
+ :vartype action: ~azure.ai.agentserver.responses.models.models.FunctionShellAction
+ :ivar status: The status of the shell call. One of ``in_progress``, ``completed``, or
+ ``incomplete``. Required. Known values are: "in_progress", "completed", and "incomplete".
+ :vartype status: str or ~azure.ai.agentserver.responses.models.models.LocalShellCallStatus
+ :ivar environment: Required.
+ :vartype environment:
+ ~azure.ai.agentserver.responses.models.models.FunctionShellCallEnvironment
+ """
+
+ type: Literal[OutputItemType.SHELL_CALL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the item. Always ``shell_call``. Required. SHELL_CALL."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the shell tool call. Populated when this item is returned via API. Required."""
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the shell tool call generated by the model. Required."""
+ action: "_models.FunctionShellAction" = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The shell commands and limits that describe how to run the tool call. Required."""
+ status: Union[str, "_models.LocalShellCallStatus"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the shell call. One of ``in_progress``, ``completed``, or ``incomplete``.
+ Required. Known values are: \"in_progress\", \"completed\", and \"incomplete\"."""
+ environment: "_models.FunctionShellCallEnvironment" = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ call_id: str,
+ action: "_models.FunctionShellAction",
+ status: Union[str, "_models.LocalShellCallStatus"],
+ environment: "_models.FunctionShellCallEnvironment",
+ agent_reference: Optional["_models.AgentReference"] = None,
+ response_id: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputItemType.SHELL_CALL # type: ignore
+
+
+class OutputItemFunctionShellCallOutput(OutputItem, discriminator="shell_call_output"):
+ """Shell call output.
+
+ :ivar agent_reference: The agent that created the item.
+ :vartype agent_reference: ~azure.ai.agentserver.responses.models.models.AgentReference
+ :ivar response_id: The response on which the item is created.
+ :vartype response_id: str
+ :ivar type: The type of the shell call output. Always ``shell_call_output``. Required.
+ SHELL_CALL_OUTPUT.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.SHELL_CALL_OUTPUT
+ :ivar id: The unique ID of the shell call output. Populated when this item is returned via API.
+ Required.
+ :vartype id: str
+ :ivar call_id: The unique ID of the shell tool call generated by the model. Required.
+ :vartype call_id: str
+ :ivar status: The status of the shell call output. One of ``in_progress``, ``completed``, or
+ ``incomplete``. Required. Known values are: "in_progress", "completed", and "incomplete".
+ :vartype status: str or
+ ~azure.ai.agentserver.responses.models.models.LocalShellCallOutputStatusEnum
+ :ivar output: An array of shell call output contents. Required.
+ :vartype output:
+ list[~azure.ai.agentserver.responses.models.models.FunctionShellCallOutputContent]
+ :ivar max_output_length: Required.
+ :vartype max_output_length: int
+ """
+
+ type: Literal[OutputItemType.SHELL_CALL_OUTPUT] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the shell call output. Always ``shell_call_output``. Required. SHELL_CALL_OUTPUT."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the shell call output. Populated when this item is returned via API. Required."""
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the shell tool call generated by the model. Required."""
+ status: Union[str, "_models.LocalShellCallOutputStatusEnum"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the shell call output. One of ``in_progress``, ``completed``, or ``incomplete``.
+ Required. Known values are: \"in_progress\", \"completed\", and \"incomplete\"."""
+ output: list["_models.FunctionShellCallOutputContent"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """An array of shell call output contents. Required."""
+ max_output_length: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ call_id: str,
+ status: Union[str, "_models.LocalShellCallOutputStatusEnum"],
+ output: list["_models.FunctionShellCallOutputContent"],
+ max_output_length: int,
+ agent_reference: Optional["_models.AgentReference"] = None,
+ response_id: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputItemType.SHELL_CALL_OUTPUT # type: ignore
+
+
+class OutputItemFunctionToolCall(OutputItem, discriminator="function_call"):
+ """Function tool call.
+
+ :ivar agent_reference: The agent that created the item.
+ :vartype agent_reference: ~azure.ai.agentserver.responses.models.models.AgentReference
+ :ivar response_id: The response on which the item is created.
+ :vartype response_id: str
+ :ivar id: The unique ID of the function tool call.
+ :vartype id: str
+ :ivar type: The type of the function tool call. Always ``function_call``. Required.
+ FUNCTION_CALL.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.FUNCTION_CALL
+ :ivar call_id: The unique ID of the function tool call generated by the model. Required.
+ :vartype call_id: str
+ :ivar name: The name of the function to run. Required.
+ :vartype name: str
+ :ivar arguments: A JSON string of the arguments to pass to the function. Required.
+ :vartype arguments: str
+ :ivar status: The status of the item. One of ``in_progress``, ``completed``, or ``incomplete``.
+ Populated when items are returned via API. Is one of the following types:
+ Literal["in_progress"], Literal["completed"], Literal["incomplete"]
+ :vartype status: str or str or str
+ """
+
+ id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the function tool call."""
+ type: Literal[OutputItemType.FUNCTION_CALL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the function tool call. Always ``function_call``. Required. FUNCTION_CALL."""
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the function tool call generated by the model. Required."""
+ name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The name of the function to run. Required."""
+ arguments: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """A JSON string of the arguments to pass to the function. Required."""
+ status: Optional[Literal["in_progress", "completed", "incomplete"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the item. One of ``in_progress``, ``completed``, or ``incomplete``. Populated
+ when items are returned via API. Is one of the following types: Literal[\"in_progress\"],
+ Literal[\"completed\"], Literal[\"incomplete\"]"""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ call_id: str,
+ name: str,
+ arguments: str,
+ agent_reference: Optional["_models.AgentReference"] = None,
+ response_id: Optional[str] = None,
+ id: Optional[str] = None, # pylint: disable=redefined-builtin
+ status: Optional[Literal["in_progress", "completed", "incomplete"]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputItemType.FUNCTION_CALL # type: ignore
+
+
+class OutputItemImageGenToolCall(OutputItem, discriminator="image_generation_call"):
+ """Image generation call.
+
+ :ivar agent_reference: The agent that created the item.
+ :vartype agent_reference: ~azure.ai.agentserver.responses.models.models.AgentReference
+ :ivar response_id: The response on which the item is created.
+ :vartype response_id: str
+ :ivar type: The type of the image generation call. Always ``image_generation_call``. Required.
+ IMAGE_GENERATION_CALL.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.IMAGE_GENERATION_CALL
+ :ivar id: The unique ID of the image generation call. Required.
+ :vartype id: str
+ :ivar status: The status of the image generation call. Required. Is one of the following types:
+ Literal["in_progress"], Literal["completed"], Literal["generating"], Literal["failed"]
+ :vartype status: str or str or str or str
+ :ivar result: Required.
+ :vartype result: str
+ """
+
+ type: Literal[OutputItemType.IMAGE_GENERATION_CALL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the image generation call. Always ``image_generation_call``. Required.
+ IMAGE_GENERATION_CALL."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the image generation call. Required."""
+ status: Literal["in_progress", "completed", "generating", "failed"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the image generation call. Required. Is one of the following types:
+ Literal[\"in_progress\"], Literal[\"completed\"], Literal[\"generating\"], Literal[\"failed\"]"""
+ result: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ status: Literal["in_progress", "completed", "generating", "failed"],
+ result: str,
+ agent_reference: Optional["_models.AgentReference"] = None,
+ response_id: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputItemType.IMAGE_GENERATION_CALL # type: ignore
+
+
+class OutputItemLocalShellToolCall(OutputItem, discriminator="local_shell_call"):
+ """Local shell call.
+
+ :ivar agent_reference: The agent that created the item.
+ :vartype agent_reference: ~azure.ai.agentserver.responses.models.models.AgentReference
+ :ivar response_id: The response on which the item is created.
+ :vartype response_id: str
+ :ivar type: The type of the local shell call. Always ``local_shell_call``. Required.
+ LOCAL_SHELL_CALL.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.LOCAL_SHELL_CALL
+ :ivar id: The unique ID of the local shell call. Required.
+ :vartype id: str
+ :ivar call_id: The unique ID of the local shell tool call generated by the model. Required.
+ :vartype call_id: str
+ :ivar action: Required.
+ :vartype action: ~azure.ai.agentserver.responses.models.models.LocalShellExecAction
+ :ivar status: The status of the local shell call. Required. Is one of the following types:
+ Literal["in_progress"], Literal["completed"], Literal["incomplete"]
+ :vartype status: str or str or str
+ """
+
+ type: Literal[OutputItemType.LOCAL_SHELL_CALL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the local shell call. Always ``local_shell_call``. Required. LOCAL_SHELL_CALL."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the local shell call. Required."""
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the local shell tool call generated by the model. Required."""
+ action: "_models.LocalShellExecAction" = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+ status: Literal["in_progress", "completed", "incomplete"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the local shell call. Required. Is one of the following types:
+ Literal[\"in_progress\"], Literal[\"completed\"], Literal[\"incomplete\"]"""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ call_id: str,
+ action: "_models.LocalShellExecAction",
+ status: Literal["in_progress", "completed", "incomplete"],
+ agent_reference: Optional["_models.AgentReference"] = None,
+ response_id: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputItemType.LOCAL_SHELL_CALL # type: ignore
+
+
+class OutputItemLocalShellToolCallOutput(OutputItem, discriminator="local_shell_call_output"):
+ """Local shell call output.
+
+ :ivar agent_reference: The agent that created the item.
+ :vartype agent_reference: ~azure.ai.agentserver.responses.models.models.AgentReference
+ :ivar response_id: The response on which the item is created.
+ :vartype response_id: str
+ :ivar type: The type of the local shell tool call output. Always ``local_shell_call_output``.
+ Required. LOCAL_SHELL_CALL_OUTPUT.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.LOCAL_SHELL_CALL_OUTPUT
+ :ivar id: The unique ID of the local shell tool call generated by the model. Required.
+ :vartype id: str
+ :ivar output: A JSON string of the output of the local shell tool call. Required.
+ :vartype output: str
+ :ivar status: Is one of the following types: Literal["in_progress"], Literal["completed"],
+ Literal["incomplete"]
+ :vartype status: str or str or str
+ """
+
+ type: Literal[OutputItemType.LOCAL_SHELL_CALL_OUTPUT] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the local shell tool call output. Always ``local_shell_call_output``. Required.
+ LOCAL_SHELL_CALL_OUTPUT."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the local shell tool call generated by the model. Required."""
+ output: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """A JSON string of the output of the local shell tool call. Required."""
+ status: Optional[Literal["in_progress", "completed", "incomplete"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Is one of the following types: Literal[\"in_progress\"], Literal[\"completed\"],
+ Literal[\"incomplete\"]"""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ output: str,
+ agent_reference: Optional["_models.AgentReference"] = None,
+ response_id: Optional[str] = None,
+ status: Optional[Literal["in_progress", "completed", "incomplete"]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputItemType.LOCAL_SHELL_CALL_OUTPUT # type: ignore
+
+
+class OutputItemMcpApprovalRequest(OutputItem, discriminator="mcp_approval_request"):
+ """MCP approval request.
+
+ :ivar agent_reference: The agent that created the item.
+ :vartype agent_reference: ~azure.ai.agentserver.responses.models.models.AgentReference
+ :ivar response_id: The response on which the item is created.
+ :vartype response_id: str
+ :ivar type: The type of the item. Always ``mcp_approval_request``. Required.
+ MCP_APPROVAL_REQUEST.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.MCP_APPROVAL_REQUEST
+ :ivar id: The unique ID of the approval request. Required.
+ :vartype id: str
+ :ivar server_label: The label of the MCP server making the request. Required.
+ :vartype server_label: str
+ :ivar name: The name of the tool to run. Required.
+ :vartype name: str
+ :ivar arguments: A JSON string of arguments for the tool. Required.
+ :vartype arguments: str
+ """
+
+ type: Literal[OutputItemType.MCP_APPROVAL_REQUEST] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the item. Always ``mcp_approval_request``. Required. MCP_APPROVAL_REQUEST."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the approval request. Required."""
+ server_label: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The label of the MCP server making the request. Required."""
+ name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The name of the tool to run. Required."""
+ arguments: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """A JSON string of arguments for the tool. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ server_label: str,
+ name: str,
+ arguments: str,
+ agent_reference: Optional["_models.AgentReference"] = None,
+ response_id: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputItemType.MCP_APPROVAL_REQUEST # type: ignore
+
+
+class OutputItemMcpApprovalResponseResource(OutputItem, discriminator="mcp_approval_response"):
+ """MCP approval response.
+
+ :ivar agent_reference: The agent that created the item.
+ :vartype agent_reference: ~azure.ai.agentserver.responses.models.models.AgentReference
+ :ivar response_id: The response on which the item is created.
+ :vartype response_id: str
+ :ivar type: The type of the item. Always ``mcp_approval_response``. Required.
+ MCP_APPROVAL_RESPONSE.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.MCP_APPROVAL_RESPONSE
+ :ivar id: The unique ID of the approval response. Required.
+ :vartype id: str
+ :ivar approval_request_id: The ID of the approval request being answered. Required.
+ :vartype approval_request_id: str
+ :ivar approve: Whether the request was approved. Required.
+ :vartype approve: bool
+ :ivar reason:
+ :vartype reason: str
+ """
+
+ type: Literal[OutputItemType.MCP_APPROVAL_RESPONSE] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the item. Always ``mcp_approval_response``. Required. MCP_APPROVAL_RESPONSE."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the approval response. Required."""
+ approval_request_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the approval request being answered. Required."""
+ approve: bool = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Whether the request was approved. Required."""
+ reason: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ approval_request_id: str,
+ approve: bool,
+ agent_reference: Optional["_models.AgentReference"] = None,
+ response_id: Optional[str] = None,
+ reason: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputItemType.MCP_APPROVAL_RESPONSE # type: ignore
+
+
+class OutputItemMcpListTools(OutputItem, discriminator="mcp_list_tools"):
+ """MCP list tools.
+
+ :ivar agent_reference: The agent that created the item.
+ :vartype agent_reference: ~azure.ai.agentserver.responses.models.models.AgentReference
+ :ivar response_id: The response on which the item is created.
+ :vartype response_id: str
+ :ivar type: The type of the item. Always ``mcp_list_tools``. Required. MCP_LIST_TOOLS.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.MCP_LIST_TOOLS
+ :ivar id: The unique ID of the list. Required.
+ :vartype id: str
+ :ivar server_label: The label of the MCP server. Required.
+ :vartype server_label: str
+ :ivar tools: The tools available on the server. Required.
+ :vartype tools: list[~azure.ai.agentserver.responses.models.models.MCPListToolsTool]
+ :ivar error:
+ :vartype error: str
+ """
+
+ type: Literal[OutputItemType.MCP_LIST_TOOLS] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the item. Always ``mcp_list_tools``. Required. MCP_LIST_TOOLS."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the list. Required."""
+ server_label: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The label of the MCP server. Required."""
+ tools: list["_models.MCPListToolsTool"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The tools available on the server. Required."""
+ error: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ server_label: str,
+ tools: list["_models.MCPListToolsTool"],
+ agent_reference: Optional["_models.AgentReference"] = None,
+ response_id: Optional[str] = None,
+ error: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputItemType.MCP_LIST_TOOLS # type: ignore
+
+
+class OutputItemMcpToolCall(OutputItem, discriminator="mcp_call"):
+ """MCP tool call.
+
+ :ivar agent_reference: The agent that created the item.
+ :vartype agent_reference: ~azure.ai.agentserver.responses.models.models.AgentReference
+ :ivar response_id: The response on which the item is created.
+ :vartype response_id: str
+ :ivar type: The type of the item. Always ``mcp_call``. Required. MCP_CALL.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.MCP_CALL
+ :ivar id: The unique ID of the tool call. Required.
+ :vartype id: str
+ :ivar server_label: The label of the MCP server running the tool. Required.
+ :vartype server_label: str
+ :ivar name: The name of the tool that was run. Required.
+ :vartype name: str
+ :ivar arguments: A JSON string of the arguments passed to the tool. Required.
+ :vartype arguments: str
+ :ivar output:
+ :vartype output: str
+ :ivar error:
+ :vartype error: dict[str, any]
+ :ivar status: The status of the tool call. One of ``in_progress``, ``completed``,
+ ``incomplete``, ``calling``, or ``failed``. Known values are: "in_progress", "completed",
+ "incomplete", "calling", and "failed".
+ :vartype status: str or ~azure.ai.agentserver.responses.models.models.MCPToolCallStatus
+ :ivar approval_request_id:
+ :vartype approval_request_id: str
+ """
+
+ type: Literal[OutputItemType.MCP_CALL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the item. Always ``mcp_call``. Required. MCP_CALL."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the tool call. Required."""
+ server_label: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The label of the MCP server running the tool. Required."""
+ name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The name of the tool that was run. Required."""
+ arguments: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """A JSON string of the arguments passed to the tool. Required."""
+ output: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ error: Optional[dict[str, Any]] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ status: Optional[Union[str, "_models.MCPToolCallStatus"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the tool call. One of ``in_progress``, ``completed``, ``incomplete``,
+ ``calling``, or ``failed``. Known values are: \"in_progress\", \"completed\", \"incomplete\",
+ \"calling\", and \"failed\"."""
+ approval_request_id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ server_label: str,
+ name: str,
+ arguments: str,
+ agent_reference: Optional["_models.AgentReference"] = None,
+ response_id: Optional[str] = None,
+ output: Optional[str] = None,
+ error: Optional[dict[str, Any]] = None,
+ status: Optional[Union[str, "_models.MCPToolCallStatus"]] = None,
+ approval_request_id: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputItemType.MCP_CALL # type: ignore
+
+
+class OutputItemMessage(OutputItem, discriminator="message"):
+ """Message.
+
+ :ivar agent_reference: The agent that created the item.
+ :vartype agent_reference: ~azure.ai.agentserver.responses.models.models.AgentReference
+ :ivar response_id: The response on which the item is created.
+ :vartype response_id: str
+ :ivar type: The type of the message. Always set to ``message``. Required. MESSAGE.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.MESSAGE
+ :ivar id: The unique ID of the message. Required.
+ :vartype id: str
+ :ivar status: The status of item. One of ``in_progress``, ``completed``, or ``incomplete``.
+ Populated when items are returned via API. Required. Known values are: "in_progress",
+ "completed", and "incomplete".
+ :vartype status: str or ~azure.ai.agentserver.responses.models.models.MessageStatus
+ :ivar role: The role of the message. One of ``unknown``, ``user``, ``assistant``, ``system``,
+ ``critic``, ``discriminator``, ``developer``, or ``tool``. Required. Known values are:
+ "unknown", "user", "assistant", "system", "critic", "discriminator", "developer", and "tool".
+ :vartype role: str or ~azure.ai.agentserver.responses.models.models.MessageRole
+ :ivar content: The content of the message. Required.
+ :vartype content: list[~azure.ai.agentserver.responses.models.models.MessageContent]
+ """
+
+ type: Literal[OutputItemType.MESSAGE] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the message. Always set to ``message``. Required. MESSAGE."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the message. Required."""
+ status: Union[str, "_models.MessageStatus"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The status of item. One of ``in_progress``, ``completed``, or ``incomplete``. Populated when
+ items are returned via API. Required. Known values are: \"in_progress\", \"completed\", and
+ \"incomplete\"."""
+ role: Union[str, "_models.MessageRole"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The role of the message. One of ``unknown``, ``user``, ``assistant``, ``system``, ``critic``,
+ ``discriminator``, ``developer``, or ``tool``. Required. Known values are: \"unknown\",
+ \"user\", \"assistant\", \"system\", \"critic\", \"discriminator\", \"developer\", and
+ \"tool\"."""
+ content: list["_models.MessageContent"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The content of the message. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ status: Union[str, "_models.MessageStatus"],
+ role: Union[str, "_models.MessageRole"],
+ content: list["_models.MessageContent"],
+ agent_reference: Optional["_models.AgentReference"] = None,
+ response_id: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputItemType.MESSAGE # type: ignore
+
+
+class OutputItemOutputMessage(OutputItem, discriminator="output_message"):
+ """Output message.
+
+ :ivar agent_reference: The agent that created the item.
+ :vartype agent_reference: ~azure.ai.agentserver.responses.models.models.AgentReference
+ :ivar response_id: The response on which the item is created.
+ :vartype response_id: str
+ :ivar id: The unique ID of the output message. Required.
+ :vartype id: str
+ :ivar type: The type of the output message. Always ``message``. Required. OUTPUT_MESSAGE.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.OUTPUT_MESSAGE
+ :ivar role: The role of the output message. Always ``assistant``. Required. Default value is
+ "assistant".
+ :vartype role: str
+ :ivar content: The content of the output message. Required.
+ :vartype content: list[~azure.ai.agentserver.responses.models.models.OutputMessageContent]
+ :ivar status: The status of the message input. One of ``in_progress``, ``completed``, or
+ ``incomplete``. Populated when input items are returned via API. Required. Is one of the
+ following types: Literal["in_progress"], Literal["completed"], Literal["incomplete"]
+ :vartype status: str or str or str
+ """
+
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the output message. Required."""
+ type: Literal[OutputItemType.OUTPUT_MESSAGE] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the output message. Always ``message``. Required. OUTPUT_MESSAGE."""
+ role: Literal["assistant"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The role of the output message. Always ``assistant``. Required. Default value is \"assistant\"."""
+ content: list["_models.OutputMessageContent"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The content of the output message. Required."""
+ status: Literal["in_progress", "completed", "incomplete"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the message input. One of ``in_progress``, ``completed``, or ``incomplete``.
+ Populated when input items are returned via API. Required. Is one of the following types:
+ Literal[\"in_progress\"], Literal[\"completed\"], Literal[\"incomplete\"]"""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ content: list["_models.OutputMessageContent"],
+ status: Literal["in_progress", "completed", "incomplete"],
+ agent_reference: Optional["_models.AgentReference"] = None,
+ response_id: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputItemType.OUTPUT_MESSAGE # type: ignore
+ self.role: Literal["assistant"] = "assistant"
+
+
+class OutputItemReasoningItem(OutputItem, discriminator="reasoning"):
+ """Reasoning.
+
+ :ivar agent_reference: The agent that created the item.
+ :vartype agent_reference: ~azure.ai.agentserver.responses.models.models.AgentReference
+ :ivar response_id: The response on which the item is created.
+ :vartype response_id: str
+ :ivar type: The type of the object. Always ``reasoning``. Required. REASONING.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.REASONING
+ :ivar id: The unique identifier of the reasoning content. Required.
+ :vartype id: str
+ :ivar encrypted_content:
+ :vartype encrypted_content: str
+ :ivar summary: Reasoning summary content. Required.
+ :vartype summary: list[~azure.ai.agentserver.responses.models.models.SummaryTextContent]
+ :ivar content: Reasoning text content.
+ :vartype content: list[~azure.ai.agentserver.responses.models.models.ReasoningTextContent]
+ :ivar status: The status of the item. One of ``in_progress``, ``completed``, or ``incomplete``.
+ Populated when items are returned via API. Is one of the following types:
+ Literal["in_progress"], Literal["completed"], Literal["incomplete"]
+ :vartype status: str or str or str
+ """
+
+ type: Literal[OutputItemType.REASONING] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the object. Always ``reasoning``. Required. REASONING."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique identifier of the reasoning content. Required."""
+ encrypted_content: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ summary: list["_models.SummaryTextContent"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Reasoning summary content. Required."""
+ content: Optional[list["_models.ReasoningTextContent"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Reasoning text content."""
+ status: Optional[Literal["in_progress", "completed", "incomplete"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the item. One of ``in_progress``, ``completed``, or ``incomplete``. Populated
+ when items are returned via API. Is one of the following types: Literal[\"in_progress\"],
+ Literal[\"completed\"], Literal[\"incomplete\"]"""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ summary: list["_models.SummaryTextContent"],
+ agent_reference: Optional["_models.AgentReference"] = None,
+ response_id: Optional[str] = None,
+ encrypted_content: Optional[str] = None,
+ content: Optional[list["_models.ReasoningTextContent"]] = None,
+ status: Optional[Literal["in_progress", "completed", "incomplete"]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputItemType.REASONING # type: ignore
+
+
+class OutputItemWebSearchToolCall(OutputItem, discriminator="web_search_call"):
+ """Web search tool call.
+
+ :ivar agent_reference: The agent that created the item.
+ :vartype agent_reference: ~azure.ai.agentserver.responses.models.models.AgentReference
+ :ivar response_id: The response on which the item is created.
+ :vartype response_id: str
+ :ivar id: The unique ID of the web search tool call. Required.
+ :vartype id: str
+ :ivar type: The type of the web search tool call. Always ``web_search_call``. Required.
+ WEB_SEARCH_CALL.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.WEB_SEARCH_CALL
+ :ivar status: The status of the web search tool call. Required. Is one of the following types:
+ Literal["in_progress"], Literal["searching"], Literal["completed"], Literal["failed"]
+ :vartype status: str or str or str or str
+ :ivar action: An object describing the specific action taken in this web search call. Includes
+ details on how the model used the web (search, open_page, find_in_page). Required. Is one of
+ the following types: WebSearchActionSearch, WebSearchActionOpenPage, WebSearchActionFind
+ :vartype action: ~azure.ai.agentserver.responses.models.models.WebSearchActionSearch or
+ ~azure.ai.agentserver.responses.models.models.WebSearchActionOpenPage or
+ ~azure.ai.agentserver.responses.models.models.WebSearchActionFind
+ """
+
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the web search tool call. Required."""
+ type: Literal[OutputItemType.WEB_SEARCH_CALL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the web search tool call. Always ``web_search_call``. Required. WEB_SEARCH_CALL."""
+ status: Literal["in_progress", "searching", "completed", "failed"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the web search tool call. Required. Is one of the following types:
+ Literal[\"in_progress\"], Literal[\"searching\"], Literal[\"completed\"], Literal[\"failed\"]"""
+ action: Union["_models.WebSearchActionSearch", "_models.WebSearchActionOpenPage", "_models.WebSearchActionFind"] = (
+ rest_field(visibility=["read", "create", "update", "delete", "query"])
+ )
+ """An object describing the specific action taken in this web search call. Includes details on how
+ the model used the web (search, open_page, find_in_page). Required. Is one of the following
+ types: WebSearchActionSearch, WebSearchActionOpenPage, WebSearchActionFind"""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ status: Literal["in_progress", "searching", "completed", "failed"],
+ action: Union[
+ "_models.WebSearchActionSearch", "_models.WebSearchActionOpenPage", "_models.WebSearchActionFind"
+ ],
+ agent_reference: Optional["_models.AgentReference"] = None,
+ response_id: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputItemType.WEB_SEARCH_CALL # type: ignore
+
+
+class OutputMessageContent(_Model):
+ """OutputMessageContent.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ OutputMessageContentOutputTextContent, OutputMessageContentRefusalContent
+
+ :ivar type: Required. Known values are: "output_text" and "refusal".
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.OutputMessageContentType
+ """
+
+ __mapping__: dict[str, _Model] = {}
+ type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])
+ """Required. Known values are: \"output_text\" and \"refusal\"."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ type: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class OutputMessageContentOutputTextContent(OutputMessageContent, discriminator="output_text"):
+ """Output text.
+
+ :ivar type: The type of the output text. Always ``output_text``. Required. OUTPUT_TEXT.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.OUTPUT_TEXT
+ :ivar text: The text output from the model. Required.
+ :vartype text: str
+ :ivar annotations: The annotations of the text output. Required.
+ :vartype annotations: list[~azure.ai.agentserver.responses.models.models.Annotation]
+ :ivar logprobs: Required.
+ :vartype logprobs: list[~azure.ai.agentserver.responses.models.models.LogProb]
+ """
+
+ type: Literal[OutputMessageContentType.OUTPUT_TEXT] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the output text. Always ``output_text``. Required. OUTPUT_TEXT."""
+ text: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The text output from the model. Required."""
+ annotations: list["_models.Annotation"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The annotations of the text output. Required."""
+ logprobs: list["_models.LogProb"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ text: str,
+ annotations: list["_models.Annotation"],
+ logprobs: list["_models.LogProb"],
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputMessageContentType.OUTPUT_TEXT # type: ignore
+
+
+class OutputMessageContentRefusalContent(OutputMessageContent, discriminator="refusal"):
+ """Refusal.
+
+ :ivar type: The type of the refusal. Always ``refusal``. Required. REFUSAL.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.REFUSAL
+ :ivar refusal: The refusal explanation from the model. Required.
+ :vartype refusal: str
+ """
+
+ type: Literal[OutputMessageContentType.REFUSAL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the refusal. Always ``refusal``. Required. REFUSAL."""
+ refusal: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The refusal explanation from the model. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ refusal: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputMessageContentType.REFUSAL # type: ignore
+
+
+class Prompt(_Model):
+ """Reference to a prompt template and its variables. `Learn more
+ `_.
+
+ :ivar id: The unique identifier of the prompt template to use. Required.
+ :vartype id: str
+ :ivar version:
+ :vartype version: str
+ :ivar variables:
+ :vartype variables: ~azure.ai.agentserver.responses.models.models.ResponsePromptVariables
+ """
+
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique identifier of the prompt template to use. Required."""
+ version: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ variables: Optional["_models.ResponsePromptVariables"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ version: Optional[str] = None,
+ variables: Optional["_models.ResponsePromptVariables"] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class RankingOptions(_Model):
+ """RankingOptions.
+
+ :ivar ranker: The ranker to use for the file search. Known values are: "auto" and
+ "default-2024-11-15".
+ :vartype ranker: str or ~azure.ai.agentserver.responses.models.models.RankerVersionType
+ :ivar score_threshold: The score threshold for the file search, a number between 0 and 1.
+ Numbers closer to 1 will attempt to return only the most relevant results, but may return fewer
+ results.
+ :vartype score_threshold: int
+ :ivar hybrid_search: Weights that control how reciprocal rank fusion balances semantic
+ embedding matches versus sparse keyword matches when hybrid search is enabled.
+ :vartype hybrid_search: ~azure.ai.agentserver.responses.models.models.HybridSearchOptions
+ """
+
+ ranker: Optional[Union[str, "_models.RankerVersionType"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The ranker to use for the file search. Known values are: \"auto\" and \"default-2024-11-15\"."""
+ score_threshold: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The score threshold for the file search, a number between 0 and 1. Numbers closer to 1 will
+ attempt to return only the most relevant results, but may return fewer results."""
+ hybrid_search: Optional["_models.HybridSearchOptions"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Weights that control how reciprocal rank fusion balances semantic embedding matches versus
+ sparse keyword matches when hybrid search is enabled."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ ranker: Optional[Union[str, "_models.RankerVersionType"]] = None,
+ score_threshold: Optional[int] = None,
+ hybrid_search: Optional["_models.HybridSearchOptions"] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class Reasoning(_Model):
+ """Reasoning.
+
+ :ivar effort: Is one of the following types: Literal["none"], Literal["minimal"],
+ Literal["low"], Literal["medium"], Literal["high"], Literal["xhigh"]
+ :vartype effort: str or str or str or str or str or str
+ :ivar summary: Is one of the following types: Literal["auto"], Literal["concise"],
+ Literal["detailed"]
+ :vartype summary: str or str or str
+ :ivar generate_summary: Is one of the following types: Literal["auto"], Literal["concise"],
+ Literal["detailed"]
+ :vartype generate_summary: str or str or str
+ """
+
+ effort: Optional[Literal["none", "minimal", "low", "medium", "high", "xhigh"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Is one of the following types: Literal[\"none\"], Literal[\"minimal\"], Literal[\"low\"],
+ Literal[\"medium\"], Literal[\"high\"], Literal[\"xhigh\"]"""
+ summary: Optional[Literal["auto", "concise", "detailed"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Is one of the following types: Literal[\"auto\"], Literal[\"concise\"], Literal[\"detailed\"]"""
+ generate_summary: Optional[Literal["auto", "concise", "detailed"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Is one of the following types: Literal[\"auto\"], Literal[\"concise\"], Literal[\"detailed\"]"""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ effort: Optional[Literal["none", "minimal", "low", "medium", "high", "xhigh"]] = None,
+ summary: Optional[Literal["auto", "concise", "detailed"]] = None,
+ generate_summary: Optional[Literal["auto", "concise", "detailed"]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class ReasoningTextContent(_Model):
+ """Reasoning text.
+
+ :ivar type: The type of the reasoning text. Always ``reasoning_text``. Required. Default value
+ is "reasoning_text".
+ :vartype type: str
+ :ivar text: The reasoning text from the model. Required.
+ :vartype text: str
+ """
+
+ type: Literal["reasoning_text"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The type of the reasoning text. Always ``reasoning_text``. Required. Default value is
+ \"reasoning_text\"."""
+ text: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The reasoning text from the model. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ text: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type: Literal["reasoning_text"] = "reasoning_text"
+
+
+class ResponseStreamEvent(_Model):
+ """ResponseStreamEvent.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ ResponseErrorEvent, ResponseAudioDeltaEvent, ResponseAudioDoneEvent,
+ ResponseAudioTranscriptDeltaEvent, ResponseAudioTranscriptDoneEvent,
+ ResponseCodeInterpreterCallCompletedEvent, ResponseCodeInterpreterCallInProgressEvent,
+ ResponseCodeInterpreterCallInterpretingEvent, ResponseCodeInterpreterCallCodeDeltaEvent,
+ ResponseCodeInterpreterCallCodeDoneEvent, ResponseCompletedEvent,
+ ResponseContentPartAddedEvent, ResponseContentPartDoneEvent, ResponseCreatedEvent,
+ ResponseCustomToolCallInputDeltaEvent, ResponseCustomToolCallInputDoneEvent,
+ ResponseFailedEvent, ResponseFileSearchCallCompletedEvent,
+ ResponseFileSearchCallInProgressEvent, ResponseFileSearchCallSearchingEvent,
+ ResponseFunctionCallArgumentsDeltaEvent, ResponseFunctionCallArgumentsDoneEvent,
+ ResponseImageGenCallCompletedEvent, ResponseImageGenCallGeneratingEvent,
+ ResponseImageGenCallInProgressEvent, ResponseImageGenCallPartialImageEvent,
+ ResponseInProgressEvent, ResponseIncompleteEvent, ResponseMCPCallCompletedEvent,
+ ResponseMCPCallFailedEvent, ResponseMCPCallInProgressEvent, ResponseMCPCallArgumentsDeltaEvent,
+ ResponseMCPCallArgumentsDoneEvent, ResponseMCPListToolsCompletedEvent,
+ ResponseMCPListToolsFailedEvent, ResponseMCPListToolsInProgressEvent,
+ ResponseOutputItemAddedEvent, ResponseOutputItemDoneEvent,
+ ResponseOutputTextAnnotationAddedEvent, ResponseTextDeltaEvent, ResponseTextDoneEvent,
+ ResponseQueuedEvent, ResponseReasoningSummaryPartAddedEvent,
+ ResponseReasoningSummaryPartDoneEvent, ResponseReasoningSummaryTextDeltaEvent,
+ ResponseReasoningSummaryTextDoneEvent, ResponseReasoningTextDeltaEvent,
+ ResponseReasoningTextDoneEvent, ResponseRefusalDeltaEvent, ResponseRefusalDoneEvent,
+ ResponseWebSearchCallCompletedEvent, ResponseWebSearchCallInProgressEvent,
+ ResponseWebSearchCallSearchingEvent
+
+ :ivar type: Required. Known values are: "response.audio.delta", "response.audio.done",
+ "response.audio.transcript.delta", "response.audio.transcript.done",
+ "response.code_interpreter_call_code.delta", "response.code_interpreter_call_code.done",
+ "response.code_interpreter_call.completed", "response.code_interpreter_call.in_progress",
+ "response.code_interpreter_call.interpreting", "response.completed",
+ "response.content_part.added", "response.content_part.done", "response.created", "error",
+ "response.file_search_call.completed", "response.file_search_call.in_progress",
+ "response.file_search_call.searching", "response.function_call_arguments.delta",
+ "response.function_call_arguments.done", "response.in_progress", "response.failed",
+ "response.incomplete", "response.output_item.added", "response.output_item.done",
+ "response.reasoning_summary_part.added", "response.reasoning_summary_part.done",
+ "response.reasoning_summary_text.delta", "response.reasoning_summary_text.done",
+ "response.reasoning_text.delta", "response.reasoning_text.done", "response.refusal.delta",
+ "response.refusal.done", "response.output_text.delta", "response.output_text.done",
+ "response.web_search_call.completed", "response.web_search_call.in_progress",
+ "response.web_search_call.searching", "response.image_generation_call.completed",
+ "response.image_generation_call.generating", "response.image_generation_call.in_progress",
+ "response.image_generation_call.partial_image", "response.mcp_call_arguments.delta",
+ "response.mcp_call_arguments.done", "response.mcp_call.completed", "response.mcp_call.failed",
+ "response.mcp_call.in_progress", "response.mcp_list_tools.completed",
+ "response.mcp_list_tools.failed", "response.mcp_list_tools.in_progress",
+ "response.output_text.annotation.added", "response.queued",
+ "response.custom_tool_call_input.delta", and "response.custom_tool_call_input.done".
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.ResponseStreamEventType
+ :ivar sequence_number: Required.
+ :vartype sequence_number: int
+ """
+
+ __mapping__: dict[str, _Model] = {}
+ type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])
+ """Required. Known values are: \"response.audio.delta\", \"response.audio.done\",
+ \"response.audio.transcript.delta\", \"response.audio.transcript.done\",
+ \"response.code_interpreter_call_code.delta\", \"response.code_interpreter_call_code.done\",
+ \"response.code_interpreter_call.completed\", \"response.code_interpreter_call.in_progress\",
+ \"response.code_interpreter_call.interpreting\", \"response.completed\",
+ \"response.content_part.added\", \"response.content_part.done\", \"response.created\",
+ \"error\", \"response.file_search_call.completed\", \"response.file_search_call.in_progress\",
+ \"response.file_search_call.searching\", \"response.function_call_arguments.delta\",
+ \"response.function_call_arguments.done\", \"response.in_progress\", \"response.failed\",
+ \"response.incomplete\", \"response.output_item.added\", \"response.output_item.done\",
+ \"response.reasoning_summary_part.added\", \"response.reasoning_summary_part.done\",
+ \"response.reasoning_summary_text.delta\", \"response.reasoning_summary_text.done\",
+ \"response.reasoning_text.delta\", \"response.reasoning_text.done\",
+ \"response.refusal.delta\", \"response.refusal.done\", \"response.output_text.delta\",
+ \"response.output_text.done\", \"response.web_search_call.completed\",
+ \"response.web_search_call.in_progress\", \"response.web_search_call.searching\",
+ \"response.image_generation_call.completed\", \"response.image_generation_call.generating\",
+ \"response.image_generation_call.in_progress\",
+ \"response.image_generation_call.partial_image\", \"response.mcp_call_arguments.delta\",
+ \"response.mcp_call_arguments.done\", \"response.mcp_call.completed\",
+ \"response.mcp_call.failed\", \"response.mcp_call.in_progress\",
+ \"response.mcp_list_tools.completed\", \"response.mcp_list_tools.failed\",
+ \"response.mcp_list_tools.in_progress\", \"response.output_text.annotation.added\",
+ \"response.queued\", \"response.custom_tool_call_input.delta\", and
+ \"response.custom_tool_call_input.done\"."""
+ sequence_number: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ type: str,
+ sequence_number: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class ResponseAudioDeltaEvent(ResponseStreamEvent, discriminator="response.audio.delta"):
+ """Emitted when there is a partial audio response.
+
+ :ivar type: The type of the event. Always ``response.audio.delta``. Required.
+ RESPONSE_AUDIO_DELTA.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.RESPONSE_AUDIO_DELTA
+ :ivar sequence_number: A sequence number for this chunk of the stream response. Required.
+ :vartype sequence_number: int
+ :ivar delta: A chunk of Base64 encoded response audio bytes. Required.
+ :vartype delta: bytes
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_AUDIO_DELTA] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the event. Always ``response.audio.delta``. Required. RESPONSE_AUDIO_DELTA."""
+ delta: bytes = rest_field(visibility=["read", "create", "update", "delete", "query"], format="base64")
+ """A chunk of Base64 encoded response audio bytes. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ sequence_number: int,
+ delta: bytes,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_AUDIO_DELTA # type: ignore
+
+
+class ResponseAudioDoneEvent(ResponseStreamEvent, discriminator="response.audio.done"):
+ """Emitted when the audio response is complete.
+
+ :ivar type: The type of the event. Always ``response.audio.done``. Required.
+ RESPONSE_AUDIO_DONE.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.RESPONSE_AUDIO_DONE
+ :ivar sequence_number: The sequence number of the delta. Required.
+ :vartype sequence_number: int
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_AUDIO_DONE] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the event. Always ``response.audio.done``. Required. RESPONSE_AUDIO_DONE."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ sequence_number: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_AUDIO_DONE # type: ignore
+
+
+class ResponseAudioTranscriptDeltaEvent(ResponseStreamEvent, discriminator="response.audio.transcript.delta"):
+ """Emitted when there is a partial transcript of audio.
+
+ :ivar type: The type of the event. Always ``response.audio.transcript.delta``. Required.
+ RESPONSE_AUDIO_TRANSCRIPT_DELTA.
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.RESPONSE_AUDIO_TRANSCRIPT_DELTA
+ :ivar delta: The partial transcript of the audio response. Required.
+ :vartype delta: str
+ :ivar sequence_number: The sequence number of this event. Required.
+ :vartype sequence_number: int
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_AUDIO_TRANSCRIPT_DELTA] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the event. Always ``response.audio.transcript.delta``. Required.
+ RESPONSE_AUDIO_TRANSCRIPT_DELTA."""
+ delta: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The partial transcript of the audio response. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ delta: str,
+ sequence_number: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_AUDIO_TRANSCRIPT_DELTA # type: ignore
+
+
+class ResponseAudioTranscriptDoneEvent(ResponseStreamEvent, discriminator="response.audio.transcript.done"):
+ """Emitted when the full audio transcript is completed.
+
+ :ivar type: The type of the event. Always ``response.audio.transcript.done``. Required.
+ RESPONSE_AUDIO_TRANSCRIPT_DONE.
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.RESPONSE_AUDIO_TRANSCRIPT_DONE
+ :ivar sequence_number: The sequence number of this event. Required.
+ :vartype sequence_number: int
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_AUDIO_TRANSCRIPT_DONE] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the event. Always ``response.audio.transcript.done``. Required.
+ RESPONSE_AUDIO_TRANSCRIPT_DONE."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ sequence_number: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_AUDIO_TRANSCRIPT_DONE # type: ignore
+
+
+class ResponseCodeInterpreterCallCodeDeltaEvent(
+ ResponseStreamEvent, discriminator="response.code_interpreter_call_code.delta"
+): # pylint: disable=name-too-long
+ """Emitted when a partial code snippet is streamed by the code interpreter.
+
+ :ivar type: The type of the event. Always ``response.code_interpreter_call_code.delta``.
+ Required. RESPONSE_CODE_INTERPRETER_CALL_CODE_DELTA.
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.RESPONSE_CODE_INTERPRETER_CALL_CODE_DELTA
+ :ivar output_index: The index of the output item in the response for which the code is being
+ streamed. Required.
+ :vartype output_index: int
+ :ivar item_id: The unique identifier of the code interpreter tool call item. Required.
+ :vartype item_id: str
+ :ivar delta: The partial code snippet being streamed by the code interpreter. Required.
+ :vartype delta: str
+ :ivar sequence_number: The sequence number of this event, used to order streaming events.
+ Required.
+ :vartype sequence_number: int
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_CODE_INTERPRETER_CALL_CODE_DELTA] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the event. Always ``response.code_interpreter_call_code.delta``. Required.
+ RESPONSE_CODE_INTERPRETER_CALL_CODE_DELTA."""
+ output_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the output item in the response for which the code is being streamed. Required."""
+ item_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique identifier of the code interpreter tool call item. Required."""
+ delta: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The partial code snippet being streamed by the code interpreter. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ output_index: int,
+ item_id: str,
+ delta: str,
+ sequence_number: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_CODE_INTERPRETER_CALL_CODE_DELTA # type: ignore
+
+
+class ResponseCodeInterpreterCallCodeDoneEvent(
+ ResponseStreamEvent, discriminator="response.code_interpreter_call_code.done"
+):
+ """Emitted when the code snippet is finalized by the code interpreter.
+
+ :ivar type: The type of the event. Always ``response.code_interpreter_call_code.done``.
+ Required. RESPONSE_CODE_INTERPRETER_CALL_CODE_DONE.
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.RESPONSE_CODE_INTERPRETER_CALL_CODE_DONE
+ :ivar output_index: The index of the output item in the response for which the code is
+ finalized. Required.
+ :vartype output_index: int
+ :ivar item_id: The unique identifier of the code interpreter tool call item. Required.
+ :vartype item_id: str
+ :ivar code: The final code snippet output by the code interpreter. Required.
+ :vartype code: str
+ :ivar sequence_number: The sequence number of this event, used to order streaming events.
+ Required.
+ :vartype sequence_number: int
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_CODE_INTERPRETER_CALL_CODE_DONE] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the event. Always ``response.code_interpreter_call_code.done``. Required.
+ RESPONSE_CODE_INTERPRETER_CALL_CODE_DONE."""
+ output_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the output item in the response for which the code is finalized. Required."""
+ item_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique identifier of the code interpreter tool call item. Required."""
+ code: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The final code snippet output by the code interpreter. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ output_index: int,
+ item_id: str,
+ code: str,
+ sequence_number: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_CODE_INTERPRETER_CALL_CODE_DONE # type: ignore
+
+
+class ResponseCodeInterpreterCallCompletedEvent(
+ ResponseStreamEvent, discriminator="response.code_interpreter_call.completed"
+): # pylint: disable=name-too-long
+ """Emitted when the code interpreter call is completed.
+
+ :ivar type: The type of the event. Always ``response.code_interpreter_call.completed``.
+ Required. RESPONSE_CODE_INTERPRETER_CALL_COMPLETED.
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.RESPONSE_CODE_INTERPRETER_CALL_COMPLETED
+ :ivar output_index: The index of the output item in the response for which the code interpreter
+ call is completed. Required.
+ :vartype output_index: int
+ :ivar item_id: The unique identifier of the code interpreter tool call item. Required.
+ :vartype item_id: str
+ :ivar sequence_number: The sequence number of this event, used to order streaming events.
+ Required.
+ :vartype sequence_number: int
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_CODE_INTERPRETER_CALL_COMPLETED] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the event. Always ``response.code_interpreter_call.completed``. Required.
+ RESPONSE_CODE_INTERPRETER_CALL_COMPLETED."""
+ output_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the output item in the response for which the code interpreter call is completed.
+ Required."""
+ item_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique identifier of the code interpreter tool call item. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ output_index: int,
+ item_id: str,
+ sequence_number: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_CODE_INTERPRETER_CALL_COMPLETED # type: ignore
+
+
+class ResponseCodeInterpreterCallInProgressEvent(
+ ResponseStreamEvent, discriminator="response.code_interpreter_call.in_progress"
+): # pylint: disable=name-too-long
+ """Emitted when a code interpreter call is in progress.
+
+ :ivar type: The type of the event. Always ``response.code_interpreter_call.in_progress``.
+ Required. RESPONSE_CODE_INTERPRETER_CALL_IN_PROGRESS.
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.RESPONSE_CODE_INTERPRETER_CALL_IN_PROGRESS
+ :ivar output_index: The index of the output item in the response for which the code interpreter
+ call is in progress. Required.
+ :vartype output_index: int
+ :ivar item_id: The unique identifier of the code interpreter tool call item. Required.
+ :vartype item_id: str
+ :ivar sequence_number: The sequence number of this event, used to order streaming events.
+ Required.
+ :vartype sequence_number: int
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_CODE_INTERPRETER_CALL_IN_PROGRESS] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the event. Always ``response.code_interpreter_call.in_progress``. Required.
+ RESPONSE_CODE_INTERPRETER_CALL_IN_PROGRESS."""
+ output_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the output item in the response for which the code interpreter call is in
+ progress. Required."""
+ item_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique identifier of the code interpreter tool call item. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ output_index: int,
+ item_id: str,
+ sequence_number: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_CODE_INTERPRETER_CALL_IN_PROGRESS # type: ignore
+
+
+class ResponseCodeInterpreterCallInterpretingEvent(
+ ResponseStreamEvent, discriminator="response.code_interpreter_call.interpreting"
+): # pylint: disable=name-too-long
+ """Emitted when the code interpreter is actively interpreting the code snippet.
+
+ :ivar type: The type of the event. Always ``response.code_interpreter_call.interpreting``.
+ Required. RESPONSE_CODE_INTERPRETER_CALL_INTERPRETING.
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.RESPONSE_CODE_INTERPRETER_CALL_INTERPRETING
+ :ivar output_index: The index of the output item in the response for which the code interpreter
+ is interpreting code. Required.
+ :vartype output_index: int
+ :ivar item_id: The unique identifier of the code interpreter tool call item. Required.
+ :vartype item_id: str
+ :ivar sequence_number: The sequence number of this event, used to order streaming events.
+ Required.
+ :vartype sequence_number: int
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_CODE_INTERPRETER_CALL_INTERPRETING] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the event. Always ``response.code_interpreter_call.interpreting``. Required.
+ RESPONSE_CODE_INTERPRETER_CALL_INTERPRETING."""
+ output_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the output item in the response for which the code interpreter is interpreting
+ code. Required."""
+ item_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique identifier of the code interpreter tool call item. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ output_index: int,
+ item_id: str,
+ sequence_number: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_CODE_INTERPRETER_CALL_INTERPRETING # type: ignore
+
+
+class ResponseCompletedEvent(ResponseStreamEvent, discriminator="response.completed"):
+ """Emitted when the model response is complete.
+
+ :ivar type: The type of the event. Always ``response.completed``. Required. RESPONSE_COMPLETED.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.RESPONSE_COMPLETED
+ :ivar response: Properties of the completed response. Required.
+ :vartype response: ~azure.ai.agentserver.responses.models.models.ResponseObject
+ :ivar sequence_number: The sequence number for this event. Required.
+ :vartype sequence_number: int
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_COMPLETED] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the event. Always ``response.completed``. Required. RESPONSE_COMPLETED."""
+ response: "_models.ResponseObject" = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Properties of the completed response. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ response: "_models.ResponseObject",
+ sequence_number: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_COMPLETED # type: ignore
+
+
+class ResponseContentPartAddedEvent(ResponseStreamEvent, discriminator="response.content_part.added"):
+ """Emitted when a new content part is added.
+
+ :ivar type: The type of the event. Always ``response.content_part.added``. Required.
+ RESPONSE_CONTENT_PART_ADDED.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.RESPONSE_CONTENT_PART_ADDED
+ :ivar item_id: The ID of the output item that the content part was added to. Required.
+ :vartype item_id: str
+ :ivar output_index: The index of the output item that the content part was added to. Required.
+ :vartype output_index: int
+ :ivar content_index: The index of the content part that was added. Required.
+ :vartype content_index: int
+ :ivar part: The content part that was added. Required.
+ :vartype part: ~azure.ai.agentserver.responses.models.models.OutputContent
+ :ivar sequence_number: The sequence number of this event. Required.
+ :vartype sequence_number: int
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_CONTENT_PART_ADDED] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the event. Always ``response.content_part.added``. Required.
+ RESPONSE_CONTENT_PART_ADDED."""
+ item_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the output item that the content part was added to. Required."""
+ output_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the output item that the content part was added to. Required."""
+ content_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the content part that was added. Required."""
+ part: "_models.OutputContent" = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The content part that was added. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ item_id: str,
+ output_index: int,
+ content_index: int,
+ part: "_models.OutputContent",
+ sequence_number: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_CONTENT_PART_ADDED # type: ignore
+
+
+class ResponseContentPartDoneEvent(ResponseStreamEvent, discriminator="response.content_part.done"):
+ """Emitted when a content part is done.
+
+ :ivar type: The type of the event. Always ``response.content_part.done``. Required.
+ RESPONSE_CONTENT_PART_DONE.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.RESPONSE_CONTENT_PART_DONE
+ :ivar item_id: The ID of the output item that the content part was added to. Required.
+ :vartype item_id: str
+ :ivar output_index: The index of the output item that the content part was added to. Required.
+ :vartype output_index: int
+ :ivar content_index: The index of the content part that is done. Required.
+ :vartype content_index: int
+ :ivar sequence_number: The sequence number of this event. Required.
+ :vartype sequence_number: int
+ :ivar part: The content part that is done. Required.
+ :vartype part: ~azure.ai.agentserver.responses.models.models.OutputContent
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_CONTENT_PART_DONE] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the event. Always ``response.content_part.done``. Required.
+ RESPONSE_CONTENT_PART_DONE."""
+ item_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the output item that the content part was added to. Required."""
+ output_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the output item that the content part was added to. Required."""
+ content_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the content part that is done. Required."""
+ part: "_models.OutputContent" = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The content part that is done. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ item_id: str,
+ output_index: int,
+ content_index: int,
+ sequence_number: int,
+ part: "_models.OutputContent",
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_CONTENT_PART_DONE # type: ignore
+
+
+class ResponseCreatedEvent(ResponseStreamEvent, discriminator="response.created"):
+ """An event that is emitted when a response is created.
+
+ :ivar type: The type of the event. Always ``response.created``. Required. RESPONSE_CREATED.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.RESPONSE_CREATED
+ :ivar response: The response that was created. Required.
+ :vartype response: ~azure.ai.agentserver.responses.models.models.ResponseObject
+ :ivar sequence_number: The sequence number for this event. Required.
+ :vartype sequence_number: int
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_CREATED] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the event. Always ``response.created``. Required. RESPONSE_CREATED."""
+ response: "_models.ResponseObject" = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The response that was created. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ response: "_models.ResponseObject",
+ sequence_number: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_CREATED # type: ignore
+
+
+class ResponseCustomToolCallInputDeltaEvent(ResponseStreamEvent, discriminator="response.custom_tool_call_input.delta"):
+ """ResponseCustomToolCallInputDelta.
+
+ :ivar type: The event type identifier. Required. RESPONSE_CUSTOM_TOOL_CALL_INPUT_DELTA.
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.RESPONSE_CUSTOM_TOOL_CALL_INPUT_DELTA
+ :ivar sequence_number: The sequence number of this event. Required.
+ :vartype sequence_number: int
+ :ivar output_index: The index of the output this delta applies to. Required.
+ :vartype output_index: int
+ :ivar item_id: Unique identifier for the API item associated with this event. Required.
+ :vartype item_id: str
+ :ivar delta: The incremental input data (delta) for the custom tool call. Required.
+ :vartype delta: str
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_CUSTOM_TOOL_CALL_INPUT_DELTA] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The event type identifier. Required. RESPONSE_CUSTOM_TOOL_CALL_INPUT_DELTA."""
+ output_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the output this delta applies to. Required."""
+ item_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Unique identifier for the API item associated with this event. Required."""
+ delta: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The incremental input data (delta) for the custom tool call. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ sequence_number: int,
+ output_index: int,
+ item_id: str,
+ delta: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_CUSTOM_TOOL_CALL_INPUT_DELTA # type: ignore
+
+
+class ResponseCustomToolCallInputDoneEvent(ResponseStreamEvent, discriminator="response.custom_tool_call_input.done"):
+ """ResponseCustomToolCallInputDone.
+
+ :ivar type: The event type identifier. Required. RESPONSE_CUSTOM_TOOL_CALL_INPUT_DONE.
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.RESPONSE_CUSTOM_TOOL_CALL_INPUT_DONE
+ :ivar sequence_number: The sequence number of this event. Required.
+ :vartype sequence_number: int
+ :ivar output_index: The index of the output this event applies to. Required.
+ :vartype output_index: int
+ :ivar item_id: Unique identifier for the API item associated with this event. Required.
+ :vartype item_id: str
+ :ivar input: The complete input data for the custom tool call. Required.
+ :vartype input: str
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_CUSTOM_TOOL_CALL_INPUT_DONE] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The event type identifier. Required. RESPONSE_CUSTOM_TOOL_CALL_INPUT_DONE."""
+ output_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the output this event applies to. Required."""
+ item_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Unique identifier for the API item associated with this event. Required."""
+ input: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The complete input data for the custom tool call. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ sequence_number: int,
+ output_index: int,
+ item_id: str,
+ input: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_CUSTOM_TOOL_CALL_INPUT_DONE # type: ignore
+
+
+class ResponseErrorEvent(ResponseStreamEvent, discriminator="error"):
+ """Emitted when an error occurs.
+
+ :ivar type: The type of the event. Always ``error``. Required. ERROR.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.ERROR
+ :ivar code: Required.
+ :vartype code: str
+ :ivar message: The error message. Required.
+ :vartype message: str
+ :ivar param: Required.
+ :vartype param: str
+ :ivar sequence_number: The sequence number of this event. Required.
+ :vartype sequence_number: int
+ """
+
+ type: Literal[ResponseStreamEventType.ERROR] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the event. Always ``error``. Required. ERROR."""
+ code: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+ message: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The error message. Required."""
+ param: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ code: str,
+ message: str,
+ param: str,
+ sequence_number: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.ERROR # type: ignore
+
+
+class ResponseErrorInfo(_Model):
+ """An error object returned when the model fails to generate a Response.
+
+ :ivar code: Required. Known values are: "server_error", "rate_limit_exceeded",
+ "invalid_prompt", "vector_store_timeout", "invalid_image", "invalid_image_format",
+ "invalid_base64_image", "invalid_image_url", "image_too_large", "image_too_small",
+ "image_parse_error", "image_content_policy_violation", "invalid_image_mode",
+ "image_file_too_large", "unsupported_image_media_type", "empty_image_file",
+ "failed_to_download_image", and "image_file_not_found".
+ :vartype code: str or ~azure.ai.agentserver.responses.models.models.ResponseErrorCode
+ :ivar message: A human-readable description of the error. Required.
+ :vartype message: str
+ """
+
+ code: Union[str, "_models.ResponseErrorCode"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Required. Known values are: \"server_error\", \"rate_limit_exceeded\", \"invalid_prompt\",
+ \"vector_store_timeout\", \"invalid_image\", \"invalid_image_format\",
+ \"invalid_base64_image\", \"invalid_image_url\", \"image_too_large\", \"image_too_small\",
+ \"image_parse_error\", \"image_content_policy_violation\", \"invalid_image_mode\",
+ \"image_file_too_large\", \"unsupported_image_media_type\", \"empty_image_file\",
+ \"failed_to_download_image\", and \"image_file_not_found\"."""
+ message: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """A human-readable description of the error. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ code: Union[str, "_models.ResponseErrorCode"],
+ message: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class ResponseFailedEvent(ResponseStreamEvent, discriminator="response.failed"):
+ """An event that is emitted when a response fails.
+
+ :ivar type: The type of the event. Always ``response.failed``. Required. RESPONSE_FAILED.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.RESPONSE_FAILED
+ :ivar sequence_number: The sequence number of this event. Required.
+ :vartype sequence_number: int
+ :ivar response: The response that failed. Required.
+ :vartype response: ~azure.ai.agentserver.responses.models.models.ResponseObject
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_FAILED] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the event. Always ``response.failed``. Required. RESPONSE_FAILED."""
+ response: "_models.ResponseObject" = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The response that failed. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ sequence_number: int,
+ response: "_models.ResponseObject",
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_FAILED # type: ignore
+
+
+class ResponseFileSearchCallCompletedEvent(ResponseStreamEvent, discriminator="response.file_search_call.completed"):
+ """Emitted when a file search call is completed (results found).
+
+ :ivar type: The type of the event. Always ``response.file_search_call.completed``. Required.
+ RESPONSE_FILE_SEARCH_CALL_COMPLETED.
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.RESPONSE_FILE_SEARCH_CALL_COMPLETED
+ :ivar output_index: The index of the output item that the file search call is initiated.
+ Required.
+ :vartype output_index: int
+ :ivar item_id: The ID of the output item that the file search call is initiated. Required.
+ :vartype item_id: str
+ :ivar sequence_number: The sequence number of this event. Required.
+ :vartype sequence_number: int
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_FILE_SEARCH_CALL_COMPLETED] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the event. Always ``response.file_search_call.completed``. Required.
+ RESPONSE_FILE_SEARCH_CALL_COMPLETED."""
+ output_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the output item that the file search call is initiated. Required."""
+ item_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the output item that the file search call is initiated. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ output_index: int,
+ item_id: str,
+ sequence_number: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_FILE_SEARCH_CALL_COMPLETED # type: ignore
+
+
+class ResponseFileSearchCallInProgressEvent(ResponseStreamEvent, discriminator="response.file_search_call.in_progress"):
+ """Emitted when a file search call is initiated.
+
+ :ivar type: The type of the event. Always ``response.file_search_call.in_progress``. Required.
+ RESPONSE_FILE_SEARCH_CALL_IN_PROGRESS.
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.RESPONSE_FILE_SEARCH_CALL_IN_PROGRESS
+ :ivar output_index: The index of the output item that the file search call is initiated.
+ Required.
+ :vartype output_index: int
+ :ivar item_id: The ID of the output item that the file search call is initiated. Required.
+ :vartype item_id: str
+ :ivar sequence_number: The sequence number of this event. Required.
+ :vartype sequence_number: int
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_FILE_SEARCH_CALL_IN_PROGRESS] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the event. Always ``response.file_search_call.in_progress``. Required.
+ RESPONSE_FILE_SEARCH_CALL_IN_PROGRESS."""
+ output_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the output item that the file search call is initiated. Required."""
+ item_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the output item that the file search call is initiated. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ output_index: int,
+ item_id: str,
+ sequence_number: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_FILE_SEARCH_CALL_IN_PROGRESS # type: ignore
+
+
+class ResponseFileSearchCallSearchingEvent(ResponseStreamEvent, discriminator="response.file_search_call.searching"):
+ """Emitted when a file search is currently searching.
+
+ :ivar type: The type of the event. Always ``response.file_search_call.searching``. Required.
+ RESPONSE_FILE_SEARCH_CALL_SEARCHING.
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.RESPONSE_FILE_SEARCH_CALL_SEARCHING
+ :ivar output_index: The index of the output item that the file search call is searching.
+ Required.
+ :vartype output_index: int
+ :ivar item_id: The ID of the output item that the file search call is initiated. Required.
+ :vartype item_id: str
+ :ivar sequence_number: The sequence number of this event. Required.
+ :vartype sequence_number: int
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_FILE_SEARCH_CALL_SEARCHING] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the event. Always ``response.file_search_call.searching``. Required.
+ RESPONSE_FILE_SEARCH_CALL_SEARCHING."""
+ output_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the output item that the file search call is searching. Required."""
+ item_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the output item that the file search call is initiated. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ output_index: int,
+ item_id: str,
+ sequence_number: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_FILE_SEARCH_CALL_SEARCHING # type: ignore
+
+
+class ResponseFormatJsonSchemaSchema(_Model):
+ """JSON schema."""
+
+
+class ResponseFunctionCallArgumentsDeltaEvent(
+ ResponseStreamEvent, discriminator="response.function_call_arguments.delta"
+):
+ """Emitted when there is a partial function-call arguments delta.
+
+ :ivar type: The type of the event. Always ``response.function_call_arguments.delta``. Required.
+ RESPONSE_FUNCTION_CALL_ARGUMENTS_DELTA.
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.RESPONSE_FUNCTION_CALL_ARGUMENTS_DELTA
+ :ivar item_id: The ID of the output item that the function-call arguments delta is added to.
+ Required.
+ :vartype item_id: str
+ :ivar output_index: The index of the output item that the function-call arguments delta is
+ added to. Required.
+ :vartype output_index: int
+ :ivar sequence_number: The sequence number of this event. Required.
+ :vartype sequence_number: int
+ :ivar delta: The function-call arguments delta that is added. Required.
+ :vartype delta: str
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_FUNCTION_CALL_ARGUMENTS_DELTA] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the event. Always ``response.function_call_arguments.delta``. Required.
+ RESPONSE_FUNCTION_CALL_ARGUMENTS_DELTA."""
+ item_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the output item that the function-call arguments delta is added to. Required."""
+ output_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the output item that the function-call arguments delta is added to. Required."""
+ delta: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The function-call arguments delta that is added. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ item_id: str,
+ output_index: int,
+ sequence_number: int,
+ delta: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_FUNCTION_CALL_ARGUMENTS_DELTA # type: ignore
+
+
+class ResponseFunctionCallArgumentsDoneEvent(
+ ResponseStreamEvent, discriminator="response.function_call_arguments.done"
+):
+ """Emitted when function-call arguments are finalized.
+
+ :ivar type: Required. RESPONSE_FUNCTION_CALL_ARGUMENTS_DONE.
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.RESPONSE_FUNCTION_CALL_ARGUMENTS_DONE
+ :ivar item_id: The ID of the item. Required.
+ :vartype item_id: str
+ :ivar name: The name of the function that was called. Required.
+ :vartype name: str
+ :ivar output_index: The index of the output item. Required.
+ :vartype output_index: int
+ :ivar sequence_number: The sequence number of this event. Required.
+ :vartype sequence_number: int
+ :ivar arguments: The function-call arguments. Required.
+ :vartype arguments: str
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_FUNCTION_CALL_ARGUMENTS_DONE] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Required. RESPONSE_FUNCTION_CALL_ARGUMENTS_DONE."""
+ item_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the item. Required."""
+ name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The name of the function that was called. Required."""
+ output_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the output item. Required."""
+ arguments: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The function-call arguments. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ item_id: str,
+ name: str,
+ output_index: int,
+ sequence_number: int,
+ arguments: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_FUNCTION_CALL_ARGUMENTS_DONE # type: ignore
+
+
+class ResponseImageGenCallCompletedEvent(ResponseStreamEvent, discriminator="response.image_generation_call.completed"):
+ """ResponseImageGenCallCompletedEvent.
+
+ :ivar type: The type of the event. Always 'response.image_generation_call.completed'. Required.
+ RESPONSE_IMAGE_GENERATION_CALL_COMPLETED.
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.RESPONSE_IMAGE_GENERATION_CALL_COMPLETED
+ :ivar output_index: The index of the output item in the response's output array. Required.
+ :vartype output_index: int
+ :ivar sequence_number: The sequence number of this event. Required.
+ :vartype sequence_number: int
+ :ivar item_id: The unique identifier of the image generation item being processed. Required.
+ :vartype item_id: str
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_IMAGE_GENERATION_CALL_COMPLETED] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the event. Always 'response.image_generation_call.completed'. Required.
+ RESPONSE_IMAGE_GENERATION_CALL_COMPLETED."""
+ output_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the output item in the response's output array. Required."""
+ item_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique identifier of the image generation item being processed. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ output_index: int,
+ sequence_number: int,
+ item_id: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_IMAGE_GENERATION_CALL_COMPLETED # type: ignore
+
+
+class ResponseImageGenCallGeneratingEvent(
+ ResponseStreamEvent, discriminator="response.image_generation_call.generating"
+):
+ """ResponseImageGenCallGeneratingEvent.
+
+ :ivar type: The type of the event. Always 'response.image_generation_call.generating'.
+ Required. RESPONSE_IMAGE_GENERATION_CALL_GENERATING.
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.RESPONSE_IMAGE_GENERATION_CALL_GENERATING
+ :ivar output_index: The index of the output item in the response's output array. Required.
+ :vartype output_index: int
+ :ivar item_id: The unique identifier of the image generation item being processed. Required.
+ :vartype item_id: str
+ :ivar sequence_number: The sequence number of the image generation item being processed.
+ Required.
+ :vartype sequence_number: int
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_IMAGE_GENERATION_CALL_GENERATING] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the event. Always 'response.image_generation_call.generating'. Required.
+ RESPONSE_IMAGE_GENERATION_CALL_GENERATING."""
+ output_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the output item in the response's output array. Required."""
+ item_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique identifier of the image generation item being processed. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ output_index: int,
+ item_id: str,
+ sequence_number: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_IMAGE_GENERATION_CALL_GENERATING # type: ignore
+
+
+class ResponseImageGenCallInProgressEvent(
+ ResponseStreamEvent, discriminator="response.image_generation_call.in_progress"
+):
+ """ResponseImageGenCallInProgressEvent.
+
+ :ivar type: The type of the event. Always 'response.image_generation_call.in_progress'.
+ Required. RESPONSE_IMAGE_GENERATION_CALL_IN_PROGRESS.
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.RESPONSE_IMAGE_GENERATION_CALL_IN_PROGRESS
+ :ivar output_index: The index of the output item in the response's output array. Required.
+ :vartype output_index: int
+ :ivar item_id: The unique identifier of the image generation item being processed. Required.
+ :vartype item_id: str
+ :ivar sequence_number: The sequence number of the image generation item being processed.
+ Required.
+ :vartype sequence_number: int
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_IMAGE_GENERATION_CALL_IN_PROGRESS] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the event. Always 'response.image_generation_call.in_progress'. Required.
+ RESPONSE_IMAGE_GENERATION_CALL_IN_PROGRESS."""
+ output_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the output item in the response's output array. Required."""
+ item_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique identifier of the image generation item being processed. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ output_index: int,
+ item_id: str,
+ sequence_number: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_IMAGE_GENERATION_CALL_IN_PROGRESS # type: ignore
+
+
+class ResponseImageGenCallPartialImageEvent(
+ ResponseStreamEvent, discriminator="response.image_generation_call.partial_image"
+):
+ """ResponseImageGenCallPartialImageEvent.
+
+ :ivar type: The type of the event. Always 'response.image_generation_call.partial_image'.
+ Required. RESPONSE_IMAGE_GENERATION_CALL_PARTIAL_IMAGE.
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.RESPONSE_IMAGE_GENERATION_CALL_PARTIAL_IMAGE
+ :ivar output_index: The index of the output item in the response's output array. Required.
+ :vartype output_index: int
+ :ivar item_id: The unique identifier of the image generation item being processed. Required.
+ :vartype item_id: str
+ :ivar sequence_number: The sequence number of the image generation item being processed.
+ Required.
+ :vartype sequence_number: int
+ :ivar partial_image_index: 0-based index for the partial image (backend is 1-based, but this is
+ 0-based for the user). Required.
+ :vartype partial_image_index: int
+ :ivar partial_image_b64: Base64-encoded partial image data, suitable for rendering as an image.
+ Required.
+ :vartype partial_image_b64: str
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_IMAGE_GENERATION_CALL_PARTIAL_IMAGE] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the event. Always 'response.image_generation_call.partial_image'. Required.
+ RESPONSE_IMAGE_GENERATION_CALL_PARTIAL_IMAGE."""
+ output_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the output item in the response's output array. Required."""
+ item_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique identifier of the image generation item being processed. Required."""
+ partial_image_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """0-based index for the partial image (backend is 1-based, but this is 0-based for the user).
+ Required."""
+ partial_image_b64: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Base64-encoded partial image data, suitable for rendering as an image. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ output_index: int,
+ item_id: str,
+ sequence_number: int,
+ partial_image_index: int,
+ partial_image_b64: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_IMAGE_GENERATION_CALL_PARTIAL_IMAGE # type: ignore
+
+
+class ResponseIncompleteDetails(_Model):
+ """ResponseIncompleteDetails.
+
+ :ivar reason: Is either a Literal["max_output_tokens"] type or a Literal["content_filter"]
+ type.
+ :vartype reason: str or str
+ """
+
+ reason: Optional[Literal["max_output_tokens", "content_filter"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Is either a Literal[\"max_output_tokens\"] type or a Literal[\"content_filter\"] type."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ reason: Optional[Literal["max_output_tokens", "content_filter"]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class ResponseIncompleteEvent(ResponseStreamEvent, discriminator="response.incomplete"):
+ """An event that is emitted when a response finishes as incomplete.
+
+ :ivar type: The type of the event. Always ``response.incomplete``. Required.
+ RESPONSE_INCOMPLETE.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.RESPONSE_INCOMPLETE
+ :ivar response: The response that was incomplete. Required.
+ :vartype response: ~azure.ai.agentserver.responses.models.models.ResponseObject
+ :ivar sequence_number: The sequence number of this event. Required.
+ :vartype sequence_number: int
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_INCOMPLETE] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the event. Always ``response.incomplete``. Required. RESPONSE_INCOMPLETE."""
+ response: "_models.ResponseObject" = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The response that was incomplete. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ response: "_models.ResponseObject",
+ sequence_number: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_INCOMPLETE # type: ignore
+
+
+class ResponseInProgressEvent(ResponseStreamEvent, discriminator="response.in_progress"):
+ """Emitted when the response is in progress.
+
+ :ivar type: The type of the event. Always ``response.in_progress``. Required.
+ RESPONSE_IN_PROGRESS.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.RESPONSE_IN_PROGRESS
+ :ivar response: The response that is in progress. Required.
+ :vartype response: ~azure.ai.agentserver.responses.models.models.ResponseObject
+ :ivar sequence_number: The sequence number of this event. Required.
+ :vartype sequence_number: int
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_IN_PROGRESS] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the event. Always ``response.in_progress``. Required. RESPONSE_IN_PROGRESS."""
+ response: "_models.ResponseObject" = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The response that is in progress. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ response: "_models.ResponseObject",
+ sequence_number: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_IN_PROGRESS # type: ignore
+
+
+class ResponseLogProb(_Model):
+ """A logprob is the logarithmic probability that the model assigns to producing a particular token
+ at a given position in the sequence. Less-negative (higher) logprob values indicate greater
+ model confidence in that token choice.
+
+ :ivar token: A possible text token. Required.
+ :vartype token: str
+ :ivar logprob: The log probability of this token. Required.
+ :vartype logprob: int
+ :ivar top_logprobs: The log probability of the top 20 most likely tokens.
+ :vartype top_logprobs:
+ list[~azure.ai.agentserver.responses.models.models.ResponseLogProbTopLogprobs]
+ """
+
+ token: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """A possible text token. Required."""
+ logprob: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The log probability of this token. Required."""
+ top_logprobs: Optional[list["_models.ResponseLogProbTopLogprobs"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The log probability of the top 20 most likely tokens."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ token: str,
+ logprob: int,
+ top_logprobs: Optional[list["_models.ResponseLogProbTopLogprobs"]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class ResponseLogProbTopLogprobs(_Model):
+ """ResponseLogProbTopLogprobs.
+
+ :ivar token:
+ :vartype token: str
+ :ivar logprob:
+ :vartype logprob: int
+ """
+
+ token: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ logprob: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+
+ @overload
+ def __init__(
+ self,
+ *,
+ token: Optional[str] = None,
+ logprob: Optional[int] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class ResponseMCPCallArgumentsDeltaEvent(ResponseStreamEvent, discriminator="response.mcp_call_arguments.delta"):
+ """ResponseMCPCallArgumentsDeltaEvent.
+
+ :ivar type: The type of the event. Always 'response.mcp_call_arguments.delta'. Required.
+ RESPONSE_MCP_CALL_ARGUMENTS_DELTA.
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.RESPONSE_MCP_CALL_ARGUMENTS_DELTA
+ :ivar output_index: The index of the output item in the response's output array. Required.
+ :vartype output_index: int
+ :ivar item_id: The unique identifier of the MCP tool call item being processed. Required.
+ :vartype item_id: str
+ :ivar delta: A JSON string containing the partial update to the arguments for the MCP tool
+ call. Required.
+ :vartype delta: str
+ :ivar sequence_number: The sequence number of this event. Required.
+ :vartype sequence_number: int
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_MCP_CALL_ARGUMENTS_DELTA] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the event. Always 'response.mcp_call_arguments.delta'. Required.
+ RESPONSE_MCP_CALL_ARGUMENTS_DELTA."""
+ output_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the output item in the response's output array. Required."""
+ item_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique identifier of the MCP tool call item being processed. Required."""
+ delta: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """A JSON string containing the partial update to the arguments for the MCP tool call. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ output_index: int,
+ item_id: str,
+ delta: str,
+ sequence_number: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_MCP_CALL_ARGUMENTS_DELTA # type: ignore
+
+
+class ResponseMCPCallArgumentsDoneEvent(ResponseStreamEvent, discriminator="response.mcp_call_arguments.done"):
+ """ResponseMCPCallArgumentsDoneEvent.
+
+ :ivar type: The type of the event. Always 'response.mcp_call_arguments.done'. Required.
+ RESPONSE_MCP_CALL_ARGUMENTS_DONE.
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.RESPONSE_MCP_CALL_ARGUMENTS_DONE
+ :ivar output_index: The index of the output item in the response's output array. Required.
+ :vartype output_index: int
+ :ivar item_id: The unique identifier of the MCP tool call item being processed. Required.
+ :vartype item_id: str
+ :ivar arguments: A JSON string containing the finalized arguments for the MCP tool call.
+ Required.
+ :vartype arguments: str
+ :ivar sequence_number: The sequence number of this event. Required.
+ :vartype sequence_number: int
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_MCP_CALL_ARGUMENTS_DONE] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the event. Always 'response.mcp_call_arguments.done'. Required.
+ RESPONSE_MCP_CALL_ARGUMENTS_DONE."""
+ output_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the output item in the response's output array. Required."""
+ item_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique identifier of the MCP tool call item being processed. Required."""
+ arguments: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """A JSON string containing the finalized arguments for the MCP tool call. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ output_index: int,
+ item_id: str,
+ arguments: str,
+ sequence_number: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_MCP_CALL_ARGUMENTS_DONE # type: ignore
+
+
+class ResponseMCPCallCompletedEvent(ResponseStreamEvent, discriminator="response.mcp_call.completed"):
+ """ResponseMCPCallCompletedEvent.
+
+ :ivar type: The type of the event. Always 'response.mcp_call.completed'. Required.
+ RESPONSE_MCP_CALL_COMPLETED.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.RESPONSE_MCP_CALL_COMPLETED
+ :ivar item_id: The ID of the MCP tool call item that completed. Required.
+ :vartype item_id: str
+ :ivar output_index: The index of the output item that completed. Required.
+ :vartype output_index: int
+ :ivar sequence_number: The sequence number of this event. Required.
+ :vartype sequence_number: int
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_MCP_CALL_COMPLETED] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the event. Always 'response.mcp_call.completed'. Required.
+ RESPONSE_MCP_CALL_COMPLETED."""
+ item_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the MCP tool call item that completed. Required."""
+ output_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the output item that completed. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ item_id: str,
+ output_index: int,
+ sequence_number: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_MCP_CALL_COMPLETED # type: ignore
+
+
+class ResponseMCPCallFailedEvent(ResponseStreamEvent, discriminator="response.mcp_call.failed"):
+ """ResponseMCPCallFailedEvent.
+
+ :ivar type: The type of the event. Always 'response.mcp_call.failed'. Required.
+ RESPONSE_MCP_CALL_FAILED.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.RESPONSE_MCP_CALL_FAILED
+ :ivar item_id: The ID of the MCP tool call item that failed. Required.
+ :vartype item_id: str
+ :ivar output_index: The index of the output item that failed. Required.
+ :vartype output_index: int
+ :ivar sequence_number: The sequence number of this event. Required.
+ :vartype sequence_number: int
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_MCP_CALL_FAILED] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the event. Always 'response.mcp_call.failed'. Required. RESPONSE_MCP_CALL_FAILED."""
+ item_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the MCP tool call item that failed. Required."""
+ output_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the output item that failed. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ item_id: str,
+ output_index: int,
+ sequence_number: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_MCP_CALL_FAILED # type: ignore
+
+
+class ResponseMCPCallInProgressEvent(ResponseStreamEvent, discriminator="response.mcp_call.in_progress"):
+ """ResponseMCPCallInProgressEvent.
+
+ :ivar type: The type of the event. Always 'response.mcp_call.in_progress'. Required.
+ RESPONSE_MCP_CALL_IN_PROGRESS.
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.RESPONSE_MCP_CALL_IN_PROGRESS
+ :ivar sequence_number: The sequence number of this event. Required.
+ :vartype sequence_number: int
+ :ivar output_index: The index of the output item in the response's output array. Required.
+ :vartype output_index: int
+ :ivar item_id: The unique identifier of the MCP tool call item being processed. Required.
+ :vartype item_id: str
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_MCP_CALL_IN_PROGRESS] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the event. Always 'response.mcp_call.in_progress'. Required.
+ RESPONSE_MCP_CALL_IN_PROGRESS."""
+ output_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the output item in the response's output array. Required."""
+ item_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique identifier of the MCP tool call item being processed. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ sequence_number: int,
+ output_index: int,
+ item_id: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_MCP_CALL_IN_PROGRESS # type: ignore
+
+
+class ResponseMCPListToolsCompletedEvent(ResponseStreamEvent, discriminator="response.mcp_list_tools.completed"):
+ """ResponseMCPListToolsCompletedEvent.
+
+ :ivar type: The type of the event. Always 'response.mcp_list_tools.completed'. Required.
+ RESPONSE_MCP_LIST_TOOLS_COMPLETED.
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.RESPONSE_MCP_LIST_TOOLS_COMPLETED
+ :ivar item_id: The ID of the MCP tool call item that produced this output. Required.
+ :vartype item_id: str
+ :ivar output_index: The index of the output item that was processed. Required.
+ :vartype output_index: int
+ :ivar sequence_number: The sequence number of this event. Required.
+ :vartype sequence_number: int
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_MCP_LIST_TOOLS_COMPLETED] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the event. Always 'response.mcp_list_tools.completed'. Required.
+ RESPONSE_MCP_LIST_TOOLS_COMPLETED."""
+ item_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the MCP tool call item that produced this output. Required."""
+ output_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the output item that was processed. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ item_id: str,
+ output_index: int,
+ sequence_number: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_MCP_LIST_TOOLS_COMPLETED # type: ignore
+
+
+class ResponseMCPListToolsFailedEvent(ResponseStreamEvent, discriminator="response.mcp_list_tools.failed"):
+ """ResponseMCPListToolsFailedEvent.
+
+ :ivar type: The type of the event. Always 'response.mcp_list_tools.failed'. Required.
+ RESPONSE_MCP_LIST_TOOLS_FAILED.
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.RESPONSE_MCP_LIST_TOOLS_FAILED
+ :ivar item_id: The ID of the MCP tool call item that failed. Required.
+ :vartype item_id: str
+ :ivar output_index: The index of the output item that failed. Required.
+ :vartype output_index: int
+ :ivar sequence_number: The sequence number of this event. Required.
+ :vartype sequence_number: int
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_MCP_LIST_TOOLS_FAILED] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the event. Always 'response.mcp_list_tools.failed'. Required.
+ RESPONSE_MCP_LIST_TOOLS_FAILED."""
+ item_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the MCP tool call item that failed. Required."""
+ output_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the output item that failed. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ item_id: str,
+ output_index: int,
+ sequence_number: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_MCP_LIST_TOOLS_FAILED # type: ignore
+
+
+class ResponseMCPListToolsInProgressEvent(ResponseStreamEvent, discriminator="response.mcp_list_tools.in_progress"):
+ """ResponseMCPListToolsInProgressEvent.
+
+ :ivar type: The type of the event. Always 'response.mcp_list_tools.in_progress'. Required.
+ RESPONSE_MCP_LIST_TOOLS_IN_PROGRESS.
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.RESPONSE_MCP_LIST_TOOLS_IN_PROGRESS
+ :ivar item_id: The ID of the MCP tool call item that is being processed. Required.
+ :vartype item_id: str
+ :ivar output_index: The index of the output item that is being processed. Required.
+ :vartype output_index: int
+ :ivar sequence_number: The sequence number of this event. Required.
+ :vartype sequence_number: int
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_MCP_LIST_TOOLS_IN_PROGRESS] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the event. Always 'response.mcp_list_tools.in_progress'. Required.
+ RESPONSE_MCP_LIST_TOOLS_IN_PROGRESS."""
+ item_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the MCP tool call item that is being processed. Required."""
+ output_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the output item that is being processed. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ item_id: str,
+ output_index: int,
+ sequence_number: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_MCP_LIST_TOOLS_IN_PROGRESS # type: ignore
+
+
+class ResponseObject(_Model):
+ """The response object.
+
+ :ivar metadata:
+ :vartype metadata: ~azure.ai.agentserver.responses.models.models.Metadata
+ :ivar top_logprobs:
+ :vartype top_logprobs: int
+ :ivar temperature:
+ :vartype temperature: int
+ :ivar top_p:
+ :vartype top_p: int
+ :ivar user: This field is being replaced by ``safety_identifier`` and ``prompt_cache_key``. Use
+ ``prompt_cache_key`` instead to maintain caching optimizations. A stable identifier for your
+ end-users. Used to boost cache hit rates by better bucketing similar requests and to help
+ OpenAI detect and prevent abuse. `Learn more
+ `_.
+ :vartype user: str
+ :ivar safety_identifier: A stable identifier used to help detect users of your application that
+ may be violating OpenAI's usage policies. The IDs should be a string that uniquely identifies
+ each user. We recommend hashing their username or email address, in order to avoid sending us
+ any identifying information. `Learn more
+ `_.
+ :vartype safety_identifier: str
+ :ivar prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your
+ cache hit rates. Replaces the ``user`` field. `Learn more `_.
+ :vartype prompt_cache_key: str
+ :ivar service_tier: Is one of the following types: Literal["auto"], Literal["default"],
+ Literal["flex"], Literal["scale"], Literal["priority"]
+ :vartype service_tier: str or str or str or str or str
+ :ivar prompt_cache_retention: Is either a Literal["in-memory"] type or a Literal["24h"] type.
+ :vartype prompt_cache_retention: str or str
+ :ivar previous_response_id:
+ :vartype previous_response_id: str
+ :ivar model: The model deployment to use for the creation of this response.
+ :vartype model: str
+ :ivar reasoning:
+ :vartype reasoning: ~azure.ai.agentserver.responses.models.models.Reasoning
+ :ivar background:
+ :vartype background: bool
+ :ivar max_output_tokens:
+ :vartype max_output_tokens: int
+ :ivar max_tool_calls:
+ :vartype max_tool_calls: int
+ :ivar text:
+ :vartype text: ~azure.ai.agentserver.responses.models.models.ResponseTextParam
+ :ivar tools:
+ :vartype tools: list[~azure.ai.agentserver.responses.models.models.Tool]
+ :ivar tool_choice: Is either a Union[str, "_models.ToolChoiceOptions"] type or a
+ ToolChoiceParam type.
+ :vartype tool_choice: str or ~azure.ai.agentserver.responses.models.models.ToolChoiceOptions or
+ ~azure.ai.agentserver.responses.models.models.ToolChoiceParam
+ :ivar prompt:
+ :vartype prompt: ~azure.ai.agentserver.responses.models.models.Prompt
+ :ivar truncation: Is either a Literal["auto"] type or a Literal["disabled"] type.
+ :vartype truncation: str or str
+ :ivar id: Unique identifier for this Response. Required.
+ :vartype id: str
+ :ivar object: The object type of this resource - always set to ``response``. Required. Default
+ value is "response".
+ :vartype object: str
+ :ivar status: The status of the response generation. One of ``completed``, ``failed``,
+ ``in_progress``, ``cancelled``, ``queued``, or ``incomplete``. Is one of the following types:
+ Literal["completed"], Literal["failed"], Literal["in_progress"], Literal["cancelled"],
+ Literal["queued"], Literal["incomplete"]
+ :vartype status: str or str or str or str or str or str
+ :ivar created_at: Unix timestamp (in seconds) of when this Response was created. Required.
+ :vartype created_at: ~datetime.datetime
+ :ivar completed_at:
+ :vartype completed_at: ~datetime.datetime
+ :ivar error: Required.
+ :vartype error: ~azure.ai.agentserver.responses.models.models.ResponseErrorInfo
+ :ivar incomplete_details: Required.
+ :vartype incomplete_details:
+ ~azure.ai.agentserver.responses.models.models.ResponseIncompleteDetails
+ :ivar output: An array of content items generated by the model.
+
+ * The length and order of items in the `output` array is dependent
+ on the model's response.
+ * Rather than accessing the first item in the `output` array and
+ assuming it's an `assistant` message with the content generated by
+ the model, you might consider using the `output_text` property where
+ supported in SDKs. Required.
+ :vartype output: list[~azure.ai.agentserver.responses.models.models.OutputItem]
+ :ivar instructions: Required. Is either a str type or a [Item] type.
+ :vartype instructions: str or list[~azure.ai.agentserver.responses.models.models.Item]
+ :ivar output_text:
+ :vartype output_text: str
+ :ivar usage:
+ :vartype usage: ~azure.ai.agentserver.responses.models.models.ResponseUsage
+ :ivar parallel_tool_calls: Whether to allow the model to run tool calls in parallel. Required.
+ :vartype parallel_tool_calls: bool
+ :ivar conversation:
+ :vartype conversation: ~azure.ai.agentserver.responses.models.models.ConversationReference
+ :ivar agent_reference: The agent used for this response. Required.
+ :vartype agent_reference: ~azure.ai.agentserver.responses.models.models.AgentReference
+ """
+
+ metadata: Optional["_models.Metadata"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ top_logprobs: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ temperature: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ top_p: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ user: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """This field is being replaced by ``safety_identifier`` and ``prompt_cache_key``. Use
+ ``prompt_cache_key`` instead to maintain caching optimizations. A stable identifier for your
+ end-users. Used to boost cache hit rates by better bucketing similar requests and to help
+ OpenAI detect and prevent abuse. `Learn more
+ `_."""
+ safety_identifier: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """A stable identifier used to help detect users of your application that may be violating
+ OpenAI's usage policies. The IDs should be a string that uniquely identifies each user. We
+ recommend hashing their username or email address, in order to avoid sending us any identifying
+ information. `Learn more `_."""
+ prompt_cache_key: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Used by OpenAI to cache responses for similar requests to optimize your cache hit rates.
+ Replaces the ``user`` field. `Learn more `_."""
+ service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Is one of the following types: Literal[\"auto\"], Literal[\"default\"], Literal[\"flex\"],
+ Literal[\"scale\"], Literal[\"priority\"]"""
+ prompt_cache_retention: Optional[Literal["in-memory", "24h"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Is either a Literal[\"in-memory\"] type or a Literal[\"24h\"] type."""
+ previous_response_id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ model: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The model deployment to use for the creation of this response."""
+ reasoning: Optional["_models.Reasoning"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ background: Optional[bool] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ max_output_tokens: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ max_tool_calls: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ text: Optional["_models.ResponseTextParam"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ tools: Optional[list["_models.Tool"]] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ tool_choice: Optional[Union[str, "_models.ToolChoiceOptions", "_models.ToolChoiceParam"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Is either a Union[str, \"_models.ToolChoiceOptions\"] type or a ToolChoiceParam type."""
+ prompt: Optional["_models.Prompt"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ truncation: Optional[Literal["auto", "disabled"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Is either a Literal[\"auto\"] type or a Literal[\"disabled\"] type."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Unique identifier for this Response. Required."""
+ object: Literal["response"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The object type of this resource - always set to ``response``. Required. Default value is
+ \"response\"."""
+ status: Optional[Literal["completed", "failed", "in_progress", "cancelled", "queued", "incomplete"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the response generation. One of ``completed``, ``failed``, ``in_progress``,
+ ``cancelled``, ``queued``, or ``incomplete``. Is one of the following types:
+ Literal[\"completed\"], Literal[\"failed\"], Literal[\"in_progress\"], Literal[\"cancelled\"],
+ Literal[\"queued\"], Literal[\"incomplete\"]"""
+ created_at: datetime.datetime = rest_field(
+ visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp"
+ )
+ """Unix timestamp (in seconds) of when this Response was created. Required."""
+ completed_at: Optional[datetime.datetime] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"], format="unix-timestamp"
+ )
+ error: "_models.ResponseErrorInfo" = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+ incomplete_details: "_models.ResponseIncompleteDetails" = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Required."""
+ output: list["_models.OutputItem"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """An array of content items generated by the model.
+
+ * The length and order of items in the `output` array is dependent
+ on the model's response.
+ * Rather than accessing the first item in the `output` array and
+ assuming it's an `assistant` message with the content generated by
+ the model, you might consider using the `output_text` property where
+ supported in SDKs. Required."""
+ instructions: Union[str, list["_models.Item"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Required. Is either a str type or a [Item] type."""
+ output_text: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ usage: Optional["_models.ResponseUsage"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ parallel_tool_calls: bool = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Whether to allow the model to run tool calls in parallel. Required."""
+ conversation: Optional["_models.ConversationReference"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ agent_reference: "_models.AgentReference" = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The agent used for this response. Required."""
+
+ @overload
+ def __init__( # pylint: disable=too-many-locals
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ created_at: datetime.datetime,
+ error: "_models.ResponseErrorInfo",
+ incomplete_details: "_models.ResponseIncompleteDetails",
+ output: list["_models.OutputItem"],
+ instructions: Union[str, list["_models.Item"]],
+ parallel_tool_calls: bool,
+ agent_reference: "_models.AgentReference",
+ metadata: Optional["_models.Metadata"] = None,
+ top_logprobs: Optional[int] = None,
+ temperature: Optional[int] = None,
+ top_p: Optional[int] = None,
+ user: Optional[str] = None,
+ safety_identifier: Optional[str] = None,
+ prompt_cache_key: Optional[str] = None,
+ service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] = None,
+ prompt_cache_retention: Optional[Literal["in-memory", "24h"]] = None,
+ previous_response_id: Optional[str] = None,
+ model: Optional[str] = None,
+ reasoning: Optional["_models.Reasoning"] = None,
+ background: Optional[bool] = None,
+ max_output_tokens: Optional[int] = None,
+ max_tool_calls: Optional[int] = None,
+ text: Optional["_models.ResponseTextParam"] = None,
+ tools: Optional[list["_models.Tool"]] = None,
+ tool_choice: Optional[Union[str, "_models.ToolChoiceOptions", "_models.ToolChoiceParam"]] = None,
+ prompt: Optional["_models.Prompt"] = None,
+ truncation: Optional[Literal["auto", "disabled"]] = None,
+ status: Optional[Literal["completed", "failed", "in_progress", "cancelled", "queued", "incomplete"]] = None,
+ completed_at: Optional[datetime.datetime] = None,
+ output_text: Optional[str] = None,
+ usage: Optional["_models.ResponseUsage"] = None,
+ conversation: Optional["_models.ConversationReference"] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.object: Literal["response"] = "response"
+
+
+class ResponseOutputItemAddedEvent(ResponseStreamEvent, discriminator="response.output_item.added"):
+ """Emitted when a new output item is added.
+
+ :ivar type: The type of the event. Always ``response.output_item.added``. Required.
+ RESPONSE_OUTPUT_ITEM_ADDED.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.RESPONSE_OUTPUT_ITEM_ADDED
+ :ivar output_index: The index of the output item that was added. Required.
+ :vartype output_index: int
+ :ivar sequence_number: The sequence number of this event. Required.
+ :vartype sequence_number: int
+ :ivar item: The output item that was added. Required.
+ :vartype item: ~azure.ai.agentserver.responses.models.models.OutputItem
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_OUTPUT_ITEM_ADDED] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the event. Always ``response.output_item.added``. Required.
+ RESPONSE_OUTPUT_ITEM_ADDED."""
+ output_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the output item that was added. Required."""
+ item: "_models.OutputItem" = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The output item that was added. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ output_index: int,
+ sequence_number: int,
+ item: "_models.OutputItem",
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_OUTPUT_ITEM_ADDED # type: ignore
+
+
+class ResponseOutputItemDoneEvent(ResponseStreamEvent, discriminator="response.output_item.done"):
+ """Emitted when an output item is marked done.
+
+ :ivar type: The type of the event. Always ``response.output_item.done``. Required.
+ RESPONSE_OUTPUT_ITEM_DONE.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.RESPONSE_OUTPUT_ITEM_DONE
+ :ivar output_index: The index of the output item that was marked done. Required.
+ :vartype output_index: int
+ :ivar sequence_number: The sequence number of this event. Required.
+ :vartype sequence_number: int
+ :ivar item: The output item that was marked done. Required.
+ :vartype item: ~azure.ai.agentserver.responses.models.models.OutputItem
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_OUTPUT_ITEM_DONE] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the event. Always ``response.output_item.done``. Required.
+ RESPONSE_OUTPUT_ITEM_DONE."""
+ output_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the output item that was marked done. Required."""
+ item: "_models.OutputItem" = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The output item that was marked done. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ output_index: int,
+ sequence_number: int,
+ item: "_models.OutputItem",
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_OUTPUT_ITEM_DONE # type: ignore
+
+
+class ResponseOutputTextAnnotationAddedEvent(
+ ResponseStreamEvent, discriminator="response.output_text.annotation.added"
+):
+ """ResponseOutputTextAnnotationAddedEvent.
+
+ :ivar type: The type of the event. Always 'response.output_text.annotation.added'. Required.
+ RESPONSE_OUTPUT_TEXT_ANNOTATION_ADDED.
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.RESPONSE_OUTPUT_TEXT_ANNOTATION_ADDED
+ :ivar item_id: The unique identifier of the item to which the annotation is being added.
+ Required.
+ :vartype item_id: str
+ :ivar output_index: The index of the output item in the response's output array. Required.
+ :vartype output_index: int
+ :ivar content_index: The index of the content part within the output item. Required.
+ :vartype content_index: int
+ :ivar annotation_index: The index of the annotation within the content part. Required.
+ :vartype annotation_index: int
+ :ivar sequence_number: The sequence number of this event. Required.
+ :vartype sequence_number: int
+ :ivar annotation: The annotation object being added. (See annotation schema for details.).
+ Required.
+ :vartype annotation: ~azure.ai.agentserver.responses.models.models.Annotation
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_OUTPUT_TEXT_ANNOTATION_ADDED] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the event. Always 'response.output_text.annotation.added'. Required.
+ RESPONSE_OUTPUT_TEXT_ANNOTATION_ADDED."""
+ item_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique identifier of the item to which the annotation is being added. Required."""
+ output_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the output item in the response's output array. Required."""
+ content_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the content part within the output item. Required."""
+ annotation_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the annotation within the content part. Required."""
+ annotation: "_models.Annotation" = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The annotation object being added. (See annotation schema for details.). Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ item_id: str,
+ output_index: int,
+ content_index: int,
+ annotation_index: int,
+ sequence_number: int,
+ annotation: "_models.Annotation",
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_OUTPUT_TEXT_ANNOTATION_ADDED # type: ignore
+
+
+class ResponsePromptVariables(_Model):
+ """Prompt Variables."""
+
+
+class ResponseQueuedEvent(ResponseStreamEvent, discriminator="response.queued"):
+ """ResponseQueuedEvent.
+
+ :ivar type: The type of the event. Always 'response.queued'. Required. RESPONSE_QUEUED.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.RESPONSE_QUEUED
+ :ivar response: The full response object that is queued. Required.
+ :vartype response: ~azure.ai.agentserver.responses.models.models.ResponseObject
+ :ivar sequence_number: The sequence number for this event. Required.
+ :vartype sequence_number: int
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_QUEUED] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the event. Always 'response.queued'. Required. RESPONSE_QUEUED."""
+ response: "_models.ResponseObject" = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The full response object that is queued. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ response: "_models.ResponseObject",
+ sequence_number: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_QUEUED # type: ignore
+
+
+class ResponseReasoningSummaryPartAddedEvent(
+ ResponseStreamEvent, discriminator="response.reasoning_summary_part.added"
+):
+ """Emitted when a new reasoning summary part is added.
+
+ :ivar type: The type of the event. Always ``response.reasoning_summary_part.added``. Required.
+ RESPONSE_REASONING_SUMMARY_PART_ADDED.
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.RESPONSE_REASONING_SUMMARY_PART_ADDED
+ :ivar item_id: The ID of the item this summary part is associated with. Required.
+ :vartype item_id: str
+ :ivar output_index: The index of the output item this summary part is associated with.
+ Required.
+ :vartype output_index: int
+ :ivar summary_index: The index of the summary part within the reasoning summary. Required.
+ :vartype summary_index: int
+ :ivar sequence_number: The sequence number of this event. Required.
+ :vartype sequence_number: int
+ :ivar part: The summary part that was added. Required.
+ :vartype part:
+ ~azure.ai.agentserver.responses.models.models.ResponseReasoningSummaryPartAddedEventPart
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_REASONING_SUMMARY_PART_ADDED] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the event. Always ``response.reasoning_summary_part.added``. Required.
+ RESPONSE_REASONING_SUMMARY_PART_ADDED."""
+ item_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the item this summary part is associated with. Required."""
+ output_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the output item this summary part is associated with. Required."""
+ summary_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the summary part within the reasoning summary. Required."""
+ part: "_models.ResponseReasoningSummaryPartAddedEventPart" = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The summary part that was added. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ item_id: str,
+ output_index: int,
+ summary_index: int,
+ sequence_number: int,
+ part: "_models.ResponseReasoningSummaryPartAddedEventPart",
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_REASONING_SUMMARY_PART_ADDED # type: ignore
+
+
+class ResponseReasoningSummaryPartAddedEventPart(_Model): # pylint: disable=name-too-long
+ """ResponseReasoningSummaryPartAddedEventPart.
+
+ :ivar type: Required. Default value is "summary_text".
+ :vartype type: str
+ :ivar text: Required.
+ :vartype text: str
+ """
+
+ type: Literal["summary_text"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required. Default value is \"summary_text\"."""
+ text: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ text: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type: Literal["summary_text"] = "summary_text"
+
+
+class ResponseReasoningSummaryPartDoneEvent(ResponseStreamEvent, discriminator="response.reasoning_summary_part.done"):
+ """Emitted when a reasoning summary part is completed.
+
+ :ivar type: The type of the event. Always ``response.reasoning_summary_part.done``. Required.
+ RESPONSE_REASONING_SUMMARY_PART_DONE.
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.RESPONSE_REASONING_SUMMARY_PART_DONE
+ :ivar item_id: The ID of the item this summary part is associated with. Required.
+ :vartype item_id: str
+ :ivar output_index: The index of the output item this summary part is associated with.
+ Required.
+ :vartype output_index: int
+ :ivar summary_index: The index of the summary part within the reasoning summary. Required.
+ :vartype summary_index: int
+ :ivar sequence_number: The sequence number of this event. Required.
+ :vartype sequence_number: int
+ :ivar part: The completed summary part. Required.
+ :vartype part:
+ ~azure.ai.agentserver.responses.models.models.ResponseReasoningSummaryPartDoneEventPart
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_REASONING_SUMMARY_PART_DONE] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the event. Always ``response.reasoning_summary_part.done``. Required.
+ RESPONSE_REASONING_SUMMARY_PART_DONE."""
+ item_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the item this summary part is associated with. Required."""
+ output_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the output item this summary part is associated with. Required."""
+ summary_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the summary part within the reasoning summary. Required."""
+ part: "_models.ResponseReasoningSummaryPartDoneEventPart" = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The completed summary part. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ item_id: str,
+ output_index: int,
+ summary_index: int,
+ sequence_number: int,
+ part: "_models.ResponseReasoningSummaryPartDoneEventPart",
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_REASONING_SUMMARY_PART_DONE # type: ignore
+
+
+class ResponseReasoningSummaryPartDoneEventPart(_Model): # pylint: disable=name-too-long
+ """ResponseReasoningSummaryPartDoneEventPart.
+
+ :ivar type: Required. Default value is "summary_text".
+ :vartype type: str
+ :ivar text: Required.
+ :vartype text: str
+ """
+
+ type: Literal["summary_text"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required. Default value is \"summary_text\"."""
+ text: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ text: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type: Literal["summary_text"] = "summary_text"
+
+
+class ResponseReasoningSummaryTextDeltaEvent(
+ ResponseStreamEvent, discriminator="response.reasoning_summary_text.delta"
+):
+ """Emitted when a delta is added to a reasoning summary text.
+
+ :ivar type: The type of the event. Always ``response.reasoning_summary_text.delta``. Required.
+ RESPONSE_REASONING_SUMMARY_TEXT_DELTA.
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.RESPONSE_REASONING_SUMMARY_TEXT_DELTA
+ :ivar item_id: The ID of the item this summary text delta is associated with. Required.
+ :vartype item_id: str
+ :ivar output_index: The index of the output item this summary text delta is associated with.
+ Required.
+ :vartype output_index: int
+ :ivar summary_index: The index of the summary part within the reasoning summary. Required.
+ :vartype summary_index: int
+ :ivar delta: The text delta that was added to the summary. Required.
+ :vartype delta: str
+ :ivar sequence_number: The sequence number of this event. Required.
+ :vartype sequence_number: int
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_REASONING_SUMMARY_TEXT_DELTA] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the event. Always ``response.reasoning_summary_text.delta``. Required.
+ RESPONSE_REASONING_SUMMARY_TEXT_DELTA."""
+ item_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the item this summary text delta is associated with. Required."""
+ output_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the output item this summary text delta is associated with. Required."""
+ summary_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the summary part within the reasoning summary. Required."""
+ delta: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The text delta that was added to the summary. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ item_id: str,
+ output_index: int,
+ summary_index: int,
+ delta: str,
+ sequence_number: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_REASONING_SUMMARY_TEXT_DELTA # type: ignore
+
+
+class ResponseReasoningSummaryTextDoneEvent(ResponseStreamEvent, discriminator="response.reasoning_summary_text.done"):
+ """Emitted when a reasoning summary text is completed.
+
+ :ivar type: The type of the event. Always ``response.reasoning_summary_text.done``. Required.
+ RESPONSE_REASONING_SUMMARY_TEXT_DONE.
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.RESPONSE_REASONING_SUMMARY_TEXT_DONE
+ :ivar item_id: The ID of the item this summary text is associated with. Required.
+ :vartype item_id: str
+ :ivar output_index: The index of the output item this summary text is associated with.
+ Required.
+ :vartype output_index: int
+ :ivar summary_index: The index of the summary part within the reasoning summary. Required.
+ :vartype summary_index: int
+ :ivar text: The full text of the completed reasoning summary. Required.
+ :vartype text: str
+ :ivar sequence_number: The sequence number of this event. Required.
+ :vartype sequence_number: int
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_REASONING_SUMMARY_TEXT_DONE] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the event. Always ``response.reasoning_summary_text.done``. Required.
+ RESPONSE_REASONING_SUMMARY_TEXT_DONE."""
+ item_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the item this summary text is associated with. Required."""
+ output_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the output item this summary text is associated with. Required."""
+ summary_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the summary part within the reasoning summary. Required."""
+ text: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The full text of the completed reasoning summary. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ item_id: str,
+ output_index: int,
+ summary_index: int,
+ text: str,
+ sequence_number: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_REASONING_SUMMARY_TEXT_DONE # type: ignore
+
+
+class ResponseReasoningTextDeltaEvent(ResponseStreamEvent, discriminator="response.reasoning_text.delta"):
+ """Emitted when a delta is added to a reasoning text.
+
+ :ivar type: The type of the event. Always ``response.reasoning_text.delta``. Required.
+ RESPONSE_REASONING_TEXT_DELTA.
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.RESPONSE_REASONING_TEXT_DELTA
+ :ivar item_id: The ID of the item this reasoning text delta is associated with. Required.
+ :vartype item_id: str
+ :ivar output_index: The index of the output item this reasoning text delta is associated with.
+ Required.
+ :vartype output_index: int
+ :ivar content_index: The index of the reasoning content part this delta is associated with.
+ Required.
+ :vartype content_index: int
+ :ivar delta: The text delta that was added to the reasoning content. Required.
+ :vartype delta: str
+ :ivar sequence_number: The sequence number of this event. Required.
+ :vartype sequence_number: int
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_REASONING_TEXT_DELTA] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the event. Always ``response.reasoning_text.delta``. Required.
+ RESPONSE_REASONING_TEXT_DELTA."""
+ item_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the item this reasoning text delta is associated with. Required."""
+ output_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the output item this reasoning text delta is associated with. Required."""
+ content_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the reasoning content part this delta is associated with. Required."""
+ delta: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The text delta that was added to the reasoning content. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ item_id: str,
+ output_index: int,
+ content_index: int,
+ delta: str,
+ sequence_number: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_REASONING_TEXT_DELTA # type: ignore
+
+
+class ResponseReasoningTextDoneEvent(ResponseStreamEvent, discriminator="response.reasoning_text.done"):
+ """Emitted when a reasoning text is completed.
+
+ :ivar type: The type of the event. Always ``response.reasoning_text.done``. Required.
+ RESPONSE_REASONING_TEXT_DONE.
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.RESPONSE_REASONING_TEXT_DONE
+ :ivar item_id: The ID of the item this reasoning text is associated with. Required.
+ :vartype item_id: str
+ :ivar output_index: The index of the output item this reasoning text is associated with.
+ Required.
+ :vartype output_index: int
+ :ivar content_index: The index of the reasoning content part. Required.
+ :vartype content_index: int
+ :ivar text: The full text of the completed reasoning content. Required.
+ :vartype text: str
+ :ivar sequence_number: The sequence number of this event. Required.
+ :vartype sequence_number: int
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_REASONING_TEXT_DONE] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the event. Always ``response.reasoning_text.done``. Required.
+ RESPONSE_REASONING_TEXT_DONE."""
+ item_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the item this reasoning text is associated with. Required."""
+ output_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the output item this reasoning text is associated with. Required."""
+ content_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the reasoning content part. Required."""
+ text: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The full text of the completed reasoning content. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ item_id: str,
+ output_index: int,
+ content_index: int,
+ text: str,
+ sequence_number: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_REASONING_TEXT_DONE # type: ignore
+
+
+class ResponseRefusalDeltaEvent(ResponseStreamEvent, discriminator="response.refusal.delta"):
+ """Emitted when there is a partial refusal text.
+
+ :ivar type: The type of the event. Always ``response.refusal.delta``. Required.
+ RESPONSE_REFUSAL_DELTA.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.RESPONSE_REFUSAL_DELTA
+ :ivar item_id: The ID of the output item that the refusal text is added to. Required.
+ :vartype item_id: str
+ :ivar output_index: The index of the output item that the refusal text is added to. Required.
+ :vartype output_index: int
+ :ivar content_index: The index of the content part that the refusal text is added to. Required.
+ :vartype content_index: int
+ :ivar delta: The refusal text that is added. Required.
+ :vartype delta: str
+ :ivar sequence_number: The sequence number of this event. Required.
+ :vartype sequence_number: int
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_REFUSAL_DELTA] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the event. Always ``response.refusal.delta``. Required. RESPONSE_REFUSAL_DELTA."""
+ item_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the output item that the refusal text is added to. Required."""
+ output_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the output item that the refusal text is added to. Required."""
+ content_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the content part that the refusal text is added to. Required."""
+ delta: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The refusal text that is added. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ item_id: str,
+ output_index: int,
+ content_index: int,
+ delta: str,
+ sequence_number: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_REFUSAL_DELTA # type: ignore
+
+
+class ResponseRefusalDoneEvent(ResponseStreamEvent, discriminator="response.refusal.done"):
+ """Emitted when refusal text is finalized.
+
+ :ivar type: The type of the event. Always ``response.refusal.done``. Required.
+ RESPONSE_REFUSAL_DONE.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.RESPONSE_REFUSAL_DONE
+ :ivar item_id: The ID of the output item that the refusal text is finalized. Required.
+ :vartype item_id: str
+ :ivar output_index: The index of the output item that the refusal text is finalized. Required.
+ :vartype output_index: int
+ :ivar content_index: The index of the content part that the refusal text is finalized.
+ Required.
+ :vartype content_index: int
+ :ivar refusal: The refusal text that is finalized. Required.
+ :vartype refusal: str
+ :ivar sequence_number: The sequence number of this event. Required.
+ :vartype sequence_number: int
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_REFUSAL_DONE] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the event. Always ``response.refusal.done``. Required. RESPONSE_REFUSAL_DONE."""
+ item_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the output item that the refusal text is finalized. Required."""
+ output_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the output item that the refusal text is finalized. Required."""
+ content_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the content part that the refusal text is finalized. Required."""
+ refusal: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The refusal text that is finalized. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ item_id: str,
+ output_index: int,
+ content_index: int,
+ refusal: str,
+ sequence_number: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_REFUSAL_DONE # type: ignore
+
+
+class ResponseStreamOptions(_Model):
+ """Options for streaming responses. Only set this when you set ``stream: true``.
+
+ :ivar include_obfuscation: When true, stream obfuscation will be enabled. Stream obfuscation
+ adds random characters to an ``obfuscation`` field on streaming delta events to normalize
+ payload sizes as a mitigation to certain side-channel attacks. These obfuscation fields are
+ included by default, but add a small amount of overhead to the data stream. You can set
+ ``include_obfuscation`` to false to optimize for bandwidth if you trust the network links
+ between your application and the OpenAI API.
+ :vartype include_obfuscation: bool
+ """
+
+ include_obfuscation: Optional[bool] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """When true, stream obfuscation will be enabled. Stream obfuscation adds random characters to an
+ ``obfuscation`` field on streaming delta events to normalize payload sizes as a mitigation to
+ certain side-channel attacks. These obfuscation fields are included by default, but add a small
+ amount of overhead to the data stream. You can set ``include_obfuscation`` to false to optimize
+ for bandwidth if you trust the network links between your application and the OpenAI API."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ include_obfuscation: Optional[bool] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class ResponseTextDeltaEvent(ResponseStreamEvent, discriminator="response.output_text.delta"):
+ """Emitted when there is an additional text delta.
+
+ :ivar type: The type of the event. Always ``response.output_text.delta``. Required.
+ RESPONSE_OUTPUT_TEXT_DELTA.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.RESPONSE_OUTPUT_TEXT_DELTA
+ :ivar item_id: The ID of the output item that the text delta was added to. Required.
+ :vartype item_id: str
+ :ivar output_index: The index of the output item that the text delta was added to. Required.
+ :vartype output_index: int
+ :ivar content_index: The index of the content part that the text delta was added to. Required.
+ :vartype content_index: int
+ :ivar delta: The text delta that was added. Required.
+ :vartype delta: str
+ :ivar sequence_number: The sequence number for this event. Required.
+ :vartype sequence_number: int
+ :ivar logprobs: The log probabilities of the tokens in the delta. Required.
+ :vartype logprobs: list[~azure.ai.agentserver.responses.models.models.ResponseLogProb]
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_OUTPUT_TEXT_DELTA] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the event. Always ``response.output_text.delta``. Required.
+ RESPONSE_OUTPUT_TEXT_DELTA."""
+ item_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the output item that the text delta was added to. Required."""
+ output_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the output item that the text delta was added to. Required."""
+ content_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the content part that the text delta was added to. Required."""
+ delta: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The text delta that was added. Required."""
+ logprobs: list["_models.ResponseLogProb"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The log probabilities of the tokens in the delta. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ item_id: str,
+ output_index: int,
+ content_index: int,
+ delta: str,
+ sequence_number: int,
+ logprobs: list["_models.ResponseLogProb"],
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_OUTPUT_TEXT_DELTA # type: ignore
+
+
+class ResponseTextDoneEvent(ResponseStreamEvent, discriminator="response.output_text.done"):
+ """Emitted when text content is finalized.
+
+ :ivar type: The type of the event. Always ``response.output_text.done``. Required.
+ RESPONSE_OUTPUT_TEXT_DONE.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.RESPONSE_OUTPUT_TEXT_DONE
+ :ivar item_id: The ID of the output item that the text content is finalized. Required.
+ :vartype item_id: str
+ :ivar output_index: The index of the output item that the text content is finalized. Required.
+ :vartype output_index: int
+ :ivar content_index: The index of the content part that the text content is finalized.
+ Required.
+ :vartype content_index: int
+ :ivar text: The text content that is finalized. Required.
+ :vartype text: str
+ :ivar sequence_number: The sequence number for this event. Required.
+ :vartype sequence_number: int
+ :ivar logprobs: The log probabilities of the tokens in the delta. Required.
+ :vartype logprobs: list[~azure.ai.agentserver.responses.models.models.ResponseLogProb]
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_OUTPUT_TEXT_DONE] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the event. Always ``response.output_text.done``. Required.
+ RESPONSE_OUTPUT_TEXT_DONE."""
+ item_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the output item that the text content is finalized. Required."""
+ output_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the output item that the text content is finalized. Required."""
+ content_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the content part that the text content is finalized. Required."""
+ text: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The text content that is finalized. Required."""
+ logprobs: list["_models.ResponseLogProb"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The log probabilities of the tokens in the delta. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ item_id: str,
+ output_index: int,
+ content_index: int,
+ text: str,
+ sequence_number: int,
+ logprobs: list["_models.ResponseLogProb"],
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_OUTPUT_TEXT_DONE # type: ignore
+
+
+class ResponseTextParam(_Model):
+ """Configuration options for a text response from the model. Can be plain
+ text or structured JSON data. Learn more:
+
+ * [Text inputs and outputs](/docs/guides/text)
+ * [Structured Outputs](/docs/guides/structured-outputs).
+
+ :ivar format:
+ :vartype format: ~azure.ai.agentserver.responses.models.models.TextResponseFormatConfiguration
+ :ivar verbosity: Is one of the following types: Literal["low"], Literal["medium"],
+ Literal["high"]
+ :vartype verbosity: str or str or str
+ """
+
+ format: Optional["_models.TextResponseFormatConfiguration"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ verbosity: Optional[Literal["low", "medium", "high"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Is one of the following types: Literal[\"low\"], Literal[\"medium\"], Literal[\"high\"]"""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ format: Optional["_models.TextResponseFormatConfiguration"] = None,
+ verbosity: Optional[Literal["low", "medium", "high"]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class ResponseUsage(_Model):
+ """Represents token usage details including input tokens, output tokens, a breakdown of output
+ tokens, and the total tokens used.
+
+ :ivar input_tokens: The number of input tokens. Required.
+ :vartype input_tokens: int
+ :ivar input_tokens_details: A detailed breakdown of the input tokens. Required.
+ :vartype input_tokens_details:
+ ~azure.ai.agentserver.responses.models.models.ResponseUsageInputTokensDetails
+ :ivar output_tokens: The number of output tokens. Required.
+ :vartype output_tokens: int
+ :ivar output_tokens_details: A detailed breakdown of the output tokens. Required.
+ :vartype output_tokens_details:
+ ~azure.ai.agentserver.responses.models.models.ResponseUsageOutputTokensDetails
+ :ivar total_tokens: The total number of tokens used. Required.
+ :vartype total_tokens: int
+ """
+
+ input_tokens: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The number of input tokens. Required."""
+ input_tokens_details: "_models.ResponseUsageInputTokensDetails" = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """A detailed breakdown of the input tokens. Required."""
+ output_tokens: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The number of output tokens. Required."""
+ output_tokens_details: "_models.ResponseUsageOutputTokensDetails" = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """A detailed breakdown of the output tokens. Required."""
+ total_tokens: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The total number of tokens used. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ input_tokens: int,
+ input_tokens_details: "_models.ResponseUsageInputTokensDetails",
+ output_tokens: int,
+ output_tokens_details: "_models.ResponseUsageOutputTokensDetails",
+ total_tokens: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class ResponseUsageInputTokensDetails(_Model):
+ """ResponseUsageInputTokensDetails.
+
+ :ivar cached_tokens: Required.
+ :vartype cached_tokens: int
+ """
+
+ cached_tokens: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ cached_tokens: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class ResponseUsageOutputTokensDetails(_Model):
+ """ResponseUsageOutputTokensDetails.
+
+ :ivar reasoning_tokens: Required.
+ :vartype reasoning_tokens: int
+ """
+
+ reasoning_tokens: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ reasoning_tokens: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class ResponseWebSearchCallCompletedEvent(ResponseStreamEvent, discriminator="response.web_search_call.completed"):
+ """Emitted when a web search call is completed.
+
+ :ivar type: The type of the event. Always ``response.web_search_call.completed``. Required.
+ RESPONSE_WEB_SEARCH_CALL_COMPLETED.
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.RESPONSE_WEB_SEARCH_CALL_COMPLETED
+ :ivar output_index: The index of the output item that the web search call is associated with.
+ Required.
+ :vartype output_index: int
+ :ivar item_id: Unique ID for the output item associated with the web search call. Required.
+ :vartype item_id: str
+ :ivar sequence_number: The sequence number of the web search call being processed. Required.
+ :vartype sequence_number: int
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_WEB_SEARCH_CALL_COMPLETED] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the event. Always ``response.web_search_call.completed``. Required.
+ RESPONSE_WEB_SEARCH_CALL_COMPLETED."""
+ output_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the output item that the web search call is associated with. Required."""
+ item_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Unique ID for the output item associated with the web search call. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ output_index: int,
+ item_id: str,
+ sequence_number: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_WEB_SEARCH_CALL_COMPLETED # type: ignore
+
+
+class ResponseWebSearchCallInProgressEvent(ResponseStreamEvent, discriminator="response.web_search_call.in_progress"):
+ """Emitted when a web search call is initiated.
+
+ :ivar type: The type of the event. Always ``response.web_search_call.in_progress``. Required.
+ RESPONSE_WEB_SEARCH_CALL_IN_PROGRESS.
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.RESPONSE_WEB_SEARCH_CALL_IN_PROGRESS
+ :ivar output_index: The index of the output item that the web search call is associated with.
+ Required.
+ :vartype output_index: int
+ :ivar item_id: Unique ID for the output item associated with the web search call. Required.
+ :vartype item_id: str
+ :ivar sequence_number: The sequence number of the web search call being processed. Required.
+ :vartype sequence_number: int
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_WEB_SEARCH_CALL_IN_PROGRESS] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the event. Always ``response.web_search_call.in_progress``. Required.
+ RESPONSE_WEB_SEARCH_CALL_IN_PROGRESS."""
+ output_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the output item that the web search call is associated with. Required."""
+ item_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Unique ID for the output item associated with the web search call. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ output_index: int,
+ item_id: str,
+ sequence_number: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_WEB_SEARCH_CALL_IN_PROGRESS # type: ignore
+
+
+class ResponseWebSearchCallSearchingEvent(ResponseStreamEvent, discriminator="response.web_search_call.searching"):
+ """Emitted when a web search call is executing.
+
+ :ivar type: The type of the event. Always ``response.web_search_call.searching``. Required.
+ RESPONSE_WEB_SEARCH_CALL_SEARCHING.
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.RESPONSE_WEB_SEARCH_CALL_SEARCHING
+ :ivar output_index: The index of the output item that the web search call is associated with.
+ Required.
+ :vartype output_index: int
+ :ivar item_id: Unique ID for the output item associated with the web search call. Required.
+ :vartype item_id: str
+ :ivar sequence_number: The sequence number of the web search call being processed. Required.
+ :vartype sequence_number: int
+ """
+
+ type: Literal[ResponseStreamEventType.RESPONSE_WEB_SEARCH_CALL_SEARCHING] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the event. Always ``response.web_search_call.searching``. Required.
+ RESPONSE_WEB_SEARCH_CALL_SEARCHING."""
+ output_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the output item that the web search call is associated with. Required."""
+ item_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Unique ID for the output item associated with the web search call. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ output_index: int,
+ item_id: str,
+ sequence_number: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ResponseStreamEventType.RESPONSE_WEB_SEARCH_CALL_SEARCHING # type: ignore
+
+
+class ScreenshotParam(ComputerAction, discriminator="screenshot"):
+ """Screenshot.
+
+ :ivar type: Specifies the event type. For a screenshot action, this property is always set to
+ ``screenshot``. Required. SCREENSHOT.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.SCREENSHOT
+ """
+
+ type: Literal[ComputerActionType.SCREENSHOT] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Specifies the event type. For a screenshot action, this property is always set to
+ ``screenshot``. Required. SCREENSHOT."""
+
+ @overload
+ def __init__(
+ self,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ComputerActionType.SCREENSHOT # type: ignore
+
+
+class ScrollParam(ComputerAction, discriminator="scroll"):
+ """Scroll.
+
+ :ivar type: Specifies the event type. For a scroll action, this property is always set to
+ ``scroll``. Required. SCROLL.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.SCROLL
+ :ivar x: The x-coordinate where the scroll occurred. Required.
+ :vartype x: int
+ :ivar y: The y-coordinate where the scroll occurred. Required.
+ :vartype y: int
+ :ivar scroll_x: The horizontal scroll distance. Required.
+ :vartype scroll_x: int
+ :ivar scroll_y: The vertical scroll distance. Required.
+ :vartype scroll_y: int
+ """
+
+ type: Literal[ComputerActionType.SCROLL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Specifies the event type. For a scroll action, this property is always set to ``scroll``.
+ Required. SCROLL."""
+ x: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The x-coordinate where the scroll occurred. Required."""
+ y: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The y-coordinate where the scroll occurred. Required."""
+ scroll_x: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The horizontal scroll distance. Required."""
+ scroll_y: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The vertical scroll distance. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ x: int,
+ y: int,
+ scroll_x: int,
+ scroll_y: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ComputerActionType.SCROLL # type: ignore
+
+
+class SharepointGroundingToolCall(OutputItem, discriminator="sharepoint_grounding_preview_call"):
+ """A SharePoint grounding tool call.
+
+ :ivar agent_reference: The agent that created the item.
+ :vartype agent_reference: ~azure.ai.agentserver.responses.models.models.AgentReference
+ :ivar response_id: The response on which the item is created.
+ :vartype response_id: str
+ :ivar type: Required. SHAREPOINT_GROUNDING_PREVIEW_CALL.
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.SHAREPOINT_GROUNDING_PREVIEW_CALL
+ :ivar call_id: The unique ID of the tool call generated by the model. Required.
+ :vartype call_id: str
+ :ivar arguments: A JSON string of the arguments to pass to the tool. Required.
+ :vartype arguments: str
+ :ivar status: The status of the tool call. Required. Known values are: "in_progress",
+ "completed", "incomplete", and "failed".
+ :vartype status: str or ~azure.ai.agentserver.responses.models.models.ToolCallStatus
+ :ivar id: Required.
+ :vartype id: str
+ """
+
+ type: Literal[OutputItemType.SHAREPOINT_GROUNDING_PREVIEW_CALL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Required. SHAREPOINT_GROUNDING_PREVIEW_CALL."""
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the tool call generated by the model. Required."""
+ arguments: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """A JSON string of the arguments to pass to the tool. Required."""
+ status: Union[str, "_models.ToolCallStatus"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the tool call. Required. Known values are: \"in_progress\", \"completed\",
+ \"incomplete\", and \"failed\"."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ call_id: str,
+ arguments: str,
+ status: Union[str, "_models.ToolCallStatus"],
+ id: str, # pylint: disable=redefined-builtin
+ agent_reference: Optional["_models.AgentReference"] = None,
+ response_id: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputItemType.SHAREPOINT_GROUNDING_PREVIEW_CALL # type: ignore
+
+
+class SharepointGroundingToolCallOutput(OutputItem, discriminator="sharepoint_grounding_preview_call_output"):
+ """The output of a SharePoint grounding tool call.
+
+ :ivar agent_reference: The agent that created the item.
+ :vartype agent_reference: ~azure.ai.agentserver.responses.models.models.AgentReference
+ :ivar response_id: The response on which the item is created.
+ :vartype response_id: str
+ :ivar type: Required. SHAREPOINT_GROUNDING_PREVIEW_CALL_OUTPUT.
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.SHAREPOINT_GROUNDING_PREVIEW_CALL_OUTPUT
+ :ivar call_id: The unique ID of the tool call generated by the model. Required.
+ :vartype call_id: str
+ :ivar output: The output from the SharePoint grounding tool call. Is one of the following
+ types: {str: Any}, str, [Any]
+ :vartype output: dict[str, any] or str or list[any]
+ :ivar status: The status of the tool call. Required. Known values are: "in_progress",
+ "completed", "incomplete", and "failed".
+ :vartype status: str or ~azure.ai.agentserver.responses.models.models.ToolCallStatus
+ :ivar id: Required.
+ :vartype id: str
+ """
+
+ type: Literal[OutputItemType.SHAREPOINT_GROUNDING_PREVIEW_CALL_OUTPUT] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Required. SHAREPOINT_GROUNDING_PREVIEW_CALL_OUTPUT."""
+ call_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The unique ID of the tool call generated by the model. Required."""
+ output: Optional["_types.ToolCallOutputContent"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The output from the SharePoint grounding tool call. Is one of the following types: {str: Any},
+ str, [Any]"""
+ status: Union[str, "_models.ToolCallStatus"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The status of the tool call. Required. Known values are: \"in_progress\", \"completed\",
+ \"incomplete\", and \"failed\"."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ call_id: str,
+ status: Union[str, "_models.ToolCallStatus"],
+ id: str, # pylint: disable=redefined-builtin
+ agent_reference: Optional["_models.AgentReference"] = None,
+ response_id: Optional[str] = None,
+ output: Optional["_types.ToolCallOutputContent"] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputItemType.SHAREPOINT_GROUNDING_PREVIEW_CALL_OUTPUT # type: ignore
+
+
+class SharepointGroundingToolParameters(_Model):
+ """The sharepoint grounding tool parameters.
+
+ :ivar name: Optional user-defined name for this tool or configuration.
+ :vartype name: str
+ :ivar description: Optional user-defined description for this tool or configuration.
+ :vartype description: str
+ :ivar project_connections: The project connections attached to this tool. There can be a
+ maximum of 1 connection resource attached to the tool.
+ :vartype project_connections:
+ list[~azure.ai.agentserver.responses.models.models.ToolProjectConnection]
+ """
+
+ name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined name for this tool or configuration."""
+ description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined description for this tool or configuration."""
+ project_connections: Optional[list["_models.ToolProjectConnection"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The project connections attached to this tool. There can be a maximum of 1 connection resource
+ attached to the tool."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ project_connections: Optional[list["_models.ToolProjectConnection"]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class SharepointPreviewTool(Tool, discriminator="sharepoint_grounding_preview"):
+ """The input definition information for a sharepoint tool as used to configure an agent.
+
+ :ivar type: The object type, which is always 'sharepoint_grounding_preview'. Required.
+ SHAREPOINT_GROUNDING_PREVIEW.
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.SHAREPOINT_GROUNDING_PREVIEW
+ :ivar name: Optional user-defined name for this tool or configuration.
+ :vartype name: str
+ :ivar description: Optional user-defined description for this tool or configuration.
+ :vartype description: str
+ :ivar sharepoint_grounding_preview: The sharepoint grounding tool parameters. Required.
+ :vartype sharepoint_grounding_preview:
+ ~azure.ai.agentserver.responses.models.models.SharepointGroundingToolParameters
+ """
+
+ type: Literal[ToolType.SHAREPOINT_GROUNDING_PREVIEW] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The object type, which is always 'sharepoint_grounding_preview'. Required.
+ SHAREPOINT_GROUNDING_PREVIEW."""
+ name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined name for this tool or configuration."""
+ description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined description for this tool or configuration."""
+ sharepoint_grounding_preview: "_models.SharepointGroundingToolParameters" = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The sharepoint grounding tool parameters. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ sharepoint_grounding_preview: "_models.SharepointGroundingToolParameters",
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ToolType.SHAREPOINT_GROUNDING_PREVIEW # type: ignore
+
+
+class SkillReferenceParam(ContainerSkill, discriminator="skill_reference"):
+ """SkillReferenceParam.
+
+ :ivar type: References a skill created with the /v1/skills endpoint. Required. SKILL_REFERENCE.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.SKILL_REFERENCE
+ :ivar skill_id: The ID of the referenced skill. Required.
+ :vartype skill_id: str
+ :ivar version: Optional skill version. Use a positive integer or 'latest'. Omit for default.
+ :vartype version: str
+ """
+
+ type: Literal[ContainerSkillType.SKILL_REFERENCE] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """References a skill created with the /v1/skills endpoint. Required. SKILL_REFERENCE."""
+ skill_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the referenced skill. Required."""
+ version: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional skill version. Use a positive integer or 'latest'. Omit for default."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ skill_id: str,
+ version: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ContainerSkillType.SKILL_REFERENCE # type: ignore
+
+
+class ToolChoiceParam(_Model):
+ """How the model should select which tool (or tools) to use when generating a response. See the
+ ``tools`` parameter to see how to specify which tools the model can call.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ ToolChoiceAllowed, SpecificApplyPatchParam, ToolChoiceCodeInterpreter,
+ ToolChoiceComputerUsePreview, ToolChoiceCustom, ToolChoiceFileSearch, ToolChoiceFunction,
+ ToolChoiceImageGeneration, ToolChoiceMCP, SpecificFunctionShellParam,
+ ToolChoiceWebSearchPreview, ToolChoiceWebSearchPreview20250311
+
+ :ivar type: Required. Known values are: "allowed_tools", "function", "mcp", "custom",
+ "apply_patch", "shell", "file_search", "web_search_preview", "computer_use_preview",
+ "web_search_preview_2025_03_11", "image_generation", and "code_interpreter".
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.ToolChoiceParamType
+ """
+
+ __mapping__: dict[str, _Model] = {}
+ type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])
+ """Required. Known values are: \"allowed_tools\", \"function\", \"mcp\", \"custom\",
+ \"apply_patch\", \"shell\", \"file_search\", \"web_search_preview\", \"computer_use_preview\",
+ \"web_search_preview_2025_03_11\", \"image_generation\", and \"code_interpreter\"."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ type: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class SpecificApplyPatchParam(ToolChoiceParam, discriminator="apply_patch"):
+ """Specific apply patch tool choice.
+
+ :ivar type: The tool to call. Always ``apply_patch``. Required. APPLY_PATCH.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.APPLY_PATCH
+ """
+
+ type: Literal[ToolChoiceParamType.APPLY_PATCH] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The tool to call. Always ``apply_patch``. Required. APPLY_PATCH."""
+
+ @overload
+ def __init__(
+ self,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ToolChoiceParamType.APPLY_PATCH # type: ignore
+
+
+class SpecificFunctionShellParam(ToolChoiceParam, discriminator="shell"):
+ """Specific shell tool choice.
+
+ :ivar type: The tool to call. Always ``shell``. Required. SHELL.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.SHELL
+ """
+
+ type: Literal[ToolChoiceParamType.SHELL] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The tool to call. Always ``shell``. Required. SHELL."""
+
+ @overload
+ def __init__(
+ self,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ToolChoiceParamType.SHELL # type: ignore
+
+
+class StructuredOutputDefinition(_Model):
+ """A structured output that can be produced by the agent.
+
+ :ivar name: The name of the structured output. Required.
+ :vartype name: str
+ :ivar description: A description of the output to emit. Used by the model to determine when to
+ emit the output. Required.
+ :vartype description: str
+ :ivar schema: The JSON schema for the structured output. Required.
+ :vartype schema: dict[str, any]
+ :ivar strict: Whether to enforce strict validation. Default ``true``. Required.
+ :vartype strict: bool
+ """
+
+ name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The name of the structured output. Required."""
+ description: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """A description of the output to emit. Used by the model to determine when to emit the output.
+ Required."""
+ schema: dict[str, Any] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The JSON schema for the structured output. Required."""
+ strict: bool = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Whether to enforce strict validation. Default ``true``. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ name: str,
+ description: str,
+ schema: dict[str, Any],
+ strict: bool,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class StructuredOutputsOutputItem(OutputItem, discriminator="structured_outputs"):
+ """StructuredOutputsOutputItem.
+
+ :ivar agent_reference: The agent that created the item.
+ :vartype agent_reference: ~azure.ai.agentserver.responses.models.models.AgentReference
+ :ivar response_id: The response on which the item is created.
+ :vartype response_id: str
+ :ivar type: Required. STRUCTURED_OUTPUTS.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.STRUCTURED_OUTPUTS
+ :ivar output: The structured output captured during the response. Required.
+ :vartype output: any
+ :ivar id: Required.
+ :vartype id: str
+ """
+
+ type: Literal[OutputItemType.STRUCTURED_OUTPUTS] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Required. STRUCTURED_OUTPUTS."""
+ output: Any = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The structured output captured during the response. Required."""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ output: Any,
+ id: str, # pylint: disable=redefined-builtin
+ agent_reference: Optional["_models.AgentReference"] = None,
+ response_id: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputItemType.STRUCTURED_OUTPUTS # type: ignore
+
+
+class SummaryTextContent(MessageContent, discriminator="summary_text"):
+ """Summary text.
+
+ :ivar type: The type of the object. Always ``summary_text``. Required. SUMMARY_TEXT.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.SUMMARY_TEXT
+ :ivar text: A summary of the reasoning output from the model so far. Required.
+ :vartype text: str
+ """
+
+ type: Literal[MessageContentType.SUMMARY_TEXT] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the object. Always ``summary_text``. Required. SUMMARY_TEXT."""
+ text: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """A summary of the reasoning output from the model so far. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ text: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = MessageContentType.SUMMARY_TEXT # type: ignore
+
+
+class TextContent(MessageContent, discriminator="text"):
+ """Text Content.
+
+ :ivar type: Required. TEXT.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.TEXT
+ :ivar text: Required.
+ :vartype text: str
+ """
+
+ type: Literal[MessageContentType.TEXT] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Required. TEXT."""
+ text: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ text: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = MessageContentType.TEXT # type: ignore
+
+
+class TextResponseFormatConfiguration(_Model):
+ """An object specifying the format that the model must output. Configuring ``{ "type":
+ "json_schema" }`` enables Structured Outputs, which ensures the model will match your supplied
+ JSON schema. Learn more in the `Structured Outputs guide `_.
+ The default format is ``{ "type": "text" }`` with no additional options. *Not recommended for
+ gpt-4o and newer models:** Setting to ``{ "type": "json_object" }`` enables the older JSON
+ mode, which ensures the message the model generates is valid JSON. Using ``json_schema`` is
+ preferred for models that support it.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ TextResponseFormatConfigurationResponseFormatJsonObject, TextResponseFormatJsonSchema,
+ TextResponseFormatConfigurationResponseFormatText
+
+ :ivar type: Required. Known values are: "text", "json_schema", and "json_object".
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.TextResponseFormatConfigurationType
+ """
+
+ __mapping__: dict[str, _Model] = {}
+ type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"])
+ """Required. Known values are: \"text\", \"json_schema\", and \"json_object\"."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ type: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class TextResponseFormatConfigurationResponseFormatJsonObject(
+ TextResponseFormatConfiguration, discriminator="json_object"
+): # pylint: disable=name-too-long
+ """JSON object.
+
+ :ivar type: The type of response format being defined. Always ``json_object``. Required.
+ JSON_OBJECT.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.JSON_OBJECT
+ """
+
+ type: Literal[TextResponseFormatConfigurationType.JSON_OBJECT] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of response format being defined. Always ``json_object``. Required. JSON_OBJECT."""
+
+ @overload
+ def __init__(
+ self,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = TextResponseFormatConfigurationType.JSON_OBJECT # type: ignore
+
+
+class TextResponseFormatConfigurationResponseFormatText(
+ TextResponseFormatConfiguration, discriminator="text"
+): # pylint: disable=name-too-long
+ """Text.
+
+ :ivar type: The type of response format being defined. Always ``text``. Required. TEXT.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.TEXT
+ """
+
+ type: Literal[TextResponseFormatConfigurationType.TEXT] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of response format being defined. Always ``text``. Required. TEXT."""
+
+ @overload
+ def __init__(
+ self,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = TextResponseFormatConfigurationType.TEXT # type: ignore
+
+
+class TextResponseFormatJsonSchema(TextResponseFormatConfiguration, discriminator="json_schema"):
+ """JSON schema.
+
+ :ivar type: The type of response format being defined. Always ``json_schema``. Required.
+ JSON_SCHEMA.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.JSON_SCHEMA
+ :ivar description: A description of what the response format is for, used by the model to
+ determine how to respond in the format.
+ :vartype description: str
+ :ivar name: The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and
+ dashes, with a maximum length of 64. Required.
+ :vartype name: str
+ :ivar schema: Required.
+ :vartype schema: ~azure.ai.agentserver.responses.models.models.ResponseFormatJsonSchemaSchema
+ :ivar strict:
+ :vartype strict: bool
+ """
+
+ type: Literal[TextResponseFormatConfigurationType.JSON_SCHEMA] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of response format being defined. Always ``json_schema``. Required. JSON_SCHEMA."""
+ description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """A description of what the response format is for, used by the model to determine how to respond
+ in the format."""
+ name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with
+ a maximum length of 64. Required."""
+ schema: "_models.ResponseFormatJsonSchemaSchema" = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Required."""
+ strict: Optional[bool] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+
+ @overload
+ def __init__(
+ self,
+ *,
+ name: str,
+ schema: "_models.ResponseFormatJsonSchemaSchema",
+ description: Optional[str] = None,
+ strict: Optional[bool] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = TextResponseFormatConfigurationType.JSON_SCHEMA # type: ignore
+
+
+class ToolChoiceAllowed(ToolChoiceParam, discriminator="allowed_tools"):
+ """Allowed tools.
+
+ :ivar type: Allowed tool configuration type. Always ``allowed_tools``. Required. ALLOWED_TOOLS.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.ALLOWED_TOOLS
+ :ivar mode: Constrains the tools available to the model to a pre-defined set. ``auto`` allows
+ the model to pick from among the allowed tools and generate a message. ``required`` requires
+ the model to call one or more of the allowed tools. Required. Is either a Literal["auto"] type
+ or a Literal["required"] type.
+ :vartype mode: str or str
+ :ivar tools: A list of tool definitions that the model should be allowed to call. For the
+ Responses API, the list of tool definitions might look like:
+
+ .. code-block:: json
+
+ [
+ { "type": "function", "name": "get_weather" },
+ { "type": "mcp", "server_label": "deepwiki" },
+ { "type": "image_generation" }
+ ]. Required.
+ :vartype tools: list[dict[str, any]]
+ """
+
+ type: Literal[ToolChoiceParamType.ALLOWED_TOOLS] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Allowed tool configuration type. Always ``allowed_tools``. Required. ALLOWED_TOOLS."""
+ mode: Literal["auto", "required"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Constrains the tools available to the model to a pre-defined set. ``auto`` allows the model to
+ pick from among the allowed tools and generate a message. ``required`` requires the model to
+ call one or more of the allowed tools. Required. Is either a Literal[\"auto\"] type or a
+ Literal[\"required\"] type."""
+ tools: list[dict[str, Any]] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """A list of tool definitions that the model should be allowed to call. For the Responses API, the
+ list of tool definitions might look like:
+
+ .. code-block:: json
+
+ [
+ { \"type\": \"function\", \"name\": \"get_weather\" },
+ { \"type\": \"mcp\", \"server_label\": \"deepwiki\" },
+ { \"type\": \"image_generation\" }
+ ]. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ mode: Literal["auto", "required"],
+ tools: list[dict[str, Any]],
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ToolChoiceParamType.ALLOWED_TOOLS # type: ignore
+
+
+class ToolChoiceCodeInterpreter(ToolChoiceParam, discriminator="code_interpreter"):
+ """Indicates that the model should use a built-in tool to generate a response. `Learn more about
+ built-in tools `_.
+
+ :ivar type: Required. CODE_INTERPRETER.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.CODE_INTERPRETER
+ """
+
+ type: Literal[ToolChoiceParamType.CODE_INTERPRETER] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Required. CODE_INTERPRETER."""
+
+ @overload
+ def __init__(
+ self,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ToolChoiceParamType.CODE_INTERPRETER # type: ignore
+
+
+class ToolChoiceComputerUsePreview(ToolChoiceParam, discriminator="computer_use_preview"):
+ """Indicates that the model should use a built-in tool to generate a response. `Learn more about
+ built-in tools `_.
+
+ :ivar type: Required. COMPUTER_USE_PREVIEW.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.COMPUTER_USE_PREVIEW
+ """
+
+ type: Literal[ToolChoiceParamType.COMPUTER_USE_PREVIEW] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Required. COMPUTER_USE_PREVIEW."""
+
+ @overload
+ def __init__(
+ self,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ToolChoiceParamType.COMPUTER_USE_PREVIEW # type: ignore
+
+
+class ToolChoiceCustom(ToolChoiceParam, discriminator="custom"):
+ """Custom tool.
+
+ :ivar type: For custom tool calling, the type is always ``custom``. Required. CUSTOM.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.CUSTOM
+ :ivar name: The name of the custom tool to call. Required.
+ :vartype name: str
+ """
+
+ type: Literal[ToolChoiceParamType.CUSTOM] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """For custom tool calling, the type is always ``custom``. Required. CUSTOM."""
+ name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The name of the custom tool to call. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ name: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ToolChoiceParamType.CUSTOM # type: ignore
+
+
+class ToolChoiceFileSearch(ToolChoiceParam, discriminator="file_search"):
+ """Indicates that the model should use a built-in tool to generate a response. `Learn more about
+ built-in tools `_.
+
+ :ivar type: Required. FILE_SEARCH.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.FILE_SEARCH
+ """
+
+ type: Literal[ToolChoiceParamType.FILE_SEARCH] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Required. FILE_SEARCH."""
+
+ @overload
+ def __init__(
+ self,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ToolChoiceParamType.FILE_SEARCH # type: ignore
+
+
+class ToolChoiceFunction(ToolChoiceParam, discriminator="function"):
+ """Function tool.
+
+ :ivar type: For function calling, the type is always ``function``. Required. FUNCTION.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.FUNCTION
+ :ivar name: The name of the function to call. Required.
+ :vartype name: str
+ """
+
+ type: Literal[ToolChoiceParamType.FUNCTION] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """For function calling, the type is always ``function``. Required. FUNCTION."""
+ name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The name of the function to call. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ name: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ToolChoiceParamType.FUNCTION # type: ignore
+
+
+class ToolChoiceImageGeneration(ToolChoiceParam, discriminator="image_generation"):
+ """Indicates that the model should use a built-in tool to generate a response. `Learn more about
+ built-in tools `_.
+
+ :ivar type: Required. IMAGE_GENERATION.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.IMAGE_GENERATION
+ """
+
+ type: Literal[ToolChoiceParamType.IMAGE_GENERATION] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Required. IMAGE_GENERATION."""
+
+ @overload
+ def __init__(
+ self,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ToolChoiceParamType.IMAGE_GENERATION # type: ignore
+
+
+class ToolChoiceMCP(ToolChoiceParam, discriminator="mcp"):
+ """MCP tool.
+
+ :ivar type: For MCP tools, the type is always ``mcp``. Required. MCP.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.MCP
+ :ivar server_label: The label of the MCP server to use. Required.
+ :vartype server_label: str
+ :ivar name:
+ :vartype name: str
+ """
+
+ type: Literal[ToolChoiceParamType.MCP] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """For MCP tools, the type is always ``mcp``. Required. MCP."""
+ server_label: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The label of the MCP server to use. Required."""
+ name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+
+ @overload
+ def __init__(
+ self,
+ *,
+ server_label: str,
+ name: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ToolChoiceParamType.MCP # type: ignore
+
+
+class ToolChoiceWebSearchPreview(ToolChoiceParam, discriminator="web_search_preview"):
+ """Indicates that the model should use a built-in tool to generate a response. `Learn more about
+ built-in tools `_.
+
+ :ivar type: Required. WEB_SEARCH_PREVIEW.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.WEB_SEARCH_PREVIEW
+ """
+
+ type: Literal[ToolChoiceParamType.WEB_SEARCH_PREVIEW] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Required. WEB_SEARCH_PREVIEW."""
+
+ @overload
+ def __init__(
+ self,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ToolChoiceParamType.WEB_SEARCH_PREVIEW # type: ignore
+
+
+class ToolChoiceWebSearchPreview20250311(ToolChoiceParam, discriminator="web_search_preview_2025_03_11"):
+ """Indicates that the model should use a built-in tool to generate a response. `Learn more about
+ built-in tools `_.
+
+ :ivar type: Required. WEB_SEARCH_PREVIEW2025_03_11.
+ :vartype type: str or
+ ~azure.ai.agentserver.responses.models.models.WEB_SEARCH_PREVIEW2025_03_11
+ """
+
+ type: Literal[ToolChoiceParamType.WEB_SEARCH_PREVIEW2025_03_11] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Required. WEB_SEARCH_PREVIEW2025_03_11."""
+
+ @overload
+ def __init__(
+ self,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ToolChoiceParamType.WEB_SEARCH_PREVIEW2025_03_11 # type: ignore
+
+
+class ToolProjectConnection(_Model):
+ """A project connection resource.
+
+ :ivar name: Optional user-defined name for this tool or configuration.
+ :vartype name: str
+ :ivar description: Optional user-defined description for this tool or configuration.
+ :vartype description: str
+ :ivar project_connection_id: A project connection in a ToolProjectConnectionList attached to
+ this tool. Required.
+ :vartype project_connection_id: str
+ """
+
+ name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined name for this tool or configuration."""
+ description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined description for this tool or configuration."""
+ project_connection_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """A project connection in a ToolProjectConnectionList attached to this tool. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ project_connection_id: str,
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class TopLogProb(_Model):
+ """Top log probability.
+
+ :ivar token: Required.
+ :vartype token: str
+ :ivar logprob: Required.
+ :vartype logprob: int
+ :ivar bytes: Required.
+ :vartype bytes: list[int]
+ """
+
+ token: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+ logprob: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+ bytes: list[int] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ token: str,
+ logprob: int,
+ bytes: list[int],
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class TypeParam(ComputerAction, discriminator="type"):
+ """Type.
+
+ :ivar type: Specifies the event type. For a type action, this property is always set to
+ ``type``. Required. TYPE.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.TYPE
+ :ivar text: The text to type. Required.
+ :vartype text: str
+ """
+
+ type: Literal[ComputerActionType.TYPE] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Specifies the event type. For a type action, this property is always set to ``type``. Required.
+ TYPE."""
+ text: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The text to type. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ text: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ComputerActionType.TYPE # type: ignore
+
+
+class UrlCitationBody(Annotation, discriminator="url_citation"):
+ """URL citation.
+
+ :ivar type: The type of the URL citation. Always ``url_citation``. Required. URL_CITATION.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.URL_CITATION
+ :ivar url: The URL of the web resource. Required.
+ :vartype url: str
+ :ivar start_index: The index of the first character of the URL citation in the message.
+ Required.
+ :vartype start_index: int
+ :ivar end_index: The index of the last character of the URL citation in the message. Required.
+ :vartype end_index: int
+ :ivar title: The title of the web resource. Required.
+ :vartype title: str
+ """
+
+ type: Literal[AnnotationType.URL_CITATION] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the URL citation. Always ``url_citation``. Required. URL_CITATION."""
+ url: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The URL of the web resource. Required."""
+ start_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the first character of the URL citation in the message. Required."""
+ end_index: int = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The index of the last character of the URL citation in the message. Required."""
+ title: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The title of the web resource. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ url: str,
+ start_index: int,
+ end_index: int,
+ title: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = AnnotationType.URL_CITATION # type: ignore
+
+
+class UserProfileMemoryItem(MemoryItem, discriminator="user_profile"):
+ """A memory item specifically containing user profile information extracted from conversations,
+ such as preferences, interests, and personal details.
+
+ :ivar memory_id: The unique ID of the memory item. Required.
+ :vartype memory_id: str
+ :ivar updated_at: The last update time of the memory item. Required.
+ :vartype updated_at: ~datetime.datetime
+ :ivar scope: The namespace that logically groups and isolates memories, such as a user ID.
+ Required.
+ :vartype scope: str
+ :ivar content: The content of the memory. Required.
+ :vartype content: str
+ :ivar kind: The kind of the memory item. Required. User profile information extracted from
+ conversations.
+ :vartype kind: str or ~azure.ai.agentserver.responses.models.models.USER_PROFILE
+ """
+
+ kind: Literal[MemoryItemKind.USER_PROFILE] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The kind of the memory item. Required. User profile information extracted from conversations."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ memory_id: str,
+ updated_at: datetime.datetime,
+ scope: str,
+ content: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.kind = MemoryItemKind.USER_PROFILE # type: ignore
+
+
+class VectorStoreFileAttributes(_Model):
+ """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing
+ additional information about the object in a structured format, and querying for objects via
+ API or the dashboard. Keys are strings with a maximum length of 64 characters. Values are
+ strings with a maximum length of 512 characters, booleans, or numbers.
+
+ """
+
+
+class WaitParam(ComputerAction, discriminator="wait"):
+ """Wait.
+
+ :ivar type: Specifies the event type. For a wait action, this property is always set to
+ ``wait``. Required. WAIT.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.WAIT
+ """
+
+ type: Literal[ComputerActionType.WAIT] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Specifies the event type. For a wait action, this property is always set to ``wait``. Required.
+ WAIT."""
+
+ @overload
+ def __init__(
+ self,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ComputerActionType.WAIT # type: ignore
+
+
+class WebSearchActionFind(_Model):
+ """Find action.
+
+ :ivar type: The action type. Required. Default value is "find_in_page".
+ :vartype type: str
+ :ivar url: The URL of the page searched for the pattern. Required.
+ :vartype url: str
+ :ivar pattern: The pattern or text to search for within the page. Required.
+ :vartype pattern: str
+ """
+
+ type: Literal["find_in_page"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The action type. Required. Default value is \"find_in_page\"."""
+ url: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The URL of the page searched for the pattern. Required."""
+ pattern: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The pattern or text to search for within the page. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ url: str,
+ pattern: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type: Literal["find_in_page"] = "find_in_page"
+
+
+class WebSearchActionOpenPage(_Model):
+ """Open page action.
+
+ :ivar type: The action type. Required. Default value is "open_page".
+ :vartype type: str
+ :ivar url: The URL opened by the model.
+ :vartype url: str
+ """
+
+ type: Literal["open_page"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The action type. Required. Default value is \"open_page\"."""
+ url: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The URL opened by the model."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ url: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type: Literal["open_page"] = "open_page"
+
+
+class WebSearchActionSearch(_Model):
+ """Search action.
+
+ :ivar type: The action type. Required. Default value is "search".
+ :vartype type: str
+ :ivar query: [DEPRECATED] The search query. Required.
+ :vartype query: str
+ :ivar queries: Search queries.
+ :vartype queries: list[str]
+ :ivar sources: Web search sources.
+ :vartype sources:
+ list[~azure.ai.agentserver.responses.models.models.WebSearchActionSearchSources]
+ """
+
+ type: Literal["search"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The action type. Required. Default value is \"search\"."""
+ query: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """[DEPRECATED] The search query. Required."""
+ queries: Optional[list[str]] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Search queries."""
+ sources: Optional[list["_models.WebSearchActionSearchSources"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Web search sources."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ query: str,
+ queries: Optional[list[str]] = None,
+ sources: Optional[list["_models.WebSearchActionSearchSources"]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type: Literal["search"] = "search"
+
+
+class WebSearchActionSearchSources(_Model):
+ """WebSearchActionSearchSources.
+
+ :ivar type: Required. Default value is "url".
+ :vartype type: str
+ :ivar url: Required.
+ :vartype url: str
+ """
+
+ type: Literal["url"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required. Default value is \"url\"."""
+ url: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ url: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type: Literal["url"] = "url"
+
+
+class WebSearchApproximateLocation(_Model):
+ """Web search approximate location.
+
+ :ivar type: The type of location approximation. Always ``approximate``. Required. Default value
+ is "approximate".
+ :vartype type: str
+ :ivar country:
+ :vartype country: str
+ :ivar region:
+ :vartype region: str
+ :ivar city:
+ :vartype city: str
+ :ivar timezone:
+ :vartype timezone: str
+ """
+
+ type: Literal["approximate"] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The type of location approximation. Always ``approximate``. Required. Default value is
+ \"approximate\"."""
+ country: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ region: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ city: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ timezone: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+
+ @overload
+ def __init__(
+ self,
+ *,
+ country: Optional[str] = None,
+ region: Optional[str] = None,
+ city: Optional[str] = None,
+ timezone: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type: Literal["approximate"] = "approximate"
+
+
+class WebSearchConfiguration(_Model):
+ """A web search configuration for bing custom search.
+
+ :ivar name: Optional user-defined name for this tool or configuration.
+ :vartype name: str
+ :ivar description: Optional user-defined description for this tool or configuration.
+ :vartype description: str
+ :ivar project_connection_id: Project connection id for grounding with bing custom search.
+ Required.
+ :vartype project_connection_id: str
+ :ivar instance_name: Name of the custom configuration instance given to config. Required.
+ :vartype instance_name: str
+ """
+
+ name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined name for this tool or configuration."""
+ description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined description for this tool or configuration."""
+ project_connection_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Project connection id for grounding with bing custom search. Required."""
+ instance_name: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Name of the custom configuration instance given to config. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ project_connection_id: str,
+ instance_name: str,
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class WebSearchPreviewTool(Tool, discriminator="web_search_preview"):
+ """Web search preview.
+
+ :ivar type: The type of the web search tool. One of ``web_search_preview`` or
+ ``web_search_preview_2025_03_11``. Required. WEB_SEARCH_PREVIEW.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.WEB_SEARCH_PREVIEW
+ :ivar user_location:
+ :vartype user_location: ~azure.ai.agentserver.responses.models.models.ApproximateLocation
+ :ivar search_context_size: High level guidance for the amount of context window space to use
+ for the search. One of ``low``, ``medium``, or ``high``. ``medium`` is the default. Known
+ values are: "low", "medium", and "high".
+ :vartype search_context_size: str or
+ ~azure.ai.agentserver.responses.models.models.SearchContextSize
+ """
+
+ type: Literal[ToolType.WEB_SEARCH_PREVIEW] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the web search tool. One of ``web_search_preview`` or
+ ``web_search_preview_2025_03_11``. Required. WEB_SEARCH_PREVIEW."""
+ user_location: Optional["_models.ApproximateLocation"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ search_context_size: Optional[Union[str, "_models.SearchContextSize"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """High level guidance for the amount of context window space to use for the search. One of
+ ``low``, ``medium``, or ``high``. ``medium`` is the default. Known values are: \"low\",
+ \"medium\", and \"high\"."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ user_location: Optional["_models.ApproximateLocation"] = None,
+ search_context_size: Optional[Union[str, "_models.SearchContextSize"]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ToolType.WEB_SEARCH_PREVIEW # type: ignore
+
+
+class WebSearchTool(Tool, discriminator="web_search"):
+ """Web search.
+
+ :ivar type: The type of the web search tool. One of ``web_search`` or
+ ``web_search_2025_08_26``. Required. WEB_SEARCH.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.WEB_SEARCH
+ :ivar filters:
+ :vartype filters: ~azure.ai.agentserver.responses.models.models.WebSearchToolFilters
+ :ivar user_location:
+ :vartype user_location:
+ ~azure.ai.agentserver.responses.models.models.WebSearchApproximateLocation
+ :ivar search_context_size: High level guidance for the amount of context window space to use
+ for the search. One of ``low``, ``medium``, or ``high``. ``medium`` is the default. Is one of
+ the following types: Literal["low"], Literal["medium"], Literal["high"]
+ :vartype search_context_size: str or str or str
+ :ivar name: Optional user-defined name for this tool or configuration.
+ :vartype name: str
+ :ivar description: Optional user-defined description for this tool or configuration.
+ :vartype description: str
+ :ivar custom_search_configuration: The project connections attached to this tool. There can be
+ a maximum of 1 connection resource attached to the tool.
+ :vartype custom_search_configuration:
+ ~azure.ai.agentserver.responses.models.models.WebSearchConfiguration
+ """
+
+ type: Literal[ToolType.WEB_SEARCH] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The type of the web search tool. One of ``web_search`` or ``web_search_2025_08_26``. Required.
+ WEB_SEARCH."""
+ filters: Optional["_models.WebSearchToolFilters"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ user_location: Optional["_models.WebSearchApproximateLocation"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ search_context_size: Optional[Literal["low", "medium", "high"]] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """High level guidance for the amount of context window space to use for the search. One of
+ ``low``, ``medium``, or ``high``. ``medium`` is the default. Is one of the following types:
+ Literal[\"low\"], Literal[\"medium\"], Literal[\"high\"]"""
+ name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined name for this tool or configuration."""
+ description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Optional user-defined description for this tool or configuration."""
+ custom_search_configuration: Optional["_models.WebSearchConfiguration"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The project connections attached to this tool. There can be a maximum of 1 connection resource
+ attached to the tool."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ filters: Optional["_models.WebSearchToolFilters"] = None,
+ user_location: Optional["_models.WebSearchApproximateLocation"] = None,
+ search_context_size: Optional[Literal["low", "medium", "high"]] = None,
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ custom_search_configuration: Optional["_models.WebSearchConfiguration"] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ToolType.WEB_SEARCH # type: ignore
+
+
+class WebSearchToolFilters(_Model):
+ """WebSearchToolFilters.
+
+ :ivar allowed_domains:
+ :vartype allowed_domains: list[str]
+ """
+
+ allowed_domains: Optional[list[str]] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+
+ @overload
+ def __init__(
+ self,
+ *,
+ allowed_domains: Optional[list[str]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class WorkflowActionOutputItem(OutputItem, discriminator="workflow_action"):
+ """WorkflowActionOutputItem.
+
+ :ivar agent_reference: The agent that created the item.
+ :vartype agent_reference: ~azure.ai.agentserver.responses.models.models.AgentReference
+ :ivar response_id: The response on which the item is created.
+ :vartype response_id: str
+ :ivar type: Required. WORKFLOW_ACTION.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.WORKFLOW_ACTION
+ :ivar kind: The kind of CSDL action (e.g., 'SetVariable', 'InvokeAzureAgent'). Required.
+ :vartype kind: str
+ :ivar action_id: Unique identifier for the action. Required.
+ :vartype action_id: str
+ :ivar parent_action_id: ID of the parent action if this is a nested action.
+ :vartype parent_action_id: str
+ :ivar previous_action_id: ID of the previous action if this action follows another.
+ :vartype previous_action_id: str
+ :ivar status: Status of the action (e.g., 'in_progress', 'completed', 'failed', 'cancelled').
+ Required. Is one of the following types: Literal["completed"], Literal["failed"],
+ Literal["in_progress"], Literal["cancelled"]
+ :vartype status: str or str or str or str
+ :ivar id: Required.
+ :vartype id: str
+ """
+
+ type: Literal[OutputItemType.WORKFLOW_ACTION] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """Required. WORKFLOW_ACTION."""
+ kind: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The kind of CSDL action (e.g., 'SetVariable', 'InvokeAzureAgent'). Required."""
+ action_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Unique identifier for the action. Required."""
+ parent_action_id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """ID of the parent action if this is a nested action."""
+ previous_action_id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """ID of the previous action if this action follows another."""
+ status: Literal["completed", "failed", "in_progress", "cancelled"] = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """Status of the action (e.g., 'in_progress', 'completed', 'failed', 'cancelled'). Required. Is
+ one of the following types: Literal[\"completed\"], Literal[\"failed\"],
+ Literal[\"in_progress\"], Literal[\"cancelled\"]"""
+ id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ kind: str,
+ action_id: str,
+ status: Literal["completed", "failed", "in_progress", "cancelled"],
+ id: str, # pylint: disable=redefined-builtin
+ agent_reference: Optional["_models.AgentReference"] = None,
+ response_id: Optional[str] = None,
+ parent_action_id: Optional[str] = None,
+ previous_action_id: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = OutputItemType.WORKFLOW_ACTION # type: ignore
+
+
+class WorkIQPreviewTool(Tool, discriminator="work_iq_preview"):
+ """A WorkIQ server-side tool.
+
+ :ivar type: The object type, which is always 'work_iq_preview'. Required. WORK_IQ_PREVIEW.
+ :vartype type: str or ~azure.ai.agentserver.responses.models.models.WORK_IQ_PREVIEW
+ :ivar work_iq_preview: The WorkIQ tool parameters. Required.
+ :vartype work_iq_preview:
+ ~azure.ai.agentserver.responses.models.models.WorkIQPreviewToolParameters
+ """
+
+ type: Literal[ToolType.WORK_IQ_PREVIEW] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore
+ """The object type, which is always 'work_iq_preview'. Required. WORK_IQ_PREVIEW."""
+ work_iq_preview: "_models.WorkIQPreviewToolParameters" = rest_field(
+ visibility=["read", "create", "update", "delete", "query"]
+ )
+ """The WorkIQ tool parameters. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ work_iq_preview: "_models.WorkIQPreviewToolParameters",
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.type = ToolType.WORK_IQ_PREVIEW # type: ignore
+
+
+class WorkIQPreviewToolParameters(_Model):
+ """The WorkIQ tool parameters.
+
+ :ivar project_connection_id: The ID of the WorkIQ project connection. Required.
+ :vartype project_connection_id: str
+ """
+
+ project_connection_id: str = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """The ID of the WorkIQ project connection. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ project_connection_id: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_generated/sdk/models/models/_patch.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_generated/sdk/models/models/_patch.py
new file mode 100644
index 000000000000..832173773657
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_generated/sdk/models/models/_patch.py
@@ -0,0 +1,88 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------
+"""Hand-written customizations injected into the generated models package.
+
+This file is copied over the generated ``_patch.py`` inside
+``sdk/models/models/`` by ``make generate-models``. Anything listed in
+``__all__`` is automatically re-exported by the generated ``__init__.py``,
+shadowing the generated class of the same name.
+
+Approach follows the official customization guide:
+https://aka.ms/azsdk/python/dpcodegen/python/customize
+"""
+
+from enum import Enum
+from typing import Optional
+
+from azure.core import CaseInsensitiveEnumMeta
+from .._utils.model_base import rest_field
+from ._models import CreateResponse as CreateResponseGenerated
+from ._models import ResponseObject as ResponseObjectGenerated
+
+
+class ResponseIncompleteReason(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Reason a response finished as incomplete.
+
+ The upstream TypeSpec defines this as an inline literal union
+ (``"max_output_tokens" | "content_filter"``), so the code generator
+ emits ``Literal[...]`` instead of a named enum. This hand-written
+ enum provides a friendlier symbolic constant for SDK consumers.
+ """
+
+ MAX_OUTPUT_TOKENS = "max_output_tokens"
+ """The response was cut short because the maximum output token limit was reached."""
+ CONTENT_FILTER = "content_filter"
+ """The response was cut short because of a content filter."""
+
+
+# ---------------------------------------------------------------------------
+# Fix temperature / top_p types: numeric → float (emitter bug workaround)
+#
+# The upstream TypeSpec defines temperature and top_p as ``numeric | null``
+# (the abstract base scalar for all numbers). The C# emitter correctly
+# maps this to ``double?`` but @azure-tools/typespec-python@0.61.2 maps
+# ``numeric`` → ``int``. The OpenAPI 3 spec emits ``type: number``
+# (i.e. float), so ``int`` is wrong.
+#
+# Per the official customization guide we subclass the generated models and
+# re-declare the affected fields with the correct type. The generated
+# ``__init__.py`` picks up these subclasses via ``from ._patch import *``
+# which shadows the generated names.
+# ---------------------------------------------------------------------------
+
+
+class CreateResponse(CreateResponseGenerated):
+ """Override generated ``CreateResponse`` to correct temperature/top_p types."""
+
+ temperature: Optional[float] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Sampling temperature. Float between 0 and 2."""
+ top_p: Optional[float] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Nucleus sampling parameter. Float between 0 and 1."""
+
+
+class ResponseObject(ResponseObjectGenerated):
+ """Override generated ``ResponseObject`` to correct temperature/top_p types."""
+
+ temperature: Optional[float] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Sampling temperature. Float between 0 and 2."""
+ top_p: Optional[float] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Nucleus sampling parameter. Float between 0 and 1."""
+
+
+__all__: list[str] = [
+ "ResponseIncompleteReason",
+ "CreateResponse",
+ "ResponseObject",
+]
+
+
+def patch_sdk():
+ """Do not remove from this file.
+
+ `patch_sdk` is a last resort escape hatch that allows you to do customizations
+ you can't accomplish using the techniques described in
+ https://aka.ms/azsdk/python/dpcodegen/python/customize
+ """
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_generated/sdk/models/py.typed b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_generated/sdk/models/py.typed
new file mode 100644
index 000000000000..e5aff4f83af8
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_generated/sdk/models/py.typed
@@ -0,0 +1 @@
+# Marker file for PEP 561.
\ No newline at end of file
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_helpers.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_helpers.py
new file mode 100644
index 000000000000..cbc8892c5ab2
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/_helpers.py
@@ -0,0 +1,390 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Helper functions for CreateResponse and Response model expansion."""
+
+from __future__ import annotations
+
+from typing import Any, Optional
+
+from ._generated import (
+ ConversationParam_2,
+ CreateResponse,
+ Item,
+ ItemMessage,
+ MessageContent,
+ MessageContentInputTextContent,
+ MessageRole,
+ OutputItem,
+ ResponseObject,
+ ToolChoiceAllowed,
+ ToolChoiceOptions,
+ ToolChoiceParam,
+)
+from ._generated.sdk.models._utils.model_base import _deserialize
+
+# ---------------------------------------------------------------------------
+# Internal utilities for dict-safe field access
+# ---------------------------------------------------------------------------
+
+
+def _get_field(obj: Any, field: str, default: Any = None) -> Any:
+ """Get *field* from a model instance or a plain dict."""
+ if isinstance(obj, dict):
+ return obj.get(field, default)
+ return getattr(obj, field, default)
+
+
+def _is_type(obj: Any, model_cls: type, type_value: str) -> bool:
+ """Check whether *obj* is *model_cls* or a dict with matching ``type``."""
+ if isinstance(obj, model_cls):
+ return True
+ if isinstance(obj, dict):
+ return obj.get("type") == type_value
+ return False
+
+
+# ---------------------------------------------------------------------------
+# CreateResponse helpers
+# ---------------------------------------------------------------------------
+
+
+def get_conversation_id(request: CreateResponse) -> Optional[str]:
+ """Extract conversation ID from ``CreateResponse.conversation``.
+
+ If conversation is a plain string, returns it directly.
+ If it is a :class:`ConversationParam_2` object, returns its ``id`` field.
+
+ :param request: The create-response request.
+ :type request: CreateResponse
+ :returns: The conversation ID, or ``None`` if no conversation is set.
+ :rtype: str | None
+ """
+ conv = request.conversation
+ if conv is None:
+ return None
+ if isinstance(conv, str):
+ return conv or None
+ # Model instance or plain dict
+ cid = _get_field(conv, "id")
+ return str(cid) if cid else None
+
+
+def get_input_expanded(request: CreateResponse) -> list[Item]:
+ """Normalize ``CreateResponse.input`` into a list of :class:`Item`.
+
+ - If input is ``None``, returns ``[]``.
+ - If input is a string, wraps it as a single :class:`ItemMessage` with
+ ``role=user`` and :class:`MessageContentInputTextContent`.
+ - If input is already a list, each element is deserialized into the appropriate
+ :class:`Item` subclass (e.g., :class:`ItemMessage`, :class:`FunctionCallOutputItemParam`).
+
+ :param request: The create-response request.
+ :type request: CreateResponse
+ :returns: A list of typed input items.
+ :rtype: list[Item]
+ """
+ inp = request.input
+ if inp is None:
+ return []
+ if isinstance(inp, str):
+ return [
+ ItemMessage(
+ role=MessageRole.USER,
+ content=[MessageContentInputTextContent(text=inp)],
+ )
+ ]
+ # Normalize items: per the OpenAI spec, items without an explicit
+ # ``type`` default to ``"message"`` (C-MSG-01 compliance).
+ items: list[Item] = []
+ for raw in inp:
+ d = dict(raw) if isinstance(raw, dict) else raw
+ if isinstance(d, dict) and "type" not in d:
+ d = {**d, "type": "message"}
+ if isinstance(d, Item):
+ items.append(d)
+ else:
+ items.append(_deserialize(Item, d))
+ return items
+
+
+def _get_input_text(request: CreateResponse) -> str:
+ """Extract all text content from ``CreateResponse.input`` as a single string.
+
+ Internal helper — callers should use :meth:`ResponseContext.get_input_text`
+ instead, which handles item-reference resolution.
+
+ :param request: The create-response request.
+ :type request: CreateResponse
+ :returns: The combined text content, or ``""`` if no text found.
+ :rtype: str
+ """
+ items = get_input_expanded(request)
+ texts: list[str] = []
+ for item in items:
+ if _is_type(item, ItemMessage, "message"):
+ for part in _get_field(item, "content") or []:
+ if _is_type(part, MessageContentInputTextContent, "input_text"):
+ text = _get_field(part, "text")
+ if text is not None:
+ texts.append(text)
+ return "\n".join(texts)
+
+
+def get_tool_choice_expanded(request: CreateResponse) -> Optional[ToolChoiceParam]:
+ """Expand ``CreateResponse.tool_choice`` into a typed :class:`ToolChoiceParam`.
+
+ String shorthands (``"auto"``, ``"required"``) are expanded to
+ :class:`ToolChoiceAllowed` with the corresponding mode.
+ ``"none"`` returns ``None``.
+
+ :param request: The create-response request.
+ :type request: CreateResponse
+ :returns: The typed tool choice, or ``None`` if unset or ``"none"``.
+ :rtype: ToolChoiceParam | None
+ :raises ValueError: If the tool_choice value is an unrecognized string.
+ """
+ tc = request.tool_choice
+ if tc is None:
+ return None
+ if isinstance(tc, ToolChoiceParam):
+ return tc
+ if isinstance(tc, str):
+ normalized = tc if not isinstance(tc, ToolChoiceOptions) else tc.value
+ if normalized in ("auto", "required"):
+ return ToolChoiceAllowed(mode=normalized, tools=[])
+ if normalized == "none":
+ return None
+ raise ValueError(
+ f"Unrecognized tool_choice string value: '{normalized}'. Expected 'auto', 'required', or 'none'."
+ )
+ # dict fallback — wrap in ToolChoiceParam if it has a "type" key
+ if isinstance(tc, dict) and "type" in tc:
+ return ToolChoiceParam(tc)
+ return None
+
+
+def get_conversation_expanded(request: CreateResponse) -> Optional[ConversationParam_2]:
+ """Expand ``CreateResponse.conversation`` into a typed :class:`ConversationParam_2`.
+
+ A plain string is treated as the conversation ID.
+
+ :param request: The create-response request.
+ :type request: CreateResponse
+ :returns: The typed conversation parameter, or ``None``.
+ :rtype: ConversationParam_2 | None
+ """
+ conv = request.conversation
+ if conv is None:
+ return None
+ if isinstance(conv, ConversationParam_2):
+ return conv
+ if isinstance(conv, str):
+ return ConversationParam_2(id=conv) if conv else None
+ # dict fallback
+ if isinstance(conv, dict):
+ cid = conv.get("id")
+ return ConversationParam_2(id=cid) if cid else None
+ return None
+
+
+# ---------------------------------------------------------------------------
+# Response helpers
+# ---------------------------------------------------------------------------
+
+
+def get_instruction_items(response: ResponseObject) -> list[Item]:
+ """Expand ``Response.instructions`` into a list of :class:`Item`.
+
+ - If instructions is ``None``, returns ``[]``.
+ - If instructions is a string, wraps it as a single :class:`ItemMessage`
+ with ``role=developer`` and :class:`MessageContentInputTextContent`.
+ - If instructions is already a list, returns a shallow copy.
+
+ :param response: The response object.
+ :type response: ResponseObject
+ :returns: A list of instruction items.
+ :rtype: list[Item]
+ """
+ instr = response.instructions
+ if instr is None:
+ return []
+ if isinstance(instr, str):
+ return [
+ ItemMessage(
+ id="",
+ status="completed",
+ role=MessageRole.DEVELOPER,
+ content=[MessageContentInputTextContent(text=instr)],
+ ).as_dict()
+ ]
+ return list(instr)
+
+
+# ---------------------------------------------------------------------------
+# OutputItem helpers
+# ---------------------------------------------------------------------------
+
+
+def get_output_item_id(item: OutputItem) -> str:
+ """Extract the ``id`` field from any :class:`OutputItem` subtype.
+
+ The base :class:`OutputItem` class does not define ``id``, but all
+ concrete subtypes do. Falls back to dict-style access for unknown
+ subtypes.
+
+ :param item: The output item to extract the ID from.
+ :type item: OutputItem
+ :returns: The item's ID.
+ :rtype: str
+ :raises ValueError: If the item has no valid ``id``.
+ """
+ item_id = _get_field(item, "id")
+ if item_id is not None:
+ return str(item_id)
+
+ # Fallback: Model subclass supports Mapping protocol
+ try:
+ raw_id = item["id"] # type: ignore[index]
+ if raw_id is not None:
+ return str(raw_id)
+ except (KeyError, TypeError):
+ pass
+
+ raise ValueError(
+ f"OutputItem of type '{type(item).__name__}' does not have a valid id. "
+ "Ensure the id property is set before accessing it."
+ )
+
+
+# ---------------------------------------------------------------------------
+# ItemMessage helpers
+# ---------------------------------------------------------------------------
+
+
+def get_content_expanded(message: ItemMessage) -> list[MessageContent]:
+ """Return the typed content list from an :class:`ItemMessage`.
+
+ In Python the generated ``ItemMessage.content`` is already
+ ``list[MessageContent]``, so this is a convenience passthrough that
+ returns an empty list when content is ``None``.
+
+ :param message: The item message.
+ :type message: ItemMessage
+ :returns: The message content parts.
+ :rtype: list[MessageContent]
+ """
+ content = _get_field(message, "content")
+ return list(content) if content else []
+
+
+# ---------------------------------------------------------------------------
+# Item → OutputItem conversion
+# ---------------------------------------------------------------------------
+
+# Item types whose OutputItem counterpart has a ``status`` field AND should
+# be set to ``"completed"`` during conversion. This is an **opt-in** list:
+# any item type NOT listed here will NOT receive a status value. This
+# prevents newly-added item types from accidentally gaining a status field.
+_COMPLETED_STATUS_ITEM_TYPES = frozenset(
+ {
+ "message", # OutputItemMessage
+ "function_call", # OutputItemFunctionToolCall
+ "function_call_output", # FunctionToolCallOutputResource
+ "computer_call", # OutputItemComputerToolCall
+ "computer_call_output", # OutputItemComputerToolCallOutputResource
+ "file_search_call", # OutputItemFileSearchToolCall
+ "web_search_call", # OutputItemWebSearchToolCall
+ "image_generation_call", # OutputItemImageGenToolCall
+ "code_interpreter_call", # OutputItemCodeInterpreterToolCall
+ "local_shell_call", # OutputItemLocalShellToolCall
+ "local_shell_call_output", # OutputItemLocalShellToolCallOutput
+ "shell_call", # OutputItemFunctionShellCall
+ "shell_call_output", # OutputItemFunctionShellCallOutput
+ "mcp_call", # OutputItemMcpToolCall
+ "reasoning", # OutputItemReasoningItem
+ }
+)
+
+# Item types whose original status should be preserved as-is rather than
+# overwritten. The enum string values are identical between Param and
+# Output models (e.g. ``"in_progress"``, ``"completed"``), so the dict
+# representation transfers directly.
+_PRESERVE_STATUS_ITEM_TYPES = frozenset(
+ {
+ "output_message", # ItemOutputMessage – status semantics preserved
+ "apply_patch_call", # ApplyPatchToolCallItemParam → ApplyPatchCallStatus
+ "apply_patch_call_output", # ApplyPatchToolCallOutputItemParam → ApplyPatchCallOutputStatus
+ }
+)
+
+
+def to_output_item(item: Item, response_id: str | None = None) -> OutputItem | None:
+ """Convert an :class:`Item` to the corresponding :class:`OutputItem`.
+
+ Generates a type-specific ID via :meth:`IdGenerator.new_item_id` and
+ applies status according to per-type rules:
+
+ * **Completed** — explicitly listed types get ``status = "completed"``.
+ * **Preserve status** — types whose original status must be kept
+ (``ItemOutputMessage``, ``ApplyPatch*``).
+ * **No status** — all other types (including any future types) receive
+ no status value. This opt-in design prevents newly-added item types
+ from accidentally gaining a status field.
+
+ Returns ``None`` for :class:`ItemReferenceParam` or unrecognised types.
+
+ The conversion leverages ``_deserialize(OutputItem, data)`` which
+ resolves the correct subtype via the ``type`` discriminator. All 24
+ input/output discriminator pairs share the same string values, so the
+ dict representation produced by ``dict(item)`` is directly compatible
+ with ``OutputItem`` deserialization.
+
+ :param item: The input item to convert.
+ :type item: Item
+ :param response_id: An existing ID (typically the response ID) used as
+ a partition-key hint for the generated item ID.
+ :type response_id: str | None
+ :returns: The converted output item, or ``None`` if non-convertible.
+ :rtype: OutputItem | None
+ """
+ # Avoid circular import — IdGenerator lives one level up.
+ from .._id_generator import IdGenerator
+
+ item_id = IdGenerator.new_item_id(item, response_id)
+ if item_id is None:
+ return None # ItemReferenceParam or unrecognised
+
+ data = dict(item)
+ data["id"] = item_id
+
+ item_type = data.get("type", "")
+
+ # ── Status handling (opt-in) ─────────────────────────────────────
+ if item_type in _COMPLETED_STATUS_ITEM_TYPES:
+ data["status"] = "completed"
+ elif item_type in _PRESERVE_STATUS_ITEM_TYPES:
+ pass # keep the original status from the input item
+
+ return _deserialize(OutputItem, data)
+
+
+def to_item(output_item: OutputItem) -> Item | None:
+ """Convert an :class:`OutputItem` back to the corresponding :class:`Item`.
+
+ Both hierarchies share the same ``type`` discriminator values, so
+ serialising an :class:`OutputItem` to a dict and deserialising as
+ :class:`Item` produces the correct concrete subtype (e.g.
+ :class:`OutputItemMessage` → :class:`ItemMessage`).
+
+ Returns ``None`` if the output item type has no :class:`Item` counterpart.
+
+ :param output_item: The output item to convert.
+ :type output_item: OutputItem
+ :returns: The corresponding input item, or ``None``.
+ :rtype: Item | None
+ """
+ try:
+ data = dict(output_item)
+ return _deserialize(Item, data)
+ except Exception: # pylint: disable=broad-except
+ return None
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/errors.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/errors.py
new file mode 100644
index 000000000000..9a92ec5bef38
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/errors.py
@@ -0,0 +1,64 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Error model types for request validation failures."""
+
+from __future__ import annotations
+
+from typing import Any
+
+from azure.ai.agentserver.responses.models._generated import ApiErrorResponse, Error
+
+
+class RequestValidationError(ValueError):
+ """Represents a client-visible request validation failure."""
+
+ def __init__(
+ self,
+ message: str,
+ *,
+ code: str = "invalid_request",
+ param: str | None = None,
+ error_type: str = "invalid_request_error",
+ debug_info: dict[str, Any] | None = None,
+ details: list[dict[str, str]] | None = None,
+ ) -> None:
+ super().__init__(message)
+ self.message = message
+ self.code = code
+ self.param = param
+ self.error_type = error_type
+ self.debug_info = debug_info
+ self.details = details
+
+ def to_error(self) -> Error:
+ """Convert this validation error to the generated ``Error`` model.
+
+ :returns: An ``Error`` instance populated from this validation error's fields.
+ :rtype: Error
+ """
+ detail_errors: list[Error] | None = None
+ if self.details:
+ detail_errors = [
+ Error(
+ code=d.get("code", "invalid_value"),
+ message=d.get("message", ""),
+ param=d.get("param"),
+ type="invalid_request_error",
+ )
+ for d in self.details
+ ]
+ return Error(
+ code=self.code,
+ message=self.message,
+ param=self.param,
+ type=self.error_type,
+ details=detail_errors,
+ )
+
+ def to_api_error_response(self) -> ApiErrorResponse:
+ """Convert this validation error to the generated API error envelope.
+
+ :returns: An ``ApiErrorResponse`` wrapping the generated ``Error``.
+ :rtype: ApiErrorResponse
+ """
+ return ApiErrorResponse(error=self.to_error())
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/runtime.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/runtime.py
new file mode 100644
index 000000000000..3337f48bef5c
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/models/runtime.py
@@ -0,0 +1,366 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Runtime domain models for response sessions and stream events."""
+
+from __future__ import annotations
+
+import asyncio # pylint: disable=do-not-import-asyncio
+from copy import deepcopy
+from dataclasses import dataclass, field
+from datetime import datetime, timezone
+from typing import TYPE_CHECKING, Any, Literal, Mapping
+
+from ._generated import AgentReference, OutputItem, ResponseObject, ResponseStreamEvent, ResponseStreamEventType
+
+if TYPE_CHECKING:
+ from .._response_context import ResponseContext
+ from ..hosting._event_subject import _ResponseEventSubject
+
+EVENT_TYPE = ResponseStreamEventType
+
+ResponseStatus = Literal["queued", "in_progress", "completed", "failed", "cancelled", "incomplete"]
+TerminalResponseStatus = Literal["completed", "failed", "cancelled", "incomplete"]
+
+
+@dataclass(frozen=True, slots=True)
+class ResponseModeFlags:
+ """Execution mode flags captured from the create request."""
+
+ stream: bool
+ store: bool
+ background: bool
+
+
+@dataclass(slots=True)
+class StreamEventRecord:
+ """A persisted record for one emitted stream event."""
+
+ sequence_number: int
+ event_type: str
+ payload: Mapping[str, Any]
+ emitted_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
+
+ @property
+ def terminal(self) -> bool:
+ """Return True when this event is one of the terminal response events."""
+ return self.event_type in {
+ EVENT_TYPE.RESPONSE_COMPLETED.value,
+ EVENT_TYPE.RESPONSE_FAILED.value,
+ EVENT_TYPE.RESPONSE_INCOMPLETE.value,
+ }
+
+ @classmethod
+ def from_generated(cls, event: ResponseStreamEvent, payload: Mapping[str, Any]) -> "StreamEventRecord":
+ """Create a stream event record from a generated response stream event model."""
+ return cls(sequence_number=event.sequence_number, event_type=event.type, payload=payload)
+
+
+class ResponseExecution: # pylint: disable=too-many-instance-attributes
+ """Lightweight pipeline state for one response execution.
+
+ This type intentionally does not own persisted stream history. Stream replay
+ concerns are modeled separately in :class:`StreamReplayState`.
+ """
+
+ def __init__(
+ self,
+ *,
+ response_id: str,
+ mode_flags: ResponseModeFlags,
+ created_at: datetime | None = None,
+ updated_at: datetime | None = None,
+ completed_at: datetime | None = None,
+ status: ResponseStatus = "in_progress",
+ response: ResponseObject | None = None,
+ execution_task: asyncio.Task[Any] | None = None,
+ cancel_requested: bool = False,
+ client_disconnected: bool = False,
+ response_created_seen: bool = False,
+ subject: _ResponseEventSubject | None = None,
+ cancel_signal: asyncio.Event | None = None,
+ input_items: list[OutputItem] | None = None,
+ previous_response_id: str | None = None,
+ response_context: ResponseContext | None = None,
+ initial_model: str | None = None,
+ initial_agent_reference: AgentReference | dict[str, Any] | None = None,
+ ) -> None:
+ self.response_id = response_id
+ self.mode_flags = mode_flags
+ self.created_at = created_at if created_at is not None else datetime.now(timezone.utc)
+ self.updated_at = updated_at if updated_at is not None else datetime.now(timezone.utc)
+ self.completed_at = completed_at
+ self.status = status
+ self.response = response
+ self.execution_task = execution_task
+ self.cancel_requested = cancel_requested
+ self.client_disconnected = client_disconnected
+ self.response_created_seen = response_created_seen
+ self.subject = subject
+ self.cancel_signal = cancel_signal if cancel_signal is not None else asyncio.Event()
+ self.input_items: list[OutputItem] = input_items if input_items is not None else []
+ self.previous_response_id = previous_response_id
+ self.response_context = response_context
+ self.initial_model = initial_model
+ self.initial_agent_reference = initial_agent_reference or {}
+ self.response_created_signal: asyncio.Event = asyncio.Event()
+ self.response_failed_before_events: bool = False
+
+ def transition_to(self, next_status: ResponseStatus) -> None:
+ """Transition this execution to a valid lifecycle status.
+
+ Updates ``status``, ``updated_at``, and ``completed_at`` (for terminal states).
+ Re-entering the current status is a no-op that only refreshes ``updated_at``.
+
+ :param next_status: The target lifecycle status.
+ :type next_status: ResponseStatus
+ :raises ValueError: If the requested transition is not allowed.
+ """
+ allowed: dict[ResponseStatus, set[ResponseStatus]] = {
+ "queued": {"in_progress", "failed"},
+ "in_progress": {"completed", "failed", "cancelled", "incomplete"},
+ "completed": set(),
+ "failed": set(),
+ "cancelled": set(),
+ "incomplete": set(),
+ }
+
+ if next_status == self.status:
+ self.updated_at = datetime.now(timezone.utc)
+ return
+
+ if next_status not in allowed[self.status]:
+ raise ValueError(f"invalid status transition: {self.status} -> {next_status}")
+
+ self.status = next_status
+ now = datetime.now(timezone.utc)
+ self.updated_at = now
+ if self.is_terminal:
+ self.completed_at = now
+
+ @property
+ def is_terminal(self) -> bool:
+ """Return whether the execution has reached a terminal state.
+
+ :returns: True if the status is one of completed, failed, cancelled, or incomplete.
+ :rtype: bool
+ """
+ return self.status in {"completed", "failed", "cancelled", "incomplete"}
+
+ def set_response_snapshot(self, response: ResponseObject) -> None:
+ """Replace the current response snapshot from handler-emitted events.
+
+ :param response: The latest response snapshot to store.
+ :type response: ResponseObject
+ """
+ self.response = response
+ self.updated_at = datetime.now(timezone.utc)
+
+ @property
+ def replay_enabled(self) -> bool:
+ """SSE replay is only available for background+stream+store responses.
+
+ :returns: True if this execution supports SSE replay.
+ :rtype: bool
+ """
+ return self.mode_flags.stream and self.mode_flags.store and self.mode_flags.background
+
+ @property
+ def visible_via_get(self) -> bool:
+ """Non-streaming stored responses are retrievable via GET after completion.
+
+ For background non-stream responses, visibility is deferred until
+ ``response.created`` is processed (FR-001: response not accessible
+ before the handler emits ``response.created``).
+
+ :returns: True if this execution can be retrieved via GET.
+ :rtype: bool
+ """
+ if not self.mode_flags.store:
+ return False
+ # FR-001: bg non-stream responses are not visible until response.created.
+ if self.mode_flags.background and not self.mode_flags.stream:
+ return self.response_created_signal.is_set()
+ return True
+
+ def apply_event(self, normalized: ResponseStreamEvent, all_events: list[ResponseStreamEvent]) -> None:
+ """Apply a normalised stream event — updates self.response and self.status.
+
+ Does nothing if the execution is already ``"cancelled"``.
+
+ :param normalized: The normalised event (``ResponseStreamEvent`` model instance).
+ :type normalized: ResponseStreamEvent
+ :param all_events: The full ordered list of handler events seen so far
+ (used to extract the latest response snapshot).
+ :type all_events: list[ResponseStreamEvent]
+ """
+ # Lazy imports to avoid circular dependency (models.runtime ← streaming._helpers ← models.__init__)
+ from ..streaming._helpers import (
+ _extract_response_snapshot_from_events, # pylint: disable=import-outside-toplevel
+ )
+ from ..streaming._internals import _RESPONSE_SNAPSHOT_EVENT_TYPES # pylint: disable=import-outside-toplevel
+
+ if self.status == "cancelled":
+ return
+ event_type = normalized.get("type")
+ if event_type in _RESPONSE_SNAPSHOT_EVENT_TYPES:
+ agent_reference = (
+ self.response.get("agent_reference") if self.response is not None else {} # type: ignore[union-attr]
+ ) or {}
+ model = self.response.get("model") if self.response is not None else None # type: ignore[union-attr]
+ snapshot = _extract_response_snapshot_from_events(
+ all_events,
+ response_id=self.response_id,
+ agent_reference=agent_reference,
+ model=model,
+ )
+ self.set_response_snapshot(ResponseObject(snapshot))
+ resolved = snapshot.get("status")
+ if isinstance(resolved, str):
+ self.status = resolved
+ elif event_type == EVENT_TYPE.RESPONSE_OUTPUT_ITEM_ADDED.value:
+ item = normalized.get("item")
+ if item is not None and self.response is not None:
+ item_dict = item.as_dict() if hasattr(item, "as_dict") else item
+ if isinstance(item_dict, dict):
+ output = self.response.setdefault("output", [])
+ if isinstance(output, list):
+ output.append(deepcopy(item_dict))
+ elif event_type == EVENT_TYPE.RESPONSE_OUTPUT_ITEM_DONE.value:
+ item = normalized.get("item")
+ output_index = normalized.get("output_index")
+ if item is not None and isinstance(output_index, int) and self.response is not None:
+ item_dict = item.as_dict() if hasattr(item, "as_dict") else item
+ if isinstance(item_dict, dict):
+ output = self.response.get("output", [])
+ if isinstance(output, list) and 0 <= output_index < len(output):
+ output[output_index] = deepcopy(item_dict)
+
+ @property
+ def agent_reference(self) -> AgentReference | dict[str, Any]:
+ """Extract agent_reference from the stored response snapshot.
+
+ :returns: The agent reference model or dict, or empty dict if no response snapshot is set.
+ :rtype: AgentReference | dict[str, Any]
+ """
+ if self.response is not None:
+ return self.response.get("agent_reference") or {} # type: ignore[return-value]
+ return {}
+
+ @property
+ def model(self) -> str | None:
+ """Extract model name from the stored response snapshot.
+
+ :returns: The model name, or ``None`` if no response snapshot is set.
+ :rtype: str | None
+ """
+ if self.response is not None:
+ return self.response.get("model") # type: ignore[return-value]
+ return None
+
+
+class StreamReplayState:
+ """Persisted stream replay state for one response identifier."""
+
+ def __init__(
+ self,
+ *,
+ response_id: str,
+ events: list[StreamEventRecord] | None = None,
+ ) -> None:
+ self.response_id = response_id
+ self.events = events if events is not None else []
+
+ def append(self, event: StreamEventRecord) -> None:
+ """Append a stream event and enforce replay sequence integrity.
+
+ :param event: The stream event record to append.
+ :type event: StreamEventRecord
+ :raises ValueError: If the sequence number is not strictly increasing or
+ a terminal event has already been recorded.
+ """
+ if self.events and event.sequence_number <= self.events[-1].sequence_number:
+ raise ValueError("stream event sequence numbers must be strictly increasing")
+
+ if self.events and self.events[-1].terminal:
+ raise ValueError("cannot append events after a terminal event")
+
+ self.events.append(event)
+
+ @property
+ def terminal_event_seen(self) -> bool:
+ """Return whether replay state has already recorded a terminal event.
+
+ :returns: True if the last recorded event is terminal, False otherwise.
+ :rtype: bool
+ """
+ return bool(self.events and self.events[-1].terminal)
+
+
+def build_cancelled_response(
+ response_id: str,
+ agent_reference: AgentReference | dict[str, Any],
+ model: str | None,
+ created_at: datetime | None = None,
+) -> ResponseObject:
+ """Build a Response object representing a cancelled terminal state.
+
+ :param response_id: The response identifier.
+ :type response_id: str
+ :param agent_reference: The agent reference model or metadata dict.
+ :type agent_reference: AgentReference | dict[str, Any]
+ :param model: Optional model identifier.
+ :type model: str | None
+ :param created_at: Optional creation timestamp; defaults to now if omitted.
+ :type created_at: datetime | None
+ :returns: A Response object with status ``"cancelled"`` and empty output.
+ :rtype: ResponseObject
+ """
+ payload: dict[str, Any] = {
+ "id": response_id,
+ "response_id": response_id,
+ "agent_reference": deepcopy(agent_reference),
+ "object": "response",
+ "status": "cancelled",
+ "model": model,
+ "output": [],
+ }
+ if created_at is not None:
+ payload["created_at"] = created_at.isoformat()
+ return ResponseObject(payload)
+
+
+def build_failed_response(
+ response_id: str,
+ agent_reference: AgentReference | dict[str, Any],
+ model: str | None,
+ created_at: datetime | None = None,
+ error_message: str = "An internal server error occurred.",
+) -> ResponseObject:
+ """Build a ResponseObject representing a failed terminal state.
+
+ :param response_id: The response identifier.
+ :type response_id: str
+ :param agent_reference: The agent reference model or metadata dict.
+ :type agent_reference: AgentReference | dict[str, Any]
+ :param model: Optional model identifier.
+ :type model: str | None
+ :param created_at: Optional creation timestamp; defaults to now if omitted.
+ :type created_at: datetime | None
+ :param error_message: Human-readable error message.
+ :type error_message: str
+ :returns: A Response object with status ``"failed"`` and empty output.
+ :rtype: ResponseObject
+ """
+ payload: dict[str, Any] = {
+ "id": response_id,
+ "response_id": response_id,
+ "agent_reference": deepcopy(agent_reference),
+ "object": "response",
+ "status": "failed",
+ "model": model,
+ "output": [],
+ "error": {"code": "server_error", "message": error_message},
+ }
+ if created_at is not None:
+ payload["created_at"] = created_at.isoformat()
+ return ResponseObject(payload)
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/py.typed b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/py.typed
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/store/__init__.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/store/__init__.py
new file mode 100644
index 000000000000..9a0454564dbb
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/store/__init__.py
@@ -0,0 +1,2 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/store/_base.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/store/_base.py
new file mode 100644
index 000000000000..83adfe6bed52
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/store/_base.py
@@ -0,0 +1,212 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Persistence abstraction for response execution and replay state."""
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Iterable, Protocol, runtime_checkable
+
+from ..models._generated import OutputItem, ResponseObject, ResponseStreamEvent
+
+if TYPE_CHECKING:
+ from .._response_context import IsolationContext
+
+
+@runtime_checkable
+class ResponseProviderProtocol(Protocol):
+ """Protocol for response storage providers.
+
+ Implementations provide response envelope storage plus input/history item lookup.
+
+ Every operation accepts an optional ``isolation`` parameter (S-018).
+ Implementations MUST use it to partition data in multi-tenant
+ deployments. When ``None``, the provider operates without tenant
+ scoping (suitable for local development).
+ """
+
+ async def create_response(
+ self,
+ response: ResponseObject,
+ input_items: Iterable[OutputItem] | None,
+ history_item_ids: Iterable[str] | None,
+ *,
+ isolation: IsolationContext | None = None,
+ ) -> None:
+ """Persist a new response envelope and optional input/history references.
+
+ :param response: The response envelope to persist.
+ :type response: ~azure.ai.agentserver.responses.models._generated.ResponseObject
+ :param input_items: Optional resolved output items to associate with the response.
+ :type input_items: Iterable[OutputItem] | None
+ :param history_item_ids: Optional history item IDs to link to the response.
+ :type history_item_ids: Iterable[str] | None
+ :keyword isolation: Isolation context for multi-tenant partitioning.
+ :paramtype isolation: ~azure.ai.agentserver.responses.IsolationContext | None
+ :rtype: None
+ """
+
+ async def get_response(self, response_id: str, *, isolation: IsolationContext | None = None) -> ResponseObject:
+ """Load one response envelope by ID.
+
+ :param response_id: The unique identifier of the response to retrieve.
+ :type response_id: str
+ :keyword isolation: Isolation context for multi-tenant partitioning.
+ :paramtype isolation: ~azure.ai.agentserver.responses.IsolationContext | None
+ :returns: The response envelope matching the given ID.
+ :rtype: ~azure.ai.agentserver.responses.models._generated.ResponseObject
+ :raises KeyError: If the response does not exist.
+ """
+ ...
+
+ async def update_response(self, response: ResponseObject, *, isolation: IsolationContext | None = None) -> None:
+ """Persist an updated response envelope.
+
+ :param response: The response envelope with updated fields to persist.
+ :type response: ~azure.ai.agentserver.responses.models._generated.ResponseObject
+ :keyword isolation: Isolation context for multi-tenant partitioning.
+ :paramtype isolation: ~azure.ai.agentserver.responses.IsolationContext | None
+ :rtype: None
+ """
+
+ async def delete_response(self, response_id: str, *, isolation: IsolationContext | None = None) -> None:
+ """Delete a response envelope by ID.
+
+ :param response_id: The unique identifier of the response to delete.
+ :type response_id: str
+ :keyword isolation: Isolation context for multi-tenant partitioning.
+ :paramtype isolation: ~azure.ai.agentserver.responses.IsolationContext | None
+ :rtype: None
+ :raises KeyError: If the response does not exist.
+ """
+
+ async def get_input_items(
+ self,
+ response_id: str,
+ limit: int = 20,
+ ascending: bool = False,
+ after: str | None = None,
+ before: str | None = None,
+ *,
+ isolation: IsolationContext | None = None,
+ ) -> list[OutputItem]:
+ """Get response input/history items for one response ID using cursor pagination.
+
+ :param response_id: The unique identifier of the response whose items to fetch.
+ :type response_id: str
+ :param limit: Maximum number of items to return. Defaults to 20.
+ :type limit: int
+ :param ascending: Whether to return items in ascending order. Defaults to False.
+ :type ascending: bool
+ :param after: Cursor ID; only return items after this ID.
+ :type after: str | None
+ :param before: Cursor ID; only return items before this ID.
+ :type before: str | None
+ :keyword isolation: Isolation context for multi-tenant partitioning.
+ :paramtype isolation: ~azure.ai.agentserver.responses.IsolationContext | None
+ :returns: A list of output items matching the pagination criteria.
+ :rtype: list[OutputItem]
+ """
+ ...
+
+ async def get_items(
+ self, item_ids: Iterable[str], *, isolation: IsolationContext | None = None
+ ) -> list[OutputItem | None]:
+ """Get items by ID (missing IDs produce ``None`` entries).
+
+ :param item_ids: The item identifiers to look up.
+ :type item_ids: Iterable[str]
+ :keyword isolation: Isolation context for multi-tenant partitioning.
+ :paramtype isolation: ~azure.ai.agentserver.responses.IsolationContext | None
+ :returns: A list of output items in the same order as *item_ids*; missing items are ``None``.
+ :rtype: list[OutputItem | None]
+ """
+ ...
+
+ async def get_history_item_ids(
+ self,
+ previous_response_id: str | None,
+ conversation_id: str | None,
+ limit: int,
+ *,
+ isolation: IsolationContext | None = None,
+ ) -> list[str]:
+ """Get history item IDs for a conversation chain scope.
+
+ :param previous_response_id: Optional response ID to chain history from.
+ :type previous_response_id: str | None
+ :param conversation_id: Optional conversation ID to scope history lookup.
+ :type conversation_id: str | None
+ :param limit: Maximum number of history item IDs to return.
+ :type limit: int
+ :keyword isolation: Isolation context for multi-tenant partitioning.
+ :paramtype isolation: ~azure.ai.agentserver.responses.IsolationContext | None
+ :returns: A list of history item IDs within the given scope.
+ :rtype: list[str]
+ """
+ ...
+
+
+@runtime_checkable
+class ResponseStreamProviderProtocol(Protocol):
+ """Protocol for providers that can persist and replay SSE stream events.
+
+ Implement this protocol alongside :class:`ResponseProviderProtocol` to enable
+ SSE replay for responses that are no longer resident in the in-process runtime
+ state (for example, after a process restart).
+ """
+
+ async def save_stream_events(
+ self,
+ response_id: str,
+ events: list[ResponseStreamEvent],
+ *,
+ isolation: IsolationContext | None = None,
+ ) -> None:
+ """Persist the complete ordered list of SSE events for a response.
+
+ Called once when the background+stream response reaches terminal state.
+ The *events* list contains ``ResponseStreamEvent`` model instances.
+
+ :param response_id: The unique identifier of the response.
+ :type response_id: str
+ :param events: Ordered list of event instances to persist.
+ :type events: list[ResponseStreamEvent]
+ :keyword isolation: Isolation context for multi-tenant partitioning.
+ :paramtype isolation: ~azure.ai.agentserver.responses.IsolationContext | None
+ :rtype: None
+ """
+
+ async def get_stream_events(
+ self,
+ response_id: str,
+ *,
+ isolation: IsolationContext | None = None,
+ ) -> list[ResponseStreamEvent] | None:
+ """Retrieve the persisted SSE events for a response.
+
+ :param response_id: The unique identifier of the response whose events to retrieve.
+ :type response_id: str
+ :keyword isolation: Isolation context for multi-tenant partitioning.
+ :paramtype isolation: ~azure.ai.agentserver.responses.IsolationContext | None
+ :returns: The ordered list of event instances, or ``None`` if not found.
+ :rtype: list[ResponseStreamEvent] | None
+ """
+
+ async def delete_stream_events(
+ self,
+ response_id: str,
+ *,
+ isolation: IsolationContext | None = None,
+ ) -> None:
+ """Delete persisted SSE events for a response.
+
+ Called when a response is deleted via ``DELETE /responses/{id}``.
+ Implementations should remove any stored event data for the given
+ response. No-op if no events exist for the ID.
+
+ :param response_id: The unique identifier of the response whose events to remove.
+ :type response_id: str
+ :keyword isolation: Isolation context for multi-tenant partitioning.
+ :paramtype isolation: ~azure.ai.agentserver.responses.IsolationContext | None
+ :rtype: None
+ """
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/store/_foundry_errors.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/store/_foundry_errors.py
new file mode 100644
index 000000000000..67a942d67b41
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/store/_foundry_errors.py
@@ -0,0 +1,81 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Exception hierarchy for Foundry storage API errors."""
+
+from __future__ import annotations
+
+import json
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+ from azure.core.rest import HttpResponse
+
+
+class FoundryStorageError(Exception):
+ """Base class for errors returned by the Foundry storage API."""
+
+ def __init__(self, message: str) -> None:
+ super().__init__(message)
+ self.message = message
+
+
+class FoundryResourceNotFoundError(FoundryStorageError):
+ """Raised when the requested resource does not exist (HTTP 404)."""
+
+
+class FoundryBadRequestError(FoundryStorageError):
+ """Raised for invalid-request or conflict errors (HTTP 400, 409)."""
+
+
+class FoundryApiError(FoundryStorageError):
+ """Raised for all other non-success HTTP responses."""
+
+ def __init__(self, message: str, status_code: int) -> None:
+ super().__init__(message)
+ self.status_code = status_code
+
+
+def raise_for_storage_error(response: "HttpResponse") -> None:
+ """Raise an appropriate :class:`FoundryStorageError` subclass if *response* is not successful.
+
+ :param response: The HTTP response to inspect.
+ :type response: ~azure.core.rest.HttpResponse
+ :raises FoundryResourceNotFoundError: For HTTP 404.
+ :raises FoundryBadRequestError: For HTTP 400 or 409.
+ :raises FoundryApiError: For all other non-2xx responses.
+ """
+ status = response.status_code
+ if 200 <= status < 300:
+ return
+
+ message = _extract_error_message(response, status)
+
+ if status == 404:
+ raise FoundryResourceNotFoundError(message)
+ if status in (400, 409):
+ raise FoundryBadRequestError(message)
+ raise FoundryApiError(message, status)
+
+
+def _extract_error_message(response: "HttpResponse", status: int) -> str:
+ """Extract an error message from *response*, falling back to a generic string.
+
+ :param response: The HTTP response whose body is inspected.
+ :type response: ~azure.core.rest.HttpResponse
+ :param status: The HTTP status code of the response.
+ :type status: int
+ :returns: A human-readable error message string.
+ :rtype: str
+ """
+ try:
+ body = response.text()
+ if body:
+ data = json.loads(body)
+ error = data.get("error")
+ if isinstance(error, dict):
+ msg = error.get("message")
+ if msg:
+ return str(msg)
+ except Exception: # pylint: disable=broad-except
+ pass
+ return f"Foundry storage request failed with HTTP {status}."
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/store/_foundry_provider.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/store/_foundry_provider.py
new file mode 100644
index 000000000000..e9452d0aed44
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/store/_foundry_provider.py
@@ -0,0 +1,300 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""HTTP-backed Foundry storage provider for Azure AI Responses."""
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Any, Iterable
+from urllib.parse import quote as _url_quote
+
+from azure.core import AsyncPipelineClient
+from azure.core.credentials_async import AsyncTokenCredential
+from azure.core.pipeline import policies
+from azure.core.rest import HttpRequest
+
+from ..models._generated import OutputItem, ResponseObject # type: ignore[attr-defined]
+
+if TYPE_CHECKING:
+ from .._response_context import IsolationContext
+from ._foundry_errors import raise_for_storage_error
+from ._foundry_serializer import (
+ deserialize_history_ids,
+ deserialize_items_array,
+ deserialize_paged_items,
+ deserialize_response,
+ serialize_batch_request,
+ serialize_create_request,
+ serialize_response,
+)
+from ._foundry_settings import FoundryStorageSettings
+
+_FOUNDRY_TOKEN_SCOPE = "https://ai.azure.com/.default"
+_JSON_CONTENT_TYPE = "application/json; charset=utf-8"
+_USER_ISOLATION_HEADER = "x-agent-user-isolation-key"
+_CHAT_ISOLATION_HEADER = "x-agent-chat-isolation-key"
+
+
+def _encode(value: str) -> str:
+ return _url_quote(value, safe="")
+
+
+def _apply_isolation_headers(request: HttpRequest, isolation: IsolationContext | None) -> None:
+ """Add isolation key headers to an outbound HTTP request when present."""
+ if isolation is None:
+ return
+ if isolation.user_key is not None:
+ request.headers[_USER_ISOLATION_HEADER] = isolation.user_key
+ if isolation.chat_key is not None:
+ request.headers[_CHAT_ISOLATION_HEADER] = isolation.chat_key
+
+
+class FoundryStorageProvider:
+ """An HTTP-backed response storage provider that persists data via the Foundry storage API.
+
+ This class satisfies the
+ :class:`~azure.ai.agentserver.responses.store._base.ResponseProviderProtocol` structural
+ protocol. Obtain an instance through the constructor and supply it when building a
+ ``ResponsesServer``.
+
+ Uses :class:`~azure.core.AsyncPipelineClient` for HTTP transport, providing
+ built-in retry, logging, distributed tracing, and bearer-token authentication.
+
+ :param credential: An async credential used to obtain bearer tokens for the Foundry API.
+ :type credential: AsyncTokenCredential
+ :param settings: Storage settings. If omitted,
+ :meth:`~FoundryStorageSettings.from_env` is called automatically.
+ :type settings: FoundryStorageSettings | None
+
+ Example::
+
+ async with FoundryStorageProvider(credential=DefaultAzureCredential()) as provider:
+ app = ResponsesServer(handler=my_handler, provider=provider)
+ """
+
+ def __init__(
+ self,
+ credential: AsyncTokenCredential,
+ settings: FoundryStorageSettings | None = None,
+ ) -> None:
+ self._settings = settings or FoundryStorageSettings.from_env()
+ self._client: AsyncPipelineClient = AsyncPipelineClient(
+ base_url=self._settings.storage_base_url,
+ policies=[
+ policies.RequestIdPolicy(),
+ policies.HeadersPolicy(),
+ policies.UserAgentPolicy(
+ sdk_moniker="ai-agentserver-responses",
+ ),
+ policies.RetryPolicy(),
+ policies.AsyncBearerTokenCredentialPolicy(
+ credential,
+ _FOUNDRY_TOKEN_SCOPE,
+ ),
+ policies.ContentDecodePolicy(),
+ policies.DistributedTracingPolicy(),
+ policies.HttpLoggingPolicy(),
+ ],
+ )
+
+ # ------------------------------------------------------------------
+ # Async context-manager support
+ # ------------------------------------------------------------------
+
+ async def aclose(self) -> None:
+ """Close the underlying HTTP pipeline client."""
+ await self._client.close()
+
+ async def __aenter__(self) -> "FoundryStorageProvider":
+ return self
+
+ async def __aexit__(self, *args: Any) -> None:
+ await self.aclose()
+
+ # ------------------------------------------------------------------
+ # ResponseProviderProtocol implementation
+ # ------------------------------------------------------------------
+
+ async def create_response(
+ self,
+ response: ResponseObject,
+ input_items: Iterable[OutputItem] | None,
+ history_item_ids: Iterable[str] | None,
+ *,
+ isolation: IsolationContext | None = None,
+ ) -> None:
+ """Persist a new response with its associated input items and history.
+
+ :param response: The initial response snapshot.
+ :type response: ResponseObject
+ :param input_items: Resolved output items for this response turn.
+ :type input_items: Iterable[OutputItem] | None
+ :param history_item_ids: Item IDs from the prior conversation turn, if any.
+ :type history_item_ids: Iterable[str] | None
+ :keyword isolation: Isolation context for multi-tenant partitioning.
+ :paramtype isolation: ~azure.ai.agentserver.responses.IsolationContext | None
+ :raises FoundryApiError: On non-success HTTP response.
+ """
+ body = serialize_create_request(response, input_items, history_item_ids)
+ url = self._settings.build_url("responses")
+ request = HttpRequest("POST", url, content=body, headers={"Content-Type": _JSON_CONTENT_TYPE})
+ _apply_isolation_headers(request, isolation)
+ http_resp = await self._client.send_request(request)
+ raise_for_storage_error(http_resp)
+
+ async def get_response(self, response_id: str, *, isolation: IsolationContext | None = None) -> ResponseObject:
+ """Retrieve a stored response by its ID.
+
+ :param response_id: The response identifier.
+ :type response_id: str
+ :keyword isolation: Isolation context for multi-tenant partitioning.
+ :paramtype isolation: ~azure.ai.agentserver.responses.IsolationContext | None
+ :returns: The deserialized :class:`ResponseObject` model.
+ :rtype: ResponseObject
+ :raises FoundryResourceNotFoundError: If the response does not exist.
+ :raises FoundryApiError: On other non-success HTTP response.
+ """
+ url = self._settings.build_url(f"responses/{_encode(response_id)}")
+ request = HttpRequest("GET", url)
+ _apply_isolation_headers(request, isolation)
+ http_resp = await self._client.send_request(request)
+ raise_for_storage_error(http_resp)
+ return deserialize_response(http_resp.text())
+
+ async def update_response(self, response: ResponseObject, *, isolation: IsolationContext | None = None) -> None:
+ """Persist an updated response snapshot.
+
+ :param response: The updated response model. Must contain a valid ``id`` field.
+ :type response: ResponseObject
+ :keyword isolation: Isolation context for multi-tenant partitioning.
+ :paramtype isolation: ~azure.ai.agentserver.responses.IsolationContext | None
+ :raises FoundryResourceNotFoundError: If the response does not exist.
+ :raises FoundryApiError: On other non-success HTTP response.
+ """
+ response_id = str(response["id"]) # type: ignore[index]
+ body = serialize_response(response)
+ url = self._settings.build_url(f"responses/{_encode(response_id)}")
+ request = HttpRequest("POST", url, content=body, headers={"Content-Type": _JSON_CONTENT_TYPE})
+ _apply_isolation_headers(request, isolation)
+ http_resp = await self._client.send_request(request)
+ raise_for_storage_error(http_resp)
+
+ async def delete_response(self, response_id: str, *, isolation: IsolationContext | None = None) -> None:
+ """Delete a stored response and its associated data.
+
+ :param response_id: The response identifier.
+ :type response_id: str
+ :keyword isolation: Isolation context for multi-tenant partitioning.
+ :paramtype isolation: ~azure.ai.agentserver.responses.IsolationContext | None
+ :raises FoundryResourceNotFoundError: If the response does not exist.
+ :raises FoundryApiError: On other non-success HTTP response.
+ """
+ url = self._settings.build_url(f"responses/{_encode(response_id)}")
+ request = HttpRequest("DELETE", url)
+ _apply_isolation_headers(request, isolation)
+ http_resp = await self._client.send_request(request)
+ raise_for_storage_error(http_resp)
+
+ async def get_input_items(
+ self,
+ response_id: str,
+ limit: int = 20,
+ ascending: bool = False,
+ after: str | None = None,
+ before: str | None = None,
+ *,
+ isolation: IsolationContext | None = None,
+ ) -> list[OutputItem]:
+ """Retrieve a page of input items for the given response.
+
+ :param response_id: The response whose input items are being listed.
+ :type response_id: str
+ :param limit: Maximum number of items to return. Defaults to 20.
+ :type limit: int
+ :param ascending: ``True`` for oldest-first ordering; ``False`` (default) for newest-first.
+ :type ascending: bool
+ :param after: Start the page after this item ID (cursor-based pagination).
+ :type after: str | None
+ :param before: End the page before this item ID (cursor-based pagination).
+ :type before: str | None
+ :keyword isolation: Isolation context for multi-tenant partitioning.
+ :paramtype isolation: ~azure.ai.agentserver.responses.IsolationContext | None
+ :returns: A list of deserialized :class:`OutputItem` instances.
+ :rtype: list[OutputItem]
+ :raises FoundryResourceNotFoundError: If the response does not exist.
+ :raises FoundryApiError: On other non-success HTTP response.
+ """
+ extra: dict[str, str] = {
+ "limit": str(limit),
+ "order": "asc" if ascending else "desc",
+ }
+ if after is not None:
+ extra["after"] = after
+ if before is not None:
+ extra["before"] = before
+
+ url = self._settings.build_url(f"responses/{_encode(response_id)}/input_items", **extra)
+ request = HttpRequest("GET", url)
+ _apply_isolation_headers(request, isolation)
+ http_resp = await self._client.send_request(request)
+ raise_for_storage_error(http_resp)
+ return deserialize_paged_items(http_resp.text())
+
+ async def get_items(
+ self, item_ids: Iterable[str], *, isolation: IsolationContext | None = None
+ ) -> list[OutputItem | None]:
+ """Retrieve multiple items by their IDs in a single batch request.
+
+ Positions in the returned list correspond to positions in *item_ids*.
+ Entries are ``None`` where no item was found for the given ID.
+
+ :param item_ids: The item identifiers to retrieve.
+ :type item_ids: Iterable[str]
+ :keyword isolation: Isolation context for multi-tenant partitioning.
+ :paramtype isolation: ~azure.ai.agentserver.responses.IsolationContext | None
+ :returns: A list of :class:`OutputItem` instances (or ``None`` for missing items).
+ :rtype: list[OutputItem | None]
+ :raises FoundryApiError: On non-success HTTP response.
+ """
+ ids = list(item_ids)
+ body = serialize_batch_request(ids)
+ url = self._settings.build_url("items/batch/retrieve")
+ request = HttpRequest("POST", url, content=body, headers={"Content-Type": _JSON_CONTENT_TYPE})
+ _apply_isolation_headers(request, isolation)
+ http_resp = await self._client.send_request(request)
+ raise_for_storage_error(http_resp)
+ return deserialize_items_array(http_resp.text())
+
+ async def get_history_item_ids(
+ self,
+ previous_response_id: str | None,
+ conversation_id: str | None,
+ limit: int,
+ *,
+ isolation: IsolationContext | None = None,
+ ) -> list[str]:
+ """Retrieve the ordered list of item IDs that form the conversation history.
+
+ :param previous_response_id: The response whose prior turn should be the history anchor.
+ :type previous_response_id: str | None
+ :param conversation_id: An explicit conversation scope identifier, if available.
+ :type conversation_id: str | None
+ :param limit: Maximum number of item IDs to return.
+ :type limit: int
+ :keyword isolation: Isolation context for multi-tenant partitioning.
+ :paramtype isolation: ~azure.ai.agentserver.responses.IsolationContext | None
+ :returns: Ordered list of item ID strings.
+ :rtype: list[str]
+ :raises FoundryApiError: On non-success HTTP response.
+ """
+ extra: dict[str, str] = {"limit": str(limit)}
+ if previous_response_id is not None:
+ extra["previous_response_id"] = previous_response_id
+ if conversation_id is not None:
+ extra["conversation_id"] = conversation_id
+
+ url = self._settings.build_url("history/item_ids", **extra)
+ request = HttpRequest("GET", url)
+ _apply_isolation_headers(request, isolation)
+ http_resp = await self._client.send_request(request)
+ raise_for_storage_error(http_resp)
+ return deserialize_history_ids(http_resp.text())
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/store/_foundry_serializer.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/store/_foundry_serializer.py
new file mode 100644
index 000000000000..73ffa6783b64
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/store/_foundry_serializer.py
@@ -0,0 +1,114 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""JSON serialization helpers for Foundry storage envelope payloads."""
+
+from __future__ import annotations
+
+import json
+from typing import Any, Iterable
+
+from ..models._generated import OutputItem, ResponseObject # type: ignore[attr-defined]
+
+
+def serialize_create_request(
+ response: ResponseObject,
+ input_items: Iterable[OutputItem] | None,
+ history_item_ids: Iterable[str] | None,
+) -> bytes:
+ """Serialize a create-response request envelope to JSON bytes.
+
+ :param response: The initial response snapshot.
+ :type response: ResponseObject
+ :param input_items: Resolved output items to store alongside the response.
+ :type input_items: Iterable[OutputItem] | None
+ :param history_item_ids: Item IDs drawn from a prior conversation turn.
+ :type history_item_ids: Iterable[str] | None
+ :returns: UTF-8 encoded JSON body.
+ :rtype: bytes
+ """
+ payload: dict[str, Any] = {
+ "response": response.as_dict(),
+ "input_items": [item.as_dict() for item in (input_items or [])],
+ "history_item_ids": list(history_item_ids or []),
+ }
+ return json.dumps(payload).encode("utf-8")
+
+
+def serialize_response(response: ResponseObject) -> bytes:
+ """Serialize a single :class:`ResponseObject` snapshot to JSON bytes.
+
+ :param response: The response model to encode.
+ :type response: ResponseObject
+ :returns: UTF-8 encoded JSON body.
+ :rtype: bytes
+ """
+ return json.dumps(response.as_dict()).encode("utf-8")
+
+
+def serialize_batch_request(item_ids: list[str]) -> bytes:
+ """Serialize a batch-retrieve request to JSON bytes.
+
+ :param item_ids: Ordered list of item IDs to retrieve.
+ :type item_ids: list[str]
+ :returns: UTF-8 encoded JSON body.
+ :rtype: bytes
+ """
+ return json.dumps({"item_ids": item_ids}).encode("utf-8")
+
+
+def deserialize_response(body: str) -> ResponseObject:
+ """Deserialize a JSON response body into a :class:`ResponseObject` model.
+
+ :param body: The raw JSON response text from the storage API.
+ :type body: str
+ :returns: A populated :class:`ResponseObject` model.
+ :rtype: ResponseObject
+ """
+ return ResponseObject(json.loads(body)) # type: ignore[call-arg]
+
+
+def deserialize_paged_items(body: str) -> list[OutputItem]:
+ """Deserialize a paged-response JSON body, extracting the ``data`` array.
+
+ The discriminator field ``type`` on each item determines the concrete
+ :class:`OutputItem` subclass returned.
+
+ :param body: The raw JSON response text from the storage API.
+ :type body: str
+ :returns: A list of deserialized :class:`OutputItem` instances.
+ :rtype: list[OutputItem]
+ """
+ data = json.loads(body)
+ return [OutputItem._deserialize(item, []) for item in data.get("data", [])] # type: ignore[attr-defined]
+
+
+def deserialize_items_array(body: str) -> list[OutputItem | None]:
+ """Deserialize a JSON array of items, preserving ``null`` gaps.
+
+ Null entries in the array indicate that no item was found for the
+ corresponding ID in a batch-retrieve response.
+
+ :param body: The raw JSON response text from the storage API.
+ :type body: str
+ :returns: A list of deserialized :class:`OutputItem` instances or ``None`` for missing items.
+ :rtype: list[OutputItem | None]
+ """
+ raw_items: list[dict | None] = json.loads(body)
+ result: list[OutputItem | None] = []
+ for item in raw_items:
+ if item is None:
+ result.append(None)
+ else:
+ result.append(OutputItem._deserialize(item, [])) # type: ignore[attr-defined]
+ return result
+
+
+def deserialize_history_ids(body: str) -> list[str]:
+ """Deserialize a JSON array of history item ID strings.
+
+ :param body: The raw JSON response text from the storage API.
+ :type body: str
+ :returns: List of item ID strings.
+ :rtype: list[str]
+ """
+ return list(json.loads(body))
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/store/_foundry_settings.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/store/_foundry_settings.py
new file mode 100644
index 000000000000..b2cbbd09fdb0
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/store/_foundry_settings.py
@@ -0,0 +1,72 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Configuration helpers for the Foundry storage backend."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from urllib.parse import quote as _url_quote
+
+from azure.ai.agentserver.core._config import AgentConfig
+
+_API_VERSION = "v1"
+
+
+def _encode(value: str) -> str:
+ return _url_quote(value, safe="")
+
+
+@dataclass(frozen=True)
+class FoundryStorageSettings:
+ """Immutable runtime configuration for :class:`FoundryStorageProvider`."""
+
+ storage_base_url: str
+
+ @classmethod
+ def from_env(cls) -> "FoundryStorageSettings":
+ """Create settings by reading the ``FOUNDRY_PROJECT_ENDPOINT`` environment variable.
+
+ :raises EnvironmentError: If the variable is missing or empty.
+ :raises ValueError: If the variable does not contain a valid absolute URL.
+ :returns: A new :class:`FoundryStorageSettings` configured from the environment.
+ :rtype: FoundryStorageSettings
+ """
+ config = AgentConfig.from_env()
+ if not config.project_endpoint:
+ raise EnvironmentError(
+ "The 'FOUNDRY_PROJECT_ENDPOINT' environment variable is required. "
+ "In hosted environments, the Azure AI Foundry platform must set this variable."
+ )
+ return cls.from_endpoint(config.project_endpoint)
+
+ @classmethod
+ def from_endpoint(cls, endpoint: str) -> "FoundryStorageSettings":
+ """Create settings from an explicit project endpoint URL.
+
+ :param endpoint: Foundry project endpoint URL (e.g. ``https://myproject.foundry.azure.com``).
+ :type endpoint: str
+ :raises ValueError: If the endpoint is empty or not a valid absolute URL.
+ :returns: A new :class:`FoundryStorageSettings`.
+ :rtype: FoundryStorageSettings
+ """
+ if not endpoint:
+ raise ValueError("endpoint must be a non-empty string")
+ if not (endpoint.startswith("http://") or endpoint.startswith("https://")):
+ raise ValueError(f"endpoint must be a valid absolute URL, got: {endpoint!r}")
+ base = endpoint.rstrip("/") + "/storage/"
+ return cls(storage_base_url=base)
+
+ def build_url(self, path: str, **extra_params: str) -> str:
+ """Build a full storage API URL for *path* with ``api-version`` appended.
+
+ :param path: The resource path segment, e.g. ``responses/abc123``.
+ :type path: str
+ :param extra_params: Additional query parameters; values are URL-encoded automatically.
+ :type extra_params: str
+ :returns: The complete URL string.
+ :rtype: str
+ """
+ url = f"{self.storage_base_url}{path}?api-version={_encode(_API_VERSION)}"
+ for key, value in extra_params.items():
+ url += f"&{key}={_encode(value)}"
+ return url
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/store/_memory.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/store/_memory.py
new file mode 100644
index 000000000000..a8966167d232
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/store/_memory.py
@@ -0,0 +1,666 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""In-memory response store implementation."""
+
+from __future__ import annotations
+
+import asyncio
+import contextlib
+from collections import defaultdict
+from copy import deepcopy
+from dataclasses import dataclass
+from datetime import datetime, timedelta, timezone
+from typing import Any, AsyncIterator, Dict, Iterable
+
+from .._response_context import IsolationContext
+from ..models._generated import OutputItem, ResponseObject, ResponseStreamEvent
+from ..models._helpers import get_conversation_id
+from ..models.runtime import ResponseExecution, ResponseModeFlags, ResponseStatus, StreamEventRecord, StreamReplayState
+from ._base import ResponseProviderProtocol, ResponseStreamProviderProtocol
+
+_DEFAULT_REPLAY_EVENT_TTL_SECONDS: int = 600
+"""Minimum per-event replay TTL (10 minutes) per spec B35."""
+
+
+@dataclass
+class _StoreEntry:
+ """Container for one response execution and its replay state."""
+
+ execution: ResponseExecution
+ replay: StreamReplayState
+ response: ResponseObject | None = None
+ input_item_ids: list[str] | None = None
+ output_item_ids: list[str] | None = None
+ history_item_ids: list[str] | None = None
+ deleted: bool = False
+ expires_at: datetime | None = None
+ replay_event_ttl_seconds: int = _DEFAULT_REPLAY_EVENT_TTL_SECONDS
+
+
+class InMemoryResponseProvider(ResponseProviderProtocol, ResponseStreamProviderProtocol):
+ """In-memory provider implementing both ``ResponseProviderProtocol`` and ``ResponseStreamProviderProtocol``."""
+
+ def __init__(self) -> None:
+ """Initialize in-memory state and an async mutation lock."""
+ self._entries: Dict[str, _StoreEntry] = {}
+ self._lock = asyncio.Lock()
+ self._item_store: Dict[str, OutputItem] = {}
+ self._conversation_responses: defaultdict[str, list[str]] = defaultdict(list)
+ self._stream_events: Dict[str, list[ResponseStreamEvent]] = {}
+
+ @contextlib.asynccontextmanager
+ async def _locked(self) -> AsyncIterator[None]:
+ """Acquire the lock and purge expired entries."""
+ async with self._lock:
+ self._purge_expired_unlocked()
+ yield
+
+ async def create_response(
+ self,
+ response: ResponseObject,
+ input_items: Iterable[OutputItem] | None,
+ history_item_ids: Iterable[str] | None,
+ *,
+ isolation: IsolationContext | None = None,
+ ) -> None:
+ """Persist a new response envelope and optional input/history references.
+
+ Stores a deep copy of the response, indexes input items by their IDs,
+ and tracks conversation membership for history resolution.
+
+ :param response: The response envelope to persist.
+ :type response: ~azure.ai.agentserver.responses.models._generated.Response
+ :param input_items: Optional resolved output items to associate with the response.
+ :type input_items: Iterable[OutputItem] | None
+ :param history_item_ids: Optional history item IDs to link to the response.
+ :type history_item_ids: Iterable[str] | None
+ :rtype: None
+ :raises ValueError: If a non-deleted response with the same ID already exists.
+ """
+ response_id = str(getattr(response, "id"))
+ async with self._locked():
+ entry = self._entries.get(response_id)
+ if entry is not None and not entry.deleted:
+ raise ValueError(f"response '{response_id}' already exists")
+
+ input_ids: list[str] = []
+ if input_items is not None:
+ for item in input_items:
+ item_id = self._extract_item_id(item)
+ if item_id is None:
+ continue
+ self._item_store[item_id] = deepcopy(item)
+ input_ids.append(item_id)
+
+ history_ids = list(history_item_ids) if history_item_ids is not None else []
+ output_ids = self._store_output_items_unlocked(response)
+ self._entries[response_id] = _StoreEntry(
+ execution=ResponseExecution(
+ response_id=response_id,
+ mode_flags=self._resolve_mode_flags_from_response(response),
+ ),
+ replay=StreamReplayState(response_id=response_id),
+ response=deepcopy(response),
+ input_item_ids=input_ids,
+ output_item_ids=output_ids,
+ history_item_ids=history_ids,
+ deleted=False,
+ )
+
+ conversation_id = get_conversation_id(response)
+ if conversation_id is not None:
+ self._conversation_responses[conversation_id].append(response_id)
+
+ async def get_response(self, response_id: str, *, isolation: IsolationContext | None = None) -> ResponseObject:
+ """Retrieve one response envelope by identifier.
+
+ :param response_id: The unique identifier of the response to retrieve.
+ :type response_id: str
+ :returns: A deep copy of the stored response envelope.
+ :rtype: ~azure.ai.agentserver.responses.models._generated.Response
+ :raises KeyError: If the response does not exist or has been deleted.
+ """
+ async with self._locked():
+ entry = self._entries.get(response_id)
+ if entry is None or entry.deleted or entry.response is None:
+ raise KeyError(f"response '{response_id}' not found")
+ return deepcopy(entry.response)
+
+ async def update_response(self, response: ResponseObject, *, isolation: IsolationContext | None = None) -> None:
+ """Update a stored response envelope.
+
+ Replaces the stored response with a deep copy and updates
+ the execution snapshot.
+
+ :param response: The response envelope with updated fields.
+ :type response: ~azure.ai.agentserver.responses.models._generated.Response
+ :rtype: None
+ :raises KeyError: If the response does not exist or has been deleted.
+ """
+ response_id = str(getattr(response, "id"))
+ async with self._locked():
+ entry = self._entries.get(response_id)
+ if entry is None or entry.deleted:
+ raise KeyError(f"response '{response_id}' not found")
+
+ entry.response = deepcopy(response)
+ entry.execution.set_response_snapshot(deepcopy(response))
+ entry.output_item_ids = self._store_output_items_unlocked(response)
+
+ async def delete_response(self, response_id: str, *, isolation: IsolationContext | None = None) -> None:
+ """Delete a stored response envelope by identifier.
+
+ Marks the entry as deleted and clears the response payload.
+
+ :param response_id: The unique identifier of the response to delete.
+ :type response_id: str
+ :rtype: None
+ :raises KeyError: If the response does not exist or has already been deleted.
+ """
+ async with self._locked():
+ entry = self._entries.get(response_id)
+ if entry is None or entry.deleted:
+ raise KeyError(f"response '{response_id}' not found")
+ entry.deleted = True
+ entry.response = None
+
+ async def get_input_items(
+ self,
+ response_id: str,
+ limit: int = 20,
+ ascending: bool = False,
+ after: str | None = None,
+ before: str | None = None,
+ *,
+ isolation: IsolationContext | None = None,
+ ) -> list[OutputItem]:
+ """Retrieve input/history items for a response with basic cursor paging.
+
+ Returns deep copies of stored items, combining history and input item IDs
+ with optional cursor-based pagination.
+
+ :param response_id: The unique identifier of the response whose items to fetch.
+ :type response_id: str
+ :param limit: Maximum number of items to return (clamped to 1–100). Defaults to 20.
+ :type limit: int
+ :param ascending: Whether to return items in ascending order. Defaults to False.
+ :type ascending: bool
+ :param after: Cursor ID; only return items after this ID.
+ :type after: str | None
+ :param before: Cursor ID; only return items before this ID.
+ :type before: str | None
+ :returns: A list of input/history items matching the pagination criteria.
+ :rtype: list[OutputItem]
+ :raises KeyError: If the response does not exist.
+ :raises ValueError: If the response has been deleted.
+ """
+ async with self._locked():
+ entry = self._entries.get(response_id)
+ if entry is None:
+ raise KeyError(f"response '{response_id}' not found")
+ if entry.deleted:
+ raise ValueError(f"response '{response_id}' has been deleted")
+
+ item_ids = [
+ *(entry.history_item_ids or []),
+ *(entry.input_item_ids or []),
+ ]
+ ordered_ids = item_ids if ascending else list(reversed(item_ids))
+
+ if after is not None:
+ try:
+ ordered_ids = ordered_ids[ordered_ids.index(after) + 1 :]
+ except ValueError:
+ pass
+ if before is not None:
+ try:
+ ordered_ids = ordered_ids[: ordered_ids.index(before)]
+ except ValueError:
+ pass
+
+ safe_limit = max(1, min(100, int(limit)))
+ return [
+ deepcopy(self._item_store[item_id])
+ for item_id in ordered_ids[:safe_limit]
+ if item_id in self._item_store
+ ]
+
+ async def get_items(
+ self,
+ item_ids: Iterable[str],
+ *,
+ isolation: IsolationContext | None = None,
+ ) -> list[OutputItem | None]:
+ """Retrieve items by ID, preserving request order.
+
+ Returns deep copies of stored items. Missing IDs produce ``None`` entries.
+
+ :param item_ids: The item identifiers to look up.
+ :type item_ids: Iterable[str]
+ :returns: A list of output items in the same order as *item_ids*; missing items are ``None``.
+ :rtype: list[OutputItem | None]
+ """
+ async with self._locked():
+ return [
+ deepcopy(self._item_store[item_id]) if item_id in self._item_store else None for item_id in item_ids
+ ]
+
+ async def get_history_item_ids(
+ self,
+ previous_response_id: str | None,
+ conversation_id: str | None,
+ limit: int,
+ *,
+ isolation: IsolationContext | None = None,
+ ) -> list[str]:
+ """Resolve history item IDs from previous response and/or conversation scope.
+
+ Collects history item IDs from the previous response chain and/or
+ all responses within the given conversation, up to *limit*.
+
+ :param previous_response_id: Optional response ID to chain history from.
+ :type previous_response_id: str | None
+ :param conversation_id: Optional conversation ID to scope history lookup.
+ :type conversation_id: str | None
+ :param limit: Maximum number of history item IDs to return.
+ :type limit: int
+ :returns: A list of history item IDs within the given scope.
+ :rtype: list[str]
+ """
+ async with self._locked():
+ resolved: list[str] = []
+
+ if previous_response_id is not None:
+ entry = self._entries.get(previous_response_id)
+ if entry is not None and not entry.deleted:
+ # Resolve history chain for the previous response:
+ # return historyItemIds + inputItemIds + outputItemIds of the previous response
+ resolved.extend(entry.history_item_ids or [])
+ resolved.extend(entry.input_item_ids or [])
+ resolved.extend(entry.output_item_ids or [])
+
+ if conversation_id is not None:
+ for response_id in self._conversation_responses.get(conversation_id, []):
+ entry = self._entries.get(response_id)
+ if entry is None or entry.deleted:
+ continue
+ resolved.extend(entry.history_item_ids or [])
+ resolved.extend(entry.input_item_ids or [])
+ resolved.extend(entry.output_item_ids or [])
+
+ if limit <= 0:
+ return []
+ return resolved[:limit]
+
+ async def create_execution(self, execution: ResponseExecution, *, ttl_seconds: int | None = None) -> None:
+ """Create a new execution and replay container for ``execution.response_id``.
+
+ :param execution: The execution state to store.
+ :type execution: ~azure.ai.agentserver.responses.models.runtime.ResponseExecution
+ :keyword int or None ttl_seconds: Optional time-to-live in seconds for automatic expiration.
+ :rtype: None
+ :raises ValueError: If an entry with the same response ID already exists.
+ """
+ async with self._locked():
+ if execution.response_id in self._entries:
+ raise ValueError(f"response '{execution.response_id}' already exists")
+
+ self._entries[execution.response_id] = _StoreEntry(
+ execution=deepcopy(execution),
+ replay=StreamReplayState(response_id=execution.response_id),
+ expires_at=self._compute_expiry(ttl_seconds),
+ )
+
+ async def get_execution(self, response_id: str) -> ResponseExecution | None:
+ """Get a defensive copy of execution state for ``response_id`` if present.
+
+ :param response_id: The unique identifier of the response execution to retrieve.
+ :type response_id: str
+ :returns: A deep copy of the execution state, or ``None`` if not found.
+ :rtype: ~azure.ai.agentserver.responses.models.runtime.ResponseExecution | None
+ """
+ async with self._locked():
+ entry = self._entries.get(response_id)
+ if entry is None:
+ return None
+ return deepcopy(entry.execution)
+
+ async def set_response_snapshot(
+ self,
+ response_id: str,
+ response: ResponseObject,
+ *,
+ ttl_seconds: int | None = None,
+ ) -> bool:
+ """Set the latest response snapshot for an existing response execution.
+
+ :param response_id: The unique identifier of the response to update.
+ :type response_id: str
+ :param response: The response snapshot to associate with the execution.
+ :type response: ~azure.ai.agentserver.responses.models._generated.Response
+ :keyword int or None ttl_seconds: Optional time-to-live in seconds to refresh expiration.
+ :returns: ``True`` if the entry was found and updated, ``False`` otherwise.
+ :rtype: bool
+ """
+ async with self._locked():
+ entry = self._entries.get(response_id)
+ if entry is None:
+ return False
+
+ entry.execution.set_response_snapshot(response)
+ self._apply_ttl_unlocked(entry, ttl_seconds)
+ return True
+
+ async def transition_execution_status(
+ self,
+ response_id: str,
+ next_status: ResponseStatus,
+ *,
+ ttl_seconds: int | None = None,
+ ) -> bool:
+ """Transition execution state while preserving lifecycle invariants.
+
+ :param response_id: The unique identifier of the response execution to transition.
+ :type response_id: str
+ :param next_status: The target status to transition to.
+ :type next_status: ~azure.ai.agentserver.responses.models.runtime.ResponseStatus
+ :keyword int or None ttl_seconds: Optional time-to-live in seconds to refresh expiration.
+ :returns: ``True`` if the entry was found and transitioned, ``False`` otherwise.
+ :rtype: bool
+ """
+ async with self._locked():
+ entry = self._entries.get(response_id)
+ if entry is None:
+ return False
+
+ entry.execution.transition_to(next_status)
+ self._apply_ttl_unlocked(entry, ttl_seconds)
+ return True
+
+ async def set_cancel_requested(self, response_id: str, *, ttl_seconds: int | None = None) -> bool:
+ """Mark cancellation requested and enforce lifecycle-safe cancel transitions.
+
+ :param response_id: The unique identifier of the response to cancel.
+ :type response_id: str
+ :keyword int or None ttl_seconds: Optional time-to-live in seconds to refresh expiration.
+ :returns: ``True`` if the entry was found and cancel was applied, ``False`` otherwise.
+ :rtype: bool
+ :raises ValueError: If the execution is already terminal in a non-cancelled state.
+ """
+ async with self._locked():
+ entry = self._entries.get(response_id)
+ if entry is None:
+ return False
+
+ self._apply_cancel_transition_unlocked(entry)
+ self._apply_ttl_unlocked(entry, ttl_seconds)
+ return True
+
+ @staticmethod
+ def _apply_cancel_transition_unlocked(entry: _StoreEntry) -> None:
+ """Apply deterministic and lifecycle-safe cancellation status updates.
+
+ Transitions the entry through ``queued -> in_progress -> cancelled`` when
+ applicable, and sets the ``cancel_requested`` flag.
+
+ :param entry: The store entry whose execution state will be updated.
+ :type entry: _StoreEntry
+ :rtype: None
+ :raises ValueError: If the execution is in a terminal non-cancelled state.
+ """
+ status = entry.execution.status
+
+ if status == "cancelled":
+ entry.execution.cancel_requested = True
+ entry.execution.updated_at = datetime.now(timezone.utc)
+ return
+
+ if status in {"completed", "failed", "incomplete"}:
+ raise ValueError(f"cannot cancel terminal execution in status '{status}'")
+
+ if status == "queued":
+ entry.execution.transition_to("in_progress")
+
+ entry.execution.transition_to("cancelled")
+ entry.execution.cancel_requested = True
+
+ async def append_stream_event(
+ self,
+ response_id: str,
+ event: StreamEventRecord,
+ *,
+ ttl_seconds: int | None = None,
+ ) -> bool:
+ """Append one stream event to replay state for an existing execution.
+
+ :param response_id: The unique identifier of the response to append the event to.
+ :type response_id: str
+ :param event: The stream event record to append.
+ :type event: ~azure.ai.agentserver.responses.models.runtime.StreamEventRecord
+ :keyword int or None ttl_seconds: Optional time-to-live in seconds to refresh expiration.
+ :returns: ``True`` if the entry was found and the event was appended, ``False`` otherwise.
+ :rtype: bool
+ """
+ async with self._locked():
+ entry = self._entries.get(response_id)
+ if entry is None:
+ return False
+
+ entry.replay.append(deepcopy(event))
+ self._apply_ttl_unlocked(entry, ttl_seconds)
+ return True
+
+ async def get_replay_events(self, response_id: str) -> list[StreamEventRecord] | None:
+ """Get defensive copies of replay events for ``response_id``, filtering out expired events.
+
+ Events older than the entry's ``replay_event_ttl_seconds`` (default 600s / 10 minutes,
+ per spec B35) are excluded from the returned list.
+
+ :param response_id: The unique identifier of the response whose events to retrieve.
+ :type response_id: str
+ :returns: A list of deep-copied stream event records, or ``None`` if not found.
+ :rtype: list[~azure.ai.agentserver.responses.models.runtime.StreamEventRecord] | None
+ """
+ async with self._locked():
+ entry = self._entries.get(response_id)
+ if entry is None:
+ return None
+ cutoff = datetime.now(timezone.utc) - timedelta(seconds=entry.replay_event_ttl_seconds)
+ live = [e for e in entry.replay.events if e.emitted_at >= cutoff]
+ return deepcopy(live)
+
+ async def delete(self, response_id: str) -> bool:
+ """Delete all state for a response ID if present.
+
+ Removes the entry entirely from the store (unlike ``delete_response``
+ which soft-deletes).
+
+ :param response_id: The unique identifier of the response to remove.
+ :type response_id: str
+ :returns: ``True`` if an entry was found and removed, ``False`` otherwise.
+ :rtype: bool
+ """
+ async with self._locked():
+ self._stream_events.pop(response_id, None)
+ return self._entries.pop(response_id, None) is not None
+
+ async def save_stream_events(
+ self,
+ response_id: str,
+ events: list[ResponseStreamEvent],
+ *,
+ isolation: IsolationContext | None = None,
+ ) -> None:
+ """Persist the complete ordered list of SSE events for ``response_id``.
+
+ Each event is stamped with ``_saved_at`` (UTC) so that :meth:`get_stream_events`
+ can enforce per-event replay TTL (B35).
+
+ :param response_id: The unique identifier of the response.
+ :type response_id: str
+ :param events: Ordered list of event instances.
+ :type events: list[ResponseStreamEvent]
+ :rtype: None
+ """
+ now = datetime.now(timezone.utc)
+ stamped: list[ResponseStreamEvent] = []
+ for ev in events:
+ copy = deepcopy(ev)
+ copy.setdefault("_saved_at", now)
+ stamped.append(copy)
+ async with self._locked():
+ self._stream_events[response_id] = stamped
+
+ async def get_stream_events(
+ self,
+ response_id: str,
+ *,
+ isolation: IsolationContext | None = None,
+ ) -> list[ResponseStreamEvent] | None:
+ """Retrieve the persisted SSE events for ``response_id``, excluding expired events.
+
+ Events older than the entry's ``replay_event_ttl_seconds`` (default 600s / 10 minutes,
+ per spec B35) are filtered out.
+
+ :param response_id: The unique identifier of the response whose events to retrieve.
+ :type response_id: str
+ :returns: A deep-copied list of event instances, or ``None`` if not found.
+ :rtype: list[ResponseStreamEvent] | None
+ """
+ async with self._locked():
+ events = self._stream_events.get(response_id)
+ if events is None:
+ return None
+ entry = self._entries.get(response_id)
+ ttl = entry.replay_event_ttl_seconds if entry is not None else _DEFAULT_REPLAY_EVENT_TTL_SECONDS
+ cutoff = datetime.now(timezone.utc) - timedelta(seconds=ttl)
+ live = [e for e in events if e.get("_saved_at", cutoff) >= cutoff]
+ return deepcopy(live)
+
+ async def delete_stream_events(
+ self,
+ response_id: str,
+ *,
+ isolation: IsolationContext | None = None,
+ ) -> None:
+ """Delete persisted SSE events for ``response_id``.
+
+ :param response_id: The unique identifier of the response whose events to remove.
+ :type response_id: str
+ :rtype: None
+ """
+ async with self._locked():
+ self._stream_events.pop(response_id, None)
+
+ async def purge_expired(self, *, now: datetime | None = None) -> int:
+ """Remove expired entries and return count.
+
+ :keyword ~datetime.datetime or None now: Optional override for the current time (useful for testing).
+ :returns: The number of expired entries that were removed.
+ :rtype: int
+ """
+ async with self._locked():
+ return self._purge_expired_unlocked(now=now)
+
+ @staticmethod
+ def _compute_expiry(ttl_seconds: int | None) -> datetime | None:
+ """Compute an absolute expiration timestamp from a TTL.
+
+ :param ttl_seconds: Time-to-live in seconds, or ``None`` for no expiration.
+ :type ttl_seconds: int | None
+ :returns: A UTC datetime for the expiry, or ``None`` if *ttl_seconds* is ``None``.
+ :rtype: ~datetime.datetime | None
+ :raises ValueError: If *ttl_seconds* is <= 0.
+ """
+ if ttl_seconds is None:
+ return None
+ if ttl_seconds <= 0:
+ raise ValueError("ttl_seconds must be > 0 when set")
+ return datetime.now(timezone.utc) + timedelta(seconds=ttl_seconds)
+
+ def _apply_ttl_unlocked(self, entry: _StoreEntry, ttl_seconds: int | None) -> None:
+ """Update entry expiration timestamp when a TTL value is supplied.
+
+ :param entry: The store entry whose expiration to update.
+ :type entry: _StoreEntry
+ :param ttl_seconds: Time-to-live in seconds, or ``None`` to leave unchanged.
+ :type ttl_seconds: int | None
+ :rtype: None
+ """
+ if ttl_seconds is not None:
+ entry.expires_at = self._compute_expiry(ttl_seconds)
+
+ def _purge_expired_unlocked(self, *, now: datetime | None = None) -> int:
+ """Remove expired entries without acquiring the lock.
+
+ :keyword ~datetime.datetime or None now: Optional override for the current time (useful for testing).
+ :returns: The number of expired entries that were removed.
+ :rtype: int
+ """
+ current_time = now or datetime.now(timezone.utc)
+ expired_ids = [
+ response_id
+ for response_id, entry in self._entries.items()
+ if entry.expires_at is not None and entry.expires_at <= current_time
+ ]
+
+ for response_id in expired_ids:
+ del self._entries[response_id]
+
+ return len(expired_ids)
+
+ def _store_output_items_unlocked(self, response: ResponseObject) -> list[str]:
+ """Extract output items from a response, store them in the item store, and return their IDs.
+
+ Must be called while holding ``self._lock``.
+
+ :param response: The response envelope whose output items should be stored.
+ :type response: ~azure.ai.agentserver.responses.models._generated.Response
+ :returns: Ordered list of output item IDs.
+ :rtype: list[str]
+ """
+ output = getattr(response, "output", None)
+ if not output:
+ return []
+ output_ids: list[str] = []
+ for item in output:
+ item_id = self._extract_item_id(item)
+ if item_id is not None:
+ self._item_store[item_id] = deepcopy(item)
+ output_ids.append(item_id)
+ return output_ids
+
+ @staticmethod
+ def _extract_item_id(item: Any) -> str | None:
+ """Extract item identifier from object-like or mapping-like values.
+
+ Supports both dict-like (``item["id"]``) and attribute-like (``item.id``)
+ access patterns.
+
+ :param item: The item to extract an ID from.
+ :type item: Any
+ :returns: The string ID if found, or ``None``.
+ :rtype: str | None
+ """
+ if item is None:
+ return None
+ if isinstance(item, dict):
+ value = item.get("id")
+ return str(value) if value is not None else None
+ value = getattr(item, "id", None)
+ return str(value) if value is not None else None
+
+ @staticmethod
+ def _resolve_mode_flags_from_response(response: ResponseObject) -> ResponseModeFlags:
+ """Build mode flags from a response snapshot where available.
+
+ :param response: The response envelope to extract mode flags from.
+ :type response: ~azure.ai.agentserver.responses.models._generated.Response
+ :returns: Mode flags derived from the response's ``stream``, ``store``, and ``background`` attributes.
+ :rtype: ~azure.ai.agentserver.responses.models.runtime.ResponseModeFlags
+ """
+ return ResponseModeFlags(
+ stream=bool(getattr(response, "stream", False)),
+ store=bool(getattr(response, "store", True)),
+ background=bool(getattr(response, "background", False)),
+ )
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/streaming/__init__.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/streaming/__init__.py
new file mode 100644
index 000000000000..74ad4767ddf4
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/streaming/__init__.py
@@ -0,0 +1,24 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Event streaming, SSE encoding, and output item builders."""
+
+from ._helpers import (
+ EVENT_TYPE,
+)
+from ._sse import encode_keep_alive_comment, encode_sse_event
+from ._state_machine import (
+ EventStreamValidator,
+ LifecycleStateMachineError,
+ normalize_lifecycle_events,
+ validate_response_event_stream,
+)
+
+__all__ = [
+ "EVENT_TYPE",
+ "EventStreamValidator",
+ "LifecycleStateMachineError",
+ "encode_sse_event",
+ "encode_keep_alive_comment",
+ "normalize_lifecycle_events",
+ "validate_response_event_stream",
+]
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/streaming/_builders/__init__.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/streaming/_builders/__init__.py
new file mode 100644
index 000000000000..8abb117f6b0a
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/streaming/_builders/__init__.py
@@ -0,0 +1,53 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Streaming output-item builders."""
+
+from ._base import (
+ BaseOutputItemBuilder,
+ BuilderLifecycleState,
+ OutputItemBuilder,
+ _require_non_empty,
+)
+from ._function import (
+ OutputItemFunctionCallBuilder,
+ OutputItemFunctionCallOutputBuilder,
+)
+from ._message import (
+ OutputItemMessageBuilder,
+ RefusalContentBuilder,
+ TextContentBuilder,
+)
+from ._reasoning import (
+ OutputItemReasoningItemBuilder,
+ ReasoningSummaryPartBuilder,
+)
+from ._tools import (
+ OutputItemCodeInterpreterCallBuilder,
+ OutputItemCustomToolCallBuilder,
+ OutputItemFileSearchCallBuilder,
+ OutputItemImageGenCallBuilder,
+ OutputItemMcpCallBuilder,
+ OutputItemMcpListToolsBuilder,
+ OutputItemWebSearchCallBuilder,
+)
+
+__all__ = [
+ "BaseOutputItemBuilder",
+ "BuilderLifecycleState",
+ "OutputItemBuilder",
+ "OutputItemCodeInterpreterCallBuilder",
+ "OutputItemCustomToolCallBuilder",
+ "OutputItemFileSearchCallBuilder",
+ "OutputItemFunctionCallBuilder",
+ "OutputItemFunctionCallOutputBuilder",
+ "OutputItemImageGenCallBuilder",
+ "OutputItemMcpCallBuilder",
+ "OutputItemMcpListToolsBuilder",
+ "OutputItemMessageBuilder",
+ "OutputItemReasoningItemBuilder",
+ "OutputItemWebSearchCallBuilder",
+ "ReasoningSummaryPartBuilder",
+ "RefusalContentBuilder",
+ "TextContentBuilder",
+ "_require_non_empty",
+]
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/streaming/_builders/_base.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/streaming/_builders/_base.py
new file mode 100644
index 000000000000..5171ef01d06d
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/streaming/_builders/_base.py
@@ -0,0 +1,191 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Base builder infrastructure: lifecycle state, base class, and generic builder."""
+
+from __future__ import annotations
+
+from copy import deepcopy
+from enum import Enum
+from typing import TYPE_CHECKING, Any
+
+from ...models import _generated as generated_models
+
+EVENT_TYPE = generated_models.ResponseStreamEventType
+
+if TYPE_CHECKING:
+ from .._event_stream import ResponseEventStream
+
+
+def _require_non_empty(value: str, field_name: str) -> str:
+ """Validate that a string value is non-empty.
+
+ :param value: The string value to check.
+ :type value: str
+ :param field_name: The field name to include in the error message.
+ :type field_name: str
+ :returns: The validated non-empty string.
+ :rtype: str
+ :raises ValueError: If *value* is not a non-empty string.
+ """
+ if not isinstance(value, str) or not value.strip():
+ raise ValueError(f"{field_name} must be a non-empty string")
+ return value
+
+
+class BuilderLifecycleState(Enum):
+ NOT_STARTED = "not_started"
+ ADDED = "added"
+ DONE = "done"
+
+
+class BaseOutputItemBuilder:
+ """Base output-item builder with lifecycle guards for added/done events."""
+
+ def __init__(self, stream: "ResponseEventStream", output_index: int, item_id: str) -> None:
+ """Initialize the base output-item builder.
+
+ :param stream: The parent event stream to emit events into.
+ :type stream: ResponseEventStream
+ :param output_index: The zero-based index of this output item.
+ :type output_index: int
+ :param item_id: Unique identifier for this output item.
+ :type item_id: str
+ """
+ self._stream = stream
+ self._output_index = output_index
+ self._item_id = item_id
+ self._lifecycle_state = BuilderLifecycleState.NOT_STARTED
+
+ @property
+ def item_id(self) -> str:
+ """Return the output item identifier.
+
+ :returns: The item ID.
+ :rtype: str
+ """
+ return self._item_id
+
+ @property
+ def output_index(self) -> int:
+ """Return the zero-based output index.
+
+ :returns: The output index.
+ :rtype: int
+ """
+ return self._output_index
+
+ def _ensure_transition(self, expected: BuilderLifecycleState, new_state: BuilderLifecycleState) -> None:
+ """Guard a lifecycle state transition.
+
+ :param expected: The expected current lifecycle state.
+ :type expected: BuilderLifecycleState
+ :param new_state: The target state to transition to.
+ :type new_state: BuilderLifecycleState
+ :rtype: None
+ :raises ValueError: If the current state does not match *expected*.
+ """
+ if self._lifecycle_state is not expected:
+ raise ValueError(
+ "cannot transition to "
+ f"'{new_state.value}' from '{self._lifecycle_state.value}' "
+ f"(expected '{expected.value}')"
+ )
+ self._lifecycle_state = new_state
+
+ def _emit_added(self, item: dict[str, Any]) -> generated_models.ResponseStreamEvent:
+ """Emit an ``output_item.added`` event with lifecycle guard.
+
+ :param item: The output item dict to include in the event.
+ :type item: dict[str, Any]
+ :returns: The emitted event.
+ :rtype: ResponseStreamEvent
+ :raises ValueError: If the builder is not in ``NOT_STARTED`` state.
+ """
+ self._ensure_transition(BuilderLifecycleState.NOT_STARTED, BuilderLifecycleState.ADDED)
+ stamped_item = self._stream.with_output_item_defaults(item)
+ return self._stream.emit_event(
+ {
+ "type": EVENT_TYPE.RESPONSE_OUTPUT_ITEM_ADDED.value,
+ "output_index": self._output_index,
+ "item": stamped_item,
+ }
+ )
+
+ def _emit_done(self, item: dict[str, Any]) -> generated_models.ResponseStreamEvent:
+ """Emit an ``output_item.done`` event with lifecycle guard.
+
+ :param item: The completed output item dict to include in the event.
+ :type item: dict[str, Any]
+ :returns: The emitted event.
+ :rtype: ResponseStreamEvent
+ :raises ValueError: If the builder is not in ``ADDED`` state.
+ """
+ self._ensure_transition(BuilderLifecycleState.ADDED, BuilderLifecycleState.DONE)
+ stamped_item = self._stream.with_output_item_defaults(item)
+ return self._stream.emit_event(
+ {
+ "type": EVENT_TYPE.RESPONSE_OUTPUT_ITEM_DONE.value,
+ "output_index": self._output_index,
+ "item": stamped_item,
+ }
+ )
+
+ def _emit_item_state_event(
+ self, event_type: str, *, extra_payload: dict[str, Any] | None = None
+ ) -> generated_models.ResponseStreamEvent:
+ """Emit an item-level state event (e.g., in-progress, searching, completed).
+
+ :param event_type: The event type string.
+ :type event_type: str
+ :keyword extra_payload: Optional additional fields to merge.
+ :paramtype extra_payload: dict[str, Any] | None
+ :returns: The emitted event.
+ :rtype: ResponseStreamEvent
+ """
+ event: dict[str, Any] = {
+ "type": event_type,
+ "item_id": self._item_id,
+ "output_index": self._output_index,
+ }
+ if extra_payload:
+ event.update(deepcopy(extra_payload))
+ return self._stream.emit_event(event)
+
+
+class OutputItemBuilder(BaseOutputItemBuilder):
+ """Generic output-item builder for item types without dedicated scoped builders."""
+
+ def _coerce_item(self, item: generated_models.OutputItem | dict[str, Any]) -> dict[str, Any]:
+ """Coerce an item to a plain dict.
+
+ :param item: A dict or a generated model with ``as_dict()``.
+ :type item: OutputItem | dict[str, Any]
+ :returns: A deep-copied dict representation of the item.
+ :rtype: dict[str, Any]
+ :raises TypeError: If *item* is not a dict or model with ``as_dict()``.
+ """
+ if isinstance(item, dict):
+ return deepcopy(item)
+ if hasattr(item, "as_dict"):
+ return item.as_dict()
+ raise TypeError("item must be a dict or a generated model with as_dict()")
+
+ def emit_added(self, item: generated_models.OutputItem | dict[str, Any]) -> generated_models.ResponseStreamEvent:
+ """Emit an ``output_item.added`` event for a generic item.
+
+ :param item: The output item (dict or model with ``as_dict()``).
+ :type item: OutputItem | dict[str, Any]
+ :returns: The emitted event.
+ :rtype: ResponseStreamEvent
+ """
+ return self._emit_added(self._coerce_item(item))
+
+ def emit_done(self, item: generated_models.OutputItem | dict[str, Any]) -> generated_models.ResponseStreamEvent:
+ """Emit an ``output_item.done`` event for a generic item.
+
+ :param item: The completed output item (dict or model with ``as_dict()``).
+ :type item: OutputItem | dict[str, Any]
+ :returns: The emitted event.
+ :rtype: ResponseStreamEvent
+ """
+ return self._emit_done(self._coerce_item(item))
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/streaming/_builders/_function.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/streaming/_builders/_function.py
new file mode 100644
index 000000000000..5e6122c9c230
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/streaming/_builders/_function.py
@@ -0,0 +1,245 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Function call builders: function-call and function-call-output output items."""
+
+from __future__ import annotations
+
+from collections.abc import AsyncIterable
+from copy import deepcopy
+from typing import TYPE_CHECKING, Any, AsyncIterator, Iterator
+
+from ._base import EVENT_TYPE, BaseOutputItemBuilder, _require_non_empty
+
+if TYPE_CHECKING:
+ from .._event_stream import ResponseEventStream
+
+
+class OutputItemFunctionCallBuilder(BaseOutputItemBuilder):
+ """Scoped builder for a function-call output item in stream mode."""
+
+ def __init__(
+ self,
+ stream: "ResponseEventStream",
+ output_index: int,
+ item_id: str,
+ name: str,
+ call_id: str,
+ ) -> None:
+ """Initialize the function-call output item builder.
+
+ :param stream: The parent event stream.
+ :type stream: ResponseEventStream
+ :param output_index: Zero-based index of this output item.
+ :type output_index: int
+ :param item_id: Unique identifier for this output item.
+ :type item_id: str
+ :param name: The function name being called.
+ :type name: str
+ :param call_id: Unique identifier for this function call.
+ :type call_id: str
+ """
+ super().__init__(stream=stream, output_index=output_index, item_id=item_id)
+ self._name = _require_non_empty(name, "name")
+ self._call_id = _require_non_empty(call_id, "call_id")
+ self._final_arguments: str | None = None
+
+ @property
+ def name(self) -> str:
+ """Return the function name.
+
+ :returns: The function name.
+ :rtype: str
+ """
+ return self._name
+
+ @property
+ def call_id(self) -> str:
+ """Return the function call identifier.
+
+ :returns: The call ID.
+ :rtype: str
+ """
+ return self._call_id
+
+ def emit_added(self) -> dict[str, Any]:
+ """Emit an ``output_item.added`` event for this function call.
+
+ :returns: The emitted event dict.
+ :rtype: dict[str, Any]
+ """
+ return self._emit_added(
+ {
+ "type": "function_call",
+ "id": self._item_id,
+ "call_id": self._call_id,
+ "name": self._name,
+ "arguments": "",
+ "status": "in_progress",
+ }
+ )
+
+ def emit_arguments_delta(self, delta: str) -> dict[str, Any]:
+ """Emit a function-call arguments delta event.
+
+ :param delta: The incremental arguments text fragment.
+ :type delta: str
+ :returns: The emitted event dict.
+ :rtype: dict[str, Any]
+ """
+ return self._stream.emit_event(
+ {
+ "type": EVENT_TYPE.RESPONSE_FUNCTION_CALL_ARGUMENTS_DELTA.value,
+ "item_id": self._item_id,
+ "output_index": self._output_index,
+ "delta": delta,
+ }
+ )
+
+ def emit_arguments_done(self, arguments: str) -> dict[str, Any]:
+ """Emit a function-call arguments done event.
+
+ :param arguments: The final, complete arguments string.
+ :type arguments: str
+ :returns: The emitted event dict.
+ :rtype: dict[str, Any]
+ """
+ self._final_arguments = arguments
+ return self._stream.emit_event(
+ {
+ "type": EVENT_TYPE.RESPONSE_FUNCTION_CALL_ARGUMENTS_DONE.value,
+ "item_id": self._item_id,
+ "output_index": self._output_index,
+ "name": self._name,
+ "arguments": arguments,
+ }
+ )
+
+ def emit_done(self) -> dict[str, Any]:
+ """Emit an ``output_item.done`` event for this function call.
+
+ :returns: The emitted event dict.
+ :rtype: dict[str, Any]
+ """
+ return self._emit_done(
+ {
+ "type": "function_call",
+ "id": self._item_id,
+ "call_id": self._call_id,
+ "name": self._name,
+ "arguments": self._final_arguments or "",
+ "status": "completed",
+ }
+ )
+
+ # ---- Sub-item convenience generators (S-053) ----
+
+ def arguments(self, args: str) -> Iterator[dict[str, Any]]:
+ """Yield the argument delta and done events.
+
+ Emits ``function_call_arguments.delta`` followed by
+ ``function_call_arguments.done``.
+
+ :param args: The complete arguments string.
+ :type args: str
+ :returns: An iterator of event dicts.
+ :rtype: Iterator[dict[str, Any]]
+ """
+ yield self.emit_arguments_delta(args)
+ yield self.emit_arguments_done(args)
+
+ async def aarguments(self, args: str | AsyncIterable[str]) -> AsyncIterator[dict[str, Any]]:
+ """Async variant of :meth:`arguments` with streaming support.
+
+ When *args* is a string, behaves identically to :meth:`arguments`.
+ When *args* is an async iterable of string chunks, emits one
+ ``function_call_arguments.delta`` per chunk in real time (S-055),
+ then ``function_call_arguments.done`` with the accumulated text.
+
+ :param args: Complete arguments string or async iterable of chunks.
+ :type args: str | AsyncIterable[str]
+ :returns: An async iterator of event dicts.
+ :rtype: AsyncIterator[dict[str, Any]]
+ """
+ if isinstance(args, str):
+ for event in self.arguments(args):
+ yield event
+ return
+ accumulated: list[str] = []
+ async for chunk in args:
+ accumulated.append(chunk)
+ yield self.emit_arguments_delta(chunk)
+ yield self.emit_arguments_done("".join(accumulated))
+
+
+class OutputItemFunctionCallOutputBuilder(BaseOutputItemBuilder):
+ """Scoped builder for a function-call-output item in stream mode."""
+
+ def __init__(
+ self,
+ stream: "ResponseEventStream",
+ output_index: int,
+ item_id: str,
+ call_id: str,
+ ) -> None:
+ """Initialize the function-call-output item builder.
+
+ :param stream: The parent event stream.
+ :type stream: ResponseEventStream
+ :param output_index: Zero-based index of this output item.
+ :type output_index: int
+ :param item_id: Unique identifier for this output item.
+ :type item_id: str
+ :param call_id: The call ID of the function call this output belongs to.
+ :type call_id: str
+ """
+ super().__init__(stream=stream, output_index=output_index, item_id=item_id)
+ self._call_id = _require_non_empty(call_id, "call_id")
+ self._final_output: str | list[Any] | None = None
+
+ @property
+ def call_id(self) -> str:
+ """Return the function call identifier.
+
+ :returns: The call ID.
+ :rtype: str
+ """
+ return self._call_id
+
+ def emit_added(self, output: str | list[Any] | None = None) -> dict[str, Any]:
+ """Emit an ``output_item.added`` event for this function-call output.
+
+ :param output: Optional initial output value.
+ :type output: str | list[Any] | None
+ :returns: The emitted event dict.
+ :rtype: dict[str, Any]
+ """
+ return self._emit_added(
+ {
+ "type": "function_call_output",
+ "id": self._item_id,
+ "call_id": self._call_id,
+ "output": deepcopy(output) if output is not None else "",
+ "status": "in_progress",
+ }
+ )
+
+ def emit_done(self, output: str | list[Any] | None = None) -> dict[str, Any]:
+ """Emit an ``output_item.done`` event for this function-call output.
+
+ :param output: Optional final output value. Uses previously set output if ``None``.
+ :type output: str | list[Any] | None
+ :returns: The emitted event dict.
+ :rtype: dict[str, Any]
+ """
+ if output is not None:
+ self._final_output = deepcopy(output)
+
+ return self._emit_done(
+ {
+ "type": "function_call_output",
+ "id": self._item_id,
+ "call_id": self._call_id,
+ "output": deepcopy(self._final_output) if self._final_output is not None else "",
+ "status": "completed",
+ }
+ )
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/streaming/_builders/_message.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/streaming/_builders/_message.py
new file mode 100644
index 000000000000..8c37ab3f5b24
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/streaming/_builders/_message.py
@@ -0,0 +1,457 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Message-related builders: text content, refusal content, and message output item."""
+
+from __future__ import annotations
+
+from collections.abc import AsyncIterable
+from copy import deepcopy
+from typing import TYPE_CHECKING, Any, AsyncIterator, Iterator
+
+from ...models import _generated as generated_models
+from ._base import EVENT_TYPE, BaseOutputItemBuilder, BuilderLifecycleState
+
+if TYPE_CHECKING:
+ from .._event_stream import ResponseEventStream
+
+
+class TextContentBuilder:
+ """Scoped builder for a text content part within an output message item."""
+
+ def __init__(self, stream: "ResponseEventStream", output_index: int, content_index: int, item_id: str) -> None:
+ """Initialize the text content builder.
+
+ :param stream: The parent event stream.
+ :type stream: ResponseEventStream
+ :param output_index: Zero-based index of the parent output item.
+ :type output_index: int
+ :param content_index: Zero-based index of this content part.
+ :type content_index: int
+ :param item_id: Identifier of the parent output item.
+ :type item_id: str
+ """
+ self._stream = stream
+ self._output_index = output_index
+ self._content_index = content_index
+ self._item_id = item_id
+ self._final_text: str | None = None
+ self._delta_fragments: list[str] = []
+ self._annotation_index = 0
+ self._lifecycle_state = BuilderLifecycleState.NOT_STARTED
+
+ @property
+ def final_text(self) -> str | None:
+ """Return the final merged text, or ``None`` if not yet done.
+
+ :returns: The final text string.
+ :rtype: str | None
+ """
+ return self._final_text
+
+ @property
+ def content_index(self) -> int:
+ """Return the zero-based content part index.
+
+ :returns: The content index.
+ :rtype: int
+ """
+ return self._content_index
+
+ def emit_added(self) -> generated_models.ResponseStreamEvent:
+ """Emit a ``content_part.added`` event for this text content.
+
+ :returns: The emitted event dict.
+ :rtype: ResponseStreamEvent
+ :raises ValueError: If the builder is not in ``NOT_STARTED`` state.
+ """
+ if self._lifecycle_state is not BuilderLifecycleState.NOT_STARTED:
+ raise ValueError(f"cannot call emit_added in '{self._lifecycle_state.value}' state")
+ self._lifecycle_state = BuilderLifecycleState.ADDED
+ return self._stream.emit_event(
+ {
+ "type": EVENT_TYPE.RESPONSE_CONTENT_PART_ADDED.value,
+ "item_id": self._item_id,
+ "output_index": self._output_index,
+ "content_index": self._content_index,
+ "part": {"type": "output_text", "text": "", "annotations": [], "logprobs": []},
+ }
+ )
+
+ def emit_delta(self, text: str) -> generated_models.ResponseStreamEvent:
+ if self._lifecycle_state is not BuilderLifecycleState.ADDED:
+ raise ValueError(f"cannot call emit_delta in '{self._lifecycle_state.value}' state")
+ self._delta_fragments.append(text)
+ return self._stream.emit_event(
+ {
+ "type": EVENT_TYPE.RESPONSE_OUTPUT_TEXT_DELTA.value,
+ "item_id": self._item_id,
+ "output_index": self._output_index,
+ "content_index": self._content_index,
+ "delta": text,
+ "logprobs": [],
+ }
+ )
+
+ def emit_done(self, final_text: str | None = None) -> generated_models.ResponseStreamEvent:
+ """Emit a text done event with the merged final text.
+
+ :param final_text: Optional override for the final text; uses merged deltas if ``None``.
+ :type final_text: str | None
+ :returns: The emitted event dict.
+ :rtype: ResponseStreamEvent
+ :raises ValueError: If the builder is not in ``ADDED`` state.
+ """
+ if self._lifecycle_state is not BuilderLifecycleState.ADDED:
+ raise ValueError(f"cannot call emit_done in '{self._lifecycle_state.value}' state")
+ self._lifecycle_state = BuilderLifecycleState.DONE
+ merged_text = "".join(self._delta_fragments)
+ if not merged_text and final_text is not None:
+ merged_text = final_text
+ self._final_text = merged_text
+ return self._stream.emit_event(
+ {
+ "type": EVENT_TYPE.RESPONSE_OUTPUT_TEXT_DONE.value,
+ "item_id": self._item_id,
+ "output_index": self._output_index,
+ "content_index": self._content_index,
+ "text": merged_text,
+ "logprobs": [],
+ }
+ )
+
+ def emit_annotation_added(self, annotation: dict[str, Any]) -> generated_models.ResponseStreamEvent:
+ """Emit a text annotation added event.
+
+ :param annotation: The annotation dict to attach.
+ :type annotation: dict[str, Any]
+ :returns: The emitted event dict.
+ :rtype: ResponseStreamEvent
+ """
+ annotation_index = self._annotation_index
+ self._annotation_index += 1
+ return self._stream.emit_event(
+ {
+ "type": EVENT_TYPE.RESPONSE_OUTPUT_TEXT_ANNOTATION_ADDED.value,
+ "item_id": self._item_id,
+ "output_index": self._output_index,
+ "content_index": self._content_index,
+ "annotation_index": annotation_index,
+ "annotation": deepcopy(annotation),
+ }
+ )
+
+
+class RefusalContentBuilder:
+ """Scoped builder for a refusal content part within an output message item."""
+
+ def __init__(self, stream: "ResponseEventStream", output_index: int, content_index: int, item_id: str) -> None:
+ """Initialize the refusal content builder.
+
+ :param stream: The parent event stream.
+ :type stream: ResponseEventStream
+ :param output_index: Zero-based index of the parent output item.
+ :type output_index: int
+ :param content_index: Zero-based index of this content part.
+ :type content_index: int
+ :param item_id: Identifier of the parent output item.
+ :type item_id: str
+ """
+ self._stream = stream
+ self._output_index = output_index
+ self._content_index = content_index
+ self._item_id = item_id
+ self._final_refusal: str | None = None
+ self._lifecycle_state = BuilderLifecycleState.NOT_STARTED
+
+ @property
+ def final_refusal(self) -> str | None:
+ """Return the final refusal text, or ``None`` if not yet done.
+
+ :returns: The final refusal string.
+ :rtype: str | None
+ """
+ return self._final_refusal
+
+ @property
+ def content_index(self) -> int:
+ """Return the zero-based content part index.
+
+ :returns: The content index.
+ :rtype: int
+ """
+ return self._content_index
+
+ def emit_added(self) -> generated_models.ResponseStreamEvent:
+ """Emit a ``content_part.added`` event for this refusal content.
+
+ :returns: The emitted event dict.
+ :rtype: ResponseStreamEvent
+ :raises ValueError: If the builder is not in ``NOT_STARTED`` state.
+ """
+ if self._lifecycle_state is not BuilderLifecycleState.NOT_STARTED:
+ raise ValueError(f"cannot call emit_added in '{self._lifecycle_state.value}' state")
+ self._lifecycle_state = BuilderLifecycleState.ADDED
+ return self._stream.emit_event(
+ {
+ "type": EVENT_TYPE.RESPONSE_CONTENT_PART_ADDED.value,
+ "item_id": self._item_id,
+ "output_index": self._output_index,
+ "content_index": self._content_index,
+ "part": {"type": "refusal", "refusal": ""},
+ }
+ )
+
+ def emit_delta(self, text: str) -> generated_models.ResponseStreamEvent:
+ """Emit a refusal delta event.
+
+ :param text: The incremental refusal text fragment.
+ :type text: str
+ :returns: The emitted event dict.
+ :rtype: ResponseStreamEvent
+ """
+ return self._stream.emit_event(
+ {
+ "type": EVENT_TYPE.RESPONSE_REFUSAL_DELTA.value,
+ "item_id": self._item_id,
+ "output_index": self._output_index,
+ "content_index": self._content_index,
+ "delta": text,
+ }
+ )
+
+ def emit_done(self, final_refusal: str) -> generated_models.ResponseStreamEvent:
+ """Emit a refusal done event.
+
+ :param final_refusal: The final, complete refusal text.
+ :type final_refusal: str
+ :returns: The emitted event dict.
+ :rtype: ResponseStreamEvent
+ :raises ValueError: If the builder is not in ``ADDED`` state.
+ """
+ if self._lifecycle_state is not BuilderLifecycleState.ADDED:
+ raise ValueError(f"cannot call emit_done in '{self._lifecycle_state.value}' state")
+ self._lifecycle_state = BuilderLifecycleState.DONE
+ self._final_refusal = final_refusal
+ return self._stream.emit_event(
+ {
+ "type": EVENT_TYPE.RESPONSE_REFUSAL_DONE.value,
+ "item_id": self._item_id,
+ "output_index": self._output_index,
+ "content_index": self._content_index,
+ "refusal": final_refusal,
+ }
+ )
+
+
+class OutputItemMessageBuilder(BaseOutputItemBuilder):
+ """Scoped builder for a message output item in stream mode."""
+
+ def __init__(
+ self,
+ stream: "ResponseEventStream",
+ output_index: int,
+ item_id: str,
+ ) -> None:
+ """Initialize the message output item builder.
+
+ :param stream: The parent event stream.
+ :type stream: ResponseEventStream
+ :param output_index: Zero-based index of this output item.
+ :type output_index: int
+ :param item_id: Unique identifier for this output item.
+ :type item_id: str
+ """
+ super().__init__(stream=stream, output_index=output_index, item_id=item_id)
+ self._content_index = 0
+ self._completed_contents: list[dict[str, Any]] = []
+
+ def emit_added(self) -> generated_models.ResponseStreamEvent:
+ """Emit an ``output_item.added`` event for this message item.
+
+ :returns: The emitted event dict.
+ :rtype: ResponseStreamEvent
+ """
+ return self._emit_added(
+ {
+ "type": "message",
+ "id": self._item_id,
+ "role": "assistant",
+ "content": [],
+ "status": "in_progress",
+ }
+ )
+
+ def add_text_content(self) -> TextContentBuilder:
+ """Create and return a text content part builder.
+
+ :returns: A new text content builder scoped to this message.
+ :rtype: TextContentBuilder
+ """
+ content_index = self._content_index
+ self._content_index += 1
+ return TextContentBuilder(
+ stream=self._stream,
+ output_index=self._output_index,
+ content_index=content_index,
+ item_id=self._item_id,
+ )
+
+ def add_refusal_content(self) -> RefusalContentBuilder:
+ """Create and return a refusal content part builder.
+
+ :returns: A new refusal content builder scoped to this message.
+ :rtype: RefusalContentBuilder
+ """
+ content_index = self._content_index
+ self._content_index += 1
+ return RefusalContentBuilder(
+ stream=self._stream,
+ output_index=self._output_index,
+ content_index=content_index,
+ item_id=self._item_id,
+ )
+
+ def emit_content_done(
+ self, content_builder: TextContentBuilder | RefusalContentBuilder
+ ) -> generated_models.ResponseStreamEvent:
+ """Emit a ``content_part.done`` event for a completed content part.
+
+ :param content_builder: The content builder whose final state to emit.
+ :type content_builder: TextContentBuilder | RefusalContentBuilder
+ :returns: The emitted event dict.
+ :rtype: ResponseStreamEvent
+ """
+ if isinstance(content_builder, TextContentBuilder):
+ part = {
+ "type": "output_text",
+ "text": content_builder.final_text or "",
+ "annotations": [],
+ "logprobs": [],
+ }
+ content_index = content_builder.content_index
+ else:
+ part = {
+ "type": "refusal",
+ "refusal": content_builder.final_refusal or "",
+ }
+ content_index = content_builder.content_index
+
+ self._completed_contents.append(deepcopy(part))
+ return self._stream.emit_event(
+ {
+ "type": EVENT_TYPE.RESPONSE_CONTENT_PART_DONE.value,
+ "item_id": self._item_id,
+ "output_index": self._output_index,
+ "content_index": content_index,
+ "part": deepcopy(part),
+ }
+ )
+
+ def emit_done(self) -> generated_models.ResponseStreamEvent:
+ """Emit an ``output_item.done`` event for this message item.
+
+ :returns: The emitted event dict.
+ :rtype: ResponseStreamEvent
+ :raises ValueError: If no content parts have been completed.
+ """
+ if len(self._completed_contents) == 0:
+ raise ValueError("message output item requires at least one content part before emit_done")
+ return self._emit_done(
+ {
+ "type": "message",
+ "id": self._item_id,
+ "role": "assistant",
+ "content": deepcopy(self._completed_contents),
+ "status": "completed",
+ }
+ )
+
+ # ---- Sub-item convenience generators (S-053) ----
+
+ def text_content(self, text: str) -> Iterator[generated_models.ResponseStreamEvent]:
+ """Yield the full lifecycle for a text content part.
+
+ Creates the sub-builder, emits ``content_part.added``,
+ ``output_text.delta``, ``output_text.done``, and ``content_part.done``.
+
+ :param text: The complete text content.
+ :type text: str
+ :returns: An iterator of event dicts.
+ :rtype: Iterator[ResponseStreamEvent]
+ """
+ tc = self.add_text_content()
+ yield tc.emit_added()
+ yield tc.emit_delta(text)
+ yield tc.emit_done(text)
+ yield self.emit_content_done(tc)
+
+ async def atext_content(
+ self, text: str | AsyncIterable[str]
+ ) -> AsyncIterator[generated_models.ResponseStreamEvent]:
+ """Async variant of :meth:`text_content` with streaming support.
+
+ When *text* is a string, behaves identically to :meth:`text_content`.
+ When *text* is an async iterable of string chunks, emits one
+ ``output_text.delta`` per chunk in real time (S-055), then
+ ``output_text.done`` with the accumulated text.
+
+ :param text: Complete text or async iterable of text chunks.
+ :type text: str | AsyncIterable[str]
+ :returns: An async iterator of event dicts.
+ :rtype: AsyncIterator[ResponseStreamEvent]
+ """
+ if isinstance(text, str):
+ for event in self.text_content(text):
+ yield event
+ return
+ tc = self.add_text_content()
+ yield tc.emit_added()
+ async for chunk in text:
+ yield tc.emit_delta(chunk)
+ yield tc.emit_done()
+ yield self.emit_content_done(tc)
+
+ def refusal_content(self, text: str) -> Iterator[generated_models.ResponseStreamEvent]:
+ """Yield the full lifecycle for a refusal content part.
+
+ Creates the sub-builder, emits ``content_part.added``,
+ ``refusal.delta``, ``refusal.done``, and ``content_part.done``.
+
+ :param text: The complete refusal text.
+ :type text: str
+ :returns: An iterator of event dicts.
+ :rtype: Iterator[ResponseStreamEvent]
+ """
+ rc = self.add_refusal_content()
+ yield rc.emit_added()
+ yield rc.emit_delta(text)
+ yield rc.emit_done(text)
+ yield self.emit_content_done(rc)
+
+ async def arefusal_content(
+ self, text: str | AsyncIterable[str]
+ ) -> AsyncIterator[generated_models.ResponseStreamEvent]:
+ """Async variant of :meth:`refusal_content` with streaming support.
+
+ When *text* is a string, behaves identically to :meth:`refusal_content`.
+ When *text* is an async iterable of string chunks, emits one
+ ``refusal.delta`` per chunk in real time (S-055), then
+ ``refusal.done`` with the accumulated text.
+
+ :param text: Complete refusal text or async iterable of text chunks.
+ :type text: str | AsyncIterable[str]
+ :returns: An async iterator of event dicts.
+ :rtype: AsyncIterator[ResponseStreamEvent]
+ """
+ if isinstance(text, str):
+ for event in self.refusal_content(text):
+ yield event
+ return
+ rc = self.add_refusal_content()
+ yield rc.emit_added()
+ accumulated: list[str] = []
+ async for chunk in text:
+ accumulated.append(chunk)
+ yield rc.emit_delta(chunk)
+ yield rc.emit_done("".join(accumulated))
+ yield self.emit_content_done(rc)
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/streaming/_builders/_reasoning.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/streaming/_builders/_reasoning.py
new file mode 100644
index 000000000000..a30f43358dc2
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/streaming/_builders/_reasoning.py
@@ -0,0 +1,242 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Reasoning-related builders: summary parts and reasoning output items."""
+
+from __future__ import annotations
+
+from collections.abc import AsyncIterable
+from copy import deepcopy
+from typing import TYPE_CHECKING, Any, AsyncIterator, Iterator
+
+from ._base import EVENT_TYPE, BaseOutputItemBuilder, BuilderLifecycleState
+
+if TYPE_CHECKING:
+ from .._event_stream import ResponseEventStream
+
+
+class ReasoningSummaryPartBuilder:
+ """Scoped builder for a single reasoning summary part."""
+
+ def __init__(self, stream: "ResponseEventStream", output_index: int, summary_index: int, item_id: str) -> None:
+ """Initialize the reasoning summary part builder.
+
+ :param stream: The parent event stream.
+ :type stream: ResponseEventStream
+ :param output_index: Zero-based index of the parent output item.
+ :type output_index: int
+ :param summary_index: Zero-based index of this summary part.
+ :type summary_index: int
+ :param item_id: Identifier of the parent output item.
+ :type item_id: str
+ """
+ self._stream = stream
+ self._output_index = output_index
+ self._summary_index = summary_index
+ self._item_id = item_id
+ self._final_text: str | None = None
+ self._lifecycle_state = BuilderLifecycleState.NOT_STARTED
+
+ @property
+ def final_text(self) -> str | None:
+ """Return the final summary text, or ``None`` if not yet done.
+
+ :returns: The final text string.
+ :rtype: str | None
+ """
+ return self._final_text
+
+ @property
+ def summary_index(self) -> int:
+ """Return the zero-based summary part index.
+
+ :returns: The summary index.
+ :rtype: int
+ """
+ return self._summary_index
+
+ def emit_added(self) -> dict[str, Any]:
+ """Emit a ``reasoning_summary_part.added`` event.
+
+ :returns: The emitted event dict.
+ :rtype: dict[str, Any]
+ :raises ValueError: If the builder is not in ``NOT_STARTED`` state.
+ """
+ if self._lifecycle_state is not BuilderLifecycleState.NOT_STARTED:
+ raise ValueError(f"cannot call emit_added in '{self._lifecycle_state.value}' state")
+ self._lifecycle_state = BuilderLifecycleState.ADDED
+ return self._stream.emit_event(
+ {
+ "type": EVENT_TYPE.RESPONSE_REASONING_SUMMARY_PART_ADDED.value,
+ "item_id": self._item_id,
+ "output_index": self._output_index,
+ "summary_index": self._summary_index,
+ "part": {"type": "summary_text", "text": ""},
+ }
+ )
+
+ def emit_text_delta(self, text: str) -> dict[str, Any]:
+ """Emit a reasoning summary text delta event.
+
+ :param text: The incremental summary text fragment.
+ :type text: str
+ :returns: The emitted event dict.
+ :rtype: dict[str, Any]
+ """
+ return self._stream.emit_event(
+ {
+ "type": EVENT_TYPE.RESPONSE_REASONING_SUMMARY_TEXT_DELTA.value,
+ "item_id": self._item_id,
+ "output_index": self._output_index,
+ "summary_index": self._summary_index,
+ "delta": text,
+ }
+ )
+
+ def emit_text_done(self, final_text: str) -> dict[str, Any]:
+ """Emit a reasoning summary text done event.
+
+ :param final_text: The final, complete summary text.
+ :type final_text: str
+ :returns: The emitted event dict.
+ :rtype: dict[str, Any]
+ """
+ self._final_text = final_text
+ return self._stream.emit_event(
+ {
+ "type": EVENT_TYPE.RESPONSE_REASONING_SUMMARY_TEXT_DONE.value,
+ "item_id": self._item_id,
+ "output_index": self._output_index,
+ "summary_index": self._summary_index,
+ "text": final_text,
+ }
+ )
+
+ def emit_done(self) -> dict[str, Any]:
+ """Emit a ``reasoning_summary_part.done`` event.
+
+ :returns: The emitted event dict.
+ :rtype: dict[str, Any]
+ :raises ValueError: If the builder is not in ``ADDED`` state.
+ """
+ if self._lifecycle_state is not BuilderLifecycleState.ADDED:
+ raise ValueError(f"cannot call emit_done in '{self._lifecycle_state.value}' state")
+ self._lifecycle_state = BuilderLifecycleState.DONE
+ return self._stream.emit_event(
+ {
+ "type": EVENT_TYPE.RESPONSE_REASONING_SUMMARY_PART_DONE.value,
+ "item_id": self._item_id,
+ "output_index": self._output_index,
+ "summary_index": self._summary_index,
+ "part": {"type": "summary_text", "text": self._final_text or ""},
+ }
+ )
+
+
+class OutputItemReasoningItemBuilder(BaseOutputItemBuilder):
+ """Scoped builder for reasoning output items with summary part support."""
+
+ def __init__(self, stream: "ResponseEventStream", output_index: int, item_id: str) -> None:
+ """Initialize the reasoning output item builder.
+
+ :param stream: The parent event stream.
+ :type stream: ResponseEventStream
+ :param output_index: Zero-based index of this output item.
+ :type output_index: int
+ :param item_id: Unique identifier for this output item.
+ :type item_id: str
+ """
+ super().__init__(stream=stream, output_index=output_index, item_id=item_id)
+ self._summary_index = 0
+ self._completed_summaries: list[dict[str, Any]] = []
+
+ def emit_added(self) -> dict[str, Any]:
+ """Emit an ``output_item.added`` event for this reasoning item.
+
+ :returns: The emitted event dict.
+ :rtype: dict[str, Any]
+ """
+ return self._emit_added({"type": "reasoning", "id": self._item_id, "summary": [], "status": "in_progress"})
+
+ def add_summary_part(self) -> ReasoningSummaryPartBuilder:
+ """Create and return a reasoning summary part builder.
+
+ :returns: A new summary part builder scoped to this reasoning item.
+ :rtype: ReasoningSummaryPartBuilder
+ """
+ summary_index = self._summary_index
+ self._summary_index += 1
+ return ReasoningSummaryPartBuilder(self._stream, self._output_index, summary_index, self._item_id)
+
+ def emit_summary_part_done(self, summary_part: ReasoningSummaryPartBuilder) -> None:
+ """Record a completed summary part for inclusion in the done event.
+
+ :param summary_part: The completed summary part builder.
+ :type summary_part: ReasoningSummaryPartBuilder
+ :rtype: None
+ """
+ self._completed_summaries.append({"type": "summary_text", "text": summary_part.final_text or ""})
+
+ def emit_done(self) -> dict[str, Any]:
+ """Emit an ``output_item.done`` event for this reasoning item.
+
+ :returns: The emitted event dict.
+ :rtype: dict[str, Any]
+ """
+ return self._emit_done(
+ {
+ "type": "reasoning",
+ "id": self._item_id,
+ "summary": deepcopy(self._completed_summaries),
+ "status": "completed",
+ }
+ )
+
+ # ---- Sub-item convenience generators (S-053) ----
+
+ def summary_part(self, text: str) -> Iterator[dict[str, Any]]:
+ """Yield the full lifecycle for a reasoning summary part.
+
+ Creates the sub-builder, emits ``reasoning_summary_part.added``,
+ ``reasoning_summary_text.delta``, ``reasoning_summary_text.done``,
+ ``reasoning_summary_part.done``, and registers the part with the
+ parent via :meth:`emit_summary_part_done`.
+
+ :param text: The complete summary text.
+ :type text: str
+ :returns: An iterator of event dicts.
+ :rtype: Iterator[dict[str, Any]]
+ """
+ part = self.add_summary_part()
+ yield part.emit_added()
+ yield part.emit_text_delta(text)
+ yield part.emit_text_done(text)
+ yield part.emit_done()
+ self.emit_summary_part_done(part)
+
+ async def asummary_part(self, text: str | AsyncIterable[str]) -> AsyncIterator[dict[str, Any]]:
+ """Async variant of :meth:`summary_part` with streaming support.
+
+ When *text* is a string, behaves identically to :meth:`summary_part`.
+ When *text* is an async iterable of string chunks, emits one
+ ``reasoning_summary_text.delta`` per chunk in real time (S-055),
+ then ``reasoning_summary_text.done`` with the accumulated text.
+
+ :param text: Complete summary text or async iterable of text chunks.
+ :type text: str | AsyncIterable[str]
+ :returns: An async iterator of event dicts.
+ :rtype: AsyncIterator[dict[str, Any]]
+ """
+ if isinstance(text, str):
+ for event in self.summary_part(text):
+ yield event
+ return
+ part = self.add_summary_part()
+ yield part.emit_added()
+ accumulated: list[str] = []
+ async for chunk in text:
+ accumulated.append(chunk)
+ yield part.emit_text_delta(chunk)
+ final = "".join(accumulated)
+ yield part.emit_text_done(final)
+ yield part.emit_done()
+ self.emit_summary_part_done(part)
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/streaming/_builders/_tools.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/streaming/_builders/_tools.py
new file mode 100644
index 000000000000..b2652dd4ddaf
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/streaming/_builders/_tools.py
@@ -0,0 +1,743 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Tool call builders: file search, web search, code interpreter, image gen, MCP, and custom tools."""
+
+from __future__ import annotations
+
+from collections.abc import AsyncIterable
+from typing import TYPE_CHECKING, AsyncIterator, Iterator
+
+from ...models import _generated as generated_models
+from ._base import EVENT_TYPE, BaseOutputItemBuilder, _require_non_empty
+
+if TYPE_CHECKING:
+ from .._event_stream import ResponseEventStream
+
+
+class OutputItemFileSearchCallBuilder(BaseOutputItemBuilder):
+ """Scoped builder for file search tool call events."""
+
+ def emit_added(self) -> generated_models.ResponseStreamEvent:
+ """Emit an ``output_item.added`` event for a file search call.
+
+ :returns: The emitted event dict.
+ :rtype: ResponseStreamEvent
+ """
+ return self._emit_added(
+ {
+ "type": "file_search_call",
+ "id": self._item_id,
+ "status": "in_progress",
+ "queries": [],
+ }
+ )
+
+ def emit_in_progress(self) -> generated_models.ResponseStreamEvent:
+ """Emit a file-search in-progress state event.
+
+ :returns: The emitted event dict.
+ :rtype: ResponseStreamEvent
+ """
+ return self._emit_item_state_event(EVENT_TYPE.RESPONSE_FILE_SEARCH_CALL_IN_PROGRESS.value)
+
+ def emit_searching(self) -> generated_models.ResponseStreamEvent:
+ """Emit a file-search searching state event.
+
+ :returns: The emitted event dict.
+ :rtype: ResponseStreamEvent
+ """
+ return self._emit_item_state_event(EVENT_TYPE.RESPONSE_FILE_SEARCH_CALL_SEARCHING.value)
+
+ def emit_completed(self) -> generated_models.ResponseStreamEvent:
+ """Emit a file-search completed state event.
+
+ :returns: The emitted event dict.
+ :rtype: ResponseStreamEvent
+ """
+ return self._emit_item_state_event(EVENT_TYPE.RESPONSE_FILE_SEARCH_CALL_COMPLETED.value)
+
+ def emit_done(self) -> generated_models.ResponseStreamEvent:
+ """Emit an ``output_item.done`` event for this file search call.
+
+ :returns: The emitted event dict.
+ :rtype: ResponseStreamEvent
+ """
+ return self._emit_done({"type": "file_search_call", "id": self._item_id, "status": "completed", "queries": []})
+
+
+class OutputItemWebSearchCallBuilder(BaseOutputItemBuilder):
+ """Scoped builder for web search tool call events."""
+
+ def emit_added(self) -> generated_models.ResponseStreamEvent:
+ """Emit an ``output_item.added`` event for a web search call.
+
+ :returns: The emitted event dict.
+ :rtype: ResponseStreamEvent
+ """
+ return self._emit_added({"type": "web_search_call", "id": self._item_id, "status": "in_progress", "action": {}})
+
+ def emit_in_progress(self) -> generated_models.ResponseStreamEvent:
+ """Emit a web-search in-progress state event.
+
+ :returns: The emitted event dict.
+ :rtype: ResponseStreamEvent
+ """
+ return self._emit_item_state_event(EVENT_TYPE.RESPONSE_WEB_SEARCH_CALL_IN_PROGRESS.value)
+
+ def emit_searching(self) -> generated_models.ResponseStreamEvent:
+ """Emit a web-search searching state event.
+
+ :returns: The emitted event dict.
+ :rtype: ResponseStreamEvent
+ """
+ return self._emit_item_state_event(EVENT_TYPE.RESPONSE_WEB_SEARCH_CALL_SEARCHING.value)
+
+ def emit_completed(self) -> generated_models.ResponseStreamEvent:
+ """Emit a web-search completed state event.
+
+ :returns: The emitted event dict.
+ :rtype: ResponseStreamEvent
+ """
+ return self._emit_item_state_event(EVENT_TYPE.RESPONSE_WEB_SEARCH_CALL_COMPLETED.value)
+
+ def emit_done(self) -> generated_models.ResponseStreamEvent:
+ """Emit an ``output_item.done`` event for this web search call.
+
+ :returns: The emitted event dict.
+ :rtype: ResponseStreamEvent
+ """
+ return self._emit_done({"type": "web_search_call", "id": self._item_id, "status": "completed", "action": {}})
+
+
+class OutputItemCodeInterpreterCallBuilder(BaseOutputItemBuilder):
+ """Scoped builder for code interpreter tool call events."""
+
+ def __init__(self, stream: "ResponseEventStream", output_index: int, item_id: str) -> None:
+ """Initialize the code-interpreter call builder.
+
+ :param stream: The parent event stream.
+ :type stream: ResponseEventStream
+ :param output_index: Zero-based index of this output item.
+ :type output_index: int
+ :param item_id: Unique identifier for this output item.
+ :type item_id: str
+ """
+ super().__init__(stream=stream, output_index=output_index, item_id=item_id)
+ self._final_code: str | None = None
+
+ def emit_added(self) -> generated_models.ResponseStreamEvent:
+ """Emit an ``output_item.added`` event for a code interpreter call.
+
+ :returns: The emitted event dict.
+ :rtype: ResponseStreamEvent
+ """
+ return self._emit_added(
+ {
+ "type": "code_interpreter_call",
+ "id": self._item_id,
+ "status": "in_progress",
+ "container_id": "",
+ "code": "",
+ "outputs": [],
+ }
+ )
+
+ def emit_in_progress(self) -> generated_models.ResponseStreamEvent:
+ """Emit a code-interpreter in-progress state event.
+
+ :returns: The emitted event dict.
+ :rtype: ResponseStreamEvent
+ """
+ return self._emit_item_state_event(EVENT_TYPE.RESPONSE_CODE_INTERPRETER_CALL_IN_PROGRESS.value)
+
+ def emit_interpreting(self) -> generated_models.ResponseStreamEvent:
+ """Emit a code-interpreter interpreting state event.
+
+ :returns: The emitted event dict.
+ :rtype: ResponseStreamEvent
+ """
+ return self._emit_item_state_event(EVENT_TYPE.RESPONSE_CODE_INTERPRETER_CALL_INTERPRETING.value)
+
+ def emit_code_delta(self, delta: str) -> generated_models.ResponseStreamEvent:
+ """Emit a code-interpreter code delta event.
+
+ :param delta: The incremental code fragment.
+ :type delta: str
+ :returns: The emitted event dict.
+ :rtype: ResponseStreamEvent
+ """
+ return self._emit_item_state_event(
+ EVENT_TYPE.RESPONSE_CODE_INTERPRETER_CALL_CODE_DELTA.value,
+ extra_payload={"delta": delta},
+ )
+
+ def emit_code_done(self, code: str) -> generated_models.ResponseStreamEvent:
+ """Emit a code-interpreter code done event.
+
+ :param code: The final, complete code string.
+ :type code: str
+ :returns: The emitted event dict.
+ :rtype: ResponseStreamEvent
+ """
+ self._final_code = code
+ return self._emit_item_state_event(
+ EVENT_TYPE.RESPONSE_CODE_INTERPRETER_CALL_CODE_DONE.value,
+ extra_payload={"code": code},
+ )
+
+ def emit_completed(self) -> generated_models.ResponseStreamEvent:
+ """Emit a code-interpreter completed state event.
+
+ :returns: The emitted event dict.
+ :rtype: ResponseStreamEvent
+ """
+ return self._emit_item_state_event(EVENT_TYPE.RESPONSE_CODE_INTERPRETER_CALL_COMPLETED.value)
+
+ def emit_done(self) -> generated_models.ResponseStreamEvent:
+ """Emit an ``output_item.done`` event for this code interpreter call.
+
+ :returns: The emitted event dict.
+ :rtype: ResponseStreamEvent
+ """
+ return self._emit_done(
+ {
+ "type": "code_interpreter_call",
+ "id": self._item_id,
+ "status": "completed",
+ "container_id": "",
+ "code": self._final_code or "",
+ "outputs": [],
+ }
+ )
+
+ # ---- Sub-item convenience generators (S-053) ----
+
+ def code(self, code_text: str) -> Iterator[generated_models.ResponseStreamEvent]:
+ """Yield the code delta and code done events.
+
+ Emits ``code_interpreter_call.code.delta`` followed by
+ ``code_interpreter_call.code.done``.
+
+ :param code_text: The complete code string.
+ :type code_text: str
+ :returns: An iterator of event dicts.
+ :rtype: Iterator[ResponseStreamEvent]
+ """
+ yield self.emit_code_delta(code_text)
+ yield self.emit_code_done(code_text)
+
+ async def acode(self, code_text: str | AsyncIterable[str]) -> AsyncIterator[generated_models.ResponseStreamEvent]:
+ """Async variant of :meth:`code` with streaming support.
+
+ When *code_text* is a string, behaves identically to :meth:`code`.
+ When *code_text* is an async iterable of string chunks, emits one
+ ``code_interpreter_call.code.delta`` per chunk in real time (S-055),
+ then ``code_interpreter_call.code.done`` with the accumulated text.
+
+ :param code_text: Complete code string or async iterable of chunks.
+ :type code_text: str | AsyncIterable[str]
+ :returns: An async iterator of event dicts.
+ :rtype: AsyncIterator[ResponseStreamEvent]
+ """
+ if isinstance(code_text, str):
+ for event in self.code(code_text):
+ yield event
+ return
+ accumulated: list[str] = []
+ async for chunk in code_text:
+ accumulated.append(chunk)
+ yield self.emit_code_delta(chunk)
+ yield self.emit_code_done("".join(accumulated))
+
+
+class OutputItemImageGenCallBuilder(BaseOutputItemBuilder):
+ """Scoped builder for image generation tool call events."""
+
+ def __init__(self, stream: "ResponseEventStream", output_index: int, item_id: str) -> None:
+ """Initialize the image-generation call builder.
+
+ :param stream: The parent event stream.
+ :type stream: ResponseEventStream
+ :param output_index: Zero-based index of this output item.
+ :type output_index: int
+ :param item_id: Unique identifier for this output item.
+ :type item_id: str
+ """
+ super().__init__(stream=stream, output_index=output_index, item_id=item_id)
+ self._partial_image_index = 0
+
+ def emit_added(self) -> generated_models.ResponseStreamEvent:
+ """Emit an ``output_item.added`` event for an image generation call.
+
+ :returns: The emitted event dict.
+ :rtype: ResponseStreamEvent
+ """
+ return self._emit_added(
+ {
+ "type": "image_generation_call",
+ "id": self._item_id,
+ "status": "in_progress",
+ "result": "",
+ }
+ )
+
+ def emit_in_progress(self) -> generated_models.ResponseStreamEvent:
+ """Emit an image-generation in-progress state event.
+
+ :returns: The emitted event dict.
+ :rtype: ResponseStreamEvent
+ """
+ return self._emit_item_state_event(EVENT_TYPE.RESPONSE_IMAGE_GENERATION_CALL_IN_PROGRESS.value)
+
+ def emit_generating(self) -> generated_models.ResponseStreamEvent:
+ """Emit an image-generation generating state event.
+
+ :returns: The emitted event dict.
+ :rtype: ResponseStreamEvent
+ """
+ return self._emit_item_state_event(EVENT_TYPE.RESPONSE_IMAGE_GENERATION_CALL_GENERATING.value)
+
+ def emit_partial_image(self, partial_image_b64: str) -> generated_models.ResponseStreamEvent:
+ """Emit a partial image event with base64-encoded image data.
+
+ :param partial_image_b64: Base64-encoded partial image data.
+ :type partial_image_b64: str
+ :returns: The emitted event dict.
+ :rtype: ResponseStreamEvent
+ """
+ partial_index = self._partial_image_index
+ self._partial_image_index += 1
+ return self._emit_item_state_event(
+ EVENT_TYPE.RESPONSE_IMAGE_GENERATION_CALL_PARTIAL_IMAGE.value,
+ extra_payload={"partial_image_index": partial_index, "partial_image_b64": partial_image_b64},
+ )
+
+ def emit_completed(self) -> generated_models.ResponseStreamEvent:
+ """Emit an image-generation completed state event.
+
+ :returns: The emitted event dict.
+ :rtype: ResponseStreamEvent
+ """
+ return self._emit_item_state_event(EVENT_TYPE.RESPONSE_IMAGE_GENERATION_CALL_COMPLETED.value)
+
+ def emit_done(self) -> generated_models.ResponseStreamEvent:
+ """Emit an ``output_item.done`` event for this image generation call.
+
+ :returns: The emitted event dict.
+ :rtype: ResponseStreamEvent
+ """
+ return self._emit_done(
+ {
+ "type": "image_generation_call",
+ "id": self._item_id,
+ "status": "completed",
+ "result": "",
+ }
+ )
+
+
+class OutputItemMcpCallBuilder(BaseOutputItemBuilder):
+ """Scoped builder for MCP tool call events."""
+
+ def __init__(
+ self,
+ stream: "ResponseEventStream",
+ output_index: int,
+ item_id: str,
+ server_label: str,
+ name: str,
+ ) -> None:
+ """Initialize the MCP call builder.
+
+ :param stream: The parent event stream.
+ :type stream: ResponseEventStream
+ :param output_index: Zero-based index of this output item.
+ :type output_index: int
+ :param item_id: Unique identifier for this output item.
+ :type item_id: str
+ :param server_label: Label identifying the MCP server.
+ :type server_label: str
+ :param name: Name of the MCP tool being called.
+ :type name: str
+ """
+ super().__init__(stream=stream, output_index=output_index, item_id=item_id)
+ self._server_label = _require_non_empty(server_label, "server_label")
+ self._name = _require_non_empty(name, "name")
+ self._final_arguments: str | None = None
+ self._terminal_status: str | None = None
+
+ @property
+ def server_label(self) -> str:
+ """Return the MCP server label.
+
+ :returns: The server label.
+ :rtype: str
+ """
+ return self._server_label
+
+ @property
+ def name(self) -> str:
+ """Return the MCP tool name.
+
+ :returns: The tool name.
+ :rtype: str
+ """
+ return self._name
+
+ def emit_added(self) -> generated_models.ResponseStreamEvent:
+ """Emit an ``output_item.added`` event for an MCP call.
+
+ :returns: The emitted event dict.
+ :rtype: ResponseStreamEvent
+ """
+ return self._emit_added(
+ {
+ "type": "mcp_call",
+ "id": self._item_id,
+ "server_label": self._server_label,
+ "name": self._name,
+ "arguments": "",
+ "status": "in_progress",
+ }
+ )
+
+ def emit_in_progress(self) -> generated_models.ResponseStreamEvent:
+ """Emit an MCP call in-progress state event.
+
+ :returns: The emitted event dict.
+ :rtype: ResponseStreamEvent
+ """
+ return self._emit_item_state_event(EVENT_TYPE.RESPONSE_MCP_CALL_IN_PROGRESS.value)
+
+ def emit_arguments_delta(self, delta: str) -> generated_models.ResponseStreamEvent:
+ """Emit an MCP call arguments delta event.
+
+ :param delta: The incremental arguments text fragment.
+ :type delta: str
+ :returns: The emitted event dict.
+ :rtype: ResponseStreamEvent
+ """
+ return self._emit_item_state_event(
+ EVENT_TYPE.RESPONSE_MCP_CALL_ARGUMENTS_DELTA.value,
+ extra_payload={"delta": delta},
+ )
+
+ def emit_arguments_done(self, arguments: str) -> generated_models.ResponseStreamEvent:
+ """Emit an MCP call arguments done event.
+
+ :param arguments: The final, complete arguments string.
+ :type arguments: str
+ :returns: The emitted event dict.
+ :rtype: ResponseStreamEvent
+ """
+ self._final_arguments = arguments
+ return self._emit_item_state_event(
+ EVENT_TYPE.RESPONSE_MCP_CALL_ARGUMENTS_DONE.value,
+ extra_payload={"arguments": arguments},
+ )
+
+ def emit_completed(self) -> generated_models.ResponseStreamEvent:
+ """Emit an MCP call completed state event.
+
+ :returns: The emitted event dict.
+ :rtype: ResponseStreamEvent
+ """
+ self._terminal_status = "completed"
+ return self._emit_item_state_event(EVENT_TYPE.RESPONSE_MCP_CALL_COMPLETED.value)
+
+ def emit_failed(self) -> generated_models.ResponseStreamEvent:
+ """Emit an MCP call failed state event.
+
+ :returns: The emitted event dict.
+ :rtype: ResponseStreamEvent
+ """
+ self._terminal_status = "failed"
+ return self._emit_item_state_event(EVENT_TYPE.RESPONSE_MCP_CALL_FAILED.value)
+
+ def emit_done(self) -> generated_models.ResponseStreamEvent:
+ """Emit an ``output_item.done`` event for this MCP call.
+
+ The ``status`` field reflects the most recent terminal state event
+ (``emit_completed`` or ``emit_failed``). Defaults to ``"completed"``
+ if neither was called.
+
+ :returns: The emitted event dict.
+ :rtype: ResponseStreamEvent
+ """
+ return self._emit_done(
+ {
+ "type": "mcp_call",
+ "id": self._item_id,
+ "server_label": self._server_label,
+ "name": self._name,
+ "arguments": self._final_arguments or "",
+ "status": self._terminal_status or "completed",
+ }
+ )
+
+ # ---- Sub-item convenience generators (S-053) ----
+
+ def arguments(self, args: str) -> Iterator[generated_models.ResponseStreamEvent]:
+ """Yield the argument delta and done events.
+
+ Emits ``mcp_call_arguments.delta`` followed by
+ ``mcp_call_arguments.done``.
+
+ :param args: The complete arguments string.
+ :type args: str
+ :returns: An iterator of event dicts.
+ :rtype: Iterator[ResponseStreamEvent]
+ """
+ yield self.emit_arguments_delta(args)
+ yield self.emit_arguments_done(args)
+
+ async def aarguments(self, args: str | AsyncIterable[str]) -> AsyncIterator[generated_models.ResponseStreamEvent]:
+ """Async variant of :meth:`arguments` with streaming support.
+
+ When *args* is a string, behaves identically to :meth:`arguments`.
+ When *args* is an async iterable of string chunks, emits one
+ ``mcp_call_arguments.delta`` per chunk in real time (S-055),
+ then ``mcp_call_arguments.done`` with the accumulated text.
+
+ :param args: Complete arguments string or async iterable of chunks.
+ :type args: str | AsyncIterable[str]
+ :returns: An async iterator of event dicts.
+ :rtype: AsyncIterator[ResponseStreamEvent]
+ """
+ if isinstance(args, str):
+ for event in self.arguments(args):
+ yield event
+ return
+ accumulated: list[str] = []
+ async for chunk in args:
+ accumulated.append(chunk)
+ yield self.emit_arguments_delta(chunk)
+ yield self.emit_arguments_done("".join(accumulated))
+
+
+class OutputItemMcpListToolsBuilder(BaseOutputItemBuilder):
+ """Scoped builder for MCP list-tools lifecycle events."""
+
+ def __init__(self, stream: "ResponseEventStream", output_index: int, item_id: str, server_label: str) -> None:
+ """Initialize the MCP list-tools builder.
+
+ :param stream: The parent event stream.
+ :type stream: ResponseEventStream
+ :param output_index: Zero-based index of this output item.
+ :type output_index: int
+ :param item_id: Unique identifier for this output item.
+ :type item_id: str
+ :param server_label: Label identifying the MCP server.
+ :type server_label: str
+ """
+ super().__init__(stream=stream, output_index=output_index, item_id=item_id)
+ self._server_label = _require_non_empty(server_label, "server_label")
+
+ @property
+ def server_label(self) -> str:
+ """Return the MCP server label.
+
+ :returns: The server label.
+ :rtype: str
+ """
+ return self._server_label
+
+ def emit_added(self) -> generated_models.ResponseStreamEvent:
+ """Emit an ``output_item.added`` event for MCP list-tools.
+
+ :returns: The emitted event dict.
+ :rtype: ResponseStreamEvent
+ """
+ return self._emit_added(
+ {
+ "type": "mcp_list_tools",
+ "id": self._item_id,
+ "server_label": self._server_label,
+ "tools": [],
+ }
+ )
+
+ def emit_in_progress(self) -> generated_models.ResponseStreamEvent:
+ """Emit an MCP list-tools in-progress state event.
+
+ :returns: The emitted event dict.
+ :rtype: ResponseStreamEvent
+ """
+ return self._emit_item_state_event(EVENT_TYPE.RESPONSE_MCP_LIST_TOOLS_IN_PROGRESS.value)
+
+ def emit_completed(self) -> generated_models.ResponseStreamEvent:
+ """Emit an MCP list-tools completed state event.
+
+ :returns: The emitted event dict.
+ :rtype: ResponseStreamEvent
+ """
+ return self._emit_item_state_event(EVENT_TYPE.RESPONSE_MCP_LIST_TOOLS_COMPLETED.value)
+
+ def emit_failed(self) -> generated_models.ResponseStreamEvent:
+ """Emit an MCP list-tools failed state event.
+
+ :returns: The emitted event dict.
+ :rtype: ResponseStreamEvent
+ """
+ return self._emit_item_state_event(EVENT_TYPE.RESPONSE_MCP_LIST_TOOLS_FAILED.value)
+
+ def emit_done(self) -> generated_models.ResponseStreamEvent:
+ """Emit an ``output_item.done`` event for MCP list-tools.
+
+ :returns: The emitted event dict.
+ :rtype: ResponseStreamEvent
+ """
+ return self._emit_done(
+ {
+ "type": "mcp_list_tools",
+ "id": self._item_id,
+ "server_label": self._server_label,
+ "tools": [],
+ }
+ )
+
+
+class OutputItemCustomToolCallBuilder(BaseOutputItemBuilder):
+ """Scoped builder for custom tool call events."""
+
+ def __init__(
+ self,
+ stream: "ResponseEventStream",
+ output_index: int,
+ item_id: str,
+ call_id: str,
+ name: str,
+ ) -> None:
+ """Initialize the custom tool call builder.
+
+ :param stream: The parent event stream.
+ :type stream: ResponseEventStream
+ :param output_index: Zero-based index of this output item.
+ :type output_index: int
+ :param item_id: Unique identifier for this output item.
+ :type item_id: str
+ :param call_id: Unique identifier for this tool call.
+ :type call_id: str
+ :param name: Name of the custom tool being called.
+ :type name: str
+ """
+ super().__init__(stream=stream, output_index=output_index, item_id=item_id)
+ self._call_id = _require_non_empty(call_id, "call_id")
+ self._name = _require_non_empty(name, "name")
+ self._final_input: str | None = None
+
+ @property
+ def call_id(self) -> str:
+ """Return the tool call identifier.
+
+ :returns: The call ID.
+ :rtype: str
+ """
+ return self._call_id
+
+ @property
+ def name(self) -> str:
+ """Return the custom tool name.
+
+ :returns: The tool name.
+ :rtype: str
+ """
+ return self._name
+
+ def emit_added(self) -> generated_models.ResponseStreamEvent:
+ """Emit an ``output_item.added`` event for a custom tool call.
+
+ :returns: The emitted event dict.
+ :rtype: ResponseStreamEvent
+ """
+ return self._emit_added(
+ {
+ "type": "custom_tool_call",
+ "id": self._item_id,
+ "call_id": self._call_id,
+ "name": self._name,
+ "input": "",
+ }
+ )
+
+ def emit_input_delta(self, delta: str) -> generated_models.ResponseStreamEvent:
+ """Emit a custom tool call input delta event.
+
+ :param delta: The incremental input text fragment.
+ :type delta: str
+ :returns: The emitted event dict.
+ :rtype: ResponseStreamEvent
+ """
+ return self._emit_item_state_event(
+ EVENT_TYPE.RESPONSE_CUSTOM_TOOL_CALL_INPUT_DELTA.value,
+ extra_payload={"delta": delta},
+ )
+
+ def emit_input_done(self, input_text: str) -> generated_models.ResponseStreamEvent:
+ """Emit a custom tool call input done event.
+
+ :param input_text: The final, complete input text.
+ :type input_text: str
+ :returns: The emitted event dict.
+ :rtype: ResponseStreamEvent
+ """
+ self._final_input = input_text
+ return self._emit_item_state_event(
+ EVENT_TYPE.RESPONSE_CUSTOM_TOOL_CALL_INPUT_DONE.value,
+ extra_payload={"input": input_text},
+ )
+
+ def emit_done(self) -> generated_models.ResponseStreamEvent:
+ """Emit an ``output_item.done`` event for this custom tool call.
+
+ :returns: The emitted event dict.
+ :rtype: ResponseStreamEvent
+ """
+ return self._emit_done(
+ {
+ "type": "custom_tool_call",
+ "id": self._item_id,
+ "call_id": self._call_id,
+ "name": self._name,
+ "input": self._final_input or "",
+ }
+ )
+
+ # ---- Sub-item convenience generators (S-053) ----
+
+ def input(self, input_text: str) -> Iterator[generated_models.ResponseStreamEvent]:
+ """Yield the input delta and input done events.
+
+ Emits ``custom_tool_call_input.delta`` followed by
+ ``custom_tool_call_input.done``.
+
+ :param input_text: The complete input text.
+ :type input_text: str
+ :returns: An iterator of event dicts.
+ :rtype: Iterator[ResponseStreamEvent]
+ """
+ yield self.emit_input_delta(input_text)
+ yield self.emit_input_done(input_text)
+
+ async def ainput(self, input_text: str | AsyncIterable[str]) -> AsyncIterator[generated_models.ResponseStreamEvent]:
+ """Async variant of :meth:`input` with streaming support.
+
+ When *input_text* is a string, behaves identically to :meth:`input`.
+ When *input_text* is an async iterable of string chunks, emits one
+ ``custom_tool_call_input.delta`` per chunk in real time (S-055),
+ then ``custom_tool_call_input.done`` with the accumulated text.
+
+ :param input_text: Complete input text or async iterable of chunks.
+ :type input_text: str | AsyncIterable[str]
+ :returns: An async iterator of event dicts.
+ :rtype: AsyncIterator[ResponseStreamEvent]
+ """
+ if isinstance(input_text, str):
+ for event in self.input(input_text):
+ yield event
+ return
+ accumulated: list[str] = []
+ async for chunk in input_text:
+ accumulated.append(chunk)
+ yield self.emit_input_delta(chunk)
+ yield self.emit_input_done("".join(accumulated))
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/streaming/_event_stream.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/streaming/_event_stream.py
new file mode 100644
index 000000000000..c94de6054797
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/streaming/_event_stream.py
@@ -0,0 +1,723 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Response event stream builders for lifecycle and output item events."""
+
+from __future__ import annotations
+
+from collections.abc import AsyncIterable
+from copy import deepcopy
+from datetime import datetime, timezone
+from typing import Any, AsyncIterator, Iterator
+
+from .._id_generator import IdGenerator
+from ..models import _generated as generated_models
+from ..models._generated import AgentReference
+from . import _internals
+from ._builders import (
+ OutputItemBuilder,
+ OutputItemCodeInterpreterCallBuilder,
+ OutputItemCustomToolCallBuilder,
+ OutputItemFileSearchCallBuilder,
+ OutputItemFunctionCallBuilder,
+ OutputItemFunctionCallOutputBuilder,
+ OutputItemImageGenCallBuilder,
+ OutputItemMcpCallBuilder,
+ OutputItemMcpListToolsBuilder,
+ OutputItemMessageBuilder,
+ OutputItemReasoningItemBuilder,
+ OutputItemWebSearchCallBuilder,
+)
+from ._internals import construct_event_model
+from ._state_machine import EventStreamValidator
+
+EVENT_TYPE = generated_models.ResponseStreamEventType
+
+# Event types whose payload is a full Response snapshot.
+# Lifecycle events nest under a "response" key on the wire.
+_RESPONSE_SNAPSHOT_EVENT_TYPES = _internals._RESPONSE_SNAPSHOT_EVENT_TYPES
+
+
+def _resolve_conversation_param(raw: Any) -> str | None:
+ """Normalize a polymorphic conversation value to a plain string ID.
+
+ The input side of ``CreateResponse.conversation`` is ``Union[str, ConversationParam_2]``
+ whereas the output side ``ResponseObject.conversation`` is always a ``ConversationReference``
+ (object form ``{"id": "..."}``. This helper extracts the string ID from whichever
+ form was supplied.
+
+ :param raw: The raw conversation value from the request (string, dict, model, or None).
+ :type raw: Any
+ :returns: The conversation ID string, or ``None`` if absent/empty.
+ :rtype: str | None
+ """
+ if raw is None:
+ return None
+ if isinstance(raw, str):
+ return raw or None
+ if isinstance(raw, dict):
+ cid = raw.get("id")
+ return str(cid) if cid else None
+ if hasattr(raw, "id"):
+ cid = raw.id
+ return str(cid) if cid else None
+ return None
+
+
+class ResponseEventStream: # pylint: disable=too-many-public-methods
+ """Response event stream with deterministic sequence numbers."""
+
+ def __init__(
+ self,
+ *,
+ response_id: str | None = None,
+ agent_reference: AgentReference | dict[str, Any] | None = None,
+ model: str | None = None,
+ request: generated_models.CreateResponse | dict[str, Any] | None = None,
+ response: generated_models.ResponseObject | dict[str, Any] | None = None,
+ ) -> None:
+ """Initialize a new response event stream.
+
+ :param response_id: Unique identifier for the response. Inferred from *response* if omitted.
+ :type response_id: str | None
+ :param agent_reference: Optional agent reference model or metadata dict.
+ :type agent_reference: AgentReference | dict[str, Any] | None
+ :param model: Optional model identifier to stamp on the response.
+ :type model: str | None
+ :param request: Optional create-response request to seed the response envelope from.
+ :type request: ~azure.ai.agentserver.responses.models._generated.CreateResponse | dict[str, Any] | None
+ :param response: Optional pre-existing response envelope to build upon.
+ :type response: ~azure.ai.agentserver.responses.models._generated.ResponseObject | dict[str, Any] | None
+ :raises ValueError: If both *request* and *response* are provided, or if *response_id* cannot be resolved.
+ """
+ if request is not None and response is not None:
+ raise ValueError("request and response cannot both be provided")
+
+ request_mapping = _internals.coerce_model_mapping(request)
+ response_mapping = _internals.coerce_model_mapping(response)
+
+ resolved_response_id = response_id
+ if resolved_response_id is None and response_mapping is not None:
+ candidate_id = response_mapping.get("id")
+ if isinstance(candidate_id, str) and candidate_id:
+ resolved_response_id = candidate_id
+
+ if not isinstance(resolved_response_id, str) or not resolved_response_id:
+ raise ValueError("response_id is required")
+
+ self._response_id = resolved_response_id
+
+ if response_mapping is not None:
+ payload = deepcopy(response_mapping)
+ payload["id"] = self._response_id
+ payload.setdefault("object", "response")
+ payload.setdefault("output", [])
+ self._response = generated_models.ResponseObject(payload)
+ else:
+ self._response = generated_models.ResponseObject(
+ {
+ "id": self._response_id,
+ "object": "response",
+ "output": [],
+ "created_at": datetime.now(timezone.utc),
+ }
+ )
+ if request_mapping is not None:
+ for field_name in ("metadata", "background", "previous_response_id"):
+ value = request_mapping.get(field_name)
+ if value is not None:
+ setattr(self._response, field_name, deepcopy(value))
+ # Normalize polymorphic conversation (str | ConversationParam_2)
+ # to the response-side ConversationReference object form.
+ conversation_id = _resolve_conversation_param(request_mapping.get("conversation"))
+ if conversation_id is not None:
+ self._response.conversation = generated_models.ConversationReference(id=conversation_id)
+ request_model = request_mapping.get("model")
+ if isinstance(request_model, str) and request_model:
+ self._response.model = request_model
+ request_agent_reference = request_mapping.get("agent_reference")
+ if isinstance(request_agent_reference, dict):
+ self._response.agent_reference = deepcopy(request_agent_reference) # type: ignore[assignment]
+
+ if model is not None:
+ self._response.model = model
+
+ if agent_reference is not None:
+ self._response.agent_reference = deepcopy(agent_reference) # type: ignore[assignment]
+
+ self._agent_reference, self._model = _internals.extract_response_fields(self._response)
+ self._events: list[generated_models.ResponseStreamEvent] = []
+ self._validator = EventStreamValidator()
+ self._output_index = 0
+
+ @property
+ def response(self) -> generated_models.ResponseObject:
+ """Return the current response envelope.
+
+ :returns: The mutable response envelope being built by this stream.
+ :rtype: ~azure.ai.agentserver.responses.models._generated.ResponseObject
+ """
+ return self._response
+
+ def emit_queued(self) -> generated_models.ResponseStreamEvent:
+ """Emit a ``response.queued`` lifecycle event.
+
+ :returns: The emitted event model instance.
+ :rtype: ~azure.ai.agentserver.responses.models._generated.ResponseStreamEvent
+ """
+ self._response.status = "queued"
+ return self.emit_event(
+ {
+ "type": EVENT_TYPE.RESPONSE_QUEUED.value,
+ "response": self._response_payload(),
+ }
+ )
+
+ def emit_created(self, *, status: str = "in_progress") -> generated_models.ResponseStreamEvent:
+ """Emit a ``response.created`` lifecycle event.
+
+ :keyword status: Initial status to set on the response. Defaults to ``"in_progress"``.
+ :keyword type status: str
+ :returns: The emitted event model instance.
+ :rtype: ~azure.ai.agentserver.responses.models._generated.ResponseStreamEvent
+ """
+ self._response.status = status # type: ignore[assignment]
+ return self.emit_event(
+ {
+ "type": EVENT_TYPE.RESPONSE_CREATED.value,
+ "response": self._response_payload(),
+ }
+ )
+
+ def emit_in_progress(self) -> generated_models.ResponseStreamEvent:
+ """Emit a ``response.in_progress`` lifecycle event.
+
+ :returns: The emitted event model instance.
+ :rtype: ~azure.ai.agentserver.responses.models._generated.ResponseStreamEvent
+ """
+ self._response.status = "in_progress"
+ return self.emit_event(
+ {
+ "type": EVENT_TYPE.RESPONSE_IN_PROGRESS.value,
+ "response": self._response_payload(),
+ }
+ )
+
+ def emit_completed(
+ self, *, usage: generated_models.ResponseUsage | dict[str, Any] | None = None
+ ) -> generated_models.ResponseStreamEvent:
+ """Emit a ``response.completed`` terminal lifecycle event.
+
+ :keyword usage: Optional usage statistics to attach to the response.
+ :keyword type usage: ~azure.ai.agentserver.responses.models._generated.ResponseUsage | dict[str, Any] | None
+ :returns: The emitted event model instance.
+ :rtype: ~azure.ai.agentserver.responses.models._generated.ResponseStreamEvent
+ """
+ self._response.status = "completed"
+ self._response.error = None # type: ignore[assignment]
+ self._response.incomplete_details = None # type: ignore[assignment]
+ self._set_terminal_fields(usage=usage)
+ return self.emit_event(
+ {
+ "type": EVENT_TYPE.RESPONSE_COMPLETED.value,
+ "response": self._response_payload(),
+ }
+ )
+
+ def emit_failed(
+ self,
+ *,
+ code: str | generated_models.ResponseErrorCode = "server_error",
+ message: str = "An internal server error occurred.",
+ usage: generated_models.ResponseUsage | dict[str, Any] | None = None,
+ ) -> generated_models.ResponseStreamEvent:
+ """Emit a ``response.failed`` terminal lifecycle event.
+
+ :keyword code: Error code describing the failure.
+ :keyword type code: str | ~azure.ai.agentserver.responses.models._generated.ResponseErrorCode
+ :keyword message: Human-readable error message.
+ :keyword type message: str
+ :keyword usage: Optional usage statistics to attach to the response.
+ :keyword type usage: ~azure.ai.agentserver.responses.models._generated.ResponseUsage | dict[str, Any] | None
+ :returns: The emitted event model instance.
+ :rtype: ~azure.ai.agentserver.responses.models._generated.ResponseStreamEvent
+ """
+ self._response.status = "failed"
+ self._response.incomplete_details = None # type: ignore[assignment]
+ self._response.error = generated_models.ResponseErrorInfo(
+ {
+ "code": _internals.enum_value(code),
+ "message": message,
+ }
+ )
+ self._set_terminal_fields(usage=usage)
+ return self.emit_event(
+ {
+ "type": EVENT_TYPE.RESPONSE_FAILED.value,
+ "response": self._response_payload(),
+ }
+ )
+
+ def emit_incomplete(
+ self,
+ *,
+ reason: str | None = None,
+ usage: generated_models.ResponseUsage | dict[str, Any] | None = None,
+ ) -> generated_models.ResponseStreamEvent:
+ """Emit a ``response.incomplete`` terminal lifecycle event.
+
+ :keyword reason: Optional reason for incompleteness.
+ :keyword type reason: str | ~azure.ai.agentserver.responses.models._generated.ResponseIncompleteReason
+ | None
+ :keyword usage: Optional usage statistics to attach to the response.
+ :keyword type usage: ~azure.ai.agentserver.responses.models._generated.ResponseUsage | dict[str, Any]
+ | None
+ :returns: The emitted event model instance.
+ :rtype: ~azure.ai.agentserver.responses.models._generated.ResponseStreamEvent
+ """
+ self._response.status = "incomplete"
+ self._response.error = None # type: ignore[assignment]
+ if reason is None:
+ self._response.incomplete_details = None # type: ignore[assignment]
+ else:
+ self._response.incomplete_details = generated_models.ResponseIncompleteDetails(
+ {
+ "reason": _internals.enum_value(reason),
+ }
+ )
+ self._set_terminal_fields(usage=usage)
+ return self.emit_event(
+ {
+ "type": EVENT_TYPE.RESPONSE_INCOMPLETE.value,
+ "response": self._response_payload(),
+ }
+ )
+
+ def add_output_item(self, item_id: str) -> OutputItemBuilder:
+ """Add a generic output item and return its builder.
+
+ :param item_id: Unique identifier for the output item.
+ :type item_id: str
+ :returns: A builder for emitting added/done events for the output item.
+ :rtype: OutputItemBuilder
+ :raises TypeError: If *item_id* is None.
+ :raises ValueError: If *item_id* is empty or has an invalid format.
+ """
+ if item_id is None:
+ raise TypeError("item_id must not be None")
+ if not isinstance(item_id, str) or not item_id.strip():
+ raise ValueError("item_id must be a non-empty string")
+
+ is_valid_id, error = IdGenerator.is_valid(item_id)
+ if not is_valid_id:
+ raise ValueError(f"invalid item_id '{item_id}': {error}")
+
+ output_index = self._output_index
+ self._output_index += 1
+ return OutputItemBuilder(self, output_index=output_index, item_id=item_id)
+
+ def add_output_item_message(self) -> OutputItemMessageBuilder:
+ """Add a message output item and return its scoped builder.
+
+ :returns: A builder for emitting message content, text deltas, and lifecycle events.
+ :rtype: OutputItemMessageBuilder
+ """
+ output_index = self._output_index
+ self._output_index += 1
+ item_id = IdGenerator.new_message_item_id(self._response_id)
+ return OutputItemMessageBuilder(self, output_index=output_index, item_id=item_id)
+
+ def add_output_item_function_call(self, name: str, call_id: str) -> OutputItemFunctionCallBuilder:
+ """Add a function-call output item and return its scoped builder.
+
+ :param name: The function name being called.
+ :type name: str
+ :param call_id: Unique identifier for this function call.
+ :type call_id: str
+ :returns: A builder for emitting function-call argument deltas and lifecycle events.
+ :rtype: OutputItemFunctionCallBuilder
+ """
+ output_index = self._output_index
+ self._output_index += 1
+ item_id = IdGenerator.new_function_call_item_id(self._response_id)
+ return OutputItemFunctionCallBuilder(
+ self,
+ output_index=output_index,
+ item_id=item_id,
+ name=name,
+ call_id=call_id,
+ )
+
+ def add_output_item_function_call_output(self, call_id: str) -> OutputItemFunctionCallOutputBuilder:
+ """Add a function-call-output item and return its scoped builder.
+
+ :param call_id: The call ID of the function call this output belongs to.
+ :type call_id: str
+ :returns: A builder for emitting function-call output lifecycle events.
+ :rtype: OutputItemFunctionCallOutputBuilder
+ """
+ output_index = self._output_index
+ self._output_index += 1
+ item_id = IdGenerator.new_function_call_output_item_id(self._response_id)
+ return OutputItemFunctionCallOutputBuilder(
+ self,
+ output_index=output_index,
+ item_id=item_id,
+ call_id=call_id,
+ )
+
+ def add_output_item_reasoning_item(self) -> OutputItemReasoningItemBuilder:
+ """Add a reasoning output item and return its scoped builder.
+
+ :returns: A builder for emitting reasoning summary parts and lifecycle events.
+ :rtype: OutputItemReasoningItemBuilder
+ """
+ output_index = self._output_index
+ self._output_index += 1
+ item_id = IdGenerator.new_reasoning_item_id(self._response_id)
+ return OutputItemReasoningItemBuilder(self, output_index=output_index, item_id=item_id)
+
+ def add_output_item_file_search_call(self) -> OutputItemFileSearchCallBuilder:
+ """Add a file-search tool call output item and return its scoped builder.
+
+ :returns: A builder for emitting file-search call lifecycle events.
+ :rtype: OutputItemFileSearchCallBuilder
+ """
+ output_index = self._output_index
+ self._output_index += 1
+ item_id = IdGenerator.new_file_search_call_item_id(self._response_id)
+ return OutputItemFileSearchCallBuilder(self, output_index=output_index, item_id=item_id)
+
+ def add_output_item_web_search_call(self) -> OutputItemWebSearchCallBuilder:
+ """Add a web-search tool call output item and return its scoped builder.
+
+ :returns: A builder for emitting web-search call lifecycle events.
+ :rtype: OutputItemWebSearchCallBuilder
+ """
+ output_index = self._output_index
+ self._output_index += 1
+ item_id = IdGenerator.new_web_search_call_item_id(self._response_id)
+ return OutputItemWebSearchCallBuilder(self, output_index=output_index, item_id=item_id)
+
+ def add_output_item_code_interpreter_call(self) -> OutputItemCodeInterpreterCallBuilder:
+ """Add a code-interpreter tool call output item and return its scoped builder.
+
+ :returns: A builder for emitting code-interpreter call lifecycle events.
+ :rtype: OutputItemCodeInterpreterCallBuilder
+ """
+ output_index = self._output_index
+ self._output_index += 1
+ item_id = IdGenerator.new_code_interpreter_call_item_id(self._response_id)
+ return OutputItemCodeInterpreterCallBuilder(self, output_index=output_index, item_id=item_id)
+
+ def add_output_item_image_gen_call(self) -> OutputItemImageGenCallBuilder:
+ """Add an image-generation tool call output item and return its scoped builder.
+
+ :returns: A builder for emitting image-generation call lifecycle events.
+ :rtype: OutputItemImageGenCallBuilder
+ """
+ output_index = self._output_index
+ self._output_index += 1
+ item_id = IdGenerator.new_image_gen_call_item_id(self._response_id)
+ return OutputItemImageGenCallBuilder(self, output_index=output_index, item_id=item_id)
+
+ def add_output_item_mcp_call(self, server_label: str, name: str) -> OutputItemMcpCallBuilder:
+ """Add an MCP tool call output item and return its scoped builder.
+
+ :param server_label: Label identifying the MCP server.
+ :type server_label: str
+ :param name: Name of the MCP tool being called.
+ :type name: str
+ :returns: A builder for emitting MCP call argument deltas and lifecycle events.
+ :rtype: OutputItemMcpCallBuilder
+ """
+ output_index = self._output_index
+ self._output_index += 1
+ item_id = IdGenerator.new_mcp_call_item_id(self._response_id)
+ return OutputItemMcpCallBuilder(
+ self,
+ output_index=output_index,
+ item_id=item_id,
+ server_label=server_label,
+ name=name,
+ )
+
+ def add_output_item_mcp_list_tools(self, server_label: str) -> OutputItemMcpListToolsBuilder:
+ """Add an MCP list-tools output item and return its scoped builder.
+
+ :param server_label: Label identifying the MCP server.
+ :type server_label: str
+ :returns: A builder for emitting MCP list-tools lifecycle events.
+ :rtype: OutputItemMcpListToolsBuilder
+ """
+ output_index = self._output_index
+ self._output_index += 1
+ item_id = IdGenerator.new_mcp_list_tools_item_id(self._response_id)
+ return OutputItemMcpListToolsBuilder(
+ self,
+ output_index=output_index,
+ item_id=item_id,
+ server_label=server_label,
+ )
+
+ def add_output_item_custom_tool_call(self, call_id: str, name: str) -> OutputItemCustomToolCallBuilder:
+ """Add a custom tool call output item and return its scoped builder.
+
+ :param call_id: Unique identifier for this tool call.
+ :type call_id: str
+ :param name: Name of the custom tool being called.
+ :type name: str
+ :returns: A builder for emitting custom tool call input deltas and lifecycle events.
+ :rtype: OutputItemCustomToolCallBuilder
+ """
+ output_index = self._output_index
+ self._output_index += 1
+ item_id = IdGenerator.new_custom_tool_call_item_id(self._response_id)
+ return OutputItemCustomToolCallBuilder(
+ self,
+ output_index=output_index,
+ item_id=item_id,
+ call_id=call_id,
+ name=name,
+ )
+
+ def events(self) -> list[generated_models.ResponseStreamEvent]:
+ """Return copies of all events emitted so far as typed model instances.
+
+ :returns: A list of ``ResponseStreamEvent`` model instances.
+ :rtype: list[~azure.ai.agentserver.responses.models._generated.ResponseStreamEvent]
+ """
+ return [construct_event_model(event.as_dict()) for event in self._events]
+
+ def emit_event(self, event: dict[str, Any]) -> generated_models.ResponseStreamEvent:
+ """Emit a single event, applying defaults and validating the stream.
+
+ Accepts a **wire-format** dict (no ``"payload"`` wrapper), constructs
+ a typed ``ResponseStreamEvent`` model instance via polymorphic
+ deserialization, stamps defaults and sequence number, stores the
+ model, and returns it.
+
+ :param event: A wire-format event dict.
+ :type event: dict[str, Any]
+ :returns: The typed event model instance.
+ :rtype: ~azure.ai.agentserver.responses.models._generated.ResponseStreamEvent
+ """
+ candidate = deepcopy(event)
+ # Stamp sequence number before model construction
+ candidate["sequence_number"] = len(self._events)
+
+ # Construct typed model via polymorphic deserialization
+ model = construct_event_model(candidate)
+
+ # Apply response-level defaults to lifecycle events
+ _internals.apply_common_defaults(
+ [model], response_id=self._response_id, agent_reference=self._agent_reference, model=self._model
+ )
+ # Track completed output items on the response envelope
+ _internals.track_completed_output_item(self._response, model)
+
+ self._events.append(model)
+ self._validator.validate_next(model)
+ return model
+
+ # ---- Generator convenience methods (S-056/S-057) ----
+ # Output-item convenience generators that encapsulate the full lifecycle.
+ # Names mirror the add_* factories with the add_ prefix removed.
+
+ def output_item_message(self, text: str) -> Iterator[dict[str, Any]]:
+ """Yield the full lifecycle for a text message output item.
+
+ Emits output_item.added, content_part.added, output_text.delta,
+ output_text.done, content_part.done, and output_item.done.
+
+ :param text: The text content of the message.
+ :type text: str
+ :returns: An iterator of event dicts.
+ :rtype: Iterator[dict[str, Any]]
+ """
+ message = self.add_output_item_message()
+ yield message.emit_added()
+ yield from message.text_content(text)
+ yield message.emit_done()
+
+ def output_item_function_call(self, name: str, call_id: str, arguments: str) -> Iterator[dict[str, Any]]:
+ """Yield the full lifecycle for a function call output item.
+
+ Emits output_item.added, function_call_arguments.delta,
+ function_call_arguments.done, and output_item.done.
+
+ :param name: The function name being called.
+ :type name: str
+ :param call_id: Unique identifier for this function call.
+ :type call_id: str
+ :param arguments: The function call arguments as a string.
+ :type arguments: str
+ :returns: An iterator of event dicts.
+ :rtype: Iterator[dict[str, Any]]
+ """
+ fc = self.add_output_item_function_call(name=name, call_id=call_id)
+ yield fc.emit_added()
+ yield from fc.arguments(arguments)
+ yield fc.emit_done()
+
+ def output_item_function_call_output(self, call_id: str, output: str) -> Iterator[dict[str, Any]]:
+ """Yield the full lifecycle for a function call output item.
+
+ Emits output_item.added and output_item.done.
+
+ :param call_id: The call ID of the function call this output belongs to.
+ :type call_id: str
+ :param output: The output value for the function call.
+ :type output: str
+ :returns: An iterator of event dicts.
+ :rtype: Iterator[dict[str, Any]]
+ """
+ fco = self.add_output_item_function_call_output(call_id=call_id)
+ yield fco.emit_added(output)
+ yield fco.emit_done(output)
+
+ def output_item_reasoning_item(self, summary_text: str) -> Iterator[dict[str, Any]]:
+ """Yield the full lifecycle for a reasoning output item.
+
+ Emits output_item.added, reasoning_summary_part.added,
+ reasoning_summary_text.delta, reasoning_summary_text.done,
+ reasoning_summary_part.done, and output_item.done.
+
+ :param summary_text: The reasoning summary text.
+ :type summary_text: str
+ :returns: An iterator of event dicts.
+ :rtype: Iterator[dict[str, Any]]
+ """
+ item = self.add_output_item_reasoning_item()
+ yield item.emit_added()
+ yield from item.summary_part(summary_text)
+ yield item.emit_done()
+
+ # ---- Async generator convenience methods (S-058) ----
+ # Async variants with AsyncIterable[str] support for real-time delta streaming.
+
+ async def aoutput_item_message(self, text: str | AsyncIterable[str]) -> AsyncIterator[dict[str, Any]]:
+ """Async variant of :meth:`output_item_message` with streaming support.
+
+ When *text* is a string, emits the same events as the sync variant.
+ When *text* is an async iterable of string chunks, emits one
+ ``output_text.delta`` per chunk in real time, enabling token-by-token
+ streaming from upstream LLMs.
+
+ :param text: Complete text or async iterable of text chunks.
+ :type text: str | AsyncIterable[str]
+ :returns: An async iterator of event dicts.
+ :rtype: AsyncIterator[dict[str, Any]]
+ """
+ if isinstance(text, str):
+ for event in self.output_item_message(text):
+ yield event
+ return
+ message = self.add_output_item_message()
+ yield message.emit_added()
+ async for event in message.atext_content(text):
+ yield event
+ yield message.emit_done()
+
+ async def aoutput_item_function_call(
+ self, name: str, call_id: str, arguments: str | AsyncIterable[str]
+ ) -> AsyncIterator[dict[str, Any]]:
+ """Async variant of :meth:`output_item_function_call` with streaming support.
+
+ When *arguments* is a string, emits the same events as the sync variant.
+ When *arguments* is an async iterable of string chunks, emits one
+ ``function_call_arguments.delta`` per chunk in real time.
+
+ :param name: The function name being called.
+ :type name: str
+ :param call_id: Unique identifier for this function call.
+ :type call_id: str
+ :param arguments: Complete arguments string or async iterable of chunks.
+ :type arguments: str | AsyncIterable[str]
+ :returns: An async iterator of event dicts.
+ :rtype: AsyncIterator[dict[str, Any]]
+ """
+ if isinstance(arguments, str):
+ for event in self.output_item_function_call(name, call_id, arguments):
+ yield event
+ return
+ fc = self.add_output_item_function_call(name=name, call_id=call_id)
+ yield fc.emit_added()
+ async for event in fc.aarguments(arguments):
+ yield event
+ yield fc.emit_done()
+
+ async def aoutput_item_function_call_output(self, call_id: str, output: str) -> AsyncIterator[dict[str, Any]]:
+ """Async variant of :meth:`output_item_function_call_output`.
+
+ :param call_id: The call ID of the function call this output belongs to.
+ :type call_id: str
+ :param output: The output value for the function call.
+ :type output: str
+ :returns: An async iterator of event dicts.
+ :rtype: AsyncIterator[dict[str, Any]]
+ """
+ for event in self.output_item_function_call_output(call_id, output):
+ yield event
+
+ async def aoutput_item_reasoning_item(
+ self, summary_text: str | AsyncIterable[str]
+ ) -> AsyncIterator[dict[str, Any]]:
+ """Async variant of :meth:`output_item_reasoning_item` with streaming support.
+
+ When *summary_text* is a string, emits the same events as the sync variant.
+ When *summary_text* is an async iterable of string chunks, emits one
+ ``reasoning_summary_text.delta`` per chunk in real time.
+
+ :param summary_text: Complete summary text or async iterable of chunks.
+ :type summary_text: str | AsyncIterable[str]
+ :returns: An async iterator of event dicts.
+ :rtype: AsyncIterator[dict[str, Any]]
+ """
+ if isinstance(summary_text, str):
+ for event in self.output_item_reasoning_item(summary_text):
+ yield event
+ return
+ item = self.add_output_item_reasoning_item()
+ yield item.emit_added()
+ async for event in item.asummary_part(summary_text):
+ yield event
+ yield item.emit_done()
+
+ # ---- Private helpers ----
+
+ def _response_payload(self) -> dict[str, Any]:
+ """Serialize the current response envelope to a plain dict.
+
+ :returns: A materialized dict representation of the response.
+ :rtype: dict[str, Any]
+ """
+ return _internals.materialize_generated_payload(self._response.as_dict())
+
+ def with_output_item_defaults(self, item: dict[str, Any]) -> dict[str, Any]:
+ """Stamp an output item dict with response-level defaults.
+
+ :param item: The item dict to stamp.
+ :type item: dict[str, Any]
+ :returns: A deep copy of the item with ``response_id`` and ``agent_reference`` defaults applied.
+ :rtype: dict[str, Any]
+ """
+ stamped = deepcopy(item)
+ if "response_id" not in stamped or stamped["response_id"] is None:
+ stamped["response_id"] = self._response_id
+ if "agent_reference" not in stamped or stamped["agent_reference"] is None:
+ stamped["agent_reference"] = self._agent_reference
+ return stamped
+
+ def _set_terminal_fields(self, *, usage: generated_models.ResponseUsage | dict[str, Any] | None) -> None:
+ """Set terminal fields on the response envelope (completed_at, usage).
+
+ :keyword usage: Optional usage statistics to attach.
+ :keyword type usage: ~azure.ai.agentserver.responses.models._generated.ResponseUsage | dict[str, Any] | None
+ :rtype: None
+ """
+ # B6: completed_at is non-null only for completed status
+ if self._response.status == "completed":
+ self._response.completed_at = datetime.now(timezone.utc)
+ else:
+ self._response.completed_at = None # type: ignore[assignment]
+ self._response.usage = _internals.coerce_usage(usage)
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/streaming/_helpers.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/streaming/_helpers.py
new file mode 100644
index 000000000000..ac3d6985a7eb
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/streaming/_helpers.py
@@ -0,0 +1,271 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Event coercion, defaults application, and snapshot extraction helpers."""
+
+from __future__ import annotations
+
+from collections.abc import MutableMapping
+from copy import deepcopy
+from typing import Any, AsyncIterator
+
+from ..models import _generated as generated_models
+from ..models._generated import AgentReference
+from . import _internals
+from ._event_stream import ResponseEventStream
+from ._internals import _RESPONSE_SNAPSHOT_EVENT_TYPES
+from ._sse import encode_sse_event
+
+EVENT_TYPE = generated_models.ResponseStreamEventType
+
+
+def strip_nulls(d: dict) -> dict:
+ """Recursively remove keys whose values are ``None`` from a dict.
+
+ Only dict values are recursed into; lists and other containers are
+ left untouched so that ``0``, ``False``, ``""``, and ``[]`` are
+ preserved.
+
+ :param d: The dictionary to strip.
+ :type d: dict
+ :returns: A new dictionary with ``None``-valued keys removed.
+ :rtype: dict
+ """
+ return {k: strip_nulls(v) if isinstance(v, dict) else v for k, v in d.items() if v is not None}
+
+
+def _build_events(
+ response_id: str,
+ *,
+ include_progress: bool,
+ agent_reference: AgentReference | dict[str, Any],
+ model: str | None,
+) -> list[generated_models.ResponseStreamEvent]:
+ """Build a minimal lifecycle event sequence for a response.
+
+ Returns ``ResponseStreamEvent`` model instances representing the standard
+ lifecycle: ``response.created`` → (optionally) ``response.in_progress`` →
+ ``response.completed``.
+
+ :param response_id: Unique identifier for the response.
+ :type response_id: str
+ :keyword include_progress: Whether to include an ``in_progress`` event.
+ :keyword type include_progress: bool
+ :keyword agent_reference: Agent reference model or metadata dict.
+ :keyword type agent_reference: AgentReference | dict[str, Any]
+ :keyword model: Optional model identifier.
+ :keyword type model: str | None
+ :returns: A list of typed ``ResponseStreamEvent`` model instances.
+ :rtype: list[~azure.ai.agentserver.responses.models._generated.ResponseStreamEvent]
+ """
+ stream = ResponseEventStream(
+ response_id=response_id,
+ agent_reference=agent_reference,
+ model=model,
+ )
+ stream.emit_created(status="in_progress")
+ if include_progress:
+ stream.emit_in_progress()
+ stream.emit_completed()
+ return list(stream._events) # pylint: disable=protected-access
+
+
+async def _encode_sse(events: list[generated_models.ResponseStreamEvent]) -> AsyncIterator[str]:
+ """Encode a list of ``ResponseStreamEvent`` model instances as SSE-formatted strings.
+
+ :param events: The events to encode.
+ :type events: list[ResponseStreamEvent]
+ :returns: An async iterator yielding SSE-formatted strings.
+ :rtype: AsyncIterator[str]
+ """
+ for event in events:
+ yield encode_sse_event(event)
+
+
+def _coerce_handler_event(
+ handler_event: generated_models.ResponseStreamEvent | dict[str, Any],
+) -> generated_models.ResponseStreamEvent:
+ """Coerce a handler event to a ``ResponseStreamEvent`` model instance.
+
+ Handlers may yield events in any of these shapes:
+
+ - **Generated event models** (already typed)::
+
+ ResponseCreatedEvent(response={...}, sequence_number=0)
+
+ - **Wire / SSE format** for lifecycle events::
+
+ {"type": "response.created", "response": {"id": "...", "status": "in_progress", ...}, "sequence_number": 0}
+
+ - **Wire / SSE format** for content events::
+
+ {"type": "response.output_text.delta", "output_index": 0, "delta": "Hello", "sequence_number": 3}
+
+ All shapes are normalised to a ``ResponseStreamEvent`` model instance
+ for typed internal pipeline processing.
+
+ :param handler_event: The event to normalize (dict or model instance).
+ :type handler_event: ResponseStreamEvent | dict[str, Any]
+ :returns: A typed ``ResponseStreamEvent`` model instance.
+ :rtype: ~azure.ai.agentserver.responses.models._generated.ResponseStreamEvent
+ :raises TypeError: If the event is not a dict or a model with ``as_dict()``.
+ :raises ValueError: If the event does not include a non-empty ``type``.
+ """
+ from ._internals import construct_event_model # pylint: disable=import-outside-toplevel
+
+ # Already a typed model — return a copy via as_dict() round-trip.
+ if isinstance(handler_event, generated_models.ResponseStreamEvent):
+ return construct_event_model(handler_event.as_dict())
+
+ if isinstance(handler_event, dict):
+ event_data = deepcopy(handler_event)
+ elif hasattr(handler_event, "as_dict"):
+ event_data = handler_event.as_dict()
+ else:
+ raise TypeError("handler events must be dictionaries or generated event models")
+
+ event_type = event_data.get("type")
+ if not isinstance(event_type, str) or not event_type:
+ raise ValueError("handler event must include a non-empty 'type'")
+
+ return construct_event_model(event_data)
+
+
+def _apply_stream_event_defaults(
+ event: generated_models.ResponseStreamEvent,
+ *,
+ response_id: str,
+ agent_reference: AgentReference | dict[str, Any],
+ model: str | None,
+ sequence_number: int | None,
+ agent_session_id: str | None = None,
+ conversation_id: str | None = None,
+) -> generated_models.ResponseStreamEvent:
+ """Apply response-level defaults to a ``ResponseStreamEvent`` model instance.
+
+ For lifecycle events whose ``response`` attribute carries a ``Response``
+ snapshot, stamps ``id``, ``response_id``, ``object``, ``agent_reference``,
+ ``model``, and ``agent_session_id`` using ``setdefault`` so handler-supplied
+ values are not overwritten (except ``agent_session_id`` which is forcibly
+ stamped per S-038).
+
+ ``sequence_number`` is always applied at the top level of the event,
+ because it lives on the ``ResponseStreamEvent`` base class.
+
+ :param event: The event model instance to enrich.
+ :type event: ResponseStreamEvent
+ :keyword response_id: Response ID to stamp in lifecycle-event payloads.
+ :keyword type response_id: str
+ :keyword agent_reference: Agent reference model or metadata dict.
+ :keyword type agent_reference: AgentReference | dict[str, Any]
+ :keyword model: Optional model identifier.
+ :keyword type model: str | None
+ :keyword sequence_number: Optional sequence number to set; ``None`` leaves it unchanged.
+ :keyword type sequence_number: int | None
+ :keyword agent_session_id: Resolved session ID (S-038).
+ :keyword type agent_session_id: str | None
+ :returns: The event with defaults applied (same object, mutated in-place).
+ :rtype: ResponseStreamEvent
+ """
+ normalized = event # caller (_coerce_handler_event) already deep-copied
+ # Delegate lifecycle-event stamping to the canonical implementation in _internals.
+ _internals.apply_common_defaults(
+ [normalized],
+ response_id=response_id,
+ agent_reference=agent_reference if agent_reference else {},
+ model=model,
+ agent_session_id=agent_session_id,
+ conversation_id=conversation_id,
+ )
+ # Stamp response_id and agent_reference on output items (B20/B21)
+ event_type = normalized.get("type", "")
+ if event_type in ("response.output_item.added", "response.output_item.done"):
+ item = normalized.get("item")
+ if isinstance(item, (dict, MutableMapping)):
+ item.setdefault("response_id", response_id)
+ if agent_reference:
+ # Use explicit None check instead of setdefault so that
+ # builder items with agent_reference=None are overridden.
+ if item.get("agent_reference") is None:
+ item["agent_reference"] = agent_reference
+
+ if sequence_number is not None:
+ normalized["sequence_number"] = sequence_number
+ return normalized
+
+
+def _extract_response_snapshot_from_events(
+ events: list[generated_models.ResponseStreamEvent],
+ *,
+ response_id: str,
+ agent_reference: AgentReference | dict[str, Any],
+ model: str | None,
+ remove_sequence_number: bool = False,
+ agent_session_id: str | None = None,
+ conversation_id: str | None = None,
+) -> dict[str, Any]:
+ """Extract the latest response snapshot payload from a list of events.
+
+ Scans events in reverse for the most recent response-level lifecycle event
+ and returns its payload enriched with defaults. Falls back to building a
+ synthetic completed lifecycle if no snapshot event is found.
+
+ :param events: The event stream to search.
+ :type events: list[dict[str, Any]]
+ :keyword response_id: Response ID for default stamping.
+ :keyword type response_id: str
+ :keyword agent_reference: Agent reference model or metadata dict.
+ :keyword type agent_reference: AgentReference | dict[str, Any]
+ :keyword model: Optional model identifier.
+ :keyword type model: str | None
+ :keyword remove_sequence_number: Whether to strip ``sequence_number`` from the result.
+ :keyword type remove_sequence_number: bool
+ :keyword agent_session_id: Resolved session ID (S-038).
+ :keyword type agent_session_id: str | None
+ :returns: A dict representing the response snapshot payload.
+ :rtype: dict[str, Any]
+ """
+ for event in reversed(events):
+ event_type = event.get("type")
+ snapshot_source = event.get("response")
+ if event_type in _RESPONSE_SNAPSHOT_EVENT_TYPES and isinstance(snapshot_source, MutableMapping):
+ if hasattr(snapshot_source, "as_dict"):
+ snapshot = snapshot_source.as_dict()
+ else:
+ snapshot = deepcopy(dict(snapshot_source))
+ snapshot.setdefault("id", response_id)
+ snapshot.setdefault("response_id", response_id)
+ snapshot.setdefault("agent_reference", deepcopy(agent_reference))
+ snapshot.setdefault("object", "response")
+ snapshot.setdefault("output", [])
+ if model is not None:
+ snapshot.setdefault("model", model)
+ # S-038: forcibly stamp session ID on snapshot
+ if agent_session_id is not None:
+ snapshot["agent_session_id"] = agent_session_id
+ # S-040: forcibly stamp conversation on snapshot
+ if conversation_id is not None:
+ snapshot["conversation"] = {"id": conversation_id}
+ if remove_sequence_number:
+ snapshot.pop("sequence_number", None)
+ return strip_nulls(snapshot)
+
+ fallback_events = _build_events(
+ response_id,
+ include_progress=True,
+ agent_reference=agent_reference,
+ model=model,
+ )
+ # _build_events returns model instances — extract snapshot from the last lifecycle event.
+ last_event = fallback_events[-1]
+ last_wire = last_event.as_dict()
+ fallback_snapshot = dict(last_wire.get("response", {}))
+ fallback_snapshot.setdefault("output", [])
+ # S-038: forcibly stamp session ID on fallback snapshot
+ if agent_session_id is not None:
+ fallback_snapshot["agent_session_id"] = agent_session_id
+ # S-040: forcibly stamp conversation on fallback snapshot
+ if conversation_id is not None:
+ fallback_snapshot["conversation"] = {"id": conversation_id}
+ if remove_sequence_number:
+ fallback_snapshot.pop("sequence_number", None)
+ return strip_nulls(fallback_snapshot)
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/streaming/_internals.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/streaming/_internals.py
new file mode 100644
index 000000000000..74c1c7ded934
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/streaming/_internals.py
@@ -0,0 +1,258 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Internal helper functions extracted from ResponseEventStream.
+
+These are pure or near-pure functions that operate on event dicts
+and generated model objects. They carry no mutable state of their own.
+"""
+
+from __future__ import annotations
+
+from collections.abc import MutableMapping
+from copy import deepcopy
+from types import GeneratorType
+from typing import Any
+
+from ..models import _generated as generated_models
+from ..models._generated import AgentReference
+
+EVENT_TYPE = generated_models.ResponseStreamEventType
+
+
+# Event types whose ``response`` field is a full Response snapshot.
+# Only these events should carry id/response_id/object/agent_reference/model.
+_RESPONSE_SNAPSHOT_EVENT_TYPES: frozenset[str] = frozenset(
+ {
+ EVENT_TYPE.RESPONSE_QUEUED.value,
+ EVENT_TYPE.RESPONSE_CREATED.value,
+ EVENT_TYPE.RESPONSE_IN_PROGRESS.value,
+ EVENT_TYPE.RESPONSE_COMPLETED.value,
+ EVENT_TYPE.RESPONSE_FAILED.value,
+ EVENT_TYPE.RESPONSE_INCOMPLETE.value,
+ }
+)
+
+
+# ---------------------------------------------------------------------------
+# Pure / near-pure helpers
+# ---------------------------------------------------------------------------
+
+
+def construct_event_model(wire_dict: dict[str, Any]) -> generated_models.ResponseStreamEvent:
+ """Construct a typed ``ResponseStreamEvent`` subclass from a wire-format dict.
+
+ Uses the discriminator-based ``__mapping__`` on the base class for
+ polymorphic dispatch. For example, a dict with ``"type": "response.created"``
+ produces a ``ResponseCreatedEvent`` instance.
+
+ :param wire_dict: A wire-format event dict.
+ :type wire_dict: dict[str, Any]
+ :returns: A typed event model instance.
+ :rtype: ~azure.ai.agentserver.responses.models._generated.ResponseStreamEvent
+ """
+ event_type = wire_dict.get("type", "")
+ if isinstance(event_type, str):
+ event_class = generated_models.ResponseStreamEvent.__mapping__.get(event_type)
+ if event_class is not None:
+ return event_class(wire_dict)
+ return generated_models.ResponseStreamEvent(wire_dict)
+
+
+def enum_value(value: Any) -> Any:
+ """Return the ``.value`` of an enum member, or the value itself.
+
+ :param value: An enum member or a plain value.
+ :type value: Any
+ :returns: The ``.value`` attribute if present, otherwise *value* unchanged.
+ :rtype: Any
+ """
+ return getattr(value, "value", value)
+
+
+def coerce_model_mapping(value: Any) -> dict[str, Any] | None:
+ """Normalise a generated model, dict, or ``None`` to a plain dict copy.
+
+ :param value: A generated model, a dict, or ``None``.
+ :type value: Any
+ :returns: A deep-copied plain dict, or ``None`` if *value* is ``None`` or not coercible.
+ :rtype: dict[str, Any] | None
+ """
+ if value is None:
+ return None
+ if isinstance(value, dict):
+ return deepcopy(value)
+ if hasattr(value, "as_dict"):
+ result = value.as_dict()
+ if isinstance(result, dict):
+ return result
+ return None
+
+
+def materialize_generated_payload(value: Any) -> Any:
+ """Recursively resolve generators/tuples to plain lists/dicts.
+
+ :param value: A nested structure that may contain generators or tuples.
+ :type value: Any
+ :returns: A fully materialized structure using only dicts and lists.
+ :rtype: Any
+ """
+ if isinstance(value, dict):
+ return {key: materialize_generated_payload(item) for key, item in value.items()}
+ if isinstance(value, list):
+ return [materialize_generated_payload(item) for item in value]
+ if isinstance(value, tuple):
+ return [materialize_generated_payload(item) for item in value]
+ if isinstance(value, GeneratorType):
+ return [materialize_generated_payload(item) for item in value]
+ return value
+
+
+def apply_common_defaults(
+ events: list[generated_models.ResponseStreamEvent],
+ *,
+ response_id: str,
+ agent_reference: AgentReference | dict[str, Any] | None,
+ model: str | None,
+ agent_session_id: str | None = None,
+ conversation_id: str | None = None,
+) -> None:
+ """Stamp lifecycle event snapshots with response-level defaults.
+
+ Only events whose type is a ``Response`` snapshot
+ (``response.queued``, ``response.created``, ``response.in_progress``,
+ ``response.completed``, ``response.failed``, ``response.incomplete``)
+ receive ``id``, ``response_id``, ``object``, ``agent_reference``,
+ ``model``, ``agent_session_id``, and ``conversation`` defaults. Other
+ event types carry different schemas per the contract and are left untouched.
+
+ Events must use wire format where the snapshot is nested under the
+ ``"response"`` key (``ResponseStreamEvent`` models or equivalent dicts).
+
+ **S-038**: ``agent_session_id`` is forcibly stamped (not ``setdefault``)
+ on every ``response.*`` event so the resolved session ID is always
+ present regardless of what the handler emits.
+
+ **S-040**: ``conversation`` is forcibly stamped on every ``response.*``
+ event so the resolved conversation round-trips on all lifecycle events.
+
+ :param events: The list of events to mutate (``ResponseStreamEvent`` models).
+ :type events: list[ResponseStreamEvent]
+ :keyword response_id: Response ID to set as default.
+ :keyword type response_id: str
+ :keyword agent_reference: Optional agent reference model or metadata dict.
+ :keyword type agent_reference: AgentReference | dict[str, Any] | None
+ :keyword model: Optional model identifier.
+ :keyword type model: str | None
+ :keyword agent_session_id: Resolved session ID (S-038).
+ :keyword type agent_session_id: str | None
+ :keyword conversation_id: Resolved conversation ID (S-040).
+ :keyword type conversation_id: str | None
+ :rtype: None
+ """
+ for event in events:
+ event_type = event.get("type")
+ if event_type not in _RESPONSE_SNAPSHOT_EVENT_TYPES:
+ continue
+ snapshot = event.get("response")
+ if not isinstance(snapshot, MutableMapping):
+ continue
+ snapshot.setdefault("id", response_id)
+ snapshot.setdefault("response_id", response_id)
+ snapshot.setdefault("object", "response")
+ if agent_reference is not None:
+ snapshot.setdefault("agent_reference", deepcopy(agent_reference))
+ if model is not None:
+ snapshot.setdefault("model", model)
+ # S-038: forcibly stamp session ID on every response.* event
+ if agent_session_id is not None:
+ snapshot["agent_session_id"] = agent_session_id
+ # S-040: forcibly stamp conversation on every response.* event
+ if conversation_id is not None:
+ snapshot["conversation"] = {"id": conversation_id}
+
+
+def track_completed_output_item(
+ response: generated_models.ResponseObject,
+ event: generated_models.ResponseStreamEvent,
+) -> None:
+ """When an output-item-done event arrives, persist the item on the response.
+
+ Checks if the event is of type ``response.output_item.done`` and, if so,
+ stores the item at the appropriate index in ``response.output``.
+
+ :param response: The response envelope to which the completed item is attached.
+ :type response: ~azure.ai.agentserver.responses.models._generated.Response
+ :param event: The event to inspect (``ResponseStreamEvent`` model instance).
+ :type event: ResponseStreamEvent
+ :rtype: None
+ """
+ if event.get("type") != EVENT_TYPE.RESPONSE_OUTPUT_ITEM_DONE.value:
+ return
+
+ output_index = event.get("output_index")
+ item_raw = event.get("item")
+
+ if not isinstance(output_index, int) or output_index < 0 or item_raw is None:
+ return
+
+ # Coerce item to a plain dict for the OutputItem constructor
+ if hasattr(item_raw, "as_dict"):
+ item_dict = item_raw.as_dict()
+ elif isinstance(item_raw, dict):
+ item_dict = deepcopy(item_raw)
+ else:
+ return
+
+ output_items: list[Any] = response.output if isinstance(response.output, list) else []
+ if not isinstance(response.output, list):
+ response.output = output_items
+
+ try:
+ typed_item: Any = generated_models.OutputItem(item_dict)
+ except Exception: # pylint: disable=broad-exception-caught
+ typed_item = deepcopy(item_dict)
+
+ while len(output_items) <= output_index:
+ output_items.append(None)
+
+ output_items[output_index] = typed_item
+
+
+def coerce_usage(
+ usage: generated_models.ResponseUsage | dict[str, Any] | None,
+) -> generated_models.ResponseUsage | None:
+ """Normalise a usage value to a generated ``ResponseUsage`` instance.
+
+ :param usage: A usage dict, a ``ResponseUsage`` model, or ``None``.
+ :type usage: ~azure.ai.agentserver.responses.models._generated.ResponseUsage | dict[str, Any] | None
+ :returns: A ``ResponseUsage`` instance, or ``None`` if *usage* is ``None``.
+ :rtype: ~azure.ai.agentserver.responses.models._generated.ResponseUsage | None
+ :raises TypeError: If *usage* is not a dict or a generated ``ResponseUsage`` model.
+ """
+ if usage is None:
+ return None
+ if isinstance(usage, dict):
+ return generated_models.ResponseUsage(deepcopy(usage))
+ if hasattr(usage, "as_dict"):
+ return generated_models.ResponseUsage(usage.as_dict())
+ raise TypeError("usage must be a dict or a generated ResponseUsage model")
+
+
+def extract_response_fields(
+ response: generated_models.ResponseObject,
+) -> tuple[AgentReference | dict[str, Any] | None, str | None]:
+ """Pull ``agent_reference`` and ``model`` from a response in one pass.
+
+ :param response: The response envelope to inspect.
+ :returns: Tuple of (agent_reference or None, model string or None).
+ """
+ payload = coerce_model_mapping(response)
+ if not isinstance(payload, dict):
+ return None, None
+ agent_reference = payload.get("agent_reference")
+ agent_ref: AgentReference | dict[str, Any] | None = (
+ agent_reference if isinstance(agent_reference, (dict, MutableMapping)) else None
+ )
+ model = payload.get("model")
+ model_str = model if isinstance(model, str) and model else None
+ return agent_ref, model_str
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/streaming/_sse.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/streaming/_sse.py
new file mode 100644
index 000000000000..e2f61908806e
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/streaming/_sse.py
@@ -0,0 +1,151 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Server-sent events helpers for Responses streaming."""
+
+from __future__ import annotations
+
+import itertools
+import json
+from contextvars import ContextVar
+from typing import Any, Mapping
+
+from ..models._generated import ResponseStreamEvent
+
+_stream_counter_var: ContextVar[itertools.count] = ContextVar("_stream_counter_var")
+
+
+def new_stream_counter() -> None:
+ """Initialize a fresh per-stream SSE sequence number counter for the current context.
+
+ Call this once at the start of each streaming response so that concurrent
+ streams are numbered independently, each starting from 0.
+
+ :rtype: None
+ """
+ _stream_counter_var.set(itertools.count())
+
+
+def _next_sequence_number() -> int:
+ """Return the next SSE sequence number for the current stream context.
+
+ Initializes a new per-stream counter if none has been set for the current
+ context (e.g. direct calls from tests or outside a streaming request).
+
+ :returns: A monotonically increasing integer, starting from 0 for each stream.
+ :rtype: int
+ """
+ counter = _stream_counter_var.get(None)
+ if counter is None:
+ counter = itertools.count()
+ _stream_counter_var.set(counter)
+ return next(counter)
+
+
+def _coerce_payload(event: Any) -> tuple[str, dict[str, Any]]:
+ """Extract and normalize event type and payload from an event object.
+
+ Supports dict-like, model-with-``as_dict()``, and plain-object event sources.
+
+ :param event: The SSE event object to coerce.
+ :type event: Any
+ :returns: A tuple of ``(event_type, payload_dict)``.
+ :rtype: tuple[str, dict[str, Any]]
+ :raises ValueError: If the event does not include a non-empty ``type``.
+ """
+ event_type = getattr(event, "type", None)
+
+ if isinstance(event, Mapping):
+ payload = dict(event)
+ if event_type is None:
+ event_type = payload.get("type")
+ elif hasattr(event, "as_dict"):
+ payload = event.as_dict() # type: ignore[assignment]
+ if event_type is None:
+ event_type = payload.get("type")
+ else:
+ payload = {key: value for key, value in vars(event).items() if not key.startswith("_")}
+
+ if not event_type:
+ raise ValueError("SSE event must include a non-empty 'type'")
+
+ payload.pop("type", None)
+ return str(event_type), payload
+
+
+def _ensure_sequence_number(event: Any, payload: dict[str, Any]) -> None:
+ """Ensure the payload has a valid ``sequence_number``, assigning one if missing.
+
+ :param event: The original event object (used for attribute fallback).
+ :type event: Any
+ :param payload: The payload dict to mutate.
+ :type payload: dict[str, Any]
+ :rtype: None
+ """
+ explicit = payload.get("sequence_number")
+ event_value = getattr(event, "sequence_number", None)
+ candidate = explicit if explicit is not None else event_value
+
+ if not isinstance(candidate, int) or candidate < 0:
+ candidate = _next_sequence_number()
+
+ payload["sequence_number"] = candidate
+
+
+def _build_sse_frame(event_type: str, payload: dict[str, Any]) -> str:
+ """Build a single SSE frame string from event type and payload.
+
+ :param event_type: The SSE event type name.
+ :type event_type: str
+ :param payload: The payload dict to serialize as JSON.
+ :type payload: dict[str, Any]
+ :returns: A complete SSE frame string with trailing newlines.
+ :rtype: str
+ """
+ lines = [f"event: {event_type}"]
+ lines.append(f"data: {json.dumps(payload)}")
+ lines.append("")
+ lines.append("")
+ return "\n".join(lines)
+
+
+def encode_sse_event(event: ResponseStreamEvent) -> str:
+ """Encode a response stream event into SSE wire format.
+
+ :param event: Generated response stream event model.
+ :type event: ~azure.ai.agentserver.responses.models._generated.ResponseStreamEvent
+ :returns: Encoded SSE payload string.
+ :rtype: str
+ """
+ if hasattr(event, "as_dict"):
+ wire = event.as_dict()
+ event_type = str(wire.get("type", ""))
+ _ensure_sequence_number(event, wire)
+ return _build_sse_frame(event_type, wire)
+ # Fallback for non-model event objects (e.g. plain dataclass-like)
+ event_type, payload = _coerce_payload(event)
+ _ensure_sequence_number(event, payload)
+ return _build_sse_frame(event_type, {"type": event_type, **payload})
+
+
+def encode_sse_any_event(event: ResponseStreamEvent) -> str:
+ """Encode a ``ResponseStreamEvent`` model instance to SSE format.
+
+ Delegates to :func:`encode_sse_event`.
+
+ :param event: The event to encode.
+ :type event: ResponseStreamEvent
+ :returns: Encoded SSE frame string.
+ :rtype: str
+ """
+ return encode_sse_event(event)
+
+
+def encode_keep_alive_comment(comment: str = "keep-alive") -> str:
+ """Encode an SSE comment frame used for keep-alive traffic.
+
+ :param comment: The comment text to include. Defaults to ``"keep-alive"``.
+ :type comment: str
+ :returns: An SSE comment frame string.
+ :rtype: str
+ """
+ return f": {comment}\n\n"
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/streaming/_state_machine.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/streaming/_state_machine.py
new file mode 100644
index 000000000000..906706388b01
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/streaming/_state_machine.py
@@ -0,0 +1,212 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Lifecycle event state machine for Responses streaming."""
+
+from __future__ import annotations
+
+from copy import deepcopy
+from typing import Any, Mapping, MutableMapping, Sequence, cast
+
+from ..models import _generated as generated_models
+
+EVENT_TYPE = generated_models.ResponseStreamEventType
+OUTPUT_ITEM_DELTA_EVENT_TYPE = "response.output_item.delta"
+
+_TERMINAL_EVENT_TYPES = {
+ EVENT_TYPE.RESPONSE_COMPLETED.value,
+ EVENT_TYPE.RESPONSE_FAILED.value,
+ EVENT_TYPE.RESPONSE_INCOMPLETE.value,
+}
+_TERMINAL_TYPE_STATUS: dict[str, set[str]] = {
+ "response.completed": {"completed"},
+ "response.failed": {"failed", "cancelled"},
+ "response.incomplete": {"incomplete"},
+}
+_OUTPUT_ITEM_EVENT_TYPES = {
+ EVENT_TYPE.RESPONSE_OUTPUT_ITEM_ADDED.value,
+ OUTPUT_ITEM_DELTA_EVENT_TYPE,
+ EVENT_TYPE.RESPONSE_OUTPUT_ITEM_DONE.value,
+}
+_EVENT_STAGES = {
+ EVENT_TYPE.RESPONSE_CREATED.value: 0,
+ EVENT_TYPE.RESPONSE_IN_PROGRESS.value: 1,
+ EVENT_TYPE.RESPONSE_COMPLETED.value: 2,
+ EVENT_TYPE.RESPONSE_FAILED.value: 2,
+ EVENT_TYPE.RESPONSE_INCOMPLETE.value: 2,
+}
+
+
+class LifecycleStateMachineError(ValueError):
+ """Raised when lifecycle events violate ordering constraints."""
+
+
+class EventStreamValidator:
+ """Incremental validator that maintains state across calls.
+
+ Unlike :func:`validate_response_event_stream` which re-scans the full
+ event list each time, this class validates one event at a time in O(1).
+ """
+
+ def __init__(self) -> None:
+ self._last_stage: int = -1
+ self._terminal_count: int = 0
+ self._terminal_seen: bool = False
+ self._added_indexes: set[int] = set()
+ self._done_indexes: set[int] = set()
+ self._event_count: int = 0
+
+ def validate_next(self, event: Mapping[str, Any]) -> None:
+ """Validate one new event against accumulated state.
+
+ :param event: The event mapping to validate.
+ :type event: Mapping[str, Any]
+ :rtype: None
+ :raises LifecycleStateMachineError: If any ordering or structural constraint is violated.
+ """
+ event_type = event.get("type")
+ if not isinstance(event_type, str) or not event_type:
+ raise LifecycleStateMachineError("each lifecycle event must include a non-empty type")
+
+ if self._event_count == 0 and event_type != EVENT_TYPE.RESPONSE_CREATED.value:
+ raise LifecycleStateMachineError("first lifecycle event must be response.created")
+
+ self._event_count += 1
+
+ stage = _EVENT_STAGES.get(event_type)
+ if stage is not None:
+ if stage < self._last_stage:
+ raise LifecycleStateMachineError("lifecycle events are out of order")
+ if event_type in _TERMINAL_EVENT_TYPES:
+ self._terminal_count += 1
+ if self._terminal_count > 1:
+ raise LifecycleStateMachineError("multiple terminal lifecycle events are not allowed")
+ allowed_statuses = _TERMINAL_TYPE_STATUS.get(event_type)
+ if allowed_statuses is not None:
+ response = event.get("response")
+ actual_status = response.get("status") if isinstance(response, Mapping) else None
+ if actual_status and actual_status not in allowed_statuses:
+ expected = " or ".join(sorted(allowed_statuses))
+ raise LifecycleStateMachineError(
+ f"terminal event '{event_type}' has status '{actual_status}', expected {expected}"
+ )
+ self._terminal_seen = True
+ self._last_stage = stage
+ return
+
+ if event_type not in _OUTPUT_ITEM_EVENT_TYPES:
+ return
+
+ if self._last_stage < 0:
+ raise LifecycleStateMachineError("output item events cannot appear before response.created")
+ if self._terminal_seen:
+ raise LifecycleStateMachineError("output item events cannot appear after terminal lifecycle event")
+
+ output_index_raw = event.get("output_index", 0)
+ output_index = output_index_raw if isinstance(output_index_raw, int) and output_index_raw >= 0 else 0
+
+ if event_type == EVENT_TYPE.RESPONSE_OUTPUT_ITEM_ADDED.value:
+ if output_index in self._done_indexes:
+ raise LifecycleStateMachineError("cannot add output item after it has been marked done")
+ self._added_indexes.add(output_index)
+ return
+
+ if output_index not in self._added_indexes:
+ raise LifecycleStateMachineError("output item delta/done requires a preceding output_item.added")
+
+ if event_type == EVENT_TYPE.RESPONSE_OUTPUT_ITEM_DONE.value:
+ self._done_indexes.add(output_index)
+ return
+
+ if event_type == OUTPUT_ITEM_DELTA_EVENT_TYPE and output_index in self._done_indexes:
+ raise LifecycleStateMachineError("output item delta cannot appear after output_item.done")
+
+
+def validate_response_event_stream(events: Sequence[Mapping[str, Any]]) -> None:
+ """Validate lifecycle and output-item event ordering for a response stream.
+
+ Checks that the first event is ``response.created``, lifecycle events
+ are in monotonically non-decreasing order, at most one terminal event
+ exists, and output-item events obey added/delta/done constraints.
+
+ :param events: The sequence of event mappings to validate.
+ :type events: Sequence[Mapping[str, Any]]
+ :rtype: None
+ :raises LifecycleStateMachineError: If any ordering or structural constraint is violated.
+ """
+ if not events:
+ raise LifecycleStateMachineError("event stream cannot be empty")
+
+ validator = EventStreamValidator()
+ for event in events:
+ validator.validate_next(event)
+
+
+def normalize_lifecycle_events(
+ *, response_id: str, events: Sequence[Mapping[str, Any]], default_model: str | None = None
+) -> list[dict[str, Any]]:
+ """Normalize lifecycle events with ordering and terminal-state guarantees.
+
+ Applies ``id`` and ``model`` defaults to each event, validates ordering,
+ and appends a synthetic ``response.failed`` terminal event when none is present.
+
+ :keyword response_id: Response ID to stamp in each event.
+ :keyword type response_id: str
+ :keyword events: The sequence of raw lifecycle event mappings.
+ :keyword type events: Sequence[Mapping[str, Any]]
+ :keyword default_model: Optional default model identifier to set.
+ :keyword type default_model: str | None
+ :returns: A list of normalized event dicts with guaranteed terminal event.
+ :rtype: list[dict[str, Any]]
+ :raises LifecycleStateMachineError: If a lifecycle event has no type or ordering is invalid.
+ """
+ normalized: list[dict[str, Any]] = []
+
+ for raw_event in events:
+ event_type = raw_event.get("type")
+ if not isinstance(event_type, str) or not event_type:
+ raise LifecycleStateMachineError("each lifecycle event must include a non-empty type")
+
+ payload_raw = raw_event.get("response")
+ payload_raw_dict = deepcopy(payload_raw) if isinstance(payload_raw, Mapping) else {}
+ payload = cast(MutableMapping[str, Any], payload_raw_dict)
+
+ payload.setdefault("id", response_id)
+ payload.setdefault("object", "response")
+ if default_model is not None:
+ payload.setdefault("model", default_model)
+
+ normalized.append({"type": event_type, "response": payload, "sequence_number": 0})
+
+ if not normalized:
+ normalized = [
+ {
+ "type": EVENT_TYPE.RESPONSE_CREATED.value,
+ "response": {
+ "id": response_id,
+ "object": "response",
+ "status": "in_progress",
+ "model": default_model,
+ },
+ "sequence_number": 0,
+ }
+ ]
+
+ validate_response_event_stream(normalized)
+
+ terminal_count = sum(1 for event in normalized if event["type"] in _TERMINAL_EVENT_TYPES)
+
+ if terminal_count == 0:
+ normalized.append(
+ {
+ "type": EVENT_TYPE.RESPONSE_FAILED.value,
+ "response": {
+ "id": response_id,
+ "object": "response",
+ "status": "failed",
+ "model": default_model,
+ },
+ "sequence_number": 0,
+ }
+ )
+
+ return normalized
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/streaming/_text_response.py b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/streaming/_text_response.py
new file mode 100644
index 000000000000..29a1ecd369b0
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/azure/ai/agentserver/responses/streaming/_text_response.py
@@ -0,0 +1,133 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""TextResponse — high-level convenience for producing text-message responses.
+
+Handles the full SSE lifecycle automatically:
+``response.created`` → ``response.in_progress`` → message/content events
+→ ``response.completed``.
+
+Use ``create_text`` when the complete text is available (or produced by a
+single async call). Use ``create_text_stream`` when text arrives
+incrementally (e.g., token-by-token from an LLM).
+"""
+
+from __future__ import annotations
+
+from collections.abc import AsyncIterable
+from typing import TYPE_CHECKING, Any, AsyncIterator, Awaitable, Callable
+
+from ._event_stream import ResponseEventStream
+
+if TYPE_CHECKING:
+ from .._response_context import ResponseContext
+ from ..models._generated import CreateResponse, ResponseObject
+
+
+class TextResponse:
+ """A high-level convenience that produces a complete text-message response stream.
+
+ Implements :class:`AsyncIterable` so it can be returned directly from a
+ ``create_handler``.
+
+ Handles the full SSE lifecycle automatically:
+
+ * ``response.created`` → ``response.in_progress``
+ * ``response.output_item.added`` (message)
+ * ``response.content_part.added`` (text)
+ * ``response.output_text.delta`` (one or more)
+ * ``response.output_text.done``
+ * ``response.content_part.done``
+ * ``response.output_item.done``
+ * ``response.completed``
+
+ **Complete text mode** — provide ``create_text``::
+
+ return TextResponse(context, request,
+ create_text=lambda: "Hello!")
+
+ **Streaming mode** — provide ``create_text_stream``::
+
+ async def tokens():
+ for t in ["Hello", ", ", "world!"]:
+ yield t
+
+ return TextResponse(context, request,
+ create_text_stream=tokens)
+
+ :param context: The response context (provides the response ID).
+ :type context: ~azure.ai.agentserver.responses.ResponseContext
+ :param request: The incoming create-response request.
+ :type request: ~azure.ai.agentserver.responses.models.CreateResponse
+ :param create_text: An async or sync callable that returns the complete
+ response text. Mutually exclusive with *create_text_stream*.
+ :type create_text: Callable[[], str | Awaitable[str]] | None
+ :param create_text_stream: A callable returning an async iterable of text
+ chunks. Mutually exclusive with *create_text*.
+ :type create_text_stream: Callable[[], AsyncIterable[str]] | None
+ :param configure: An optional callback to configure the
+ :class:`ResponseObject` (e.g. set Temperature, Instructions, Metadata)
+ before ``response.created`` is emitted.
+ :type configure: Callable[[ResponseObject], None] | None
+ :raises ValueError: If neither or both of *create_text* and
+ *create_text_stream* are provided.
+ """
+
+ def __init__(
+ self,
+ context: "ResponseContext",
+ request: "CreateResponse",
+ *,
+ create_text: Callable[[], str | Awaitable[str]] | None = None,
+ create_text_stream: Callable[[], AsyncIterable[str]] | None = None,
+ configure: Callable[["ResponseObject"], None] | None = None,
+ ) -> None:
+ if create_text is not None and create_text_stream is not None:
+ raise ValueError("Provide either create_text or create_text_stream, not both.")
+ if create_text is None and create_text_stream is None:
+ raise ValueError("Provide either create_text or create_text_stream.")
+ self._context = context
+ self._request = request
+ self._create_text = create_text
+ self._create_text_stream = create_text_stream
+ self._configure = configure
+
+ def __aiter__(self) -> AsyncIterator[dict[str, Any]]:
+ return self._generate()
+
+ async def _generate(self) -> AsyncIterator[dict[str, Any]]:
+ stream = ResponseEventStream(
+ response_id=self._context.response_id,
+ request=self._request,
+ )
+
+ if self._configure is not None:
+ self._configure(stream.response)
+
+ yield stream.emit_created()
+ yield stream.emit_in_progress()
+
+ message = stream.add_output_item_message()
+ yield message.emit_added()
+
+ text = message.add_text_content()
+ yield text.emit_added()
+
+ if self._create_text is not None:
+ # Complete-text mode: one delta with the full text.
+ result = self._create_text()
+ if hasattr(result, "__await__"):
+ result = await result # type: ignore[misc]
+ yield text.emit_delta(result) # type: ignore[arg-type]
+ yield text.emit_done(result) # type: ignore[arg-type]
+ else:
+ # Streaming mode: N deltas, accumulate final text.
+ assert self._create_text_stream is not None
+ accumulated: list[str] = []
+ async for chunk in self._create_text_stream():
+ accumulated.append(chunk)
+ yield text.emit_delta(chunk)
+ yield text.emit_done("".join(accumulated))
+
+ yield message.emit_content_done(text)
+ yield message.emit_done()
+ yield stream.emit_completed()
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/cspell.json b/sdk/agentserver/azure-ai-agentserver-responses/cspell.json
new file mode 100644
index 000000000000..9b3d0673a5fb
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/cspell.json
@@ -0,0 +1,19 @@
+{
+ "ignoreWords": [
+ "mcpl",
+ "mcpr",
+ "mcpa",
+ "ctco",
+ "lsho",
+ "funcs",
+ "addl",
+ "badid"
+ ],
+ "ignorePaths": [
+ "*.csv",
+ "*.json",
+ "*.rst",
+ "samples/**",
+ "Makefile"
+ ]
+ }
\ No newline at end of file
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/dev_requirements.txt b/sdk/agentserver/azure-ai-agentserver-responses/dev_requirements.txt
new file mode 100644
index 000000000000..e81d26759452
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/dev_requirements.txt
@@ -0,0 +1,5 @@
+-e ../../../eng/tools/azure-sdk-tools
+../azure-ai-agentserver-core
+httpx
+pytest
+pytest-asyncio
\ No newline at end of file
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/docs/handler-implementation-guide.md b/sdk/agentserver/azure-ai-agentserver-responses/docs/handler-implementation-guide.md
new file mode 100644
index 000000000000..bada78b599fd
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/docs/handler-implementation-guide.md
@@ -0,0 +1,1232 @@
+# Handler Implementation Guide
+
+> Developer guidance for implementing response handlers — the single integration point for building Azure AI Responses API servers with this library.
+
+---
+
+## Table of Contents
+
+- [Overview](#overview)
+- [Getting Started](#getting-started)
+- [TextResponse](#textresponse)
+- [Server Registration](#server-registration)
+- [Handler Signature](#handler-signature)
+- [ResponseEventStream](#responseeventstream)
+ - [Method Naming Conventions](#method-naming-conventions)
+ - [Setting Custom Metadata](#setting-custom-metadata)
+ - [Builder Pattern](#builder-pattern)
+- [ResponseContext](#responsecontext)
+- [Emitting Output](#emitting-output)
+ - [Text Messages](#text-messages)
+ - [Function Calls (Tool Use)](#function-calls-tool-use)
+ - [Function Call Output](#function-call-output)
+ - [Reasoning Items](#reasoning-items)
+ - [Multiple Output Items](#multiple-output-items)
+ - [Other Tool Call Types](#other-tool-call-types)
+- [Handling Input](#handling-input)
+- [Cancellation](#cancellation)
+- [Error Handling](#error-handling)
+ - [Validation Pipeline](#validation-pipeline)
+- [Response Lifecycle](#response-lifecycle)
+ - [Terminal Event Requirement](#terminal-event-requirement)
+ - [Signalling Incomplete](#signalling-incomplete)
+ - [Token Usage Reporting](#token-usage-reporting)
+- [Configuration](#configuration)
+ - [Distributed Tracing](#distributed-tracing)
+ - [SSE Keep-Alive](#sse-keep-alive)
+- [Best Practices](#best-practices)
+- [Common Mistakes](#common-mistakes)
+
+---
+
+## Overview
+
+The library handles all protocol concerns — routing, serialization, SSE framing,
+`stream`/`background` mode negotiation, status lifecycle, and error shapes. You
+register one handler function via the `@app.create_handler` decorator. Your handler
+receives a `CreateResponse` request and produces response events. The library wraps
+these events into the correct HTTP response format based on the client's requested
+mode.
+
+You do **not** need to think about:
+
+- Whether the client requested JSON or SSE streaming
+- Whether the response is running in the foreground or background
+- HTTP status codes, content types, or error envelopes
+- Sequence numbers or response IDs
+
+The library manages all of this. Your handler just provides text or yields events.
+
+For most handlers, `TextResponse` eliminates even the event plumbing — you provide
+text (or a stream of tokens) and the library does the rest. For full control over
+every SSE event, use `ResponseEventStream`.
+
+---
+
+## Getting Started
+
+### Minimal Handler
+
+The simplest handler uses `TextResponse` — a convenience class that handles the
+full SSE event lifecycle for text-only responses:
+
+```python
+from azure.ai.agentserver.responses import (
+ CreateResponse,
+ ResponseContext,
+ ResponsesAgentServerHost,
+ TextResponse,
+)
+
+app = ResponsesAgentServerHost()
+
+
+@app.create_handler
+async def handler(request: CreateResponse, context: ResponseContext, cancellation_signal):
+ text = await context.get_input_text()
+ return TextResponse(
+ context,
+ request,
+ create_text=lambda: f"Echo: {text}",
+ )
+```
+
+### Running the Server
+
+```python
+app.run()
+```
+
+That's it. One call starts a Hypercorn host with OpenTelemetry, health checks,
+identity headers, and all Responses protocol endpoints (`POST /responses`,
+`GET /responses/{id}`, `POST /responses/{id}/cancel`, and more).
+
+**Next steps:** See [TextResponse](#textresponse) for streaming text and more
+patterns. For full SSE control (function calls, reasoning items, multiple outputs),
+see [ResponseEventStream](#responseeventstream). For hosting options beyond the
+default, see [Server Registration](#server-registration).
+
+---
+
+## TextResponse
+
+A standalone convenience class for the most common case — returning a single text
+message. `TextResponse` handles the full event lifecycle internally
+(`response.created` → `response.in_progress` → message/content events →
+`response.completed`).
+
+### Complete Text
+
+When you have the full text available at once:
+
+```python
+@app.create_handler
+async def handler(request: CreateResponse, context: ResponseContext, cancellation_signal):
+ text = await context.get_input_text()
+ return TextResponse(
+ context,
+ request,
+ create_text=lambda: f"Echo: {text}",
+ )
+```
+
+`create_text` can also be an async callable — useful when the answer requires I/O:
+
+```python
+@app.create_handler
+async def handler(request: CreateResponse, context: ResponseContext, cancellation_signal):
+ async def _build():
+ text = await context.get_input_text()
+ answer = await model.generate(text)
+ return answer
+
+ return TextResponse(context, request, create_text=_build)
+```
+
+### Streaming Text
+
+When an LLM produces tokens incrementally, pass `create_text_stream` — a callable
+that returns an `AsyncIterable[str]`. Each chunk becomes a separate
+`response.output_text.delta` SSE event:
+
+```python
+import asyncio
+
+@app.create_handler
+def handler(request: CreateResponse, context: ResponseContext, cancellation_signal):
+ async def generate_tokens():
+ tokens = ["Hello", ", ", "world", "!"]
+ for token in tokens:
+ await asyncio.sleep(0.05)
+ yield token
+
+ return TextResponse(
+ context,
+ request,
+ create_text_stream=generate_tokens,
+ )
+```
+
+### Setting Response Properties
+
+Use the optional `configure` callback to set properties like `temperature` or
+`metadata` before the `response.created` event:
+
+```python
+return TextResponse(
+ context,
+ request,
+ configure=lambda response: setattr(response, "temperature", 0.7),
+ create_text=lambda: "Hello!",
+)
+```
+
+### When to Use TextResponse vs ResponseEventStream
+
+| Use `TextResponse` when... | Use `ResponseEventStream` when... |
+|---|---|
+| Your handler returns a single text message | You need multiple output types (reasoning + message, function calls) |
+| You want minimal boilerplate | You need fine-grained delta control |
+| The focus of your handler is business logic, not event plumbing | You need to emit function calls, reasoning items, or tool calls |
+
+> **Note:** `TextResponse` handles all lifecycle events internally — the contract
+> described in [ResponseEventStream](#responseeventstream) (emit_created → output →
+> terminal event) applies only when you use `ResponseEventStream` directly.
+
+---
+
+## Server Registration
+
+### Default: Decorator Pattern
+
+The primary way to register a handler is the `@app.create_handler` decorator:
+
+```python
+app = ResponsesAgentServerHost()
+
+@app.create_handler
+def handler(request: CreateResponse, context: ResponseContext, cancellation_signal: asyncio.Event):
+ return TextResponse(context, request, create_text=lambda: "Hello!")
+
+app.run()
+```
+
+### With Options
+
+Pass `ResponsesServerOptions` to configure runtime behaviour:
+
+```python
+from azure.ai.agentserver.responses import ResponsesServerOptions
+
+app = ResponsesAgentServerHost(
+ options=ResponsesServerOptions(
+ default_model="gpt-4o",
+ default_fetch_history_count=50,
+ ),
+)
+```
+
+### Multi-Protocol Composition
+
+For agents that serve both Invocations and Responses protocols, use cooperative
+(mixin) inheritance:
+
+```python
+from azure.ai.agentserver.invocations import InvocationAgentServerHost
+from azure.ai.agentserver.responses import ResponsesAgentServerHost
+
+class MyHost(InvocationAgentServerHost, ResponsesAgentServerHost):
+ pass
+
+app = MyHost()
+```
+
+### Self-Hosting (Mount into existing app)
+
+Because `ResponsesAgentServerHost` **is** a Starlette ASGI application, it can be
+mounted as a sub-application:
+
+```python
+from starlette.applications import Starlette
+from starlette.routing import Mount
+
+responses_app = ResponsesAgentServerHost()
+
+@responses_app.create_handler
+def handler(request: CreateResponse, context: ResponseContext, cancellation_signal: asyncio.Event):
+ return TextResponse(context, request, create_text=lambda: "Hello!")
+
+app = Starlette(routes=[
+ Mount("/api", app=responses_app),
+])
+# Now responses are at POST /api/responses
+```
+
+### Route Mapping
+
+The host automatically maps five endpoints:
+
+- `POST /responses` — Create a response
+- `GET /responses/{response_id}` — Retrieve a response (JSON or SSE replay)
+- `POST /responses/{response_id}/cancel` — Cancel a response
+- `DELETE /responses/{response_id}` — Delete a response
+- `GET /responses/{response_id}/input_items` — List input items (paginated)
+
+### Custom Response Provider
+
+The server delegates state persistence and event streaming to a pluggable
+provider. The default in-memory implementation works for single-instance
+deployments.
+
+```python
+from azure.ai.agentserver.responses import ResponsesAgentServerHost
+
+# Use default in-memory provider (no configuration needed)
+app = ResponsesAgentServerHost()
+
+# Or provide a custom provider
+app = ResponsesAgentServerHost(provider=MyCustomProvider())
+```
+
+When deployed to Azure AI Foundry, durable persistence is enabled automatically —
+no custom provider registration is needed.
+
+---
+
+## Handler Signature
+
+```python
+@app.create_handler
+def handler(
+ request: CreateResponse,
+ context: ResponseContext,
+ cancellation_signal: asyncio.Event,
+):
+ ...
+```
+
+| Parameter | Description |
+|-----------|-------------|
+| `request` | The deserialized `CreateResponse` body from the client (model, input, tools, instructions, etc.) |
+| `context` | Provides the response ID, history resolution, and ID generation helpers |
+| `cancellation_signal` | An `asyncio.Event` set on cancellation (explicit `/cancel` call or client disconnection for non-background) |
+
+Your handler can either:
+
+1. **Return a `TextResponse`** — the simplest approach for text-only responses.
+2. **Be a Python generator** — `yield` events one at a time for full control.
+
+The library consumes the events, assigns sequence numbers, manages the response
+lifecycle, and delivers them to the client.
+
+### TextResponse handlers
+
+Use `return` — no generator yield needed:
+
+```python
+@app.create_handler
+def handler(request: CreateResponse, context: ResponseContext, cancellation_signal: asyncio.Event):
+ return TextResponse(context, request, create_text=lambda: "Hello!")
+```
+
+### Generator handlers (ResponseEventStream)
+
+Use `yield` for full control. Can be **sync** or **async**:
+
+```python
+# Sync handler
+@app.create_handler
+def handler(request: CreateResponse, context: ResponseContext, cancellation_signal: asyncio.Event):
+ stream = ResponseEventStream(response_id=context.response_id, model=request.model)
+ yield stream.emit_created()
+ yield stream.emit_in_progress()
+ yield from stream.output_item_message("Hello!")
+ yield stream.emit_completed()
+
+# Async handler
+@app.create_handler
+async def handler(request: CreateResponse, context: ResponseContext, cancellation_signal: asyncio.Event):
+ stream = ResponseEventStream(response_id=context.response_id, model=request.model)
+ yield stream.emit_created()
+ yield stream.emit_in_progress()
+ async for event in stream.aoutput_item_message(token_stream()):
+ yield event
+ yield stream.emit_completed()
+```
+
+---
+
+## ResponseEventStream
+
+For full control over every SSE event — multiple output types, custom Response
+properties, streaming deltas — use `ResponseEventStream`. This is the lower-level
+counterpart to `TextResponse`:
+
+```python
+stream = ResponseEventStream(response_id=context.response_id, model=request.model)
+
+# 1. Signal response creation
+yield stream.emit_created()
+yield stream.emit_in_progress()
+
+# 2. Build and emit output
+message = stream.add_output_item_message()
+yield message.emit_added()
+
+text = message.add_text_content()
+yield text.emit_added()
+yield text.emit_delta("Hello, world!")
+yield text.emit_done("Hello, world!")
+
+yield message.emit_content_done(text)
+yield message.emit_done()
+
+# 3. Signal completion
+yield stream.emit_completed()
+```
+
+Create a `ResponseEventStream` at the start of your handler:
+
+```python
+stream = ResponseEventStream(response_id=context.response_id, model=request.model)
+```
+
+It provides:
+
+| Category | Methods |
+|----------|---------|
+| Response | `stream.response` — the underlying Response object. Set custom metadata or instructions before `emit_created()` |
+| Lifecycle | `emit_created()`, `emit_in_progress()`, `emit_completed()`, `emit_failed()`, `emit_incomplete()` |
+| Output factories | `add_output_item_message()`, `add_output_item_function_call()`, `add_output_item_reasoning_item()`, and more |
+| Convenience generators | `output_item_message()`, `output_item_function_call()`, `output_item_reasoning_item()`, and async variants |
+
+### Method Naming Conventions
+
+`ResponseEventStream` and its builders use a consistent naming scheme. Knowing the
+prefixes tells you what any method does at a glance:
+
+#### Stream-level methods
+
+| Prefix | Example | Returns | Purpose |
+|--------|---------|---------|---------|
+| `emit_*` | `emit_created()`, `emit_completed()` | A single event | Produce one response-lifecycle event |
+| `add_*` | `add_output_item_message()` | A builder object | Create a builder for step-by-step event emission |
+| `output_item_*` | `output_item_message(text)` | Generator of events | Convenience — yields the complete output-item lifecycle |
+| `aoutput_item_*` | `aoutput_item_message(stream)` | Async generator | Async convenience for streaming `AsyncIterable[str]` |
+
+#### Builder-level methods
+
+| Prefix | Example | Returns | Purpose |
+|--------|---------|---------|---------|
+| `emit_*` | `emit_added()`, `emit_done()`, `emit_delta(chunk)` | A single event | Produce one event in the builder's lifecycle |
+| `add_*` | `add_text_content()`, `add_summary_part()` | A child builder | Create a nested content builder |
+
+**Rule of thumb:** If a method returns a single event, it starts with `emit_`. If
+it returns a builder, it starts with `add_`. If it returns a generator of events,
+it's named after the content it produces (`output_item_message`, etc.).
+
+Every convenience generator has two variants:
+
+| Variant | Signature | When to use |
+|---------|-----------|-------------|
+| **Sync** | `output_item_message(text: str)` → `Iterable` | You have the full value up-front |
+| **Async** | `aoutput_item_message(stream: AsyncIterable[str])` → `AsyncIterable` | You're receiving chunks from a model |
+
+> **Tip:** Start with `TextResponse`. If you need convenience generators
+> (`output_item_message`), use those. Drop down to `add_*` builders only when you
+> need fine-grained control.
+
+### Setting Custom Metadata
+
+Use the `response` property to set custom metadata or instructions before emitting
+the created event:
+
+```python
+stream = ResponseEventStream(response_id=context.response_id, model=request.model)
+
+# Set custom metadata (preserved in all response.* events)
+stream.response.metadata = {"handler_version": "2.0", "region": "us-west-2"}
+
+# Set custom instructions
+stream.response.instructions = "You are a helpful assistant."
+
+yield stream.emit_created()
+```
+
+If the handler does not set metadata or instructions, the library automatically
+copies them from the original `CreateResponse` request.
+
+The library also auto-populates `conversation` and `previous_response_id` on the
+response from the original request.
+
+**Important:** Do not add output items directly to `stream.response.output`. Use
+the output builder factories instead — the library tracks output items through
+`output_item.added` events and will detect direct manipulation as a handler error.
+
+Every `ResponseEventStream` handler must:
+
+1. Call `stream.emit_created()` first — this creates the `response.created` SSE
+ event. Mandatory and must be the first event yielded.
+2. Call `stream.emit_in_progress()` — this creates the `response.in_progress` SSE
+ event.
+3. Emit output items using the builder factories.
+4. End with exactly one terminal event: `stream.emit_completed()`,
+ `stream.emit_failed()`, or `stream.emit_incomplete()`.
+
+**Bad handler consequences:**
+
+| Violation | Result |
+|-----------|--------|
+| First event is not `response.created` | HTTP 500 error, no persistence |
+| Direct `response.output` manipulation detected | `response.failed` (post-created) or HTTP 500 (pre-created) |
+| Empty generator (no events) | HTTP 500 error, no persistence |
+| Throws before `response.created` | HTTP 500 error, no persistence |
+| Ends without terminal event or error | The library emits `response.failed` automatically |
+| Throws after `response.created` | The library emits `response.failed`, persists failed state |
+
+> **Note:** `TextResponse` handles all lifecycle events internally — the contract
+> above applies only when you use `ResponseEventStream` directly.
+
+### Builder Pattern
+
+Output is constructed through a builder hierarchy that enforces correct event
+ordering:
+
+```
+ResponseEventStream
+ └── OutputItemBuilder (message, function call, reasoning, etc.)
+ └── Content builders (text, refusal, summary, etc.)
+```
+
+Each builder tracks its lifecycle state and will raise if you emit events out of
+order. This prevents protocol violations at development time.
+
+**Key rule:** Every builder that you start (`emit_added`) must be finished
+(`emit_done`). Unfinished builders result in malformed responses.
+
+---
+
+## ResponseContext
+
+```python
+class ResponseContext:
+ response_id: str # Library-generated response ID
+ is_shutdown_requested: bool # True when host is shutting down
+ raw_body: dict[str, Any] | None # Raw JSON body (None for provider-replayed responses)
+ request: CreateResponse | None # Parsed request model
+ client_headers: dict[str, str] # x-client-* headers from request
+ query_parameters: dict[str, str] # Query parameters from the HTTP request
+ async def get_input_items() -> Sequence[Item] # Resolved input items as Item subtypes
+ async def get_input_text() -> str # Extract all text content from input items
+ async def get_history() -> Sequence[OutputItem] # Conversation history items
+```
+
+### Input Items — `get_input_items()`
+
+Returns the caller's input items as `Item` subtypes, fully resolved:
+
+```python
+input_items = await context.get_input_items()
+```
+
+- Inline items are returned as-is — the same `Item` subtypes from the original
+ request (e.g. `ItemMessage`, `FunctionCallOutputItemParam`)
+- `ItemReferenceParam` entries are batch-resolved via the provider and converted
+ to concrete `Item` subtypes
+- Unresolvable references (provider returns ``None``) are silently dropped
+- Input order is preserved
+- Lazy — computed once and cached
+
+Pass `resolve_references=False` to skip reference resolution (item references are
+left as `ItemReferenceParam` in the returned sequence):
+
+```python
+input_items = await context.get_input_items(resolve_references=False)
+```
+
+### Input Text — `get_input_text()`
+
+Convenience method that resolves input items, filters for `ItemMessage` items,
+and joins all `MessageContentInputTextContent` text values:
+
+```python
+text = await context.get_input_text()
+```
+
+Returns `""` if no text content is found. Accepts `resolve_references=False` to
+skip reference resolution.
+
+### Conversation History — `get_history()`
+
+Returns resolved output items from previous responses in the conversation chain:
+
+```python
+history = await context.get_history()
+```
+
+- Two-step resolution: resolves history item IDs, then fetches actual items
+- Ascending order — oldest-first
+- Configurable limit via `ResponsesServerOptions.default_fetch_history_count`
+ (default: 100)
+- Lazy singleton — computed once and cached
+
+### Client Headers
+
+Returns `x-client-*` prefixed headers forwarded from the original HTTP request:
+
+```python
+client_headers = context.client_headers
+request_id = client_headers.get("x-client-request-id")
+```
+
+---
+
+## Emitting Output
+
+Each output type can be emitted using either **convenience generators**
+(recommended — less code, correct by construction) or **builders** (when you need
+fine-grained control). The examples below show both, starting with the simpler
+approach.
+
+> **Tip:** For simple text-only responses, [`TextResponse`](#textresponse) is even
+> simpler than `ResponseEventStream` — it handles the entire event lifecycle in a
+> single call.
+
+### Text Messages
+
+#### Using TextResponse (simplest)
+
+```python
+@app.create_handler
+def handler(request: CreateResponse, context: ResponseContext, cancellation_signal: asyncio.Event):
+ return TextResponse(context, request,
+ create_text=lambda: "Hello, world!")
+```
+
+#### Using convenience generators
+
+```python
+stream = ResponseEventStream(response_id=context.response_id, model=request.model)
+yield stream.emit_created()
+yield stream.emit_in_progress()
+
+# Complete text — full value up-front
+yield from stream.output_item_message("Hello, world!")
+
+yield stream.emit_completed()
+```
+
+Streaming from an LLM:
+
+```python
+async for evt in stream.aoutput_item_message(get_token_stream()):
+ yield evt
+```
+
+#### Using builders (fine-grained control)
+
+When you need multiple content parts in one message, emit refusal content, set
+custom properties on the output item, or interleave non-event work between builder
+calls:
+
+```python
+message = stream.add_output_item_message()
+yield message.emit_added()
+
+text = message.add_text_content()
+yield text.emit_added()
+
+# Stream text incrementally
+yield text.emit_delta("First chunk of text. ")
+yield text.emit_delta("Second chunk. ")
+
+# Finalize the text content
+yield text.emit_done("First chunk of text. Second chunk. ")
+
+yield message.emit_content_done(text)
+yield message.emit_done()
+```
+
+### Function Calls (Tool Use)
+
+When your handler needs the client to execute a function (tool) and return the
+result. Function calls require `ResponseEventStream` — `TextResponse` cannot emit
+them.
+
+#### Using convenience generators
+
+```python
+yield stream.emit_created()
+yield stream.emit_in_progress()
+
+args = json.dumps({"location": "Seattle"})
+yield from stream.output_item_function_call("get_weather", "call_1", args)
+
+yield stream.emit_completed()
+```
+
+#### Using builders (fine-grained control)
+
+```python
+func_call = stream.add_output_item_function_call("get_weather", "call_weather_1")
+yield func_call.emit_added()
+
+arguments = json.dumps({"location": "Seattle", "unit": "fahrenheit"})
+yield func_call.emit_arguments_delta(arguments)
+yield func_call.emit_arguments_done(arguments)
+yield func_call.emit_done()
+```
+
+The client receives the function call, executes it locally, and sends a new request
+with the function output as input. Your handler then processes the result on the
+next turn.
+
+#### Multi-Turn Function Calling
+
+```python
+@app.create_handler
+def handler(request: CreateResponse, context: ResponseContext, cancellation_signal: asyncio.Event):
+ stream = ResponseEventStream(response_id=context.response_id, model=request.model)
+ tool_output = _find_function_call_output(request)
+
+ if tool_output is not None:
+ # Turn 2+: Process the function result and respond
+ yield stream.emit_created()
+ yield stream.emit_in_progress()
+ yield from stream.output_item_message(f"The result is: {tool_output}")
+ yield stream.emit_completed()
+ else:
+ # Turn 1: Request a function call
+ yield stream.emit_created()
+ yield stream.emit_in_progress()
+ args = json.dumps({"location": "Seattle"})
+ yield from stream.output_item_function_call("get_weather", "call_weather_1", args)
+ yield stream.emit_completed()
+```
+
+### Function Call Output
+
+When your handler itself executes a tool and includes the output in the response
+(no client round-trip):
+
+```python
+yield from stream.output_item_function_call_output("call_weather_1", weather_json)
+```
+
+Function call outputs have no deltas — only `output_item.added` and
+`output_item.done`.
+
+### Reasoning Items
+
+Emit reasoning (chain-of-thought) before the main response. Reasoning items
+require `ResponseEventStream`.
+
+#### Using convenience generators
+
+```python
+yield stream.emit_created()
+yield stream.emit_in_progress()
+
+# Output 0: Reasoning
+yield from stream.output_item_reasoning_item("Let me think about this...")
+
+# Output 1: Message with the answer
+yield from stream.output_item_message("The answer is 42.")
+
+yield stream.emit_completed()
+```
+
+#### Using builders (fine-grained control)
+
+```python
+reasoning = stream.add_output_item_reasoning_item()
+yield reasoning.emit_added()
+
+summary = reasoning.add_summary_part()
+yield summary.emit_added()
+yield summary.emit_text_delta("Let me think about this...")
+yield summary.emit_text_done("Let me think about this...")
+yield summary.emit_done()
+reasoning.emit_summary_part_done(summary)
+yield reasoning.emit_done()
+```
+
+### Multiple Output Items
+
+A single response can contain multiple output items. Each gets an auto-incrementing
+output index:
+
+```python
+yield stream.emit_created()
+yield stream.emit_in_progress()
+
+# Output 0
+yield from stream.output_item_message("First message.")
+
+# Output 1
+yield from stream.output_item_message("Second message.")
+
+yield stream.emit_completed()
+```
+
+### Other Tool Call Types
+
+The library provides specialised builders for each tool call type:
+
+| Builder | Factory method | Sub-item convenience |
+|---------|---------------|---------------------|
+| `OutputItemCodeInterpreterCallBuilder` | `add_output_item_code_interpreter_call()` | `code()` |
+| `OutputItemFileSearchCallBuilder` | `add_output_item_file_search_call()` | — |
+| `OutputItemWebSearchCallBuilder` | `add_output_item_web_search_call()` | — |
+| `OutputItemImageGenCallBuilder` | `add_output_item_image_gen_call()` | — |
+| `OutputItemMcpCallBuilder` | `add_output_item_mcp_call(server_label, name)` | `arguments()` |
+| `OutputItemCustomToolCallBuilder` | `add_output_item_custom_tool_call(call_id, name)` | `input_data()` |
+
+Each builder enforces its own lifecycle ordering.
+
+---
+
+## Handling Input
+
+Access the client's input via the `ResponseContext`:
+
+```python
+# All resolved input items as Item subtypes
+input_items = await context.get_input_items()
+
+# Convenience: extract all text content as a single string
+text = await context.get_input_text()
+```
+
+The `CreateResponse` object also provides:
+
+- `request.model` — the requested model name
+- `request.instructions` — system instructions
+- `request.tools` — registered tool definitions
+- `request.metadata` — key-value metadata pairs
+- `request.store` — whether to persist the response
+- `request.stream` — whether SSE streaming was requested
+- `request.background` — whether background mode was requested
+
+---
+
+## Cancellation
+
+The `cancellation_signal` (`asyncio.Event`) is set when:
+
+- A client calls `POST /responses/{id}/cancel` (background mode only)
+- A client disconnects the HTTP connection (non-background mode)
+
+### TextResponse Handlers
+
+`TextResponse` handlers use `return TextResponse(...)`. Cancellation is propagated
+automatically — if the signal fires during `create_text` or `create_text_stream`,
+remaining events are suppressed and the library handles the winddown.
+
+For streaming, check cancellation between chunks:
+
+```python
+@app.create_handler
+def handler(request: CreateResponse, context: ResponseContext, cancellation_signal: asyncio.Event):
+ async def stream_tokens():
+ async for token in model.stream(prompt):
+ if cancellation_signal.is_set():
+ return
+ yield token
+
+ return TextResponse(context, request, create_text_stream=stream_tokens)
+```
+
+### ResponseEventStream Handlers — Sync
+
+Check the signal between iterations:
+
+```python
+@app.create_handler
+def handler(request: CreateResponse, context: ResponseContext, cancellation_signal: asyncio.Event):
+ stream = ResponseEventStream(...)
+ yield stream.emit_created()
+ yield stream.emit_in_progress()
+
+ for chunk in get_chunks():
+ if cancellation_signal.is_set():
+ break
+ yield text.emit_delta(chunk)
+
+ yield stream.emit_completed()
+```
+
+### ResponseEventStream Handlers — Async
+
+```python
+@app.create_handler
+async def handler(request: CreateResponse, context: ResponseContext, cancellation_signal: asyncio.Event):
+ stream = ResponseEventStream(...)
+ yield stream.emit_created()
+ yield stream.emit_in_progress()
+
+ async for token in model.stream(prompt):
+ if cancellation_signal.is_set():
+ break
+ yield text.emit_delta(token)
+
+ yield stream.emit_completed()
+```
+
+### What the Library Does on Cancellation
+
+Let the handler exit cleanly — the server handles the winddown automatically:
+
+1. The library sets the `cancellation_signal` event.
+2. It waits up to 10 seconds for the handler to wind down. If the handler doesn't
+ cooperate, the cancel endpoint returns the response in its current state.
+3. Once the handler finishes (within or beyond the grace period), the response
+ transitions to `cancelled` status and a `response.failed` terminal event is
+ emitted and persisted.
+
+You don't need to emit any terminal event on cancellation — just check the signal
+and exit your generator cleanly.
+
+### Graceful Shutdown
+
+When the host shuts down (e.g., SIGTERM), `context.is_shutdown_requested` is set to
+`True` and the cancellation signal is triggered. Use this to distinguish shutdown
+from explicit cancel:
+
+```python
+@app.create_handler
+async def handler(request: CreateResponse, context: ResponseContext, cancellation_signal: asyncio.Event):
+ stream = ResponseEventStream(...)
+ yield stream.emit_created()
+ yield stream.emit_in_progress()
+
+ try:
+ result = await do_long_running_work()
+ except asyncio.CancelledError:
+ if context.is_shutdown_requested:
+ yield stream.emit_incomplete()
+ return
+ raise
+
+ yield from stream.output_item_message(result)
+ yield stream.emit_completed()
+```
+
+---
+
+## Error Handling
+
+### Handler Exceptions
+
+Throwing an exception is a valid way to terminate your handler — you don't need to
+emit a terminal event first. The library catches the exception and maps it to the
+appropriate HTTP error response:
+
+| Exception | HTTP Status | Response Status | Error Code |
+|-----------|-------------|-----------------|------------|
+| `RequestValidationError` | 400 | failed | from exception |
+| `ValueError` | 400 | failed | `invalid_request` |
+| Any other exception | 500 | failed | `server_error` |
+
+For unknown exceptions, clients see a generic 500 — actual exception details are
+logged but never exposed.
+
+### Explicit Failure
+
+To signal a specific failure with a custom error code and message:
+
+```python
+yield stream.emit_created()
+yield stream.emit_in_progress()
+# ... some work ...
+
+# Something went wrong — signal failure explicitly
+yield stream.emit_failed("server_error", "Custom error message")
+# Do NOT yield any more events after a terminal event
+```
+
+### Validation Pipeline
+
+Bad client input returns HTTP 400 before your handler runs. Bad handler output
+returns HTTP 500 or triggers `response.failed`. The library validates:
+
+- Request payload structure
+- Response ID format
+- Agent reference structure
+- Event ordering (created → in_progress → output → terminal)
+
+---
+
+## Response Lifecycle
+
+### Terminal Event Requirement
+
+Your handler must do one of two things before the generator completes:
+
+1. **Emit a terminal event** — `emit_completed()`, `emit_failed()`, or
+ `emit_incomplete()`
+2. **Raise an exception** — the library maps it to `response.failed`
+
+What is **not** valid is silently completing the generator without either — the
+library treats this as a programming error and emits `response.failed`
+automatically.
+
+```python
+# ✅ Emit a terminal event
+yield stream.emit_completed()
+
+# ✅ Also valid: raise an exception
+raise ValueError("Unsupported model")
+
+# ❌ Bad: stopping without a terminal event or exception
+# → library emits response.failed with a diagnostic log
+```
+
+> **Note:** This section applies to `ResponseEventStream` handlers. `TextResponse`
+> handles terminal events automatically.
+
+### Signalling Incomplete
+
+If your handler cannot fully complete the request (e.g., output was truncated):
+
+```python
+yield stream.emit_created()
+yield stream.emit_in_progress()
+
+message = stream.add_output_item_message()
+# ... partial output ...
+yield message.emit_done()
+
+yield stream.emit_incomplete("max_output_tokens")
+```
+
+### Token Usage Reporting
+
+Terminal methods accept an optional `usage` parameter for reporting token
+consumption:
+
+```python
+usage = {"input_tokens": 150, "output_tokens": 42, "total_tokens": 192}
+
+# Completed with usage
+yield stream.emit_completed(usage=usage)
+
+# Failed with usage
+yield stream.emit_failed("server_error", "Error message", usage=usage)
+
+# Incomplete with usage
+yield stream.emit_incomplete("max_output_tokens", usage=usage)
+```
+
+Handlers that proxy to an LLM and receive token counts should pass them through.
+Handlers that do not interact with an LLM typically omit usage.
+
+---
+
+## Configuration
+
+| Option | Default | Description |
+|--------|---------|-------------|
+| `default_model` | `None` | Default model when `model` is omitted from the request |
+| `default_fetch_history_count` | `100` | Maximum history items resolved by `get_history()` |
+| `sse_keep_alive_interval_seconds` | `None` (disabled) | Interval between SSE keep-alive comments |
+| `shutdown_grace_period_seconds` | `10` | Seconds to wait for in-flight requests on shutdown |
+
+Platform environment variables (read once at startup via `AgentConfig`):
+
+| Variable | Default | Description |
+|----------|---------|-------------|
+| `SSE_KEEPALIVE_INTERVAL` | Disabled | Interval (seconds) between SSE keep-alive comments |
+| `PORT` | `8088` | HTTP listen port |
+| `DEFAULT_FETCH_HISTORY_ITEM_COUNT` | `100` | Override for `default_fetch_history_count` |
+| `FOUNDRY_PROJECT_ENDPOINT` | — | Foundry project endpoint (enables durable persistence) |
+| `FOUNDRY_AGENT_SESSION_ID` | — | Platform-supplied session ID |
+| `FOUNDRY_AGENT_NAME` | — | Agent name for tracing |
+| `FOUNDRY_AGENT_VERSION` | — | Agent version for tracing |
+
+### Distributed Tracing
+
+The server emits OpenTelemetry-compatible spans for `POST /responses` requests.
+Handler authors can create child spans — they are automatically parented under the
+library's span.
+
+The library sets baggage items on the span:
+
+| Key | Description |
+|-----|-------------|
+| `response.id` | The library-generated response identifier |
+| `conversation.id` | Conversation ID from the request (if present) |
+| `streaming` | `"true"` or `"false"` |
+| `agent.name` | Agent name from `agent_reference` (if provided) |
+| `agent.id` | Composite `{name}:{version}` (if provided) |
+| `provider.name` | Fixed: `"azure.ai.responses"` |
+| `request.id` | From the `X-Request-Id` HTTP header (if present) |
+
+### SSE Keep-Alive
+
+The server can send periodic keep-alive comments during SSE streaming to prevent
+reverse proxies from closing idle connections. Disabled by default.
+
+Enable via environment variable:
+
+```bash
+export SSE_KEEPALIVE_INTERVAL=15
+```
+
+Or via the options constructor:
+
+```python
+app = ResponsesAgentServerHost(
+ options=ResponsesServerOptions(sse_keep_alive_interval_seconds=15),
+)
+```
+
+The `X-Accel-Buffering: no` response header is automatically set on SSE streams
+to disable nginx buffering.
+
+---
+
+## Best Practices
+
+### 1. Start with TextResponse
+
+Use `TextResponse` for text-only responses — it handles all lifecycle events
+automatically. Drop down to `ResponseEventStream` only when you need function
+calls, reasoning items, multiple outputs, or fine-grained event control.
+
+### 2. Always Emit Created First, Terminal Last
+
+Every `ResponseEventStream` handler must yield `stream.emit_created()` followed by
+`stream.emit_in_progress()` as its first two events, and exactly one terminal event
+as its last. The library validates this ordering. `TextResponse` handles this
+automatically.
+
+### 3. Use Small, Frequent Deltas
+
+For streaming mode, smaller deltas create a more responsive UX:
+
+```python
+# Good: Stream word-by-word
+for word in words:
+ yield text.emit_delta(word + " ")
+```
+
+### 4. Check Cancellation in Loops
+
+Any long-running loop should check `cancellation_signal`:
+
+```python
+for item in large_collection:
+ if cancellation_signal.is_set():
+ break
+ # ... process item ...
+```
+
+### 5. Close Every Builder You Open
+
+Every builder follows `emit_added()` → work → `emit_done()`. If you forget
+`emit_done()`, the response will have incomplete output items.
+
+### 6. Prefer Convenience Generators Over Builders
+
+Start with `output_item_message()` / `aoutput_item_message()`. Drop down to
+`add_output_item_message()` builders only when you need fine-grained control.
+
+### 7. Let the Library Handle Mode Negotiation
+
+Never branch on `request.stream` or `request.background` in your handler. The
+library handles these — your handler always produces the same event sequence
+regardless of mode.
+
+```python
+# ❌ Don't do this
+if request.stream:
+ # streaming path
+else:
+ # non-streaming path
+
+# ✅ Same event sequence for all modes
+yield stream.emit_created()
+yield stream.emit_in_progress()
+yield from stream.output_item_message("Hello!")
+yield stream.emit_completed()
+```
+
+> **Tip:** `TextResponse` handlers that use `return TextResponse(...)` don't need
+> generators at all — they produce the same events for all modes automatically.
+
+---
+
+## Common Mistakes
+
+### Using ResponseEventStream When TextResponse Suffices
+
+```python
+# ❌ Unnecessary boilerplate for a simple text response
+stream = ResponseEventStream(response_id=context.response_id, model=request.model)
+yield stream.emit_created()
+yield stream.emit_in_progress()
+yield from stream.output_item_message("Hello!")
+yield stream.emit_completed()
+
+# ✅ Use TextResponse — one line, same result
+return TextResponse(context, request, create_text=lambda: "Hello!")
+```
+
+### Emitting Events After a Terminal Event
+
+```python
+# ❌ Don't yield after emit_completed
+yield stream.emit_completed()
+yield message.emit_done() # This will be ignored or cause errors
+
+# ✅ Finish all output items before the terminal event
+yield message.emit_done()
+yield stream.emit_completed()
+```
+
+### Not Closing Content Builders
+
+```python
+# ❌ Missing emit_content_done
+text = message.add_text_content()
+yield text.emit_added()
+yield text.emit_done("text")
+yield message.emit_done() # Content wasn't properly closed
+
+# ✅ Always call emit_content_done before closing the message
+text = message.add_text_content()
+yield text.emit_added()
+yield text.emit_done("text")
+yield message.emit_content_done(text) # Close the content part
+yield message.emit_done()
+```
+
+### Swallowing Cancellation
+
+```python
+# ❌ Don't catch cancellation and convert to failure
+try:
+ ...
+except asyncio.CancelledError:
+ yield stream.emit_failed("server_error", "Cancelled")
+
+# ✅ Let it propagate — the library handles it
+# Just check cancellation_signal.is_set() and exit cleanly
+```
+
+### Branching on Stream/Background Flags
+
+```python
+# ❌ Don't do this — the library handles mode negotiation
+if request.stream:
+ ...
+else:
+ ...
+
+# ✅ Same event sequence regardless of mode
+yield stream.emit_created()
+yield stream.emit_in_progress()
+yield from stream.output_item_message("Hello!")
+yield stream.emit_completed()
+```
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/mypy.ini b/sdk/agentserver/azure-ai-agentserver-responses/mypy.ini
new file mode 100644
index 000000000000..abd4cc95b282
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/mypy.ini
@@ -0,0 +1,8 @@
+[mypy]
+explicit_package_bases = True
+
+[mypy-samples.*]
+ignore_errors = true
+
+[mypy-azure.ai.agentserver.responses.models._generated.*]
+ignore_errors = true
\ No newline at end of file
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/pyproject.toml b/sdk/agentserver/azure-ai-agentserver-responses/pyproject.toml
new file mode 100644
index 000000000000..f2e50b819ce9
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/pyproject.toml
@@ -0,0 +1,71 @@
+[project]
+name = "azure-ai-agentserver-responses"
+dynamic = ["version", "readme"]
+description = "Python SDK for building servers implementing the Azure AI Responses protocol"
+
+requires-python = ">=3.10"
+license = "MIT"
+authors = [
+ { name = "Microsoft Corporation" },
+]
+classifiers = [
+ "Development Status :: 4 - Beta",
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 3 :: Only",
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3.10",
+ "Programming Language :: Python :: 3.11",
+ "Programming Language :: Python :: 3.12",
+ "Programming Language :: Python :: 3.13",
+ "Programming Language :: Python :: 3.14"
+]
+dependencies = [
+ "azure-ai-agentserver-core>=2.0.0b1",
+ "azure-core>=1.30.0",
+ "isodate>=0.6.1"
+]
+keywords = ["azure", "azure sdk"]
+
+[project.urls]
+repository = "https://github.com/Azure/azure-sdk-for-python"
+
+[build-system]
+requires = ["setuptools>=69", "wheel"]
+build-backend = "setuptools.build_meta"
+
+[tool.setuptools.dynamic]
+version = { attr = "azure.ai.agentserver.responses._version.VERSION" }
+readme = { file = ["README.md"], content-type = "text/markdown" }
+
+[tool.setuptools.packages.find]
+exclude = [
+ "tests*",
+ "type_spec*",
+ "samples*",
+ "doc*",
+ "azure",
+ "azure.ai",
+ "scripts*"
+]
+
+[tool.setuptools.package-data]
+pytyped = ["py.typed"]
+
+[tool.ruff]
+target-version = "py310"
+line-length = 120
+exclude = ["azure/ai/agentserver/responses/models/_generated"]
+
+[tool.ruff.lint]
+select = ["E", "F", "W", "I"]
+
+[tool.mypy]
+python_version = "3.10"
+warn_return_any = true
+warn_unused_configs = true
+disallow_untyped_defs = true
+
+[tool.pytest.ini_options]
+asyncio_mode = "auto"
+testpaths = ["tests"]
+pythonpath = ["."]
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/samples/README.md b/sdk/agentserver/azure-ai-agentserver-responses/samples/README.md
new file mode 100644
index 000000000000..ea9cb9be64c8
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/samples/README.md
@@ -0,0 +1,39 @@
+---
+page_type: sample
+languages:
+- python
+products:
+- azure
+name: azure-ai-agentserver-responses samples for Python
+description: Samples for the azure-ai-agentserver-responses client library.
+---
+
+# azure-ai-agentserver-responses Samples
+
+## Quick start
+
+```bash
+pip install -r requirements.txt
+python sample_01_getting_started.py
+```
+
+## Samples index
+
+| # | Sample | Pattern | Description |
+|---|--------|---------|-------------|
+| 01 | [Getting Started](sample_01_getting_started.py) | `TextResponse` | Echo handler — simplest async handler that echoes user input |
+| 02 | [Streaming Text Deltas](sample_02_streaming_text_deltas.py) | `TextResponse` + `create_text_stream` | Token-by-token streaming via async iterable, with `configure` callback |
+| 03 | [Full Control](sample_03_full_control.py) | `ResponseEventStream` | Convenience, streaming, and builder — three ways to emit the same output |
+| 04 | [Function Calling](sample_04_function_calling.py) | `ResponseEventStream` | Two-turn function calling with convenience and builder variants |
+| 05 | [Conversation History](sample_05_conversation_history.py) | `TextResponse` + async `create_text` | Study tutor with `context.get_history()` and `ResponsesServerOptions` |
+| 06 | [Multi-Output](sample_06_multi_output.py) | `ResponseEventStream` | Math solver: reasoning + message, convenience and builder variants |
+| 07 | [Customization](sample_07_customization.py) | `TextResponse` | Custom `ResponsesServerOptions`, default model, debug logging |
+| 08 | [Mixin Composition](sample_08_mixin_composition.py) | `TextResponse` | Multi-protocol server via cooperative mixin inheritance |
+| 09 | [Self-Hosting](sample_09_self_hosting.py) | `TextResponse` | Mount responses into an existing Starlette app under `/api` |
+| 10 | [Streaming Upstream](sample_10_streaming_upstream.py) | Raw events | Forward to upstream streaming LLM via `openai` SDK, relay SSE events |
+| 11 | [Non-Streaming Upstream](sample_11_non_streaming_upstream.py) | `ResponseEventStream` | Forward to upstream non-streaming LLM via `openai` SDK, emit items |
+
+### When to use which
+
+- **`TextResponse`** — Use for text-only responses (samples 1, 2, 5, 7–9). Handles the full SSE lifecycle automatically.
+- **`ResponseEventStream`** — Use when you need function calls, reasoning items, multiple output types, upstream proxying, or fine-grained event control (samples 3, 4, 6, 10, 11).
\ No newline at end of file
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/samples/requirements.txt b/sdk/agentserver/azure-ai-agentserver-responses/samples/requirements.txt
new file mode 100644
index 000000000000..7d41e291837d
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/samples/requirements.txt
@@ -0,0 +1,3 @@
+azure-ai-agentserver-responses
+azure-ai-agentserver-invocations
+openai
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/samples/sample_01_getting_started.py b/sdk/agentserver/azure-ai-agentserver-responses/samples/sample_01_getting_started.py
new file mode 100644
index 000000000000..7234a957b654
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/samples/sample_01_getting_started.py
@@ -0,0 +1,70 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Sample 01 — Getting Started (echo handler).
+
+Simplest possible handler: reads the user's input text and echoes it back
+as a single non-streaming message using ``TextResponse``.
+
+``TextResponse`` handles the full SSE lifecycle automatically:
+``response.created`` → ``response.in_progress`` → message/content events
+→ ``response.completed``.
+
+Usage::
+
+ # Start the server
+ python sample_01_getting_started.py
+
+ # Send a request
+ curl -X POST http://localhost:8088/responses \
+ -H "Content-Type: application/json" \
+ -d '{"model": "echo", "input": "Hello, world!"}'
+ # -> {"id": "...", "status": "completed", "output": [{"type": "message",
+ # "content": [{"type": "output_text", "text": "Echo: Hello, world!"}]}]}
+
+ # Stream the response
+ curl -N -X POST http://localhost:8088/responses \
+ -H "Content-Type: application/json" \
+ -d '{"model": "echo", "input": "Hello, world!", "stream": true}'
+ # -> event: response.created data: {"response": {"status": "in_progress", ...}}
+ # -> event: response.in_progress data: {"response": {"status": "in_progress", ...}}
+ # -> event: response.output_item.added data: {"item": {"type": "message", ...}}
+ # -> event: response.content_part.added data: {"part": {"type": "output_text", ...}}
+ # -> event: response.output_text.delta data: {"delta": "Echo: Hello, world!"}
+ # -> event: response.output_text.done data: {"text": "Echo: Hello, world!"}
+ # -> event: response.content_part.done data: {"part": {"type": "output_text", ...}}
+ # -> event: response.output_item.done data: {"item": {"type": "message", ...}}
+ # -> event: response.completed data: {"response": {"status": "completed", ...}}
+"""
+
+import asyncio
+
+from azure.ai.agentserver.responses import (
+ CreateResponse,
+ ResponseContext,
+ ResponsesAgentServerHost,
+ TextResponse,
+)
+
+app = ResponsesAgentServerHost()
+
+
+@app.create_handler
+async def handler(request: CreateResponse, context: ResponseContext, cancellation_signal: asyncio.Event):
+ """Echo the user's input back as a single message."""
+
+ async def _create_text():
+ return f"Echo: {await context.get_input_text()}"
+
+ return TextResponse(
+ context,
+ request,
+ create_text=_create_text,
+ )
+
+
+def main() -> None:
+ app.run()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/samples/sample_02_streaming_text_deltas.py b/sdk/agentserver/azure-ai-agentserver-responses/samples/sample_02_streaming_text_deltas.py
new file mode 100644
index 000000000000..71ef31168c4c
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/samples/sample_02_streaming_text_deltas.py
@@ -0,0 +1,75 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Sample 02 — Streaming Text Deltas.
+
+Demonstrates token-by-token streaming using ``TextResponse`` with
+``create_text_stream``. Each chunk yielded by the async generator is
+emitted as a separate ``output_text.delta`` SSE event, enabling
+real-time streaming to the client.
+
+The ``configure`` callback sets ``Response.temperature`` on the response
+envelope before ``response.created`` is emitted.
+
+Usage::
+
+ # Start the server
+ python sample_02_streaming_text_deltas.py
+
+ # Stream token-by-token deltas
+ curl -N -X POST http://localhost:8088/responses \
+ -H "Content-Type: application/json" \
+ -d '{"model": "streaming", "input": "world", "stream": true}'
+ # -> event: response.created data: {"response": {"status": "in_progress", ...}}
+ # -> event: response.in_progress data: {"response": {"status": "in_progress", ...}}
+ # -> event: response.output_item.added data: {"item": {"type": "message", ...}}
+ # -> event: response.content_part.added data: {"part": {"type": "output_text", ...}}
+ # -> event: response.output_text.delta data: {"delta": "Hello"}
+ # -> event: response.output_text.delta data: {"delta": ", "}
+ # -> event: response.output_text.delta data: {"delta": "world"}
+ # -> event: response.output_text.delta data: {"delta": "! "}
+ # -> event: response.output_text.delta data: {"delta": "How "}
+ # -> event: response.output_text.delta data: {"delta": "are "}
+ # -> event: response.output_text.delta data: {"delta": "you?"}
+ # -> event: response.output_text.done data: {"text": "Hello, world! How are you?"}
+ # -> event: response.content_part.done data: {"part": {"type": "output_text", ...}}
+ # -> event: response.output_item.done data: {"item": {"type": "message", ...}}
+ # -> event: response.completed data: {"response": {"status": "completed", ...}}
+"""
+
+import asyncio
+
+from azure.ai.agentserver.responses import (
+ CreateResponse,
+ ResponseContext,
+ ResponsesAgentServerHost,
+ TextResponse,
+)
+
+app = ResponsesAgentServerHost()
+
+
+@app.create_handler
+async def handler(request: CreateResponse, context: ResponseContext, cancellation_signal: asyncio.Event):
+ """Stream tokens one at a time using TextResponse."""
+ user_text = await context.get_input_text() or "world"
+
+ async def generate_tokens():
+ tokens = ["Hello", ", ", user_text, "! ", "How ", "are ", "you?"]
+ for token in tokens:
+ await asyncio.sleep(0.1)
+ yield token
+
+ return TextResponse(
+ context,
+ request,
+ configure=lambda response: setattr(response, "temperature", 0.7),
+ create_text_stream=generate_tokens,
+ )
+
+
+def main() -> None:
+ app.run()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/samples/sample_03_full_control.py b/sdk/agentserver/azure-ai-agentserver-responses/samples/sample_03_full_control.py
new file mode 100644
index 000000000000..e4cd29d71c5c
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/samples/sample_03_full_control.py
@@ -0,0 +1,167 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Sample 03 — ResponseEventStream — Beyond TextResponse.
+
+When your handler needs to emit function calls, reasoning items, multiple
+outputs, or set custom Response properties, step up from ``TextResponse``
+to ``ResponseEventStream``. Start with **convenience generators** — they
+handle the event lifecycle for you. Drop down to **builders** only when
+you need fine-grained control over individual events.
+
+This sample shows three ways to emit the same greeting — all produce the
+identical SSE event sequence:
+
+ 1. **Convenience** — ``output_item_message(text)``
+ 2. **Streaming** — ``aoutput_item_message(async_iterable)``
+ 3. **Builder** — ``add_output_item_message()`` → ``add_text_content()``
+ → ``emit_delta()`` / ``emit_done()``
+
+Usage::
+
+ # Start the server
+ python sample_03_full_control.py
+
+ # Send a request
+ curl -X POST http://localhost:8088/responses \
+ -H "Content-Type: application/json" \
+ -d '{"model": "greeting", "input": "Hi there!"}'
+ # -> {"output": [{"type": "message", "content": [{"type": "output_text",
+ # "text": "Hello! You said: \"Hi there!\""}]}]}
+
+ # Stream the response
+ curl -N -X POST http://localhost:8088/responses \
+ -H "Content-Type: application/json" \
+ -d '{"model": "greeting", "input": "Hi there!", "stream": true}'
+ # -> event: response.created data: {"response": {"status": "in_progress", ...}}
+ # -> event: response.in_progress data: {"response": {"status": "in_progress", ...}}
+ # -> event: response.output_item.added data: {"item": {"type": "message", ...}}
+ # -> event: response.content_part.added data: {"part": {"type": "output_text", ...}}
+ # -> event: response.output_text.delta data: {"delta": "Hello! You said: ..."}
+ # -> event: response.output_text.done data: {"text": "Hello! You said: ..."}
+ # -> event: response.content_part.done data: {"part": {"type": "output_text", ...}}
+ # -> event: response.output_item.done data: {"item": {"type": "message", ...}}
+ # -> event: response.completed data: {"response": {"status": "completed", ...}}
+"""
+
+import asyncio
+
+from azure.ai.agentserver.responses import (
+ CreateResponse,
+ ResponseContext,
+ ResponseEventStream,
+ ResponsesAgentServerHost,
+)
+
+app = ResponsesAgentServerHost()
+
+
+# ── Variant 1: Convenience ──────────────────────────────────────────────
+# Use ``output_item_message(text)`` to emit a complete text message in one
+# call. The convenience generator handles all inner events for you.
+
+
+@app.create_handler
+async def handler(
+ request: CreateResponse,
+ context: ResponseContext,
+ cancellation_signal: asyncio.Event,
+):
+ """Emit a greeting using the convenience generator."""
+ stream = ResponseEventStream(response_id=context.response_id, request=request)
+
+ # Configure Response properties BEFORE emit_created().
+ stream.response.temperature = 0.7
+ stream.response.max_output_tokens = 1024
+
+ yield stream.emit_created()
+ yield stream.emit_in_progress()
+
+ # Emit a complete text message in one call.
+ input_text = await context.get_input_text()
+ for evt in stream.output_item_message(f'Hello! You said: "{input_text}"'):
+ yield evt
+
+ yield stream.emit_completed()
+
+
+# ── Variant 2: Streaming ────────────────────────────────────────────────
+# When your handler calls an LLM that produces tokens incrementally, pass
+# an ``AsyncIterable[str]`` to ``aoutput_item_message()``. Each chunk
+# becomes a separate ``response.output_text.delta`` SSE event.
+
+
+async def handler_streaming(
+ request: CreateResponse,
+ context: ResponseContext,
+ cancellation_signal: asyncio.Event,
+):
+ """Stream tokens using the async convenience generator."""
+ stream = ResponseEventStream(response_id=context.response_id, request=request)
+
+ yield stream.emit_created()
+ yield stream.emit_in_progress()
+
+ # Stream tokens as they arrive — each chunk becomes a delta event.
+ async for evt in stream.aoutput_item_message(
+ _generate_tokens(await context.get_input_text()),
+ ):
+ yield evt
+
+ yield stream.emit_completed()
+
+
+async def _generate_tokens(input_text: str):
+ """Simulate an LLM producing tokens one at a time."""
+ tokens = ["Hello! ", "You ", "said: ", f'"{input_text}"']
+ for token in tokens:
+ await asyncio.sleep(0.1)
+ yield token
+
+
+# ── Variant 3: Builder (full event control) ─────────────────────────────
+# When you need to interleave non-event work between individual delta/done
+# calls within a content part, or set custom properties on the output item
+# before ``emit_added()``, drop down to the builder API.
+
+
+async def handler_builder(
+ request: CreateResponse,
+ context: ResponseContext,
+ cancellation_signal: asyncio.Event,
+):
+ """Demonstrate all builder events step by step."""
+ stream = ResponseEventStream(response_id=context.response_id, request=request)
+
+ # Configure Response properties BEFORE emit_created().
+ stream.response.temperature = 0.7
+ stream.response.max_output_tokens = 1024
+
+ yield stream.emit_created()
+ yield stream.emit_in_progress()
+
+ # Add a message output item.
+ message = stream.add_output_item_message()
+ yield message.emit_added()
+
+ # Add text content to the message.
+ text_part = message.add_text_content()
+ yield text_part.emit_added()
+
+ # Emit the text body — delta first, then the final "done" with full text.
+ input_text = await context.get_input_text() or "Hello"
+ reply = f'Hello! You said: "{input_text}"'
+ yield text_part.emit_delta(reply)
+
+ yield text_part.emit_done()
+ yield message.emit_content_done(text_part)
+ yield message.emit_done()
+
+ yield stream.emit_completed()
+
+
+def main() -> None:
+ app.run()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/samples/sample_04_function_calling.py b/sdk/agentserver/azure-ai-agentserver-responses/samples/sample_04_function_calling.py
new file mode 100644
index 000000000000..00386cdf26ec
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/samples/sample_04_function_calling.py
@@ -0,0 +1,141 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Sample 04 — Function Calling (two-turn pattern).
+
+Demonstrates a two-turn function-calling flow:
+
+ **Turn 1** — The handler emits a ``function_call`` output item asking the
+ client to call ``get_weather`` with specific arguments.
+
+ **Turn 2** — The client re-invokes the handler with a
+ ``function_call_output`` item in the input. The handler reads that output
+ and responds with a text message.
+
+The handler is shown first using convenience generators, then with full
+builder control.
+
+Usage::
+
+ # Start the server
+ python sample_04_function_calling.py
+
+ # Turn 1 — triggers a function call
+ curl -X POST http://localhost:8088/responses \
+ -H "Content-Type: application/json" \
+ -d '{"model": "test", "input": "What is the weather in Seattle?"}'
+ # -> {"output": [{"type": "function_call", "name": "get_weather",
+ # "call_id": "call_weather_1", "arguments": "{\"location\": \"Seattle\", ...}"}]}
+
+ # Turn 2 — submit function output, receive text
+ curl -X POST http://localhost:8088/responses \
+ -H "Content-Type: application/json" \
+ -d '{"model": "test", "input": [{"type": "function_call_output", "call_id": "call_weather_1", "output": "72F and sunny"}]}'
+ # -> {"output": [{"type": "message", "content": [{"type": "output_text",
+ # "text": "The weather is: 72F and sunny"}]}]}
+"""
+
+from __future__ import annotations
+
+import asyncio
+import json
+
+from azure.ai.agentserver.responses import (
+ CreateResponse,
+ ResponseContext,
+ ResponseEventStream,
+ ResponsesAgentServerHost,
+ get_input_expanded,
+)
+from azure.ai.agentserver.responses.models import FunctionCallOutputItemParam
+
+app = ResponsesAgentServerHost()
+
+
+def _find_function_call_output(request: CreateResponse) -> str | None:
+ """Return the output string from the first function_call_output item, or None."""
+ for item in get_input_expanded(request):
+ if isinstance(item, FunctionCallOutputItemParam):
+ output = item.output
+ if isinstance(output, str):
+ return output
+ return None
+
+
+# ── Variant 1: Convenience ──────────────────────────────────────────────
+# Use ``output_item_function_call()`` and ``output_item_message()`` to emit
+# complete output items in one call each.
+
+
+@app.create_handler
+def handler(
+ request: CreateResponse,
+ context: ResponseContext,
+ cancellation_signal: asyncio.Event,
+):
+ """Two-turn function-calling handler using convenience generators."""
+ tool_output = _find_function_call_output(request)
+
+ stream = ResponseEventStream(response_id=context.response_id, request=request)
+ yield stream.emit_created()
+ yield stream.emit_in_progress()
+
+ if tool_output is not None:
+ # Turn 2: we have the tool result — produce a final text message.
+ yield from stream.output_item_message(f"The weather is: {tool_output}")
+ else:
+ # Turn 1: ask the client to call get_weather.
+ arguments = json.dumps({"location": "Seattle", "unit": "fahrenheit"})
+ yield from stream.output_item_function_call("get_weather", "call_weather_1", arguments)
+
+ yield stream.emit_completed()
+
+
+# ── Variant 2: Builder (full event control) ─────────────────────────────
+# When you need to set custom properties on the function call item before
+# ``emit_added()``, or interleave non-event work between builder calls,
+# use the builder API.
+
+
+def handler_builder(
+ request: CreateResponse,
+ context: ResponseContext,
+ cancellation_signal: asyncio.Event,
+):
+ """Two-turn function-calling handler using the builder API."""
+ tool_output = _find_function_call_output(request)
+
+ stream = ResponseEventStream(response_id=context.response_id, request=request)
+ yield stream.emit_created()
+ yield stream.emit_in_progress()
+
+ if tool_output is not None:
+ # Turn 2: function output received — return the weather as text.
+ message = stream.add_output_item_message()
+ yield message.emit_added()
+
+ text_part = message.add_text_content()
+ yield text_part.emit_added()
+
+ reply = f"The weather is: {tool_output}"
+ yield text_part.emit_delta(reply)
+ yield text_part.emit_done()
+ yield message.emit_content_done(text_part)
+ yield message.emit_done()
+ else:
+ # Turn 1: emit a function call for "get_weather".
+ arguments = json.dumps({"location": "Seattle", "unit": "fahrenheit"})
+ fc = stream.add_output_item_function_call(name="get_weather", call_id="call_weather_1")
+ yield fc.emit_added()
+ yield fc.emit_arguments_delta(arguments)
+ yield fc.emit_arguments_done(arguments)
+ yield fc.emit_done()
+
+ yield stream.emit_completed()
+
+
+def main() -> None:
+ app.run()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/samples/sample_05_conversation_history.py b/sdk/agentserver/azure-ai-agentserver-responses/samples/sample_05_conversation_history.py
new file mode 100644
index 000000000000..bdcd6fad310f
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/samples/sample_05_conversation_history.py
@@ -0,0 +1,90 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Sample 05 — Conversation History — Study Tutor.
+
+Demonstrates reading conversation history via ``context.get_history()``
+using ``TextResponse`` with an async ``create_text`` callback. The study
+tutor references previous turns to give contextual follow-up answers,
+demonstrating multi-turn conversational flows using
+``previous_response_id``.
+
+The server is configured with
+``ResponsesServerOptions(default_fetch_history_count=20)`` to limit the
+number of history items fetched per request.
+
+Usage::
+
+ # Start the server
+ python sample_05_conversation_history.py
+
+ # Turn 1 — initial message (no history)
+ curl -X POST http://localhost:8088/responses \
+ -H "Content-Type: application/json" \
+ -d '{"model": "tutor", "input": "Explain photosynthesis."}'
+ # -> {"id": "resp_...", "output": [{"type": "message", "content":
+ # [{"type": "output_text", "text": "Welcome! I'm your study tutor. You asked: ..."}]}]}
+
+ # Turn 2 — chain via previous_response_id (use the id from Turn 1)
+ curl -X POST http://localhost:8088/responses \
+ -H "Content-Type: application/json" \
+ -d '{"model": "tutor", "input": "What role does chlorophyll play?", "previous_response_id": ""}'
+ # -> {"output": [{"type": "message", "content": [{"type": "output_text",
+ # "text": "[Turn 2] Building on our previous discussion ..."}]}]}
+"""
+
+import asyncio
+from collections.abc import Sequence
+
+from azure.ai.agentserver.responses import (
+ CreateResponse,
+ ResponseContext,
+ ResponsesAgentServerHost,
+ ResponsesServerOptions,
+ TextResponse,
+)
+from azure.ai.agentserver.responses.models import OutputItem
+
+app = ResponsesAgentServerHost(
+ options=ResponsesServerOptions(default_fetch_history_count=20),
+)
+
+
+def _build_reply(current_input: str, history: Sequence[OutputItem]) -> str:
+ """Compose a study-tutor reply that references the conversation history."""
+ history_messages = [item for item in history if getattr(item, "type", None) == "message"]
+ turn_number = len(history_messages) + 1
+
+ if not history_messages:
+ return f'Welcome! I\'m your study tutor. You asked: "{current_input}". Let me help you understand that topic.'
+
+ last = history_messages[-1]
+ last_text = "(none)"
+ if last.get("content"):
+ raw = last["content"][0].get("text", "(none)")
+ last_text = raw[:50] + "..." if len(raw) > 50 else raw
+
+ return (
+ f"[Turn {turn_number}] Building on our previous discussion "
+ f'(last answer: "{last_text}"), '
+ f'you asked: "{current_input}".'
+ )
+
+
+@app.create_handler
+def handler(request: CreateResponse, context: ResponseContext, cancellation_signal: asyncio.Event):
+ """Study tutor that reads and references conversation history."""
+
+ async def _build():
+ history = await context.get_history()
+ current_input = await context.get_input_text()
+ return _build_reply(current_input, history)
+
+ return TextResponse(context, request, create_text=_build)
+
+
+def main() -> None:
+ app.run()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/samples/sample_06_multi_output.py b/sdk/agentserver/azure-ai-agentserver-responses/samples/sample_06_multi_output.py
new file mode 100644
index 000000000000..892640676f7a
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/samples/sample_06_multi_output.py
@@ -0,0 +1,146 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Sample 06 — Multi-Output — Math Problem Solver with Reasoning.
+
+Builds a math problem solver that shows its work. The agent emits a
+**reasoning** item (the thought process) followed by a **message** item
+(the final answer). This demonstrates streaming multiple output types in
+a single response — first using convenience generators, then with full
+builder control.
+
+Usage::
+
+ # Start the server
+ python sample_06_multi_output.py
+
+ # Send a math question
+ curl -X POST http://localhost:8088/responses \
+ -H "Content-Type: application/json" \
+ -d '{"model": "math", "input": "What is 6 times 7?"}'
+ # -> {"output": [{"type": "reasoning", ...},
+ # {"type": "message", "content": [{"type": "output_text",
+ # "text": "The answer is 42. Here's how: 6 × 7 = 42. ..."}]}]}
+
+ # Stream to see reasoning + answer arrive in sequence
+ curl -N -X POST http://localhost:8088/responses \
+ -H "Content-Type: application/json" \
+ -d '{"model": "math", "input": "What is 6 times 7?", "stream": true}'
+ # -> event: response.created data: {"response": {"status": "in_progress", ...}}
+ # -> event: response.in_progress data: {"response": {"status": "in_progress", ...}}
+ # -> event: response.output_item.added data: {"item": {"type": "reasoning", ...}}
+ # -> event: response.output_item.done data: {"item": {"type": "reasoning", ...}}
+ # -> event: response.output_item.added data: {"item": {"type": "message", ...}}
+ # -> event: response.content_part.added data: {"part": {"type": "output_text", ...}}
+ # -> event: response.output_text.delta data: {"delta": "The answer is 42. ..."}
+ # -> event: response.output_text.done data: {"text": "The answer is 42. ..."}
+ # -> event: response.content_part.done data: {"part": {"type": "output_text", ...}}
+ # -> event: response.output_item.done data: {"item": {"type": "message", ...}}
+ # -> event: response.completed data: {"response": {"status": "completed", ...}}
+"""
+
+import asyncio
+
+from azure.ai.agentserver.responses import (
+ CreateResponse,
+ ResponseContext,
+ ResponseEventStream,
+ ResponsesAgentServerHost,
+)
+
+app = ResponsesAgentServerHost()
+
+
+# ── Variant 1: Convenience ──────────────────────────────────────────────
+# Use ``output_item_reasoning_item()`` and ``output_item_message()`` to
+# emit complete output items with one call each.
+
+
+@app.create_handler
+async def handler(
+ request: CreateResponse,
+ context: ResponseContext,
+ cancellation_signal: asyncio.Event,
+):
+ """Emit reasoning and answer using convenience generators."""
+ stream = ResponseEventStream(response_id=context.response_id, request=request)
+ question = await context.get_input_text() or "What is 6 times 7?"
+
+ yield stream.emit_created()
+ yield stream.emit_in_progress()
+
+ # Output item 0: Reasoning — show the thought process.
+ thought = (
+ f'The user asked: "{question}". '
+ "I need to identify the mathematical operation, "
+ "compute the result, and explain the steps."
+ )
+ for evt in stream.output_item_reasoning_item(thought):
+ yield evt
+
+ # Output item 1: Message — the final answer.
+ answer = "The answer is 42. Here's how: 6 × 7 = 42. The multiplication of 6 and 7 gives 42."
+ for evt in stream.output_item_message(answer):
+ yield evt
+
+ yield stream.emit_completed()
+
+
+# ── Variant 2: Builder (full event control) ─────────────────────────────
+# When you need multiple summary parts in a single reasoning item, set
+# custom properties on output items before ``emit_added()``, or interleave
+# non-event work between builder calls, use the builder API.
+
+
+async def handler_builder(
+ request: CreateResponse,
+ context: ResponseContext,
+ cancellation_signal: asyncio.Event,
+):
+ """Emit reasoning and answer using the builder API."""
+ stream = ResponseEventStream(response_id=context.response_id, request=request)
+ question = await context.get_input_text() or "What is 6 times 7?"
+
+ yield stream.emit_created()
+ yield stream.emit_in_progress()
+
+ # Output item 0: Reasoning — show the thought process.
+ reasoning = stream.add_output_item_reasoning_item()
+ yield reasoning.emit_added()
+
+ summary = reasoning.add_summary_part()
+ yield summary.emit_added()
+
+ thought = (
+ f'The user asked: "{question}". '
+ "I need to identify the mathematical operation, "
+ "compute the result, and explain the steps."
+ )
+ yield summary.emit_text_delta(thought)
+ yield summary.emit_text_done(thought)
+ yield summary.emit_done()
+ reasoning.emit_summary_part_done(summary)
+
+ yield reasoning.emit_done()
+
+ # Output item 1: Message — the final answer.
+ message = stream.add_output_item_message()
+ yield message.emit_added()
+
+ text_part = message.add_text_content()
+ yield text_part.emit_added()
+
+ answer = "The answer is 42. Here's how: 6 × 7 = 42. The multiplication of 6 and 7 gives 42."
+ yield text_part.emit_delta(answer)
+ yield text_part.emit_done()
+ yield message.emit_content_done(text_part)
+ yield message.emit_done()
+
+ yield stream.emit_completed()
+
+
+def main() -> None:
+ app.run()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/samples/sample_07_customization.py b/sdk/agentserver/azure-ai-agentserver-responses/samples/sample_07_customization.py
new file mode 100644
index 000000000000..ac2ac7e9c69e
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/samples/sample_07_customization.py
@@ -0,0 +1,71 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Sample 07 — Customization Options.
+
+Shows how to configure the server with custom runtime options:
+
+ - ``ResponsesServerOptions`` for default model, SSE keep-alive, and
+ shutdown grace period.
+ - ``log_level`` on the host for verbose logging.
+ - A handler that relies on ``request.model``, which is automatically
+ filled from ``default_model`` when the client omits it.
+
+Usage::
+
+ # Start the server (with DEBUG logging)
+ python sample_07_customization.py
+
+ # Send a request (model defaults to gpt-4o via default_model)
+ curl -X POST http://localhost:8088/responses \
+ -H "Content-Type: application/json" \
+ -d '{"input": "Hello!"}'
+ # -> {"output": [{"type": "message", "content":
+ # [{"type": "output_text", "text": "[model=gpt-4o] Echo: Hello!"}]}]}
+
+ # Override the model explicitly
+ curl -X POST http://localhost:8088/responses \
+ -H "Content-Type: application/json" \
+ -d '{"model": "custom", "input": "Hello!"}'
+ # -> {"output": [{"type": "message", "content":
+ # [{"type": "output_text", "text": "[model=custom] Echo: Hello!"}]}]}
+"""
+
+import asyncio
+
+from azure.ai.agentserver.responses import (
+ CreateResponse,
+ ResponseContext,
+ ResponsesAgentServerHost,
+ ResponsesServerOptions,
+ TextResponse,
+)
+
+options = ResponsesServerOptions(
+ default_model="gpt-4o",
+ sse_keep_alive_interval_seconds=5,
+ shutdown_grace_period_seconds=15,
+)
+
+app = ResponsesAgentServerHost(options=options, log_level="DEBUG")
+
+
+@app.create_handler
+async def handler(request: CreateResponse, context: ResponseContext, cancellation_signal: asyncio.Event):
+ """Echo handler that reports which model is being used."""
+
+ async def _create_text():
+ return f"[model={request.model}] Echo: {await context.get_input_text()}"
+
+ return TextResponse(
+ context,
+ request,
+ create_text=_create_text,
+ )
+
+
+def main() -> None:
+ app.run()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/samples/sample_08_mixin_composition.py b/sdk/agentserver/azure-ai-agentserver-responses/samples/sample_08_mixin_composition.py
new file mode 100644
index 000000000000..c6fdcbd751db
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/samples/sample_08_mixin_composition.py
@@ -0,0 +1,88 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Sample 08 — Mixin Composition (multi-protocol).
+
+Demonstrates running both the **Invocations** and **Responses** protocols
+on a single server using Python's cooperative (mixin) inheritance.
+
+Endpoints exposed:
+ POST /invocations — Invocation protocol
+ POST /responses — Responses protocol
+ GET /readiness — Health probe (from core)
+
+Usage::
+
+ # Start the dual-protocol server
+ python sample_08_mixin_composition.py
+
+ # Hit the Invocation endpoint
+ curl -X POST http://localhost:8088/invocations \
+ -H "Content-Type: application/json" \
+ -d '{"message": "Hello!"}'
+ # -> {"invocation_id": "...", "status": "completed",
+ # "output": "[Invocation] Echo: Hello!"}
+
+ # Hit the Responses endpoint
+ curl -X POST http://localhost:8088/responses \
+ -H "Content-Type: application/json" \
+ -d '{"model": "test", "input": "Hello!"}'
+ # -> {"output": [{"type": "message", "content":
+ # [{"type": "output_text", "text": "[Response] Echo: Hello!"}]}]}
+"""
+
+import asyncio
+
+from azure.ai.agentserver.invocations import InvocationAgentServerHost
+from starlette.requests import Request
+from starlette.responses import JSONResponse, Response
+
+from azure.ai.agentserver.responses import (
+ CreateResponse,
+ ResponseContext,
+ ResponsesAgentServerHost,
+ TextResponse,
+)
+
+
+class MyHost(InvocationAgentServerHost, ResponsesAgentServerHost):
+ pass
+
+
+app = MyHost()
+
+
+@app.invoke_handler
+async def handle_invoke(request: Request) -> Response:
+ """Echo invocation: returns the message from the JSON body."""
+ data = await request.json()
+ invocation_id = request.state.invocation_id
+ message = data.get("message", "")
+ return JSONResponse(
+ {
+ "invocation_id": invocation_id,
+ "status": "completed",
+ "output": f"[Invocation] Echo: {message}",
+ }
+ )
+
+
+@app.create_handler
+async def handle_response(request: CreateResponse, context: ResponseContext, cancellation_signal: asyncio.Event):
+ """Echo response: returns the user's input text."""
+
+ async def _create_text():
+ return f"[Response] Echo: {await context.get_input_text()}"
+
+ return TextResponse(
+ context,
+ request,
+ create_text=_create_text,
+ )
+
+
+def main() -> None:
+ app.run()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/samples/sample_09_self_hosting.py b/sdk/agentserver/azure-ai-agentserver-responses/samples/sample_09_self_hosting.py
new file mode 100644
index 000000000000..bc397a53db40
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/samples/sample_09_self_hosting.py
@@ -0,0 +1,71 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Sample 09 — Self-Hosting (mounting into an existing Starlette app).
+
+Shows how to mount the ``ResponsesAgentServerHost`` into a parent
+Starlette application so responses endpoints live under a custom
+URL prefix (e.g. ``/api/responses``).
+
+Because ``ResponsesAgentServerHost`` **is** a Starlette application,
+it can be used as a sub-application via ``starlette.routing.Mount``.
+
+Usage::
+
+ # Start the server
+ python sample_09_self_hosting.py
+
+ # Responses are mounted under /api
+ curl -X POST http://localhost:8000/api/responses \
+ -H "Content-Type: application/json" \
+ -d '{"model": "test", "input": "Hello!"}'
+ # -> {"output": [{"type": "message", "content":
+ # [{"type": "output_text", "text": "Self-hosted echo: Hello!"}]}]}
+"""
+
+import asyncio
+
+from starlette.applications import Starlette
+from starlette.routing import Mount
+
+from azure.ai.agentserver.responses import (
+ CreateResponse,
+ ResponseContext,
+ ResponsesAgentServerHost,
+ TextResponse,
+)
+
+# Create the responses host (it IS a Starlette app)
+responses_app = ResponsesAgentServerHost()
+
+
+@responses_app.create_handler
+async def handler(request: CreateResponse, context: ResponseContext, cancellation_signal: asyncio.Event):
+ """Echo handler mounted under /api."""
+
+ async def _create_text():
+ return f"Self-hosted echo: {await context.get_input_text()}"
+
+ return TextResponse(
+ context,
+ request,
+ create_text=_create_text,
+ )
+
+
+# Mount into a parent Starlette app
+app = Starlette(
+ routes=[
+ Mount("/api", app=responses_app),
+ ]
+)
+# Now responses are at /api/responses
+
+
+def main() -> None:
+ import uvicorn
+
+ uvicorn.run(app)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/samples/sample_10_streaming_upstream.py b/sdk/agentserver/azure-ai-agentserver-responses/samples/sample_10_streaming_upstream.py
new file mode 100644
index 000000000000..a98934cdeda4
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/samples/sample_10_streaming_upstream.py
@@ -0,0 +1,163 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Sample 10 — Streaming Upstream (forward to OpenAI-compatible server).
+
+Demonstrates how to forward a request to an upstream OpenAI-compatible API
+that returns streaming Server-Sent Events, translating each upstream
+chunk into local response events using the ``openai`` Python SDK.
+
+The handler **owns the response lifecycle** — it constructs its own
+``response.created``, ``response.in_progress``, and terminal events — while
+translating upstream **content events** (output items, text deltas,
+function-call arguments, reasoning, tool calls) and yielding them directly.
+Both model stacks share the same JSON wire contract, so content events
+round-trip with full fidelity.
+
+This is **not** a transparent proxy. The sample showcases type
+compatibility between the two model stacks. In practice you would add
+orchestration logic — filtering outputs, injecting items, calling multiple
+upstreams, or transforming content — between the upstream call and the
+``yield``.
+
+Usage::
+
+ # Start the server (set upstream endpoint and API key)
+ UPSTREAM_ENDPOINT=http://localhost:5211 OPENAI_API_KEY=your-key \
+ python sample_10_streaming_upstream.py
+
+ # Send a streaming request
+ curl -N -X POST http://localhost:8088/responses \
+ -H "Content-Type: application/json" \
+ -d '{"model": "gpt-4o-mini", "input": "Say hello!", "stream": true}'
+ # -> event: response.created data: {"response": {"status": "in_progress", ...}}
+ # -> event: response.in_progress data: {"response": {"status": "in_progress", ...}}
+ # -> event: response.output_item.added data: {"item": {"type": "message", ...}}
+ # -> event: response.content_part.added data: {"part": {"type": "output_text", ...}}
+ # -> event: response.output_text.delta data: {"delta": "..."}
+ # -> ... (more deltas)
+ # -> event: response.output_text.done data: {"text": "..."}
+ # -> event: response.output_item.done data: {"item": {"type": "message", ...}}
+ # -> event: response.completed data: {"response": {"status": "completed", ...}}
+"""
+
+import asyncio
+import os
+from typing import Any
+
+import openai
+
+from azure.ai.agentserver.responses import (
+ CreateResponse,
+ ResponseContext,
+ ResponsesAgentServerHost,
+ get_input_expanded,
+)
+
+app = ResponsesAgentServerHost()
+
+
+def _build_response_snapshot(request: CreateResponse, context: ResponseContext) -> dict[str, Any]:
+ """Construct a response snapshot dict from request + context."""
+ snapshot: dict[str, Any] = {
+ "id": context.response_id,
+ "object": "response",
+ "status": "in_progress",
+ "model": request.model or "",
+ "output": [],
+ }
+ if request.metadata is not None:
+ snapshot["metadata"] = request.metadata
+ if request.background is not None:
+ snapshot["background"] = request.background
+ if request.previous_response_id is not None:
+ snapshot["previous_response_id"] = request.previous_response_id
+ # Normalize conversation to ConversationReference form.
+ conv = request.conversation
+ if isinstance(conv, str):
+ snapshot["conversation"] = {"id": conv}
+ elif isinstance(conv, dict) and conv.get("id"):
+ snapshot["conversation"] = {"id": conv["id"]}
+ return snapshot
+
+
+@app.create_handler
+async def handler(
+ request: CreateResponse,
+ context: ResponseContext,
+ cancellation_signal: asyncio.Event,
+):
+ """Forward to upstream with streaming, translate content events back."""
+ upstream = openai.AsyncOpenAI(
+ base_url=os.environ.get("UPSTREAM_ENDPOINT", "https://api.openai.com/v1"),
+ api_key=os.environ.get("OPENAI_API_KEY", "your-api-key"),
+ )
+
+ # Build the upstream request — translate every input item.
+ # Both model stacks share the same JSON wire contract, so
+ # serializing our Item to dict round-trips to the OpenAI SDK.
+ input_items = [item.as_dict() for item in get_input_expanded(request)]
+
+ # This handler owns the response lifecycle — construct the
+ # response snapshot directly instead of forwarding the upstream's.
+ # Seeding from the request preserves metadata, conversation, model.
+ snapshot = _build_response_snapshot(request, context)
+
+ # Lifecycle events nest the response snapshot under "response"
+ # — matching the SSE wire format.
+ yield {"type": "response.created", "response": snapshot}
+ yield {"type": "response.in_progress", "response": snapshot}
+
+ # Stream from the upstream. Translate content events (output
+ # items, deltas, etc.) and yield them directly. Skip upstream
+ # lifecycle events — we own the response envelope.
+ output_items: list[dict[str, Any]] = []
+ upstream_failed = False
+
+ async with await upstream.responses.create(
+ model=request.model or "gpt-4o-mini",
+ input=input_items,
+ stream=True,
+ ) as upstream_stream:
+ async for event in upstream_stream:
+ # Skip lifecycle events — we own the response envelope.
+ if event.type in ("response.created", "response.in_progress"):
+ continue
+
+ if event.type == "response.completed":
+ break
+
+ if event.type == "response.failed":
+ upstream_failed = True
+ break
+
+ # Translate the upstream event to a dict via the openai SDK.
+ evt = event.model_dump()
+
+ # Clear upstream response_id on output items so the
+ # orchestrator's auto-stamp fills in this server's ID.
+ if event.type == "response.output_item.added":
+ evt.get("item", {}).pop("response_id", None)
+ elif event.type == "response.output_item.done":
+ item = evt.get("item", {})
+ item.pop("response_id", None)
+ output_items.append(item)
+
+ yield evt
+
+ # Emit terminal event — the handler decides the outcome.
+ if upstream_failed:
+ snapshot["status"] = "failed"
+ snapshot["error"] = {"code": "server_error", "message": "Upstream request failed"}
+ yield {"type": "response.failed", "response": snapshot}
+ else:
+ snapshot["status"] = "completed"
+ snapshot["output"] = output_items
+ yield {"type": "response.completed", "response": snapshot}
+
+
+def main() -> None:
+ app.run()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/samples/sample_11_non_streaming_upstream.py b/sdk/agentserver/azure-ai-agentserver-responses/samples/sample_11_non_streaming_upstream.py
new file mode 100644
index 000000000000..e66d10dbce4c
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/samples/sample_11_non_streaming_upstream.py
@@ -0,0 +1,120 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Sample 11 — Non-Streaming Upstream (call upstream, build event stream).
+
+Demonstrates forwarding a request to an upstream OpenAI-compatible API
+that returns a complete (non-streaming) response, then using the builder
+API to construct output items for the client.
+
+The handler calls the upstream without streaming, waits for the complete
+response, and uses ``output_item_message`` and ``output_item_reasoning_item``
+to emit ``output_item.added`` / ``output_item.done`` pairs for each item.
+
+This pattern is useful when your handler needs to inspect or transform the
+full response before streaming it to the client — for example, filtering
+output items, injecting additional context, or calling multiple upstreams.
+
+Usage::
+
+ # Start the server (set upstream endpoint and API key)
+ UPSTREAM_ENDPOINT=http://localhost:5211 OPENAI_API_KEY=your-key \
+ python sample_11_non_streaming_upstream.py
+
+ # Send a request
+ curl -X POST http://localhost:8088/responses \
+ -H "Content-Type: application/json" \
+ -d '{"model": "gpt-4o-mini", "input": "Say hello!"}'
+ # -> {"output": [{"type": "message", "content":
+ # [{"type": "output_text", "text": "Hello! ..."}]}]}
+
+ # Stream the response
+ curl -N -X POST http://localhost:8088/responses \
+ -H "Content-Type: application/json" \
+ -d '{"model": "gpt-4o-mini", "input": "Say hello!", "stream": true}'
+ # -> event: response.created data: {"response": {"status": "in_progress", ...}}
+ # -> event: response.in_progress data: {"response": {"status": "in_progress", ...}}
+ # -> event: response.output_item.added data: {"item": {"type": "message", ...}}
+ # -> event: response.content_part.added data: {"part": {"type": "output_text", ...}}
+ # -> event: response.output_text.delta data: {"delta": "..."}
+ # -> event: response.output_text.done data: {"text": "..."}
+ # -> event: response.content_part.done data: {"part": {"type": "output_text", ...}}
+ # -> event: response.output_item.done data: {"item": {"type": "message", ...}}
+ # -> event: response.completed data: {"response": {"status": "completed", ...}}
+"""
+
+import asyncio
+import os
+
+import openai
+
+from azure.ai.agentserver.responses import (
+ CreateResponse,
+ ResponseContext,
+ ResponseEventStream,
+ ResponsesAgentServerHost,
+ get_input_expanded,
+)
+
+app = ResponsesAgentServerHost()
+
+
+@app.create_handler
+async def handler(
+ request: CreateResponse,
+ context: ResponseContext,
+ cancellation_signal: asyncio.Event,
+):
+ """Call upstream (non-streaming), emit every output item."""
+ upstream = openai.AsyncOpenAI(
+ base_url=os.environ.get("UPSTREAM_ENDPOINT", "https://api.openai.com/v1"),
+ api_key=os.environ.get("OPENAI_API_KEY", "your-api-key"),
+ )
+
+ # Build the upstream request — translate every input item.
+ # Both model stacks share the same JSON wire contract, so
+ # serializing our Item to dict round-trips to the OpenAI SDK.
+ input_items = [item.as_dict() for item in get_input_expanded(request)]
+
+ # Call upstream without streaming and get the complete response.
+ result = await upstream.responses.create(
+ model=request.model or "gpt-4o-mini",
+ input=input_items,
+ )
+
+ # Build a standard SSE event stream. Seed from the request to
+ # preserve metadata, conversation, and agent reference.
+ stream = ResponseEventStream(response_id=context.response_id, request=request)
+ yield stream.emit_created()
+ yield stream.emit_in_progress()
+
+ # Translate every upstream output item back into local events.
+ # Use the convenience generators to emit the full lifecycle for
+ # each output-item type.
+ for upstream_item in result.output:
+ if upstream_item.type == "message":
+ # Extract text content from the message.
+ output_text = ""
+ for part in upstream_item.content:
+ if part.type == "output_text":
+ output_text += part.text
+ for event in stream.output_item_message(output_text):
+ yield event
+ elif upstream_item.type == "reasoning":
+ # Extract reasoning summary text.
+ summary = ""
+ for part in upstream_item.summary:
+ if part.type == "summary_text":
+ summary += part.text
+ for event in stream.output_item_reasoning_item(summary):
+ yield event
+ # Add additional item types as needed (function_call, etc.)
+
+ yield stream.emit_completed()
+
+
+def main() -> None:
+ app.run()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/scripts/__init__.py b/sdk/agentserver/azure-ai-agentserver-responses/scripts/__init__.py
new file mode 100644
index 000000000000..94be86955924
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/scripts/__init__.py
@@ -0,0 +1,3 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Code generation scripts for the responses server."""
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/scripts/generate_validators.py b/sdk/agentserver/azure-ai-agentserver-responses/scripts/generate_validators.py
new file mode 100644
index 000000000000..0b06ade83327
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/scripts/generate_validators.py
@@ -0,0 +1,138 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+#!/usr/bin/env python3
+"""Generate Python payload validators from an OpenAPI document."""
+
+from __future__ import annotations
+
+import argparse
+import json
+from pathlib import Path
+from typing import Any
+
+from validator_emitter import build_validator_module
+from validator_schema_walker import SchemaWalker, discover_post_request_roots
+
+
+def _load_spec(input_path: Path) -> dict[str, Any]:
+ """Load a JSON or YAML OpenAPI document from disk."""
+ text = input_path.read_text(encoding="utf-8")
+ try:
+ loaded = json.loads(text)
+ if isinstance(loaded, dict):
+ return loaded
+ except json.JSONDecodeError:
+ pass
+
+ try:
+ import yaml # type: ignore[import-not-found]
+ except ModuleNotFoundError as exc:
+ raise ValueError(
+ f"unable to parse OpenAPI file '{input_path}'. Expected JSON, or install PyYAML for YAML input."
+ ) from exc
+
+ loaded_yaml = yaml.safe_load(text)
+ if not isinstance(loaded_yaml, dict):
+ raise ValueError(f"OpenAPI file '{input_path}' must contain a top-level object")
+ return loaded_yaml
+
+
+def _load_overlay(overlay_path: Path | None) -> dict[str, Any]:
+ """Load an optional validation overlay YAML file."""
+ if overlay_path is None:
+ return {}
+ text = overlay_path.read_text(encoding="utf-8")
+ try:
+ import yaml # type: ignore[import-not-found]
+ except ModuleNotFoundError as exc:
+ raise ValueError("PyYAML is required to load the overlay file. Run: pip install pyyaml") from exc
+ loaded = yaml.safe_load(text)
+ if not isinstance(loaded, dict):
+ raise ValueError(f"Overlay file '{overlay_path}' must contain a top-level object")
+ return loaded
+
+
+def _build_output(spec: dict[str, Any], roots: list[str], overlay: dict[str, Any] | None = None) -> str:
+ """Create deterministic validator module source text."""
+ schemas = spec.get("components", {}).get("schemas", {})
+ if not isinstance(schemas, dict):
+ schemas = {}
+ else:
+ schemas = dict(schemas)
+
+ def _find_create_response_inline_schema() -> dict[str, Any] | None:
+ paths = spec.get("paths", {})
+ for path, methods in paths.items():
+ if not isinstance(methods, dict):
+ continue
+ if "responses" not in str(path).lower():
+ continue
+ post = methods.get("post")
+ if not isinstance(post, dict):
+ continue
+ request_body = post.get("requestBody", {})
+ content = request_body.get("content", {}).get("application/json", {})
+ schema = content.get("schema", {})
+ if isinstance(schema, dict) and "anyOf" in schema:
+ branches = schema.get("anyOf", [])
+ if isinstance(branches, list) and branches and isinstance(branches[0], dict):
+ return branches[0]
+ if isinstance(schema, dict) and "oneOf" in schema:
+ branches = schema.get("oneOf", [])
+ if isinstance(branches, list) and branches and isinstance(branches[0], dict):
+ return branches[0]
+ if isinstance(schema, dict):
+ return schema
+ return None
+
+ for root in roots:
+ if root in schemas:
+ continue
+ if root == "CreateResponse":
+ inline_schema = _find_create_response_inline_schema()
+ if isinstance(inline_schema, dict):
+ schemas[root] = inline_schema
+
+ # If explicit roots are provided, respect them and skip route-wide discovery.
+ discovered_roots = [] if roots else discover_post_request_roots(spec)
+ merged_roots: list[str] = []
+ seen: set[str] = set()
+ for root in [*roots, *discovered_roots]:
+ if root and root not in seen:
+ seen.add(root)
+ merged_roots.append(root)
+
+ walker = SchemaWalker(schemas, overlay=overlay)
+ for root in merged_roots:
+ walker.walk(root)
+
+ reachable = walker.reachable if walker.reachable else schemas
+ effective_roots = merged_roots if merged_roots else sorted(reachable)
+ return build_validator_module(reachable, effective_roots)
+
+
+def main() -> int:
+ """Run the validator generator CLI."""
+ parser = argparse.ArgumentParser(description="Generate Python payload validators from OpenAPI")
+ parser.add_argument("--input", required=True, help="Path to OpenAPI JSON file")
+ parser.add_argument("--output", required=True, help="Output Python module path")
+ parser.add_argument("--root-schemas", default="", help="Comma-separated root schema names")
+ parser.add_argument("--overlay", default=None, help="Path to validation overlay YAML (optional)")
+ args = parser.parse_args()
+
+ input_path = Path(args.input)
+ output_path = Path(args.output)
+ overlay_path = Path(args.overlay) if args.overlay else None
+ roots = [part.strip() for part in args.root_schemas.split(",") if part.strip()]
+
+ spec = _load_spec(input_path)
+ overlay = _load_overlay(overlay_path)
+ output = _build_output(spec, roots, overlay=overlay)
+
+ output_path.parent.mkdir(parents=True, exist_ok=True)
+ output_path.write_text(output, encoding="utf-8")
+ return 0
+
+
+if __name__ == "__main__":
+ raise SystemExit(main())
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/scripts/generated_shims/__init__.py b/sdk/agentserver/azure-ai-agentserver-responses/scripts/generated_shims/__init__.py
new file mode 100644
index 000000000000..b783bfa73795
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/scripts/generated_shims/__init__.py
@@ -0,0 +1,11 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------
+
+"""Compatibility re-exports for generated models preserved under sdk/models."""
+
+from .sdk.models.models import * # type: ignore # noqa: F401,F403
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/scripts/generated_shims/_enums.py b/sdk/agentserver/azure-ai-agentserver-responses/scripts/generated_shims/_enums.py
new file mode 100644
index 000000000000..481d6d628755
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/scripts/generated_shims/_enums.py
@@ -0,0 +1,11 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------
+
+"""Compatibility shim for generated enum symbols."""
+
+from .sdk.models.models._enums import * # type: ignore # noqa: F401,F403
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/scripts/generated_shims/_models.py b/sdk/agentserver/azure-ai-agentserver-responses/scripts/generated_shims/_models.py
new file mode 100644
index 000000000000..01e649adb824
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/scripts/generated_shims/_models.py
@@ -0,0 +1,11 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------
+
+"""Compatibility shim for generated model symbols."""
+
+from .sdk.models.models._models import * # type: ignore # noqa: F401,F403
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/scripts/generated_shims/_patch.py b/sdk/agentserver/azure-ai-agentserver-responses/scripts/generated_shims/_patch.py
new file mode 100644
index 000000000000..66ee2dea3a63
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/scripts/generated_shims/_patch.py
@@ -0,0 +1,11 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------
+
+"""Compatibility shim for generated patch helpers."""
+
+from .sdk.models.models._patch import * # type: ignore # noqa: F401,F403
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/scripts/generated_shims/models_patch.py b/sdk/agentserver/azure-ai-agentserver-responses/scripts/generated_shims/models_patch.py
new file mode 100644
index 000000000000..3effa2fad974
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/scripts/generated_shims/models_patch.py
@@ -0,0 +1,89 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------
+"""Hand-written customizations injected into the generated models package.
+
+This file is copied over the generated ``_patch.py`` inside
+``sdk/models/models/`` by ``make generate-models``. Anything listed in
+``__all__`` is automatically re-exported by the generated ``__init__.py``,
+shadowing the generated class of the same name.
+
+Approach follows the official customization guide:
+https://aka.ms/azsdk/python/dpcodegen/python/customize
+"""
+
+from enum import Enum
+from typing import Optional
+
+from azure.core import CaseInsensitiveEnumMeta
+
+from .._utils.model_base import rest_field
+from ._models import CreateResponse as CreateResponseGenerated
+from ._models import ResponseObject as ResponseObjectGenerated
+
+
+class ResponseIncompleteReason(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Reason a response finished as incomplete.
+
+ The upstream TypeSpec defines this as an inline literal union
+ (``"max_output_tokens" | "content_filter"``), so the code generator
+ emits ``Literal[...]`` instead of a named enum. This hand-written
+ enum provides a friendlier symbolic constant for SDK consumers.
+ """
+
+ MAX_OUTPUT_TOKENS = "max_output_tokens"
+ """The response was cut short because the maximum output token limit was reached."""
+ CONTENT_FILTER = "content_filter"
+ """The response was cut short because of a content filter."""
+
+
+# ---------------------------------------------------------------------------
+# Fix temperature / top_p types: numeric → float (emitter bug workaround)
+#
+# The upstream TypeSpec defines temperature and top_p as ``numeric | null``
+# (the abstract base scalar for all numbers). The TypeSpec emitter correctly
+# maps this to ``double?`` but @azure-tools/typespec-python@0.61.2 maps
+# ``numeric`` → ``int``. The OpenAPI 3 spec emits ``type: number``
+# (i.e. float), so ``int`` is wrong.
+#
+# Per the official customization guide we subclass the generated models and
+# re-declare the affected fields with the correct type. The generated
+# ``__init__.py`` picks up these subclasses via ``from ._patch import *``
+# which shadows the generated names.
+# ---------------------------------------------------------------------------
+
+
+class CreateResponse(CreateResponseGenerated):
+ """Override generated ``CreateResponse`` to correct temperature/top_p types."""
+
+ temperature: Optional[float] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Sampling temperature. Float between 0 and 2."""
+ top_p: Optional[float] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Nucleus sampling parameter. Float between 0 and 1."""
+
+
+class ResponseObject(ResponseObjectGenerated):
+ """Override generated ``ResponseObject`` to correct temperature/top_p types."""
+
+ temperature: Optional[float] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Sampling temperature. Float between 0 and 2."""
+ top_p: Optional[float] = rest_field(visibility=["read", "create", "update", "delete", "query"])
+ """Nucleus sampling parameter. Float between 0 and 1."""
+
+
+__all__: list[str] = [
+ "ResponseIncompleteReason",
+ "CreateResponse",
+ "ResponseObject",
+]
+
+
+def patch_sdk():
+ """Do not remove from this file.
+
+ `patch_sdk` is a last resort escape hatch that allows you to do customizations
+ you can't accomplish using the techniques described in
+ https://aka.ms/azsdk/python/dpcodegen/python/customize
+ """
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/scripts/generated_shims/sdk_models__init__.py b/sdk/agentserver/azure-ai-agentserver-responses/scripts/generated_shims/sdk_models__init__.py
new file mode 100644
index 000000000000..9abd30ab9c84
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/scripts/generated_shims/sdk_models__init__.py
@@ -0,0 +1,11 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------
+
+"""Model-only generated package surface."""
+
+from .models import * # type: ignore # noqa: F401,F403
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/scripts/validation-overlay.yaml b/sdk/agentserver/azure-ai-agentserver-responses/scripts/validation-overlay.yaml
new file mode 100644
index 000000000000..35c649563613
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/scripts/validation-overlay.yaml
@@ -0,0 +1,50 @@
+# Validation Overlay
+# -----------------
+# Constraints the TypeSpec OpenAPI emitter misses (or underspecifies).
+# Applied by generate-validators.py before codegen.
+#
+# To add a new override:
+# 1. Add a key under `schemas:` matching the schema name (or "CreateResponse" for the inline request body)
+# 2. Set `required:` to add required fields
+# 3. Set `not_required:` to remove fields from required (for input schemas where the
+# merged TypeSpec view is stricter than the OpenAI input spec)
+# 4. Set `properties..minimum/maximum` for range fixes
+#
+# Example:
+# schemas:
+# MySchema:
+# required: ["field_a"]
+# not_required: ["field_b"]
+# properties:
+# field_c:
+# minimum: 0
+# maximum: 100
+
+schemas:
+ # The TypeSpec emitter produces required: [] for the CreateResponse request body.
+ # Model was previously required here, but is now optional (PW-006).
+ # When omitted, the SDK resolves it via ResponsesServerOptions.DefaultModel or empty string.
+ CreateResponse:
+ required: []
+
+ # GAP-01: OpenAI spec EasyInputMessage only requires [role, content].
+ # The "type" field defaults to "message" when absent.
+ # ItemValidatorCustom.ResolveDefaultDiscriminator handles discriminator routing;
+ # this overlay prevents ItemMessageValidator from rejecting missing "type".
+ ItemMessage:
+ not_required: ["type"]
+
+ # GAP-01 (discriminator): When "type" is absent on an Item, default to "message".
+ # This mirrors .NET ItemValidatorCustom.ResolveDefaultDiscriminator.
+ Item:
+ default_discriminator: "message"
+
+ # GAP-03: OpenAI spec InputImageContentParamAutoParam only requires [type].
+ # The "detail" field is nullable/optional and defaults to "auto".
+ MessageContentInputImageContent:
+ not_required: ["detail"]
+
+ # GAP-04 & GAP-05: OpenAI spec FunctionToolParam only requires [name, type].
+ # Both "strict" and "parameters" are nullable/optional.
+ FunctionTool:
+ not_required: ["strict", "parameters"]
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/scripts/validator_emitter.py b/sdk/agentserver/azure-ai-agentserver-responses/scripts/validator_emitter.py
new file mode 100644
index 000000000000..a5008b79918d
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/scripts/validator_emitter.py
@@ -0,0 +1,474 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Emitter that builds deterministic Python validator modules from schemas."""
+
+from __future__ import annotations
+
+from typing import Any
+
+
+def _sanitize_identifier(name: str) -> str:
+ normalized = "".join(ch if ch.isalnum() else "_" for ch in name)
+ while "__" in normalized:
+ normalized = normalized.replace("__", "_")
+ normalized = normalized.strip("_")
+ return normalized or "schema"
+
+
+def _resolve_ref(ref: str) -> str:
+ return ref.rsplit("/", 1)[-1]
+
+
+def _ordered(value: Any) -> Any:
+ if isinstance(value, dict):
+ return {k: _ordered(value[k]) for k in sorted(value)}
+ if isinstance(value, list):
+ return [_ordered(v) for v in value]
+ return value
+
+
+def _header() -> str:
+ return (
+ "# pylint: disable=line-too-long,useless-suppression,too-many-lines\n"
+ "# coding=utf-8\n"
+ "# --------------------------------------------------------------------------\n"
+ "# Copyright (c) Microsoft Corporation. All rights reserved.\n"
+ "# Licensed under the MIT License. See License.txt in the project root for license information.\n"
+ "# Code generated by Microsoft (R) Python Code Generator.\n"
+ "# Changes may cause incorrect behavior and will be lost if the code is regenerated.\n"
+ "# --------------------------------------------------------------------------\n"
+ )
+
+
+def _schema_kind(schema: dict[str, Any]) -> str | None:
+ schema_type = schema.get("type")
+ if isinstance(schema_type, str):
+ return schema_type
+ if "properties" in schema or "additionalProperties" in schema or "discriminator" in schema:
+ return "object"
+ if "oneOf" in schema or "anyOf" in schema:
+ return "union"
+ return None
+
+
+def build_validator_module(schemas: dict[str, dict[str, Any]], roots: list[str]) -> str:
+ """Build generated validator module source code without runtime schema blobs."""
+ ordered_schemas = _ordered(schemas)
+ target_roots = sorted(dict.fromkeys(roots)) if roots else sorted(ordered_schemas)
+
+ lines: list[str] = [_header(), "", "from __future__ import annotations", "", "from typing import Any", ""]
+ lines.extend(
+ [
+ "try:",
+ " from . import _enums as _generated_enums",
+ "except Exception:",
+ " _generated_enums = None",
+ "",
+ "def _append_error(errors: list[dict[str, str]], path: str, message: str) -> None:",
+ " errors.append({'path': path, 'message': message})",
+ "",
+ "def _type_label(value: Any) -> str:",
+ " if value is None:",
+ " return 'null'",
+ " if isinstance(value, bool):",
+ " return 'boolean'",
+ " if isinstance(value, int):",
+ " return 'integer'",
+ " if isinstance(value, float):",
+ " return 'number'",
+ " if isinstance(value, str):",
+ " return 'string'",
+ " if isinstance(value, dict):",
+ " return 'object'",
+ " if isinstance(value, list):",
+ " return 'array'",
+ " return type(value).__name__",
+ "",
+ "def _is_type(value: Any, expected: str) -> bool:",
+ " if expected == 'string':",
+ " return isinstance(value, str)",
+ " if expected == 'integer':",
+ " return isinstance(value, int) and not isinstance(value, bool)",
+ " if expected == 'number':",
+ " return (isinstance(value, int) and not isinstance(value, bool)) or isinstance(value, float)",
+ " if expected == 'boolean':",
+ " return isinstance(value, bool)",
+ " if expected == 'object':",
+ " return isinstance(value, dict)",
+ " if expected == 'array':",
+ " return isinstance(value, list)",
+ " return True",
+ "",
+ "def _append_type_mismatch(errors: list[dict[str, str]], path: str, expected: str, value: Any) -> None:",
+ ' _append_error(errors, path, f"Expected {expected}, got {_type_label(value)}")',
+ "",
+ "def _enum_values(enum_name: str) -> tuple[tuple[str, ...] | None, str | None]:",
+ " if _generated_enums is None:",
+ " return None, f'enum type _enums.{enum_name} is unavailable'",
+ " enum_cls = getattr(_generated_enums, enum_name, None)",
+ " if enum_cls is None:",
+ " return None, f'enum type _enums.{enum_name} is not defined'",
+ " try:",
+ " return tuple(str(member.value) for member in enum_cls), None",
+ " except Exception:",
+ " return None, f'enum type _enums.{enum_name} failed to load values'",
+ "",
+ ]
+ )
+
+ function_schemas: dict[str, dict[str, Any]] = {}
+ function_hints: dict[str, str | None] = {}
+ function_order: list[str] = []
+ anonymous_by_key: dict[str, str] = {}
+
+ def make_unique_function_name(hint: str | None) -> str:
+ base = _sanitize_identifier(hint or "branch")
+ candidate = f"_validate_{base}"
+ if candidate not in function_schemas:
+ return candidate
+
+ suffix = 2
+ while True:
+ candidate = f"_validate_{base}_{suffix}"
+ if candidate not in function_schemas:
+ return candidate
+ suffix += 1
+
+ def ensure_schema_function(schema_name: str) -> str:
+ fn_name = f"_validate_{_sanitize_identifier(schema_name)}"
+ if fn_name not in function_schemas:
+ schema = ordered_schemas.get(schema_name)
+ if isinstance(schema, dict):
+ function_schemas[fn_name] = schema
+ function_hints[fn_name] = schema_name
+ function_order.append(fn_name)
+ return fn_name
+
+ def ensure_anonymous_function(schema: dict[str, Any], hint: str | None = None) -> str:
+ key = repr(_ordered(schema))
+ if key in anonymous_by_key:
+ existing = anonymous_by_key[key]
+ if function_hints.get(existing) is None and hint is not None:
+ function_hints[existing] = hint
+ return existing
+ fn_name = make_unique_function_name(hint)
+ anonymous_by_key[key] = fn_name
+ function_schemas[fn_name] = schema
+ function_hints[fn_name] = hint
+ function_order.append(fn_name)
+ return fn_name
+
+ for root in target_roots:
+ ensure_schema_function(root)
+
+ def emit_line(block: list[str], indent: int, text: str) -> None:
+ block.append((" " * indent) + text)
+
+ def emit_union(
+ schema: dict[str, Any],
+ block: list[str],
+ indent: int,
+ value_expr: str,
+ path_expr: str,
+ errors_expr: str,
+ schema_name_hint: str | None,
+ ) -> None:
+ branches = schema.get("oneOf", schema.get("anyOf", []))
+ branch_funcs: list[tuple[str, str]] = []
+ expected_labels: list[str] = []
+ has_inline_enum_branch = False
+
+ for branch in branches:
+ if not isinstance(branch, dict):
+ continue
+
+ if "$ref" in branch:
+ ref_name = _resolve_ref(str(branch["$ref"]))
+ ref_schema = ordered_schemas.get(ref_name)
+ if isinstance(ref_schema, dict):
+ branch_funcs.append((ensure_schema_function(ref_name), _schema_kind(ref_schema) or "value"))
+ expected_labels.append(ref_name)
+ continue
+
+ if schema_name_hint and "enum" in branch:
+ # Keep enum branches tied to the logical schema name so enum-class resolution stays stable.
+ branch_hint = schema_name_hint
+ has_inline_enum_branch = True
+ else:
+ raw_type = branch.get("type")
+ branch_type = raw_type if isinstance(raw_type, str) else (_schema_kind(branch) or "branch")
+ branch_hint = f"{schema_name_hint}_{branch_type}" if schema_name_hint else str(branch_type)
+ fn_name = ensure_anonymous_function(branch, hint=branch_hint)
+ branch_funcs.append((fn_name, _schema_kind(branch) or "value"))
+ label = branch.get("type") if isinstance(branch.get("type"), str) else (_schema_kind(branch) or "value")
+ expected_labels.append(str(label))
+
+ if not branch_funcs:
+ return
+
+ emit_line(block, indent, "_matched_union = False")
+ for idx, (fn_name, kind) in enumerate(branch_funcs):
+ condition = "True" if kind in ("value", "union", None) else f"_is_type({value_expr}, {kind!r})"
+ emit_line(block, indent, f"if not _matched_union and {condition}:")
+ emit_line(block, indent + 1, f"_branch_errors_{idx}: list[dict[str, str]] = []")
+ emit_line(block, indent + 1, f"{fn_name}({value_expr}, {path_expr}, _branch_errors_{idx})")
+ emit_line(block, indent + 1, f"if not _branch_errors_{idx}:")
+ emit_line(block, indent + 2, "_matched_union = True")
+
+ unique_expected_labels = list(dict.fromkeys(expected_labels))
+ emit_line(block, indent, "if not _matched_union:")
+ if len(unique_expected_labels) == 1:
+ only_label = unique_expected_labels[0]
+ if schema_name_hint and only_label == "string" and has_inline_enum_branch:
+ schema_label = schema_name_hint.rsplit(".", 1)[-1]
+ emit_line(
+ block,
+ indent + 1,
+ f"_append_error({errors_expr}, {path_expr}, "
+ f'f"Expected {schema_label} to be a string value, '
+ f'got {{_type_label({value_expr})}}")',
+ )
+ else:
+ emit_line(block, indent + 1, f"_append_error({errors_expr}, {path_expr}, 'Expected {only_label}')")
+ else:
+ expected = ", ".join(unique_expected_labels) if unique_expected_labels else "valid branch"
+ emit_line(
+ block,
+ indent + 1,
+ f"_append_error({errors_expr}, {path_expr}, "
+ f'f"Expected one of: {expected}; got {{_type_label({value_expr})}}")',
+ )
+ emit_line(block, indent + 1, "return")
+
+ def emit_schema_body(
+ schema: dict[str, Any],
+ block: list[str],
+ indent: int,
+ value_expr: str,
+ path_expr: str,
+ errors_expr: str,
+ schema_name_hint: str | None = None,
+ ) -> None:
+ if schema.get("nullable"):
+ emit_line(block, indent, f"if {value_expr} is None:")
+ emit_line(block, indent + 1, "return")
+
+ if "$ref" in schema:
+ ref_name = _resolve_ref(str(schema["$ref"]))
+ ref_schema = ordered_schemas.get(ref_name)
+ if isinstance(ref_schema, dict):
+ fn = ensure_schema_function(ref_name)
+ emit_line(block, indent, f"{fn}({value_expr}, {path_expr}, {errors_expr})")
+ return
+
+ if "enum" in schema:
+ allowed = tuple(schema.get("enum", []))
+ enum_class_name = None
+ if schema_name_hint:
+ hint_schema = ordered_schemas.get(schema_name_hint)
+ hint_is_enum_like = False
+ if isinstance(hint_schema, dict):
+ if "enum" in hint_schema:
+ hint_is_enum_like = True
+ else:
+ for combo in ("oneOf", "anyOf"):
+ branches = hint_schema.get(combo, [])
+ if isinstance(branches, list) and any(
+ isinstance(b, dict) and "enum" in b for b in branches
+ ):
+ hint_is_enum_like = True
+ break
+ if hint_is_enum_like:
+ candidate = schema_name_hint.rsplit(".", 1)[-1]
+ if candidate and candidate[0].isalpha():
+ enum_class_name = candidate
+
+ if enum_class_name:
+ emit_line(
+ block,
+ indent,
+ f"_allowed_values, _enum_error = _enum_values({enum_class_name!r})",
+ )
+ emit_line(block, indent, "if _enum_error is not None:")
+ emit_line(block, indent + 1, f"_append_error({errors_expr}, {path_expr}, _enum_error)")
+ emit_line(block, indent + 1, "return")
+ emit_line(block, indent, "if _allowed_values is None:")
+ emit_line(block, indent + 1, "return")
+ else:
+ emit_line(block, indent, f"_allowed_values = {allowed!r}")
+ emit_line(block, indent, f"if {value_expr} not in _allowed_values:")
+ emit_line(
+ block,
+ indent + 1,
+ f"_append_error({errors_expr}, {path_expr}, "
+ f"f\"Invalid value '{{{value_expr}}}'. "
+ f"Allowed: {{', '.join(str(v) for v in _allowed_values)}}\")",
+ )
+
+ if "oneOf" in schema or "anyOf" in schema:
+ emit_union(schema, block, indent, value_expr, path_expr, errors_expr, schema_name_hint)
+ return
+
+ schema_type = schema.get("type")
+ effective_type = schema_type if isinstance(schema_type, str) else _schema_kind(schema)
+
+ if isinstance(effective_type, str) and effective_type not in ("value", "union"):
+ emit_line(block, indent, f"if not _is_type({value_expr}, {effective_type!r}):")
+ emit_line(
+ block,
+ indent + 1,
+ f"_append_type_mismatch({errors_expr}, {path_expr}, {effective_type!r}, {value_expr})",
+ )
+ emit_line(block, indent + 1, "return")
+
+ if effective_type == "array":
+ items = schema.get("items")
+ if isinstance(items, dict):
+ item_hint = f"{schema_name_hint}_item" if schema_name_hint else "item"
+ item_fn = ensure_anonymous_function(items, hint=item_hint)
+ emit_line(block, indent, f"for _idx, _item in enumerate({value_expr}):")
+ emit_line(block, indent + 1, f'{item_fn}(_item, f"{{{path_expr}}}[{{_idx}}]", {errors_expr})')
+ return
+
+ if effective_type == "object":
+ properties = schema.get("properties", {})
+ required = schema.get("required", [])
+ disc = schema.get("discriminator")
+ # When a default_discriminator is configured, the discriminator
+ # property is no longer strictly required — it defaults instead.
+ disc_prop_with_default = None
+ if isinstance(disc, dict) and disc.get("defaultValue") is not None:
+ disc_prop_with_default = disc.get("propertyName", "type")
+ if isinstance(properties, dict):
+ for field in required:
+ if field == disc_prop_with_default:
+ continue # Skip — handled by default discriminator
+ emit_line(block, indent, f"if {field!r} not in {value_expr}:")
+ emit_line(
+ block,
+ indent + 1,
+ f'_append_error({errors_expr}, f"{{{path_expr}}}.{field}", '
+ f"\"Required property '{field}' is missing\")",
+ )
+
+ for field, field_schema in sorted(properties.items()):
+ if not isinstance(field_schema, dict):
+ continue
+ field_hint = f"{schema_name_hint}_{field}" if schema_name_hint else field
+ field_fn = ensure_anonymous_function(field_schema, hint=field_hint)
+ emit_line(block, indent, f"if {field!r} in {value_expr}:")
+ emit_line(
+ block,
+ indent + 1,
+ f'{field_fn}({value_expr}[{field!r}], f"{{{path_expr}}}.{field}", {errors_expr})',
+ )
+
+ addl = schema.get("additionalProperties")
+ if isinstance(addl, dict):
+ addl_hint = f"{schema_name_hint}_additional_property" if schema_name_hint else "additional_property"
+ addl_fn = ensure_anonymous_function(addl, hint=addl_hint)
+ known = tuple(sorted(properties.keys())) if isinstance(properties, dict) else tuple()
+ emit_line(block, indent, f"for _key, _item in {value_expr}.items():")
+ emit_line(block, indent + 1, f"if _key not in {known!r}:")
+ emit_line(block, indent + 2, f'{addl_fn}(_item, f"{{{path_expr}}}.{{_key}}", {errors_expr})')
+
+ disc = schema.get("discriminator")
+ if isinstance(disc, dict):
+ prop = disc.get("propertyName", "type")
+ mapping = disc.get("mapping", {})
+ default_disc = disc.get("defaultValue")
+ if default_disc is not None:
+ # GAP-01: When a default discriminator is configured, use it
+ # as a fallback instead of rejecting missing values.
+ # Mirrors ResolveDefaultDiscriminator.
+ emit_line(block, indent, f"if {prop!r} in {value_expr}:")
+ field_hint = f"{schema_name_hint}_{prop}" if schema_name_hint else prop
+ for fld, fld_schema in sorted(properties.items()):
+ if fld == prop and isinstance(fld_schema, dict):
+ fld_fn = ensure_anonymous_function(fld_schema, hint=field_hint)
+ emit_line(
+ block,
+ indent + 1,
+ f'{fld_fn}({value_expr}[{prop!r}], f"{{{path_expr}}}.{prop}", {errors_expr})',
+ )
+ break
+ emit_line(
+ block,
+ indent,
+ f"_disc_value = {value_expr}.get({prop!r}, {default_disc!r})",
+ )
+ emit_line(block, indent, "if not isinstance(_disc_value, str):")
+ disc_err = f"Required discriminator '{prop}' is missing or invalid"
+ emit_line(
+ block,
+ indent + 1,
+ f'_append_error({errors_expr}, f"{{{path_expr}}}.{prop}", "{disc_err}")',
+ )
+ emit_line(block, indent + 1, "return")
+ else:
+ emit_line(block, indent, f"_disc_value = {value_expr}.get({prop!r})")
+ emit_line(block, indent, "if not isinstance(_disc_value, str):")
+ disc_err = f"Required discriminator '{prop}' is missing or invalid"
+ emit_line(
+ block,
+ indent + 1,
+ f'_append_error({errors_expr}, f"{{{path_expr}}}.{prop}", "{disc_err}")',
+ )
+ emit_line(block, indent + 1, "return")
+
+ for disc_value, ref in sorted(mapping.items()):
+ if not isinstance(ref, str):
+ continue
+ ref_name = _resolve_ref(ref)
+ ref_schema = ordered_schemas.get(ref_name)
+ if not isinstance(ref_schema, dict):
+ continue
+ ref_fn = ensure_schema_function(ref_name)
+ emit_line(block, indent, f"if _disc_value == {disc_value!r}:")
+ emit_line(block, indent + 1, f"{ref_fn}({value_expr}, {path_expr}, {errors_expr})")
+
+ rendered_blocks: dict[str, list[str]] = {}
+ idx = 0
+ while idx < len(function_order):
+ fn_name = function_order[idx]
+ idx += 1
+ schema = function_schemas[fn_name]
+ block: list[str] = [f"def {fn_name}(value: Any, path: str, errors: list[dict[str, str]]) -> None:"]
+ schema_name_hint = function_hints.get(fn_name)
+ emit_schema_body(schema, block, 1, "value", "path", "errors", schema_name_hint=schema_name_hint)
+ if len(block) == 1:
+ emit_line(block, 1, "return")
+ rendered_blocks[fn_name] = block
+
+ for fn_name in function_order:
+ lines.extend(rendered_blocks[fn_name])
+ lines.append("")
+
+ lines.append("ROOT_SCHEMAS = " + repr(target_roots))
+ lines.append("")
+
+ for root in target_roots:
+ class_name = f"{_sanitize_identifier(root)}Validator"
+ fn_name = f"_validate_{_sanitize_identifier(root)}"
+ lines.append(f"class {class_name}:")
+ lines.append(' """Generated validator for the root schema."""')
+ lines.append("")
+ lines.append(" @staticmethod")
+ lines.append(" def validate(payload: Any) -> list[dict[str, str]]:")
+ lines.append(" errors: list[dict[str, str]] = []")
+ lines.append(f" {fn_name}(payload, '$', errors)")
+ lines.append(" return errors")
+ lines.append("")
+
+ wrapper_name = f"validate_{_sanitize_identifier(root)}"
+ lines.append(f"def {wrapper_name}(payload: Any) -> list[dict[str, str]]:")
+ lines.append(f" return {class_name}.validate(payload)")
+ lines.append("")
+
+ if not target_roots:
+ lines.append("def validate_payload(payload: Any) -> list[dict[str, str]]:")
+ lines.append(" _ = payload")
+ lines.append(" return []")
+ lines.append("")
+
+ return "\n".join(lines).rstrip() + "\n"
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/scripts/validator_schema_walker.py b/sdk/agentserver/azure-ai-agentserver-responses/scripts/validator_schema_walker.py
new file mode 100644
index 000000000000..690633f46a24
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/scripts/validator_schema_walker.py
@@ -0,0 +1,183 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Schema walking helpers for validator generation."""
+
+from __future__ import annotations
+
+from typing import Any
+
+
+def resolve_ref(ref: str) -> str:
+ """Extract schema name from OpenAPI $ref values."""
+ return ref.rsplit("/", 1)[-1]
+
+
+def _iter_subschemas(schema: dict[str, Any]) -> list[dict[str, Any]]:
+ """Yield nested schema objects that may contain references."""
+ nested: list[dict[str, Any]] = []
+
+ for key in ("oneOf", "anyOf", "allOf"):
+ branches = schema.get(key, [])
+ if isinstance(branches, list):
+ nested.extend([branch for branch in branches if isinstance(branch, dict)])
+
+ properties = schema.get("properties", {})
+ if isinstance(properties, dict):
+ nested.extend([value for value in properties.values() if isinstance(value, dict)])
+
+ items = schema.get("items")
+ if isinstance(items, dict):
+ nested.append(items)
+
+ additional = schema.get("additionalProperties")
+ if isinstance(additional, dict):
+ nested.append(additional)
+
+ # Walk discriminator mapping refs so schemas like ItemMessage / FunctionTool
+ # that are only reachable via a discriminator are included in the reachable set
+ # (and therefore have the overlay applied before the emitter sees them).
+ disc = schema.get("discriminator")
+ if isinstance(disc, dict):
+ mapping = disc.get("mapping", {})
+ if isinstance(mapping, dict):
+ for ref_str in mapping.values():
+ if isinstance(ref_str, str):
+ nested.append({"$ref": ref_str})
+
+ return nested
+
+
+class SchemaWalker:
+ """Collect schemas reachable from one or more roots."""
+
+ def __init__(
+ self,
+ schemas: dict[str, dict[str, Any]],
+ overlay: dict[str, Any] | None = None,
+ ) -> None:
+ self.schemas = schemas
+ self.overlay: dict[str, Any] = overlay or {}
+ self.reachable: dict[str, dict[str, Any]] = {}
+ self._visited: set[str] = set()
+
+ def walk(self, name: str) -> None:
+ """Walk a schema by name and recursively collect reachable references."""
+ if name in self._visited:
+ return
+ self._visited.add(name)
+
+ schema = self.schemas.get(name)
+ if schema is None:
+ return
+
+ schema = self._apply_overlay(name, dict(schema))
+ self.reachable[name] = schema
+ self._walk_schema(schema)
+
+ def _apply_overlay(self, name: str, schema: dict[str, Any]) -> dict[str, Any]:
+ """Apply overlay fixes to a schema.
+
+ Supports four overlay keys per schema entry:
+ - ``required``: replace the required list entirely.
+ - ``not_required``: remove individual fields from required and mark their
+ property schemas as ``nullable`` so ``None`` is accepted.
+ - ``properties``: merge per-property constraint overrides (e.g. minimum/maximum).
+ - ``default_discriminator``: set a default discriminator value for the schema's
+ discriminator dispatch. When the discriminator property is absent from the
+ payload, this value is used instead of rejecting the input.
+ """
+ overlay_schemas = self.overlay.get("schemas", {})
+ # Try exact name first, then fall back to the name with any "Vendor." prefix stripped
+ # (e.g. "OpenAI.ItemMessage" -> "ItemMessage") to stay compatible with the overlay
+ # format shared with the TypeSpec code generator, where TypeSpec uses bare names.
+ overlay_entry = overlay_schemas.get(name)
+ if not overlay_entry:
+ bare = name.rsplit(".", 1)[-1] if "." in name else None
+ if bare:
+ overlay_entry = overlay_schemas.get(bare)
+ if not overlay_entry:
+ return schema
+
+ # Replace required list entirely
+ if "required" in overlay_entry:
+ schema["required"] = list(overlay_entry["required"])
+
+ # Remove individual fields from required; mark those properties nullable
+ if "not_required" in overlay_entry:
+ current_required = list(schema.get("required", []))
+ for field in overlay_entry["not_required"]:
+ if field in current_required:
+ current_required.remove(field)
+ # Mark property nullable so the emitter accepts None/absent values
+ props = schema.get("properties")
+ if isinstance(props, dict) and field in props:
+ props[field] = dict(props[field])
+ props[field]["nullable"] = True
+ schema["required"] = current_required
+
+ # Merge property-level constraint overrides
+ if "properties" in overlay_entry:
+ if "properties" not in schema:
+ schema["properties"] = {}
+ for prop_name, constraints in overlay_entry["properties"].items():
+ if prop_name not in schema["properties"]:
+ schema["properties"][prop_name] = {}
+ else:
+ schema["properties"][prop_name] = dict(schema["properties"][prop_name])
+ schema["properties"][prop_name].update(constraints)
+
+ # Inject default_discriminator into the schema's discriminator dict
+ if "default_discriminator" in overlay_entry:
+ disc = schema.get("discriminator")
+ if isinstance(disc, dict):
+ schema["discriminator"] = dict(disc)
+ schema["discriminator"]["defaultValue"] = overlay_entry["default_discriminator"]
+
+ return schema
+
+ def _walk_schema(self, schema: dict[str, Any]) -> None:
+ """Walk nested schema branches."""
+ ref = schema.get("$ref")
+ if isinstance(ref, str):
+ self.walk(resolve_ref(ref))
+ return
+
+ for nested in _iter_subschemas(schema):
+ self._walk_schema(nested)
+
+
+def discover_post_request_roots(spec: dict[str, Any]) -> list[str]:
+ """Discover root schema names referenced by POST request bodies."""
+ roots: list[str] = []
+ paths = spec.get("paths", {})
+
+ for _path, methods in sorted(paths.items()):
+ if not isinstance(methods, dict):
+ continue
+ post = methods.get("post")
+ if not isinstance(post, dict):
+ continue
+ request_body = post.get("requestBody", {})
+ content = request_body.get("content", {}).get("application/json", {})
+ schema = content.get("schema", {})
+
+ if isinstance(schema, dict) and isinstance(schema.get("$ref"), str):
+ roots.append(resolve_ref(schema["$ref"]))
+ continue
+
+ if isinstance(schema, dict):
+ for key in ("oneOf", "anyOf"):
+ branches = schema.get(key, [])
+ if not isinstance(branches, list):
+ continue
+ for branch in branches:
+ if isinstance(branch, dict) and isinstance(branch.get("$ref"), str):
+ roots.append(resolve_ref(branch["$ref"]))
+
+ deduped: list[str] = []
+ seen: set[str] = set()
+ for root in roots:
+ if root not in seen:
+ seen.add(root)
+ deduped.append(root)
+ return deduped
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/__init__.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/__init__.py
new file mode 100644
index 000000000000..9a0454564dbb
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/__init__.py
@@ -0,0 +1,2 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/_helpers/__init__.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/_helpers/__init__.py
new file mode 100644
index 000000000000..0efcce424aec
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/_helpers/__init__.py
@@ -0,0 +1,7 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Shared testing helpers for deterministic synchronization and diagnostics."""
+
+from .synchronization import EventGate, format_async_failure, poll_until
+
+__all__ = ["poll_until", "EventGate", "format_async_failure"]
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/_helpers/synchronization.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/_helpers/synchronization.py
new file mode 100644
index 000000000000..240c4a1cc202
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/_helpers/synchronization.py
@@ -0,0 +1,72 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Deterministic synchronization helpers used by contract and integration tests."""
+
+from __future__ import annotations
+
+import threading
+import time
+from typing import Any, Callable, Mapping
+
+ContextProvider = Callable[[], Mapping[str, Any] | None]
+
+
+def format_async_failure(
+ *,
+ label: str,
+ timeout_s: float,
+ elapsed_s: float,
+ context: Mapping[str, Any] | None,
+) -> str:
+ """Build a stable, diagnostics-rich timeout failure message."""
+ context_payload = dict(context or {})
+ return f"{label} timed out after {elapsed_s:.3f}s (budget={timeout_s:.3f}s); diagnostics={context_payload}"
+
+
+def poll_until(
+ condition: Callable[[], bool],
+ *,
+ timeout_s: float,
+ interval_s: float = 0.05,
+ context_provider: ContextProvider | None = None,
+ label: str = "poll_until condition",
+) -> tuple[bool, str | None]:
+ """Poll a condition until true or timeout; always returns diagnostic details on timeout."""
+ start = time.monotonic()
+ deadline = start + timeout_s
+ last_context: Mapping[str, Any] | None = None
+
+ while time.monotonic() < deadline:
+ if condition():
+ return True, None
+ if context_provider is not None:
+ maybe_context = context_provider()
+ if maybe_context is not None:
+ last_context = maybe_context
+ time.sleep(interval_s)
+
+ elapsed = time.monotonic() - start
+ return False, format_async_failure(
+ label=label,
+ timeout_s=timeout_s,
+ elapsed_s=elapsed,
+ context=last_context,
+ )
+
+
+class EventGate:
+ """Thread-safe event gate for deterministic synchronization in tests."""
+
+ __slots__ = ("_event", "_payload")
+
+ def __init__(self) -> None:
+ self._event = threading.Event()
+ self._payload: Any = None
+
+ def signal(self, payload: Any = None) -> None:
+ self._payload = payload
+ self._event.set()
+
+ def wait(self, *, timeout_s: float) -> tuple[bool, Any]:
+ ok = self._event.wait(timeout_s)
+ return ok, self._payload
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/conftest.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/conftest.py
new file mode 100644
index 000000000000..9d834c339b88
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/conftest.py
@@ -0,0 +1,11 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Root conftest — ensures the project root is on sys.path so that
+``from tests._helpers import …`` works regardless of how pytest is invoked."""
+
+import sys
+from pathlib import Path
+
+_PROJECT_ROOT = str(Path(__file__).resolve().parent.parent)
+if _PROJECT_ROOT not in sys.path:
+ sys.path.insert(0, _PROJECT_ROOT)
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/__init__.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/__init__.py
new file mode 100644
index 000000000000..9a0454564dbb
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/__init__.py
@@ -0,0 +1,2 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_agent_reference_auto_stamp.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_agent_reference_auto_stamp.py
new file mode 100644
index 000000000000..b8691747e652
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_agent_reference_auto_stamp.py
@@ -0,0 +1,288 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Protocol conformance tests for auto-stamping ``agent_reference`` on output items (US3).
+
+Validates that ``agent_reference`` from the create request propagates to the
+response object and all output items, with handler-set values taking precedence.
+
+Python port of AgentReferenceAutoStampProtocolTests.
+"""
+
+from __future__ import annotations
+
+import json as _json
+from typing import Any
+
+from starlette.testclient import TestClient
+
+from azure.ai.agentserver.responses import ResponsesAgentServerHost
+from azure.ai.agentserver.responses.streaming._event_stream import ResponseEventStream
+
+# ════════════════════════════════════════════════════════════
+# Helpers
+# ════════════════════════════════════════════════════════════
+
+
+def _collect_sse_events(response: Any) -> list[dict[str, Any]]:
+ events: list[dict[str, Any]] = []
+ current_type: str | None = None
+ current_data: str | None = None
+ for line in response.iter_lines():
+ if not line:
+ if current_type is not None:
+ payload = _json.loads(current_data) if current_data else {}
+ events.append({"type": current_type, "data": payload})
+ current_type = None
+ current_data = None
+ continue
+ if line.startswith("event:"):
+ current_type = line.split(":", 1)[1].strip()
+ elif line.startswith("data:"):
+ current_data = line.split(":", 1)[1].strip()
+ if current_type is not None:
+ payload = _json.loads(current_data) if current_data else {}
+ events.append({"type": current_type, "data": payload})
+ return events
+
+
+def _handler_with_output(request: Any, context: Any, cancellation_signal: Any):
+ """Handler that emits a single message output item using the builder."""
+
+ async def _events():
+ stream = ResponseEventStream(response_id=context.response_id, model=getattr(request, "model", None))
+ yield stream.emit_created()
+
+ msg = stream.add_output_item_message()
+ yield msg.emit_added()
+ text = msg.add_text_content()
+ yield text.emit_added()
+ yield text.emit_delta("Hello")
+ yield text.emit_done()
+ yield msg.emit_content_done(text)
+ yield msg.emit_done()
+
+ yield stream.emit_completed()
+
+ return _events()
+
+
+def _handler_with_handler_set_agent_ref(request: Any, context: Any, cancellation_signal: Any):
+ """Handler that sets a custom agent_reference on the output item directly."""
+
+ async def _events():
+ stream = ResponseEventStream(response_id=context.response_id, model=getattr(request, "model", None))
+ yield stream.emit_created()
+
+ # Use the builder then override agent_reference on the emitted event
+ msg = stream.add_output_item_message()
+ added_event = msg.emit_added()
+ added_event["item"]["agent_reference"] = {
+ "type": "agent_reference",
+ "name": "handler-agent",
+ "version": "9.0",
+ }
+ yield added_event
+
+ done_event = msg.emit_done()
+ done_event["item"]["agent_reference"] = {
+ "type": "agent_reference",
+ "name": "handler-agent",
+ "version": "9.0",
+ }
+ yield done_event
+
+ yield stream.emit_completed()
+
+ return _events()
+
+
+def _direct_yield_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Handler that directly yields events without using builder.
+
+ Does NOT set agent_reference on output items. Layer 2 must stamp it.
+ """
+
+ async def _events():
+ # Use builder for response.created
+ stream = ResponseEventStream(response_id=context.response_id, model=getattr(request, "model", None))
+ yield stream.emit_created()
+
+ item_id = f"caitem_{context.response_id[7:25]}directyield00000000000000000001"
+ yield {
+ "type": "response.output_item.added",
+ "item": {
+ "id": item_id,
+ "type": "message",
+ "role": "assistant",
+ "status": "in_progress",
+ "content": [],
+ # agent_reference intentionally NOT set
+ },
+ "output_index": 0,
+ }
+ yield {
+ "type": "response.output_item.done",
+ "item": {
+ "id": item_id,
+ "type": "message",
+ "role": "assistant",
+ "status": "completed",
+ "content": [],
+ },
+ "output_index": 0,
+ }
+
+ yield stream.emit_completed()
+
+ return _events()
+
+
+def _build_client(handler: Any) -> TestClient:
+ app = ResponsesAgentServerHost()
+ app.create_handler(handler)
+ return TestClient(app)
+
+
+# ════════════════════════════════════════════════════════════
+# T024: agent_reference on CreateResponse appears on response
+# ════════════════════════════════════════════════════════════
+
+
+def test_agent_reference_appears_on_response() -> None:
+ """T024 — agent_reference from request propagates to response.created."""
+ client = _build_client(_handler_with_output)
+
+ with client.stream(
+ "POST",
+ "/responses",
+ json={
+ "model": "test",
+ "stream": True,
+ "agent_reference": {"type": "agent_reference", "name": "my-agent", "version": "1.0"},
+ },
+ ) as resp:
+ assert resp.status_code == 200
+ events = _collect_sse_events(resp)
+
+ created_event = next(e for e in events if e["type"] == "response.created")
+ response_obj = created_event["data"]["response"]
+ agent_ref = response_obj["agent_reference"]
+ assert agent_ref["name"] == "my-agent"
+ assert agent_ref["version"] == "1.0"
+
+
+# ════════════════════════════════════════════════════════════
+# T025: agent_reference propagates to output items
+# ════════════════════════════════════════════════════════════
+
+
+def test_agent_reference_propagates_to_output_items() -> None:
+ """T025 — agent_reference from request propagates to all output items."""
+ client = _build_client(_handler_with_output)
+
+ with client.stream(
+ "POST",
+ "/responses",
+ json={
+ "model": "test",
+ "stream": True,
+ "agent_reference": {"type": "agent_reference", "name": "my-agent", "version": "1.0"},
+ },
+ ) as resp:
+ assert resp.status_code == 200
+ events = _collect_sse_events(resp)
+
+ item_events = [e for e in events if e["type"] in ("response.output_item.added", "response.output_item.done")]
+ assert item_events, "Expected at least one output item event"
+
+ for evt in item_events:
+ item = evt["data"]["item"]
+ agent_ref = item.get("agent_reference")
+ assert agent_ref is not None, f"agent_reference missing on {evt['type']}"
+ assert agent_ref["name"] == "my-agent"
+ assert agent_ref["version"] == "1.0"
+
+
+# ════════════════════════════════════════════════════════════
+# T026: Handler-set agent_reference takes precedence
+# ════════════════════════════════════════════════════════════
+
+
+def test_handler_set_agent_reference_is_preserved() -> None:
+ """T026 — handler-set agent_reference takes precedence over request agent_reference."""
+ client = _build_client(_handler_with_handler_set_agent_ref)
+
+ with client.stream(
+ "POST",
+ "/responses",
+ json={
+ "model": "test",
+ "stream": True,
+ "agent_reference": {"type": "agent_reference", "name": "request-agent", "version": "1.0"},
+ },
+ ) as resp:
+ assert resp.status_code == 200
+ events = _collect_sse_events(resp)
+
+ item_added = next(e for e in events if e["type"] == "response.output_item.added")
+ item = item_added["data"]["item"]
+ agent_ref = item.get("agent_reference")
+ assert agent_ref is not None
+ assert agent_ref["name"] == "handler-agent", f"Expected handler-agent to take precedence, got {agent_ref['name']}"
+
+
+# ════════════════════════════════════════════════════════════
+# T027: No agent_reference on request → no agent_reference on items
+# ════════════════════════════════════════════════════════════
+
+
+def test_no_agent_reference_on_request_no_agent_reference_on_items() -> None:
+ """T027 — without agent_reference on request, output items should not have one."""
+ client = _build_client(_handler_with_output)
+
+ with client.stream(
+ "POST",
+ "/responses",
+ json={"model": "test", "stream": True},
+ ) as resp:
+ assert resp.status_code == 200
+ events = _collect_sse_events(resp)
+
+ item_events = [e for e in events if e["type"] in ("response.output_item.added", "response.output_item.done")]
+ assert item_events, "Expected at least one output item event"
+
+ for evt in item_events:
+ item = evt["data"]["item"]
+ agent_ref = item.get("agent_reference")
+ # agent_reference should be absent or null when request has none
+ assert agent_ref is None or agent_ref == {}, (
+ f"Output item should not have agent_reference when request has none, got: {agent_ref}"
+ )
+
+
+# ════════════════════════════════════════════════════════════
+# T028: Direct-yield handler gets agent_reference auto-stamped (Layer 2)
+# ════════════════════════════════════════════════════════════
+
+
+def test_direct_yield_handler_gets_agent_reference_auto_stamped() -> None:
+ """T028 — Layer 2 auto-stamps agent_reference on items from direct-yield handlers."""
+ client = _build_client(_direct_yield_handler)
+
+ with client.stream(
+ "POST",
+ "/responses",
+ json={
+ "model": "test",
+ "stream": True,
+ "agent_reference": {"type": "agent_reference", "name": "direct-agent", "version": "2.0"},
+ },
+ ) as resp:
+ assert resp.status_code == 200
+ events = _collect_sse_events(resp)
+
+ item_added = next(e for e in events if e["type"] == "response.output_item.added")
+ item = item_added["data"]["item"]
+ agent_ref = item.get("agent_reference")
+ assert agent_ref is not None, "agent_reference should be auto-stamped by Layer 2"
+ assert agent_ref["name"] == "direct-agent"
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_bg_stream_disconnect.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_bg_stream_disconnect.py
new file mode 100644
index 000000000000..8a6ab65e9990
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_bg_stream_disconnect.py
@@ -0,0 +1,443 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Protocol conformance tests for background streaming handler surviving disconnect (US3).
+
+Verifies FR-012 (handler continues after SSE disconnect for bg+stream),
+FR-013 (SSE write failure does NOT cancel handler CT).
+
+Python port of BgStreamDisconnectTests.
+
+NOTE: These tests use the async ASGI client with a cancellation-aware SSE reader
+to simulate client disconnect behavior.
+"""
+
+from __future__ import annotations
+
+import asyncio
+import json as _json
+from typing import Any
+
+import pytest
+
+from azure.ai.agentserver.responses import ResponsesAgentServerHost
+from azure.ai.agentserver.responses._id_generator import IdGenerator
+from azure.ai.agentserver.responses.streaming._event_stream import ResponseEventStream
+
+# ════════════════════════════════════════════════════════════
+# Async ASGI client with disconnect capability
+# ════════════════════════════════════════════════════════════
+
+
+class _AsgiResponse:
+ def __init__(self, status_code: int, body: bytes, headers: list[tuple[bytes, bytes]]) -> None:
+ self.status_code = status_code
+ self.body = body
+ self.headers = headers
+
+ def json(self) -> Any:
+ return _json.loads(self.body)
+
+
+class _AsyncAsgiClient:
+ def __init__(self, app: Any) -> None:
+ self._app = app
+
+ @staticmethod
+ def _build_scope(method: str, path: str, body: bytes) -> dict[str, Any]:
+ headers: list[tuple[bytes, bytes]] = []
+ query_string = b""
+ if "?" in path:
+ path, qs = path.split("?", 1)
+ query_string = qs.encode()
+ if body:
+ headers = [
+ (b"content-type", b"application/json"),
+ (b"content-length", str(len(body)).encode()),
+ ]
+ return {
+ "type": "http",
+ "asgi": {"version": "3.0"},
+ "http_version": "1.1",
+ "method": method,
+ "headers": headers,
+ "scheme": "http",
+ "path": path,
+ "raw_path": path.encode(),
+ "query_string": query_string,
+ "server": ("localhost", 80),
+ "client": ("127.0.0.1", 123),
+ "root_path": "",
+ }
+
+ async def request(self, method: str, path: str, *, json_body: dict[str, Any] | None = None) -> _AsgiResponse:
+ body = _json.dumps(json_body).encode() if json_body else b""
+ scope = self._build_scope(method, path, body)
+ status_code: int | None = None
+ response_headers: list[tuple[bytes, bytes]] = []
+ body_parts: list[bytes] = []
+ request_sent = False
+ response_done = asyncio.Event()
+
+ async def receive() -> dict[str, Any]:
+ nonlocal request_sent
+ if not request_sent:
+ request_sent = True
+ return {"type": "http.request", "body": body, "more_body": False}
+ await response_done.wait()
+ return {"type": "http.disconnect"}
+
+ async def send(message: dict[str, Any]) -> None:
+ nonlocal status_code, response_headers
+ if message["type"] == "http.response.start":
+ status_code = message["status"]
+ response_headers = message.get("headers", [])
+ elif message["type"] == "http.response.body":
+ chunk = message.get("body", b"")
+ if chunk:
+ body_parts.append(chunk)
+ if not message.get("more_body", False):
+ response_done.set()
+
+ await self._app(scope, receive, send)
+ assert status_code is not None
+ return _AsgiResponse(status_code=status_code, body=b"".join(body_parts), headers=response_headers)
+
+ async def request_with_disconnect(
+ self,
+ method: str,
+ path: str,
+ *,
+ json_body: dict[str, Any] | None = None,
+ disconnect_event: asyncio.Event | None = None,
+ ) -> _AsgiResponse:
+ """Send a request and disconnect mid-stream when disconnect_event is set."""
+ body = _json.dumps(json_body).encode() if json_body else b""
+ scope = self._build_scope(method, path, body)
+ status_code: int | None = None
+ response_headers: list[tuple[bytes, bytes]] = []
+ body_parts: list[bytes] = []
+ request_sent = False
+ response_done = asyncio.Event()
+
+ async def receive() -> dict[str, Any]:
+ nonlocal request_sent
+ if not request_sent:
+ request_sent = True
+ return {"type": "http.request", "body": body, "more_body": False}
+ if disconnect_event is not None:
+ await disconnect_event.wait()
+ return {"type": "http.disconnect"}
+ await response_done.wait()
+ return {"type": "http.disconnect"}
+
+ async def send(message: dict[str, Any]) -> None:
+ nonlocal status_code, response_headers
+ if message["type"] == "http.response.start":
+ status_code = message["status"]
+ response_headers = message.get("headers", [])
+ elif message["type"] == "http.response.body":
+ chunk = message.get("body", b"")
+ if chunk:
+ body_parts.append(chunk)
+ if not message.get("more_body", False):
+ response_done.set()
+
+ await self._app(scope, receive, send)
+ assert status_code is not None
+ return _AsgiResponse(status_code=status_code, body=b"".join(body_parts), headers=response_headers)
+
+ async def get(self, path: str) -> _AsgiResponse:
+ return await self.request("GET", path)
+
+ async def post(self, path: str, *, json_body: dict[str, Any] | None = None) -> _AsgiResponse:
+ return await self.request("POST", path, json_body=json_body)
+
+
+# ════════════════════════════════════════════════════════════
+# Helpers
+# ════════════════════════════════════════════════════════════
+
+
+def _build_client(handler: Any) -> _AsyncAsgiClient:
+ app = ResponsesAgentServerHost()
+ app.create_handler(handler)
+ return _AsyncAsgiClient(app)
+
+
+async def _ensure_task_done(task: asyncio.Task[Any], handler: Any, timeout: float = 5.0) -> None:
+ for attr in vars(handler):
+ obj = getattr(handler, attr, None)
+ if isinstance(obj, asyncio.Event):
+ obj.set()
+ if not task.done():
+ try:
+ await asyncio.wait_for(task, timeout=timeout)
+ except (asyncio.TimeoutError, Exception):
+ task.cancel()
+ try:
+ await task
+ except (asyncio.CancelledError, Exception):
+ pass
+
+
+async def _wait_for_background_completion(client: _AsyncAsgiClient, response_id: str, timeout: float = 5.0) -> None:
+ for _ in range(int(timeout / 0.05)):
+ resp = await client.get(f"/responses/{response_id}")
+ if resp.status_code == 200:
+ doc = resp.json()
+ if doc.get("status") in ("completed", "failed", "incomplete", "cancelled"):
+ return
+ await asyncio.sleep(0.05)
+ raise TimeoutError(f"Response {response_id} did not reach terminal state within {timeout}s")
+
+
+def _make_multi_output_handler(total_outputs: int, signal_after: int):
+ """Handler that produces N output items, signals for disconnect after M items."""
+ ready_for_disconnect = asyncio.Event()
+ handler_completed = asyncio.Event()
+
+ def handler(request: Any, context: Any, cancellation_signal: Any):
+ async def _events():
+ stream = ResponseEventStream(
+ response_id=context.response_id,
+ model=getattr(request, "model", None),
+ )
+ yield stream.emit_created()
+
+ for i in range(total_outputs):
+ msg = stream.add_output_item_message()
+ yield msg.emit_added()
+ text = msg.add_text_content()
+ yield text.emit_added()
+ yield text.emit_delta(f"Item-{i}")
+ yield text.emit_done()
+ yield msg.emit_content_done(text)
+ yield msg.emit_done()
+
+ if i == signal_after - 1:
+ ready_for_disconnect.set()
+ # Give client time to disconnect
+ await asyncio.sleep(0.3)
+
+ yield stream.emit_completed()
+ handler_completed.set()
+
+ return _events()
+
+ handler.ready_for_disconnect = ready_for_disconnect
+ handler.handler_completed = handler_completed
+ return handler
+
+
+def _make_cancellation_tracking_handler():
+ """Handler that tracks whether it was cancelled vs completed."""
+ ready_for_disconnect = asyncio.Event()
+ handler_cancelled = asyncio.Event()
+ handler_completed = asyncio.Event()
+
+ def handler(request: Any, context: Any, cancellation_signal: Any):
+ async def _events():
+ stream = ResponseEventStream(
+ response_id=context.response_id,
+ model=getattr(request, "model", None),
+ )
+ yield stream.emit_created()
+ ready_for_disconnect.set()
+
+ # Wait without checking cancellation_signal (simulates work)
+ await asyncio.sleep(0.5)
+
+ if cancellation_signal.is_set():
+ handler_cancelled.set()
+ return
+
+ yield stream.emit_completed()
+ handler_completed.set()
+
+ return _events()
+
+ handler.ready_for_disconnect = ready_for_disconnect
+ handler.handler_cancelled = handler_cancelled
+ handler.handler_completed = handler_completed
+ return handler
+
+
+def _make_slow_completing_handler():
+ """Handler that takes a moment to complete (for bg+nostream regression test)."""
+ handler_completed = asyncio.Event()
+
+ def handler(request: Any, context: Any, cancellation_signal: Any):
+ async def _events():
+ stream = ResponseEventStream(
+ response_id=context.response_id,
+ model=getattr(request, "model", None),
+ )
+ yield stream.emit_created()
+ await asyncio.sleep(0.2)
+ yield stream.emit_completed()
+ handler_completed.set()
+
+ return _events()
+
+ handler.handler_completed = handler_completed
+ return handler
+
+
+# ════════════════════════════════════════════════════════════
+# T036: bg+stream — client disconnects after 3 events,
+# handler produces 10 total → GET returns completed with all output
+# ════════════════════════════════════════════════════════════
+
+
+@pytest.mark.asyncio
+async def test_bg_stream_client_disconnects_handler_completes_all_events() -> None:
+ """T036/FR-012 — bg+stream: handler continues after client disconnect.
+
+ Handler produces 10 output items, client disconnects after 3.
+ GET after handler completes should return completed with all items.
+ """
+ total = 10
+ disconnect_after = 3
+ handler = _make_multi_output_handler(total, disconnect_after)
+ client = _build_client(handler)
+ response_id = IdGenerator.new_response_id()
+
+ disconnect = asyncio.Event()
+ post_task = asyncio.create_task(
+ client.request_with_disconnect(
+ "POST",
+ "/responses",
+ json_body={
+ "response_id": response_id,
+ "model": "test",
+ "background": True,
+ "stream": True,
+ },
+ disconnect_event=disconnect,
+ )
+ )
+ try:
+ await asyncio.wait_for(handler.ready_for_disconnect.wait(), timeout=5.0)
+
+ # Disconnect the SSE client
+ disconnect.set()
+ try:
+ await asyncio.wait_for(post_task, timeout=2.0)
+ except (asyncio.TimeoutError, Exception):
+ pass
+
+ # Wait for handler to complete all events
+ await asyncio.wait_for(handler.handler_completed.wait(), timeout=5.0)
+
+ # Wait for orchestrator to persist terminal status
+ await _wait_for_background_completion(client, response_id)
+
+ # GET should return completed
+ get_resp = await client.get(f"/responses/{response_id}")
+ assert get_resp.status_code == 200
+ doc = get_resp.json()
+ assert doc["status"] == "completed", (
+ f"FR-012: bg+stream handler should complete after disconnect, got status '{doc['status']}'"
+ )
+ finally:
+ await _ensure_task_done(post_task, handler)
+
+
+# ════════════════════════════════════════════════════════════
+# T037: bg+stream — SSE write failure does NOT cancel handler CT
+# ════════════════════════════════════════════════════════════
+
+
+@pytest.mark.asyncio
+async def test_bg_stream_sse_write_failure_does_not_cancel_handler_ct() -> None:
+ """T037/FR-013 — bg+stream: SSE write failure does not trigger handler cancellation.
+
+ After client disconnect, the handler should complete normally,
+ not be cancelled by the SSE write failure.
+ """
+ handler = _make_cancellation_tracking_handler()
+ client = _build_client(handler)
+ response_id = IdGenerator.new_response_id()
+
+ disconnect = asyncio.Event()
+ post_task = asyncio.create_task(
+ client.request_with_disconnect(
+ "POST",
+ "/responses",
+ json_body={
+ "response_id": response_id,
+ "model": "test",
+ "background": True,
+ "stream": True,
+ },
+ disconnect_event=disconnect,
+ )
+ )
+ try:
+ await asyncio.wait_for(handler.ready_for_disconnect.wait(), timeout=5.0)
+
+ # Disconnect
+ disconnect.set()
+ try:
+ await asyncio.wait_for(post_task, timeout=2.0)
+ except (asyncio.TimeoutError, Exception):
+ pass
+
+ # Wait for handler outcome
+ done_events = [handler.handler_completed, handler.handler_cancelled]
+ done, _ = await asyncio.wait(
+ [asyncio.create_task(e.wait()) for e in done_events],
+ timeout=3.0,
+ return_when=asyncio.FIRST_COMPLETED,
+ )
+
+ # Handler should have COMPLETED, not been CANCELLED
+ assert handler.handler_completed.is_set(), (
+ "FR-013: Handler should complete normally, not be cancelled by SSE disconnect"
+ )
+ assert not handler.handler_cancelled.is_set(), (
+ "FR-013: Handler CT should NOT have been cancelled by SSE disconnect"
+ )
+ finally:
+ await _ensure_task_done(post_task, handler)
+
+
+# ════════════════════════════════════════════════════════════
+# T038: bg+nostream — handler continues after disconnect (regression)
+# ════════════════════════════════════════════════════════════
+
+
+@pytest.mark.asyncio
+async def test_bg_nostream_handler_continues_after_disconnect() -> None:
+ """T038 — bg+nostream: handler continues to completion even after client disconnect.
+
+ POST bg+nostream returns immediately. Handler is still running.
+ After handler completes, GET returns completed.
+ """
+ handler = _make_slow_completing_handler()
+ client = _build_client(handler)
+
+ # POST bg+nostream returns immediately
+ post_resp = await client.post(
+ "/responses",
+ json_body={
+ "model": "test",
+ "background": True,
+ },
+ )
+ assert post_resp.status_code == 200
+ response_id = post_resp.json()["id"]
+
+ # Handler hasn't completed yet
+ assert not handler.handler_completed.is_set()
+
+ # Wait for handler to complete
+ await asyncio.wait_for(handler.handler_completed.wait(), timeout=5.0)
+
+ # Wait for orchestrator to persist terminal status
+ await _wait_for_background_completion(client, response_id)
+
+ # GET should return completed
+ get_resp = await client.get(f"/responses/{response_id}")
+ assert get_resp.status_code == 200
+ assert get_resp.json()["status"] == "completed"
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_cancel_consistency.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_cancel_consistency.py
new file mode 100644
index 000000000000..e02f838002f2
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_cancel_consistency.py
@@ -0,0 +1,277 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Protocol conformance tests for cancel consistency (US6, FR-014, FR-015).
+
+Verifies FR-014 (SetCancelled applied exactly once) and
+FR-015 (persisted state matches returned state on cancel).
+
+Python port of CancelConsistencyTests.
+
+NOTE: These tests require concurrent access during active handlers, so they use
+the async ASGI client pattern.
+"""
+
+from __future__ import annotations
+
+import asyncio
+import json as _json
+from typing import Any
+
+import pytest
+
+from azure.ai.agentserver.responses import ResponsesAgentServerHost
+from azure.ai.agentserver.responses._id_generator import IdGenerator
+from azure.ai.agentserver.responses.streaming._event_stream import ResponseEventStream
+
+# ════════════════════════════════════════════════════════════
+# Async ASGI client
+# ════════════════════════════════════════════════════════════
+
+
+class _AsgiResponse:
+ def __init__(self, status_code: int, body: bytes, headers: list[tuple[bytes, bytes]]) -> None:
+ self.status_code = status_code
+ self.body = body
+ self.headers = headers
+
+ def json(self) -> Any:
+ return _json.loads(self.body)
+
+
+class _AsyncAsgiClient:
+ def __init__(self, app: Any) -> None:
+ self._app = app
+
+ @staticmethod
+ def _build_scope(method: str, path: str, body: bytes) -> dict[str, Any]:
+ headers: list[tuple[bytes, bytes]] = []
+ query_string = b""
+ if "?" in path:
+ path, qs = path.split("?", 1)
+ query_string = qs.encode()
+ if body:
+ headers = [
+ (b"content-type", b"application/json"),
+ (b"content-length", str(len(body)).encode()),
+ ]
+ return {
+ "type": "http",
+ "asgi": {"version": "3.0"},
+ "http_version": "1.1",
+ "method": method,
+ "headers": headers,
+ "scheme": "http",
+ "path": path,
+ "raw_path": path.encode(),
+ "query_string": query_string,
+ "server": ("localhost", 80),
+ "client": ("127.0.0.1", 123),
+ "root_path": "",
+ }
+
+ async def request(self, method: str, path: str, *, json_body: dict[str, Any] | None = None) -> _AsgiResponse:
+ body = _json.dumps(json_body).encode() if json_body else b""
+ scope = self._build_scope(method, path, body)
+ status_code: int | None = None
+ response_headers: list[tuple[bytes, bytes]] = []
+ body_parts: list[bytes] = []
+ request_sent = False
+ response_done = asyncio.Event()
+
+ async def receive() -> dict[str, Any]:
+ nonlocal request_sent
+ if not request_sent:
+ request_sent = True
+ return {"type": "http.request", "body": body, "more_body": False}
+ await response_done.wait()
+ return {"type": "http.disconnect"}
+
+ async def send(message: dict[str, Any]) -> None:
+ nonlocal status_code, response_headers
+ if message["type"] == "http.response.start":
+ status_code = message["status"]
+ response_headers = message.get("headers", [])
+ elif message["type"] == "http.response.body":
+ chunk = message.get("body", b"")
+ if chunk:
+ body_parts.append(chunk)
+ if not message.get("more_body", False):
+ response_done.set()
+
+ await self._app(scope, receive, send)
+ assert status_code is not None
+ return _AsgiResponse(status_code=status_code, body=b"".join(body_parts), headers=response_headers)
+
+ async def get(self, path: str) -> _AsgiResponse:
+ return await self.request("GET", path)
+
+ async def post(self, path: str, *, json_body: dict[str, Any] | None = None) -> _AsgiResponse:
+ return await self.request("POST", path, json_body=json_body)
+
+
+# ════════════════════════════════════════════════════════════
+# Helpers
+# ════════════════════════════════════════════════════════════
+
+
+def _build_client(handler: Any) -> _AsyncAsgiClient:
+ app = ResponsesAgentServerHost()
+ app.create_handler(handler)
+ return _AsyncAsgiClient(app)
+
+
+async def _ensure_task_done(task: asyncio.Task[Any], handler: Any, timeout: float = 5.0) -> None:
+ for attr in vars(handler):
+ obj = getattr(handler, attr, None)
+ if isinstance(obj, asyncio.Event):
+ obj.set()
+ if not task.done():
+ try:
+ await asyncio.wait_for(task, timeout=timeout)
+ except (asyncio.TimeoutError, Exception):
+ task.cancel()
+ try:
+ await task
+ except (asyncio.CancelledError, Exception):
+ pass
+
+
+def _make_cancellable_bg_handler():
+ """Handler that emits created+in_progress, then blocks until cancelled or released."""
+ started = asyncio.Event()
+ release = asyncio.Event()
+
+ def handler(request: Any, context: Any, cancellation_signal: Any):
+ async def _events():
+ stream = ResponseEventStream(
+ response_id=context.response_id,
+ model=getattr(request, "model", None),
+ )
+ yield stream.emit_created()
+ yield stream.emit_in_progress()
+ started.set()
+ while not release.is_set():
+ if cancellation_signal.is_set():
+ return
+ await asyncio.sleep(0.01)
+ yield stream.emit_completed()
+
+ return _events()
+
+ handler.started = started
+ handler.release = release
+ return handler
+
+
+# ════════════════════════════════════════════════════════════
+# T055: Cancel bg response — persisted state = returned state
+# (0 output items, status: cancelled)
+# ════════════════════════════════════════════════════════════
+
+
+@pytest.mark.asyncio
+async def test_cancel_bg_response_persisted_state_matches_returned_state() -> None:
+ """T055 — cancel bg response: persisted state matches returned cancel snapshot.
+
+ FR-015: The cancel endpoint return value must match the persisted state.
+ """
+ handler = _make_cancellable_bg_handler()
+ client = _build_client(handler)
+ response_id = IdGenerator.new_response_id()
+
+ post_task = asyncio.create_task(
+ client.post(
+ "/responses",
+ json_body={
+ "response_id": response_id,
+ "model": "test",
+ "background": True,
+ },
+ )
+ )
+ try:
+ await asyncio.wait_for(handler.started.wait(), timeout=5.0)
+
+ # Cancel the response
+ cancel_resp = await client.post(f"/responses/{response_id}/cancel")
+ assert cancel_resp.status_code == 200 # FR-015
+
+ cancel_doc = cancel_resp.json()
+ returned_status = cancel_doc["status"]
+ returned_output = cancel_doc["output"]
+ assert returned_status == "cancelled"
+ assert returned_output == []
+
+ # Let handler exit
+ handler.release.set()
+ await asyncio.wait_for(post_task, timeout=5.0)
+ finally:
+ await _ensure_task_done(post_task, handler)
+
+ # Allow cleanup
+ await asyncio.sleep(0.2)
+
+ # Verify persisted state matches returned state
+ get_resp = await client.get(f"/responses/{response_id}")
+ assert get_resp.status_code == 200
+ persisted = get_resp.json()
+ assert persisted["status"] == "cancelled", (
+ f"Persisted status should match cancel return: expected 'cancelled', got '{persisted['status']}'"
+ )
+ assert persisted["output"] == [], (
+ f"Persisted output should match cancel return: expected [], got {persisted['output']}"
+ )
+
+
+# ════════════════════════════════════════════════════════════
+# T056: Cancel bg+stream response — persisted state matches cancel return
+# ════════════════════════════════════════════════════════════
+
+
+@pytest.mark.asyncio
+async def test_cancel_bg_stream_response_persisted_state_matches() -> None:
+ """T056 — cancel bg+stream: persisted state matches cancel endpoint return value.
+
+ FR-014: SetCancelled applied exactly once.
+ FR-015: Persisted state = returned state.
+ """
+ handler = _make_cancellable_bg_handler()
+ client = _build_client(handler)
+ response_id = IdGenerator.new_response_id()
+
+ post_task = asyncio.create_task(
+ client.post(
+ "/responses",
+ json_body={
+ "response_id": response_id,
+ "model": "test",
+ "background": True,
+ "stream": True,
+ },
+ )
+ )
+ try:
+ await asyncio.wait_for(handler.started.wait(), timeout=5.0)
+ await asyncio.sleep(0.1) # Let bg task process response.created
+
+ # Cancel the response
+ cancel_resp = await client.post(f"/responses/{response_id}/cancel")
+ assert cancel_resp.status_code == 200
+
+ cancel_doc = cancel_resp.json()
+ assert cancel_doc["status"] == "cancelled"
+
+ # Let handler exit
+ handler.release.set()
+ await asyncio.wait_for(post_task, timeout=5.0)
+ finally:
+ await _ensure_task_done(post_task, handler)
+
+ # Allow cleanup
+ await asyncio.sleep(0.2)
+
+ # Verify persisted state matches
+ get_resp = await client.get(f"/responses/{response_id}")
+ assert get_resp.status_code == 200
+ persisted = get_resp.json()
+ assert persisted["status"] == "cancelled", f"Persisted status should be 'cancelled', got '{persisted['status']}'"
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_cancel_endpoint.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_cancel_endpoint.py
new file mode 100644
index 000000000000..7b67e3f8087d
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_cancel_endpoint.py
@@ -0,0 +1,623 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Contract tests for POST /responses/{response_id}/cancel behavior."""
+
+from __future__ import annotations
+
+import asyncio
+import threading
+from typing import Any
+
+import pytest
+from starlette.testclient import TestClient
+
+from azure.ai.agentserver.responses import ResponsesAgentServerHost
+from azure.ai.agentserver.responses._id_generator import IdGenerator
+from tests._helpers import EventGate, poll_until
+
+
+def _noop_response_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Minimal handler used to wire the hosting surface in contract tests."""
+
+ async def _events():
+ if False: # pragma: no cover - required to keep async-generator shape.
+ yield None
+
+ return _events()
+
+
+def _delayed_response_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Handler that keeps background execution cancellable for a short period."""
+
+ async def _events():
+ if cancellation_signal.is_set():
+ return
+ await asyncio.sleep(0.25)
+ if cancellation_signal.is_set():
+ return
+ if False: # pragma: no cover - keep async generator shape.
+ yield None
+
+ return _events()
+
+
+def _cancellable_bg_response_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Handler that emits response.created then blocks until cancelled.
+
+ Phase 3: response_created_signal is set on the first event, so run_background
+ returns quickly with in_progress status while the task waits for cancellation.
+ """
+
+ async def _events():
+ yield {
+ "type": "response.created",
+ "response": {
+ "status": "in_progress",
+ "output": [],
+ },
+ }
+ # Block until cancellation signal is set
+ while not cancellation_signal.is_set():
+ await asyncio.sleep(0.01)
+
+ return _events()
+
+
+def _raising_response_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Handler that raises to transition a background response into failed."""
+
+ async def _events():
+ raise RuntimeError("simulated handler failure")
+ if False: # pragma: no cover - keep async generator shape.
+ yield None
+
+ return _events()
+
+
+def _unknown_cancellation_response_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Handler that raises an unknown cancellation exception source."""
+
+ async def _events():
+ raise asyncio.CancelledError("unknown cancellation source")
+ if False: # pragma: no cover - keep async generator shape.
+ yield None
+
+ return _events()
+
+
+def _incomplete_response_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Handler that emits an explicit incomplete terminal response event."""
+
+ async def _events():
+ yield {
+ "type": "response.created",
+ "response": {
+ "status": "queued",
+ "output": [],
+ },
+ }
+ yield {
+ "type": "response.in_progress",
+ "response": {
+ "status": "in_progress",
+ "output": [],
+ },
+ }
+ yield {
+ "type": "response.incomplete",
+ "response": {
+ "status": "incomplete",
+ "output": [],
+ },
+ }
+
+ return _events()
+
+
+def _make_blocking_sync_response_handler(started_gate: EventGate, release_gate: threading.Event):
+ """Factory for a handler that holds a sync request in-flight for deterministic concurrent cancel checks."""
+
+ def handler(request: Any, context: Any, cancellation_signal: Any):
+ async def _events():
+ started_gate.signal(True)
+ while not release_gate.is_set():
+ if cancellation_signal.is_set():
+ return
+ await asyncio.sleep(0.01)
+ if False: # pragma: no cover - keep async generator shape.
+ yield None
+
+ return _events()
+
+ return handler
+
+
+def _build_client(handler: Any | None = None) -> TestClient:
+ app = ResponsesAgentServerHost()
+ app.create_handler(handler or _noop_response_handler)
+ return TestClient(app)
+
+
+def _create_background_response(client: TestClient, *, response_id: str | None = None) -> str:
+ payload: dict[str, Any] = {
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": True,
+ }
+ if response_id is not None:
+ payload["response_id"] = response_id
+
+ create_response = client.post("/responses", json=payload)
+ assert create_response.status_code == 200
+ created_id = create_response.json().get("id")
+ assert isinstance(created_id, str)
+ return created_id
+
+
+def _wait_for_status(client: TestClient, response_id: str, expected_status: str, *, timeout_s: float = 5.0) -> None:
+ latest_status: str | None = None
+
+ def _has_expected_status() -> bool:
+ nonlocal latest_status
+ get_response = client.get(f"/responses/{response_id}")
+ if get_response.status_code != 200:
+ return False
+ latest_status = get_response.json().get("status")
+ return latest_status == expected_status
+
+ ok, failure = poll_until(
+ _has_expected_status,
+ timeout_s=timeout_s,
+ interval_s=0.05,
+ context_provider=lambda: {"response_id": response_id, "last_status": latest_status},
+ label=f"wait for status={expected_status}",
+ )
+ assert ok, failure
+
+
+def _assert_error(
+ response: Any,
+ *,
+ expected_status: int,
+ expected_type: str,
+ expected_message: str | None = None,
+) -> None:
+ assert response.status_code == expected_status
+ payload = response.json()
+ assert isinstance(payload.get("error"), dict)
+ assert payload["error"].get("type") == expected_type
+ if expected_message is not None:
+ assert payload["error"].get("message") == expected_message
+
+
+def test_cancel__cancels_background_response_and_clears_output() -> None:
+ client = _build_client(_cancellable_bg_response_handler)
+
+ response_id = _create_background_response(client)
+
+ cancel_response = client.post(f"/responses/{response_id}/cancel")
+ assert cancel_response.status_code == 200
+ payload = cancel_response.json()
+ assert payload.get("status") == "cancelled"
+ assert payload.get("output") == []
+
+ get_response = client.get(f"/responses/{response_id}")
+ assert get_response.status_code == 200
+ snapshot = get_response.json()
+ assert snapshot.get("status") == "cancelled"
+ assert snapshot.get("output") == []
+
+
+def test_cancel__is_idempotent_for_already_cancelled_response() -> None:
+ client = _build_client(_cancellable_bg_response_handler)
+
+ response_id = _create_background_response(client)
+
+ first_cancel = client.post(f"/responses/{response_id}/cancel")
+ assert first_cancel.status_code == 200
+ assert first_cancel.json().get("status") == "cancelled"
+ assert first_cancel.json().get("output") == []
+
+ second_cancel = client.post(f"/responses/{response_id}/cancel")
+ assert second_cancel.status_code == 200
+ assert second_cancel.json().get("status") == "cancelled"
+ assert second_cancel.json().get("output") == []
+
+
+def test_cancel__returns_400_for_completed_background_response() -> None:
+ client = _build_client()
+ response_id = _create_background_response(client)
+ _wait_for_status(client, response_id, "completed")
+
+ cancel_response = client.post(f"/responses/{response_id}/cancel")
+ _assert_error(
+ cancel_response,
+ expected_status=400,
+ expected_type="invalid_request_error",
+ expected_message="Cannot cancel a completed response.",
+ )
+
+
+def test_cancel__returns_failed_for_immediate_handler_failure() -> None:
+ """Background POST waits for response.created; when the handler fails
+ before emitting it, the POST returns 200 with status=failed.
+ """
+
+ def _raising_before_events(req: Any, ctx: Any, sig: Any):
+ async def _ev():
+ raise RuntimeError("simulated handler failure")
+ if False: # pragma: no cover
+ yield None
+
+ return _ev()
+
+ client = _build_client(_raising_before_events)
+ create_response = client.post(
+ "/responses",
+ json={"model": "gpt-4o-mini", "input": "hello", "stream": False, "store": True, "background": True},
+ )
+ # Background POST returns 200 — failure reflected in status, not HTTP code
+ assert create_response.status_code == 200
+ payload = create_response.json()
+ assert payload.get("status") == "failed"
+
+
+@pytest.mark.skip(reason="S-024: Starlette TestClient teardown sends spurious CancelledErrors")
+def test_cancel__unknown_cancellation_exception_is_treated_as_failed() -> None:
+ """S-024: An unknown CancelledError (not from cancel signal) should be
+ treated as a handler error, transitioning the response to failed."""
+ client = _build_client(_unknown_cancellation_response_handler)
+ response_id = _create_background_response(client)
+ _wait_for_status(client, response_id, "failed")
+
+ get_response = client.get(f"/responses/{response_id}")
+ assert get_response.status_code == 200
+ assert get_response.json().get("status") == "failed"
+
+
+def test_cancel__stream_disconnect_sets_handler_cancellation_signal() -> None:
+ pytest.skip(
+ "Requires a real ASGI disconnect harness; Starlette TestClient does not deterministically surface"
+ " client-disconnect cancellation signals to the handler."
+ )
+
+
+def test_cancel__background_stream_disconnect_does_not_cancel_handler() -> None:
+ pytest.skip(
+ "Requires a real ASGI disconnect harness to verify that background execution is immune to"
+ " stream client disconnect per S-026."
+ )
+
+
+def test_cancel__returns_400_for_incomplete_background_response() -> None:
+ client = _build_client(_incomplete_response_handler)
+ response_id = _create_background_response(client)
+ _wait_for_status(client, response_id, "incomplete")
+
+ cancel_response = client.post(f"/responses/{response_id}/cancel")
+ _assert_error(
+ cancel_response,
+ expected_status=400,
+ expected_type="invalid_request_error",
+ )
+
+
+def test_cancel__returns_400_for_synchronous_response() -> None:
+ client = _build_client()
+
+ create_response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": False,
+ },
+ )
+ assert create_response.status_code == 200
+ response_id = create_response.json()["id"]
+
+ cancel_response = client.post(f"/responses/{response_id}/cancel")
+ _assert_error(
+ cancel_response,
+ expected_status=400,
+ expected_type="invalid_request_error",
+ expected_message="Cannot cancel a synchronous response.",
+ )
+
+
+def test_cancel__returns_404_for_in_flight_synchronous_response() -> None:
+ started_gate = EventGate()
+ release_gate = threading.Event()
+ client = _build_client(_make_blocking_sync_response_handler(started_gate, release_gate))
+ response_id = IdGenerator.new_response_id()
+
+ create_result: dict[str, Any] = {}
+
+ def _issue_sync_create() -> None:
+ try:
+ create_result["response"] = client.post(
+ "/responses",
+ json={
+ "response_id": response_id,
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": False,
+ },
+ )
+ except Exception as exc: # pragma: no cover - surfaced by assertions below.
+ create_result["error"] = exc
+
+ create_thread = threading.Thread(target=_issue_sync_create, daemon=True)
+ create_thread.start()
+
+ started, _ = started_gate.wait(timeout_s=2.0)
+ assert started, "Expected sync create request to enter handler before cancel call"
+
+ cancel_response = client.post(f"/responses/{response_id}/cancel")
+ _assert_error(
+ cancel_response,
+ expected_status=404,
+ expected_type="invalid_request_error",
+ )
+
+ release_gate.set()
+ create_thread.join(timeout=2.0)
+ assert not create_thread.is_alive(), "Expected in-flight sync request to finish after release"
+
+ thread_error = create_result.get("error")
+ assert thread_error is None, str(thread_error)
+ create_response = create_result.get("response")
+ assert create_response is not None
+ assert create_response.status_code == 200
+
+
+def test_cancel__returns_404_for_unknown_response_id() -> None:
+ client = _build_client()
+
+ cancel_response = client.post("/responses/resp_does_not_exist/cancel")
+ _assert_error(
+ cancel_response,
+ expected_status=404,
+ expected_type="invalid_request_error",
+ )
+
+
+# ══════════════════════════════════════════════════════════
+# B11: Cancel from queued / early in_progress state
+# ══════════════════════════════════════════════════════════
+
+
+def test_cancel__from_queued_or_early_in_progress_succeeds() -> None:
+ """B11 — Cancel issued immediately after creation (queued/early in_progress) returns HTTP 200,
+ status=cancelled, and output=[]."""
+ client = _build_client(_cancellable_bg_response_handler)
+
+ create_response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": True,
+ },
+ )
+ assert create_response.status_code == 200
+ response_id = create_response.json()["id"]
+
+ # Cancel immediately — response is likely queued or very early in_progress.
+ cancel_response = client.post(f"/responses/{response_id}/cancel")
+ assert cancel_response.status_code == 200
+
+ _wait_for_status(client, response_id, "cancelled")
+
+ get_response = client.get(f"/responses/{response_id}")
+ assert get_response.status_code == 200
+ payload = get_response.json()
+ assert payload["status"] == "cancelled"
+ assert payload.get("output") == [], f"output must be cleared for a cancelled response, got: {payload.get('output')}"
+
+
+# ══════════════════════════════════════════════════════════
+# B11 winddown: cancel timeout forces termination
+# ══════════════════════════════════════════════════════════
+
+
+def _stubborn_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Handler that ignores the cancellation signal entirely."""
+
+ async def _events():
+ yield {
+ "type": "response.created",
+ "response": {
+ "status": "in_progress",
+ "output": [],
+ },
+ }
+ # Block forever, ignoring the cancellation signal
+ while True:
+ await asyncio.sleep(0.01)
+
+ return _events()
+
+
+def test_cancel__winddown_forces_termination_when_handler_ignores_signal() -> None:
+ """B11 winddown — if the handler ignores the cancellation signal, the
+ background task still reaches a terminal state within the winddown timeout."""
+ import azure.ai.agentserver.responses.hosting._orchestrator as _orch
+
+ original = _orch._CANCEL_WINDDOWN_TIMEOUT
+ _orch._CANCEL_WINDDOWN_TIMEOUT = 0.5 # shorten for test speed
+ try:
+ client = _build_client(_stubborn_handler)
+ response_id = _create_background_response(client)
+
+ cancel_response = client.post(f"/responses/{response_id}/cancel")
+ assert cancel_response.status_code == 200
+
+ # The background task should terminate within the winddown timeout
+ _wait_for_status(client, response_id, "cancelled", timeout_s=5.0)
+ finally:
+ _orch._CANCEL_WINDDOWN_TIMEOUT = original
+
+
+# ══════════════════════════════════════════════════════════
+# B12: Cancel fallback for stored terminal responses after restart
+# ══════════════════════════════════════════════════════════
+
+
+def test_cancel__provider_fallback_returns_400_for_completed_after_restart() -> None:
+ """B12 — cancel on a completed response whose runtime record was lost
+ (simulated restart) returns 400 instead of 404."""
+ from azure.ai.agentserver.responses.store._memory import InMemoryResponseProvider
+
+ provider = InMemoryResponseProvider()
+
+ # First app instance: create and complete a response
+ app1 = ResponsesAgentServerHost(provider=provider)
+ app1.create_handler(_noop_response_handler)
+ client1 = TestClient(app1)
+ response_id = _create_background_response(client1)
+ _wait_for_status(client1, response_id, "completed")
+
+ # Second app instance (simulating restart): fresh runtime state, same provider
+ app2 = ResponsesAgentServerHost(provider=provider)
+ app2.create_handler(_noop_response_handler)
+ client2 = TestClient(app2)
+
+ cancel_response = client2.post(f"/responses/{response_id}/cancel")
+ _assert_error(
+ cancel_response,
+ expected_status=400,
+ expected_type="invalid_request_error",
+ expected_message="Cannot cancel a completed response.",
+ )
+
+
+def test_cancel__provider_fallback_returns_400_for_failed_after_restart() -> None:
+ """B12 — cancel on a failed response whose runtime record was lost returns 400."""
+ from azure.ai.agentserver.responses.store._memory import InMemoryResponseProvider
+
+ provider = InMemoryResponseProvider()
+
+ # First app instance: create a response that fails
+ app1 = ResponsesAgentServerHost(provider=provider)
+ app1.create_handler(_raising_response_handler)
+ client1 = TestClient(app1)
+ response_id = _create_background_response(client1)
+ _wait_for_status(client1, response_id, "failed")
+
+ # Second app instance (simulating restart)
+ app2 = ResponsesAgentServerHost(provider=provider)
+ app2.create_handler(_noop_response_handler)
+ client2 = TestClient(app2)
+
+ cancel_response = client2.post(f"/responses/{response_id}/cancel")
+ _assert_error(
+ cancel_response,
+ expected_status=400,
+ expected_type="invalid_request_error",
+ expected_message="Cannot cancel a failed response.",
+ )
+
+
+def test_cancel__persisted_state_is_cancelled_even_when_handler_completes_after_timeout() -> None:
+ """B11 race condition: handler eventually yields response.completed after cancel.
+
+ The durable store must still reflect 'cancelled', not 'completed'.
+ """
+ from azure.ai.agentserver.responses.store._memory import InMemoryResponseProvider
+
+ provider = InMemoryResponseProvider()
+
+ def _uncooperative_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Handler that ignores cancellation and eventually completes."""
+
+ async def _events():
+ yield {
+ "type": "response.created",
+ "response": {"status": "in_progress", "output": []},
+ }
+ # Deliberatly ignores cancellation_signal — simulates uncooperative handler.
+ # The short sleep ensures the handler is still "running" when cancel comes in,
+ # but completes before the 10s winddown deadline.
+ await asyncio.sleep(0.5)
+ yield {
+ "type": "response.completed",
+ "response": {"status": "completed", "output": []},
+ }
+
+ return _events()
+
+ app = ResponsesAgentServerHost(provider=provider)
+ app.create_handler(_uncooperative_handler)
+ client = TestClient(app)
+
+ create = client.post(
+ "/responses",
+ json={"model": "test", "input": "hello", "stream": False, "store": True, "background": True},
+ )
+ assert create.status_code == 200
+ response_id = create.json()["id"]
+
+ # Cancel — the handler is still running
+ cancel = client.post(f"/responses/{response_id}/cancel")
+ assert cancel.status_code == 200
+ assert cancel.json()["status"] == "cancelled"
+
+ # Wait for background task to fully finalize
+ import time
+
+ time.sleep(2.0)
+
+ # GET from durable store must show cancelled
+ get = client.get(f"/responses/{response_id}")
+ assert get.status_code == 200
+ assert get.json()["status"] == "cancelled", (
+ "B11: Persisted state must be 'cancelled' — cancellation always wins, "
+ "even when the handler yields response.completed after cancel"
+ )
+
+
+def test_cancel__in_progress_response_triggers_cancellation_signal() -> None:
+ """Cancel triggers the cancellation_signal provided to the handler.
+
+ Ported from CancelResponseProtocolTests.Cancel_InProgressResponse_TriggersCancellationToken.
+ """
+
+ def _tracking_handler(request: Any, context: Any, cancellation_signal: Any):
+ async def _events():
+ yield {
+ "type": "response.created",
+ "response": {"status": "in_progress", "output": []},
+ }
+ # Block until cancel; the asyncio.sleep yields to the event loop
+ # so the cancel endpoint's signal actually propagates.
+ for _ in range(500):
+ if cancellation_signal.is_set():
+ return
+ await asyncio.sleep(0.01)
+
+ return _events()
+
+ client = _build_client(_tracking_handler)
+
+ create = client.post(
+ "/responses",
+ json={"model": "test", "input": "hello", "stream": False, "store": True, "background": True},
+ )
+ assert create.status_code == 200
+ response_id = create.json()["id"]
+
+ cancel = client.post(f"/responses/{response_id}/cancel")
+ assert cancel.status_code == 200
+
+ # If the signal was triggered the handler should have exited and the
+ # response reached a terminal state ("cancelled").
+ _wait_for_status(client, response_id, "cancelled", timeout_s=5.0)
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_connection_termination.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_connection_termination.py
new file mode 100644
index 000000000000..39fa260d880e
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_connection_termination.py
@@ -0,0 +1,223 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Protocol conformance tests for connection termination behavior (US3).
+
+Validates that client disconnects are handled correctly for each mode:
+- Non-bg streaming disconnect → handler cancelled
+- Bg non-streaming → POST returns immediately, handler continues
+
+Python port of ConnectionTerminationTests.
+
+NOTE: The T067 (non-bg disconnect → cancelled) relies on HTTP connection
+lifecycle that Starlette TestClient cannot model (no TCP disconnect).
+We test what we can: bg non-streaming handler continuation.
+"""
+
+from __future__ import annotations
+
+import asyncio
+import json as _json
+from typing import Any
+
+import pytest
+
+from azure.ai.agentserver.responses import ResponsesAgentServerHost
+from azure.ai.agentserver.responses.streaming._event_stream import ResponseEventStream
+
+# ════════════════════════════════════════════════════════════
+# Async ASGI client
+# ════════════════════════════════════════════════════════════
+
+
+class _AsgiResponse:
+ def __init__(self, status_code: int, body: bytes, headers: list[tuple[bytes, bytes]]) -> None:
+ self.status_code = status_code
+ self.body = body
+ self.headers = headers
+
+ def json(self) -> Any:
+ return _json.loads(self.body)
+
+
+class _AsyncAsgiClient:
+ def __init__(self, app: Any) -> None:
+ self._app = app
+
+ @staticmethod
+ def _build_scope(method: str, path: str, body: bytes) -> dict[str, Any]:
+ headers: list[tuple[bytes, bytes]] = []
+ query_string = b""
+ if "?" in path:
+ path, qs = path.split("?", 1)
+ query_string = qs.encode()
+ if body:
+ headers = [
+ (b"content-type", b"application/json"),
+ (b"content-length", str(len(body)).encode()),
+ ]
+ return {
+ "type": "http",
+ "asgi": {"version": "3.0"},
+ "http_version": "1.1",
+ "method": method,
+ "headers": headers,
+ "scheme": "http",
+ "path": path,
+ "raw_path": path.encode(),
+ "query_string": query_string,
+ "server": ("localhost", 80),
+ "client": ("127.0.0.1", 123),
+ "root_path": "",
+ }
+
+ async def request(self, method: str, path: str, *, json_body: dict[str, Any] | None = None) -> _AsgiResponse:
+ body = _json.dumps(json_body).encode() if json_body else b""
+ scope = self._build_scope(method, path, body)
+ status_code: int | None = None
+ response_headers: list[tuple[bytes, bytes]] = []
+ body_parts: list[bytes] = []
+ request_sent = False
+ response_done = asyncio.Event()
+
+ async def receive() -> dict[str, Any]:
+ nonlocal request_sent
+ if not request_sent:
+ request_sent = True
+ return {"type": "http.request", "body": body, "more_body": False}
+ await response_done.wait()
+ return {"type": "http.disconnect"}
+
+ async def send(message: dict[str, Any]) -> None:
+ nonlocal status_code, response_headers
+ if message["type"] == "http.response.start":
+ status_code = message["status"]
+ response_headers = message.get("headers", [])
+ elif message["type"] == "http.response.body":
+ chunk = message.get("body", b"")
+ if chunk:
+ body_parts.append(chunk)
+ if not message.get("more_body", False):
+ response_done.set()
+
+ await self._app(scope, receive, send)
+ assert status_code is not None
+ return _AsgiResponse(status_code=status_code, body=b"".join(body_parts), headers=response_headers)
+
+ async def get(self, path: str) -> _AsgiResponse:
+ return await self.request("GET", path)
+
+ async def post(self, path: str, *, json_body: dict[str, Any] | None = None) -> _AsgiResponse:
+ return await self.request("POST", path, json_body=json_body)
+
+
+# ════════════════════════════════════════════════════════════
+# Helpers
+# ════════════════════════════════════════════════════════════
+
+
+def _build_client(handler: Any) -> _AsyncAsgiClient:
+ app = ResponsesAgentServerHost()
+ app.create_handler(handler)
+ return _AsyncAsgiClient(app)
+
+
+async def _ensure_task_done(task: asyncio.Task[Any], handler: Any, timeout: float = 5.0) -> None:
+ for attr in vars(handler):
+ obj = getattr(handler, attr, None)
+ if isinstance(obj, asyncio.Event):
+ obj.set()
+ if not task.done():
+ try:
+ await asyncio.wait_for(task, timeout=timeout)
+ except (asyncio.TimeoutError, Exception):
+ task.cancel()
+ try:
+ await task
+ except (asyncio.CancelledError, Exception):
+ pass
+
+
+async def _wait_for_background_completion(client: _AsyncAsgiClient, response_id: str, timeout: float = 5.0) -> None:
+ for _ in range(int(timeout / 0.05)):
+ resp = await client.get(f"/responses/{response_id}")
+ if resp.status_code == 200:
+ doc = resp.json()
+ if doc.get("status") in ("completed", "failed", "incomplete", "cancelled"):
+ return
+ await asyncio.sleep(0.05)
+ raise TimeoutError(f"Response {response_id} did not reach terminal state within {timeout}s")
+
+
+# ════════════════════════════════════════════════════════════
+# T069: bg non-streaming → POST returns immediately, handler continues
+# ════════════════════════════════════════════════════════════
+
+
+@pytest.mark.asyncio
+async def test_bg_non_streaming_post_returns_handler_continues() -> None:
+ """T069 — bg non-streaming: POST returns immediately with in_progress, handler continues."""
+ handler_completed = asyncio.Event()
+
+ def handler(request: Any, context: Any, cancellation_signal: Any):
+ async def _events():
+ stream = ResponseEventStream(
+ response_id=context.response_id,
+ model=getattr(request, "model", None),
+ )
+ yield stream.emit_created()
+ # Simulate work
+ await asyncio.sleep(0.3)
+ yield stream.emit_completed()
+ handler_completed.set()
+
+ return _events()
+
+ client = _build_client(handler)
+
+ post_resp = await client.post(
+ "/responses",
+ json_body={"model": "test", "background": True},
+ )
+
+ # POST returns immediately with in_progress
+ assert post_resp.status_code == 200
+ doc = post_resp.json()
+ assert doc["status"] == "in_progress"
+ response_id = doc["id"]
+
+ # Handler hasn't completed yet
+ assert not handler_completed.is_set()
+
+ # Wait for handler to complete
+ await asyncio.wait_for(handler_completed.wait(), timeout=5.0)
+ await asyncio.sleep(0.1) # let cleanup finish
+
+ # GET should return completed response
+ await _wait_for_background_completion(client, response_id)
+ get_resp = await client.get(f"/responses/{response_id}")
+ assert get_resp.status_code == 200
+ assert get_resp.json()["status"] == "completed"
+
+
+# ════════════════════════════════════════════════════════════
+# T067: non-bg streaming disconnect → handler cancelled (skip)
+#
+# NOTE: This test is skipped because the Starlette TestClient and our
+# async ASGI client cannot model the TCP disconnect lifecycle required
+# to test this behavior. The original test relies on HTTP connection
+# cancellation token propagation which doesn't map to ASGI's receive()
+# disconnect model in the same way.
+# ════════════════════════════════════════════════════════════
+
+
+@pytest.mark.skip(reason="Starlette/ASGI TestClient cannot model TCP disconnect lifecycle for non-bg streaming")
+@pytest.mark.asyncio
+async def test_non_bg_streaming_disconnect_results_in_cancelled() -> None:
+ """T067 — non-bg streaming: client disconnect → handler cancelled, status: cancelled.
+
+ This test cannot be implemented with Starlette TestClient because:
+ 1. Non-bg streaming ties the SSE response to the HTTP connection
+ 2. Disconnect requires the HTTP framework to propagate cancellation through
+ the ASGI lifecycle, which TestClient doesn't model
+ """
+ pass
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_conversation_store.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_conversation_store.py
new file mode 100644
index 000000000000..9cb5b49475dc
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_conversation_store.py
@@ -0,0 +1,349 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Protocol conformance tests for ``conversation`` × ``store`` interaction.
+
+``conversation`` + ``store=false`` is accepted — reads history, doesn't write it.
+The response is ephemeral (GET returns 404).
+
+Python port of ConversationStoreProtocolTests.
+"""
+
+from __future__ import annotations
+
+import json as _json
+from typing import Any
+
+from starlette.testclient import TestClient
+
+from azure.ai.agentserver.responses import ResponsesAgentServerHost
+from azure.ai.agentserver.responses.streaming._event_stream import ResponseEventStream
+
+# ════════════════════════════════════════════════════════════
+# Helpers
+# ════════════════════════════════════════════════════════════
+
+
+def _collect_sse_events(response: Any) -> list[dict[str, Any]]:
+ events: list[dict[str, Any]] = []
+ current_type: str | None = None
+ current_data: str | None = None
+ for line in response.iter_lines():
+ if not line:
+ if current_type is not None:
+ payload = _json.loads(current_data) if current_data else {}
+ events.append({"type": current_type, "data": payload})
+ current_type = None
+ current_data = None
+ continue
+ if line.startswith("event:"):
+ current_type = line.split(":", 1)[1].strip()
+ elif line.startswith("data:"):
+ current_data = line.split(":", 1)[1].strip()
+ if current_type is not None:
+ payload = _json.loads(current_data) if current_data else {}
+ events.append({"type": current_type, "data": payload})
+ return events
+
+
+def _simple_text_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Handler that emits created + completed."""
+
+ async def _events():
+ stream = ResponseEventStream(response_id=context.response_id, model=getattr(request, "model", None))
+ yield stream.emit_created()
+ yield stream.emit_completed()
+
+ return _events()
+
+
+def _noop_handler(request: Any, context: Any, cancellation_signal: Any):
+ async def _events():
+ if False:
+ yield None
+
+ return _events()
+
+
+def _build_client(handler: Any = None) -> TestClient:
+ app = ResponsesAgentServerHost()
+ app.create_handler(handler or _noop_handler)
+ return TestClient(app)
+
+
+# ════════════════════════════════════════════════════════════
+# conversation (string ID) + store=false → 200 SSE, GET → 404
+# ════════════════════════════════════════════════════════════
+
+
+def test_store_false_with_conversation_string_id_returns_200_ephemeral() -> None:
+ """conversation + store=false is accepted and response is ephemeral (GET → 404)."""
+ client = _build_client(_simple_text_handler)
+
+ with client.stream(
+ "POST",
+ "/responses",
+ json={
+ "model": "test",
+ "store": False,
+ "stream": True,
+ "conversation": "conv_abc123",
+ },
+ ) as resp:
+ assert resp.status_code == 200
+ content_type = resp.headers.get("content-type", "")
+ assert "text/event-stream" in content_type
+ events = _collect_sse_events(resp)
+
+ # Verify stream completed
+ terminal = [e for e in events if e["type"] == "response.completed"]
+ assert terminal, "Expected response.completed event"
+
+ # Extract response ID and verify ephemeral (GET → 404)
+ response_id = events[0]["data"]["response"]["id"]
+ get_resp = client.get(f"/responses/{response_id}")
+ assert get_resp.status_code == 404
+
+
+# ════════════════════════════════════════════════════════════
+# conversation (object) + store=false → 200 SSE, GET → 404
+# ════════════════════════════════════════════════════════════
+
+
+def test_store_false_with_conversation_object_returns_200_ephemeral() -> None:
+ """conversation object + store=false is accepted and response is ephemeral (GET → 404)."""
+ client = _build_client(_simple_text_handler)
+
+ with client.stream(
+ "POST",
+ "/responses",
+ json={
+ "model": "test",
+ "store": False,
+ "stream": True,
+ "conversation": {"id": "conv_xyz789"},
+ },
+ ) as resp:
+ assert resp.status_code == 200
+ content_type = resp.headers.get("content-type", "")
+ assert "text/event-stream" in content_type
+ events = _collect_sse_events(resp)
+
+ terminal = [e for e in events if e["type"] == "response.completed"]
+ assert terminal, "Expected response.completed event"
+
+ response_id = events[0]["data"]["response"]["id"]
+ get_resp = client.get(f"/responses/{response_id}")
+ assert get_resp.status_code == 404
+
+
+# ════════════════════════════════════════════════════════════
+# store=true + conversation is allowed (not rejected)
+# ════════════════════════════════════════════════════════════
+
+
+def test_store_true_with_conversation_is_allowed() -> None:
+ """store=true + conversation combination should not be rejected."""
+ client = _build_client()
+
+ response = client.post(
+ "/responses",
+ json={
+ "model": "test",
+ "store": True,
+ "conversation": "conv_abc123",
+ },
+ )
+ # Should not be 400 — this combination is valid
+ assert response.status_code != 400
+
+
+# ════════════════════════════════════════════════════════════
+# store=false without conversation is allowed
+# ════════════════════════════════════════════════════════════
+
+
+def test_store_false_without_conversation_is_allowed() -> None:
+ """store=false without conversation should be accepted."""
+ client = _build_client()
+
+ response = client.post(
+ "/responses",
+ json={
+ "model": "test",
+ "store": False,
+ },
+ )
+ assert response.status_code == 200
+
+
+# ════════════════════════════════════════════════════════════
+# Conversation round-trip tests
+# (ported from ConversationStoreProtocolTests.cs)
+# ════════════════════════════════════════════════════════════
+
+
+def test_default_with_conversation_string_round_trips_in_response() -> None:
+ """Conversation string round-trips in the default (sync) JSON response."""
+ client = _build_client()
+
+ response = client.post(
+ "/responses",
+ json={"model": "test", "conversation": "conv_abc123"},
+ )
+ assert response.status_code == 200
+ payload = response.json()
+ conv = payload.get("conversation")
+ assert conv is not None, "conversation must be present in response"
+ conv_id = conv.get("id") if isinstance(conv, dict) else conv
+ assert conv_id == "conv_abc123", f"Expected conversation.id='conv_abc123', got {conv_id!r}"
+
+
+def test_default_with_conversation_object_round_trips_in_response() -> None:
+ """Conversation object round-trips in the default (sync) JSON response."""
+ client = _build_client()
+
+ response = client.post(
+ "/responses",
+ json={"model": "test", "conversation": {"id": "conv_xyz789"}},
+ )
+ assert response.status_code == 200
+ payload = response.json()
+ conv = payload.get("conversation")
+ assert conv is not None, "conversation must be present in response"
+ conv_id = conv.get("id") if isinstance(conv, dict) else conv
+ assert conv_id == "conv_xyz789", f"Expected conversation.id='conv_xyz789', got {conv_id!r}"
+
+
+def test_streaming_with_conversation_string_round_trips_in_created_event() -> None:
+ """Conversation string is stamped on the response.created SSE event."""
+ client = _build_client(_simple_text_handler)
+
+ with client.stream(
+ "POST",
+ "/responses",
+ json={"model": "test", "stream": True, "conversation": "conv_abc123"},
+ ) as resp:
+ assert resp.status_code == 200
+ events = _collect_sse_events(resp)
+
+ created = [e for e in events if e["type"] == "response.created"]
+ assert created, "Expected response.created event"
+ conv = created[0]["data"]["response"].get("conversation")
+ assert conv is not None, "conversation must be stamped on response.created"
+ conv_id = conv.get("id") if isinstance(conv, dict) else conv
+ assert conv_id == "conv_abc123"
+
+
+def test_streaming_with_conversation_object_round_trips_in_created_event() -> None:
+ """Conversation object is stamped on the response.created SSE event."""
+ client = _build_client(_simple_text_handler)
+
+ with client.stream(
+ "POST",
+ "/responses",
+ json={"model": "test", "stream": True, "conversation": {"id": "conv_xyz789"}},
+ ) as resp:
+ assert resp.status_code == 200
+ events = _collect_sse_events(resp)
+
+ created = [e for e in events if e["type"] == "response.created"]
+ assert created, "Expected response.created event"
+ conv = created[0]["data"]["response"].get("conversation")
+ assert conv is not None, "conversation must be stamped on response.created"
+ conv_id = conv.get("id") if isinstance(conv, dict) else conv
+ assert conv_id == "conv_xyz789"
+
+
+def test_streaming_conversation_stamped_on_completed_event() -> None:
+ """Conversation ID is stamped on the response.completed SSE event."""
+ client = _build_client(_simple_text_handler)
+
+ with client.stream(
+ "POST",
+ "/responses",
+ json={"model": "test", "stream": True, "conversation": "conv_roundtrip"},
+ ) as resp:
+ assert resp.status_code == 200
+ events = _collect_sse_events(resp)
+
+ completed = [e for e in events if e["type"] == "response.completed"]
+ assert completed, "Expected response.completed event"
+ conv = completed[0]["data"]["response"].get("conversation")
+ assert conv is not None, "conversation must be stamped on response.completed"
+ conv_id = conv.get("id") if isinstance(conv, dict) else conv
+ assert conv_id == "conv_roundtrip"
+
+
+def _lifecycle_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Handler that emits created → in_progress → completed lifecycle events."""
+
+ async def _events():
+ stream = ResponseEventStream(response_id=context.response_id, model=getattr(request, "model", None))
+ yield stream.emit_created()
+ yield stream.emit_in_progress()
+ yield stream.emit_completed()
+
+ return _events()
+
+
+def test_streaming_conversation_stamped_on_all_lifecycle_events() -> None:
+ """Conversation ID is stamped on all lifecycle events (created, in_progress, completed)."""
+ client = _build_client(_lifecycle_handler)
+
+ with client.stream(
+ "POST",
+ "/responses",
+ json={"model": "test", "stream": True, "conversation": "conv_all_events"},
+ ) as resp:
+ assert resp.status_code == 200
+ events = _collect_sse_events(resp)
+
+ lifecycle_types = {"response.created", "response.in_progress", "response.completed"}
+ lifecycle_events = [e for e in events if e["type"] in lifecycle_types]
+ assert len(lifecycle_events) >= 3, (
+ f"Expected at least 3 lifecycle events, got {[e['type'] for e in lifecycle_events]}"
+ )
+
+ for evt in lifecycle_events:
+ conv = evt["data"]["response"].get("conversation")
+ assert conv is not None, f"conversation must be stamped on {evt['type']}"
+ conv_id = conv.get("id") if isinstance(conv, dict) else conv
+ assert conv_id == "conv_all_events", (
+ f"Expected conversation.id='conv_all_events' on {evt['type']}, got {conv_id!r}"
+ )
+
+
+def test_background_with_conversation_string_round_trips_in_response() -> None:
+ """Background mode: conversation string round-trips in the 200 POST response."""
+ client = _build_client()
+
+ response = client.post(
+ "/responses",
+ json={
+ "model": "test",
+ "background": True,
+ "store": True,
+ "conversation": "conv_bg_123",
+ },
+ )
+ assert response.status_code == 200
+ payload = response.json()
+ conv = payload.get("conversation")
+ assert conv is not None, "conversation must be present in background response"
+ conv_id = conv.get("id") if isinstance(conv, dict) else conv
+ assert conv_id == "conv_bg_123", f"Expected conversation.id='conv_bg_123', got {conv_id!r}"
+
+
+def test_default_without_conversation_response_has_null_conversation() -> None:
+ """When no conversation is provided, the conversation property is absent or null."""
+ client = _build_client()
+
+ response = client.post(
+ "/responses",
+ json={"model": "test"},
+ )
+ assert response.status_code == 200
+ payload = response.json()
+ conv = payload.get("conversation")
+ # conversation should be absent or null
+ assert conv is None, f"Expected null/absent conversation when not provided, got {conv!r}"
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_create_endpoint.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_create_endpoint.py
new file mode 100644
index 000000000000..025152aad67f
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_create_endpoint.py
@@ -0,0 +1,827 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Contract tests for POST /responses endpoint behavior."""
+
+from __future__ import annotations
+
+from typing import Any
+
+from starlette.testclient import TestClient
+
+from azure.ai.agentserver.responses import ResponsesAgentServerHost
+from tests._helpers import poll_until
+
+
+def _noop_response_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Minimal handler used to wire the hosting surface in contract tests."""
+
+ async def _events():
+ if False: # pragma: no cover - required to keep async-generator shape.
+ yield None
+
+ return _events()
+
+
+def _build_client() -> TestClient:
+ app = ResponsesAgentServerHost()
+ app.create_handler(_noop_response_handler)
+ return TestClient(app)
+
+
+def test_create__returns_json_response_for_non_streaming_success() -> None:
+ client = _build_client()
+
+ response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": False,
+ },
+ )
+
+ assert response.status_code == 200
+ payload = response.json()
+ assert isinstance(payload.get("id"), str)
+ assert payload["id"].startswith("caresp_")
+ assert payload.get("response_id") == payload.get("id")
+ # agent_reference may be empty/absent when the request doesn't include one
+ agent_ref = payload.get("agent_reference")
+ assert agent_ref is None or isinstance(agent_ref, dict)
+ assert payload.get("object") == "response"
+ assert payload.get("status") in {"completed", "in_progress", "queued"}
+ assert "sequence_number" not in payload
+
+
+def test_create__preserves_client_supplied_identity_fields() -> None:
+ client = _build_client()
+
+ response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "response_id": "caresp_1234567890abcdef00ABCDEFGHIJKLMNOPQRSTUVWXYZ012345",
+ "agent_reference": {
+ "type": "agent_reference",
+ "name": "custom-agent",
+ "version": "v1",
+ },
+ "stream": False,
+ "store": True,
+ "background": False,
+ },
+ )
+
+ assert response.status_code == 200
+ payload = response.json()
+ assert payload.get("id") == "caresp_1234567890abcdef00ABCDEFGHIJKLMNOPQRSTUVWXYZ012345"
+ assert payload.get("response_id") == "caresp_1234567890abcdef00ABCDEFGHIJKLMNOPQRSTUVWXYZ012345"
+ assert payload.get("agent_reference") == {
+ "type": "agent_reference",
+ "name": "custom-agent",
+ "version": "v1",
+ }
+
+
+def test_create__rejects_invalid_response_id_format() -> None:
+ client = _build_client()
+
+ response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "response_id": "bad-id",
+ "stream": False,
+ "store": True,
+ "background": False,
+ },
+ )
+
+ assert response.status_code == 400
+ payload = response.json()
+ assert payload["error"].get("type") == "invalid_request_error"
+ assert payload["error"].get("param") == "response_id"
+
+
+def test_create__rejects_invalid_agent_reference_shape() -> None:
+ client = _build_client()
+
+ response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "agent_reference": {"type": "not_agent_reference", "name": "bad"},
+ "stream": False,
+ "store": True,
+ "background": False,
+ },
+ )
+
+ assert response.status_code == 400
+ payload = response.json()
+ assert payload["error"].get("type") == "invalid_request_error"
+ assert payload["error"].get("param") == "agent_reference.type"
+
+
+def test_create__returns_structured_400_for_invalid_payload() -> None:
+ client = _build_client()
+
+ response = client.post(
+ "/responses",
+ json={
+ "background": True,
+ "store": False,
+ },
+ )
+
+ assert response.status_code == 400
+ payload = response.json()
+ error = payload.get("error")
+ assert isinstance(error, dict)
+ assert error.get("type") == "invalid_request_error"
+
+
+def test_create__store_false_response_is_not_visible_via_get() -> None:
+ client = _build_client()
+
+ create_response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": False,
+ "background": False,
+ },
+ )
+
+ assert create_response.status_code == 200
+ response_id = create_response.json()["id"]
+
+ get_response = client.get(f"/responses/{response_id}")
+ assert get_response.status_code == 404
+
+
+def test_create__background_mode_returns_immediate_then_reaches_terminal_state() -> None:
+ client = _build_client()
+
+ create_response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": True,
+ },
+ )
+
+ assert create_response.status_code == 200
+ created_payload = create_response.json()
+ # Phase 3: handler runs immediately, so POST may return completed when the
+ # noop handler finishes quickly in the TestClient synchronous context.
+ assert created_payload.get("status") in {"queued", "in_progress", "completed"}
+ response_id = created_payload["id"]
+
+ latest_snapshot: dict[str, Any] = {}
+
+ def _is_terminal() -> bool:
+ nonlocal latest_snapshot
+ snapshot_response = client.get(f"/responses/{response_id}")
+ if snapshot_response.status_code != 200:
+ return False
+ latest_snapshot = snapshot_response.json()
+ return latest_snapshot.get("status") in {"completed", "failed", "incomplete", "cancelled"}
+
+ ok, failure = poll_until(
+ _is_terminal,
+ timeout_s=5.0,
+ interval_s=0.05,
+ context_provider=lambda: {"last_status": latest_snapshot.get("status")},
+ label="background create terminal transition",
+ )
+ assert ok, failure
+
+
+def test_create__non_stream_returns_completed_response_with_output_items() -> None:
+ from azure.ai.agentserver.responses.streaming._event_stream import ResponseEventStream
+
+ def _output_producing_handler(request: Any, context: Any, cancellation_signal: Any):
+ async def _events():
+ stream = ResponseEventStream(response_id=context.response_id, model=getattr(request, "model", None))
+ yield stream.emit_created()
+ yield stream.emit_in_progress()
+
+ message_item = stream.add_output_item_message()
+ yield message_item.emit_added()
+
+ text_content = message_item.add_text_content()
+ yield text_content.emit_added()
+ yield text_content.emit_delta("hello")
+ yield text_content.emit_done()
+ yield message_item.emit_content_done(text_content)
+ yield message_item.emit_done()
+
+ yield stream.emit_completed()
+
+ return _events()
+
+ app = ResponsesAgentServerHost()
+ app.create_handler(_output_producing_handler)
+ client = TestClient(app)
+
+ response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": False,
+ },
+ )
+
+ assert response.status_code == 200
+ payload = response.json()
+ assert payload.get("status") == "completed"
+ assert "sequence_number" not in payload
+ assert isinstance(payload.get("output"), list)
+ assert len(payload["output"]) == 1
+ assert payload["output"][0].get("type") == "message"
+ assert payload["output"][0].get("content", [])[0].get("type") == "output_text"
+ assert payload["output"][0].get("content", [])[0].get("text") == "hello"
+
+
+def test_create__background_non_stream_get_eventually_returns_output_items() -> None:
+ from azure.ai.agentserver.responses.streaming._event_stream import ResponseEventStream
+
+ def _output_producing_handler(request: Any, context: Any, cancellation_signal: Any):
+ async def _events():
+ stream = ResponseEventStream(response_id=context.response_id, model=getattr(request, "model", None))
+ yield stream.emit_created()
+ yield stream.emit_in_progress()
+
+ message_item = stream.add_output_item_message()
+ yield message_item.emit_added()
+
+ text_content = message_item.add_text_content()
+ yield text_content.emit_added()
+ yield text_content.emit_delta("hello")
+ yield text_content.emit_done()
+ yield message_item.emit_content_done(text_content)
+ yield message_item.emit_done()
+
+ yield stream.emit_completed()
+
+ return _events()
+
+ app = ResponsesAgentServerHost()
+ app.create_handler(_output_producing_handler)
+ client = TestClient(app)
+
+ create_response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": True,
+ },
+ )
+
+ assert create_response.status_code == 200
+ response_id = create_response.json()["id"]
+
+ latest_snapshot: dict[str, Any] = {}
+
+ def _is_completed_with_output() -> bool:
+ nonlocal latest_snapshot
+ snapshot_response = client.get(f"/responses/{response_id}")
+ if snapshot_response.status_code != 200:
+ return False
+ latest_snapshot = snapshot_response.json()
+ output = latest_snapshot.get("output")
+ return latest_snapshot.get("status") == "completed" and isinstance(output, list) and len(output) == 1
+
+ ok, failure = poll_until(
+ _is_completed_with_output,
+ timeout_s=5.0,
+ interval_s=0.05,
+ context_provider=lambda: {
+ "last_status": latest_snapshot.get("status"),
+ "last_output_count": len(latest_snapshot.get("output", []))
+ if isinstance(latest_snapshot.get("output"), list)
+ else None,
+ },
+ label="background non-stream output availability",
+ )
+ assert ok, failure
+
+ assert latest_snapshot["output"][0].get("type") == "message"
+ assert latest_snapshot["output"][0].get("content", [])[0].get("type") == "output_text"
+ assert latest_snapshot["output"][0].get("content", [])[0].get("text") == "hello"
+ assert "sequence_number" not in latest_snapshot
+
+
+def test_create__model_is_optional_and_resolved_to_empty_or_default() -> None:
+ """B22 — model can be omitted. Resolution: request.model → default_model → empty string."""
+ client = _build_client()
+
+ response = client.post(
+ "/responses",
+ json={
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": False,
+ },
+ )
+
+ assert response.status_code == 200
+ payload = response.json()
+ # B22: model should be present (possibly empty string or server default)
+ assert "model" in payload
+ assert isinstance(payload["model"], str)
+
+
+def test_create__metadata_rejects_more_than_16_keys() -> None:
+ """Metadata constraints: max 16 key-value pairs."""
+ client = _build_client()
+
+ metadata = {f"key_{i}": f"value_{i}" for i in range(17)}
+ response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "metadata": metadata,
+ "stream": False,
+ "store": True,
+ "background": False,
+ },
+ )
+
+ assert response.status_code == 400
+ payload = response.json()
+ assert payload["error"]["type"] == "invalid_request_error"
+
+
+def test_create__metadata_rejects_key_longer_than_64_chars() -> None:
+ """Metadata constraints: key max 64 characters."""
+ client = _build_client()
+
+ metadata = {"a" * 65: "value"}
+ response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "metadata": metadata,
+ "stream": False,
+ "store": True,
+ "background": False,
+ },
+ )
+
+ assert response.status_code == 400
+ payload = response.json()
+ assert payload["error"]["type"] == "invalid_request_error"
+
+
+def test_create__metadata_rejects_value_longer_than_512_chars() -> None:
+ """Metadata constraints: value max 512 characters."""
+ client = _build_client()
+
+ metadata = {"key": "v" * 513}
+ response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "metadata": metadata,
+ "stream": False,
+ "store": True,
+ "background": False,
+ },
+ )
+
+ assert response.status_code == 400
+ payload = response.json()
+ assert payload["error"]["type"] == "invalid_request_error"
+
+
+def test_create__validation_error_includes_details_array() -> None:
+ """B29 — Invalid request returns 400 with details[] array."""
+ client = _build_client()
+
+ response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": "not-a-bool",
+ "store": True,
+ "background": False,
+ },
+ )
+
+ assert response.status_code == 400
+ payload = response.json()
+ error = payload.get("error")
+ assert error is not None
+ assert error.get("type") == "invalid_request_error"
+ # B29: should have details[] array
+ details = error.get("details")
+ assert isinstance(details, list), f"Expected details[] array per B29, got: {type(details)}"
+ assert len(details) >= 1
+ for detail in details:
+ assert detail.get("type") == "invalid_request_error"
+ assert detail.get("code") == "invalid_value"
+ assert "param" in detail
+ assert "message" in detail
+
+
+# ══════════════════════════════════════════════════════════
+# B1, B2, B3: Request body edge cases
+# ══════════════════════════════════════════════════════════
+
+
+def test_create__returns_400_for_empty_body() -> None:
+ """B1 — Empty request body → HTTP 400, error.type: invalid_request_error."""
+ client = _build_client()
+
+ response = client.post(
+ "/responses",
+ content=b"",
+ headers={"Content-Type": "application/json"},
+ )
+
+ assert response.status_code == 400
+ payload = response.json()
+ assert isinstance(payload.get("error"), dict)
+ assert payload["error"].get("type") == "invalid_request_error"
+
+
+def test_create__returns_400_for_invalid_json_body() -> None:
+ """B2 — Malformed JSON body → HTTP 400, error.type: invalid_request_error."""
+ client = _build_client()
+
+ response = client.post(
+ "/responses",
+ content=b"{invalid json",
+ headers={"Content-Type": "application/json"},
+ )
+
+ assert response.status_code == 400
+ payload = response.json()
+ assert isinstance(payload.get("error"), dict)
+ assert payload["error"].get("type") == "invalid_request_error"
+
+
+def test_create__ignores_unknown_fields_in_request_body() -> None:
+ """B3 — Unknown fields are ignored for forward compatibility → HTTP 200."""
+ client = _build_client()
+
+ response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": False,
+ "foo": "bar",
+ "unknown_nested": {"key": "value"},
+ },
+ )
+
+ assert response.status_code == 200
+ payload = response.json()
+ assert payload.get("object") == "response"
+
+
+# ══════════════════════════════════════════════════════════
+# Task 4.1 — _process_handler_events sync contract tests
+# ══════════════════════════════════════════════════════════
+
+
+def test_sync_handler_exception_returns_500() -> None:
+ """T5 — Handler raises an exception; stream=False → HTTP 500.
+
+ B8 / B13 for sync mode: any handler exception surfaces as HTTP 500.
+ """
+
+ def _raising_handler(request: Any, context: Any, cancellation_signal: Any):
+ async def _events():
+ raise RuntimeError("Simulated handler failure")
+ if False: # pragma: no cover
+ yield None
+
+ return _events()
+
+ _app = ResponsesAgentServerHost()
+ _app.create_handler(_raising_handler)
+ client = TestClient(_app, raise_server_exceptions=False)
+
+ response = client.post(
+ "/responses",
+ json={"model": "gpt-4o-mini", "input": "hello", "stream": False, "store": True, "background": False},
+ )
+
+ assert response.status_code == 500
+
+
+def test_sync_no_terminal_event_still_completes() -> None:
+ """T6 — Handler yields response.created + response.in_progress but no terminal.
+
+ stream=False → HTTP 200, status=failed.
+
+ S-015: When the handler completes without emitting a terminal event, the library
+ synthesises a ``response.failed`` terminal. Sync callers receive HTTP 200 with
+ a "failed" response body (not HTTP 500).
+ """
+ from azure.ai.agentserver.responses.streaming._event_stream import ResponseEventStream
+
+ def _no_terminal_handler(request: Any, context: Any, cancellation_signal: Any):
+ async def _events():
+ stream = ResponseEventStream(response_id=context.response_id, model=getattr(request, "model", None))
+ yield stream.emit_created()
+ yield stream.emit_in_progress()
+ # Intentionally omit terminal event (response.completed / response.failed)
+
+ return _events()
+
+ _app = ResponsesAgentServerHost()
+ _app.create_handler(_no_terminal_handler)
+ client = TestClient(_app)
+
+ response = client.post(
+ "/responses",
+ json={"model": "gpt-4o-mini", "input": "hello", "stream": False, "store": True, "background": False},
+ )
+
+ assert response.status_code == 200, (
+ f"S-015: sync no-terminal handler must return HTTP 200, got {response.status_code}"
+ )
+ payload = response.json()
+ assert payload.get("status") == "failed", (
+ f"S-015: synthesised terminal must set status to 'failed', got {payload.get('status')!r}"
+ )
+
+
+# ══════════════════════════════════════════════════════════
+# Phase 5 — Task 5.1: FR-006 / FR-007 first-event contract first-event contract tests
+# ══════════════════════════════════════════════════════════
+
+
+def test_s007_wrong_first_event_sync() -> None:
+ """T1 — Handler yields response.in_progress as first event; stream=False → HTTP 500.
+
+ FR-006: The first event MUST be response.created. Violations are treated as
+ pre-creation errors (B8) and map to HTTP 500 in sync mode.
+ Uses a raw dict to bypass ResponseEventStream internal ordering validation so
+ the orchestrator's _check_first_event_contract is the authority under test.
+ """
+
+ def _wrong_first_event_handler(request: Any, context: Any, cancellation_signal: Any):
+ async def _events():
+ # Raw dict bypasses ResponseEventStream validation so _check_first_event_contract runs
+ yield {
+ "type": "response.in_progress",
+ "response": {
+ "status": "in_progress",
+ "object": "response",
+ },
+ }
+
+ return _events()
+
+ _app = ResponsesAgentServerHost()
+ _app.create_handler(_wrong_first_event_handler)
+ client = TestClient(_app, raise_server_exceptions=False)
+
+ response = client.post(
+ "/responses",
+ json={"model": "gpt-4o-mini", "input": "hello", "stream": False, "store": True, "background": False},
+ )
+
+ assert response.status_code == 500, (
+ f"FR-006 violation in sync mode must return HTTP 500, got {response.status_code}"
+ )
+
+
+def test_s007_wrong_first_event_stream() -> None:
+ """T2 — Handler yields response.in_progress as first event; stream=True → SSE contains only 'error'.
+
+ FR-006: Violation → single standalone error event; no response.created in stream.
+ Uses a raw dict to bypass ResponseEventStream internal ordering validation.
+ """
+
+ def _wrong_first_event_handler(request: Any, context: Any, cancellation_signal: Any):
+ async def _events():
+ yield {
+ "type": "response.in_progress",
+ "response": {
+ "status": "in_progress",
+ "object": "response",
+ },
+ }
+
+ return _events()
+
+ _app = ResponsesAgentServerHost()
+ _app.create_handler(_wrong_first_event_handler)
+ client = TestClient(_app, raise_server_exceptions=False)
+
+ import json as _json
+
+ events: list[dict[str, Any]] = []
+ with client.stream(
+ "POST",
+ "/responses",
+ json={"model": "gpt-4o-mini", "input": "hello", "stream": True, "store": True, "background": False},
+ ) as response:
+ assert response.status_code == 200
+ current_type: str | None = None
+ current_data: str | None = None
+ for line in response.iter_lines():
+ if not line:
+ if current_type is not None:
+ events.append({"type": current_type, "data": _json.loads(current_data) if current_data else {}})
+ current_type = None
+ current_data = None
+ continue
+ if line.startswith("event:"):
+ current_type = line.split(":", 1)[1].strip()
+ elif line.startswith("data:"):
+ current_data = line.split(":", 1)[1].strip()
+ if current_type is not None:
+ events.append({"type": current_type, "data": _json.loads(current_data) if current_data else {}})
+
+ event_types = [e["type"] for e in events]
+ assert event_types == ["error"], (
+ f"FR-006 violation in stream mode must produce exactly ['error'], got: {event_types}"
+ )
+ assert "response.created" not in event_types
+
+
+def test_s008_mismatched_id_stream() -> None:
+ """T3 — Handler yields response.created with wrong id; stream=True → SSE contains only 'error'.
+
+ FR-006b: The id in response.created MUST equal the library-assigned response_id.
+ """
+
+ def _mismatched_id_handler(request: Any, context: Any, cancellation_signal: Any):
+ async def _events():
+ # Emit response.created with a deliberately wrong id
+ yield {
+ "type": "response.created",
+ "response": {
+ "id": "caresp_WRONG00000000000000000000000000000000000000000000",
+ "response_id": "caresp_WRONG00000000000000000000000000000000000000000000",
+ "status": "queued",
+ "object": "response",
+ },
+ }
+
+ return _events()
+
+ _app = ResponsesAgentServerHost()
+ _app.create_handler(_mismatched_id_handler)
+ client = TestClient(_app, raise_server_exceptions=False)
+
+ import json as _json
+
+ events: list[dict[str, Any]] = []
+ with client.stream(
+ "POST",
+ "/responses",
+ json={"model": "gpt-4o-mini", "input": "hello", "stream": True, "store": True, "background": False},
+ ) as response:
+ assert response.status_code == 200
+ current_type: str | None = None
+ current_data: str | None = None
+ for line in response.iter_lines():
+ if not line:
+ if current_type is not None:
+ events.append({"type": current_type, "data": _json.loads(current_data) if current_data else {}})
+ current_type = None
+ current_data = None
+ continue
+ if line.startswith("event:"):
+ current_type = line.split(":", 1)[1].strip()
+ elif line.startswith("data:"):
+ current_data = line.split(":", 1)[1].strip()
+ if current_type is not None:
+ events.append({"type": current_type, "data": _json.loads(current_data) if current_data else {}})
+
+ event_types = [e["type"] for e in events]
+ assert event_types == ["error"], f"FR-006b violation must produce exactly ['error'], got: {event_types}"
+
+
+def test_s009_terminal_status_on_created_stream() -> None:
+ """T4 — Handler yields response.created with terminal status; stream=True → SSE contains only 'error'.
+
+ FR-007: The status in response.created MUST be non-terminal (queued or in_progress).
+ """
+
+ def _terminal_on_created_handler(request: Any, context: Any, cancellation_signal: Any):
+ async def _events():
+ yield {
+ "type": "response.created",
+ "response": {
+ "status": "completed",
+ "object": "response",
+ },
+ }
+
+ return _events()
+
+ _app = ResponsesAgentServerHost()
+ _app.create_handler(_terminal_on_created_handler)
+ client = TestClient(_app, raise_server_exceptions=False)
+
+ import json as _json
+
+ events: list[dict[str, Any]] = []
+ with client.stream(
+ "POST",
+ "/responses",
+ json={"model": "gpt-4o-mini", "input": "hello", "stream": True, "store": True, "background": False},
+ ) as response:
+ assert response.status_code == 200
+ current_type: str | None = None
+ current_data: str | None = None
+ for line in response.iter_lines():
+ if not line:
+ if current_type is not None:
+ events.append({"type": current_type, "data": _json.loads(current_data) if current_data else {}})
+ current_type = None
+ current_data = None
+ continue
+ if line.startswith("event:"):
+ current_type = line.split(":", 1)[1].strip()
+ elif line.startswith("data:"):
+ current_data = line.split(":", 1)[1].strip()
+ if current_type is not None:
+ events.append({"type": current_type, "data": _json.loads(current_data) if current_data else {}})
+
+ event_types = [e["type"] for e in events]
+ assert event_types == ["error"], f"FR-007 violation must produce exactly ['error'], got: {event_types}"
+
+
+def test_s007_valid_handler_not_affected() -> None:
+ """T5 — Compliant handler emits response.created with correct id; stream=True → normal SSE flow.
+
+ Regression: the FR-006/FR-007 validation must not block valid handlers.
+ """
+ from azure.ai.agentserver.responses.streaming._event_stream import ResponseEventStream
+
+ def _compliant_handler(request: Any, context: Any, cancellation_signal: Any):
+ async def _events():
+ stream = ResponseEventStream(response_id=context.response_id, model=getattr(request, "model", None))
+ yield stream.emit_created()
+ yield stream.emit_completed()
+
+ return _events()
+
+ _app = ResponsesAgentServerHost()
+ _app.create_handler(_compliant_handler)
+ client = TestClient(_app)
+
+ import json as _json
+
+ events: list[dict[str, Any]] = []
+ with client.stream(
+ "POST",
+ "/responses",
+ json={"model": "gpt-4o-mini", "input": "hello", "stream": True, "store": True, "background": False},
+ ) as response:
+ assert response.status_code == 200
+ current_type: str | None = None
+ current_data: str | None = None
+ for line in response.iter_lines():
+ if not line:
+ if current_type is not None:
+ events.append({"type": current_type, "data": _json.loads(current_data) if current_data else {}})
+ current_type = None
+ current_data = None
+ continue
+ if line.startswith("event:"):
+ current_type = line.split(":", 1)[1].strip()
+ elif line.startswith("data:"):
+ current_data = line.split(":", 1)[1].strip()
+ if current_type is not None:
+ events.append({"type": current_type, "data": _json.loads(current_data) if current_data else {}})
+
+ event_types = [e["type"] for e in events]
+ assert "response.created" in event_types, (
+ f"Compliant handler must not be blocked; expected response.created in: {event_types}"
+ )
+ assert "error" not in event_types, f"Compliant handler must not produce error event; got: {event_types}"
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_create_mode_matrix.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_create_mode_matrix.py
new file mode 100644
index 000000000000..a32a38364a5c
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_create_mode_matrix.py
@@ -0,0 +1,247 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Contract matrix tests for POST /responses store/background/stream combinations.
+
+These cases mirror C1-C8 in docs/api-behaviour-contract.md.
+"""
+
+from __future__ import annotations
+
+import json
+from typing import Any
+
+import pytest
+from starlette.testclient import TestClient
+
+from azure.ai.agentserver.responses import ResponsesAgentServerHost
+
+
+def _noop_response_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Minimal handler used to wire contract matrix tests."""
+
+ async def _events():
+ if False: # pragma: no cover - keep async generator shape.
+ yield None
+
+ return _events()
+
+
+class _CreateModeCase:
+ def __init__(
+ self,
+ id: str,
+ store: bool,
+ background: bool,
+ stream: bool,
+ expected_http: int,
+ expected_content_prefix: str,
+ expected_get_status: int | None = None,
+ ) -> None:
+ self.id = id
+ self.store = store
+ self.background = background
+ self.stream = stream
+ self.expected_http = expected_http
+ self.expected_content_prefix = expected_content_prefix
+ self.expected_get_status = expected_get_status
+
+
+def _build_client() -> TestClient:
+ app = ResponsesAgentServerHost()
+ app.create_handler(_noop_response_handler)
+ return TestClient(app)
+
+
+def _collect_sse_events(response: Any) -> list[dict[str, Any]]:
+ events: list[dict[str, Any]] = []
+ current_type: str | None = None
+ current_data: str | None = None
+
+ for line in response.iter_lines():
+ if not line:
+ if current_type is not None:
+ payload = json.loads(current_data) if current_data else {}
+ events.append({"type": current_type, "data": payload})
+ current_type = None
+ current_data = None
+ continue
+
+ if line.startswith("event:"):
+ current_type = line.split(":", 1)[1].strip()
+ elif line.startswith("data:"):
+ current_data = line.split(":", 1)[1].strip()
+
+ if current_type is not None:
+ payload = json.loads(current_data) if current_data else {}
+ events.append({"type": current_type, "data": payload})
+
+ return events
+
+
+def _extract_response_id_from_sse_text(raw_text: str) -> str | None:
+ current_type: str | None = None
+ current_data: str | None = None
+
+ for line in raw_text.splitlines():
+ if not line:
+ if current_type is not None and current_data:
+ payload = json.loads(current_data)
+ candidate = payload.get("response", {}).get("id")
+ if isinstance(candidate, str) and candidate:
+ return candidate
+ current_type = None
+ current_data = None
+ continue
+
+ if line.startswith("event:"):
+ current_type = line.split(":", 1)[1].strip()
+ elif line.startswith("data:"):
+ current_data = line.split(":", 1)[1].strip()
+
+ if current_type is not None and current_data:
+ payload = json.loads(current_data)
+ candidate = payload.get("response", {}).get("id")
+ if isinstance(candidate, str) and candidate:
+ return candidate
+
+ return None
+
+
+_CASES: tuple[_CreateModeCase, ...] = (
+ _CreateModeCase(
+ id="C1",
+ store=True,
+ background=False,
+ stream=False,
+ expected_http=200,
+ expected_content_prefix="application/json",
+ expected_get_status=200,
+ ),
+ _CreateModeCase(
+ id="C2",
+ store=True,
+ background=False,
+ stream=True,
+ expected_http=200,
+ expected_content_prefix="text/event-stream",
+ expected_get_status=200,
+ ),
+ _CreateModeCase(
+ id="C3",
+ store=True,
+ background=True,
+ stream=False,
+ expected_http=200,
+ expected_content_prefix="application/json",
+ expected_get_status=200,
+ ),
+ _CreateModeCase(
+ id="C4",
+ store=True,
+ background=True,
+ stream=True,
+ expected_http=200,
+ expected_content_prefix="text/event-stream",
+ expected_get_status=200,
+ ),
+ _CreateModeCase(
+ id="C5",
+ store=False,
+ background=False,
+ stream=False,
+ expected_http=200,
+ expected_content_prefix="application/json",
+ expected_get_status=404,
+ ),
+ _CreateModeCase(
+ id="C6",
+ store=False,
+ background=False,
+ stream=True,
+ expected_http=200,
+ expected_content_prefix="text/event-stream",
+ expected_get_status=404,
+ ),
+ _CreateModeCase(
+ id="C7",
+ store=False,
+ background=True,
+ stream=False,
+ expected_http=400,
+ expected_content_prefix="application/json",
+ expected_get_status=None,
+ ),
+ _CreateModeCase(
+ id="C8",
+ store=False,
+ background=True,
+ stream=True,
+ expected_http=400,
+ expected_content_prefix="application/json",
+ expected_get_status=None,
+ ),
+)
+
+
+@pytest.mark.parametrize(
+ "case",
+ [
+ *_CASES,
+ ],
+ ids=[case.id for case in _CASES],
+)
+def test_create_mode_matrix__http_and_content_type(case: _CreateModeCase) -> None:
+ client = _build_client()
+ payload = {
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": case.stream,
+ "store": case.store,
+ "background": case.background,
+ }
+
+ response = client.post("/responses", json=payload)
+
+ assert response.status_code == case.expected_http
+ assert response.headers.get("content-type", "").startswith(case.expected_content_prefix)
+ # Contract: C7/C8 (store=false, background=true) → error.code="unsupported_parameter", error.param="background"
+ if case.id in {"C7", "C8"}:
+ error = response.json().get("error", {})
+ assert error.get("code") == "unsupported_parameter"
+ assert error.get("param") == "background"
+
+ if case.expected_http == 400:
+ body = response.json()
+ assert isinstance(body.get("error"), dict)
+ assert body["error"].get("type") == "invalid_request_error"
+
+
+@pytest.mark.parametrize(
+ "case",
+ [case for case in _CASES if case.expected_http == 200 and case.expected_get_status is not None],
+ ids=[case.id for case in _CASES if case.expected_http == 200 and case.expected_get_status is not None],
+)
+def test_create_mode_matrix__get_visibility(case: _CreateModeCase) -> None:
+ client = _build_client()
+ payload = {
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": case.stream,
+ "store": case.store,
+ "background": case.background,
+ }
+
+ create_response = client.post("/responses", json=payload)
+ assert create_response.status_code == 200
+ content_type = create_response.headers.get("content-type", "")
+
+ if content_type.startswith("text/event-stream"):
+ response_id = _extract_response_id_from_sse_text(create_response.text)
+ else:
+ body = create_response.json()
+ response_id = body.get("id")
+
+ assert isinstance(response_id, str) and response_id
+
+ get_response = client.get(f"/responses/{response_id}")
+ assert get_response.status_code == case.expected_get_status
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_cross_api_e2e.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_cross_api_e2e.py
new file mode 100644
index 000000000000..74a2c0109f33
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_cross_api_e2e.py
@@ -0,0 +1,940 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Cross-API E2E behavioural tests exercising multi-endpoint flows on a single response.
+
+Each test calls 2+ endpoints and asserts cross-endpoint consistency per the contract.
+Validates: E1–E44 from the cross-API matrix.
+
+Python port of CrossApiE2eTests.
+"""
+
+from __future__ import annotations
+
+import asyncio
+import json
+import threading
+from typing import Any
+
+import pytest
+from starlette.testclient import TestClient
+
+from azure.ai.agentserver.responses import ResponsesAgentServerHost
+from azure.ai.agentserver.responses._id_generator import IdGenerator
+from azure.ai.agentserver.responses.streaming._event_stream import ResponseEventStream
+from tests._helpers import EventGate, poll_until
+
+# ════════════════════════════════════════════════════════════
+# Shared helpers
+# ════════════════════════════════════════════════════════════
+
+
+def _collect_sse_events(response: Any) -> list[dict[str, Any]]:
+ """Parse SSE lines from a streaming response into a list of event dicts."""
+ events: list[dict[str, Any]] = []
+ current_type: str | None = None
+ current_data: str | None = None
+
+ for line in response.iter_lines():
+ if not line:
+ if current_type is not None:
+ payload = json.loads(current_data) if current_data else {}
+ events.append({"type": current_type, "data": payload})
+ current_type = None
+ current_data = None
+ continue
+
+ if line.startswith("event:"):
+ current_type = line.split(":", 1)[1].strip()
+ elif line.startswith("data:"):
+ current_data = line.split(":", 1)[1].strip()
+
+ if current_type is not None:
+ payload = json.loads(current_data) if current_data else {}
+ events.append({"type": current_type, "data": payload})
+
+ return events
+
+
+def _wait_for_terminal(
+ client: TestClient,
+ response_id: str,
+ *,
+ timeout_s: float = 5.0,
+) -> dict[str, Any]:
+ """Poll GET until the response reaches a terminal status."""
+ latest: dict[str, Any] = {}
+ terminal_statuses = {"completed", "failed", "incomplete", "cancelled"}
+
+ def _is_terminal() -> bool:
+ nonlocal latest
+ r = client.get(f"/responses/{response_id}")
+ if r.status_code != 200:
+ return False
+ latest = r.json()
+ return latest.get("status") in terminal_statuses
+
+ ok, failure = poll_until(
+ _is_terminal,
+ timeout_s=timeout_s,
+ interval_s=0.05,
+ context_provider=lambda: {"status": latest.get("status")},
+ label=f"wait_for_terminal({response_id})",
+ )
+ assert ok, failure
+ return latest
+
+
+# ════════════════════════════════════════════════════════════
+# Handler factories
+# ════════════════════════════════════════════════════════════
+
+
+def _noop_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Minimal handler — emits no events (framework auto-completes)."""
+
+ async def _events():
+ if False: # pragma: no cover
+ yield None
+
+ return _events()
+
+
+def _simple_text_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Handler that emits created + completed with no output items."""
+
+ async def _events():
+ stream = ResponseEventStream(response_id=context.response_id, model=getattr(request, "model", None))
+ yield stream.emit_created()
+ yield stream.emit_completed()
+
+ return _events()
+
+
+def _output_producing_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Handler that produces a single message output item with text 'hello'."""
+
+ async def _events():
+ stream = ResponseEventStream(response_id=context.response_id, model=getattr(request, "model", None))
+ yield stream.emit_created()
+ yield stream.emit_in_progress()
+ message = stream.add_output_item_message()
+ yield message.emit_added()
+ text = message.add_text_content()
+ yield text.emit_added()
+ yield text.emit_delta("hello")
+ yield text.emit_done()
+ yield message.emit_content_done(text)
+ yield message.emit_done()
+ yield stream.emit_completed()
+
+ return _events()
+
+
+def _throwing_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Handler that raises after emitting created."""
+
+ async def _events():
+ stream = ResponseEventStream(response_id=context.response_id, model=getattr(request, "model", None))
+ yield stream.emit_created()
+ raise RuntimeError("Simulated handler failure")
+
+ return _events()
+
+
+def _incomplete_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Handler that emits an incomplete terminal event."""
+
+ async def _events():
+ stream = ResponseEventStream(response_id=context.response_id, model=getattr(request, "model", None))
+ yield stream.emit_created()
+ yield stream.emit_incomplete(reason="max_output_tokens")
+
+ return _events()
+
+
+def _delayed_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Handler that sleeps briefly, checking for cancellation."""
+
+ async def _events():
+ if cancellation_signal.is_set():
+ return
+ await asyncio.sleep(0.25)
+ if cancellation_signal.is_set():
+ return
+ if False: # pragma: no cover
+ yield None
+
+ return _events()
+
+
+def _cancellable_bg_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Handler that emits response.created then blocks until cancelled.
+
+ Suitable for Phase 3 cancel tests: response_created_signal is set on the
+ first event, so run_background returns immediately with in_progress status
+ while the task continues running until cancellation.
+ """
+
+ async def _events():
+ stream = ResponseEventStream(
+ response_id=context.response_id,
+ model=getattr(request, "model", None),
+ )
+ yield stream.emit_created() # unblocks run_background
+ # Block until cancelled
+ while not cancellation_signal.is_set():
+ await asyncio.sleep(0.01)
+
+ return _events()
+
+
+def _make_blocking_sync_handler(started_gate: EventGate, release_gate: threading.Event):
+ """Factory for a handler that blocks on a gate, for testing concurrent GET/Cancel on in-flight sync requests."""
+
+ def handler(request: Any, context: Any, cancellation_signal: Any):
+ async def _events():
+ started_gate.signal(True)
+ while not release_gate.is_set():
+ if cancellation_signal.is_set():
+ return
+ await asyncio.sleep(0.01)
+ if False: # pragma: no cover
+ yield None
+
+ return _events()
+
+ return handler
+
+
+def _make_two_item_gated_handler(
+ item1_emitted: EventGate,
+ item1_gate: threading.Event,
+ item2_emitted: EventGate,
+ item2_gate: threading.Event,
+):
+ """Factory for a handler that emits two message output items with gates between them."""
+
+ def handler(request: Any, context: Any, cancellation_signal: Any):
+ async def _events():
+ stream = ResponseEventStream(response_id=context.response_id, model=getattr(request, "model", None))
+ yield stream.emit_created()
+ yield stream.emit_in_progress()
+
+ # First message
+ msg1 = stream.add_output_item_message()
+ yield msg1.emit_added()
+ text1 = msg1.add_text_content()
+ yield text1.emit_added()
+ yield text1.emit_delta("Hello")
+ yield text1.emit_done()
+ yield msg1.emit_content_done(text1)
+ yield msg1.emit_done()
+
+ item1_emitted.signal()
+ while not item1_gate.is_set():
+ if cancellation_signal.is_set():
+ return
+ await asyncio.sleep(0.01)
+
+ # Second message
+ msg2 = stream.add_output_item_message()
+ yield msg2.emit_added()
+ text2 = msg2.add_text_content()
+ yield text2.emit_added()
+ yield text2.emit_delta("World")
+ yield text2.emit_done()
+ yield msg2.emit_content_done(text2)
+ yield msg2.emit_done()
+
+ item2_emitted.signal()
+ while not item2_gate.is_set():
+ if cancellation_signal.is_set():
+ return
+ await asyncio.sleep(0.01)
+
+ yield stream.emit_completed()
+
+ return _events()
+
+ return handler
+
+
+def _build_client(handler: Any | None = None) -> TestClient:
+ app = ResponsesAgentServerHost()
+ app.create_handler(handler or _noop_handler)
+ return TestClient(app)
+
+
+def _create_sync_response(client: TestClient, **extra: Any) -> str:
+ """POST /responses with stream=False, store=True, background=False. Returns response_id."""
+ payload = {"model": "gpt-4o-mini", "input": "hello", "stream": False, "store": True, "background": False}
+ payload.update(extra)
+ r = client.post("/responses", json=payload)
+ assert r.status_code == 200
+ return r.json()["id"]
+
+
+def _create_streaming_response(client: TestClient, **extra: Any) -> str:
+ """POST /responses with stream=True. Consumes the SSE stream and returns response_id."""
+ payload = {"model": "gpt-4o-mini", "input": "hello", "stream": True, "store": True, "background": False}
+ payload.update(extra)
+ with client.stream("POST", "/responses", json=payload) as resp:
+ assert resp.status_code == 200
+ events = _collect_sse_events(resp)
+ assert events, "Expected at least one SSE event"
+ return events[0]["data"]["response"]["id"]
+
+
+def _create_bg_response(client: TestClient, **extra: Any) -> str:
+ """POST /responses with background=True, stream=False. Returns response_id."""
+ payload = {"model": "gpt-4o-mini", "input": "hello", "stream": False, "store": True, "background": True}
+ payload.update(extra)
+ r = client.post("/responses", json=payload)
+ assert r.status_code == 200
+ return r.json()["id"]
+
+
+def _create_bg_streaming_response(client: TestClient, **extra: Any) -> str:
+ """POST /responses with background=True, stream=True. Consumes SSE and returns response_id."""
+ payload = {"model": "gpt-4o-mini", "input": "hello", "stream": True, "store": True, "background": True}
+ payload.update(extra)
+ with client.stream("POST", "/responses", json=payload) as resp:
+ assert resp.status_code == 200
+ events = _collect_sse_events(resp)
+ assert events, "Expected at least one SSE event"
+ return events[0]["data"]["response"]["id"]
+
+
+# ════════════════════════════════════════════════════════════
+# C5/C6 — Ephemeral (store=false): E30–E35
+# ════════════════════════════════════════════════════════════
+
+
+class TestEphemeralStoreFalse:
+ """store=false responses are not retrievable or cancellable (B14)."""
+
+ @pytest.mark.parametrize(
+ "stream, operation",
+ [
+ (False, "GET"), # E30: C5 → GET JSON → 404
+ (False, "GET_SSE"), # E31: C5 → GET SSE replay → 404
+ (True, "GET"), # E33: C6 → GET JSON → 404
+ (True, "GET_SSE"), # E34: C6 → GET SSE replay → 404
+ ],
+ ids=["E30-sync-GET", "E31-sync-GET_SSE", "E33-stream-GET", "E34-stream-GET_SSE"],
+ )
+ def test_ephemeral_store_false_cross_api_returns_404(self, stream: bool, operation: str) -> None:
+ """B14 — store=false responses are not retrievable."""
+ handler = _simple_text_handler if stream else _noop_handler
+ client = _build_client(handler)
+
+ create_payload: dict[str, Any] = {
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "store": False,
+ "stream": stream,
+ "background": False,
+ }
+
+ if stream:
+ with client.stream("POST", "/responses", json=create_payload) as resp:
+ assert resp.status_code == 200
+ events = _collect_sse_events(resp)
+ response_id = events[0]["data"]["response"]["id"]
+ else:
+ r = client.post("/responses", json=create_payload)
+ assert r.status_code == 200
+ response_id = r.json()["id"]
+
+ if operation == "GET":
+ result = client.get(f"/responses/{response_id}")
+ else:
+ result = client.get(f"/responses/{response_id}?stream=true")
+
+ # B14: store=false responses are never persisted → 404 for any GET.
+ assert result.status_code == 404
+
+ @pytest.mark.parametrize(
+ "stream",
+ [False, True],
+ ids=["E32-sync-cancel", "E35-stream-cancel"],
+ )
+ def test_ephemeral_store_false_cancel_rejected(self, stream: bool) -> None:
+ """B1, B14 — store=false response not bg, cancel rejected.
+
+ With unconditional runtime-state registration,
+ the cancel endpoint finds the record and returns 400 "Cannot cancel a
+ synchronous response." for non-bg requests.
+ """
+ handler = _simple_text_handler if stream else _noop_handler
+ client = _build_client(handler)
+
+ create_payload: dict[str, Any] = {
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "store": False,
+ "stream": stream,
+ "background": False,
+ }
+
+ if stream:
+ with client.stream("POST", "/responses", json=create_payload) as resp:
+ assert resp.status_code == 200
+ events = _collect_sse_events(resp)
+ response_id = events[0]["data"]["response"]["id"]
+ else:
+ r = client.post("/responses", json=create_payload)
+ assert r.status_code == 200
+ response_id = r.json()["id"]
+
+ result = client.post(f"/responses/{response_id}/cancel")
+ # Contract: record found in runtime state → 400 (cannot cancel synchronous).
+ assert result.status_code == 400
+
+
+# ════════════════════════════════════════════════════════════
+# C1 — Synchronous, stored (store=T, bg=F, stream=F): E1–E6
+# ════════════════════════════════════════════════════════════
+
+
+class TestC1SyncStored:
+ """Synchronous non-streaming stored response cross-API tests."""
+
+ def test_e1_create_then_get_after_completion_returns_200_completed(self) -> None:
+ """B5 — JSON GET returns current snapshot; B16 — after completion, accessible."""
+ client = _build_client()
+ response_id = _create_sync_response(client)
+
+ get_resp = client.get(f"/responses/{response_id}")
+ assert get_resp.status_code == 200
+ payload = get_resp.json()
+ assert payload["status"] == "completed"
+
+ def test_e2_create_get_during_in_flight_returns_404(self) -> None:
+ """B16 — non-bg in-flight → 404."""
+ started_gate = EventGate()
+ release_gate = threading.Event()
+ handler = _make_blocking_sync_handler(started_gate, release_gate)
+ client = _build_client(handler)
+ response_id = IdGenerator.new_response_id()
+
+ create_result: dict[str, Any] = {}
+
+ def _do_create() -> None:
+ try:
+ create_result["response"] = client.post(
+ "/responses",
+ json={
+ "response_id": response_id,
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": False,
+ },
+ )
+ except Exception as exc: # pragma: no cover
+ create_result["error"] = exc
+
+ t = threading.Thread(target=_do_create, daemon=True)
+ t.start()
+
+ started, _ = started_gate.wait(timeout_s=5.0)
+ assert started, "Handler should have started"
+
+ # GET during in-flight → 404
+ get_resp = client.get(f"/responses/{response_id}")
+ assert get_resp.status_code == 404
+
+ # Release handler
+ release_gate.set()
+ t.join(timeout=5.0)
+ assert not t.is_alive()
+
+ # Now GET succeeds
+ get_after = client.get(f"/responses/{response_id}")
+ assert get_after.status_code == 200
+ assert get_after.json()["status"] == "completed"
+
+ def test_e3_create_then_get_sse_replay_returns_400(self) -> None:
+ """B2 — SSE replay requires background."""
+ client = _build_client()
+ response_id = _create_sync_response(client)
+
+ get_resp = client.get(f"/responses/{response_id}?stream=true")
+ assert get_resp.status_code == 400
+
+ def test_e4_create_then_cancel_after_completion_returns_400(self) -> None:
+ """B1 — cancel requires background; B12 — cancel rejection."""
+ client = _build_client()
+ response_id = _create_sync_response(client)
+
+ cancel_resp = client.post(f"/responses/{response_id}/cancel")
+ assert cancel_resp.status_code == 400
+ payload = cancel_resp.json()
+ assert payload["error"]["type"] == "invalid_request_error"
+ assert "synchronous" in payload["error"]["message"].lower()
+
+ def test_e5_create_cancel_during_in_flight_returns_400(self) -> None:
+ """B1 — cancel requires background; non-bg → 400."""
+ started_gate = EventGate()
+ release_gate = threading.Event()
+ handler = _make_blocking_sync_handler(started_gate, release_gate)
+ client = _build_client(handler)
+ response_id = IdGenerator.new_response_id()
+
+ def _do_create() -> None:
+ client.post(
+ "/responses",
+ json={
+ "response_id": response_id,
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": False,
+ },
+ )
+
+ t = threading.Thread(target=_do_create, daemon=True)
+ t.start()
+
+ started, _ = started_gate.wait(timeout_s=5.0)
+ assert started
+
+ # Cancel during in-flight non-bg → 404 (not yet stored, S7)
+ cancel_resp = client.post(f"/responses/{response_id}/cancel")
+ assert cancel_resp.status_code == 404, "S7: non-background in-flight cancel must return 404 (not yet stored)"
+
+ release_gate.set()
+ t.join(timeout=5.0)
+
+ def test_e6_disconnect_then_get_returns_not_found(self) -> None:
+ """B17 — connection termination cancels non-bg; not persisted → GET 404.
+
+ Note: Starlette TestClient does not deterministically simulate client disconnect.
+ We skip this test as the Python SDK disconnect tests need a real ASGI harness.
+ """
+ pytest.skip(
+ "Starlette TestClient does not deterministically surface client-disconnect "
+ "cancellation signals. Requires real ASGI harness."
+ )
+
+
+# ════════════════════════════════════════════════════════════
+# C2 — Synchronous streaming, stored (store=T, bg=F, stream=T): E7–E12
+# ════════════════════════════════════════════════════════════
+
+
+class TestC2StreamStored:
+ """Synchronous streaming stored response cross-API tests."""
+
+ def test_e7_stream_create_then_get_after_stream_ends_returns_200_completed(self) -> None:
+ """B5 — JSON GET returns current snapshot."""
+ client = _build_client(_simple_text_handler)
+ response_id = _create_streaming_response(client)
+
+ get_resp = client.get(f"/responses/{response_id}")
+ assert get_resp.status_code == 200
+ assert get_resp.json()["status"] == "completed"
+
+ # E8 moved to test_cross_api_e2e_async.py (requires async ASGI client)
+
+ def test_e9_stream_create_then_get_sse_replay_returns_400(self) -> None:
+ """B2 — SSE replay requires background."""
+ client = _build_client(_simple_text_handler)
+ response_id = _create_streaming_response(client)
+
+ get_resp = client.get(f"/responses/{response_id}?stream=true")
+ assert get_resp.status_code == 400
+
+ def test_e10_stream_create_then_cancel_after_stream_ends_returns_400(self) -> None:
+ """B1, B12 — cancel non-bg rejected."""
+ client = _build_client(_simple_text_handler)
+ response_id = _create_streaming_response(client)
+
+ cancel_resp = client.post(f"/responses/{response_id}/cancel")
+ assert cancel_resp.status_code == 400
+ assert "synchronous" in cancel_resp.json()["error"]["message"].lower()
+
+ # E11 moved to test_cross_api_e2e_async.py (requires async ASGI client)
+
+ def test_e12_stream_disconnect_then_get_returns_not_found(self) -> None:
+ """B17 — connection termination cancels non-bg.
+
+ Skipped: same limitation as E6.
+ """
+ pytest.skip("Starlette TestClient does not deterministically surface client-disconnect cancellation signals.")
+
+
+# ════════════════════════════════════════════════════════════
+# C3 — Background poll, stored (store=T, bg=T, stream=F): E13–E19, E36–E39
+# ════════════════════════════════════════════════════════════
+
+
+class TestC3BgPollStored:
+ """Background non-streaming stored response cross-API tests."""
+
+ def test_e13_bg_create_then_get_immediate_returns_queued_or_in_progress(self) -> None:
+ """B5, B10 — background non-streaming returns immediately.
+
+ Background POST now returns before the handler starts, so the
+ initial status is always queued. The Starlette TestClient may
+ process the background task before the subsequent GET, so we
+ accept queued, in_progress, or completed.
+ """
+ client = _build_client()
+
+ r = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": True,
+ },
+ )
+ assert r.status_code == 200
+ create_payload = r.json()
+ response_id = create_payload["id"]
+ # Contract: background POST returns immediately with queued snapshot
+ assert create_payload["status"] in {"queued", "in_progress", "completed"}
+
+ get_resp = client.get(f"/responses/{response_id}")
+ assert get_resp.status_code == 200
+ assert get_resp.json()["status"] in {"queued", "in_progress", "completed"}
+
+ # Wait for terminal
+ _wait_for_terminal(client, response_id)
+
+ def test_e14_bg_create_then_get_after_completion_returns_completed(self) -> None:
+ """B5, B10."""
+ client = _build_client()
+ response_id = _create_bg_response(client)
+ _wait_for_terminal(client, response_id)
+
+ get_resp = client.get(f"/responses/{response_id}")
+ assert get_resp.status_code == 200
+ assert get_resp.json()["status"] == "completed"
+
+ def test_e15_bg_create_then_get_sse_replay_returns_400(self) -> None:
+ """B2 — SSE replay requires stream=true at creation."""
+ client = _build_client()
+ response_id = _create_bg_response(client)
+ _wait_for_terminal(client, response_id)
+
+ get_resp = client.get(f"/responses/{response_id}?stream=true")
+ assert get_resp.status_code == 400
+
+ def test_e16_bg_create_cancel_then_get_returns_cancelled(self) -> None:
+ """B7 — cancelled status; B11 — output cleared."""
+ client = _build_client(_cancellable_bg_handler)
+ response_id = _create_bg_response(client)
+
+ cancel_resp = client.post(f"/responses/{response_id}/cancel")
+ assert cancel_resp.status_code == 200
+
+ _wait_for_terminal(client, response_id)
+
+ get_resp = client.get(f"/responses/{response_id}")
+ assert get_resp.status_code == 200
+ snapshot = get_resp.json()
+ assert snapshot["status"] == "cancelled"
+ assert snapshot["output"] == []
+
+ def test_e17_bg_create_wait_complete_then_cancel_returns_400(self) -> None:
+ """B12 — cannot cancel a completed response."""
+ client = _build_client()
+ response_id = _create_bg_response(client)
+ _wait_for_terminal(client, response_id)
+
+ cancel_resp = client.post(f"/responses/{response_id}/cancel")
+ assert cancel_resp.status_code == 400
+ assert "Cannot cancel a completed response" in cancel_resp.json()["error"]["message"]
+
+ def test_e18_bg_create_cancel_cancel_returns_200_idempotent(self) -> None:
+ """B3 — cancel is idempotent."""
+ client = _build_client(_cancellable_bg_handler)
+ response_id = _create_bg_response(client)
+
+ cancel1 = client.post(f"/responses/{response_id}/cancel")
+ assert cancel1.status_code == 200
+
+ cancel2 = client.post(f"/responses/{response_id}/cancel")
+ assert cancel2.status_code == 200
+
+ _wait_for_terminal(client, response_id)
+
+ def test_e19_bg_create_disconnect_then_get_returns_completed(self) -> None:
+ """B18 — background responses unaffected by connection termination."""
+ client = _build_client()
+ response_id = _create_bg_response(client)
+ # bg POST already returned — bg mode is immune to disconnect
+ _wait_for_terminal(client, response_id)
+
+ get_resp = client.get(f"/responses/{response_id}")
+ assert get_resp.status_code == 200
+ assert get_resp.json()["status"] == "completed"
+
+ def test_e36_bg_handler_throws_then_get_returns_failed(self) -> None:
+ """B5, B6 — failed status invariants."""
+ client = _build_client(_throwing_handler)
+ response_id = _create_bg_response(client)
+ _wait_for_terminal(client, response_id)
+
+ get_resp = client.get(f"/responses/{response_id}")
+ assert get_resp.status_code == 200
+ snapshot = get_resp.json()
+ assert snapshot["status"] == "failed"
+ # B6: failed → error must be non-null
+ error = snapshot.get("error")
+ assert error is not None, "B6: error must be non-null for status=failed"
+ assert "code" in error
+ assert "message" in error
+
+ def test_e37_bg_handler_incomplete_then_get_returns_incomplete(self) -> None:
+ """B5, B6 — incomplete status invariants."""
+ client = _build_client(_incomplete_handler)
+ response_id = _create_bg_response(client)
+ _wait_for_terminal(client, response_id)
+
+ get_resp = client.get(f"/responses/{response_id}")
+ assert get_resp.status_code == 200
+ snapshot = get_resp.json()
+ assert snapshot["status"] == "incomplete"
+ # B6: incomplete → error null
+ assert snapshot.get("error") is None
+
+ def test_e38_bg_handler_throws_then_cancel_returns_400(self) -> None:
+ """B12 — cancel rejection on failed."""
+ client = _build_client(_throwing_handler)
+ response_id = _create_bg_response(client)
+ _wait_for_terminal(client, response_id)
+
+ cancel_resp = client.post(f"/responses/{response_id}/cancel")
+ assert cancel_resp.status_code == 400
+ assert "Cannot cancel a failed response" in cancel_resp.json()["error"]["message"]
+
+ def test_e39_bg_handler_incomplete_then_cancel_returns_400(self) -> None:
+ """B12 — cancel rejection on incomplete (terminal status)."""
+ client = _build_client(_incomplete_handler)
+ response_id = _create_bg_response(client)
+ _wait_for_terminal(client, response_id)
+
+ cancel_resp = client.post(f"/responses/{response_id}/cancel")
+ assert cancel_resp.status_code == 400
+
+ def test_e44_bg_progressive_polling_output_grows(self) -> None:
+ """B5, B10 — background poll shows progressive output accumulation.
+
+ Verifies that after completion, the response contains full output.
+ Note: Fine-grained mid-stream gating across async/sync boundary
+ is unreliable with Starlette TestClient, so we verify final state.
+ """
+ client = _build_client(_output_producing_handler)
+ response_id = _create_bg_response(client)
+ terminal = _wait_for_terminal(client, response_id)
+
+ assert terminal["status"] == "completed"
+ assert isinstance(terminal.get("output"), list)
+ assert len(terminal["output"]) >= 1
+ assert terminal["output"][0]["type"] == "message"
+ assert terminal["output"][0]["content"][0]["text"] == "hello"
+
+
+# ════════════════════════════════════════════════════════════
+# C4 — Background streaming, stored (store=T, bg=T, stream=T): E20–E29, E40–E42
+# ════════════════════════════════════════════════════════════
+
+
+class TestC4BgStreamStored:
+ """Background streaming stored response cross-API tests."""
+
+ def test_e21_bg_stream_create_get_after_stream_ends_returns_completed(self) -> None:
+ """B5."""
+ client = _build_client(_simple_text_handler)
+ response_id = _create_bg_streaming_response(client)
+ _wait_for_terminal(client, response_id)
+
+ get_resp = client.get(f"/responses/{response_id}")
+ assert get_resp.status_code == 200
+ assert get_resp.json()["status"] == "completed"
+
+ def test_e22_bg_stream_completed_sse_replay_returns_all_events(self) -> None:
+ """B4 — SSE replay; B9 — sequence numbers; B26 — terminal event."""
+ client = _build_client(_simple_text_handler)
+ response_id = _create_bg_streaming_response(client)
+ _wait_for_terminal(client, response_id)
+
+ with client.stream("GET", f"/responses/{response_id}?stream=true") as replay_resp:
+ assert replay_resp.status_code == 200
+ events = _collect_sse_events(replay_resp)
+
+ assert len(events) >= 2, "Replay should have at least 2 events"
+
+ # B26: terminal event is response.completed
+ assert events[-1]["type"] == "response.completed"
+
+ # B9: sequence numbers monotonically increasing
+ seq_nums = [e["data"]["sequence_number"] for e in events]
+ for i in range(1, len(seq_nums)):
+ assert seq_nums[i] > seq_nums[i - 1]
+
+ def test_e23_bg_stream_sse_replay_with_starting_after_skips_events(self) -> None:
+ """B4 — starting_after cursor."""
+ client = _build_client(_simple_text_handler)
+ response_id = _create_bg_streaming_response(client)
+ _wait_for_terminal(client, response_id)
+
+ # Full replay
+ with client.stream("GET", f"/responses/{response_id}?stream=true") as full_resp:
+ full_events = _collect_sse_events(full_resp)
+ assert len(full_events) >= 2, "Need at least 2 events for cursor test"
+
+ first_seq = full_events[0]["data"]["sequence_number"]
+
+ # Replay with starting_after = first seq → skips first event
+ with client.stream("GET", f"/responses/{response_id}?stream=true&starting_after={first_seq}") as cursor_resp:
+ assert cursor_resp.status_code == 200
+ cursor_events = _collect_sse_events(cursor_resp)
+
+ assert len(cursor_events) == len(full_events) - 1
+
+ def test_e24_bg_stream_cancel_immediate_returns_cancelled(self) -> None:
+ """B7, B11 — cancel → cancelled with 0 output.
+
+ Uses non-streaming bg path because the synchronous TestClient cannot
+ issue concurrent requests during an active SSE stream. The actual
+ bg+stream mid-stream cancel is tested in test_cross_api_e2e_async.py
+ (E25) using the async ASGI client.
+ """
+ client = _build_client(_cancellable_bg_handler)
+ response_id = _create_bg_response(client)
+
+ cancel_resp = client.post(f"/responses/{response_id}/cancel")
+ assert cancel_resp.status_code == 200
+
+ _wait_for_terminal(client, response_id)
+
+ get_resp = client.get(f"/responses/{response_id}")
+ snapshot = get_resp.json()
+ assert snapshot["status"] == "cancelled"
+ assert snapshot["output"] == []
+
+ def test_e27_bg_stream_completed_then_cancel_returns_400(self) -> None:
+ """B12 — cannot cancel completed."""
+ client = _build_client(_simple_text_handler)
+ response_id = _create_bg_streaming_response(client)
+ _wait_for_terminal(client, response_id)
+
+ cancel_resp = client.post(f"/responses/{response_id}/cancel")
+ assert cancel_resp.status_code == 400
+ assert "Cannot cancel a completed response" in cancel_resp.json()["error"]["message"]
+
+ def test_e28_bg_stream_cancel_cancel_returns_200_idempotent(self) -> None:
+ """B3 — cancel is idempotent.
+
+ Uses non-streaming bg path because the synchronous TestClient cannot
+ issue concurrent requests during an active SSE stream.
+ """
+ client = _build_client(_cancellable_bg_handler)
+ response_id = _create_bg_response(client)
+
+ cancel1 = client.post(f"/responses/{response_id}/cancel")
+ assert cancel1.status_code == 200
+
+ cancel2 = client.post(f"/responses/{response_id}/cancel")
+ assert cancel2.status_code == 200
+
+ _wait_for_terminal(client, response_id)
+
+ def test_e29_bg_stream_disconnect_then_get_returns_completed(self) -> None:
+ """B18 — background responses unaffected by connection termination."""
+ client = _build_client(_simple_text_handler)
+ response_id = _create_bg_streaming_response(client)
+ _wait_for_terminal(client, response_id)
+
+ get_resp = client.get(f"/responses/{response_id}")
+ assert get_resp.status_code == 200
+ assert get_resp.json()["status"] == "completed"
+
+ def test_e40_bg_stream_handler_throws_get_and_sse_replay_returns_failed(self) -> None:
+ """B5, B6 — failed status invariants; B26 — terminal event."""
+ client = _build_client(_throwing_handler)
+ response_id = _create_bg_streaming_response(client)
+ _wait_for_terminal(client, response_id)
+
+ # GET JSON → failed
+ get_resp = client.get(f"/responses/{response_id}")
+ assert get_resp.status_code == 200
+ snapshot = get_resp.json()
+ assert snapshot["status"] == "failed"
+ # B6: failed → error must be non-null
+ error = snapshot.get("error")
+ assert error is not None, "B6: error must be non-null for status=failed"
+ assert "code" in error
+ assert "message" in error
+
+ # SSE replay → terminal = response.failed
+ with client.stream("GET", f"/responses/{response_id}?stream=true") as replay_resp:
+ assert replay_resp.status_code == 200
+ replay_events = _collect_sse_events(replay_resp)
+ assert replay_events[-1]["type"] == "response.failed"
+
+ def test_e41_bg_stream_handler_incomplete_get_and_sse_replay_returns_incomplete(self) -> None:
+ """B5, B6 — incomplete status invariants; B26 — terminal event."""
+ client = _build_client(_incomplete_handler)
+ response_id = _create_bg_streaming_response(client)
+ _wait_for_terminal(client, response_id)
+
+ # GET JSON → incomplete
+ get_resp = client.get(f"/responses/{response_id}")
+ assert get_resp.status_code == 200
+ snapshot = get_resp.json()
+ assert snapshot["status"] == "incomplete"
+ assert snapshot.get("error") is None
+
+ # SSE replay → terminal = response.incomplete
+ with client.stream("GET", f"/responses/{response_id}?stream=true") as replay_resp:
+ assert replay_resp.status_code == 200
+ events = _collect_sse_events(replay_resp)
+ assert events[-1]["type"] == "response.incomplete"
+
+ def test_e42_bg_stream_sse_replay_starting_after_max_returns_empty(self) -> None:
+ """B4 — starting_after >= max → empty stream."""
+ client = _build_client(_simple_text_handler)
+ response_id = _create_bg_streaming_response(client)
+ _wait_for_terminal(client, response_id)
+
+ # Get max sequence number from full replay
+ with client.stream("GET", f"/responses/{response_id}?stream=true") as full_resp:
+ full_events = _collect_sse_events(full_resp)
+ max_seq = full_events[-1]["data"]["sequence_number"]
+
+ # Replay with starting_after = max → empty
+ with client.stream("GET", f"/responses/{response_id}?stream=true&starting_after={max_seq}") as empty_resp:
+ assert empty_resp.status_code == 200
+ empty_events = _collect_sse_events(empty_resp)
+ assert empty_events == []
+
+ def test_e26_bg_stream_cancel_then_sse_replay_has_terminal_event(self) -> None:
+ """B26 — terminal SSE event after cancel; B11.
+
+ Uses non-streaming bg path for the cancel step because the synchronous
+ TestClient cannot issue concurrent requests during an active SSE stream.
+ The SSE replay terminal-event check is performed in the async test file
+ (test_e26_bg_stream_cancel_then_sse_replay_terminal_event) which can
+ create bg+stream responses and cancel mid-stream.
+ """
+ client = _build_client(_cancellable_bg_handler)
+ response_id = _create_bg_response(client)
+
+ cancel_resp = client.post(f"/responses/{response_id}/cancel")
+ assert cancel_resp.status_code == 200
+ _wait_for_terminal(client, response_id)
+
+ # After cancel, the response is cancelled.
+ get_resp = client.get(f"/responses/{response_id}")
+ assert get_resp.status_code == 200
+ assert get_resp.json()["status"] == "cancelled"
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_cross_api_e2e_async.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_cross_api_e2e_async.py
new file mode 100644
index 000000000000..ac1d1063f606
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_cross_api_e2e_async.py
@@ -0,0 +1,907 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Cross-API E2E tests requiring concurrent HTTP operations during active handlers.
+
+These tests use a lightweight async ASGI client that invokes the Starlette app
+directly via ``await app(scope, receive, send)``, combined with
+``asyncio.create_task`` for concurrency. This enables:
+
+* Issuing GET / Cancel requests while a streaming POST handler is still running.
+* Using ``asyncio.Event`` for deterministic handler gating (same event loop).
+* Pre-generating response IDs via ``IdGenerator`` to avoid parsing the SSE stream.
+
+Tests validate: E8, E11, E20, E25, E43 from the cross-API matrix.
+
+**Parallel-safety:** every test creates its own Starlette app, ASGI client, and
+handler instances — fully isolated with no shared state, no port binding, and no
+global singletons. Safe for ``pytest-xdist`` and any concurrent test runner.
+"""
+
+from __future__ import annotations
+
+import asyncio
+import json as _json
+from typing import Any
+
+from azure.ai.agentserver.responses import ResponsesAgentServerHost
+from azure.ai.agentserver.responses._id_generator import IdGenerator
+from azure.ai.agentserver.responses.streaming._event_stream import ResponseEventStream
+
+# ════════════════════════════════════════════════════════════
+# Lightweight async ASGI test client
+# ════════════════════════════════════════════════════════════
+
+
+class _AsgiResponse:
+ """Result of a non-streaming ASGI request."""
+
+ def __init__(self, status_code: int, body: bytes, headers: list[tuple[bytes, bytes]]) -> None:
+ self.status_code = status_code
+ self.body = body
+ self.headers = headers
+
+ def json(self) -> Any:
+ return _json.loads(self.body)
+
+
+class _AsyncAsgiClient:
+ """Minimal async ASGI client that supports concurrent in-flight requests.
+
+ Unlike ``httpx.ASGITransport`` (which buffers the entire response body before
+ returning) or Starlette ``TestClient`` (synchronous), this client calls the
+ ASGI app directly. Combined with ``asyncio.create_task``, the test can issue
+ additional requests while a previous one is still being processed.
+
+ **Thread-safety:** instances are NOT thread-safe. Each test should create
+ its own client via ``_build_client()``.
+ """
+
+ def __init__(self, app: Any) -> None:
+ self._app = app
+
+ # ── helpers ──────────────────────────────────────────────
+
+ @staticmethod
+ def _build_scope(method: str, path: str, body: bytes) -> dict[str, Any]:
+ headers: list[tuple[bytes, bytes]] = []
+ query_string = b""
+
+ if "?" in path:
+ path, qs = path.split("?", 1)
+ query_string = qs.encode()
+
+ if body:
+ headers = [
+ (b"content-type", b"application/json"),
+ (b"content-length", str(len(body)).encode()),
+ ]
+
+ return {
+ "type": "http",
+ "asgi": {"version": "3.0"},
+ "http_version": "1.1",
+ "method": method,
+ "headers": headers,
+ "scheme": "http",
+ "path": path,
+ "raw_path": path.encode(),
+ "query_string": query_string,
+ "server": ("localhost", 80),
+ "client": ("127.0.0.1", 123),
+ "root_path": "",
+ }
+
+ # ── public API ──────────────────────────────────────────
+
+ async def request(
+ self,
+ method: str,
+ path: str,
+ *,
+ json_body: dict[str, Any] | None = None,
+ ) -> _AsgiResponse:
+ """Send a request and collect the full response."""
+ body = _json.dumps(json_body).encode() if json_body else b""
+ scope = self._build_scope(method, path, body)
+
+ status_code: int | None = None
+ response_headers: list[tuple[bytes, bytes]] = []
+ body_parts: list[bytes] = []
+ request_sent = False
+ response_done = asyncio.Event()
+
+ async def receive() -> dict[str, Any]:
+ nonlocal request_sent
+ if not request_sent:
+ request_sent = True
+ return {"type": "http.request", "body": body, "more_body": False}
+ await response_done.wait()
+ return {"type": "http.disconnect"}
+
+ async def send(message: dict[str, Any]) -> None:
+ nonlocal status_code, response_headers
+ if message["type"] == "http.response.start":
+ status_code = message["status"]
+ response_headers = message.get("headers", [])
+ elif message["type"] == "http.response.body":
+ chunk = message.get("body", b"")
+ if chunk:
+ body_parts.append(chunk)
+ if not message.get("more_body", False):
+ response_done.set()
+
+ await self._app(scope, receive, send)
+
+ assert status_code is not None
+ return _AsgiResponse(
+ status_code=status_code,
+ body=b"".join(body_parts),
+ headers=response_headers,
+ )
+
+ async def get(self, path: str) -> _AsgiResponse:
+ return await self.request("GET", path)
+
+ async def post(self, path: str, *, json_body: dict[str, Any] | None = None) -> _AsgiResponse:
+ return await self.request("POST", path, json_body=json_body)
+
+
+# ════════════════════════════════════════════════════════════
+# Helpers
+# ════════════════════════════════════════════════════════════
+
+
+def _build_client(handler: Any) -> _AsyncAsgiClient:
+ """Create a fully isolated async ASGI client."""
+ app = ResponsesAgentServerHost()
+ app.create_handler(handler)
+ return _AsyncAsgiClient(app)
+
+
+async def _ensure_task_done(
+ task: asyncio.Task[Any],
+ handler: Any,
+ timeout: float = 5.0,
+) -> None:
+ """Release handler gates and await the task with a timeout."""
+ # Release all asyncio.Event gates on the handler so it can exit.
+ for attr in vars(handler):
+ obj = getattr(handler, attr, None)
+ if isinstance(obj, asyncio.Event):
+ obj.set()
+ if not task.done():
+ try:
+ await asyncio.wait_for(task, timeout=timeout)
+ except (asyncio.TimeoutError, Exception):
+ task.cancel()
+ try:
+ await task
+ except (asyncio.CancelledError, Exception):
+ pass
+
+
+def _parse_sse_events(text: str) -> list[dict[str, Any]]:
+ """Parse SSE events from raw text."""
+ events: list[dict[str, Any]] = []
+ current_type: str | None = None
+ current_data: str | None = None
+
+ for line in text.splitlines():
+ if not line:
+ if current_type is not None:
+ payload = _json.loads(current_data) if current_data else {}
+ events.append({"type": current_type, "data": payload})
+ current_type = None
+ current_data = None
+ continue
+
+ if line.startswith("event:"):
+ current_type = line.split(":", 1)[1].strip()
+ elif line.startswith("data:"):
+ current_data = line.split(":", 1)[1].strip()
+
+ if current_type is not None:
+ payload = _json.loads(current_data) if current_data else {}
+ events.append({"type": current_type, "data": payload})
+
+ return events
+
+
+# ════════════════════════════════════════════════════════════
+# Handler factories (asyncio.Event gating — same event loop as test)
+# ════════════════════════════════════════════════════════════
+
+
+def _make_gated_stream_handler():
+ """Factory for a handler that emits created + in_progress, then blocks until ``release`` is set."""
+ started = asyncio.Event()
+ release = asyncio.Event()
+
+ def handler(request: Any, context: Any, cancellation_signal: Any):
+ async def _events():
+ stream = ResponseEventStream(
+ response_id=context.response_id,
+ model=getattr(request, "model", None),
+ )
+ yield stream.emit_created()
+ yield stream.emit_in_progress()
+ started.set()
+ while not release.is_set():
+ if cancellation_signal.is_set():
+ return
+ await asyncio.sleep(0.01)
+ yield stream.emit_completed()
+
+ return _events()
+
+ handler.started = started # type: ignore[attr-defined]
+ handler.release = release # type: ignore[attr-defined]
+ return handler
+
+
+def _make_gated_stream_handler_with_output():
+ """Factory for a handler that emits created + in_progress + a partial message, then blocks."""
+ started = asyncio.Event()
+ release = asyncio.Event()
+
+ def handler(request: Any, context: Any, cancellation_signal: Any):
+ async def _events():
+ stream = ResponseEventStream(
+ response_id=context.response_id,
+ model=getattr(request, "model", None),
+ )
+ yield stream.emit_created()
+ yield stream.emit_in_progress()
+
+ message = stream.add_output_item_message()
+ yield message.emit_added()
+ text = message.add_text_content()
+ yield text.emit_added()
+ yield text.emit_delta("Hello")
+
+ started.set()
+ while not release.is_set():
+ if cancellation_signal.is_set():
+ return
+ await asyncio.sleep(0.01)
+
+ yield text.emit_done()
+ yield message.emit_content_done(text)
+ yield message.emit_done()
+ yield stream.emit_completed()
+
+ return _events()
+
+ handler.started = started # type: ignore[attr-defined]
+ handler.release = release # type: ignore[attr-defined]
+ return handler
+
+
+def _make_item_lifecycle_gated_handler():
+ """Factory for ItemLifecycleGatedStream.
+
+ Emits two message output items with fine-grained gates:
+ - item_added: fires after first item emit_added (item in_progress, empty content)
+ - item_done: fires after first item emit_done (item completed, text="Hello")
+ - item2_done: fires after second item emit_done (2 completed items)
+
+ Each gate has a corresponding ``_checked`` event that the test must set
+ to let the handler continue.
+ """
+ item_added = asyncio.Event()
+ item_added_checked = asyncio.Event()
+ item_done = asyncio.Event()
+ item_done_checked = asyncio.Event()
+ item2_done = asyncio.Event()
+ item2_done_checked = asyncio.Event()
+
+ def handler(request: Any, context: Any, cancellation_signal: Any):
+ async def _events():
+ stream = ResponseEventStream(
+ response_id=context.response_id,
+ model=getattr(request, "model", None),
+ )
+ yield stream.emit_created()
+ yield stream.emit_in_progress()
+
+ # First item — gate after Added (in_progress, empty content)
+ msg1 = stream.add_output_item_message()
+ yield msg1.emit_added()
+
+ item_added.set()
+ while not item_added_checked.is_set():
+ if cancellation_signal.is_set():
+ return
+ await asyncio.sleep(0.01)
+
+ # Continue to completion of first item
+ text1 = msg1.add_text_content()
+ yield text1.emit_added()
+ yield text1.emit_delta("Hello")
+ yield text1.emit_done()
+ yield msg1.emit_content_done(text1)
+ yield msg1.emit_done()
+
+ item_done.set()
+ while not item_done_checked.is_set():
+ if cancellation_signal.is_set():
+ return
+ await asyncio.sleep(0.01)
+
+ # Second item — emit fully, gate after Done
+ msg2 = stream.add_output_item_message()
+ yield msg2.emit_added()
+ text2 = msg2.add_text_content()
+ yield text2.emit_added()
+ yield text2.emit_delta("World")
+ yield text2.emit_done()
+ yield msg2.emit_content_done(text2)
+ yield msg2.emit_done()
+
+ item2_done.set()
+ while not item2_done_checked.is_set():
+ if cancellation_signal.is_set():
+ return
+ await asyncio.sleep(0.01)
+
+ yield stream.emit_completed()
+
+ return _events()
+
+ handler.item_added = item_added # type: ignore[attr-defined]
+ handler.item_added_checked = item_added_checked # type: ignore[attr-defined]
+ handler.item_done = item_done # type: ignore[attr-defined]
+ handler.item_done_checked = item_done_checked # type: ignore[attr-defined]
+ handler.item2_done = item2_done # type: ignore[attr-defined]
+ handler.item2_done_checked = item2_done_checked # type: ignore[attr-defined]
+ return handler
+
+
+def _make_two_item_gated_bg_handler():
+ """Factory for TwoItemGatedStream for progressive polling (E44).
+
+ Emits two message output items with gates between them:
+ - item1_emitted: fires after first item is fully done (completed with text="Hello")
+ - item2_emitted: fires after second item is fully done (completed with text="World")
+
+ Each gate has a corresponding ``_checked`` event the test must set.
+ """
+ item1_emitted = asyncio.Event()
+ item1_checked = asyncio.Event()
+ item2_emitted = asyncio.Event()
+ item2_checked = asyncio.Event()
+
+ def handler(request: Any, context: Any, cancellation_signal: Any):
+ async def _events():
+ stream = ResponseEventStream(
+ response_id=context.response_id,
+ model=getattr(request, "model", None),
+ )
+ yield stream.emit_created()
+ yield stream.emit_in_progress()
+
+ # First message output item
+ msg1 = stream.add_output_item_message()
+ yield msg1.emit_added()
+ text1 = msg1.add_text_content()
+ yield text1.emit_added()
+ yield text1.emit_delta("Hello")
+ yield text1.emit_done()
+ yield msg1.emit_content_done(text1)
+ yield msg1.emit_done()
+
+ item1_emitted.set()
+ while not item1_checked.is_set():
+ if cancellation_signal.is_set():
+ return
+ await asyncio.sleep(0.01)
+
+ # Second message output item
+ msg2 = stream.add_output_item_message()
+ yield msg2.emit_added()
+ text2 = msg2.add_text_content()
+ yield text2.emit_added()
+ yield text2.emit_delta("World")
+ yield text2.emit_done()
+ yield msg2.emit_content_done(text2)
+ yield msg2.emit_done()
+
+ item2_emitted.set()
+ while not item2_checked.is_set():
+ if cancellation_signal.is_set():
+ return
+ await asyncio.sleep(0.01)
+
+ yield stream.emit_completed()
+
+ return _events()
+
+ handler.item1_emitted = item1_emitted # type: ignore[attr-defined]
+ handler.item1_checked = item1_checked # type: ignore[attr-defined]
+ handler.item2_emitted = item2_emitted # type: ignore[attr-defined]
+ handler.item2_checked = item2_checked # type: ignore[attr-defined]
+ return handler
+ return handler
+
+
+# ════════════════════════════════════════════════════════════
+# C2 — Sync streaming, stored: E8, E11
+# ════════════════════════════════════════════════════════════
+
+
+class TestC2StreamStoredAsync:
+ """Sync streaming tests requiring concurrent access during an active stream."""
+
+ async def test_e8_stream_get_during_stream_returns_404(self) -> None:
+ """B16 — non-bg in-flight → 404."""
+ handler = _make_gated_stream_handler()
+ client = _build_client(handler)
+ response_id = IdGenerator.new_response_id()
+
+ post_task = asyncio.create_task(
+ client.post(
+ "/responses",
+ json_body={
+ "response_id": response_id,
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": True,
+ "store": True,
+ "background": False,
+ },
+ )
+ )
+ try:
+ await asyncio.wait_for(handler.started.wait(), timeout=5.0)
+
+ # GET during non-bg in-flight → 404
+ get_resp = await client.get(f"/responses/{response_id}")
+ assert get_resp.status_code == 404
+
+ # Release handler so it can complete
+ handler.release.set()
+ post_resp = await asyncio.wait_for(post_task, timeout=5.0)
+ assert post_resp.status_code == 200
+ finally:
+ await _ensure_task_done(post_task, handler)
+
+ # After stream ends, response should be stored
+ get_after = await client.get(f"/responses/{response_id}")
+ assert get_after.status_code == 200
+ assert get_after.json()["status"] == "completed"
+
+ async def test_e11_stream_cancel_during_stream_returns_400(self) -> None:
+ """B1 — cancel requires background; non-bg → 400."""
+ handler = _make_gated_stream_handler()
+ client = _build_client(handler)
+ response_id = IdGenerator.new_response_id()
+
+ post_task = asyncio.create_task(
+ client.post(
+ "/responses",
+ json_body={
+ "response_id": response_id,
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": True,
+ "store": True,
+ "background": False,
+ },
+ )
+ )
+ try:
+ await asyncio.wait_for(handler.started.wait(), timeout=5.0)
+
+ # Cancel non-bg in-flight → 404 (not yet stored, S7)
+ cancel_resp = await client.post(f"/responses/{response_id}/cancel")
+ assert cancel_resp.status_code == 404, (
+ "S7: non-background in-flight cancel must return 404 (not yet stored)"
+ )
+
+ handler.release.set()
+ await asyncio.wait_for(post_task, timeout=5.0)
+ finally:
+ await _ensure_task_done(post_task, handler)
+
+
+# ════════════════════════════════════════════════════════════
+# C4 — Background streaming, stored: E20, E25, E43
+#
+# The Python SDK now stores the execution record at response.created
+# time for background+stream responses (S-035), enabling mid-stream
+# GET, Cancel, and progressive-poll.
+# ════════════════════════════════════════════════════════════
+
+
+class TestC4BgStreamStoredAsync:
+ """Background streaming tests requiring concurrent access during active stream."""
+
+ async def test_e20_bg_stream_get_during_stream_returns_in_progress(self) -> None:
+ """B5 — background responses accessible during in-progress."""
+ handler = _make_gated_stream_handler()
+ client = _build_client(handler)
+ response_id = IdGenerator.new_response_id()
+
+ post_task = asyncio.create_task(
+ client.post(
+ "/responses",
+ json_body={
+ "response_id": response_id,
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": True,
+ "store": True,
+ "background": True,
+ },
+ )
+ )
+ try:
+ await asyncio.wait_for(handler.started.wait(), timeout=5.0)
+
+ # GET during bg in-flight → 200 with in_progress
+ get_resp = await client.get(f"/responses/{response_id}")
+ assert get_resp.status_code == 200
+ assert get_resp.json()["status"] == "in_progress"
+
+ handler.release.set()
+ post_resp = await asyncio.wait_for(post_task, timeout=5.0)
+ assert post_resp.status_code == 200
+ finally:
+ await _ensure_task_done(post_task, handler)
+
+ # After stream ends, response should be completed
+ get_after = await client.get(f"/responses/{response_id}")
+ assert get_after.status_code == 200
+ assert get_after.json()["status"] == "completed"
+
+ async def test_e25_bg_stream_cancel_mid_stream_returns_cancelled(self) -> None:
+ """B7, B11 — cancel mid-stream → cancelled with 0 output."""
+ handler = _make_gated_stream_handler()
+ client = _build_client(handler)
+ response_id = IdGenerator.new_response_id()
+
+ post_task = asyncio.create_task(
+ client.post(
+ "/responses",
+ json_body={
+ "response_id": response_id,
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": True,
+ "store": True,
+ "background": True,
+ },
+ )
+ )
+ try:
+ await asyncio.wait_for(handler.started.wait(), timeout=5.0)
+
+ # Cancel bg in-flight → 200
+ cancel_resp = await client.post(f"/responses/{response_id}/cancel")
+ assert cancel_resp.status_code == 200
+ snapshot = cancel_resp.json()
+ assert snapshot["status"] == "cancelled"
+ assert snapshot["output"] == []
+
+ await asyncio.wait_for(post_task, timeout=5.0)
+ finally:
+ await _ensure_task_done(post_task, handler)
+
+ # GET after cancel → cancelled
+ get_resp = await client.get(f"/responses/{response_id}")
+ assert get_resp.status_code == 200
+ assert get_resp.json()["status"] == "cancelled"
+ assert get_resp.json()["output"] == []
+
+ async def test_e43_bg_stream_get_during_stream_returns_partial_output(self) -> None:
+ """B5, B23 — GET mid-stream returns partial output items."""
+ handler = _make_gated_stream_handler_with_output()
+ client = _build_client(handler)
+ response_id = IdGenerator.new_response_id()
+
+ post_task = asyncio.create_task(
+ client.post(
+ "/responses",
+ json_body={
+ "response_id": response_id,
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": True,
+ "store": True,
+ "background": True,
+ },
+ )
+ )
+ try:
+ await asyncio.wait_for(handler.started.wait(), timeout=5.0)
+
+ # GET during bg in-flight → 200 with in_progress and partial output
+ get_resp = await client.get(f"/responses/{response_id}")
+ assert get_resp.status_code == 200
+ body = get_resp.json()
+ assert body["status"] == "in_progress"
+ # The response should have at least one output item from the
+ # output_item.added event emitted before the gate.
+ assert len(body.get("output", [])) >= 1
+
+ handler.release.set()
+ post_resp = await asyncio.wait_for(post_task, timeout=5.0)
+ assert post_resp.status_code == 200
+ finally:
+ await _ensure_task_done(post_task, handler)
+
+ # After completion, full output should be present
+ get_after = await client.get(f"/responses/{response_id}")
+ assert get_after.status_code == 200
+ assert get_after.json()["status"] == "completed"
+
+ async def test_bg_stream_cancel_terminal_sse_is_response_failed_with_cancelled(self) -> None:
+ """B11, B26 — cancel mid-stream → terminal SSE event is response.failed with status cancelled."""
+ handler = _make_gated_stream_handler()
+ client = _build_client(handler)
+ response_id = IdGenerator.new_response_id()
+
+ post_task = asyncio.create_task(
+ client.post(
+ "/responses",
+ json_body={
+ "response_id": response_id,
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": True,
+ "store": True,
+ "background": True,
+ },
+ )
+ )
+ try:
+ await asyncio.wait_for(handler.started.wait(), timeout=5.0)
+
+ # Cancel bg in-flight → 200
+ cancel_resp = await client.post(f"/responses/{response_id}/cancel")
+ assert cancel_resp.status_code == 200
+
+ post_resp = await asyncio.wait_for(post_task, timeout=5.0)
+ assert post_resp.status_code == 200
+
+ # Parse SSE events from the response body
+ events = _parse_sse_events(post_resp.body.decode())
+
+ # Find terminal events
+ terminal_types = {"response.completed", "response.failed", "response.incomplete"}
+ terminal_events = [e for e in events if e["type"] in terminal_types]
+ assert len(terminal_events) == 1, (
+ f"Expected exactly one terminal event, got: {[e['type'] for e in terminal_events]}"
+ )
+
+ terminal = terminal_events[0]
+ # B26: cancelled responses emit response.failed
+ assert terminal["type"] == "response.failed", (
+ f"Expected response.failed for cancel per B26, got: {terminal['type']}"
+ )
+ # B11: status inside is "cancelled"
+ assert terminal["data"]["response"].get("status") == "cancelled"
+ # B11: output cleared
+ assert terminal["data"]["response"].get("output") == []
+ finally:
+ await _ensure_task_done(post_task, handler)
+
+ async def test_e26_bg_stream_cancel_then_sse_replay_terminal_event(self) -> None:
+ """B26 — SSE replay after cancel contains terminal event response.failed with status cancelled.
+
+ Unlike test_bg_stream_cancel_terminal_sse_is_response_failed_with_cancelled
+ which checks the *live* SSE stream, this test verifies the stored *replay*
+ endpoint (GET ?stream=true) returns the correct terminal event.
+ """
+ handler = _make_gated_stream_handler()
+ client = _build_client(handler)
+ response_id = IdGenerator.new_response_id()
+
+ post_task = asyncio.create_task(
+ client.post(
+ "/responses",
+ json_body={
+ "response_id": response_id,
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": True,
+ "store": True,
+ "background": True,
+ },
+ )
+ )
+ try:
+ await asyncio.wait_for(handler.started.wait(), timeout=5.0)
+
+ # Cancel bg in-flight → 200
+ cancel_resp = await client.post(f"/responses/{response_id}/cancel")
+ assert cancel_resp.status_code == 200
+
+ await asyncio.wait_for(post_task, timeout=5.0)
+ finally:
+ await _ensure_task_done(post_task, handler)
+
+ # SSE replay after cancel → should have response.failed terminal event
+ replay_resp = await client.get(f"/responses/{response_id}?stream=true")
+ assert replay_resp.status_code == 200
+
+ replay_events = _parse_sse_events(replay_resp.body.decode())
+ assert len(replay_events) >= 1, "Replay should have at least 1 event"
+
+ # B26: terminal event for cancelled response is response.failed
+ last_event = replay_events[-1]
+ assert last_event["type"] == "response.failed", (
+ f"Expected response.failed terminal in replay, got: {last_event['type']}"
+ )
+
+ # The response inside should have status: cancelled
+ if "response" in last_event["data"]:
+ assert last_event["data"]["response"]["status"] == "cancelled"
+
+ async def test_e43_bg_stream_get_during_stream_item_lifecycle(self) -> None:
+ """B5, B23 — GET mid-stream returns progressive item lifecycle.
+
+ Validates the full 4-phase lifecycle (E43):
+ Phase 1: After item Added → 1 item, status=in_progress, empty content
+ Phase 2: After item Done → 1 item, status=completed, text="Hello"
+ Phase 3: After 2nd item Done → 2 items, both completed
+ Phase 4: After completion → 2 items, both completed (final snapshot)
+ """
+ handler = _make_item_lifecycle_gated_handler()
+ client = _build_client(handler)
+ response_id = IdGenerator.new_response_id()
+
+ post_task = asyncio.create_task(
+ client.post(
+ "/responses",
+ json_body={
+ "response_id": response_id,
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": True,
+ "store": True,
+ "background": True,
+ },
+ )
+ )
+ try:
+ # ── Phase 1: After EmitAdded (before EmitDone) ──
+ await asyncio.wait_for(handler.item_added.wait(), timeout=5.0)
+
+ get1 = await client.get(f"/responses/{response_id}")
+ assert get1.status_code == 200
+ doc1 = get1.json()
+ assert doc1["status"] == "in_progress"
+ output1 = doc1["output"]
+ assert len(output1) == 1
+ item1_added = output1[0]
+ assert item1_added["type"] == "message"
+ # Item is in_progress — content should be empty
+ assert item1_added["status"] == "in_progress"
+ assert len(item1_added["content"]) == 0
+
+ handler.item_added_checked.set()
+
+ # ── Phase 2: After EmitDone — item is completed with full text ──
+ await asyncio.wait_for(handler.item_done.wait(), timeout=5.0)
+
+ get2 = await client.get(f"/responses/{response_id}")
+ assert get2.status_code == 200
+ doc2 = get2.json()
+ assert doc2["status"] == "in_progress"
+ output2 = doc2["output"]
+ assert len(output2) == 1
+ item1_done = output2[0]
+ assert item1_done["status"] == "completed"
+ content_done = item1_done["content"]
+ assert len(content_done) == 1
+ assert content_done[0]["type"] == "output_text"
+ assert content_done[0]["text"] == "Hello"
+
+ handler.item_done_checked.set()
+
+ # ── Phase 3: Second item done — output should have 2 completed items ──
+ await asyncio.wait_for(handler.item2_done.wait(), timeout=5.0)
+
+ get3 = await client.get(f"/responses/{response_id}")
+ assert get3.status_code == 200
+ doc3 = get3.json()
+ assert doc3["status"] == "in_progress"
+ output3 = doc3["output"]
+ assert len(output3) == 2
+ assert output3[0]["status"] == "completed"
+ assert output3[1]["status"] == "completed"
+ assert output3[1]["content"][0]["text"] == "World"
+
+ handler.item2_done_checked.set()
+
+ post_resp = await asyncio.wait_for(post_task, timeout=5.0)
+ assert post_resp.status_code == 200
+
+ # ── Phase 4: After completion — final snapshot ──
+ get_final = await client.get(f"/responses/{response_id}")
+ assert get_final.status_code == 200
+ doc_final = get_final.json()
+ assert doc_final["status"] == "completed"
+ assert len(doc_final["output"]) == 2
+ finally:
+ await _ensure_task_done(post_task, handler)
+
+ async def test_e44_bg_progressive_polling_output_grows(self) -> None:
+ """B5, B10 — background progressive polling shows output accumulation.
+
+ Validates the full 3-phase progressive polling (E44):
+ Phase 1: After item1 done → 1 completed item with text="Hello"
+ Phase 2: After item2 done → 2 completed items with text="Hello" and "World"
+ Phase 3: After completion → 2 items, full content preserved
+
+ Note: The Python server updates the stored record progressively only
+ for bg+stream responses (S-035), so this test uses stream=True.
+ The sync E44 test verifies the final-state-only path for bg non-streaming.
+ """
+ handler = _make_two_item_gated_bg_handler()
+ client = _build_client(handler)
+ response_id = IdGenerator.new_response_id()
+
+ post_task = asyncio.create_task(
+ client.post(
+ "/responses",
+ json_body={
+ "response_id": response_id,
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": True,
+ "store": True,
+ "background": True,
+ },
+ )
+ )
+ try:
+ # Wait for first item to be fully emitted (done)
+ await asyncio.wait_for(handler.item1_emitted.wait(), timeout=5.0)
+
+ # Poll: should see 1 completed output item with text "Hello"
+ poll1 = await client.get(f"/responses/{response_id}")
+ assert poll1.status_code == 200
+ doc1 = poll1.json()
+ assert doc1["status"] == "in_progress"
+ output1 = doc1["output"]
+ assert len(output1) == 1
+ assert output1[0]["status"] == "completed"
+ assert output1[0]["content"][0]["text"] == "Hello"
+
+ # Release gate for second item
+ handler.item1_checked.set()
+
+ # Wait for second item
+ await asyncio.wait_for(handler.item2_emitted.wait(), timeout=5.0)
+
+ # Poll: should see 2 completed output items
+ poll2 = await client.get(f"/responses/{response_id}")
+ assert poll2.status_code == 200
+ doc2 = poll2.json()
+ assert doc2["status"] == "in_progress"
+ output2 = doc2["output"]
+ assert len(output2) == 2
+ assert output2[0]["status"] == "completed"
+ assert output2[0]["content"][0]["text"] == "Hello"
+ assert output2[1]["status"] == "completed"
+ assert output2[1]["content"][0]["text"] == "World"
+
+ # Release final gate
+ handler.item2_checked.set()
+
+ post_resp = await asyncio.wait_for(post_task, timeout=5.0)
+ assert post_resp.status_code == 200
+
+ # Final poll: completed with 2 items, full content preserved
+ poll_final = await client.get(f"/responses/{response_id}")
+ assert poll_final.status_code == 200
+ doc_final = poll_final.json()
+ assert doc_final["status"] == "completed"
+ output_final = doc_final["output"]
+ assert len(output_final) == 2
+ assert output_final[0]["content"][0]["text"] == "Hello"
+ assert output_final[1]["content"][0]["text"] == "World"
+ finally:
+ await _ensure_task_done(post_task, handler)
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_delete_endpoint.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_delete_endpoint.py
new file mode 100644
index 000000000000..aca16fa27b28
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_delete_endpoint.py
@@ -0,0 +1,467 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Contract tests for DELETE /responses/{response_id} endpoint behavior."""
+
+from __future__ import annotations
+
+import asyncio
+import threading
+from typing import Any
+
+from starlette.testclient import TestClient
+
+from azure.ai.agentserver.responses import ResponsesAgentServerHost
+from azure.ai.agentserver.responses._id_generator import IdGenerator
+from tests._helpers import EventGate, poll_until
+
+
+def _noop_response_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Minimal handler used to wire the hosting surface in contract tests."""
+
+ async def _events():
+ if False: # pragma: no cover - required to keep async-generator shape.
+ yield None
+
+ return _events()
+
+
+def _delayed_response_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Handler that keeps background execution in-flight for deterministic delete checks."""
+
+ async def _events():
+ if cancellation_signal.is_set():
+ return
+ await asyncio.sleep(0.5)
+ if cancellation_signal.is_set():
+ return
+ if False: # pragma: no cover - required to keep async-generator shape.
+ yield None
+
+ return _events()
+
+
+def _build_client(handler: Any | None = None) -> TestClient:
+ app = ResponsesAgentServerHost()
+ app.create_handler(handler or _noop_response_handler)
+ return TestClient(app)
+
+
+def _throwing_bg_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Background handler that raises immediately — produces status=failed."""
+
+ async def _events():
+ raise RuntimeError("Simulated handler failure")
+ if False: # pragma: no cover - keep async generator shape.
+ yield None
+
+ return _events()
+
+
+def _throwing_after_created_bg_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Background handler that emits response.created then raises — produces status=failed.
+
+ Phase 3: by yielding response.created first, the POST returns HTTP 200 instead of 500.
+ """
+
+ async def _events():
+ yield {"type": "response.created", "response": {"status": "in_progress", "output": []}}
+ raise RuntimeError("Simulated handler failure")
+
+ return _events()
+
+
+def _cancellable_bg_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Handler that emits response.created then blocks until cancelled (Phase 3)."""
+
+ async def _events():
+ yield {"type": "response.created", "response": {"status": "in_progress", "output": []}}
+ while not cancellation_signal.is_set():
+ await asyncio.sleep(0.01)
+
+ return _events()
+
+
+def _incomplete_bg_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Background handler that emits an incomplete terminal event."""
+
+ async def _events():
+ yield {"type": "response.created", "response": {"status": "in_progress", "output": []}}
+ yield {"type": "response.incomplete", "response": {"status": "incomplete", "output": []}}
+
+ return _events()
+
+
+def test_delete__deletes_stored_completed_response() -> None:
+ client = _build_client()
+
+ create_response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": False,
+ },
+ )
+ assert create_response.status_code == 200
+ response_id = create_response.json()["id"]
+
+ delete_response = client.delete(f"/responses/{response_id}")
+ assert delete_response.status_code == 200
+ payload = delete_response.json()
+ assert payload.get("id") == response_id
+ assert payload.get("object") == "response.deleted"
+ assert payload.get("deleted") is True
+
+
+def test_delete__returns_400_for_background_in_flight_response() -> None:
+ client = _build_client(_cancellable_bg_handler)
+
+ create_response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": True,
+ },
+ )
+ assert create_response.status_code == 200
+ response_id = create_response.json()["id"]
+
+ delete_response = client.delete(f"/responses/{response_id}")
+ assert delete_response.status_code == 400
+ payload = delete_response.json()
+ assert payload["error"].get("type") == "invalid_request_error"
+ assert payload["error"].get("message") == "Cannot delete an in-flight response."
+
+
+def test_delete__returns_404_for_unknown_response_id() -> None:
+ client = _build_client()
+
+ delete_response = client.delete("/responses/resp_does_not_exist")
+ assert delete_response.status_code == 404
+ payload = delete_response.json()
+ assert payload["error"].get("type") == "invalid_request_error"
+
+
+def test_delete__returns_404_for_store_false_response() -> None:
+ client = _build_client()
+
+ create_response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": False,
+ "background": False,
+ },
+ )
+ assert create_response.status_code == 200
+ response_id = create_response.json()["id"]
+
+ delete_response = client.delete(f"/responses/{response_id}")
+ assert delete_response.status_code == 404
+ payload = delete_response.json()
+ assert payload["error"].get("type") == "invalid_request_error"
+
+
+def test_delete__get_returns_400_after_deletion() -> None:
+ client = _build_client()
+
+ create_response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": False,
+ },
+ )
+ assert create_response.status_code == 200
+ response_id = create_response.json()["id"]
+
+ delete_response = client.delete(f"/responses/{response_id}")
+ assert delete_response.status_code == 200
+
+ get_response = client.get(f"/responses/{response_id}")
+ assert get_response.status_code == 400
+ payload = get_response.json()
+ assert payload["error"].get("type") == "invalid_request_error"
+ assert "deleted" in (payload["error"].get("message") or "").lower()
+
+
+def test_delete__cancel_returns_404_after_deletion() -> None:
+ client = _build_client()
+
+ create_response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": False,
+ },
+ )
+ assert create_response.status_code == 200
+ response_id = create_response.json()["id"]
+
+ delete_response = client.delete(f"/responses/{response_id}")
+ assert delete_response.status_code == 200
+
+ cancel_response = client.post(f"/responses/{response_id}/cancel")
+ assert cancel_response.status_code == 404
+ payload = cancel_response.json()
+ assert payload["error"].get("type") == "invalid_request_error"
+
+
+def _make_blocking_sync_response_handler(started_gate: EventGate, release_gate: threading.Event):
+ """Factory for a handler that holds a sync request in-flight for concurrent operation tests."""
+
+ def _handler(request: Any, context: Any, cancellation_signal: Any):
+ async def _events():
+ started_gate.signal(True)
+ while not release_gate.is_set():
+ if cancellation_signal.is_set():
+ return
+ await asyncio.sleep(0.01)
+ if False: # pragma: no cover
+ yield None
+
+ return _events()
+
+ return _handler
+
+
+def test_delete__returns_404_for_non_bg_in_flight_response() -> None:
+ """FR-024 — Non-background in-flight responses are not findable → DELETE 404."""
+ started_gate = EventGate()
+ release_gate = threading.Event()
+ handler = _make_blocking_sync_response_handler(started_gate, release_gate)
+ app = ResponsesAgentServerHost()
+ app.create_handler(handler)
+ client = TestClient(app)
+ response_id = IdGenerator.new_response_id()
+
+ create_result: dict[str, Any] = {}
+
+ def _do_create() -> None:
+ try:
+ create_result["response"] = client.post(
+ "/responses",
+ json={
+ "response_id": response_id,
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": False,
+ },
+ )
+ except Exception as exc: # pragma: no cover
+ create_result["error"] = exc
+
+ t = threading.Thread(target=_do_create, daemon=True)
+ t.start()
+
+ started, _ = started_gate.wait(timeout_s=2.0)
+ assert started, "Expected sync create to enter handler before DELETE"
+
+ delete_response = client.delete(f"/responses/{response_id}")
+ assert delete_response.status_code == 404
+
+ release_gate.set()
+ t.join(timeout=2.0)
+ assert not t.is_alive()
+
+
+# ══════════════════════════════════════════════════════════
+# B6: DELETE on terminal statuses (failed / incomplete / cancelled)
+# ══════════════════════════════════════════════════════════
+
+
+def test_delete__deletes_stored_failed_response() -> None:
+ """B6 — DELETE on a failed (terminal) stored response returns 200 with deleted=True."""
+ client = _build_client(_throwing_after_created_bg_handler)
+
+ create_response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": True,
+ },
+ )
+ assert create_response.status_code == 200
+ response_id = create_response.json()["id"]
+
+ ok, failure = poll_until(
+ lambda: client.get(f"/responses/{response_id}").json().get("status") == "failed",
+ timeout_s=5.0,
+ interval_s=0.05,
+ context_provider=lambda: client.get(f"/responses/{response_id}").json().get("status"),
+ label=f"status=failed for {response_id}",
+ )
+ assert ok, failure
+
+ delete_response = client.delete(f"/responses/{response_id}")
+ assert delete_response.status_code == 200
+ payload = delete_response.json()
+ assert payload.get("id") == response_id
+ assert payload.get("object") == "response.deleted"
+ assert payload.get("deleted") is True
+
+
+def test_delete__deletes_stored_incomplete_response() -> None:
+ """B6 — DELETE on an incomplete (terminal) stored response returns 200 with deleted=True."""
+ client = _build_client(_incomplete_bg_handler)
+
+ create_response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": True,
+ },
+ )
+ assert create_response.status_code == 200
+ response_id = create_response.json()["id"]
+
+ ok, failure = poll_until(
+ lambda: client.get(f"/responses/{response_id}").json().get("status") == "incomplete",
+ timeout_s=5.0,
+ interval_s=0.05,
+ context_provider=lambda: client.get(f"/responses/{response_id}").json().get("status"),
+ label=f"status=incomplete for {response_id}",
+ )
+ assert ok, failure
+
+ delete_response = client.delete(f"/responses/{response_id}")
+ assert delete_response.status_code == 200
+ payload = delete_response.json()
+ assert payload.get("id") == response_id
+ assert payload.get("object") == "response.deleted"
+ assert payload.get("deleted") is True
+
+
+def test_delete__deletes_stored_cancelled_response() -> None:
+ """B6 — DELETE on a cancelled (terminal) stored response returns 200 with deleted=True."""
+ client = _build_client(_cancellable_bg_handler)
+
+ create_response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": True,
+ },
+ )
+ assert create_response.status_code == 200
+ response_id = create_response.json()["id"]
+
+ cancel_response = client.post(f"/responses/{response_id}/cancel")
+ assert cancel_response.status_code == 200
+
+ ok, failure = poll_until(
+ lambda: client.get(f"/responses/{response_id}").json().get("status") == "cancelled",
+ timeout_s=5.0,
+ interval_s=0.05,
+ context_provider=lambda: client.get(f"/responses/{response_id}").json().get("status"),
+ label=f"status=cancelled for {response_id}",
+ )
+ assert ok, failure
+
+ delete_response = client.delete(f"/responses/{response_id}")
+ assert delete_response.status_code == 200
+ payload = delete_response.json()
+ assert payload.get("id") == response_id
+ assert payload.get("object") == "response.deleted"
+ assert payload.get("deleted") is True
+
+
+# ══════════════════════════════════════════════════════════
+# N-5: Second DELETE on already-deleted response → 404
+# ══════════════════════════════════════════════════════════
+
+
+def test_delete__second_delete_returns_404() -> None:
+ """FR-024 — Deletion is permanent; a second DELETE on an already-deleted ID returns 404."""
+ client = _build_client()
+
+ create_response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": False,
+ },
+ )
+ assert create_response.status_code == 200
+ response_id = create_response.json()["id"]
+
+ # First DELETE – should succeed
+ first_delete = client.delete(f"/responses/{response_id}")
+ assert first_delete.status_code == 200
+
+ # Second DELETE – response is gone, must return 404
+ second_delete = client.delete(f"/responses/{response_id}")
+ assert second_delete.status_code == 404, (
+ "Second DELETE on an already-deleted response must return 404 (response no longer exists)"
+ )
+ payload = second_delete.json()
+ assert payload["error"].get("type") == "invalid_request_error"
+
+
+def test_delete__deletes_completed_background_response() -> None:
+ """DELETE a completed background response returns 200 with deletion confirmation."""
+ client = _build_client()
+
+ create_response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": True,
+ },
+ )
+ assert create_response.status_code == 200
+ response_id = create_response.json()["id"]
+
+ # Wait for background task to complete
+ ok, _ = poll_until(
+ lambda: (
+ client.get(f"/responses/{response_id}").json().get("status")
+ in {
+ "completed",
+ "failed",
+ }
+ ),
+ timeout_s=5.0,
+ interval_s=0.05,
+ label="wait for bg completion",
+ )
+ assert ok, "Background response did not reach terminal state within timeout"
+
+ delete = client.delete(f"/responses/{response_id}")
+ assert delete.status_code == 200
+ payload = delete.json()
+ assert payload["id"] == response_id
+ assert payload["deleted"] is True
+ assert payload.get("object") in {"response.deleted", "response"}, (
+ f"DELETE result must include a recognised object type, got: {payload.get('object')}"
+ )
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_get_endpoint.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_get_endpoint.py
new file mode 100644
index 000000000000..cb026d196507
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_get_endpoint.py
@@ -0,0 +1,621 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Contract tests for GET /responses/{response_id} endpoint behavior."""
+
+from __future__ import annotations
+
+import asyncio
+import json
+from typing import Any
+
+from starlette.testclient import TestClient
+
+from azure.ai.agentserver.responses import ResponsesAgentServerHost
+
+
+def _noop_response_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Minimal handler used to wire the hosting surface in contract tests."""
+
+ async def _events():
+ if False: # pragma: no cover - required to keep async-generator shape.
+ yield None
+
+ return _events()
+
+
+def _build_client() -> TestClient:
+ app = ResponsesAgentServerHost()
+ app.create_handler(_noop_response_handler)
+ return TestClient(app)
+
+
+def _collect_replay_events(response: Any) -> list[dict[str, Any]]:
+ events: list[dict[str, Any]] = []
+ current_type: str | None = None
+ current_data: str | None = None
+
+ for line in response.iter_lines():
+ if not line:
+ if current_type is not None:
+ payload = json.loads(current_data) if current_data else {}
+ events.append({"type": current_type, "data": payload})
+ current_type = None
+ current_data = None
+ continue
+
+ if line.startswith("event:"):
+ current_type = line.split(":", 1)[1].strip()
+ elif line.startswith("data:"):
+ current_data = line.split(":", 1)[1].strip()
+
+ if current_type is not None:
+ payload = json.loads(current_data) if current_data else {}
+ events.append({"type": current_type, "data": payload})
+
+ return events
+
+
+def _create_streaming_and_get_response_id(client: TestClient) -> str:
+ with client.stream(
+ "POST",
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": True,
+ "store": True,
+ "background": False,
+ },
+ ) as create_response:
+ assert create_response.status_code == 200
+ assert create_response.headers.get("content-type", "").startswith("text/event-stream")
+ events = _collect_replay_events(create_response)
+
+ assert events, "Expected streaming create to emit at least one event"
+ response_id = events[0]["data"]["response"].get("id")
+ assert isinstance(response_id, str)
+ return response_id
+
+
+def _create_background_streaming_and_get_response_id(client: TestClient) -> str:
+ with client.stream(
+ "POST",
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": True,
+ "store": True,
+ "background": True,
+ },
+ ) as create_response:
+ assert create_response.status_code == 200
+ assert create_response.headers.get("content-type", "").startswith("text/event-stream")
+ events = _collect_replay_events(create_response)
+
+ assert events, "Expected background streaming create to emit at least one event"
+ response_id = events[0]["data"]["response"].get("id")
+ assert isinstance(response_id, str)
+ return response_id
+
+
+def test_get__returns_latest_snapshot_for_existing_response() -> None:
+ client = _build_client()
+
+ create_response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": False,
+ },
+ )
+ assert create_response.status_code == 200
+ response_id = create_response.json()["id"]
+
+ get_response = client.get(f"/responses/{response_id}")
+ assert get_response.status_code == 200
+ payload = get_response.json()
+ assert payload.get("id") == response_id
+ assert payload.get("response_id") == response_id
+ assert payload.get("object") == "response"
+ assert isinstance(payload.get("agent_reference"), dict)
+ assert payload["agent_reference"].get("type") == "agent_reference"
+ assert payload.get("status") in {"queued", "in_progress", "completed", "failed", "incomplete", "cancelled"}
+ assert payload.get("model") == "gpt-4o-mini"
+ assert "sequence_number" not in payload
+
+
+def test_get__returns_404_for_unknown_response_id() -> None:
+ client = _build_client()
+
+ get_response = client.get("/responses/resp_does_not_exist")
+ assert get_response.status_code == 404
+ payload = get_response.json()
+ assert isinstance(payload.get("error"), dict)
+
+
+def test_get__returns_snapshot_for_stored_non_background_stream_response_after_completion() -> None:
+ client = _build_client()
+
+ response_id = _create_streaming_and_get_response_id(client)
+
+ get_response = client.get(f"/responses/{response_id}")
+ assert get_response.status_code == 200
+ payload = get_response.json()
+ assert payload.get("id") == response_id
+ assert payload.get("status") in {"completed", "failed", "incomplete", "cancelled"}
+
+
+def test_get_replay__rejects_request_when_replay_preconditions_are_not_met() -> None:
+ client = _build_client()
+
+ response_id = _create_streaming_and_get_response_id(client)
+
+ replay_response = client.get(f"/responses/{response_id}?stream=true")
+ assert replay_response.status_code == 400
+ payload = replay_response.json()
+ assert isinstance(payload.get("error"), dict)
+ assert payload["error"].get("type") == "invalid_request_error"
+ assert payload["error"].get("param") == "stream"
+
+
+def test_get_replay__rejects_invalid_starting_after_cursor_type() -> None:
+ client = _build_client()
+
+ response_id = _create_background_streaming_and_get_response_id(client)
+
+ replay_response = client.get(f"/responses/{response_id}?stream=true&starting_after=not-an-int")
+ assert replay_response.status_code == 400
+ payload = replay_response.json()
+ assert payload["error"].get("type") == "invalid_request_error"
+ assert payload["error"].get("param") == "starting_after"
+
+
+def test_get_replay__starting_after_returns_events_after_cursor() -> None:
+ client = _build_client()
+
+ response_id = _create_background_streaming_and_get_response_id(client)
+
+ with client.stream("GET", f"/responses/{response_id}?stream=true&starting_after=0") as replay_response:
+ assert replay_response.status_code == 200
+ assert replay_response.headers.get("content-type", "").startswith("text/event-stream")
+ replay_events = _collect_replay_events(replay_response)
+
+ assert replay_events, "Expected replay stream to include events after cursor"
+ sequence_numbers = [event["data"].get("sequence_number") for event in replay_events]
+ assert all(isinstance(sequence_number, int) for sequence_number in sequence_numbers)
+ assert min(sequence_numbers) > 0
+ terminal_events = {
+ "response.completed",
+ "response.failed",
+ "response.incomplete",
+ }
+ assert any(event["type"] in terminal_events for event in replay_events)
+
+
+def test_get_replay__rejects_bg_non_stream_response() -> None:
+ """B2 — SSE replay requires stream=true at creation. background=true, stream=false → 400."""
+ client = _build_client()
+
+ create_response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": True,
+ },
+ )
+ assert create_response.status_code == 200
+ response_id = create_response.json()["id"]
+
+ replay_response = client.get(f"/responses/{response_id}?stream=true")
+ assert replay_response.status_code == 400
+ payload = replay_response.json()
+ assert payload["error"]["type"] == "invalid_request_error"
+
+
+# ══════════════════════════════════════════════════════════
+# B5: SSE replay rejection message text
+# ══════════════════════════════════════════════════════════
+
+
+def test_get_replay__rejection_message_hints_at_background_true() -> None:
+ """B5 — SSE replay rejection error message contains 'background=true' hint.
+
+ Clients should know how to fix their request.
+ """
+ client = _build_client()
+
+ create_response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": False,
+ },
+ )
+ assert create_response.status_code == 200
+ response_id = create_response.json()["id"]
+
+ replay_response = client.get(f"/responses/{response_id}?stream=true")
+ assert replay_response.status_code == 400
+ payload = replay_response.json()
+ error_message = payload["error"].get("message", "")
+ assert "background=true" in error_message, (
+ f"Error message should hint at 'background=true' to guide the client, but got: {error_message!r}"
+ )
+
+
+# ════════════════════════════════════════════════════════
+# N-6: GET ?stream=true SSE response headers
+# ════════════════════════════════════════════════════════
+
+
+def test_get_replay__sse_response_headers_are_correct() -> None:
+ """SSE headers contract — GET ?stream=true replay must return required SSE response headers."""
+ client = _build_client()
+
+ response_id = _create_background_streaming_and_get_response_id(client)
+
+ with client.stream("GET", f"/responses/{response_id}?stream=true") as replay_response:
+ assert replay_response.status_code == 200
+ headers = replay_response.headers
+
+ content_type = headers.get("content-type", "")
+ assert "text/event-stream" in content_type, (
+ f"SSE replay Content-Type must be text/event-stream, got: {content_type!r}"
+ )
+ assert headers.get("cache-control") == "no-cache", (
+ f"SSE replay Cache-Control must be no-cache, got: {headers.get('cache-control')!r}"
+ )
+ assert headers.get("connection", "").lower() == "keep-alive", (
+ f"SSE replay Connection must be keep-alive, got: {headers.get('connection')!r}"
+ )
+ assert headers.get("x-accel-buffering") == "no", (
+ f"SSE replay X-Accel-Buffering must be no, got: {headers.get('x-accel-buffering')!r}"
+ )
+
+
+# ══════════════════════════════════════════════════════════
+# Task 4.2 — _finalize_bg_stream / _finalize_non_bg_stream
+# ══════════════════════════════════════════════════════════
+
+
+def test_c2_sync_stream_stored_get_returns_200() -> None:
+ """T1 — store=True, bg=False, stream=True: POST then GET returns HTTP 200.
+
+ _finalize_non_bg_stream must register a ResponseExecution so that the
+ subsequent GET can find the stored non-background stream response.
+ """
+ client = _build_client()
+
+ with client.stream(
+ "POST",
+ "/responses",
+ json={"model": "gpt-4o-mini", "input": "hello", "stream": True, "store": True, "background": False},
+ ) as create_response:
+ assert create_response.status_code == 200
+ events = _collect_replay_events(create_response)
+
+ assert events, "Expected at least one SSE event"
+ response_id = events[0]["data"]["response"].get("id")
+ assert isinstance(response_id, str)
+
+ get_response = client.get(f"/responses/{response_id}")
+ assert get_response.status_code == 200, (
+ f"_finalize_non_bg_stream must persist the record so GET returns 200, got {get_response.status_code}"
+ )
+ payload = get_response.json()
+ assert payload.get("status") in {"completed", "failed", "incomplete", "cancelled"}, (
+ f"Non-bg stored stream must be terminal after POST completes, got status={payload.get('status')!r}"
+ )
+
+
+def test_c4_bg_stream_get_sse_replay() -> None:
+ """T2 — store=True, bg=True, stream=True: POST complete, then GET ?stream=true returns SSE replay.
+
+ _finalize_bg_stream must complete the subject so that the subsequent
+ replay GET can iterate the historical events to completion.
+ """
+ client = _build_client()
+
+ with client.stream(
+ "POST",
+ "/responses",
+ json={"model": "gpt-4o-mini", "input": "hello", "stream": True, "store": True, "background": True},
+ ) as create_response:
+ assert create_response.status_code == 200
+ create_events = _collect_replay_events(create_response)
+
+ assert create_events, "Expected at least one SSE event from POST"
+ response_id = create_events[0]["data"]["response"].get("id")
+ assert isinstance(response_id, str)
+
+ with client.stream("GET", f"/responses/{response_id}?stream=true") as replay_response:
+ assert replay_response.status_code == 200, (
+ f"bg+stream GET ?stream=true must return 200, got {replay_response.status_code}"
+ )
+ assert replay_response.headers.get("content-type", "").startswith("text/event-stream")
+ replay_events = _collect_replay_events(replay_response)
+
+ assert replay_events, "Expected at least one event in SSE replay"
+ replay_types = [e["type"] for e in replay_events]
+ terminal_types = {"response.completed", "response.failed", "response.incomplete"}
+ assert any(t in terminal_types for t in replay_types), (
+ f"SSE replay must include a terminal event, got: {replay_types}"
+ )
+ # Replay must start from the beginning (response.created should be present)
+ assert "response.created" in replay_types, f"SSE replay must include response.created, got: {replay_types}"
+
+
+def test_c6_non_stored_stream_no_get() -> None:
+ """T3 — store=False, bg=False, stream=True: GET returns HTTP 404.
+
+ _finalize_non_bg_stream must NOT register the execution record when
+ store=False, so a subsequent GET returns 404 (B16 / C6 contract).
+ """
+ client = _build_client()
+
+ with client.stream(
+ "POST",
+ "/responses",
+ json={"model": "gpt-4o-mini", "input": "hello", "stream": True, "store": False, "background": False},
+ ) as create_response:
+ assert create_response.status_code == 200
+ create_events = _collect_replay_events(create_response)
+
+ assert create_events, "Expected at least one SSE event from POST"
+ response_id = create_events[0]["data"]["response"].get("id")
+ assert isinstance(response_id, str)
+
+ get_response = client.get(f"/responses/{response_id}")
+ assert get_response.status_code == 404, (
+ f"store=False stream response must not be retrievable via GET (C6), got {get_response.status_code}"
+ )
+
+
+def test_bg_stream_cancelled_subject_completed() -> None:
+ """T4 — bg+stream response cancelled mid-stream: subject.complete() is called, no hang.
+
+ _finalize_bg_stream must call subject.complete() even when the record's
+ status is 'cancelled', so that live replay subscribers can exit cleanly.
+ """
+ from azure.ai.agentserver.responses._id_generator import IdGenerator
+ from tests._helpers import poll_until
+
+ gate_started: list[bool] = []
+
+ def _blocking_bg_stream_handler(request: Any, context: Any, cancellation_signal: Any):
+ async def _events():
+ yield {"type": "response.created", "response": {"status": "in_progress", "output": []}}
+ gate_started.append(True)
+ # Block until cancelled
+ while not cancellation_signal.is_set():
+ import asyncio as _asyncio
+
+ await _asyncio.sleep(0.01)
+
+ return _events()
+
+ import threading
+
+ _app = ResponsesAgentServerHost()
+ _app.create_handler(_blocking_bg_stream_handler) # type: ignore[arg-type] # yields raw dicts to test coercion
+ app = _app
+
+ response_id = IdGenerator.new_response_id()
+ stream_events_received: list[str] = []
+ stream_done = threading.Event()
+
+ def _stream_thread() -> None:
+ from starlette.testclient import TestClient as _TC
+
+ _client = _TC(app)
+ with _client.stream(
+ "POST",
+ "/responses",
+ json={
+ "response_id": response_id,
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": True,
+ "store": True,
+ "background": True,
+ },
+ ) as resp:
+ for line in resp.iter_lines():
+ stream_events_received.append(line)
+ stream_done.set()
+
+ t = threading.Thread(target=_stream_thread, daemon=True)
+ t.start()
+
+ # Wait for handler to start
+ ok, _ = poll_until(
+ lambda: bool(gate_started),
+ timeout_s=5.0,
+ interval_s=0.02,
+ label="wait for bg stream handler to start",
+ )
+ assert ok, "Handler did not start within timeout"
+
+ # Cancel the response
+ from starlette.testclient import TestClient as _TC2
+
+ _cancel_client = _TC2(app)
+ cancel_resp = _cancel_client.post(f"/responses/{response_id}/cancel")
+ assert cancel_resp.status_code == 200
+
+ # The SSE stream should terminate (subject.complete() unblocks the iterator)
+ assert stream_done.wait(timeout=5.0), (
+ "_finalize_bg_stream must call subject.complete() so SSE stream terminates after cancel"
+ )
+ t.join(timeout=1.0)
+
+
+# ---------------------------------------------------------------------------
+# Missing protocol parity tests (ported from GetResponseProtocolTests)
+# ---------------------------------------------------------------------------
+
+
+def _cancellable_bg_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Handler that blocks until cancelled — keeps bg response in_progress."""
+
+ async def _events():
+ yield {
+ "type": "response.created",
+ "response": {"status": "in_progress", "output": []},
+ }
+ while not cancellation_signal.is_set():
+ await asyncio.sleep(0.01)
+
+ return _events()
+
+
+def test_get__in_progress_bg_response_returns_200() -> None:
+ """GET on a background response that is still in_progress returns 200 with status in_progress."""
+ app = ResponsesAgentServerHost()
+ app.create_handler(_cancellable_bg_handler) # type: ignore[arg-type] # yields raw dicts to test coercion
+ client = TestClient(app)
+
+ create = client.post(
+ "/responses",
+ json={"model": "test", "input": "hello", "stream": False, "store": True, "background": True},
+ )
+ assert create.status_code == 200
+ response_id = create.json()["id"]
+
+ get = client.get(f"/responses/{response_id}")
+ assert get.status_code == 200
+ assert get.json()["status"] == "in_progress"
+
+ # Clean up
+ client.post(f"/responses/{response_id}/cancel")
+
+
+def test_get__cancelled_bg_returns_200_with_cancelled_status() -> None:
+ """GET on a cancelled background response returns 200 with status=cancelled and empty output."""
+ app = ResponsesAgentServerHost()
+ app.create_handler(_cancellable_bg_handler) # type: ignore[arg-type] # yields raw dicts to test coercion
+ client = TestClient(app)
+
+ create = client.post(
+ "/responses",
+ json={"model": "test", "input": "hello", "stream": False, "store": True, "background": True},
+ )
+ assert create.status_code == 200
+ response_id = create.json()["id"]
+
+ cancel = client.post(f"/responses/{response_id}/cancel")
+ assert cancel.status_code == 200
+
+ get = client.get(f"/responses/{response_id}")
+ assert get.status_code == 200
+ payload = get.json()
+ assert payload["status"] == "cancelled"
+ assert payload.get("output", []) == [], "Cancelled response must have 0 output items"
+
+
+def test_get__sse_replay_starting_after_max_returns_no_events() -> None:
+ """SSE replay with starting_after >= max sequence number returns an empty event stream."""
+ client = _build_client()
+ response_id = _create_background_streaming_and_get_response_id(client)
+
+ # starting_after=9999 — way beyond any sequence in a simple handler
+ with client.stream(
+ "GET",
+ f"/responses/{response_id}?stream=true&starting_after=9999",
+ ) as replay:
+ assert replay.status_code == 200
+ events = _collect_replay_events(replay)
+
+ assert events == [], "Replay with starting_after >= max seq must return 0 events"
+
+
+def test_get__sse_replay_store_false_returns_404() -> None:
+ """SSE replay on a store=false response returns 404."""
+ client = _build_client()
+
+ create = client.post(
+ "/responses",
+ json={"model": "test", "input": "hello", "stream": False, "store": False},
+ )
+ assert create.status_code == 200
+ response_id = create.json()["id"]
+
+ with client.stream("GET", f"/responses/{response_id}?stream=true") as replay:
+ assert replay.status_code == 404
+
+
+def test_get__stream_false_returns_json_snapshot() -> None:
+ """Explicit ?stream=false returns a JSON snapshot, not SSE."""
+ client = _build_client()
+ response_id = _create_background_streaming_and_get_response_id(client)
+
+ get = client.get(f"/responses/{response_id}?stream=false")
+ assert get.status_code == 200
+ assert get.headers.get("content-type", "").startswith("application/json")
+ assert get.json()["id"] == response_id
+
+
+def test_get__sse_replay_has_correct_sequence_numbers() -> None:
+ """SSE replay produces monotonically increasing sequence numbers starting from 0."""
+ client = _build_client()
+ response_id = _create_background_streaming_and_get_response_id(client)
+
+ with client.stream("GET", f"/responses/{response_id}?stream=true") as replay:
+ assert replay.status_code == 200
+ events = _collect_replay_events(replay)
+
+ assert len(events) >= 2, "Expected at least 2 replayed SSE events"
+ seq_nums = [e["data"].get("sequence_number") for e in events]
+ assert seq_nums[0] == 0, "First sequence_number must be 0"
+ for i in range(1, len(seq_nums)):
+ assert seq_nums[i] > seq_nums[i - 1], (
+ f"Sequence numbers not monotonically increasing at index {i}: {seq_nums[i - 1]} → {seq_nums[i]}"
+ )
+
+
+def test_get__accept_sse_without_stream_true_returns_json_snapshot() -> None:
+ """Accept: text/event-stream WITHOUT ?stream=true returns JSON snapshot — Accept header is NOT a trigger for SSE.
+
+ Ported from GetResponseProtocolTests.GET_WithAcceptSse_WithoutStreamTrue_Returns200_JsonSnapshot.
+ """
+ client = _build_client()
+ response_id = _create_background_streaming_and_get_response_id(client)
+
+ get = client.get(
+ f"/responses/{response_id}",
+ headers={"Accept": "text/event-stream"},
+ )
+ assert get.status_code == 200
+ content_type = get.headers.get("content-type", "")
+ assert content_type.startswith("application/json"), (
+ f"Expected application/json when Accept: text/event-stream but no ?stream=true, got {content_type!r}"
+ )
+ assert get.json()["id"] == response_id
+
+
+def test_get__store_false_returns_404() -> None:
+ """GET on a store=false response returns 404.
+
+ Ported from GetResponseProtocolTests.GET_StoreFalse_Returns404.
+ """
+ client = _build_client()
+
+ create = client.post(
+ "/responses",
+ json={"model": "test", "input": "hello", "stream": False, "store": False},
+ )
+ assert create.status_code == 200
+ response_id = create.json()["id"]
+
+ get = client.get(f"/responses/{response_id}")
+ assert get.status_code == 404
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_handler_driven_persistence.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_handler_driven_persistence.py
new file mode 100644
index 000000000000..3760a2681e4f
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_handler_driven_persistence.py
@@ -0,0 +1,437 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Protocol conformance tests for handler-driven persistence (US1).
+
+Verifies FR-001 (no persistence before handler runs),
+FR-002 (bg=true: Create at response.created, Update at terminal),
+FR-003 (bg=false: single Create at terminal state).
+
+Python port of HandlerDrivenPersistenceTests.
+
+NOTE: The reference tests use a RecordingProvider (spy) to verify exactly when
+CreateResponseAsync and UpdateResponseAsync are called. The Python SDK uses
+the in-memory FoundryStorageProvider (default) which we probe via GET to
+confirm persistence timing.
+"""
+
+from __future__ import annotations
+
+import asyncio
+import json as _json
+from typing import Any
+
+import pytest
+
+from azure.ai.agentserver.responses import ResponsesAgentServerHost
+from azure.ai.agentserver.responses._id_generator import IdGenerator
+from azure.ai.agentserver.responses.streaming._event_stream import ResponseEventStream
+
+# ════════════════════════════════════════════════════════════
+# Async ASGI client
+# ════════════════════════════════════════════════════════════
+
+
+class _AsgiResponse:
+ def __init__(self, status_code: int, body: bytes, headers: list[tuple[bytes, bytes]]) -> None:
+ self.status_code = status_code
+ self.body = body
+ self.headers = headers
+
+ def json(self) -> Any:
+ return _json.loads(self.body)
+
+
+class _AsyncAsgiClient:
+ def __init__(self, app: Any) -> None:
+ self._app = app
+
+ @staticmethod
+ def _build_scope(method: str, path: str, body: bytes) -> dict[str, Any]:
+ headers: list[tuple[bytes, bytes]] = []
+ query_string = b""
+ if "?" in path:
+ path, qs = path.split("?", 1)
+ query_string = qs.encode()
+ if body:
+ headers = [
+ (b"content-type", b"application/json"),
+ (b"content-length", str(len(body)).encode()),
+ ]
+ return {
+ "type": "http",
+ "asgi": {"version": "3.0"},
+ "http_version": "1.1",
+ "method": method,
+ "headers": headers,
+ "scheme": "http",
+ "path": path,
+ "raw_path": path.encode(),
+ "query_string": query_string,
+ "server": ("localhost", 80),
+ "client": ("127.0.0.1", 123),
+ "root_path": "",
+ }
+
+ async def request(self, method: str, path: str, *, json_body: dict[str, Any] | None = None) -> _AsgiResponse:
+ body = _json.dumps(json_body).encode() if json_body else b""
+ scope = self._build_scope(method, path, body)
+ status_code: int | None = None
+ response_headers: list[tuple[bytes, bytes]] = []
+ body_parts: list[bytes] = []
+ request_sent = False
+ response_done = asyncio.Event()
+
+ async def receive() -> dict[str, Any]:
+ nonlocal request_sent
+ if not request_sent:
+ request_sent = True
+ return {"type": "http.request", "body": body, "more_body": False}
+ await response_done.wait()
+ return {"type": "http.disconnect"}
+
+ async def send(message: dict[str, Any]) -> None:
+ nonlocal status_code, response_headers
+ if message["type"] == "http.response.start":
+ status_code = message["status"]
+ response_headers = message.get("headers", [])
+ elif message["type"] == "http.response.body":
+ chunk = message.get("body", b"")
+ if chunk:
+ body_parts.append(chunk)
+ if not message.get("more_body", False):
+ response_done.set()
+
+ await self._app(scope, receive, send)
+ assert status_code is not None
+ return _AsgiResponse(status_code=status_code, body=b"".join(body_parts), headers=response_headers)
+
+ async def get(self, path: str) -> _AsgiResponse:
+ return await self.request("GET", path)
+
+ async def post(self, path: str, *, json_body: dict[str, Any] | None = None) -> _AsgiResponse:
+ return await self.request("POST", path, json_body=json_body)
+
+
+# ════════════════════════════════════════════════════════════
+# Helpers
+# ════════════════════════════════════════════════════════════
+
+
+def _build_client(handler: Any) -> _AsyncAsgiClient:
+ app = ResponsesAgentServerHost()
+ app.create_handler(handler)
+ return _AsyncAsgiClient(app)
+
+
+async def _ensure_task_done(task: asyncio.Task[Any], handler: Any, timeout: float = 5.0) -> None:
+ for attr in vars(handler):
+ obj = getattr(handler, attr, None)
+ if isinstance(obj, asyncio.Event):
+ obj.set()
+ if not task.done():
+ try:
+ await asyncio.wait_for(task, timeout=timeout)
+ except (asyncio.TimeoutError, Exception):
+ task.cancel()
+ try:
+ await task
+ except (asyncio.CancelledError, Exception):
+ pass
+
+
+async def _wait_for_background_completion(client: _AsyncAsgiClient, response_id: str, timeout: float = 5.0) -> None:
+ for _ in range(int(timeout / 0.05)):
+ resp = await client.get(f"/responses/{response_id}")
+ if resp.status_code == 404:
+ await asyncio.sleep(0.05)
+ continue
+ doc = resp.json()
+ if doc.get("status") in ("completed", "failed", "incomplete", "cancelled"):
+ return
+ await asyncio.sleep(0.05)
+ raise TimeoutError(f"Response {response_id} did not reach terminal state within {timeout}s")
+
+
+def _make_delaying_handler():
+ """Handler that signals when started, then waits for a gate before yielding any events.
+
+ Used to test FR-001: no persistence before handler runs.
+ """
+ started = asyncio.Event()
+ gate = asyncio.Event()
+
+ def handler(request: Any, context: Any, cancellation_signal: Any):
+ async def _events():
+ started.set()
+ await gate.wait()
+ stream = ResponseEventStream(
+ response_id=context.response_id,
+ model=getattr(request, "model", None),
+ )
+ yield stream.emit_created()
+ yield stream.emit_completed()
+
+ return _events()
+
+ handler.started = started
+ handler.gate = gate
+ return handler
+
+
+def _make_simple_handler():
+ """Handler that emits created + completed immediately."""
+
+ def handler(request: Any, context: Any, cancellation_signal: Any):
+ async def _events():
+ stream = ResponseEventStream(
+ response_id=context.response_id,
+ model=getattr(request, "model", None),
+ )
+ yield stream.emit_created()
+ yield stream.emit_completed()
+
+ return _events()
+
+ return handler
+
+
+# ════════════════════════════════════════════════════════════
+# T015: bg+stream — provider NOT called until response.created
+#
+# FR-001: No persistence before handler emits response.created.
+# Verifies that GET returns 404 before response.created is emitted.
+# ════════════════════════════════════════════════════════════
+
+
+@pytest.mark.asyncio
+async def test_bg_stream_not_persisted_until_response_created() -> None:
+ """T015/FR-001 — bg+stream: response not accessible before response.created."""
+ handler = _make_delaying_handler()
+ client = _build_client(handler)
+ response_id = IdGenerator.new_response_id()
+
+ post_task = asyncio.create_task(
+ client.post(
+ "/responses",
+ json_body={
+ "response_id": response_id,
+ "model": "test",
+ "background": True,
+ "stream": True,
+ },
+ )
+ )
+ try:
+ await asyncio.wait_for(handler.started.wait(), timeout=5.0)
+ await asyncio.sleep(0.1)
+
+ # GET before response.created — should NOT be accessible yet
+ get_resp = await client.get(f"/responses/{response_id}")
+ assert get_resp.status_code == 404, (
+ f"FR-001: response should not be persisted before response.created, got status {get_resp.status_code}"
+ )
+
+ # Release handler → response.created will be yielded
+ handler.gate.set()
+ await asyncio.wait_for(post_task, timeout=5.0)
+ finally:
+ await _ensure_task_done(post_task, handler)
+
+ # After handler completes, response should be accessible
+ await _wait_for_background_completion(client, response_id)
+ get_after = await client.get(f"/responses/{response_id}")
+ assert get_after.status_code == 200
+
+
+# ════════════════════════════════════════════════════════════
+# T016: bg+nostream — provider NOT called until response.created
+# ════════════════════════════════════════════════════════════
+
+
+@pytest.mark.asyncio
+async def test_bg_nostream_not_persisted_until_response_created() -> None:
+ """T016/FR-001 — bg+nostream: response not accessible before response.created."""
+ handler = _make_delaying_handler()
+ client = _build_client(handler)
+ response_id = IdGenerator.new_response_id()
+
+ post_task = asyncio.create_task(
+ client.post(
+ "/responses",
+ json_body={
+ "response_id": response_id,
+ "model": "test",
+ "background": True,
+ },
+ )
+ )
+ try:
+ await asyncio.wait_for(handler.started.wait(), timeout=5.0)
+ await asyncio.sleep(0.1)
+
+ # GET before response.created — should NOT be accessible
+ get_resp = await client.get(f"/responses/{response_id}")
+ assert get_resp.status_code == 404, (
+ f"FR-001: response should not be persisted before response.created, got status {get_resp.status_code}"
+ )
+
+ # Release handler
+ handler.gate.set()
+ await asyncio.wait_for(post_task, timeout=5.0)
+ finally:
+ await _ensure_task_done(post_task, handler)
+
+ # After handler completes, response should be accessible
+ await _wait_for_background_completion(client, response_id)
+ get_after = await client.get(f"/responses/{response_id}")
+ assert get_after.status_code == 200
+
+
+# ════════════════════════════════════════════════════════════
+# T017: bg=true — exactly 1 Create + 1 Update
+#
+# FR-002: bg mode persists Create at response.created, Update at terminal.
+# We verify via GET that the response is accessible during in-progress
+# and that after completion the status is updated.
+# ════════════════════════════════════════════════════════════
+
+
+@pytest.mark.asyncio
+async def test_bg_mode_response_accessible_during_and_after_handler() -> None:
+ """T017/FR-002 — bg mode: response accessible at in_progress and completed."""
+ started = asyncio.Event()
+ release = asyncio.Event()
+
+ def handler(request: Any, context: Any, cancellation_signal: Any):
+ async def _events():
+ stream = ResponseEventStream(
+ response_id=context.response_id,
+ model=getattr(request, "model", None),
+ )
+ yield stream.emit_created()
+ yield stream.emit_in_progress()
+ started.set()
+ while not release.is_set():
+ if cancellation_signal.is_set():
+ return
+ await asyncio.sleep(0.01)
+ yield stream.emit_completed()
+
+ return _events()
+
+ handler.started = started
+ handler.release = release
+
+ client = _build_client(handler)
+ response_id = IdGenerator.new_response_id()
+
+ post_task = asyncio.create_task(
+ client.post(
+ "/responses",
+ json_body={
+ "response_id": response_id,
+ "model": "test",
+ "background": True,
+ },
+ )
+ )
+ try:
+ await asyncio.wait_for(started.wait(), timeout=5.0)
+
+ # During handler execution — response should be accessible as in_progress
+ get_mid = await client.get(f"/responses/{response_id}")
+ assert get_mid.status_code == 200
+ assert get_mid.json()["status"] == "in_progress"
+
+ # Release handler
+ release.set()
+ await asyncio.wait_for(post_task, timeout=5.0)
+ finally:
+ for attr in ("started", "release"):
+ obj = getattr(handler, attr, None)
+ if isinstance(obj, asyncio.Event):
+ obj.set()
+ if not post_task.done():
+ post_task.cancel()
+ try:
+ await post_task
+ except (asyncio.CancelledError, Exception):
+ pass
+
+ # After completion — response updated to completed
+ await _wait_for_background_completion(client, response_id)
+ get_final = await client.get(f"/responses/{response_id}")
+ assert get_final.status_code == 200
+ assert get_final.json()["status"] == "completed"
+
+
+# ════════════════════════════════════════════════════════════
+# T018: bg=false — single Create at terminal (no mid-flight GET)
+#
+# FR-003: non-bg mode does a single Create at terminal. Not accessible mid-flight.
+# ════════════════════════════════════════════════════════════
+
+
+@pytest.mark.asyncio
+async def test_non_bg_not_accessible_until_terminal() -> None:
+ """T018/FR-003 — non-bg: response only accessible after terminal state."""
+ started = asyncio.Event()
+ release = asyncio.Event()
+
+ def handler(request: Any, context: Any, cancellation_signal: Any):
+ async def _events():
+ stream = ResponseEventStream(
+ response_id=context.response_id,
+ model=getattr(request, "model", None),
+ )
+ yield stream.emit_created()
+ yield stream.emit_in_progress()
+ started.set()
+ await release.wait()
+ yield stream.emit_completed()
+
+ return _events()
+
+ handler.started = started
+ handler.release = release
+
+ client = _build_client(handler)
+ response_id = IdGenerator.new_response_id()
+
+ post_task = asyncio.create_task(
+ client.post(
+ "/responses",
+ json_body={
+ "response_id": response_id,
+ "model": "test",
+ "stream": False,
+ "background": False,
+ },
+ )
+ )
+ try:
+ await asyncio.wait_for(started.wait(), timeout=5.0)
+
+ # During non-bg handler execution — response should NOT be accessible
+ get_mid = await client.get(f"/responses/{response_id}")
+ assert get_mid.status_code == 404, (
+ f"FR-003: non-bg response should not be accessible mid-flight, got {get_mid.status_code}"
+ )
+
+ release.set()
+ post_resp = await asyncio.wait_for(post_task, timeout=5.0)
+ assert post_resp.status_code == 200
+ finally:
+ started.set()
+ release.set()
+ if not post_task.done():
+ post_task.cancel()
+ try:
+ await post_task
+ except (asyncio.CancelledError, Exception):
+ pass
+
+ # After terminal — response is accessible as completed
+ get_after = await client.get(f"/responses/{response_id}")
+ assert get_after.status_code == 200
+ assert get_after.json()["status"] == "completed"
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_input_items_endpoint.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_input_items_endpoint.py
new file mode 100644
index 000000000000..60c127365a38
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_input_items_endpoint.py
@@ -0,0 +1,431 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Contract tests for GET /responses/{response_id}/input_items behavior."""
+
+from __future__ import annotations
+
+from typing import Any
+
+from starlette.testclient import TestClient
+
+from azure.ai.agentserver.responses import ResponsesAgentServerHost
+
+
+def _noop_response_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Minimal handler used to wire the hosting surface in contract tests."""
+
+ async def _events():
+ if False: # pragma: no cover - required to keep async-generator shape.
+ yield None
+
+ return _events()
+
+
+def _build_client() -> TestClient:
+ app = ResponsesAgentServerHost()
+ app.create_handler(_noop_response_handler)
+ return TestClient(app)
+
+
+def _message_input(item_id: str, text: str) -> dict[str, Any]:
+ return {
+ "id": item_id,
+ "type": "message",
+ "role": "user",
+ "content": [{"type": "input_text", "text": text}],
+ }
+
+
+def _create_response(
+ client: TestClient,
+ *,
+ input_items: list[dict[str, Any]] | None,
+ store: bool = True,
+ background: bool = False,
+ previous_response_id: str | None = None,
+) -> str:
+ payload: dict[str, Any] = {
+ "model": "gpt-4o-mini",
+ "stream": False,
+ "store": store,
+ "background": background,
+ "input": input_items if input_items is not None else [],
+ }
+ if previous_response_id is not None:
+ payload["previous_response_id"] = previous_response_id
+
+ create_response = client.post("/responses", json=payload)
+ assert create_response.status_code == 200
+ response_id = create_response.json().get("id")
+ assert isinstance(response_id, str)
+ return response_id
+
+
+def _assert_error_envelope(response: Any, expected_status: int) -> dict[str, Any]:
+ assert response.status_code == expected_status
+ try:
+ payload = response.json()
+ except Exception as exc: # pragma: no cover - defensive diagnostics for routing regressions.
+ raise AssertionError(
+ f"Expected JSON error envelope with status {expected_status}, got non-JSON body: {response.text!r}"
+ ) from exc
+ assert isinstance(payload.get("error"), dict)
+ assert "message" in payload["error"]
+ assert "type" in payload["error"]
+ assert "param" in payload["error"]
+ assert "code" in payload["error"]
+ return payload
+
+
+def test_input_items_returns_200_with_items_and_paged_fields() -> None:
+ client = _build_client()
+
+ response_id = _create_response(
+ client,
+ input_items=[
+ _message_input("msg_001", "one"),
+ _message_input("msg_002", "two"),
+ _message_input("msg_003", "three"),
+ ],
+ )
+
+ response = client.get(f"/responses/{response_id}/input_items")
+ assert response.status_code == 200
+ payload = response.json()
+
+ assert payload.get("object") == "list"
+ assert isinstance(payload.get("data"), list)
+ assert len(payload["data"]) == 3
+ # Items are converted to OutputItem with generated IDs (msg_ prefix)
+ for item in payload["data"]:
+ assert item.get("id", "").startswith("msg_")
+ assert item.get("type") == "message"
+ assert payload.get("first_id") is not None
+ assert payload.get("last_id") is not None
+ assert payload.get("has_more") is False
+
+
+def test_input_items_returns_200_with_empty_data() -> None:
+ client = _build_client()
+
+ response_id = _create_response(client, input_items=[])
+
+ response = client.get(f"/responses/{response_id}/input_items")
+ assert response.status_code == 200
+ payload = response.json()
+
+ assert payload.get("object") == "list"
+ assert payload.get("data") == []
+ assert payload.get("has_more") is False
+
+
+def test_input_items_returns_400_for_invalid_limit() -> None:
+ client = _build_client()
+
+ response_id = _create_response(client, input_items=[_message_input("msg_001", "one")])
+
+ low_limit = client.get(f"/responses/{response_id}/input_items?limit=0")
+ low_payload = _assert_error_envelope(low_limit, 400)
+ assert low_payload["error"].get("type") == "invalid_request_error"
+
+ high_limit = client.get(f"/responses/{response_id}/input_items?limit=101")
+ high_payload = _assert_error_envelope(high_limit, 400)
+ assert high_payload["error"].get("type") == "invalid_request_error"
+
+
+def test_input_items_returns_400_for_invalid_order() -> None:
+ client = _build_client()
+
+ response_id = _create_response(client, input_items=[_message_input("msg_001", "one")])
+
+ response = client.get(f"/responses/{response_id}/input_items?order=invalid")
+ payload = _assert_error_envelope(response, 400)
+ assert payload["error"].get("type") == "invalid_request_error"
+
+
+def test_input_items_returns_400_for_deleted_response() -> None:
+ client = _build_client()
+
+ response_id = _create_response(client, input_items=[_message_input("msg_001", "one")])
+
+ delete_response = client.delete(f"/responses/{response_id}")
+ assert delete_response.status_code == 200
+
+ response = client.get(f"/responses/{response_id}/input_items")
+ payload = _assert_error_envelope(response, 400)
+ assert payload["error"].get("type") == "invalid_request_error"
+ assert "deleted" in (payload["error"].get("message") or "").lower()
+
+
+def test_input_items_returns_404_for_missing_or_non_stored_response() -> None:
+ client = _build_client()
+
+ missing_response = client.get("/responses/resp_does_not_exist/input_items")
+ missing_payload = _assert_error_envelope(missing_response, 404)
+ assert missing_payload["error"].get("type") == "invalid_request_error"
+
+ non_stored_id = _create_response(
+ client,
+ input_items=[_message_input("msg_001", "one")],
+ store=False,
+ )
+ non_stored_response = client.get(f"/responses/{non_stored_id}/input_items")
+ non_stored_payload = _assert_error_envelope(non_stored_response, 404)
+ assert non_stored_payload["error"].get("type") == "invalid_request_error"
+
+
+def test_input_items_default_limit_is_20_and_has_more_when_truncated() -> None:
+ client = _build_client()
+
+ input_items = [_message_input(f"msg_{index:03d}", f"item-{index:03d}") for index in range(1, 26)]
+ response_id = _create_response(client, input_items=input_items)
+
+ response = client.get(f"/responses/{response_id}/input_items")
+ assert response.status_code == 200
+ payload = response.json()
+
+ assert payload.get("object") == "list"
+ assert isinstance(payload.get("data"), list)
+ assert len(payload["data"]) == 20
+ assert payload.get("has_more") is True
+ # first_id and last_id should be valid generated msg_ IDs
+ assert payload.get("first_id", "").startswith("msg_")
+ assert payload.get("last_id", "").startswith("msg_")
+
+
+def test_input_items_supports_order_and_cursor_pagination() -> None:
+ client = _build_client()
+
+ response_id = _create_response(
+ client,
+ input_items=[
+ _message_input("msg_001", "one"),
+ _message_input("msg_002", "two"),
+ _message_input("msg_003", "three"),
+ _message_input("msg_004", "four"),
+ ],
+ )
+
+ # Ascending order, limit=2
+ asc_response = client.get(f"/responses/{response_id}/input_items?order=asc&limit=2")
+ assert asc_response.status_code == 200
+ asc_payload = asc_response.json()
+ assert len(asc_payload.get("data", [])) == 2
+ assert asc_payload.get("has_more") is True
+ first_id = asc_payload["data"][0].get("id")
+ second_id = asc_payload["data"][1].get("id")
+
+ # Cursor-based: after second item should return items 3 & 4
+ after_response = client.get(f"/responses/{response_id}/input_items?order=asc&after={second_id}")
+ assert after_response.status_code == 200
+ after_payload = after_response.json()
+ assert len(after_payload.get("data", [])) == 2
+
+ # Cursor-based: before last of all items should return first 3
+ all_asc = client.get(f"/responses/{response_id}/input_items?order=asc")
+ all_ids = [item.get("id") for item in all_asc.json().get("data", [])]
+ before_response = client.get(f"/responses/{response_id}/input_items?order=asc&before={all_ids[-1]}")
+ assert before_response.status_code == 200
+ before_payload = before_response.json()
+ assert len(before_payload.get("data", [])) == 3
+
+
+def test_input_items_returns_history_plus_current_input_in_desc_order() -> None:
+ client = _build_client()
+
+ first_response_id = _create_response(
+ client,
+ input_items=[
+ _message_input("msg_hist_001", "history-1"),
+ _message_input("msg_hist_002", "history-2"),
+ ],
+ )
+
+ second_response_id = _create_response(
+ client,
+ input_items=[_message_input("msg_curr_001", "current-1")],
+ previous_response_id=first_response_id,
+ )
+
+ response = client.get(f"/responses/{second_response_id}/input_items?order=desc")
+ assert response.status_code == 200
+ payload = response.json()
+
+ # Should have 3 items: 2 from history + 1 current.
+ # Items have generated IDs; verify count and structure.
+ assert len(payload.get("data", [])) == 3
+ for item in payload["data"]:
+ assert item.get("id", "").startswith("msg_")
+ assert item.get("type") == "message"
+ assert payload.get("has_more") is False
+
+
+# ---------------------------------------------------------------------------
+# Task 6.1 — input_items sourced from parsed model
+# ---------------------------------------------------------------------------
+
+
+def test_input_items_string_input_treated_as_message() -> None:
+ """T1: string input is expanded to an ItemMessage and converted to OutputItemMessage."""
+ client = _build_client()
+
+ # Send a create request where 'input' is a plain string, not a list.
+ create_response = client.post(
+ "/responses",
+ json={"model": "gpt-4o-mini", "stream": False, "store": True, "input": "hello"},
+ )
+ assert create_response.status_code == 200
+ response_id = create_response.json().get("id")
+ assert isinstance(response_id, str)
+
+ response = client.get(f"/responses/{response_id}/input_items")
+ assert response.status_code == 200
+ payload = response.json()
+ # String input is expanded to a single ItemMessage → OutputItemMessage
+ assert len(payload.get("data", [])) == 1
+ assert payload["data"][0].get("type") == "message"
+ assert payload["data"][0].get("role") == "user"
+
+
+def test_input_items_list_input_preserved() -> None:
+ """T2: list input items are preserved and retrievable via GET /input_items."""
+ client = _build_client()
+
+ item = {"id": "msg_x01", "type": "message", "role": "user", "content": [{"type": "input_text", "text": "hi"}]}
+ response_id = _create_response(client, input_items=[item])
+
+ response = client.get(f"/responses/{response_id}/input_items?order=asc")
+ assert response.status_code == 200
+ payload = response.json()
+ assert len(payload.get("data", [])) == 1
+ # ID is generated (not preserved from input), but type and content are
+ assert payload["data"][0].get("id", "").startswith("msg_")
+ assert payload["data"][0].get("type") == "message"
+ assert payload["data"][0].get("role") == "user"
+
+
+def test_previous_response_id_propagated() -> None:
+ """T3: previous_response_id is propagated so input_items chain walk works."""
+ client = _build_client()
+
+ parent_id = _create_response(
+ client,
+ input_items=[_message_input("msg_parent_001", "parent-item")],
+ )
+ child_id = _create_response(
+ client,
+ input_items=[_message_input("msg_child_001", "child-item")],
+ previous_response_id=parent_id,
+ )
+
+ response = client.get(f"/responses/{child_id}/input_items?order=asc")
+ assert response.status_code == 200
+ payload = response.json()
+ # Both parent and child items appear (2 total), all with generated msg_ IDs
+ assert len(payload.get("data", [])) == 2
+ for item in payload["data"]:
+ assert item.get("id", "").startswith("msg_")
+ assert item.get("type") == "message"
+
+
+def test_empty_previous_response_id_handled() -> None:
+ """T4: an empty string for previous_response_id should not raise; treated as absent."""
+ client = _build_client()
+
+ create_response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "stream": False,
+ "store": True,
+ "input": [],
+ "previous_response_id": "",
+ },
+ )
+ # The server should accept the request (empty string treated as absent).
+ assert create_response.status_code == 200
+ response_id = create_response.json().get("id")
+ assert isinstance(response_id, str)
+
+ response = client.get(f"/responses/{response_id}/input_items")
+ assert response.status_code == 200
+
+
+# ---------------------------------------------------------------------------
+# Task 6.2 — provider/runtime_state branch alignment + pagination edge cases
+# ---------------------------------------------------------------------------
+
+
+def test_input_items_in_flight_fallback_to_runtime() -> None:
+ """T3: background response stores input_items that are retrievable via GET.
+
+ Uses a fast handler so the TestClient doesn't block. After the background
+ response completes, input items are available from either the provider or
+ the runtime_state fallback.
+ """
+ from typing import Any as _Any
+
+ def _fast_handler(request: _Any, context: _Any, cancellation_signal: _Any):
+ async def _events():
+ yield {"type": "response.created", "response": {"status": "in_progress", "output": []}}
+
+ return _events()
+
+ _app = ResponsesAgentServerHost()
+ _app.create_handler(_fast_handler)
+ client = TestClient(_app, raise_server_exceptions=False)
+
+ item = _message_input("inflight_msg_001", "in-flight-content")
+ payload: Any = {
+ "model": "gpt-4o-mini",
+ "stream": False,
+ "store": True,
+ "background": True,
+ "input": [item],
+ }
+ create_response = client.post("/responses", json=payload)
+ assert create_response.status_code == 200
+ response_id = create_response.json().get("id")
+ assert isinstance(response_id, str)
+
+ # GET /input_items — items are returned from either runtime_state or provider
+ items_response = client.get(f"/responses/{response_id}/input_items")
+ assert items_response.status_code == 200
+ items_payload = items_response.json()
+ assert items_payload.get("object") == "list"
+ assert len(items_payload.get("data", [])) == 1
+ assert items_payload["data"][0].get("id", "").startswith("msg_")
+ assert items_payload["data"][0].get("type") == "message"
+
+
+def test_input_items_limit_boundary_1() -> None:
+ """T4: limit=1 returns exactly one item."""
+ client = _build_client()
+
+ response_id = _create_response(
+ client,
+ input_items=[
+ _message_input("msg_a", "a"),
+ _message_input("msg_b", "b"),
+ ],
+ )
+
+ response = client.get(f"/responses/{response_id}/input_items?limit=1")
+ assert response.status_code == 200
+ payload = response.json()
+ assert len(payload.get("data", [])) == 1
+ assert payload.get("has_more") is True
+
+
+def test_input_items_limit_boundary_100() -> None:
+ """T5: limit=100 returns at most 100 items."""
+ client = _build_client()
+
+ input_items = [_message_input(f"msg_{i:03d}", f"item-{i}") for i in range(1, 51)]
+ response_id = _create_response(client, input_items=input_items)
+
+ response = client.get(f"/responses/{response_id}/input_items?order=asc&limit=100")
+ assert response.status_code == 200
+ payload = response.json()
+ assert len(payload.get("data", [])) == 50
+ assert payload.get("has_more") is False
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_keep_alive.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_keep_alive.py
new file mode 100644
index 000000000000..19ce47f2c0e6
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_keep_alive.py
@@ -0,0 +1,212 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Contract tests for SSE keep-alive comment frames during streaming."""
+
+from __future__ import annotations
+
+import asyncio
+import json
+from typing import Any
+
+from starlette.testclient import TestClient
+
+from azure.ai.agentserver.responses import ResponsesAgentServerHost
+from azure.ai.agentserver.responses._options import ResponsesServerOptions
+
+
+def _make_slow_handler(delay_seconds: float = 0.5, event_count: int = 2):
+ """Factory for a handler that yields events with a configurable delay between them."""
+
+ def _handler(request: Any, context: Any, cancellation_signal: Any):
+ async def _events():
+ for i in range(event_count):
+ if i > 0:
+ await asyncio.sleep(delay_seconds)
+ yield {
+ "type": "response.created" if i == 0 else "response.completed",
+ "response": {
+ "status": "in_progress" if i == 0 else "completed",
+ },
+ }
+
+ return _events()
+
+ return _handler
+
+
+def _noop_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Minimal handler producing an empty stream."""
+
+ async def _events():
+ if False: # pragma: no cover
+ yield None
+
+ return _events()
+
+
+def _build_client(
+ handler: Any | None = None,
+ *,
+ keep_alive_seconds: int | None = None,
+) -> TestClient:
+ options = ResponsesServerOptions(sse_keep_alive_interval_seconds=keep_alive_seconds)
+ app = ResponsesAgentServerHost(options=options)
+ app.create_handler(handler or _noop_handler)
+ return TestClient(app)
+
+
+def _parse_raw_lines(response: Any) -> list[str]:
+ """Collect all raw lines (including SSE comments) from a streaming response."""
+ return list(response.iter_lines())
+
+
+def _collect_events_and_comments(response: Any) -> tuple[list[dict[str, Any]], list[str]]:
+ """Parse SSE stream into (events, comments).
+
+ Events are objects with ``type`` and ``data`` keys.
+ Comments are raw lines starting with ``:``.
+ """
+ events: list[dict[str, Any]] = []
+ comments: list[str] = []
+ current_type: str | None = None
+ current_data: str | None = None
+
+ for line in response.iter_lines():
+ if not line:
+ if current_type is not None:
+ parsed: dict[str, Any] = {}
+ if current_data:
+ parsed = json.loads(current_data)
+ events.append({"type": current_type, "data": parsed})
+ current_type = None
+ current_data = None
+ continue
+
+ if line.startswith(":"):
+ comments.append(line)
+ elif line.startswith("event:"):
+ current_type = line.split(":", 1)[1].strip()
+ elif line.startswith("data:"):
+ current_data = line.split(":", 1)[1].strip()
+
+ if current_type is not None:
+ parsed = json.loads(current_data) if current_data else {}
+ events.append({"type": current_type, "data": parsed})
+
+ return events, comments
+
+
+def _stream_post(client: TestClient, **extra_json: Any) -> Any:
+ """Issue a streaming POST /responses and return the streaming context manager."""
+ payload = {
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": True,
+ "store": True,
+ "background": False,
+ **extra_json,
+ }
+ return client.stream("POST", "/responses", json=payload)
+
+
+# -- Tests: keep-alive disabled (default) -----------------------------------
+
+
+def test_keep_alive__disabled_by_default_no_comment_frames() -> None:
+ """When keep-alive is not configured, no SSE comment frames should appear."""
+ handler = _make_slow_handler(delay_seconds=0.3, event_count=2)
+ client = _build_client(handler)
+
+ with _stream_post(client) as response:
+ assert response.status_code == 200
+ events, comments = _collect_events_and_comments(response)
+
+ assert len(events) >= 1
+ assert len(comments) == 0, f"Expected no keep-alive comments, got: {comments}"
+
+
+# -- Tests: keep-alive enabled -----------------------------------------------
+
+
+def test_keep_alive__enabled_interleaves_comment_frames_during_slow_handler() -> None:
+ """When keep-alive is enabled with a short interval, SSE comment frames
+ should appear between handler events when the handler is slow."""
+ # Handler delays 1.5s between events; keep-alive fires every 0.2s
+ handler = _make_slow_handler(delay_seconds=1.5, event_count=2)
+ client = _build_client(handler, keep_alive_seconds=1)
+
+ with _stream_post(client) as response:
+ assert response.status_code == 200
+ events, comments = _collect_events_and_comments(response)
+
+ # At least one keep-alive comment should have been sent during the 1.5s gap
+ assert len(comments) >= 1, (
+ f"Expected at least one keep-alive comment, got {len(comments)}. Events: {[e['type'] for e in events]}"
+ )
+ # All comments should be the standard keep-alive format
+ for comment in comments:
+ assert comment == ": keep-alive"
+
+
+def test_keep_alive__comment_format_is_sse_compliant() -> None:
+ """Keep-alive frames must be valid SSE comments (colon-prefixed)."""
+ handler = _make_slow_handler(delay_seconds=1.5, event_count=2)
+ client = _build_client(handler, keep_alive_seconds=1)
+
+ with _stream_post(client) as response:
+ assert response.status_code == 200
+ raw_lines = _parse_raw_lines(response)
+
+ keep_alive_lines = [line for line in raw_lines if line.startswith(": keep-alive")]
+ assert len(keep_alive_lines) >= 1
+ for line in keep_alive_lines:
+ # SSE comments start with colon; must not contain "event:" or "data:"
+ assert line.startswith(":")
+ assert "event:" not in line
+ assert "data:" not in line
+
+
+def test_keep_alive__does_not_disrupt_event_stream_integrity() -> None:
+ """Even with keep-alive enabled, all handler events should be present
+ with correct types, ordering, and monotonic sequence numbers."""
+ handler = _make_slow_handler(delay_seconds=1.5, event_count=2)
+ client = _build_client(handler, keep_alive_seconds=1)
+
+ with _stream_post(client) as response:
+ assert response.status_code == 200
+ events, comments = _collect_events_and_comments(response)
+
+ event_types = [e["type"] for e in events]
+ assert "response.created" in event_types
+ # Sequence numbers should still be monotonically increasing
+ seq_nums = [e["data"].get("sequence_number") for e in events if "sequence_number" in e["data"]]
+ assert seq_nums == sorted(seq_nums)
+
+
+def test_keep_alive__no_comments_after_stream_ends() -> None:
+ """After the handler finishes, no trailing keep-alive comments should appear."""
+ handler = _make_slow_handler(delay_seconds=0.0, event_count=2)
+ client = _build_client(handler, keep_alive_seconds=1)
+
+ with _stream_post(client) as response:
+ assert response.status_code == 200
+ events, comments = _collect_events_and_comments(response)
+
+ # Handler is fast (0s delay), so no keep-alive should be needed
+ # (the stream finishes before the 1s interval fires)
+ assert len(events) >= 1
+ # No comments expected since the handler is faster than the keep-alive interval
+ assert len(comments) == 0
+
+
+def test_keep_alive__fallback_stream_does_not_include_keep_alive() -> None:
+ """When the handler yields no events (empty generator → fallback stream),
+ keep-alive should not appear since the fallback stream is immediate."""
+ client = _build_client(_noop_handler, keep_alive_seconds=1)
+
+ with _stream_post(client) as response:
+ assert response.status_code == 200
+ events, comments = _collect_events_and_comments(response)
+
+ assert len(events) >= 1 # fallback auto-generates lifecycle events
+ assert len(comments) == 0
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_output_manipulation_detection.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_output_manipulation_detection.py
new file mode 100644
index 000000000000..68ea43f751b7
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_output_manipulation_detection.py
@@ -0,0 +1,123 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Protocol conformance tests for detecting direct output manipulation (FR-008a).
+
+Validates that when a handler directly adds/removes items from
+ResponseObject.Output without emitting corresponding output_item events,
+the SDK detects the inconsistency and fails with a server error.
+
+Python port of OutputManipulationDetectionTests.
+"""
+
+from __future__ import annotations
+
+import json as _json
+from typing import Any
+
+from starlette.testclient import TestClient
+
+from azure.ai.agentserver.responses import ResponsesAgentServerHost
+from azure.ai.agentserver.responses.streaming._event_stream import ResponseEventStream
+
+# ════════════════════════════════════════════════════════════
+# Helpers
+# ════════════════════════════════════════════════════════════
+
+
+def _collect_sse_events(response: Any) -> list[dict[str, Any]]:
+ events: list[dict[str, Any]] = []
+ current_type: str | None = None
+ current_data: str | None = None
+ for line in response.iter_lines():
+ if not line:
+ if current_type is not None:
+ payload = _json.loads(current_data) if current_data else {}
+ events.append({"type": current_type, "data": payload})
+ current_type = None
+ current_data = None
+ continue
+ if line.startswith("event:"):
+ current_type = line.split(":", 1)[1].strip()
+ elif line.startswith("data:"):
+ current_data = line.split(":", 1)[1].strip()
+ if current_type is not None:
+ payload = _json.loads(current_data) if current_data else {}
+ events.append({"type": current_type, "data": payload})
+ return events
+
+
+def _output_manipulation_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Handler that directly manipulates Output without emitting output_item events.
+
+ This violates FR-008a — the SDK should detect this and fail.
+ """
+
+ async def _events():
+ stream = ResponseEventStream(response_id=context.response_id, model=getattr(request, "model", None))
+ yield stream.emit_created()
+
+ # Directly manipulate the response output list without using builder events
+ # This is an FR-008a violation
+ stream.response.output.append(
+ {
+ "id": "fake-item-id",
+ "type": "message",
+ "role": "assistant",
+ "status": "completed",
+ "content": [],
+ }
+ )
+
+ yield stream.emit_completed()
+
+ return _events()
+
+
+def _build_client(handler: Any) -> TestClient:
+ app = ResponsesAgentServerHost()
+ app.create_handler(handler)
+ return TestClient(app)
+
+
+# ════════════════════════════════════════════════════════════
+# T027: Direct Output manipulation → bad handler error (non-streaming)
+# ════════════════════════════════════════════════════════════
+
+
+def test_direct_output_add_without_builder_events_returns_bad_handler_error() -> None:
+ """FR-008a — direct output manipulation detected → response fails with server_error.
+
+ The handler directly adds an item to response.output without emitting
+ output_item.added. The SDK should detect the inconsistency and fail.
+ """
+ client = _build_client(_output_manipulation_handler)
+
+ response = client.post("/responses", json={"model": "test"})
+
+ # Output manipulation detected → response lifecycle completes as failed
+ assert response.status_code == 200
+ doc = response.json()
+ assert doc["status"] == "failed"
+ error = doc["error"]
+ assert error["code"] == "server_error"
+ assert error["message"] == "An internal server error occurred."
+
+
+# ════════════════════════════════════════════════════════════
+# Streaming variant — direct output manipulation → response.failed emitted
+# ════════════════════════════════════════════════════════════
+
+
+def test_streaming_direct_output_add_emits_failed_event() -> None:
+ """FR-008a — direct output manipulation in streaming mode emits response.failed."""
+ client = _build_client(_output_manipulation_handler)
+
+ with client.stream("POST", "/responses", json={"model": "test", "stream": True}) as resp:
+ assert resp.status_code == 200
+ events = _collect_sse_events(resp)
+
+ # Should have response.created (handler emitted it) then response.failed
+ event_types = [e["type"] for e in events]
+ assert "response.created" in event_types
+ assert "response.failed" in event_types
+ assert "response.completed" not in event_types
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_response_id_auto_stamp.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_response_id_auto_stamp.py
new file mode 100644
index 000000000000..8627357ac1f3
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_response_id_auto_stamp.py
@@ -0,0 +1,308 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Protocol conformance tests for auto-stamping ``response_id`` on output items (US2).
+
+Validates that every output item emitted by the SDK has ``response_id`` matching
+the parent response ID, and that handler-set values take precedence.
+
+Python port of ResponseIdAutoStampProtocolTests.
+"""
+
+from __future__ import annotations
+
+import json as _json
+from typing import Any
+
+from starlette.testclient import TestClient
+
+from azure.ai.agentserver.responses import ResponsesAgentServerHost
+from azure.ai.agentserver.responses.streaming._event_stream import ResponseEventStream
+from tests._helpers import poll_until
+
+# ════════════════════════════════════════════════════════════
+# Helpers
+# ════════════════════════════════════════════════════════════
+
+
+def _collect_sse_events(response: Any) -> list[dict[str, Any]]:
+ """Parse SSE lines from a streaming response."""
+ events: list[dict[str, Any]] = []
+ current_type: str | None = None
+ current_data: str | None = None
+ for line in response.iter_lines():
+ if not line:
+ if current_type is not None:
+ payload = _json.loads(current_data) if current_data else {}
+ events.append({"type": current_type, "data": payload})
+ current_type = None
+ current_data = None
+ continue
+ if line.startswith("event:"):
+ current_type = line.split(":", 1)[1].strip()
+ elif line.startswith("data:"):
+ current_data = line.split(":", 1)[1].strip()
+ if current_type is not None:
+ payload = _json.loads(current_data) if current_data else {}
+ events.append({"type": current_type, "data": payload})
+ return events
+
+
+def _handler_with_output(request: Any, context: Any, cancellation_signal: Any):
+ """Handler that emits a single message output item using the builder."""
+
+ async def _events():
+ stream = ResponseEventStream(response_id=context.response_id, model=getattr(request, "model", None))
+ yield stream.emit_created()
+ msg = stream.add_output_item_message()
+ yield msg.emit_added()
+ text = msg.add_text_content()
+ yield text.emit_added()
+ yield text.emit_delta("Hello")
+ yield text.emit_done()
+ yield msg.emit_content_done(text)
+ yield msg.emit_done()
+ yield stream.emit_completed()
+
+ return _events()
+
+
+def _handler_with_custom_response_id(custom_id: str):
+ """Handler that creates output items and overrides response_id on them."""
+
+ def handler(request: Any, context: Any, cancellation_signal: Any):
+ async def _events():
+ stream = ResponseEventStream(response_id=context.response_id, model=getattr(request, "model", None))
+ yield stream.emit_created()
+
+ # Use the builder to create the item, then modify response_id on the emitted event
+ msg = stream.add_output_item_message()
+ added_event = msg.emit_added()
+ # Override response_id on the item to test handler-set precedence
+ added_event["item"]["response_id"] = custom_id
+ yield added_event
+
+ done_event = msg.emit_done()
+ done_event["item"]["response_id"] = custom_id
+ yield done_event
+
+ yield stream.emit_completed()
+
+ return _events()
+
+ return handler
+
+
+def _handler_with_multiple_outputs(request: Any, context: Any, cancellation_signal: Any):
+ """Handler that emits two message output items."""
+
+ async def _events():
+ stream = ResponseEventStream(response_id=context.response_id, model=getattr(request, "model", None))
+ yield stream.emit_created()
+
+ msg1 = stream.add_output_item_message()
+ yield msg1.emit_added()
+ text1 = msg1.add_text_content()
+ yield text1.emit_added()
+ yield text1.emit_delta("Hello")
+ yield text1.emit_done()
+ yield msg1.emit_content_done(text1)
+ yield msg1.emit_done()
+
+ msg2 = stream.add_output_item_message()
+ yield msg2.emit_added()
+ text2 = msg2.add_text_content()
+ yield text2.emit_added()
+ yield text2.emit_delta("World")
+ yield text2.emit_done()
+ yield msg2.emit_content_done(text2)
+ yield msg2.emit_done()
+
+ yield stream.emit_completed()
+
+ return _events()
+
+
+def _direct_yield_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Handler that directly yields events without using builders.
+
+ Does NOT set response_id on output items. Layer 2 (event consumption loop)
+ must auto-stamp it.
+ """
+
+ async def _events():
+ # Use builder for response.created (well-formed)
+ stream = ResponseEventStream(response_id=context.response_id, model=getattr(request, "model", None))
+ yield stream.emit_created()
+
+ # Directly yield output_item events without response_id using wire format
+ item_id = f"caitem_{context.response_id[7:25]}directyield00000000000000000001"
+ yield {
+ "type": "response.output_item.added",
+ "item": {
+ "id": item_id,
+ "type": "message",
+ "role": "assistant",
+ "status": "in_progress",
+ "content": [],
+ # response_id intentionally NOT set — Layer 2 should stamp it
+ },
+ "output_index": 0,
+ }
+ yield {
+ "type": "response.output_item.done",
+ "item": {
+ "id": item_id,
+ "type": "message",
+ "role": "assistant",
+ "status": "completed",
+ "content": [],
+ # response_id intentionally NOT set
+ },
+ "output_index": 0,
+ }
+
+ yield stream.emit_completed()
+
+ return _events()
+
+
+def _build_client(handler: Any) -> TestClient:
+ app = ResponsesAgentServerHost()
+ app.create_handler(handler)
+ return TestClient(app)
+
+
+def _wait_for_terminal(client: TestClient, response_id: str) -> None:
+ ok, diag = poll_until(
+ lambda: (
+ client.get(f"/responses/{response_id}").json().get("status")
+ in ("completed", "failed", "incomplete", "cancelled")
+ ),
+ timeout_s=5.0,
+ label="wait_for_terminal",
+ )
+ assert ok, diag
+
+
+# ════════════════════════════════════════════════════════════
+# T012: Streaming output items have response_id from response.created
+# ════════════════════════════════════════════════════════════
+
+
+def test_streaming_output_items_have_response_id_matching_response_created() -> None:
+ """T012 — response_id on output items must match the response ID."""
+ client = _build_client(_handler_with_output)
+
+ with client.stream("POST", "/responses", json={"model": "test", "stream": True}) as resp:
+ assert resp.status_code == 200
+ events = _collect_sse_events(resp)
+
+ # Extract response ID from response.created
+ created_event = next(e for e in events if e["type"] == "response.created")
+ response_id = created_event["data"]["response"]["id"]
+
+ # All output_item.added and output_item.done events must have matching response_id
+ item_events = [e for e in events if e["type"] in ("response.output_item.added", "response.output_item.done")]
+ assert item_events, "Expected at least one output item event"
+
+ for evt in item_events:
+ item = evt["data"]["item"]
+ assert item.get("response_id") == response_id, (
+ f"Expected response_id={response_id}, got {item.get('response_id')} on event {evt['type']}"
+ )
+
+
+# ════════════════════════════════════════════════════════════
+# T013: Handler-set response_id is preserved
+# ════════════════════════════════════════════════════════════
+
+
+def test_handler_set_response_id_is_preserved() -> None:
+ """T013 — handler-set response_id takes precedence over auto-stamping."""
+ custom_id = "custom-response-id-override"
+ client = _build_client(_handler_with_custom_response_id(custom_id))
+
+ with client.stream("POST", "/responses", json={"model": "test", "stream": True}) as resp:
+ assert resp.status_code == 200
+ events = _collect_sse_events(resp)
+
+ item_added = next(e for e in events if e["type"] == "response.output_item.added")
+ item = item_added["data"]["item"]
+ assert item.get("response_id") == custom_id
+
+
+# ════════════════════════════════════════════════════════════
+# T014: Multiple output items all get same response_id
+# ════════════════════════════════════════════════════════════
+
+
+def test_multiple_output_items_all_have_same_response_id() -> None:
+ """T014 — all output items share the same response_id."""
+ client = _build_client(_handler_with_multiple_outputs)
+
+ with client.stream("POST", "/responses", json={"model": "test", "stream": True}) as resp:
+ assert resp.status_code == 200
+ events = _collect_sse_events(resp)
+
+ created_event = next(e for e in events if e["type"] == "response.created")
+ response_id = created_event["data"]["response"]["id"]
+
+ item_events = [e for e in events if e["type"] in ("response.output_item.added", "response.output_item.done")]
+ assert len(item_events) >= 4, f"Expected at least 4 item events (2 added + 2 done), got {len(item_events)}"
+
+ for evt in item_events:
+ item = evt["data"]["item"]
+ assert item.get("response_id") == response_id
+
+
+# ════════════════════════════════════════════════════════════
+# T015: GET JSON snapshot has response_id on output items
+# ════════════════════════════════════════════════════════════
+
+
+def test_get_json_snapshot_has_response_id_on_output_items() -> None:
+ """T015 — GET JSON snapshot includes response_id on output items."""
+ client = _build_client(_handler_with_output)
+
+ # Create a stored response (bg=True for GETable state)
+ r = client.post(
+ "/responses",
+ json={"model": "test", "stream": False, "store": True, "background": True},
+ )
+ assert r.status_code == 200
+ response_id = r.json()["id"]
+ _wait_for_terminal(client, response_id)
+
+ get_resp = client.get(f"/responses/{response_id}")
+ assert get_resp.status_code == 200
+ doc = get_resp.json()
+ output = doc["output"]
+ assert len(output) > 0, "Expected at least one output item"
+
+ for item in output:
+ assert item.get("response_id") == response_id, (
+ f"Expected response_id={response_id} on GET output item, got {item.get('response_id')}"
+ )
+
+
+# ════════════════════════════════════════════════════════════
+# T016: Direct-yield handler gets response_id auto-stamped (Layer 2)
+# ════════════════════════════════════════════════════════════
+
+
+def test_direct_yield_handler_gets_response_id_auto_stamped() -> None:
+ """T016 — Layer 2 auto-stamps response_id on items from direct-yield handlers."""
+ client = _build_client(_direct_yield_handler)
+
+ with client.stream("POST", "/responses", json={"model": "test", "stream": True}) as resp:
+ assert resp.status_code == 200
+ events = _collect_sse_events(resp)
+
+ created_event = next(e for e in events if e["type"] == "response.created")
+ response_id = created_event["data"]["response"]["id"]
+
+ item_added = next(e for e in events if e["type"] == "response.output_item.added")
+ item = item_added["data"]["item"]
+ assert item.get("response_id") == response_id, (
+ f"Expected auto-stamped response_id={response_id}, got {item.get('response_id')}"
+ )
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_response_id_header.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_response_id_header.py
new file mode 100644
index 000000000000..0fe5989d79ea
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_response_id_header.py
@@ -0,0 +1,195 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Protocol conformance tests for ``x-agent-response-id`` header (B38).
+
+When the header is present with a non-empty value, the SDK MUST use that value
+as the response ID instead of generating one.
+
+Python port of ResponseIdHeaderTests.
+"""
+
+from __future__ import annotations
+
+import json as _json
+from typing import Any
+
+from starlette.testclient import TestClient
+
+from azure.ai.agentserver.responses import ResponsesAgentServerHost
+from azure.ai.agentserver.responses._id_generator import IdGenerator
+from azure.ai.agentserver.responses.streaming._event_stream import ResponseEventStream
+from tests._helpers import poll_until
+
+# ════════════════════════════════════════════════════════════
+# Helpers
+# ════════════════════════════════════════════════════════════
+
+
+def _collect_sse_events(response: Any) -> list[dict[str, Any]]:
+ events: list[dict[str, Any]] = []
+ current_type: str | None = None
+ current_data: str | None = None
+ for line in response.iter_lines():
+ if not line:
+ if current_type is not None:
+ payload = _json.loads(current_data) if current_data else {}
+ events.append({"type": current_type, "data": payload})
+ current_type = None
+ current_data = None
+ continue
+ if line.startswith("event:"):
+ current_type = line.split(":", 1)[1].strip()
+ elif line.startswith("data:"):
+ current_data = line.split(":", 1)[1].strip()
+ if current_type is not None:
+ payload = _json.loads(current_data) if current_data else {}
+ events.append({"type": current_type, "data": payload})
+ return events
+
+
+_last_context: Any = None
+
+
+def _tracking_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Handler that records its context for inspection."""
+ global _last_context
+ _last_context = context
+
+ async def _events():
+ stream = ResponseEventStream(response_id=context.response_id, model=getattr(request, "model", None))
+ yield stream.emit_created()
+ yield stream.emit_completed()
+
+ return _events()
+
+
+def _noop_handler(request: Any, context: Any, cancellation_signal: Any):
+ async def _events():
+ if False:
+ yield None
+
+ return _events()
+
+
+def _build_client(handler: Any = None) -> TestClient:
+ app = ResponsesAgentServerHost()
+ app.create_handler(handler or _noop_handler)
+ return TestClient(app)
+
+
+def _wait_for_terminal(client: TestClient, response_id: str) -> None:
+ ok, diag = poll_until(
+ lambda: (
+ client.get(f"/responses/{response_id}").json().get("status")
+ in ("completed", "failed", "incomplete", "cancelled")
+ ),
+ timeout_s=5.0,
+ label="wait_for_terminal",
+ )
+ assert ok, diag
+
+
+# ════════════════════════════════════════════════════════════
+# Default mode: x-agent-response-id
+# ════════════════════════════════════════════════════════════
+
+
+def test_default_with_header_uses_header_value() -> None:
+ """B38 — default mode: x-agent-response-id header overrides response ID."""
+ custom_id = IdGenerator.new_response_id()
+ client = _build_client()
+
+ response = client.post(
+ "/responses",
+ json={"model": "test"},
+ headers={"x-agent-response-id": custom_id},
+ )
+ assert response.status_code == 200
+ assert response.json()["id"] == custom_id
+
+
+def test_default_without_header_generates_caresp_id() -> None:
+ """B38 — without header, generates caresp_ prefixed ID."""
+ client = _build_client()
+
+ response = client.post("/responses", json={"model": "test"})
+ assert response.status_code == 200
+ assert response.json()["id"].startswith("caresp_")
+
+
+def test_default_with_empty_header_generates_caresp_id() -> None:
+ """B38 — empty header is ignored, generates caresp_ prefixed ID."""
+ client = _build_client()
+
+ response = client.post(
+ "/responses",
+ json={"model": "test"},
+ headers={"x-agent-response-id": ""},
+ )
+ assert response.status_code == 200
+ assert response.json()["id"].startswith("caresp_")
+
+
+# ════════════════════════════════════════════════════════════
+# Streaming mode: x-agent-response-id
+# ════════════════════════════════════════════════════════════
+
+
+def test_streaming_with_header_uses_header_value() -> None:
+ """B38 — streaming mode: x-agent-response-id header overrides response ID."""
+ custom_id = IdGenerator.new_response_id()
+ client = _build_client(_tracking_handler)
+
+ with client.stream(
+ "POST",
+ "/responses",
+ json={"model": "test", "stream": True},
+ headers={"x-agent-response-id": custom_id},
+ ) as resp:
+ events = _collect_sse_events(resp)
+
+ created_event = next(e for e in events if e["type"] == "response.created")
+ response_id = created_event["data"]["response"]["id"]
+ assert response_id == custom_id
+
+
+# ════════════════════════════════════════════════════════════
+# Background mode: x-agent-response-id
+# ════════════════════════════════════════════════════════════
+
+
+def test_background_with_header_uses_header_value() -> None:
+ """B38 — background mode: x-agent-response-id header overrides response ID."""
+ custom_id = IdGenerator.new_response_id()
+ client = _build_client()
+
+ response = client.post(
+ "/responses",
+ json={"model": "test", "background": True},
+ headers={"x-agent-response-id": custom_id},
+ )
+ assert response.status_code == 200
+ assert response.json()["id"] == custom_id
+
+
+# ════════════════════════════════════════════════════════════
+# Handler receives the correct ResponseId on context
+# ════════════════════════════════════════════════════════════
+
+
+def test_handler_context_has_correct_response_id() -> None:
+ """B38 — handler context receives the header-specified response ID."""
+ global _last_context
+ _last_context = None
+
+ custom_id = IdGenerator.new_response_id()
+ client = _build_client(_tracking_handler)
+
+ client.post(
+ "/responses",
+ json={"model": "test"},
+ headers={"x-agent-response-id": custom_id},
+ )
+
+ assert _last_context is not None
+ assert _last_context.response_id == custom_id
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_response_invariants.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_response_invariants.py
new file mode 100644
index 000000000000..1d1cf7b18fb4
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_response_invariants.py
@@ -0,0 +1,901 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Contract tests for response field invariants across statuses (B6, B19, B33)."""
+
+from __future__ import annotations
+
+import asyncio
+from typing import Any
+
+from starlette.testclient import TestClient
+
+from azure.ai.agentserver.responses import ResponsesAgentServerHost
+from azure.ai.agentserver.responses.streaming._event_stream import ResponseEventStream
+from tests._helpers import poll_until
+
+
+def _noop_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Minimal handler — auto-completes."""
+
+ async def _events():
+ if False: # pragma: no cover
+ yield None
+
+ return _events()
+
+
+def _throwing_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Handler that raises after emitting created."""
+
+ async def _events():
+ stream = ResponseEventStream(response_id=context.response_id, model=getattr(request, "model", None))
+ yield stream.emit_created()
+ raise RuntimeError("Simulated handler failure")
+
+ return _events()
+
+
+def _incomplete_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Handler that emits an incomplete terminal event."""
+
+ async def _events():
+ stream = ResponseEventStream(response_id=context.response_id, model=getattr(request, "model", None))
+ yield stream.emit_created()
+ yield stream.emit_incomplete(reason="max_output_tokens")
+
+ return _events()
+
+
+def _delayed_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Handler that sleeps briefly, checking for cancellation."""
+
+ async def _events():
+ if cancellation_signal.is_set():
+ return
+ await asyncio.sleep(0.25)
+ if cancellation_signal.is_set():
+ return
+ if False: # pragma: no cover
+ yield None
+
+ return _events()
+
+
+def _cancellable_bg_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Handler that emits response.created then blocks until cancelled (Phase 3)."""
+
+ async def _events():
+ yield {"type": "response.created", "response": {"status": "in_progress", "output": []}}
+ while not cancellation_signal.is_set():
+ await asyncio.sleep(0.01)
+
+ return _events()
+
+
+def _build_client(handler: Any | None = None) -> TestClient:
+ app = ResponsesAgentServerHost()
+ app.create_handler(handler or _noop_handler)
+ return TestClient(app)
+
+
+def _wait_for_status(
+ client: TestClient,
+ response_id: str,
+ expected_status: str,
+ *,
+ timeout_s: float = 5.0,
+) -> None:
+ latest_status: str | None = None
+
+ def _check() -> bool:
+ nonlocal latest_status
+ r = client.get(f"/responses/{response_id}")
+ if r.status_code != 200:
+ return False
+ latest_status = r.json().get("status")
+ return latest_status == expected_status
+
+ ok, failure = poll_until(
+ _check,
+ timeout_s=timeout_s,
+ interval_s=0.05,
+ context_provider=lambda: {"status": latest_status},
+ label=f"wait for {expected_status}",
+ )
+ assert ok, failure
+
+
+# ══════════════════════════════════════════════════════════
+# B6: completed_at invariant
+# ══════════════════════════════════════════════════════════
+
+
+def test_completed_at__nonnull_only_for_completed_status() -> None:
+ """B6 — completed_at is non-null only when status is completed."""
+ client = _build_client()
+
+ response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": False,
+ },
+ )
+ assert response.status_code == 200
+ payload = response.json()
+ assert payload["status"] == "completed"
+ assert payload.get("completed_at") is not None, "completed_at should be non-null for completed status"
+ assert isinstance(payload["completed_at"], (int, float)), "completed_at should be a Unix timestamp"
+
+
+def test_completed_at__null_for_failed_status() -> None:
+ """B6 — completed_at is null when status is failed."""
+ client = _build_client(_throwing_handler)
+
+ create_response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": True,
+ },
+ )
+ assert create_response.status_code == 200
+ response_id = create_response.json()["id"]
+ _wait_for_status(client, response_id, "failed")
+
+ get_response = client.get(f"/responses/{response_id}")
+ assert get_response.status_code == 200
+ payload = get_response.json()
+ assert payload["status"] == "failed"
+ assert payload.get("completed_at") is None, "completed_at should be null for failed status"
+
+
+def test_completed_at__null_for_cancelled_status() -> None:
+ """B6 — completed_at is null when status is cancelled."""
+ client = _build_client(_cancellable_bg_handler)
+
+ create_response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": True,
+ },
+ )
+ assert create_response.status_code == 200
+ response_id = create_response.json()["id"]
+
+ cancel_response = client.post(f"/responses/{response_id}/cancel")
+ assert cancel_response.status_code == 200
+
+ _wait_for_status(client, response_id, "cancelled")
+
+ get_response = client.get(f"/responses/{response_id}")
+ payload = get_response.json()
+ assert payload["status"] == "cancelled"
+ assert payload.get("completed_at") is None, "completed_at should be null for cancelled status"
+
+
+def test_completed_at__null_for_incomplete_status() -> None:
+ """B6 — completed_at is null when status is incomplete."""
+ client = _build_client(_incomplete_handler)
+
+ create_response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": True,
+ },
+ )
+ assert create_response.status_code == 200
+ response_id = create_response.json()["id"]
+ _wait_for_status(client, response_id, "incomplete")
+
+ get_response = client.get(f"/responses/{response_id}")
+ payload = get_response.json()
+ assert payload["status"] == "incomplete"
+ assert payload.get("completed_at") is None, "completed_at should be null for incomplete status"
+
+
+# ══════════════════════════════════════════════════════════
+# B19: x-platform-server header
+# ══════════════════════════════════════════════════════════
+
+
+def test_x_platform_server_header__present_on_post_response() -> None:
+ """B19 — All responses include x-platform-server header."""
+ client = _build_client()
+
+ response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": False,
+ },
+ )
+ assert response.status_code == 200
+ header = response.headers.get("x-platform-server")
+ assert header is not None, "x-platform-server header must be present per B19"
+ assert isinstance(header, str) and len(header) > 0
+ # Both core and responses segments must appear
+ assert "azure-ai-agentserver-core/" in header
+ assert "azure-ai-agentserver-responses/" in header
+
+
+def test_x_platform_server_header__present_on_get_response() -> None:
+ """B19 — x-platform-server header on GET responses."""
+ client = _build_client()
+
+ create = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": False,
+ },
+ )
+ response_id = create.json()["id"]
+
+ get_response = client.get(f"/responses/{response_id}")
+ assert get_response.status_code == 200
+ header = get_response.headers.get("x-platform-server")
+ assert header is not None, "x-platform-server header must be present on GET per B19"
+
+
+# ══════════════════════════════════════════════════════════
+# B33: Token usage
+# ══════════════════════════════════════════════════════════
+
+
+def test_token_usage__structure_valid_when_present() -> None:
+ """B33 — Terminal events include optional usage field. When present, check structure."""
+ client = _build_client()
+
+ response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": False,
+ },
+ )
+ assert response.status_code == 200
+ payload = response.json()
+ assert payload["status"] == "completed"
+ # B33: usage is optional. If present, verify structure.
+ usage = payload.get("usage")
+ if usage is not None:
+ assert isinstance(usage.get("input_tokens"), int), "input_tokens should be int"
+ assert isinstance(usage.get("output_tokens"), int), "output_tokens should be int"
+ assert isinstance(usage.get("total_tokens"), int), "total_tokens should be int"
+ assert usage["total_tokens"] == usage["input_tokens"] + usage["output_tokens"]
+
+
+# ══════════════════════════════════════════════════════════
+# B7: created_at present on every response
+# ══════════════════════════════════════════════════════════
+
+
+def test_created_at__present_on_sync_response() -> None:
+ """B7 — created_at field must be present (and numeric) on every response object."""
+ client = _build_client()
+
+ response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": False,
+ },
+ )
+ assert response.status_code == 200
+ payload = response.json()
+ created_at = payload.get("created_at")
+ assert created_at is not None, "created_at must be present on every response"
+ assert isinstance(created_at, (int, float)), f"created_at must be numeric, got: {type(created_at)}"
+
+
+def test_created_at__present_on_background_response() -> None:
+ """B7 — created_at is also present when fetching a background response via GET."""
+ client = _build_client()
+
+ create_response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": True,
+ },
+ )
+ assert create_response.status_code == 200
+ response_id = create_response.json()["id"]
+
+ _wait_for_status(client, response_id, "completed")
+
+ get_response = client.get(f"/responses/{response_id}")
+ assert get_response.status_code == 200
+ payload = get_response.json()
+ created_at = payload.get("created_at")
+ assert created_at is not None, "created_at must be present on background response"
+ assert isinstance(created_at, (int, float)), f"created_at must be numeric, got: {type(created_at)}"
+
+
+# ══════════════════════════════════════════════════════════
+# B8: ResponseError shape (only code + message, no type/param)
+# ══════════════════════════════════════════════════════════
+
+
+def test_response_error__shape_has_only_code_and_message() -> None:
+ """B8 — The error field on a failed response has code and message but NOT type or param."""
+ client = _build_client(_throwing_handler)
+
+ create_response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": True,
+ },
+ )
+ assert create_response.status_code == 200
+ response_id = create_response.json()["id"]
+
+ _wait_for_status(client, response_id, "failed")
+
+ get_response = client.get(f"/responses/{response_id}")
+ assert get_response.status_code == 200
+ payload = get_response.json()
+ error = payload.get("error")
+ assert error is not None, "error field must be present on a failed response"
+ assert isinstance(error, dict)
+ # ResponseError shape: MUST have code and message
+ assert "code" in error, f"error must have 'code' field, got: {list(error.keys())}"
+ assert "message" in error, f"error must have 'message' field, got: {list(error.keys())}"
+ # ResponseError shape: must NOT have type or param (those are for request errors)
+ assert "type" not in error, (
+ f"error must NOT have 'type' field (that is for request errors), got: {list(error.keys())}"
+ )
+ assert "param" not in error, (
+ f"error must NOT have 'param' field (that is for request errors), got: {list(error.keys())}"
+ )
+
+
+# ══════════════════════════════════════════════════════════
+# B12: GET /responses/{id} returns 200 for all terminal statuses
+# ══════════════════════════════════════════════════════════
+
+
+def test_get__returns_200_for_failed_response() -> None:
+ """B12 — GET returns HTTP 200 for a response in failed status."""
+ client = _build_client(_throwing_handler)
+
+ create_response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": True,
+ },
+ )
+ assert create_response.status_code == 200
+ response_id = create_response.json()["id"]
+ _wait_for_status(client, response_id, "failed")
+
+ get_response = client.get(f"/responses/{response_id}")
+ assert get_response.status_code == 200
+ assert get_response.json()["status"] == "failed"
+
+
+def test_get__returns_200_for_incomplete_response() -> None:
+ """B12 — GET returns HTTP 200 for a response in incomplete status."""
+ client = _build_client(_incomplete_handler)
+
+ create_response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": True,
+ },
+ )
+ assert create_response.status_code == 200
+ response_id = create_response.json()["id"]
+ _wait_for_status(client, response_id, "incomplete")
+
+ get_response = client.get(f"/responses/{response_id}")
+ assert get_response.status_code == 200
+ assert get_response.json()["status"] == "incomplete"
+
+
+def test_get__returns_200_for_cancelled_response() -> None:
+ """B12 — GET returns HTTP 200 for a response in cancelled status."""
+ client = _build_client(_cancellable_bg_handler)
+
+ create_response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": True,
+ },
+ )
+ assert create_response.status_code == 200
+ response_id = create_response.json()["id"]
+
+ cancel_response = client.post(f"/responses/{response_id}/cancel")
+ assert cancel_response.status_code == 200
+ _wait_for_status(client, response_id, "cancelled")
+
+ get_response = client.get(f"/responses/{response_id}")
+ assert get_response.status_code == 200
+ assert get_response.json()["status"] == "cancelled"
+
+
+# ════════════════════════════════════════════════════════
+# N-8, B6: error=null for non-failed terminal statuses
+# ════════════════════════════════════════════════════════
+
+
+def test_error_field__null_for_completed_status() -> None:
+ """B6 — error must be null for status=completed."""
+ client = _build_client()
+
+ response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": False,
+ },
+ )
+ assert response.status_code == 200
+ payload = response.json()
+ assert payload["status"] == "completed"
+ assert payload.get("error") is None, "B6: error must be null for status=completed"
+
+
+def test_error_field__null_for_cancelled_status() -> None:
+ """B6 — error must be null for status=cancelled."""
+ client = _build_client(_cancellable_bg_handler)
+
+ create_response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": True,
+ },
+ )
+ assert create_response.status_code == 200
+ response_id = create_response.json()["id"]
+
+ cancel_response = client.post(f"/responses/{response_id}/cancel")
+ assert cancel_response.status_code == 200
+ _wait_for_status(client, response_id, "cancelled")
+
+ get_response = client.get(f"/responses/{response_id}")
+ assert get_response.status_code == 200
+ payload = get_response.json()
+ assert payload["status"] == "cancelled"
+ assert payload.get("error") is None, "B6: error must be null for status=cancelled"
+
+
+# ════════════════════════════════════════════════════════
+# N-1, N-2, B20/B21: response_id and agent_reference on output items
+# ════════════════════════════════════════════════════════
+
+
+def _output_item_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Handler that emits a single output message item."""
+
+ async def _events():
+ stream = ResponseEventStream(response_id=context.response_id, model=getattr(request, "model", None))
+ yield stream.emit_created()
+ yield stream.emit_in_progress()
+
+ message_item = stream.add_output_item_message()
+ yield message_item.emit_added()
+
+ text_content = message_item.add_text_content()
+ yield text_content.emit_added()
+ yield text_content.emit_delta("hi")
+ yield text_content.emit_done()
+ yield message_item.emit_content_done(text_content)
+ yield message_item.emit_done()
+
+ yield stream.emit_completed()
+
+ return _events()
+
+
+def test_output_item__response_id_stamped_on_item() -> None:
+ """B20 — Output items carry response_id stamped from the parent Response."""
+ client = _build_client(_output_item_handler)
+
+ response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": False,
+ },
+ )
+ assert response.status_code == 200
+ payload = response.json()
+ assert payload["status"] == "completed"
+ assert len(payload.get("output", [])) == 1
+ item = payload["output"][0]
+ assert item.get("response_id") == payload["id"], (
+ f"B20: response_id on output item must match parent Response id, got: {item!r}"
+ )
+
+
+def test_output_item__agent_reference_stamped_on_item() -> None:
+ """B21 — agent_reference from the request is stamped on output items when the stream knows about it."""
+
+ def _handler_with_agent_ref(request: Any, context: Any, cancellation_signal: Any):
+ """Handler that creates a stream with agent_reference and emits a message item."""
+ agent_ref = None
+ if hasattr(request, "agent_reference") and request.agent_reference is not None:
+ agent_ref_raw = request.agent_reference
+ if hasattr(agent_ref_raw, "as_dict"):
+ agent_ref = agent_ref_raw.as_dict()
+ elif isinstance(agent_ref_raw, dict):
+ agent_ref = agent_ref_raw
+
+ async def _events():
+ stream = ResponseEventStream(
+ response_id=context.response_id,
+ model=getattr(request, "model", None),
+ agent_reference=agent_ref,
+ )
+ yield stream.emit_created()
+ yield stream.emit_in_progress()
+
+ message_item = stream.add_output_item_message()
+ yield message_item.emit_added()
+
+ text_content = message_item.add_text_content()
+ yield text_content.emit_added()
+ yield text_content.emit_delta("hi")
+ yield text_content.emit_done()
+ yield message_item.emit_content_done(text_content)
+ yield message_item.emit_done()
+
+ yield stream.emit_completed()
+
+ return _events()
+
+ app = ResponsesAgentServerHost()
+ app.create_handler(_handler_with_agent_ref)
+ client = TestClient(app)
+
+ agent_ref = {"type": "agent_reference", "name": "my-agent", "version": "v2"}
+
+ response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "agent_reference": agent_ref,
+ "stream": False,
+ "store": True,
+ "background": False,
+ },
+ )
+ assert response.status_code == 200
+ payload = response.json()
+ assert payload["status"] == "completed"
+ # agent_reference is propagated to the Response
+ assert payload.get("agent_reference", {}).get("name") == "my-agent"
+ assert payload.get("agent_reference", {}).get("version") == "v2"
+ # B21: agent_reference is also stamped on individual output items
+ assert len(payload.get("output", [])) == 1
+ item = payload["output"][0]
+ assert item.get("agent_reference") is not None, (
+ f"B21: agent_reference must be stamped on output items, got: {item!r}"
+ )
+ assert item["agent_reference"].get("name") == "my-agent"
+ assert item["agent_reference"].get("version") == "v2"
+
+
+# ════════════════════════════════════════════════════════
+# N-3, B19: x-platform-server on SSE streaming responses
+# ════════════════════════════════════════════════════════
+
+
+def test_x_platform_server_header__present_on_sse_streaming_post_response() -> None:
+ """B19 — x-platform-server header must be present on SSE streaming POST /responses."""
+ client = _build_client()
+
+ with client.stream(
+ "POST",
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": True,
+ "store": True,
+ "background": False,
+ },
+ ) as response:
+ assert response.status_code == 200
+ header = response.headers.get("x-platform-server")
+
+ assert header is not None, "B19: x-platform-server header must be present on SSE streaming POST per B19"
+ assert isinstance(header, str) and len(header) > 0
+
+
+def test_x_platform_server_header__present_on_sse_replay_get_response() -> None:
+ """B19 — x-platform-server header must be present on GET ?stream=true replay."""
+ import json as _json
+
+ client = _build_client()
+
+ # Create a background+stream response so SSE replay is available
+ with client.stream(
+ "POST",
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": True,
+ "store": True,
+ "background": True,
+ },
+ ) as post_response:
+ assert post_response.status_code == 200
+ first_data: str | None = None
+ for line in post_response.iter_lines():
+ if line.startswith("data:"):
+ first_data = line.split(":", 1)[1].strip()
+ break
+ assert first_data is not None
+ response_id = _json.loads(first_data)["response"]["id"]
+
+ with client.stream("GET", f"/responses/{response_id}?stream=true") as replay_response:
+ assert replay_response.status_code == 200
+ header = replay_response.headers.get("x-platform-server")
+
+ assert header is not None, "B19: x-platform-server header must be present on SSE replay GET per B19"
+ assert isinstance(header, str) and len(header) > 0
+
+
+# ══════════════════════════════════════════════════════════
+# B14: x-platform-server header on 4xx error responses
+# ══════════════════════════════════════════════════════════
+
+
+def test_x_platform_server__present_on_400_create_error() -> None:
+ """B14 — x-platform-server header must be present on 4xx error responses (not just 2xx)."""
+ client = _build_client()
+
+ response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": "not-a-bool", # invalid value → 400
+ },
+ )
+ assert response.status_code == 400
+ header = response.headers.get("x-platform-server")
+ assert header is not None, "x-platform-server header must be present on 400 error responses per B14"
+
+
+# ══════════════════════════════════════════════════════════
+# B15: output[] preserved for completed, cleared for cancelled
+# ══════════════════════════════════════════════════════════
+
+
+def test_output__preserved_for_completed_response() -> None:
+ """B15 — output[] is preserved (may be non-empty) for completed responses."""
+ client = _build_client()
+
+ response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": False,
+ },
+ )
+ assert response.status_code == 200
+ payload = response.json()
+ assert payload["status"] == "completed"
+ # output must be present as a list (may be empty for noop handler, but must not be absent)
+ assert "output" in payload, "output field must be present on completed response"
+ assert isinstance(payload["output"], list), f"output must be a list, got: {type(payload['output'])}"
+
+
+def test_output__cleared_for_cancelled_response() -> None:
+ """B15 — output[] is cleared (empty list) when a response is cancelled."""
+ client = _build_client(_cancellable_bg_handler)
+
+ create_response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": True,
+ },
+ )
+ assert create_response.status_code == 200
+ response_id = create_response.json()["id"]
+
+ cancel_response = client.post(f"/responses/{response_id}/cancel")
+ assert cancel_response.status_code == 200
+ _wait_for_status(client, response_id, "cancelled")
+
+ get_response = client.get(f"/responses/{response_id}")
+ assert get_response.status_code == 200
+ payload = get_response.json()
+ assert payload.get("output") == [], (
+ f"output must be cleared (empty []) for cancelled responses, got: {payload.get('output')}"
+ )
+
+
+# ══════════════════════════════════════════════════════════
+# StatusLifecycle: queued status round-trip
+# (ported from StatusLifecycleTests.cs)
+# ══════════════════════════════════════════════════════════
+
+
+def _collect_sse_events(response: Any) -> list[dict[str, Any]]:
+ """Collect SSE events from a streaming response."""
+ import json as _json
+
+ events: list[dict[str, Any]] = []
+ current_type: str | None = None
+ current_data: str | None = None
+ for line in response.iter_lines():
+ if not line:
+ if current_type is not None:
+ payload = _json.loads(current_data) if current_data else {}
+ events.append({"type": current_type, "data": payload})
+ current_type = None
+ current_data = None
+ continue
+ if line.startswith("event:"):
+ current_type = line.split(":", 1)[1].strip()
+ elif line.startswith("data:"):
+ current_data = line.split(":", 1)[1].strip()
+ if current_type is not None:
+ payload = _json.loads(current_data) if current_data else {}
+ events.append({"type": current_type, "data": payload})
+ return events
+
+
+def _queued_then_completed_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Handler that emits created(queued) → in_progress → completed."""
+
+ async def _events():
+ stream = ResponseEventStream(
+ response_id=context.response_id,
+ model=getattr(request, "model", None),
+ )
+ yield stream.emit_created(status="queued")
+ yield stream.emit_in_progress()
+ yield stream.emit_completed()
+
+ return _events()
+
+
+def test_streaming_queued_status_honoured_in_created_event() -> None:
+ """Handler that sets queued status — the response.created SSE event must reflect status: 'queued'.
+
+ Ported from StatusLifecycleTests.Streaming_QueuedStatus_HonouredInCreatedEvent.
+ """
+ client = _build_client(_queued_then_completed_handler)
+
+ with client.stream(
+ "POST",
+ "/responses",
+ json={"model": "test", "input": "hello", "stream": True},
+ ) as resp:
+ assert resp.status_code == 200
+ events = _collect_sse_events(resp)
+
+ created = [e for e in events if e["type"] == "response.created"]
+ assert created, "Expected response.created event"
+ assert created[0]["data"]["response"]["status"] == "queued", (
+ f"Expected queued status on response.created, got {created[0]['data']['response']['status']!r}"
+ )
+
+
+def test_background_queued_status_honoured_in_post_response() -> None:
+ """Background mode: POST response body reflects status: 'queued' when handler sets it.
+
+ Ported from StatusLifecycleTests.Background_QueuedStatus_HonouredInPostResponse.
+ """
+
+ def _queued_waiting_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Handler that emits created(queued), pauses, then in_progress → completed."""
+
+ async def _events():
+ stream = ResponseEventStream(
+ response_id=context.response_id,
+ model=getattr(request, "model", None),
+ )
+ yield stream.emit_created(status="queued")
+ # Pause to ensure the bg POST response sees 'queued' status
+ await asyncio.sleep(0.3)
+ yield stream.emit_in_progress()
+ yield stream.emit_completed()
+
+ return _events()
+
+ client = _build_client(_queued_waiting_handler)
+
+ response = client.post(
+ "/responses",
+ json={"model": "test", "input": "hello", "background": True, "store": True},
+ )
+ assert response.status_code == 200
+ payload = response.json()
+ # Initial status must be queued (from the response.created event the handler emits)
+ assert payload["status"] == "queued", (
+ f"Expected queued status on background POST response, got {payload['status']!r}"
+ )
+
+
+def test_background_queued_status_eventually_completes() -> None:
+ """Background queued response eventually transitions to status: 'completed' after handler finishes.
+
+ Ported from StatusLifecycleTests.Background_QueuedStatus_EventuallyCompletes.
+ """
+ client = _build_client(_queued_then_completed_handler)
+
+ response = client.post(
+ "/responses",
+ json={"model": "test", "input": "hello", "background": True, "store": True},
+ )
+ assert response.status_code == 200
+ response_id = response.json()["id"]
+
+ # Poll until the response reaches completed
+ _wait_for_status(client, response_id, "completed", timeout_s=5.0)
+
+ get = client.get(f"/responses/{response_id}")
+ assert get.status_code == 200
+ assert get.json()["status"] == "completed"
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_sentinel_removal.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_sentinel_removal.py
new file mode 100644
index 000000000000..c4093004f5bf
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_sentinel_removal.py
@@ -0,0 +1,176 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Protocol conformance tests for sentinel removal (US1, US2).
+
+Validates that no SSE stream contains ``data: [DONE]`` under any scenario.
+Validates: B26 — Terminal SSE events (no [DONE] sentinel).
+
+Python port of SentinelRemovalProtocolTests.
+"""
+
+from __future__ import annotations
+
+from typing import Any
+
+from starlette.testclient import TestClient
+
+from azure.ai.agentserver.responses import ResponsesAgentServerHost
+from azure.ai.agentserver.responses.streaming._event_stream import ResponseEventStream
+from tests._helpers import poll_until
+
+# ════════════════════════════════════════════════════════════
+# Helpers
+# ════════════════════════════════════════════════════════════
+
+
+def _simple_text_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Handler that emits a complete text message output."""
+
+ async def _events():
+ stream = ResponseEventStream(response_id=context.response_id, model=getattr(request, "model", None))
+ yield stream.emit_created()
+
+ msg = stream.add_output_item_message()
+ yield msg.emit_added()
+ text = msg.add_text_content()
+ yield text.emit_added()
+ yield text.emit_delta("Hello")
+ yield text.emit_done()
+ yield msg.emit_content_done(text)
+ yield msg.emit_done()
+
+ yield stream.emit_completed()
+
+ return _events()
+
+
+def _failing_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Handler that emits response.created then raises an exception."""
+
+ async def _events():
+ stream = ResponseEventStream(response_id=context.response_id, model=getattr(request, "model", None))
+ yield stream.emit_created()
+ raise RuntimeError("Simulated handler failure")
+
+ return _events()
+
+
+def _incomplete_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Handler that emits response.created then response.incomplete."""
+
+ async def _events():
+ stream = ResponseEventStream(response_id=context.response_id, model=getattr(request, "model", None))
+ yield stream.emit_created()
+ yield stream.emit_incomplete()
+
+ return _events()
+
+
+def _noop_handler(request: Any, context: Any, cancellation_signal: Any):
+ async def _events():
+ if False:
+ yield None
+
+ return _events()
+
+
+def _build_client(handler: Any) -> TestClient:
+ app = ResponsesAgentServerHost()
+ app.create_handler(handler)
+ return TestClient(app)
+
+
+def _wait_for_terminal(client: TestClient, response_id: str) -> None:
+ ok, diag = poll_until(
+ lambda: (
+ client.get(f"/responses/{response_id}").json().get("status")
+ in ("completed", "failed", "incomplete", "cancelled")
+ ),
+ timeout_s=5.0,
+ label="wait_for_terminal",
+ )
+ assert ok, diag
+
+
+# ════════════════════════════════════════════════════════════
+# US1: Live streams must not contain [DONE] sentinel
+# ════════════════════════════════════════════════════════════
+
+
+def test_live_stream_completed_no_done_sentinel() -> None:
+ """Live SSE stream for completed response must not contain data: [DONE]."""
+ client = _build_client(_simple_text_handler)
+
+ with client.stream("POST", "/responses", json={"model": "test", "stream": True}) as resp:
+ assert resp.status_code == 200
+ lines = list(resp.iter_lines())
+ body = "\n".join(lines)
+
+ assert "data: [DONE]" not in body
+
+
+def test_live_stream_failed_no_done_sentinel() -> None:
+ """Live SSE stream for failed response must not contain data: [DONE]."""
+ client = _build_client(_failing_handler)
+
+ with client.stream("POST", "/responses", json={"model": "test", "stream": True}) as resp:
+ assert resp.status_code == 200
+ lines = list(resp.iter_lines())
+ body = "\n".join(lines)
+
+ assert "data: [DONE]" not in body
+ # Verify the stream contains a response.failed terminal event
+ assert "response.failed" in body
+
+
+def test_live_stream_incomplete_no_done_sentinel() -> None:
+ """Live SSE stream for incomplete response must not contain data: [DONE]."""
+ client = _build_client(_incomplete_handler)
+
+ with client.stream("POST", "/responses", json={"model": "test", "stream": True}) as resp:
+ assert resp.status_code == 200
+ lines = list(resp.iter_lines())
+ body = "\n".join(lines)
+
+ assert "data: [DONE]" not in body
+ # Verify the stream contains a response.incomplete terminal event
+ assert "response.incomplete" in body
+
+
+# ════════════════════════════════════════════════════════════
+# US2: Replay streams must not contain [DONE] sentinel
+# ════════════════════════════════════════════════════════════
+
+
+def test_replay_stream_completed_no_done_sentinel() -> None:
+ """Replayed SSE stream for completed bg+stream response must not contain data: [DONE]."""
+ from azure.ai.agentserver.responses._id_generator import IdGenerator
+
+ client = _build_client(_simple_text_handler)
+ response_id = IdGenerator.new_response_id()
+
+ # Create a bg+stream response (required for SSE replay)
+ with client.stream(
+ "POST",
+ "/responses",
+ json={
+ "response_id": response_id,
+ "model": "test",
+ "background": True,
+ "stream": True,
+ "store": True,
+ },
+ ) as resp:
+ assert resp.status_code == 200
+ # Consume SSE stream to completion
+ list(resp.iter_lines())
+
+ _wait_for_terminal(client, response_id)
+
+ # GET SSE replay
+ with client.stream("GET", f"/responses/{response_id}?stream=true") as replay:
+ assert replay.status_code == 200
+ lines = list(replay.iter_lines())
+ body = "\n".join(lines)
+
+ assert "data: [DONE]" not in body
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_session_id_resolution.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_session_id_resolution.py
new file mode 100644
index 000000000000..7df097a81f33
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_session_id_resolution.py
@@ -0,0 +1,425 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Protocol conformance tests for session ID resolution (B39).
+
+Priority: request payload ``agent_session_id`` → ``FOUNDRY_AGENT_SESSION_ID``
+env var → generated UUID.
+The resolved session ID MUST be auto-stamped on the
+``ResponseObject.agent_session_id``.
+
+Python port of SessionIdResolutionTests.
+"""
+
+from __future__ import annotations
+
+import json
+import os
+import uuid
+from typing import Any
+from unittest.mock import patch
+
+from starlette.testclient import TestClient
+
+from azure.ai.agentserver.responses import ResponsesAgentServerHost
+from azure.ai.agentserver.responses.streaming._event_stream import ResponseEventStream
+from tests._helpers import poll_until
+
+# ════════════════════════════════════════════════════════════
+# Shared helpers
+# ════════════════════════════════════════════════════════════
+
+
+def _noop_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Minimal handler — emits no events (framework auto-completes)."""
+
+ async def _events():
+ if False: # pragma: no cover
+ yield None
+
+ return _events()
+
+
+def _simple_text_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Handler that emits created + completed."""
+
+ async def _events():
+ stream = ResponseEventStream(
+ response_id=context.response_id,
+ model=getattr(request, "model", None),
+ )
+ yield stream.emit_created()
+ yield stream.emit_completed()
+
+ return _events()
+
+
+def _build_client(handler: Any | None = None) -> TestClient:
+ app = ResponsesAgentServerHost()
+ app.create_handler(handler or _noop_handler)
+ return TestClient(app)
+
+
+def _collect_sse_events(response: Any) -> list[dict[str, Any]]:
+ """Parse SSE lines from a streaming response into a list of event dicts."""
+ events: list[dict[str, Any]] = []
+ current_type: str | None = None
+ current_data: str | None = None
+
+ for line in response.iter_lines():
+ if not line:
+ if current_type is not None:
+ payload = json.loads(current_data) if current_data else {}
+ events.append({"type": current_type, "data": payload})
+ current_type = None
+ current_data = None
+ continue
+
+ if line.startswith("event:"):
+ current_type = line.split(":", 1)[1].strip()
+ elif line.startswith("data:"):
+ current_data = line.split(":", 1)[1].strip()
+
+ if current_type is not None:
+ payload = json.loads(current_data) if current_data else {}
+ events.append({"type": current_type, "data": payload})
+
+ return events
+
+
+def _wait_for_terminal(
+ client: TestClient,
+ response_id: str,
+ *,
+ timeout_s: float = 5.0,
+) -> dict[str, Any]:
+ """Poll GET until the response reaches a terminal status."""
+ latest: dict[str, Any] = {}
+ terminal_statuses = {"completed", "failed", "incomplete", "cancelled"}
+
+ def _is_terminal() -> bool:
+ nonlocal latest
+ r = client.get(f"/responses/{response_id}")
+ if r.status_code != 200:
+ return False
+ latest = r.json()
+ return latest.get("status") in terminal_statuses
+
+ ok, failure = poll_until(
+ _is_terminal,
+ timeout_s=timeout_s,
+ interval_s=0.05,
+ context_provider=lambda: {"status": latest.get("status")},
+ label=f"wait_for_terminal({response_id})",
+ )
+ assert ok, failure
+ return latest
+
+
+# ════════════════════════════════════════════════════════════
+# Tier 1: Payload agent_session_id takes priority
+# ════════════════════════════════════════════════════════════
+
+
+class TestPayloadSessionId:
+ """Payload agent_session_id is stamped on the response."""
+
+ def test_default_payload_session_id_stamped_on_response(self) -> None:
+ """B39 P1: non-streaming response carries the payload session ID."""
+ session_id = "my-session-from-payload"
+ client = _build_client()
+
+ response = client.post(
+ "/responses",
+ json={
+ "model": "test",
+ "agent_session_id": session_id,
+ },
+ )
+
+ assert response.status_code == 200
+ assert response.json()["agent_session_id"] == session_id
+
+ def test_streaming_payload_session_id_stamped_on_response(self) -> None:
+ """B39 P1: streaming response.created and response.completed carry the payload session ID."""
+ session_id = "streaming-session-xyz"
+ client = _build_client(_simple_text_handler)
+
+ with client.stream(
+ "POST",
+ "/responses",
+ json={
+ "model": "test",
+ "stream": True,
+ "agent_session_id": session_id,
+ },
+ ) as resp:
+ assert resp.status_code == 200
+ events = _collect_sse_events(resp)
+
+ # Check response.created event
+ created_events = [e for e in events if e["type"] == "response.created"]
+ assert len(created_events) == 1
+ assert created_events[0]["data"]["response"]["agent_session_id"] == session_id
+
+ # Check response.completed event
+ completed_events = [e for e in events if e["type"] == "response.completed"]
+ assert len(completed_events) == 1
+ assert completed_events[0]["data"]["response"]["agent_session_id"] == session_id
+
+ def test_background_payload_session_id_stamped_on_response(self) -> None:
+ """B39 P1: background response carries the payload session ID.
+
+ Background POST returns a queued snapshot immediately (before the handler
+ runs), so agent_session_id is stamped once the handler processes. We poll
+ GET to verify the session ID after completion.
+ """
+ session_id = "bg-session-abc"
+ client = _build_client()
+
+ response = client.post(
+ "/responses",
+ json={
+ "model": "test",
+ "background": True,
+ "agent_session_id": session_id,
+ },
+ )
+ assert response.status_code == 200
+ response_id = response.json()["id"]
+
+ terminal = _wait_for_terminal(client, response_id)
+ assert terminal["agent_session_id"] == session_id
+
+
+# ════════════════════════════════════════════════════════════
+# Tier 2: Fallback to FOUNDRY_AGENT_SESSION_ID env var
+# ════════════════════════════════════════════════════════════
+
+
+class TestEnvVarFallback:
+ """FOUNDRY_AGENT_SESSION_ID env var is used when no payload field."""
+
+ def test_no_payload_session_id_falls_back_to_env_var(self) -> None:
+ """B39 P2: env var used when no payload session ID."""
+ env_session_id = "env-session-from-foundry"
+
+ with patch.dict(os.environ, {"FOUNDRY_AGENT_SESSION_ID": env_session_id}):
+ client = _build_client()
+ response = client.post(
+ "/responses",
+ json={"model": "test"},
+ )
+
+ assert response.status_code == 200
+ assert response.json()["agent_session_id"] == env_session_id
+
+ def test_payload_session_id_overrides_env_var(self) -> None:
+ """B39: payload field takes precedence over env var."""
+ payload_session_id = "payload-wins"
+ env_session_id = "env-loses"
+
+ with patch.dict(os.environ, {"FOUNDRY_AGENT_SESSION_ID": env_session_id}):
+ client = _build_client()
+ response = client.post(
+ "/responses",
+ json={
+ "model": "test",
+ "agent_session_id": payload_session_id,
+ },
+ )
+
+ assert response.status_code == 200
+ assert response.json()["agent_session_id"] == payload_session_id
+
+
+# ════════════════════════════════════════════════════════════
+# Tier 3: Fallback to generated UUID
+# ════════════════════════════════════════════════════════════
+
+
+class TestGeneratedUuidFallback:
+ """Generated UUID when no payload field or env var."""
+
+ def test_no_payload_or_env_generates_session_id(self) -> None:
+ """B39 P3: generated UUID when nothing else is available."""
+ with patch.dict(os.environ, {}, clear=False):
+ # Remove FOUNDRY_AGENT_SESSION_ID if present
+ env = os.environ.copy()
+ env.pop("FOUNDRY_AGENT_SESSION_ID", None)
+ with patch.dict(os.environ, env, clear=True):
+ client = _build_client()
+ response = client.post(
+ "/responses",
+ json={"model": "test"},
+ )
+
+ assert response.status_code == 200
+ session_id = response.json()["agent_session_id"]
+ assert session_id is not None and session_id != ""
+ # Verify it's a valid UUID
+ uuid.UUID(session_id)
+
+ def test_generated_session_id_is_different_per_request(self) -> None:
+ """B39 P3: generated session IDs are unique per request."""
+ with patch.dict(os.environ, {}, clear=False):
+ env = os.environ.copy()
+ env.pop("FOUNDRY_AGENT_SESSION_ID", None)
+ with patch.dict(os.environ, env, clear=True):
+ client = _build_client()
+ response1 = client.post(
+ "/responses",
+ json={"model": "test"},
+ )
+ response2 = client.post(
+ "/responses",
+ json={"model": "test"},
+ )
+
+ session_id1 = response1.json()["agent_session_id"]
+ session_id2 = response2.json()["agent_session_id"]
+ assert session_id1 != session_id2, "Generated session IDs should be unique per request"
+
+
+# ════════════════════════════════════════════════════════════
+# Cross-mode consistency
+# ════════════════════════════════════════════════════════════
+
+
+class TestCrossModeConsistency:
+ """Session ID stamping works consistently across modes."""
+
+ def test_streaming_no_payload_or_env_stamps_generated_session_id(self) -> None:
+ """B39: streaming mode generates and stamps a UUID session ID."""
+ with patch.dict(os.environ, {}, clear=False):
+ env = os.environ.copy()
+ env.pop("FOUNDRY_AGENT_SESSION_ID", None)
+ with patch.dict(os.environ, env, clear=True):
+ client = _build_client(_simple_text_handler)
+ with client.stream(
+ "POST",
+ "/responses",
+ json={"model": "test", "stream": True},
+ ) as resp:
+ assert resp.status_code == 200
+ events = _collect_sse_events(resp)
+
+ completed_events = [e for e in events if e["type"] == "response.completed"]
+ assert len(completed_events) == 1
+ session_id = completed_events[0]["data"]["response"]["agent_session_id"]
+ assert session_id is not None and session_id != ""
+ uuid.UUID(session_id)
+
+ def test_background_no_payload_or_env_stamps_generated_session_id(self) -> None:
+ """B39: background mode generates and stamps a UUID session ID."""
+ with patch.dict(os.environ, {}, clear=False):
+ env = os.environ.copy()
+ env.pop("FOUNDRY_AGENT_SESSION_ID", None)
+ with patch.dict(os.environ, env, clear=True):
+ client = _build_client()
+ response = client.post(
+ "/responses",
+ json={"model": "test", "background": True},
+ )
+ assert response.status_code == 200
+ response_id = response.json()["id"]
+ terminal = _wait_for_terminal(client, response_id)
+
+ session_id = terminal["agent_session_id"]
+ assert session_id is not None and session_id != ""
+ uuid.UUID(session_id)
+
+ def test_background_streaming_payload_session_id_on_all_events(self) -> None:
+ """B39: bg+streaming with payload session ID stamps all lifecycle events."""
+ session_id = "bg-stream-session-42"
+ client = _build_client(_simple_text_handler)
+
+ with client.stream(
+ "POST",
+ "/responses",
+ json={
+ "model": "test",
+ "stream": True,
+ "background": True,
+ "agent_session_id": session_id,
+ },
+ ) as resp:
+ assert resp.status_code == 200
+ events = _collect_sse_events(resp)
+
+ # All response.* lifecycle events should have the session ID
+ lifecycle_types = {
+ "response.created",
+ "response.in_progress",
+ "response.completed",
+ "response.failed",
+ "response.incomplete",
+ }
+ lifecycle_events = [e for e in events if e["type"] in lifecycle_types]
+ assert len(lifecycle_events) >= 1, "Expected at least one lifecycle event"
+
+ for event in lifecycle_events:
+ resp_payload = event["data"].get("response", event["data"])
+ assert resp_payload.get("agent_session_id") == session_id, (
+ f"Missing/wrong agent_session_id on {event['type']}"
+ )
+
+ def test_session_id_consistent_between_create_and_get(self) -> None:
+ """B39: session ID on POST matches session ID on subsequent GET."""
+ session_id = "consistent-session-check"
+ client = _build_client()
+
+ response = client.post(
+ "/responses",
+ json={
+ "model": "test",
+ "background": True,
+ "agent_session_id": session_id,
+ },
+ )
+ assert response.status_code == 200
+ response_id = response.json()["id"]
+
+ _wait_for_terminal(client, response_id)
+
+ get_resp = client.get(f"/responses/{response_id}")
+ assert get_resp.status_code == 200
+ assert get_resp.json()["agent_session_id"] == session_id
+
+ def test_session_id_consistent_between_create_and_sse_replay(self) -> None:
+ """B39: session ID on create matches session ID in SSE replay events."""
+ session_id = "replay-session-check"
+ client = _build_client(_simple_text_handler)
+
+ # Create bg+stream response
+ with client.stream(
+ "POST",
+ "/responses",
+ json={
+ "model": "test",
+ "stream": True,
+ "background": True,
+ "agent_session_id": session_id,
+ },
+ ) as resp:
+ assert resp.status_code == 200
+ create_events = _collect_sse_events(resp)
+
+ # Extract response ID from creation events
+ created = [e for e in create_events if e["type"] == "response.created"]
+ assert len(created) == 1
+ response_id = created[0]["data"]["response"]["id"]
+
+ _wait_for_terminal(client, response_id)
+
+ # SSE replay should carry the same session ID
+ with client.stream("GET", f"/responses/{response_id}?stream=true") as replay_resp:
+ assert replay_resp.status_code == 200
+ replay_events = _collect_sse_events(replay_resp)
+
+ lifecycle_types = {"response.created", "response.completed"}
+ for event in replay_events:
+ if event["type"] in lifecycle_types:
+ resp_payload = event["data"].get("response", event["data"])
+ assert resp_payload.get("agent_session_id") == session_id, (
+ f"SSE replay {event['type']} missing agent_session_id"
+ )
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_snapshot_consistency.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_snapshot_consistency.py
new file mode 100644
index 000000000000..09c94171da05
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_snapshot_consistency.py
@@ -0,0 +1,332 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Protocol conformance tests for immutable event snapshots (US1).
+
+Verifies that SSE events and GET responses contain point-in-time snapshot data,
+not mutable references that change with subsequent mutations.
+
+Python port of SnapshotConsistencyTests.
+"""
+
+from __future__ import annotations
+
+import asyncio
+import json as _json
+from typing import Any
+
+import pytest
+
+from azure.ai.agentserver.responses import ResponsesAgentServerHost
+from azure.ai.agentserver.responses._id_generator import IdGenerator
+from azure.ai.agentserver.responses.streaming._event_stream import ResponseEventStream
+
+# ════════════════════════════════════════════════════════════
+# Lightweight async ASGI client (same pattern as test_cross_api_e2e_async)
+# ════════════════════════════════════════════════════════════
+
+
+class _AsgiResponse:
+ def __init__(self, status_code: int, body: bytes, headers: list[tuple[bytes, bytes]]) -> None:
+ self.status_code = status_code
+ self.body = body
+ self.headers = headers
+
+ def json(self) -> Any:
+ return _json.loads(self.body)
+
+
+class _AsyncAsgiClient:
+ def __init__(self, app: Any) -> None:
+ self._app = app
+
+ @staticmethod
+ def _build_scope(method: str, path: str, body: bytes) -> dict[str, Any]:
+ headers: list[tuple[bytes, bytes]] = []
+ query_string = b""
+ if "?" in path:
+ path, qs = path.split("?", 1)
+ query_string = qs.encode()
+ if body:
+ headers = [
+ (b"content-type", b"application/json"),
+ (b"content-length", str(len(body)).encode()),
+ ]
+ return {
+ "type": "http",
+ "asgi": {"version": "3.0"},
+ "http_version": "1.1",
+ "method": method,
+ "headers": headers,
+ "scheme": "http",
+ "path": path,
+ "raw_path": path.encode(),
+ "query_string": query_string,
+ "server": ("localhost", 80),
+ "client": ("127.0.0.1", 123),
+ "root_path": "",
+ }
+
+ async def request(self, method: str, path: str, *, json_body: dict[str, Any] | None = None) -> _AsgiResponse:
+ body = _json.dumps(json_body).encode() if json_body else b""
+ scope = self._build_scope(method, path, body)
+ status_code: int | None = None
+ response_headers: list[tuple[bytes, bytes]] = []
+ body_parts: list[bytes] = []
+ request_sent = False
+ response_done = asyncio.Event()
+
+ async def receive() -> dict[str, Any]:
+ nonlocal request_sent
+ if not request_sent:
+ request_sent = True
+ return {"type": "http.request", "body": body, "more_body": False}
+ await response_done.wait()
+ return {"type": "http.disconnect"}
+
+ async def send(message: dict[str, Any]) -> None:
+ nonlocal status_code, response_headers
+ if message["type"] == "http.response.start":
+ status_code = message["status"]
+ response_headers = message.get("headers", [])
+ elif message["type"] == "http.response.body":
+ chunk = message.get("body", b"")
+ if chunk:
+ body_parts.append(chunk)
+ if not message.get("more_body", False):
+ response_done.set()
+
+ await self._app(scope, receive, send)
+ assert status_code is not None
+ return _AsgiResponse(status_code=status_code, body=b"".join(body_parts), headers=response_headers)
+
+ async def get(self, path: str) -> _AsgiResponse:
+ return await self.request("GET", path)
+
+ async def post(self, path: str, *, json_body: dict[str, Any] | None = None) -> _AsgiResponse:
+ return await self.request("POST", path, json_body=json_body)
+
+
+# ════════════════════════════════════════════════════════════
+# Helpers
+# ════════════════════════════════════════════════════════════
+
+
+def _parse_sse_events(text: str) -> list[dict[str, Any]]:
+ events: list[dict[str, Any]] = []
+ current_type: str | None = None
+ current_data: str | None = None
+ for line in text.splitlines():
+ if not line:
+ if current_type is not None:
+ payload = _json.loads(current_data) if current_data else {}
+ events.append({"type": current_type, "data": payload})
+ current_type = None
+ current_data = None
+ continue
+ if line.startswith("event:"):
+ current_type = line.split(":", 1)[1].strip()
+ elif line.startswith("data:"):
+ current_data = line.split(":", 1)[1].strip()
+ if current_type is not None:
+ payload = _json.loads(current_data) if current_data else {}
+ events.append({"type": current_type, "data": payload})
+ return events
+
+
+def _build_client(handler: Any) -> _AsyncAsgiClient:
+ app = ResponsesAgentServerHost()
+ app.create_handler(handler)
+ return _AsyncAsgiClient(app)
+
+
+async def _ensure_task_done(task: asyncio.Task[Any], handler: Any, timeout: float = 5.0) -> None:
+ for attr in vars(handler):
+ obj = getattr(handler, attr, None)
+ if isinstance(obj, asyncio.Event):
+ obj.set()
+ if not task.done():
+ try:
+ await asyncio.wait_for(task, timeout=timeout)
+ except (asyncio.TimeoutError, Exception):
+ task.cancel()
+ try:
+ await task
+ except (asyncio.CancelledError, Exception):
+ pass
+
+
+def _make_multi_output_handler():
+ """Handler that emits 2 output items sequentially for snapshot isolation testing."""
+
+ def handler(request: Any, context: Any, cancellation_signal: Any):
+ async def _events():
+ stream = ResponseEventStream(response_id=context.response_id, model=getattr(request, "model", None))
+ yield stream.emit_created()
+
+ msg1 = stream.add_output_item_message()
+ yield msg1.emit_added()
+ text1 = msg1.add_text_content()
+ yield text1.emit_added()
+ yield text1.emit_delta("First")
+ yield text1.emit_done()
+ yield msg1.emit_content_done(text1)
+ yield msg1.emit_done()
+
+ msg2 = stream.add_output_item_message()
+ yield msg2.emit_added()
+ text2 = msg2.add_text_content()
+ yield text2.emit_added()
+ yield text2.emit_delta("Second")
+ yield text2.emit_done()
+ yield msg2.emit_content_done(text2)
+ yield msg2.emit_done()
+
+ yield stream.emit_completed()
+
+ return _events()
+
+ return handler
+
+
+def _make_replay_gated_handler():
+ """Handler for replay snapshot test — waits for gate before completing."""
+ done = asyncio.Event()
+
+ def handler(request: Any, context: Any, cancellation_signal: Any):
+ async def _events():
+ stream = ResponseEventStream(response_id=context.response_id, model=getattr(request, "model", None))
+ yield stream.emit_created()
+ yield stream.emit_in_progress()
+
+ await done.wait()
+
+ yield stream.emit_completed()
+
+ return _events()
+
+ handler.done = done
+ return handler
+
+
+# ════════════════════════════════════════════════════════════
+# T010: SSE event snapshot isolation
+#
+# response.created event's embedded response does not include items added
+# after it was emitted. response.completed contains all output items.
+# ════════════════════════════════════════════════════════════
+
+
+def test_sse_events_contain_snapshot_not_live_reference() -> None:
+ """T010 — SSE events reflect point-in-time snapshots.
+
+ response.created should show in_progress with no output.
+ response.completed should show completed with all output items.
+ """
+ from starlette.testclient import TestClient
+
+ handler = _make_multi_output_handler()
+ app = ResponsesAgentServerHost()
+ app.create_handler(handler)
+ client = TestClient(app)
+
+ with client.stream("POST", "/responses", json={"model": "test", "stream": True}) as resp:
+ events: list[dict[str, Any]] = []
+ current_type: str | None = None
+ current_data: str | None = None
+ for line in resp.iter_lines():
+ if not line:
+ if current_type is not None:
+ payload = _json.loads(current_data) if current_data else {}
+ events.append({"type": current_type, "data": payload})
+ current_type = None
+ current_data = None
+ continue
+ if line.startswith("event:"):
+ current_type = line.split(":", 1)[1].strip()
+ elif line.startswith("data:"):
+ current_data = line.split(":", 1)[1].strip()
+ if current_type is not None:
+ payload = _json.loads(current_data) if current_data else {}
+ events.append({"type": current_type, "data": payload})
+
+ # response.created should have in_progress status
+ created_event = next(e for e in events if e["type"] == "response.created")
+ created_status = created_event["data"]["response"]["status"]
+ assert created_status == "in_progress"
+
+ # response.completed should have completed status with all items
+ completed_event = next(e for e in events if e["type"] == "response.completed")
+ completed_status = completed_event["data"]["response"]["status"]
+ assert completed_status == "completed"
+ completed_output = completed_event["data"]["response"]["output"]
+ assert len(completed_output) >= 2, "completed should have all output items"
+
+ # CRITICAL: response.created should have fewer outputs than completed
+ created_output = created_event["data"]["response"]["output"]
+ assert len(created_output) < len(completed_output), (
+ f"created event should have fewer outputs ({len(created_output)}) "
+ f"than completed event ({len(completed_output)}) — snapshot isolation"
+ )
+
+
+# ════════════════════════════════════════════════════════════
+# T011 / SC-002: Replay snapshot integrity
+#
+# Replayed response.created has status in_progress (emission-time state),
+# not completed (current state).
+# ════════════════════════════════════════════════════════════
+
+
+@pytest.mark.asyncio
+async def test_sse_replay_reflects_emission_time_state() -> None:
+ """T011 — replayed response.created shows in_progress (emission-time), not completed."""
+ handler = _make_replay_gated_handler()
+ client = _build_client(handler)
+ response_id = IdGenerator.new_response_id()
+
+ post_task = asyncio.create_task(
+ client.post(
+ "/responses",
+ json_body={
+ "response_id": response_id,
+ "model": "test",
+ "stream": True,
+ "store": True,
+ "background": True,
+ },
+ )
+ )
+ try:
+ # Let handler complete
+ await asyncio.sleep(0.3)
+ handler.done.set()
+
+ post_resp = await asyncio.wait_for(post_task, timeout=5.0)
+ assert post_resp.status_code == 200
+ finally:
+ await _ensure_task_done(post_task, handler)
+
+ # Wait for background completion
+ for _ in range(50):
+ get_resp = await client.get(f"/responses/{response_id}")
+ if get_resp.status_code == 200 and get_resp.json().get("status") in ("completed", "failed"):
+ break
+ await asyncio.sleep(0.1)
+
+ # Verify response is completed
+ get_resp = await client.get(f"/responses/{response_id}")
+ assert get_resp.status_code == 200
+ assert get_resp.json()["status"] == "completed"
+
+ # Replay — response.created should show in_progress (emission-time state)
+ replay_resp = await client.get(f"/responses/{response_id}?stream=true")
+ assert replay_resp.status_code == 200
+
+ replay_events = _parse_sse_events(replay_resp.body.decode())
+ replay_created = next((e for e in replay_events if e["type"] == "response.created"), None)
+ assert replay_created is not None
+ replay_created_status = replay_created["data"]["response"]["status"]
+ assert replay_created_status == "in_progress", (
+ f"Replayed response.created should show 'in_progress' (emission-time state), "
+ f"not '{replay_created_status}' (current state)"
+ )
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_streaming_behavior.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_streaming_behavior.py
new file mode 100644
index 000000000000..d866fcac7bd1
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_streaming_behavior.py
@@ -0,0 +1,524 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Contract tests for SSE streaming behavior."""
+
+from __future__ import annotations
+
+import asyncio
+import json
+from typing import Any
+
+from starlette.testclient import TestClient
+
+from azure.ai.agentserver.responses import ResponsesAgentServerHost
+from azure.ai.agentserver.responses.streaming._event_stream import ResponseEventStream
+
+
+def _noop_response_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Minimal handler used to wire the hosting surface in contract tests."""
+
+ async def _events():
+ if False: # pragma: no cover - required to keep async-generator shape.
+ yield None
+
+ return _events()
+
+
+def _build_client() -> TestClient:
+ app = ResponsesAgentServerHost()
+ app.create_handler(_noop_response_handler)
+ return TestClient(app)
+
+
+def _throwing_before_yield_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Handler that raises before yielding any event.
+
+ Used to test pre-creation error handling in SSE streaming mode.
+ """
+
+ async def _events():
+ raise RuntimeError("Simulated pre-creation failure")
+ if False: # pragma: no cover - keep async generator shape.
+ yield None
+
+ return _events()
+
+
+def _throwing_after_created_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Handler that emits response.created then raises.
+
+ Used to test post-creation error handling in SSE streaming mode.
+ """
+
+ async def _events():
+ stream = ResponseEventStream(response_id=context.response_id, model=getattr(request, "model", None))
+ yield stream.emit_created()
+ raise RuntimeError("Simulated post-creation failure")
+
+ return _events()
+
+
+def _collect_stream_events(response: Any) -> list[dict[str, Any]]:
+ events: list[dict[str, Any]] = []
+ current_type: str | None = None
+ current_data: str | None = None
+
+ for line in response.iter_lines():
+ if not line:
+ if current_type is not None:
+ parsed_data: dict[str, Any] = {}
+ if current_data:
+ parsed_data = json.loads(current_data)
+ events.append({"type": current_type, "data": parsed_data})
+ current_type = None
+ current_data = None
+ continue
+
+ if line.startswith("event:"):
+ current_type = line.split(":", 1)[1].strip()
+ elif line.startswith("data:"):
+ current_data = line.split(":", 1)[1].strip()
+
+ if current_type is not None:
+ parsed_data = json.loads(current_data) if current_data else {}
+ events.append({"type": current_type, "data": parsed_data})
+
+ return events
+
+
+def test_streaming__first_event_is_response_created() -> None:
+ client = _build_client()
+
+ with client.stream(
+ "POST",
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": True,
+ "store": True,
+ "background": False,
+ },
+ ) as response:
+ assert response.status_code == 200
+ assert response.headers.get("content-type", "").startswith("text/event-stream")
+ events = _collect_stream_events(response)
+
+ assert events, "Expected at least one SSE event"
+ assert events[0]["type"] == "response.created"
+ # Contract (B8): response.created event status must be queued or in_progress
+ created_status = events[0]["data"]["response"].get("status")
+ assert created_status in {"queued", "in_progress"}, (
+ f"response.created status must be queued or in_progress per B8, got: {created_status}"
+ )
+
+
+def test_streaming__sequence_number_is_monotonic_and_contiguous() -> None:
+ client = _build_client()
+
+ with client.stream(
+ "POST",
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": True,
+ "store": True,
+ "background": False,
+ },
+ ) as response:
+ assert response.status_code == 200
+ events = _collect_stream_events(response)
+
+ assert events, "Expected at least one SSE event"
+ sequence_numbers = [event["data"].get("sequence_number") for event in events]
+ assert all(isinstance(sequence_number, int) for sequence_number in sequence_numbers)
+ assert sequence_numbers == sorted(sequence_numbers)
+ assert sequence_numbers == list(range(len(sequence_numbers)))
+
+
+def test_streaming__has_exactly_one_terminal_event() -> None:
+ client = _build_client()
+
+ with client.stream(
+ "POST",
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": True,
+ "store": True,
+ "background": False,
+ },
+ ) as response:
+ assert response.status_code == 200
+ events = _collect_stream_events(response)
+
+ event_types = [event["type"] for event in events]
+ terminal_types = {"response.completed", "response.failed", "response.incomplete"}
+ terminal_count = sum(1 for event_type in event_types if event_type in terminal_types)
+ assert terminal_count == 1
+
+
+def test_streaming__identity_fields_are_consistent_across_events() -> None:
+ client = _build_client()
+
+ with client.stream(
+ "POST",
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": True,
+ "store": True,
+ "background": False,
+ },
+ ) as response:
+ assert response.status_code == 200
+ events = _collect_stream_events(response)
+
+ assert events, "Expected at least one SSE event"
+ # The first event is response.created — a lifecycle event whose data wraps the
+ # Response snapshot under the "response" key per the ResponseCreatedEvent contract.
+ first_response = events[0]["data"]["response"]
+ response_id = first_response.get("response_id")
+ assert response_id == first_response.get("id")
+ assert isinstance(first_response.get("agent_reference"), dict)
+
+ _LIFECYCLE_TYPES = {
+ "response.queued",
+ "response.created",
+ "response.in_progress",
+ "response.completed",
+ "response.failed",
+ "response.incomplete",
+ }
+ lifecycle_events = [e for e in events if e["type"] in _LIFECYCLE_TYPES]
+ for event in lifecycle_events:
+ response_payload = event["data"]["response"]
+ assert response_payload.get("response_id") == response_id
+ assert response_payload.get("id") == response_id
+ assert response_payload.get("agent_reference") == first_response.get("agent_reference")
+
+
+def test_streaming__forwards_emitted_event_before_late_handler_failure() -> None:
+ def _fail_after_first_event_handler(request: Any, context: Any, cancellation_signal: Any):
+ async def _events():
+ yield {
+ "type": "response.created",
+ "response": {
+ "status": "in_progress",
+ },
+ }
+ await asyncio.sleep(0)
+ raise RuntimeError("late stream failure")
+
+ return _events()
+
+ app = ResponsesAgentServerHost()
+ app.create_handler(_fail_after_first_event_handler)
+ client = TestClient(app)
+
+ with client.stream(
+ "POST",
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": True,
+ "store": True,
+ "background": False,
+ },
+ ) as response:
+ assert response.status_code == 200
+ assert response.headers.get("content-type", "").startswith("text/event-stream")
+ first_event_line = ""
+ for line in response.iter_lines():
+ if line.startswith("event:"):
+ first_event_line = line
+ break
+
+ assert first_event_line == "event: response.created"
+
+
+def test_streaming__sse_response_headers_per_contract() -> None:
+ """SSE Response Headers: Content-Type with charset, Connection, Cache-Control, X-Accel-Buffering."""
+ client = _build_client()
+
+ with client.stream(
+ "POST",
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": True,
+ "store": True,
+ "background": False,
+ },
+ ) as response:
+ assert response.status_code == 200
+ content_type = response.headers.get("content-type", "")
+ assert content_type == "text/event-stream; charset=utf-8", (
+ f"Expected Content-Type with charset per SSE headers contract, got: {content_type}"
+ )
+ assert response.headers.get("connection") == "keep-alive", "Missing Connection: keep-alive"
+ assert response.headers.get("cache-control") == "no-cache", "Missing Cache-Control: no-cache"
+ assert response.headers.get("x-accel-buffering") == "no", "Missing X-Accel-Buffering: no"
+ list(response.iter_lines())
+
+
+def test_streaming__wire_format_has_no_sse_id_field() -> None:
+ """B27 — SSE wire format must not contain id: lines. Sequence number is in JSON payload."""
+ client = _build_client()
+
+ with client.stream(
+ "POST",
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": True,
+ "store": True,
+ "background": False,
+ },
+ ) as response:
+ assert response.status_code == 200
+ raw_lines = list(response.iter_lines())
+
+ id_lines = [line for line in raw_lines if line.startswith("id:")]
+ assert id_lines == [], f"SSE stream must not contain id: lines per B27, found: {id_lines}"
+
+
+def test_streaming__background_stream_may_include_response_queued_event() -> None:
+ """B8 — response.queued is optional in background mode SSE streams."""
+ client = _build_client()
+
+ with client.stream(
+ "POST",
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": True,
+ "store": True,
+ "background": True,
+ },
+ ) as response:
+ assert response.status_code == 200
+ events = _collect_stream_events(response)
+
+ assert events, "Expected at least one SSE event"
+ event_types = [e["type"] for e in events]
+ # response.created must be first
+ assert event_types[0] == "response.created"
+ # If response.queued is present, it must be right after response.created
+ if "response.queued" in event_types:
+ queued_idx = event_types.index("response.queued")
+ assert queued_idx == 1, "response.queued should be the second event if present"
+
+
+# ══════════════════════════════════════════════════════════
+# B4, B10, B13: Handler failure and in_progress event
+# ══════════════════════════════════════════════════════════
+
+
+def test_streaming__pre_creation_handler_failure_produces_terminal_event() -> None:
+ """B4 — Handler raising before any yield in streaming mode → SSE stream terminates with a proper terminal event."""
+ handler = _throwing_before_yield_handler
+ app = ResponsesAgentServerHost()
+ app.create_handler(handler)
+ client = TestClient(app, raise_server_exceptions=False)
+
+ with client.stream(
+ "POST",
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": True,
+ "store": True,
+ "background": False,
+ },
+ ) as response:
+ assert response.status_code == 200
+ events = _collect_stream_events(response)
+
+ event_types = [e["type"] for e in events]
+ # B8: pre-creation error → standalone `error` SSE event only.
+ # No response.created must precede it.
+ assert "error" in event_types, (
+ f"SSE stream must emit standalone 'error' event for pre-creation failure, got: {event_types}"
+ )
+ assert "response.created" not in event_types, (
+ f"Pre-creation error must NOT emit response.created before 'error' event, got: {event_types}"
+ )
+
+
+def test_streaming__response_in_progress_event_is_in_stream() -> None:
+ """B10 — response.in_progress must appear in the SSE stream between response.created and the terminal event."""
+ client = _build_client()
+
+ with client.stream(
+ "POST",
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": True,
+ "store": True,
+ "background": False,
+ },
+ ) as response:
+ assert response.status_code == 200
+ events = _collect_stream_events(response)
+
+ event_types = [e["type"] for e in events]
+ assert "response.in_progress" in event_types, f"Expected response.in_progress in SSE stream, got: {event_types}"
+ created_idx = event_types.index("response.created")
+ in_progress_idx = event_types.index("response.in_progress")
+ terminal_set = {"response.completed", "response.failed", "response.incomplete"}
+ terminal_idx = next((i for i, t in enumerate(event_types) if t in terminal_set), None)
+ assert terminal_idx is not None, f"No terminal event found in: {event_types}"
+ assert created_idx < in_progress_idx < terminal_idx, (
+ f"response.in_progress must appear after response.created and before terminal event. Order was: {event_types}"
+ )
+
+
+def test_streaming__post_creation_error_yields_response_failed_not_error_event() -> None:
+ """B13 — Handler raising after response.created → terminal is response.failed, NOT a standalone error event."""
+ handler = _throwing_after_created_handler
+ app = ResponsesAgentServerHost()
+ app.create_handler(handler)
+ client = TestClient(app, raise_server_exceptions=False)
+
+ with client.stream(
+ "POST",
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": True,
+ "store": True,
+ "background": False,
+ },
+ ) as response:
+ assert response.status_code == 200
+ events = _collect_stream_events(response)
+
+ event_types = [e["type"] for e in events]
+ assert "response.failed" in event_types, (
+ f"Expected response.failed terminal event after post-creation error, got: {event_types}"
+ )
+ # After response.created has been emitted, no standalone 'error' event should appear.
+ # The failure must be surfaced as response.failed, not a raw error event.
+ assert "error" not in event_types, (
+ f"Standalone 'error' event must not appear after response.created. Events: {event_types}"
+ )
+
+
+# ══════════════════════════════════════════════════════════
+# Task 4.1 — _process_handler_events pipeline contract tests
+# ══════════════════════════════════════════════════════════
+
+
+def test_stream_pre_creation_error_emits_error_event() -> None:
+ """T1 — Handler raises before yielding; stream=True → SSE stream contains only a standalone error event.
+
+ B8: The standalone ``error`` event must be the only event; ``response.created`` must NOT appear.
+ """
+ _app = ResponsesAgentServerHost()
+ _app.create_handler(_throwing_before_yield_handler)
+ client = TestClient(_app, raise_server_exceptions=False)
+
+ with client.stream(
+ "POST",
+ "/responses",
+ json={"model": "gpt-4o-mini", "input": "hello", "stream": True, "store": True, "background": False},
+ ) as response:
+ assert response.status_code == 200
+ events = _collect_stream_events(response)
+
+ event_types = [e["type"] for e in events]
+ assert event_types == ["error"], f"Pre-creation error must produce exactly one 'error' event, got: {event_types}"
+ assert "response.created" not in event_types
+
+
+def test_stream_post_creation_error_emits_response_failed() -> None:
+ """T2 — Handler raises after response.created; stream=True → SSE ends with response.failed.
+
+ B13: After response.created, handler failures surface as ``response.failed``, not raw ``error``.
+ """
+ _app = ResponsesAgentServerHost()
+ _app.create_handler(_throwing_after_created_handler)
+ client = TestClient(_app, raise_server_exceptions=False)
+
+ with client.stream(
+ "POST",
+ "/responses",
+ json={"model": "gpt-4o-mini", "input": "hello", "stream": True, "store": True, "background": False},
+ ) as response:
+ assert response.status_code == 200
+ events = _collect_stream_events(response)
+
+ event_types = [e["type"] for e in events]
+ assert "response.failed" in event_types, (
+ f"Expected response.failed terminal after post-creation error, got: {event_types}"
+ )
+ assert "error" not in event_types, f"No standalone error event expected after response.created, got: {event_types}"
+ # Exactly one terminal event
+ terminal_types = {"response.completed", "response.failed", "response.incomplete"}
+ assert sum(1 for t in event_types if t in terminal_types) == 1
+
+
+def test_stream_empty_handler_emits_full_lifecycle() -> None:
+ """T3 — Handler yields zero events; _process_handler_events synthesises full lifecycle.
+
+ The SSE stream must contain response.created → response.in_progress → response.completed.
+ """
+ client = _build_client()
+
+ with client.stream(
+ "POST",
+ "/responses",
+ json={"model": "gpt-4o-mini", "input": "hello", "stream": True, "store": True, "background": False},
+ ) as response:
+ assert response.status_code == 200
+ events = _collect_stream_events(response)
+
+ event_types = [e["type"] for e in events]
+ assert "response.created" in event_types, f"Missing response.created: {event_types}"
+ assert "response.in_progress" in event_types, f"Missing response.in_progress: {event_types}"
+ terminal_types = {"response.completed", "response.failed", "response.incomplete"}
+ assert any(t in terminal_types for t in event_types), f"Missing terminal event in: {event_types}"
+ # created must come before in_progress which must come before terminal
+ created_idx = event_types.index("response.created")
+ in_progress_idx = event_types.index("response.in_progress")
+ terminal_idx = next(i for i, t in enumerate(event_types) if t in terminal_types)
+ assert created_idx < in_progress_idx < terminal_idx, f"Lifecycle order violated: {event_types}"
+
+
+def test_stream_sequence_numbers_monotonic() -> None:
+ """T4 — SSE events from a streaming response have strictly monotonically
+ increasing sequence numbers starting at 0."""
+ client = _build_client()
+
+ with client.stream(
+ "POST",
+ "/responses",
+ json={"model": "gpt-4o-mini", "input": "hello", "stream": True, "store": True, "background": False},
+ ) as response:
+ assert response.status_code == 200
+ events = _collect_stream_events(response)
+
+ assert events, "Expected at least one SSE event"
+ sequence_numbers = [e["data"].get("sequence_number") for e in events]
+ assert all(isinstance(sn, int) for sn in sequence_numbers), (
+ f"All events must carry an integer sequence_number, got: {sequence_numbers}"
+ )
+ assert sequence_numbers[0] == 0, f"First sequence_number must be 0, got {sequence_numbers[0]}"
+ assert sequence_numbers == sorted(sequence_numbers), (
+ f"Sequence numbers must be monotonically non-decreasing: {sequence_numbers}"
+ )
+ assert len(set(sequence_numbers)) == len(sequence_numbers), (
+ f"Sequence numbers must be unique (strictly increasing): {sequence_numbers}"
+ )
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_tracing.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_tracing.py
new file mode 100644
index 000000000000..545278bb4fd2
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/contract/test_tracing.py
@@ -0,0 +1,217 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Contract tests for distributed tracing on POST /responses.
+
+These tests verify the span display name, GenAI parity tags, additional OTEL
+tags, request ID propagation, and baggage items emitted by ``handle_create``.
+"""
+
+from __future__ import annotations
+
+from typing import Any
+
+from starlette.testclient import TestClient
+
+from azure.ai.agentserver.responses import ResponsesAgentServerHost, ResponsesServerOptions
+from azure.ai.agentserver.responses.hosting._observability import InMemoryCreateSpanHook
+
+
+def _noop_handler(request: Any, context: Any, cancellation_signal: Any):
+ async def _events():
+ if False: # pragma: no cover
+ yield None
+
+ return _events()
+
+
+def _build_client(hook: InMemoryCreateSpanHook | None = None) -> TestClient:
+ options = ResponsesServerOptions(create_span_hook=hook)
+ app = ResponsesAgentServerHost(options=options)
+ app.create_handler(_noop_handler)
+ return TestClient(app)
+
+
+# ---------------------------------------------------------------------------
+# Span display name
+# ---------------------------------------------------------------------------
+
+
+def test_tracing__span_name_includes_model() -> None:
+ hook = InMemoryCreateSpanHook()
+ client = _build_client(hook)
+
+ client.post(
+ "/responses",
+ json={"model": "gpt-4o-mini", "input": "hi", "stream": False},
+ )
+
+ assert len(hook.spans) == 1
+ assert hook.spans[0].name == "create_response"
+
+
+def test_tracing__span_name_without_model_falls_back_to_create_response() -> None:
+ """When model is absent the span name should still be emitted."""
+ hook = InMemoryCreateSpanHook()
+ client = _build_client(hook)
+
+ client.post(
+ "/responses",
+ json={"input": "hi", "stream": False},
+ )
+
+ # Span must be recorded regardless.
+ assert len(hook.spans) == 1
+ assert hook.spans[0].name == "create_response"
+
+
+# ---------------------------------------------------------------------------
+# GenAI parity tags
+# ---------------------------------------------------------------------------
+
+
+def test_tracing__span_tags_include_genai_parity_fields() -> None:
+ hook = InMemoryCreateSpanHook()
+ client = _build_client(hook)
+
+ client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o",
+ "input": "hello",
+ "stream": False,
+ "agent_reference": {"type": "agent_reference", "name": "my-agent", "version": "v2"},
+ },
+ )
+
+ assert len(hook.spans) == 1
+ tags = hook.spans[0].tags
+
+ assert isinstance(tags.get("gen_ai.response.id"), str)
+ assert tags["gen_ai.response.id"].startswith("caresp_") or tags["gen_ai.response.id"].startswith("resp_")
+ assert tags["gen_ai.agent.name"] == "my-agent"
+ assert tags["gen_ai.agent.id"] == "my-agent:v2"
+ assert tags["gen_ai.provider.name"] == "AzureAI Hosted Agents"
+ assert tags["service.name"] == "azure.ai.agentserver"
+
+
+# ---------------------------------------------------------------------------
+# Additional OTEL tags
+# ---------------------------------------------------------------------------
+
+
+def test_tracing__span_tags_include_operation_name_invoke_agent() -> None:
+ hook = InMemoryCreateSpanHook()
+ client = _build_client(hook)
+
+ client.post(
+ "/responses",
+ json={"model": "gpt-4o-mini", "input": "hi", "stream": False},
+ )
+
+ assert hook.spans[0].tags["gen_ai.operation.name"] == "invoke_agent"
+
+
+def test_tracing__span_tags_include_model() -> None:
+ hook = InMemoryCreateSpanHook()
+ client = _build_client(hook)
+
+ client.post(
+ "/responses",
+ json={"model": "gpt-4o-mini", "input": "hi", "stream": False},
+ )
+
+ assert hook.spans[0].tags["gen_ai.request.model"] == "gpt-4o-mini"
+
+
+def test_tracing__span_tags_include_conversation_id_when_present() -> None:
+ hook = InMemoryCreateSpanHook()
+ client = _build_client(hook)
+
+ client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hi",
+ "stream": False,
+ "conversation": "conv_abc123",
+ },
+ )
+
+ assert hook.spans[0].tags.get("gen_ai.conversation.id") == "conv_abc123"
+
+
+def test_tracing__span_tags_omit_conversation_id_when_absent() -> None:
+ hook = InMemoryCreateSpanHook()
+ client = _build_client(hook)
+
+ client.post(
+ "/responses",
+ json={"model": "gpt-4o-mini", "input": "hi", "stream": False},
+ )
+
+ assert "gen_ai.conversation.id" not in hook.spans[0].tags
+
+
+def test_tracing__span_tags_include_agent_version() -> None:
+ hook = InMemoryCreateSpanHook()
+ client = _build_client(hook)
+
+ client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hi",
+ "stream": False,
+ "agent_reference": {"type": "agent_reference", "name": "bot", "version": "1.0"},
+ },
+ )
+
+ assert hook.spans[0].tags.get("gen_ai.agent.version") == "1.0"
+
+
+# ---------------------------------------------------------------------------
+# Request ID propagation
+# ---------------------------------------------------------------------------
+
+
+def test_tracing__span_tags_include_request_id_from_header() -> None:
+ hook = InMemoryCreateSpanHook()
+ client = _build_client(hook)
+
+ client.post(
+ "/responses",
+ json={"model": "gpt-4o-mini", "input": "hi", "stream": False},
+ headers={"X-Request-Id": "req-abc-123"},
+ )
+
+ assert hook.spans[0].tags.get("request.id") == "req-abc-123"
+
+
+def test_tracing__request_id_truncated_to_256_characters() -> None:
+ hook = InMemoryCreateSpanHook()
+ client = _build_client(hook)
+
+ long_id = "x" * 300
+
+ client.post(
+ "/responses",
+ json={"model": "gpt-4o-mini", "input": "hi", "stream": False},
+ headers={"X-Request-Id": long_id},
+ )
+
+ value = hook.spans[0].tags.get("request.id")
+ assert value is not None
+ assert len(value) == 256
+ assert value == "x" * 256
+
+
+def test_tracing__span_tags_omit_request_id_when_header_absent() -> None:
+ hook = InMemoryCreateSpanHook()
+ client = _build_client(hook)
+
+ client.post(
+ "/responses",
+ json={"model": "gpt-4o-mini", "input": "hi", "stream": False},
+ )
+
+ assert "request.id" not in hook.spans[0].tags
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/data/minimal_openapi.json b/sdk/agentserver/azure-ai-agentserver-responses/tests/data/minimal_openapi.json
new file mode 100644
index 000000000000..f5b41faf41d2
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/data/minimal_openapi.json
@@ -0,0 +1,28 @@
+{
+ "paths": {
+ "/responses": {
+ "post": {
+ "requestBody": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/CreateResponse"
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "components": {
+ "schemas": {
+ "CreateResponse": {
+ "type": "object",
+ "required": ["model"],
+ "properties": {
+ "model": {"type": "string"}
+ }
+ }
+ }
+ }
+}
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/e2e/__init__.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/e2e/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/e2e/test_proxy_e2e.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/e2e/test_proxy_e2e.py
new file mode 100644
index 000000000000..e58c6d850e79
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/e2e/test_proxy_e2e.py
@@ -0,0 +1,562 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""End-to-end proxy tests for OpenAI-compatible upstream forwarding.
+
+Architecture:
+
+ Test client ──▶ Server A (proxy handler via openai SDK) ──▶ Server B (backend)
+
+Server B is a ResponsesAgentServerHost with handlers that emit rich SSE
+streams (multi-output, failed, text-only). Server A is a proxy that uses
+the ``openai`` Python SDK to call Server B via ``httpx.ASGITransport``.
+Tests verify the full round-trip including streaming, non-streaming,
+multi-output, failure propagation, and response-ID independence.
+"""
+
+from __future__ import annotations
+
+import json
+from typing import Any
+
+import httpx
+import openai
+from starlette.testclient import TestClient
+
+from azure.ai.agentserver.responses import (
+ CreateResponse,
+ ResponseContext,
+ ResponseEventStream,
+ ResponsesAgentServerHost,
+)
+
+# ---------------------------------------------------------------------------
+# SSE helpers (same pattern as test_sample_e2e.py)
+# ---------------------------------------------------------------------------
+
+
+def _collect_stream_events(response: Any) -> list[dict[str, Any]]:
+ """Parse SSE lines from a streaming response into structured events."""
+ events: list[dict[str, Any]] = []
+ current_type: str | None = None
+ current_data: str | None = None
+
+ for line in response.iter_lines():
+ if not line:
+ if current_type is not None:
+ parsed_data: dict[str, Any] = {}
+ if current_data:
+ parsed_data = json.loads(current_data)
+ events.append({"type": current_type, "data": parsed_data})
+ current_type = None
+ current_data = None
+ continue
+
+ if line.startswith("event:"):
+ current_type = line.split(":", 1)[1].strip()
+ elif line.startswith("data:"):
+ current_data = line.split(":", 1)[1].strip()
+
+ if current_type is not None:
+ parsed_data = json.loads(current_data) if current_data else {}
+ events.append({"type": current_type, "data": parsed_data})
+
+ return events
+
+
+def _post_stream(client: TestClient, payload: dict[str, Any]) -> list[dict[str, Any]]:
+ payload["stream"] = True
+ with client.stream("POST", "/responses", json=payload) as resp:
+ assert resp.status_code == 200
+ events = _collect_stream_events(resp)
+ return events
+
+
+def _base_payload(input_text: str = "hello", **overrides: Any) -> dict[str, Any]:
+ payload: dict[str, Any] = {
+ "model": "test-model",
+ "input": [
+ {
+ "type": "message",
+ "role": "user",
+ "content": [{"type": "input_text", "text": input_text}],
+ }
+ ],
+ }
+ payload.update(overrides)
+ return payload
+
+
+# ---------------------------------------------------------------------------
+# Server B handlers (backend that emits rich SSE streams)
+# ---------------------------------------------------------------------------
+
+
+def _emit_text_only_handler(text: str):
+ """Return a handler that emits a single text message."""
+
+ def handler(request: CreateResponse, context: ResponseContext, cancellation_signal: Any):
+ async def _events():
+ stream = ResponseEventStream(response_id=context.response_id, model=request.model)
+ yield stream.emit_created()
+ yield stream.emit_in_progress()
+
+ msg = stream.add_output_item_message()
+ yield msg.emit_added()
+ tc = msg.add_text_content()
+ yield tc.emit_added()
+ yield tc.emit_delta(text)
+ yield tc.emit_done(text)
+ yield msg.emit_content_done(tc)
+ yield msg.emit_done()
+ yield stream.emit_completed()
+
+ return _events()
+
+ return handler
+
+
+def _emit_multi_output_handler(request: CreateResponse, context: ResponseContext, cancellation_signal: Any):
+ """Emit 3 output items: reasoning + function_call + text message."""
+
+ async def _events():
+ stream = ResponseEventStream(response_id=context.response_id, model=request.model)
+ yield stream.emit_created()
+ yield stream.emit_in_progress()
+
+ # 1. Reasoning item
+ reasoning = stream.add_output_item_reasoning_item()
+ yield reasoning.emit_added()
+ sp = reasoning.add_summary_part()
+ yield sp.emit_added()
+ yield sp.emit_text_delta("Thinking about")
+ yield sp.emit_text_delta(" the answer...")
+ yield sp.emit_text_done("Thinking about the answer...")
+ yield sp.emit_done()
+ reasoning.emit_summary_part_done(sp)
+ yield reasoning.emit_done()
+
+ # 2. Function call
+ fc = stream.add_output_item_function_call("get_weather", "call_proxy_001")
+ yield fc.emit_added()
+ yield fc.emit_arguments_delta('{"city":')
+ yield fc.emit_arguments_delta('"Seattle"}')
+ yield fc.emit_arguments_done('{"city":"Seattle"}')
+ yield fc.emit_done()
+
+ # 3. Text message
+ msg = stream.add_output_item_message()
+ yield msg.emit_added()
+ tc = msg.add_text_content()
+ yield tc.emit_added()
+ yield tc.emit_delta("The answer")
+ yield tc.emit_delta(" is 42.")
+ yield tc.emit_done("The answer is 42.")
+ yield msg.emit_content_done(tc)
+ yield msg.emit_done()
+
+ yield stream.emit_completed()
+
+ return _events()
+
+
+def _emit_failed_handler(request: CreateResponse, context: ResponseContext, cancellation_signal: Any):
+ """Emit created, in_progress, then failed."""
+
+ async def _events():
+ stream = ResponseEventStream(response_id=context.response_id, model=request.model)
+ yield stream.emit_created()
+ yield stream.emit_in_progress()
+ yield stream.emit_failed(code="server_error", message="Backend processing error")
+
+ return _events()
+
+
+# ---------------------------------------------------------------------------
+# Server A handler (proxy using openai SDK → Server B)
+# ---------------------------------------------------------------------------
+
+
+def _make_streaming_proxy_handler(upstream_client: openai.AsyncOpenAI):
+ """Create a streaming proxy handler that forwards to upstream via openai SDK."""
+
+ def handler(request: CreateResponse, context: ResponseContext, cancellation_signal: Any):
+ async def _events():
+ stream = ResponseEventStream(response_id=context.response_id, model=request.model)
+ yield stream.emit_created()
+ yield stream.emit_in_progress()
+
+ msg = stream.add_output_item_message()
+ yield msg.emit_added()
+ tc = msg.add_text_content()
+ yield tc.emit_added()
+
+ user_text = await context.get_input_text() or "hello"
+ full_text: list[str] = []
+
+ async with await upstream_client.responses.create(
+ model=request.model or "gpt-4o-mini",
+ input=user_text,
+ stream=True,
+ ) as upstream_stream:
+ async for event in upstream_stream:
+ if event.type == "response.output_text.delta":
+ full_text.append(event.delta)
+ yield tc.emit_delta(event.delta)
+
+ result_text = "".join(full_text)
+ yield tc.emit_done(result_text)
+ yield msg.emit_content_done(tc)
+ yield msg.emit_done()
+ yield stream.emit_completed()
+
+ return _events()
+
+ return handler
+
+
+def _make_non_streaming_proxy_handler(upstream_client: openai.AsyncOpenAI):
+ """Create a non-streaming proxy handler that forwards to upstream via openai SDK."""
+
+ def handler(request: CreateResponse, context: ResponseContext, cancellation_signal: Any):
+ async def _events():
+ user_text = await context.get_input_text() or "hello"
+
+ result = await upstream_client.responses.create(
+ model=request.model or "gpt-4o-mini",
+ input=user_text,
+ )
+
+ # Extract output text
+ output_text = ""
+ for item in result.output:
+ if item.type == "message":
+ for part in item.content:
+ if part.type == "output_text":
+ output_text += part.text
+
+ stream = ResponseEventStream(response_id=context.response_id, model=request.model)
+ yield stream.emit_created()
+ yield stream.emit_in_progress()
+
+ for event in stream.output_item_message(output_text):
+ yield event
+
+ yield stream.emit_completed()
+
+ return _events()
+
+ return handler
+
+
+def _make_upstream_integration_handler(upstream_client: openai.AsyncOpenAI):
+ """Create an upstream integration handler (mirrors Sample 10).
+
+ Owns the response lifecycle, translates upstream content events, and
+ stamps its own response ID on all events. Skips upstream lifecycle events
+ (created, in_progress) and handles completed/failed from upstream.
+ """
+
+ def handler(request: CreateResponse, context: ResponseContext, cancellation_signal: Any):
+ async def _events():
+ stream = ResponseEventStream(response_id=context.response_id, model=request.model)
+ yield stream.emit_created()
+ yield stream.emit_in_progress()
+
+ user_text = await context.get_input_text() or "hello"
+ upstream_failed = False
+
+ # Track builders by output_index for multi-output
+ reasoning_builder = None
+ reasoning_sp = None
+ fc_builder = None
+ msg_builder = None
+ text_builder = None
+
+ async with await upstream_client.responses.create(
+ model=request.model or "gpt-4o-mini",
+ input=user_text,
+ stream=True,
+ ) as upstream_stream:
+ async for event in upstream_stream:
+ event_type = event.type
+
+ # Skip upstream lifecycle events
+ if event_type in ("response.created", "response.in_progress"):
+ continue
+ if event_type == "response.completed":
+ break
+ if event_type == "response.failed":
+ upstream_failed = True
+ break
+
+ # Reasoning item events
+ if event_type == "response.output_item.added":
+ item = event.item
+ if item.type == "reasoning":
+ reasoning_builder = stream.add_output_item_reasoning_item()
+ yield reasoning_builder.emit_added()
+ elif item.type == "function_call":
+ fc_builder = stream.add_output_item_function_call(item.name, item.call_id)
+ yield fc_builder.emit_added()
+ elif item.type == "message":
+ msg_builder = stream.add_output_item_message()
+ yield msg_builder.emit_added()
+
+ elif event_type == "response.reasoning_summary_part.added":
+ if reasoning_builder is not None:
+ reasoning_sp = reasoning_builder.add_summary_part()
+ yield reasoning_sp.emit_added()
+
+ elif event_type == "response.reasoning_summary_text.delta":
+ if reasoning_sp is not None:
+ yield reasoning_sp.emit_text_delta(event.delta)
+
+ elif event_type == "response.reasoning_summary_text.done":
+ if reasoning_sp is not None:
+ yield reasoning_sp.emit_text_done(event.text)
+
+ elif event_type == "response.reasoning_summary_part.done":
+ if reasoning_sp is not None and reasoning_builder is not None:
+ yield reasoning_sp.emit_done()
+ reasoning_builder.emit_summary_part_done(reasoning_sp)
+
+ elif event_type == "response.output_item.done":
+ item = event.item
+ if item.type == "reasoning" and reasoning_builder is not None:
+ yield reasoning_builder.emit_done()
+ elif item.type == "function_call" and fc_builder is not None:
+ yield fc_builder.emit_done()
+ elif item.type == "message" and msg_builder is not None:
+ yield msg_builder.emit_done()
+
+ elif event_type == "response.function_call_arguments.delta":
+ if fc_builder is not None:
+ yield fc_builder.emit_arguments_delta(event.delta)
+
+ elif event_type == "response.function_call_arguments.done":
+ if fc_builder is not None:
+ yield fc_builder.emit_arguments_done(event.arguments)
+
+ elif event_type == "response.content_part.added":
+ if msg_builder is not None:
+ text_builder = msg_builder.add_text_content()
+ yield text_builder.emit_added()
+
+ elif event_type == "response.output_text.delta":
+ if text_builder is not None:
+ yield text_builder.emit_delta(event.delta)
+
+ elif event_type == "response.output_text.done":
+ if text_builder is not None:
+ yield text_builder.emit_done(event.text)
+
+ elif event_type == "response.content_part.done":
+ if msg_builder is not None and text_builder is not None:
+ yield msg_builder.emit_content_done(text_builder)
+
+ if upstream_failed:
+ yield stream.emit_failed(code="server_error", message="Upstream request failed")
+ else:
+ yield stream.emit_completed()
+
+ return _events()
+
+ return handler
+
+
+# ---------------------------------------------------------------------------
+# Factory helpers
+# ---------------------------------------------------------------------------
+
+
+def _create_server_b(handler) -> ResponsesAgentServerHost:
+ """Create Server B with the given handler."""
+ app = ResponsesAgentServerHost()
+ app.create_handler(handler)
+ return app
+
+
+def _create_openai_client_for_app(app: ResponsesAgentServerHost) -> openai.AsyncOpenAI:
+ """Create an openai.AsyncOpenAI client that routes to an in-process ASGI app."""
+ transport = httpx.ASGITransport(app=app)
+ http_client = httpx.AsyncClient(transport=transport, base_url="http://server-b")
+ return openai.AsyncOpenAI(
+ base_url="http://server-b/",
+ api_key="unused",
+ http_client=http_client,
+ )
+
+
+def _create_server_a(upstream_client: openai.AsyncOpenAI, handler_factory) -> ResponsesAgentServerHost:
+ """Create Server A (proxy) with the given handler factory."""
+ app = ResponsesAgentServerHost()
+ app.create_handler(handler_factory(upstream_client))
+ return app
+
+
+# ---------------------------------------------------------------------------
+# Tests
+# ---------------------------------------------------------------------------
+
+
+class TestStreamingProxy:
+ """Streaming proxy: Server A forwards streaming events from Server B."""
+
+ def test_streaming_proxy_full_roundtrip(self) -> None:
+ """Server B streams 'Hello, World!' → proxy → client gets text."""
+ server_b = _create_server_b(_emit_text_only_handler("Hello, World!"))
+ upstream_client = _create_openai_client_for_app(server_b)
+ server_a = _create_server_a(upstream_client, _make_streaming_proxy_handler)
+ client = TestClient(server_a)
+
+ events = _post_stream(client, _base_payload("Say hello"))
+
+ event_types = [e["type"] for e in events]
+ assert "response.created" in event_types
+ assert "response.output_item.added" in event_types
+ assert "response.completed" in event_types
+
+ delta_events = [e for e in events if e["type"] == "response.output_text.delta"]
+ full_text = "".join(e["data"]["delta"] for e in delta_events)
+ assert full_text == "Hello, World!"
+
+ def test_streaming_proxy_preserves_model(self) -> None:
+ """Server A preserves the model from the request."""
+ server_b = _create_server_b(_emit_text_only_handler("test"))
+ upstream_client = _create_openai_client_for_app(server_b)
+ server_a = _create_server_a(upstream_client, _make_streaming_proxy_handler)
+ client = TestClient(server_a)
+
+ events = _post_stream(client, _base_payload("test", model="my-custom-model"))
+
+ completed = [e for e in events if e["type"] == "response.completed"]
+ assert len(completed) == 1
+ response_obj = completed[0]["data"]["response"]
+ assert response_obj["model"] == "my-custom-model"
+
+
+class TestNonStreamingProxy:
+ """Non-streaming proxy: Server A calls Server B, returns JSON."""
+
+ def test_non_streaming_proxy_full_roundtrip(self) -> None:
+ """Non-streaming: completed JSON response with text."""
+ server_b = _create_server_b(_emit_text_only_handler("Hello, World!"))
+ upstream_client = _create_openai_client_for_app(server_b)
+ server_a = _create_server_a(upstream_client, _make_non_streaming_proxy_handler)
+ client = TestClient(server_a)
+
+ resp = client.post("/responses", json=_base_payload("Say hello"))
+ assert resp.status_code == 200
+ body = resp.json()
+
+ assert body["status"] == "completed"
+ assert body["model"] == "test-model"
+ output = body["output"]
+ assert len(output) >= 1
+ text_parts = [p for p in output[0]["content"] if p.get("type") == "output_text"]
+ assert text_parts[0]["text"] == "Hello, World!"
+
+
+class TestUpstreamIntegration:
+ """Upstream integration: openai SDK → Server A → Server B (rich backend)."""
+
+ def test_upstream_multi_output_all_roundtrip(self) -> None:
+ """3 output items (reasoning + function_call + message) arrive at client."""
+ server_b = _create_server_b(_emit_multi_output_handler)
+ upstream_client = _create_openai_client_for_app(server_b)
+ server_a = _create_server_a(upstream_client, _make_upstream_integration_handler)
+ client = TestClient(server_a)
+
+ resp = client.post("/responses", json=_base_payload("What's the weather?"))
+ assert resp.status_code == 200
+ body = resp.json()
+
+ assert body["status"] == "completed"
+ output = body["output"]
+ assert len(output) == 3
+
+ # Reasoning item
+ assert output[0]["type"] == "reasoning"
+
+ # Function call
+ assert output[1]["type"] == "function_call"
+ assert output[1]["name"] == "get_weather"
+ assert output[1]["call_id"] == "call_proxy_001"
+ assert json.loads(output[1]["arguments"]) == {"city": "Seattle"}
+
+ # Text message
+ assert output[2]["type"] == "message"
+ text_parts = [p for p in output[2]["content"] if p.get("type") == "output_text"]
+ assert text_parts[0]["text"] == "The answer is 42."
+
+ def test_upstream_multi_output_streaming_all_roundtrip(self) -> None:
+ """Streaming: all deltas from 3 output types arrive."""
+ server_b = _create_server_b(_emit_multi_output_handler)
+ upstream_client = _create_openai_client_for_app(server_b)
+ server_a = _create_server_a(upstream_client, _make_upstream_integration_handler)
+ client = TestClient(server_a)
+
+ events = _post_stream(client, _base_payload("What's the weather?"))
+ event_types = [e["type"] for e in events]
+
+ # 3 output_item.added (reasoning, function_call, message)
+ added_events = [e for e in events if e["type"] == "response.output_item.added"]
+ assert len(added_events) == 3
+
+ # 3 output_item.done
+ done_events = [e for e in events if e["type"] == "response.output_item.done"]
+ assert len(done_events) == 3
+
+ # Reasoning summary deltas
+ reasoning_deltas = [e for e in events if e["type"] == "response.reasoning_summary_text.delta"]
+ assert len(reasoning_deltas) > 0
+
+ # Function call argument deltas
+ arg_deltas = [e for e in events if e["type"] == "response.function_call_arguments.delta"]
+ assert len(arg_deltas) > 0
+
+ # Text deltas
+ text_deltas = [e for e in events if e["type"] == "response.output_text.delta"]
+ full_text = "".join(e["data"]["delta"] for e in text_deltas)
+ assert full_text == "The answer is 42."
+
+ # Terminal
+ assert "response.completed" in event_types
+
+ def test_upstream_failed_propagates_failure(self) -> None:
+ """Server B fails → client sees response.failed."""
+ server_b = _create_server_b(_emit_failed_handler)
+ upstream_client = _create_openai_client_for_app(server_b)
+ server_a = _create_server_a(upstream_client, _make_upstream_integration_handler)
+ client = TestClient(server_a)
+
+ events = _post_stream(client, _base_payload("trigger failure"))
+ event_types = [e["type"] for e in events]
+
+ assert "response.failed" in event_types
+ failed_event = next(e for e in events if e["type"] == "response.failed")
+ response_obj = failed_event["data"]["response"]
+ assert response_obj["status"] == "failed"
+
+ # Should not have response.completed
+ assert "response.completed" not in event_types
+
+ def test_upstream_response_ids_are_independent(self) -> None:
+ """Server A stamps its own response ID, not Server B's."""
+ server_b = _create_server_b(_emit_multi_output_handler)
+ upstream_client = _create_openai_client_for_app(server_b)
+ server_a = _create_server_a(upstream_client, _make_upstream_integration_handler)
+ client = TestClient(server_a)
+
+ events = _post_stream(client, _base_payload("test"))
+
+ # response.created carries Server A's response ID
+ created_event = next(e for e in events if e["type"] == "response.created")
+ server_a_id = created_event["data"]["response"]["id"]
+ assert server_a_id
+ assert server_a_id.startswith("caresp_")
+
+ # response.completed should carry the same ID
+ completed_event = next(e for e in events if e["type"] == "response.completed")
+ completed_id = completed_event["data"]["response"]["id"]
+ assert completed_id == server_a_id
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/e2e/test_sample_e2e.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/e2e/test_sample_e2e.py
new file mode 100644
index 000000000000..61a3f6948a98
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/e2e/test_sample_e2e.py
@@ -0,0 +1,1017 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""End-to-end tests for Samples 1-11."""
+
+from __future__ import annotations
+
+import asyncio
+import json
+from typing import Any
+
+import pytest
+from starlette.testclient import TestClient
+
+from azure.ai.agentserver.responses import (
+ CreateResponse,
+ ResponseContext,
+ ResponseEventStream,
+ ResponsesAgentServerHost,
+ ResponsesServerOptions,
+ TextResponse,
+ get_input_expanded,
+)
+from azure.ai.agentserver.responses.models import FunctionCallOutputItemParam, ItemMessage
+
+# ---------------------------------------------------------------------------
+# Helpers
+# ---------------------------------------------------------------------------
+
+
+def _make_app(handler, **kwargs) -> TestClient:
+ app = ResponsesAgentServerHost(**kwargs)
+ app.create_handler(handler)
+ return TestClient(app)
+
+
+def _collect_stream_events(response: Any) -> list[dict[str, Any]]:
+ """Parse SSE lines from a streaming response into structured events."""
+ events: list[dict[str, Any]] = []
+ current_type: str | None = None
+ current_data: str | None = None
+
+ for line in response.iter_lines():
+ if not line:
+ if current_type is not None:
+ parsed_data: dict[str, Any] = {}
+ if current_data:
+ parsed_data = json.loads(current_data)
+ events.append({"type": current_type, "data": parsed_data})
+ current_type = None
+ current_data = None
+ continue
+
+ if line.startswith("event:"):
+ current_type = line.split(":", 1)[1].strip()
+ elif line.startswith("data:"):
+ current_data = line.split(":", 1)[1].strip()
+
+ if current_type is not None:
+ parsed_data = json.loads(current_data) if current_data else {}
+ events.append({"type": current_type, "data": parsed_data})
+
+ return events
+
+
+def _post_json(client: TestClient, payload: dict[str, Any]) -> Any:
+ return client.post("/responses", json=payload)
+
+
+def _post_stream(client: TestClient, payload: dict[str, Any]) -> list[dict[str, Any]]:
+ payload["stream"] = True
+ with client.stream("POST", "/responses", json=payload) as resp:
+ assert resp.status_code == 200
+ events = _collect_stream_events(resp)
+ return events
+
+
+def _base_payload(input_value: Any = "hello", **overrides) -> dict[str, Any]:
+ payload: dict[str, Any] = {
+ "model": "test-model",
+ "input": input_value,
+ "stream": False,
+ }
+ payload.update(overrides)
+ return payload
+
+
+# ---------------------------------------------------------------------------
+# Sample 1: Getting Started — Echo handler
+# ---------------------------------------------------------------------------
+
+
+def _sample1_handler(request: CreateResponse, context: ResponseContext, cancellation_signal: asyncio.Event):
+ """Echo handler: returns the user's input text using TextResponse."""
+
+ async def _create_text():
+ return await context.get_input_text()
+
+ return TextResponse(
+ context,
+ request,
+ create_text=_create_text,
+ )
+
+
+def test_sample1_echo_handler_echoes_input_text() -> None:
+ """Non-streaming echo returns correct text."""
+ client = _make_app(_sample1_handler)
+ resp = _post_json(client, _base_payload("Say something"))
+
+ assert resp.status_code == 200
+ body = resp.json()
+ assert body["status"] == "completed"
+ output = body["output"]
+ assert len(output) == 1
+ assert output[0]["type"] == "message"
+ text_parts = [p for p in output[0]["content"] if p.get("type") == "output_text"]
+ assert len(text_parts) == 1
+ assert text_parts[0]["text"] == "Say something"
+
+
+def test_sample1_echo_handler_structured_input() -> None:
+ """Structured message input still echoes text."""
+ client = _make_app(_sample1_handler)
+ structured_input = [
+ {
+ "type": "message",
+ "role": "user",
+ "content": [{"type": "input_text", "text": "structured hello"}],
+ }
+ ]
+ resp = _post_json(client, _base_payload(structured_input))
+
+ assert resp.status_code == 200
+ body = resp.json()
+ assert body["status"] == "completed"
+ output = body["output"]
+ assert len(output) == 1
+ text_parts = [p for p in output[0]["content"] if p.get("type") == "output_text"]
+ assert text_parts[0]["text"] == "structured hello"
+
+
+# ---------------------------------------------------------------------------
+# Sample 2: Streaming Text Deltas
+# ---------------------------------------------------------------------------
+
+
+async def _sample2_handler(request: CreateResponse, context: ResponseContext, cancellation_signal: asyncio.Event):
+ """Streaming handler: emits text in token-by-token deltas using TextResponse with configure."""
+ user_text = await context.get_input_text()
+ tokens = user_text.split() if user_text else ["Hello", "World"]
+
+ async def _stream():
+ for token in tokens:
+ yield token + " "
+
+ return TextResponse(
+ context,
+ request,
+ configure=lambda response: setattr(response, "temperature", 0.7),
+ create_text_stream=_stream,
+ )
+
+
+def test_sample2_streaming_handler_streams_token_deltas() -> None:
+ """Streaming response emits delta events."""
+ client = _make_app(_sample2_handler)
+ events = _post_stream(client, _base_payload("one two three"))
+
+ delta_events = [e for e in events if e["type"] == "response.output_text.delta"]
+ assert len(delta_events) == 3
+ joined = "".join(e["data"]["delta"] for e in delta_events)
+ assert joined.strip() == "one two three"
+
+
+def test_sample2_streaming_handler_non_streaming_returns_full_text() -> None:
+ """Non-streaming fallback returns full text."""
+ client = _make_app(_sample2_handler)
+ resp = _post_json(client, _base_payload("alpha beta"))
+
+ assert resp.status_code == 200
+ body = resp.json()
+ assert body["status"] == "completed"
+ text_parts = [p for p in body["output"][0]["content"] if p.get("type") == "output_text"]
+ assert text_parts[0]["text"].strip() == "alpha beta"
+
+
+# ---------------------------------------------------------------------------
+# Sample 3: Full Control — All lifecycle events
+# ---------------------------------------------------------------------------
+
+
+async def _sample3_handler(request: CreateResponse, context: ResponseContext, cancellation_signal: asyncio.Event):
+ """Convenience handler: emits a greeting using output_item_message()."""
+ stream = ResponseEventStream(response_id=context.response_id, request=request)
+
+ stream.response.temperature = 0.7
+ stream.response.max_output_tokens = 1024
+
+ yield stream.emit_created()
+ yield stream.emit_in_progress()
+
+ user_text = await context.get_input_text()
+ for event in stream.output_item_message(f"Hello, {user_text}! Welcome."):
+ yield event
+
+ yield stream.emit_completed()
+
+
+def test_sample3_full_lifecycle_events() -> None:
+ """Streaming response contains all lifecycle event types."""
+ client = _make_app(_sample3_handler)
+ events = _post_stream(client, _base_payload("World"))
+
+ event_types = [e["type"] for e in events]
+ expected_types = [
+ "response.created",
+ "response.output_item.added",
+ "response.content_part.added",
+ "response.output_text.delta",
+ "response.output_text.done",
+ "response.content_part.done",
+ "response.output_item.done",
+ "response.completed",
+ ]
+ for expected in expected_types:
+ assert expected in event_types, f"Missing event type: {expected}"
+
+
+def test_sample3_greeting_includes_input() -> None:
+ """Greeting text includes user input."""
+ client = _make_app(_sample3_handler)
+ resp = _post_json(client, _base_payload("Alice"))
+
+ body = resp.json()
+ text_parts = [p for p in body["output"][0]["content"] if p.get("type") == "output_text"]
+ assert "Alice" in text_parts[0]["text"]
+ assert "Hello" in text_parts[0]["text"]
+
+
+# ---------------------------------------------------------------------------
+# Sample 4: Function Calling
+# ---------------------------------------------------------------------------
+
+
+async def _sample4_handler(request: CreateResponse, context: ResponseContext, cancellation_signal: asyncio.Event):
+ """Function-calling handler: uses convenience generators for both turns."""
+ items = get_input_expanded(request)
+ has_fn_output = any(isinstance(item, FunctionCallOutputItemParam) for item in items)
+
+ stream = ResponseEventStream(response_id=context.response_id, request=request)
+ yield stream.emit_created()
+ yield stream.emit_in_progress()
+
+ if has_fn_output:
+ # Second turn: extract function output and echo it as text
+ fn_output_text = ""
+ for item in items:
+ if isinstance(item, FunctionCallOutputItemParam):
+ fn_output_text = item.output or ""
+ break
+ for event in stream.output_item_message(f"The weather is: {fn_output_text}"):
+ yield event
+ else:
+ # First turn: emit a function call for get_weather
+ args = json.dumps({"location": await context.get_input_text()})
+ for event in stream.output_item_function_call("get_weather", "call_001", args):
+ yield event
+
+ yield stream.emit_completed()
+
+
+def test_sample4_turn1_emits_function_call() -> None:
+ """First turn emits function_call with get_weather."""
+ client = _make_app(_sample4_handler)
+ events = _post_stream(client, _base_payload("Seattle"))
+
+ added_events = [e for e in events if e["type"] == "response.output_item.added"]
+ assert len(added_events) == 1
+ item = added_events[0]["data"]["item"]
+ assert item["type"] == "function_call"
+ assert item["name"] == "get_weather"
+ assert item["call_id"] == "call_001"
+
+
+def test_sample4_turn2_returns_weather_text() -> None:
+ """Second turn with function_call_output returns weather text."""
+ client = _make_app(_sample4_handler)
+ input_items = [
+ {
+ "type": "message",
+ "role": "user",
+ "content": [{"type": "input_text", "text": "Seattle"}],
+ },
+ {
+ "type": "function_call_output",
+ "call_id": "call_001",
+ "output": "72°F and sunny",
+ },
+ ]
+ resp = _post_json(client, _base_payload(input_items))
+
+ assert resp.status_code == 200
+ body = resp.json()
+ assert body["status"] == "completed"
+ msg_outputs = [o for o in body["output"] if o.get("type") == "message"]
+ assert len(msg_outputs) == 1
+ text_parts = [p for p in msg_outputs[0]["content"] if p.get("type") == "output_text"]
+ assert "72°F and sunny" in text_parts[0]["text"]
+
+
+# ---------------------------------------------------------------------------
+# Sample 5: Conversation History
+# ---------------------------------------------------------------------------
+
+
+async def _sample5_handler(request: CreateResponse, context: ResponseContext, cancellation_signal: asyncio.Event):
+ """Study tutor handler using TextResponse: welcome on first turn,
+ references previous_response_id on second turn."""
+ has_previous = request.previous_response_id is not None and str(request.previous_response_id).strip() != ""
+ user_text = await context.get_input_text()
+ if has_previous:
+ text = f"Building on our previous discussion ({request.previous_response_id}): {user_text}"
+ else:
+ text = f"Welcome! I'm your study tutor. You asked: {user_text}"
+
+ return TextResponse(context, request, create_text=lambda: text)
+
+
+def test_sample5_first_turn_welcome() -> None:
+ """First turn (no history) returns welcome message."""
+ client = _make_app(_sample5_handler)
+ resp = _post_json(client, _base_payload("Hi there"))
+
+ body = resp.json()
+ assert body["status"] == "completed"
+ text_parts = [p for p in body["output"][0]["content"] if p.get("type") == "output_text"]
+ assert "Welcome" in text_parts[0]["text"]
+ assert "study tutor" in text_parts[0]["text"]
+ assert "Hi there" in text_parts[0]["text"]
+
+
+def test_sample5_second_turn_references_history() -> None:
+ """Second turn references previous response."""
+ client = _make_app(_sample5_handler)
+
+ # First turn
+ first_resp = _post_json(client, _base_payload("Hello"))
+ first_body = first_resp.json()
+ first_id = first_body["id"]
+
+ # Second turn with previous_response_id
+ second_payload = _base_payload("Follow up question")
+ second_payload["previous_response_id"] = first_id
+ second_resp = _post_json(client, second_payload)
+
+ second_body = second_resp.json()
+ assert second_body["status"] == "completed"
+ text_parts = [p for p in second_body["output"][0]["content"] if p.get("type") == "output_text"]
+ text = text_parts[0]["text"]
+ assert "Building on our previous discussion" in text
+ assert first_id in text
+
+
+# ---------------------------------------------------------------------------
+# Sample 6: Multi Output — Reasoning + Message
+# ---------------------------------------------------------------------------
+
+
+async def _sample6_handler(request: CreateResponse, context: ResponseContext, cancellation_signal: asyncio.Event):
+ """Math solver handler: emits a reasoning item then a message item using convenience generators."""
+ stream = ResponseEventStream(response_id=context.response_id, request=request)
+ question = await context.get_input_text() or "What is 6 times 7?"
+
+ yield stream.emit_created()
+ yield stream.emit_in_progress()
+
+ # Output item 0: reasoning
+ thought = f'The user asked: "{question}". I need to compute the result.'
+ for event in stream.output_item_reasoning_item(thought):
+ yield event
+
+ # Output item 1: message
+ for event in stream.output_item_message(f"After reasoning: {question}"):
+ yield event
+
+ yield stream.emit_completed()
+
+
+def test_sample6_streaming_emits_reasoning_and_message() -> None:
+ """Streaming response has 2 output_item.added events."""
+ client = _make_app(_sample6_handler)
+ events = _post_stream(client, _base_payload("complex question"))
+
+ added_events = [e for e in events if e["type"] == "response.output_item.added"]
+ assert len(added_events) == 2
+ assert added_events[0]["data"]["item"]["type"] == "reasoning"
+ assert added_events[1]["data"]["item"]["type"] == "message"
+
+
+def test_sample6_non_streaming_both_output_items() -> None:
+ """Non-streaming returns 2 output items."""
+ client = _make_app(_sample6_handler)
+ resp = _post_json(client, _base_payload("deep thought"))
+
+ body = resp.json()
+ assert body["status"] == "completed"
+ output = body["output"]
+ assert len(output) == 2
+ assert output[0]["type"] == "reasoning"
+ assert output[1]["type"] == "message"
+ text_parts = [p for p in output[1]["content"] if p.get("type") == "output_text"]
+ assert "deep thought" in text_parts[0]["text"]
+
+
+# ---------------------------------------------------------------------------
+# Sample 7: Customization — Default model via options
+# ---------------------------------------------------------------------------
+
+
+def _sample7_handler(request: CreateResponse, context: ResponseContext, cancellation_signal: asyncio.Event):
+ """Handler that reports which model is used, via TextResponse."""
+ return TextResponse(
+ context,
+ request,
+ create_text=lambda: f"[model={request.model}]",
+ )
+
+
+def test_sample7_custom_options_applied() -> None:
+ """Default model is applied when request omits model."""
+ opts = ResponsesServerOptions(
+ default_model="gpt-4o",
+ sse_keep_alive_interval_seconds=5,
+ shutdown_grace_period_seconds=15,
+ )
+ client = _make_app(_sample7_handler, options=opts)
+
+ # POST without model — server should fill in "gpt-4o" from options
+ payload: dict[str, Any] = {"input": "hello", "stream": False}
+ resp = _post_json(client, payload)
+
+ assert resp.status_code == 200
+ body = resp.json()
+ assert body["status"] == "completed"
+ text_parts = [p for p in body["output"][0]["content"] if p.get("type") == "output_text"]
+ assert "gpt-4o" in text_parts[0]["text"]
+
+
+def test_sample7_explicit_model_overrides_default() -> None:
+ """Explicit model in request overrides default_model."""
+ opts = ResponsesServerOptions(default_model="gpt-4o")
+ client = _make_app(_sample7_handler, options=opts)
+
+ resp = _post_json(client, _base_payload("hello", model="custom-model"))
+
+ body = resp.json()
+ text_parts = [p for p in body["output"][0]["content"] if p.get("type") == "output_text"]
+ assert "custom-model" in text_parts[0]["text"]
+
+
+# ---------------------------------------------------------------------------
+# Sample 8: Mixin Composition — Both protocols on one server
+# ---------------------------------------------------------------------------
+
+
+def _sample8_response_handler(request: CreateResponse, context: ResponseContext, cancellation_signal: asyncio.Event):
+ """Responses handler for the mixin test, via TextResponse."""
+
+ async def _create_text():
+ return f"[Response] Echo: {await context.get_input_text()}"
+
+ return TextResponse(
+ context,
+ request,
+ create_text=_create_text,
+ )
+
+
+def test_sample8_mixin_composition_both_protocols() -> None:
+ """Both /responses and /invocations endpoints work."""
+ invocations = pytest.importorskip(
+ "azure.ai.agentserver.invocations",
+ reason="azure-ai-agentserver-invocations not installed",
+ )
+ InvocationAgentServerHost = invocations.InvocationAgentServerHost
+
+ from starlette.requests import Request
+ from starlette.responses import JSONResponse, Response
+
+ class MyHost(InvocationAgentServerHost, ResponsesAgentServerHost):
+ pass
+
+ host = MyHost()
+
+ @host.invoke_handler
+ async def handle_invoke(request: Request) -> Response:
+ data = await request.json()
+ invocation_id = request.state.invocation_id
+ return JSONResponse(
+ {
+ "invocation_id": invocation_id,
+ "status": "completed",
+ "output": f"[Invocation] Echo: {data.get('message', '')}",
+ }
+ )
+
+ host.create_handler(_sample8_response_handler)
+
+ client = TestClient(host)
+
+ # Test invocations endpoint
+ inv_resp = client.post("/invocations", json={"message": "Hello invocations"})
+ assert inv_resp.status_code == 200
+ inv_body = inv_resp.json()
+ assert inv_body["status"] == "completed"
+ assert "Hello invocations" in inv_body["output"]
+
+ # Test responses endpoint
+ resp = client.post(
+ "/responses",
+ json={"model": "test-model", "input": "Hello responses", "stream": False},
+ )
+ assert resp.status_code == 200
+ resp_body = resp.json()
+ assert resp_body["status"] == "completed"
+ text_parts = [p for p in resp_body["output"][0]["content"] if p.get("type") == "output_text"]
+ assert "Hello responses" in text_parts[0]["text"]
+
+
+# ---------------------------------------------------------------------------
+# Sample 9: Self-Hosting — Mount under /api prefix
+# ---------------------------------------------------------------------------
+
+
+def test_sample9_self_hosted_responses_under_prefix() -> None:
+ """Responses endpoints work under /api prefix."""
+ from starlette.applications import Starlette
+ from starlette.routing import Mount
+
+ responses_app = ResponsesAgentServerHost()
+
+ def _handler(request: CreateResponse, context: ResponseContext, cancellation_signal: asyncio.Event):
+
+ async def _create_text():
+ return f"Self-hosted: {await context.get_input_text()}"
+
+ return TextResponse(
+ context,
+ request,
+ create_text=_create_text,
+ )
+
+ responses_app.create_handler(_handler)
+
+ parent_app = Starlette(
+ routes=[
+ Mount("/api", app=responses_app),
+ ]
+ )
+
+ client = TestClient(parent_app)
+ resp = client.post(
+ "/api/responses",
+ json={"model": "test-model", "input": "mounted test", "stream": False},
+ )
+
+ assert resp.status_code == 200
+ body = resp.json()
+ assert body["status"] == "completed"
+ text_parts = [p for p in body["output"][0]["content"] if p.get("type") == "output_text"]
+ assert "mounted test" in text_parts[0]["text"]
+
+
+# ---------------------------------------------------------------------------
+# Sample 10: Streaming Upstream — Raw events, no ResponseEventStream
+# ---------------------------------------------------------------------------
+
+
+def _sample10_handler(request: CreateResponse, context: ResponseContext, cancellation_signal: asyncio.Event):
+ """Streaming upstream handler: yields raw event dicts."""
+
+ async def _mock_upstream_events(prompt: str):
+ """Simulate upstream SSE stream events (lifecycle + content)."""
+ # Upstream lifecycle — handler will skip these
+ yield {"type": "response.created"}
+ yield {"type": "response.in_progress"}
+ # Upstream content — handler will yield these directly
+ yield {
+ "type": "response.output_item.added",
+ "output_index": 0,
+ "item": {"id": "item_up_001", "type": "message", "role": "assistant", "content": []},
+ }
+ yield {
+ "type": "response.content_part.added",
+ "output_index": 0,
+ "content_index": 0,
+ "part": {"type": "output_text", "text": ""},
+ }
+ tokens = ["Upstream says: ", "Hello", ", ", prompt, "!"]
+ for token in tokens:
+ yield {"type": "response.output_text.delta", "output_index": 0, "content_index": 0, "delta": token}
+ full = "".join(tokens)
+ yield {"type": "response.output_text.done", "output_index": 0, "content_index": 0, "text": full}
+ yield {
+ "type": "response.content_part.done",
+ "output_index": 0,
+ "content_index": 0,
+ "part": {"type": "output_text", "text": full},
+ }
+ yield {
+ "type": "response.output_item.done",
+ "output_index": 0,
+ "item": {
+ "id": "item_up_001",
+ "type": "message",
+ "role": "assistant",
+ "content": [{"type": "output_text", "text": full}],
+ },
+ }
+ yield {"type": "response.completed"}
+
+ async def _events():
+ # Build response snapshot by hand — no ResponseEventStream.
+ snapshot: dict[str, Any] = {
+ "id": context.response_id,
+ "object": "response",
+ "status": "in_progress",
+ "model": request.model or "",
+ "output": [],
+ }
+
+ # Lifecycle events nest the snapshot under "response"
+ # — matching the SSE wire format.
+ yield {"type": "response.created", "response": snapshot}
+ yield {"type": "response.in_progress", "response": snapshot}
+
+ user_text = await context.get_input_text() or "world"
+ output_items: list[dict[str, Any]] = []
+ upstream_failed = False
+
+ async for event in _mock_upstream_events(user_text):
+ etype = event["type"]
+ if etype in ("response.created", "response.in_progress"):
+ continue
+ if etype == "response.completed":
+ break
+ if etype == "response.failed":
+ upstream_failed = True
+ break
+
+ # Clear upstream response_id on output items.
+ if etype == "response.output_item.added":
+ event.get("item", {}).pop("response_id", None) # type: ignore[union-attr]
+ elif etype == "response.output_item.done":
+ item: dict[str, Any] = event.get("item", {}) # type: ignore[assignment]
+ item.pop("response_id", None)
+ output_items.append(item)
+
+ yield event
+
+ if upstream_failed:
+ snapshot["status"] = "failed"
+ snapshot["error"] = {"code": "server_error", "message": "Upstream request failed"}
+ yield {"type": "response.failed", "response": snapshot}
+ else:
+ snapshot["status"] = "completed"
+ snapshot["output"] = output_items
+ yield {"type": "response.completed", "response": snapshot}
+
+ return _events()
+
+
+def test_sample10_streaming_upstream_emits_deltas() -> None:
+ """Streaming upstream produces delta events."""
+ client = _make_app(_sample10_handler)
+ events = _post_stream(client, _base_payload("Alice"))
+
+ event_types = [e["type"] for e in events]
+ # Verify full lifecycle
+ assert "response.created" in event_types
+ assert "response.output_item.added" in event_types
+ assert "response.output_text.done" in event_types
+ assert "response.output_item.done" in event_types
+ assert "response.completed" in event_types
+
+ delta_events = [e for e in events if e["type"] == "response.output_text.delta"]
+ assert len(delta_events) == 5
+ joined = "".join(e["data"]["delta"] for e in delta_events)
+ assert "Alice" in joined
+ assert "Upstream says" in joined
+
+
+def test_sample10_streaming_upstream_non_streaming_returns_full_text() -> None:
+ """Non-streaming fallback reassembles all deltas."""
+ client = _make_app(_sample10_handler)
+ resp = _post_json(client, _base_payload("Bob"))
+
+ assert resp.status_code == 200
+ body = resp.json()
+ assert body["status"] == "completed"
+ text_parts = [p for p in body["output"][0]["content"] if p.get("type") == "output_text"]
+ assert "Bob" in text_parts[0]["text"]
+ assert "Upstream says" in text_parts[0]["text"]
+
+
+# ---------------------------------------------------------------------------
+# Sample 11: Non-Streaming Upstream — Output item builders
+# ---------------------------------------------------------------------------
+
+
+def _sample11_handler(request: CreateResponse, context: ResponseContext, cancellation_signal: asyncio.Event):
+ """Non-streaming upstream handler: iterates upstream output items via builders."""
+
+ def _mock_upstream_call(prompt: str) -> list[dict[str, Any]]:
+ """Simulate upstream non-streaming response returning output items."""
+ return [
+ {
+ "type": "message",
+ "content": [{"type": "output_text", "text": f"Upstream non-streaming reply to: {prompt}"}],
+ }
+ ]
+
+ async def _events():
+ stream = ResponseEventStream(response_id=context.response_id, request=request)
+ yield stream.emit_created()
+ yield stream.emit_in_progress()
+
+ user_text = await context.get_input_text() or "world"
+ upstream_items = _mock_upstream_call(user_text)
+
+ for item in upstream_items:
+ if item["type"] == "message":
+ text = "".join(part["text"] for part in item["content"] if part.get("type") == "output_text")
+ for event in stream.output_item_message(text):
+ yield event
+
+ yield stream.emit_completed()
+
+ return _events()
+
+
+def test_sample11_non_streaming_upstream_returns_output() -> None:
+ """Non-streaming upstream returns completed response."""
+ client = _make_app(_sample11_handler)
+ resp = _post_json(client, _base_payload("Charlie"))
+
+ assert resp.status_code == 200
+ body = resp.json()
+ assert body["status"] == "completed"
+ output = body["output"]
+ assert len(output) == 1
+ assert output[0]["type"] == "message"
+ text_parts = [p for p in output[0]["content"] if p.get("type") == "output_text"]
+ assert "Upstream non-streaming reply to: Charlie" in text_parts[0]["text"]
+
+
+def test_sample11_non_streaming_upstream_streaming_events() -> None:
+ """Streaming mode still emits proper lifecycle events."""
+ client = _make_app(_sample11_handler)
+ events = _post_stream(client, _base_payload("Dana"))
+
+ event_types = [e["type"] for e in events]
+ assert "response.created" in event_types
+ assert "response.output_item.added" in event_types
+ assert "response.output_text.delta" in event_types
+ assert "response.completed" in event_types
+
+ delta_events = [e for e in events if e["type"] == "response.output_text.delta"]
+ joined = "".join(e["data"]["delta"] for e in delta_events)
+ assert "Dana" in joined
+
+
+# ---------------------------------------------------------------------------
+# Item Reference Multi-Turn Tests
+#
+# Validates that item_reference inputs are resolved by the server to their
+# concrete Item types before reaching the handler.
+# ---------------------------------------------------------------------------
+
+
+async def _item_ref_echo_handler(request: CreateResponse, context: ResponseContext, cancellation_signal: asyncio.Event):
+ """Handler that echoes resolved input items as JSON in the response text.
+
+ For each input item, emits its type and (for messages) its text content.
+ This lets tests verify that item_references were resolved to concrete items.
+ """
+ items = await context.get_input_items()
+ summaries = []
+ for item in items:
+ if isinstance(item, ItemMessage):
+ texts = []
+ for part in getattr(item, "content", None) or []:
+ t = getattr(part, "text", None)
+ if t:
+ texts.append(t)
+ summaries.append({"type": "message", "text": " ".join(texts)})
+ else:
+ summaries.append({"type": getattr(item, "type", "unknown")})
+
+ return TextResponse(context, request, create_text=lambda: json.dumps(summaries))
+
+
+def test_item_reference_turn2_resolves_to_message() -> None:
+ """Turn 2 sends an item_reference to Turn 1's output; handler receives a resolved Item."""
+ client = _make_app(_item_ref_echo_handler)
+
+ # Turn 1: normal message input
+ t1_resp = _post_json(client, _base_payload("Hello from turn 1"))
+ assert t1_resp.status_code == 200
+ t1_body = t1_resp.json()
+ assert t1_body["status"] == "completed"
+ # Get the output item ID from turn 1's message output
+ t1_output_id = t1_body["output"][0]["id"]
+ t1_response_id = t1_body["id"]
+
+ # Turn 2: send an item_reference pointing to the turn-1 output item
+ t2_payload = _base_payload(
+ [
+ {"type": "item_reference", "id": t1_output_id},
+ {"type": "message", "role": "user", "content": [{"type": "input_text", "text": "And hello from turn 2"}]},
+ ]
+ )
+ t2_payload["previous_response_id"] = t1_response_id
+ t2_resp = _post_json(client, t2_payload)
+
+ assert t2_resp.status_code == 200
+ t2_body = t2_resp.json()
+ assert t2_body["status"] == "completed"
+ # Parse handler's serialised summary from the response text
+ text_parts = [p for p in t2_body["output"][0]["content"] if p.get("type") == "output_text"]
+ items_json = json.loads(text_parts[0]["text"])
+
+ # First item should be the resolved message from turn 1
+ assert items_json[0]["type"] == "message"
+ # Second item is the inline message from turn 2
+ assert items_json[1]["type"] == "message"
+ assert "turn 2" in items_json[1]["text"]
+
+
+def test_item_reference_get_input_text_includes_resolved() -> None:
+ """get_input_text() includes text from resolved item_references."""
+ client = _make_app(_item_ref_echo_handler)
+
+ # Turn 1 (unused — establishes context in a different client)
+ _post_json(client, _base_payload("Alpha"))
+
+ # Turn 2: handler uses get_input_text which should include resolved text
+ async def _text_handler(request: CreateResponse, context: ResponseContext, cancellation_signal: asyncio.Event):
+ text = await context.get_input_text()
+ return TextResponse(context, request, create_text=lambda: f"GOT: {text}")
+
+ client2 = _make_app(_text_handler)
+ # First create context for turn 1 in client2 too
+ t1b = _post_json(client2, _base_payload("Alpha"))
+ t1b_body = t1b.json()
+ t1b_output_id = t1b_body["output"][0]["id"]
+ t1b_response_id = t1b_body["id"]
+
+ t2_payload = _base_payload(
+ [
+ {"type": "item_reference", "id": t1b_output_id},
+ {"type": "message", "role": "user", "content": [{"type": "input_text", "text": "Beta"}]},
+ ]
+ )
+ t2_payload["previous_response_id"] = t1b_response_id
+ t2_resp = _post_json(client2, t2_payload)
+
+ t2_body = t2_resp.json()
+ text_parts = [p for p in t2_body["output"][0]["content"] if p.get("type") == "output_text"]
+ result_text = text_parts[0]["text"]
+ assert "GOT:" in result_text
+ assert "Beta" in result_text
+
+
+def test_item_reference_nonexistent_dropped_silently() -> None:
+ """An item_reference pointing to a non-existent ID is silently dropped."""
+ client = _make_app(_item_ref_echo_handler)
+
+ payload = _base_payload(
+ [
+ {"type": "item_reference", "id": "item_nonexistent_999"},
+ {"type": "message", "role": "user", "content": [{"type": "input_text", "text": "Only me"}]},
+ ]
+ )
+ resp = _post_json(client, payload)
+
+ assert resp.status_code == 200
+ body = resp.json()
+ assert body["status"] == "completed"
+ text_parts = [p for p in body["output"][0]["content"] if p.get("type") == "output_text"]
+ items_json = json.loads(text_parts[0]["text"])
+
+ # Only the inline message should remain — the unresolvable reference is dropped
+ assert len(items_json) == 1
+ assert items_json[0]["type"] == "message"
+ assert "Only me" in items_json[0]["text"]
+
+
+def test_item_reference_three_turn_chain() -> None:
+ """Three-turn conversation: each turn references previous output via item_reference."""
+ client = _make_app(_item_ref_echo_handler)
+
+ # Turn 1
+ t1 = _post_json(client, _base_payload("Turn 1"))
+ t1_body = t1.json()
+ t1_id = t1_body["id"]
+ t1_output_id = t1_body["output"][0]["id"]
+
+ # Turn 2: reference turn-1 output
+ t2_payload = _base_payload(
+ [
+ {"type": "item_reference", "id": t1_output_id},
+ {"type": "message", "role": "user", "content": [{"type": "input_text", "text": "Turn 2"}]},
+ ]
+ )
+ t2_payload["previous_response_id"] = t1_id
+ t2 = _post_json(client, t2_payload)
+ t2_body = t2.json()
+ t2_id = t2_body["id"]
+ t2_output_id = t2_body["output"][0]["id"]
+
+ # Turn 3: reference turn-2 output
+ t3_payload = _base_payload(
+ [
+ {"type": "item_reference", "id": t2_output_id},
+ {"type": "message", "role": "user", "content": [{"type": "input_text", "text": "Turn 3"}]},
+ ]
+ )
+ t3_payload["previous_response_id"] = t2_id
+ t3 = _post_json(client, t3_payload)
+ t3_body = t3.json()
+ assert t3_body["status"] == "completed"
+
+ text_parts = [p for p in t3_body["output"][0]["content"] if p.get("type") == "output_text"]
+ items_json = json.loads(text_parts[0]["text"])
+
+ # Should have 2 items: resolved reference from turn 2 + inline turn-3 message
+ assert len(items_json) == 2
+ assert items_json[0]["type"] == "message"
+ assert items_json[1]["type"] == "message"
+ assert "Turn 3" in items_json[1]["text"]
+
+
+def test_item_reference_resolve_references_false() -> None:
+ """When resolve_references=False, item_references are passed through as-is."""
+
+ async def _unresolved_handler(
+ request: CreateResponse, context: ResponseContext, cancellation_signal: asyncio.Event
+ ):
+ items = await context.get_input_items(resolve_references=False)
+ summaries = []
+ for item in items:
+ item_type = getattr(item, "type", "unknown")
+ summaries.append({"type": item_type})
+ return TextResponse(context, request, create_text=lambda: json.dumps(summaries))
+
+ client = _make_app(_unresolved_handler)
+
+ # Turn 1
+ t1 = _post_json(client, _base_payload("Hello"))
+ t1_body = t1.json()
+ t1_output_id = t1_body["output"][0]["id"]
+ t1_id = t1_body["id"]
+
+ # Turn 2 with resolve_references=False in handler
+ t2_payload = _base_payload(
+ [
+ {"type": "item_reference", "id": t1_output_id},
+ {"type": "message", "role": "user", "content": [{"type": "input_text", "text": "test"}]},
+ ]
+ )
+ t2_payload["previous_response_id"] = t1_id
+ t2 = _post_json(client, t2_payload)
+ t2_body = t2.json()
+
+ text_parts = [p for p in t2_body["output"][0]["content"] if p.get("type") == "output_text"]
+ items_json = json.loads(text_parts[0]["text"])
+
+ # First item should remain as item_reference (not resolved)
+ assert items_json[0]["type"] == "item_reference"
+ # Second is the inline message
+ assert items_json[1]["type"] == "message"
+
+
+def test_item_reference_input_items_endpoint() -> None:
+ """The GET /responses/{id}/input_items endpoint returns resolved items."""
+ client = _make_app(_item_ref_echo_handler)
+
+ # Turn 1
+ t1 = _post_json(client, _base_payload("Stored text"))
+ t1_body = t1.json()
+ t1_output_id = t1_body["output"][0]["id"]
+ t1_id = t1_body["id"]
+
+ # Turn 2 with item_reference
+ t2_payload = _base_payload(
+ [
+ {"type": "item_reference", "id": t1_output_id},
+ {"type": "message", "role": "user", "content": [{"type": "input_text", "text": "New input"}]},
+ ]
+ )
+ t2_payload["previous_response_id"] = t1_id
+ t2 = _post_json(client, t2_payload)
+ t2_body = t2.json()
+ t2_id = t2_body["id"]
+
+ # GET input_items for turn 2
+ input_items_resp = client.get(f"/responses/{t2_id}/input_items")
+ assert input_items_resp.status_code == 200
+ input_items_body = input_items_resp.json()
+
+ # Should contain resolved items (not raw item_references)
+ items = input_items_body.get("data", [])
+ assert len(items) >= 1
+ # None of the items should be type "item_reference" — they should all be resolved
+ for item in items:
+ assert item.get("type") != "item_reference", f"Expected resolved item, got item_reference: {item}"
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/integration/__init__.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/integration/__init__.py
new file mode 100644
index 000000000000..9a0454564dbb
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/integration/__init__.py
@@ -0,0 +1,2 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/integration/test_starlette_hosting.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/integration/test_starlette_hosting.py
new file mode 100644
index 000000000000..f90c5812be95
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/integration/test_starlette_hosting.py
@@ -0,0 +1,344 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Integration tests for AgentHost host registration and wiring."""
+
+from __future__ import annotations
+
+import asyncio
+import threading
+from typing import Any
+
+import pytest
+from starlette.testclient import TestClient
+
+from azure.ai.agentserver.responses import ResponsesAgentServerHost
+from azure.ai.agentserver.responses._options import ResponsesServerOptions
+from azure.ai.agentserver.responses.hosting._observability import InMemoryCreateSpanHook
+from tests._helpers import EventGate
+
+
+def _noop_response_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Minimal handler used to wire host integration tests."""
+
+ async def _events():
+ if False: # pragma: no cover - keep async generator shape.
+ yield None
+
+ return _events()
+
+
+def _build_client(*, prefix: str = "", options: ResponsesServerOptions | None = None) -> TestClient:
+ app = ResponsesAgentServerHost(prefix=prefix, options=options)
+ app.create_handler(_noop_response_handler)
+ return TestClient(app)
+
+
+def test_hosting__registers_create_get_cancel_routes_under_prefix() -> None:
+ client = _build_client(prefix="/v1")
+
+ create_response = client.post(
+ "/v1/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": False,
+ },
+ )
+ assert create_response.status_code == 200
+ response_id = create_response.json()["id"]
+
+ get_response = client.get(f"/v1/responses/{response_id}")
+ assert get_response.status_code in {200, 404}
+
+ cancel_response = client.post(f"/v1/responses/{response_id}/cancel")
+ assert cancel_response.status_code in {200, 400, 404}
+
+
+def test_hosting__options_are_applied_to_runtime_behavior() -> None:
+ options = ResponsesServerOptions(
+ additional_server_version="integration-suite",
+ default_model="gpt-4o-mini",
+ sse_keep_alive_interval_seconds=5,
+ )
+ client = _build_client(options=options)
+
+ response = client.post(
+ "/responses",
+ json={
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": False,
+ },
+ )
+
+ assert response.status_code == 200
+ assert "x-platform-server" in response.headers
+
+
+def test_hosting__client_disconnect_behavior_remains_contract_compliant() -> None:
+ client = _build_client()
+
+ with client.stream(
+ "POST",
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": True,
+ "store": True,
+ "background": False,
+ },
+ ) as response:
+ assert response.status_code == 200
+ first_line = next(response.iter_lines(), "")
+ assert first_line.startswith("event:") or first_line.startswith("data:")
+
+ # Post-disconnect visibility and state should remain contract-compliant.
+ # This call should not raise and should return a defined protocol outcome.
+ follow_up = client.get("/responses/resp_disconnect_probe")
+ assert follow_up.status_code in {200, 400, 404}
+
+
+def test_hosting__create_emits_single_root_span_with_key_tags_and_identity_header() -> None:
+ hook = InMemoryCreateSpanHook()
+ options = ResponsesServerOptions(
+ additional_server_version="integration-suite",
+ create_span_hook=hook,
+ )
+ client = _build_client(options=options)
+
+ response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": False,
+ },
+ )
+
+ assert response.status_code == 200
+ assert "x-platform-server" in response.headers
+
+ assert len(hook.spans) == 1
+ span = hook.spans[0]
+ assert span.name == "create_response"
+ assert span.error is None
+ assert span.ended_at is not None
+ assert span.tags["service.name"] == "azure.ai.agentserver"
+ assert span.tags["gen_ai.operation.name"] == "invoke_agent"
+ assert span.tags["gen_ai.system"] == "responses"
+ assert span.tags["gen_ai.request.model"] == "gpt-4o-mini"
+ assert isinstance(span.tags["gen_ai.response.id"], str)
+
+
+def test_hosting__stream_mode_surfaces_handler_output_item_and_content_events() -> None:
+ from azure.ai.agentserver.responses.streaming._event_stream import ResponseEventStream
+
+ def _streaming_handler(request: Any, context: Any, cancellation_signal: Any):
+ async def _events():
+ stream = ResponseEventStream(response_id=context.response_id, model=getattr(request, "model", None))
+ yield stream.emit_created()
+ yield stream.emit_in_progress()
+
+ message_item = stream.add_output_item_message()
+ yield message_item.emit_added()
+
+ text_content = message_item.add_text_content()
+ yield text_content.emit_added()
+ yield text_content.emit_delta("hello")
+ yield text_content.emit_done()
+ yield message_item.emit_content_done(text_content)
+ yield message_item.emit_done()
+
+ yield stream.emit_completed()
+
+ return _events()
+
+ app = ResponsesAgentServerHost()
+ app.create_handler(_streaming_handler)
+ client = TestClient(app)
+
+ with client.stream(
+ "POST",
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": True,
+ "store": True,
+ "background": False,
+ },
+ ) as response:
+ assert response.status_code == 200
+ lines = [line for line in response.iter_lines() if line]
+
+ event_lines = [line for line in lines if line.startswith("event:")]
+ assert "event: response.output_item.added" in event_lines
+ assert "event: response.content_part.added" in event_lines
+ assert "event: response.output_text.delta" in event_lines
+ assert "event: response.output_text.done" in event_lines
+ assert "event: response.content_part.done" in event_lines
+ assert "event: response.output_item.done" in event_lines
+
+
+def test_hosting__non_stream_mode_returns_completed_response_with_output_items() -> None:
+ from azure.ai.agentserver.responses.streaming._event_stream import ResponseEventStream
+
+ def _non_stream_handler(request: Any, context: Any, cancellation_signal: Any):
+ async def _events():
+ stream = ResponseEventStream(response_id=context.response_id, model=getattr(request, "model", None))
+ yield stream.emit_created()
+ yield stream.emit_in_progress()
+
+ message_item = stream.add_output_item_message()
+ yield message_item.emit_added()
+
+ text_content = message_item.add_text_content()
+ yield text_content.emit_added()
+ yield text_content.emit_delta("hello")
+ yield text_content.emit_done()
+ yield message_item.emit_content_done(text_content)
+ yield message_item.emit_done()
+
+ yield stream.emit_completed()
+
+ return _events()
+
+ app = ResponsesAgentServerHost()
+ app.create_handler(_non_stream_handler)
+ client = TestClient(app)
+
+ response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": False,
+ },
+ )
+
+ assert response.status_code == 200
+ payload = response.json()
+ assert payload["status"] == "completed"
+ assert payload["id"].startswith("caresp_")
+ assert isinstance(payload.get("output"), list)
+ assert len(payload["output"]) == 1
+ assert payload["output"][0]["type"] == "message"
+ assert payload["output"][0]["content"][0]["type"] == "output_text"
+ assert payload["output"][0]["content"][0]["text"] == "hello"
+
+
+def test_hosting__health_endpoint_is_available() -> None:
+ """Verify AgentHost provides health endpoint automatically."""
+ app = ResponsesAgentServerHost()
+ app.create_handler(_noop_response_handler)
+ client = TestClient(app)
+
+ response = client.get("/readiness")
+ assert response.status_code == 200
+ assert response.json()["status"] == "healthy"
+
+
+def test_hosting__multi_protocol_composition() -> None:
+ """Verify ResponseHandler can coexist with other protocol handlers on the same server."""
+ app = ResponsesAgentServerHost()
+ app.create_handler(_noop_response_handler)
+ client = TestClient(app)
+
+ # Responses endpoint works
+ create_response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": False,
+ },
+ )
+ assert create_response.status_code == 200
+
+ # Health endpoint works
+ health_response = client.get("/readiness")
+ assert health_response.status_code == 200
+
+
+@pytest.mark.skip(reason="Shutdown handler registration under investigation after _hosting.py refactor")
+def test_hosting__shutdown_signals_inflight_background_execution() -> None:
+ started_gate = EventGate()
+ cancelled_gate = EventGate()
+ shutdown_gate = EventGate()
+
+ def _shutdown_aware_handler(request: Any, context: Any, cancellation_signal: Any):
+ async def _events():
+ yield {
+ "type": "response.created",
+ "response": {
+ "status": "in_progress",
+ "output": [],
+ },
+ }
+ started_gate.signal(True)
+
+ while True:
+ if context.is_shutdown_requested:
+ shutdown_gate.signal(True)
+ if cancellation_signal.is_set():
+ cancelled_gate.signal(True)
+ return
+ await asyncio.sleep(0.01)
+
+ return _events()
+
+ app = ResponsesAgentServerHost(
+ options=ResponsesServerOptions(shutdown_grace_period_seconds=2),
+ )
+ app.create_handler(_shutdown_aware_handler)
+
+ create_result: dict[str, Any] = {}
+ get_result: dict[str, Any] = {}
+
+ with TestClient(app) as client:
+ create_response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": True,
+ },
+ )
+ assert create_response.status_code == 200
+ response_id = create_response.json()["id"]
+ create_result["response_id"] = response_id
+
+ def _issue_get() -> None:
+ try:
+ get_result["response"] = client.get(f"/responses/{response_id}")
+ except Exception as exc: # pragma: no cover - surfaced via assertion below.
+ get_result["error"] = exc
+
+ get_thread = threading.Thread(target=_issue_get, daemon=True)
+ get_thread.start()
+
+ started, _ = started_gate.wait(timeout_s=2.0)
+ assert started, "Expected background handler execution to start before shutdown"
+ assert client.portal is not None
+ client.portal.call(app.router.shutdown)
+
+ cancelled, _ = cancelled_gate.wait(timeout_s=2.0)
+ shutdown_seen, _ = shutdown_gate.wait(timeout_s=2.0)
+ assert cancelled, "Expected shutdown to trigger cancellation_signal for in-flight execution"
+ assert shutdown_seen, "Expected shutdown to set context.is_shutdown_requested"
+
+ get_thread.join(timeout=2.0)
+ assert not get_thread.is_alive(), "Expected in-flight GET request to finish after shutdown"
+ assert get_result.get("error") is None, str(get_result.get("error"))
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/integration/test_store_lifecycle.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/integration/test_store_lifecycle.py
new file mode 100644
index 000000000000..b3b74c4754bf
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/integration/test_store_lifecycle.py
@@ -0,0 +1,143 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Integration tests for store and lifecycle behavior."""
+
+from __future__ import annotations
+
+import asyncio
+from typing import Any
+
+from starlette.testclient import TestClient
+
+from azure.ai.agentserver.responses import ResponsesAgentServerHost
+from tests._helpers import poll_until
+
+
+def _noop_response_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Minimal handler used to wire lifecycle integration tests."""
+
+ async def _events():
+ if False: # pragma: no cover - keep async generator shape.
+ yield None
+
+ return _events()
+
+
+def _cancellable_bg_handler(request: Any, context: Any, cancellation_signal: Any):
+ """Handler that emits response.created then blocks until cancelled (Phase 3)."""
+
+ async def _events():
+ yield {"type": "response.created", "response": {"status": "in_progress", "output": []}}
+ while not cancellation_signal.is_set():
+ await asyncio.sleep(0.01)
+
+ return _events()
+
+
+def _build_client() -> TestClient:
+ app = ResponsesAgentServerHost()
+ app.create_handler(_noop_response_handler)
+ return TestClient(app)
+
+
+def test_store_lifecycle__create_read_and_cleanup_behavior() -> None:
+ client = _build_client()
+
+ create_response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": False,
+ },
+ )
+ assert create_response.status_code == 200
+ response_id = create_response.json()["id"]
+
+ read_response = client.get(f"/responses/{response_id}")
+ assert read_response.status_code == 200
+
+ # Lifecycle cleanup contract: after explicit cancellation, read should still be stable or terminally unavailable.
+ cancel_response = client.post(f"/responses/{response_id}/cancel")
+ assert cancel_response.status_code in {200, 400}
+
+
+def test_store_lifecycle__background_completion_is_observed_deterministically() -> None:
+ client = _build_client()
+
+ create_response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": True,
+ },
+ )
+ assert create_response.status_code == 200
+ response_id = create_response.json()["id"]
+
+ terminal_states = {"completed", "failed", "incomplete", "cancelled"}
+ latest_status: str | None = None
+
+ def _is_terminal() -> bool:
+ nonlocal latest_status
+ get_response = client.get(f"/responses/{response_id}")
+ if get_response.status_code != 200:
+ return False
+ latest_status = get_response.json().get("status")
+ return latest_status in terminal_states
+
+ ok, failure = poll_until(
+ _is_terminal,
+ timeout_s=5.0,
+ interval_s=0.05,
+ context_provider=lambda: {"last_status": latest_status},
+ label="background completion polling",
+ )
+ assert ok, failure
+
+
+def test_store_lifecycle__background_cancel_transition_is_deterministic() -> None:
+ app = ResponsesAgentServerHost()
+ app.create_handler(_cancellable_bg_handler)
+ client = TestClient(app)
+
+ create_response = client.post(
+ "/responses",
+ json={
+ "model": "gpt-4o-mini",
+ "input": "hello",
+ "stream": False,
+ "store": True,
+ "background": True,
+ },
+ )
+ assert create_response.status_code == 200
+ response_id = create_response.json()["id"]
+
+ cancel_response = client.post(f"/responses/{response_id}/cancel")
+ assert cancel_response.status_code == 200
+ assert cancel_response.json().get("status") == "cancelled"
+
+ latest_status: str | None = None
+
+ def _is_cancelled() -> bool:
+ nonlocal latest_status
+ get_response = client.get(f"/responses/{response_id}")
+ if get_response.status_code != 200:
+ return False
+ latest_status = get_response.json().get("status")
+ return latest_status == "cancelled"
+
+ ok, failure = poll_until(
+ _is_cancelled,
+ timeout_s=5.0,
+ interval_s=0.05,
+ context_provider=lambda: {"last_status": latest_status},
+ label="background cancel transition polling",
+ )
+ assert ok, failure
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/interop/COMPLIANCE.md b/sdk/agentserver/azure-ai-agentserver-responses/tests/interop/COMPLIANCE.md
new file mode 100644
index 000000000000..2bf838991fb8
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/interop/COMPLIANCE.md
@@ -0,0 +1,375 @@
+# OpenAI Responses API — Compliance Requirements
+
+> Source of truth: [OpenAI OpenAPI spec v2.3.0](https://app.stainless.com/api/spec/documented/openai/openapi.documented.yml)
+> (also cross-referenced against the OpenAI Python SDK v1.82+)
+
+This document captures the wire-format contracts that our Responses server
+**must** satisfy to remain drop-in compatible with the OpenAI SDK. Tests in this
+directory enforce these contracts. **When a test fails, fix the service — not the
+test.**
+
+---
+
+## 1. CreateResponse Request Body
+
+| Property | Type | Required | Default | Notes |
+|---|---|---|---|---|
+| `model` | string | No* | — | *Our server resolves via DefaultModel when omitted |
+| `input` | `string \| InputItem[]` | No | — | String shorthand = single user message |
+| `instructions` | string \| null | No | — | System/developer instructions |
+| `tools` | Tool[] | No | — | Available tools |
+| `tool_choice` | `"none" \| "auto" \| "required" \| object` | No | — | String shorthands expand to ToolChoiceAllowed |
+| `temperature` | number(0–2) \| null | No | 1 | |
+| `top_p` | number(0–1) \| null | No | 1 | |
+| `max_output_tokens` | integer \| null | No | — | |
+| `metadata` | map\ \| null | No | — | |
+| `store` | boolean \| null | No | true | |
+| `stream` | boolean \| null | No | false | |
+| `previous_response_id` | string \| null | No | — | |
+| `parallel_tool_calls` | boolean \| null | No | true | |
+| `truncation` | `"auto" \| "disabled"` \| null | No | `"disabled"` | |
+| `reasoning` | Reasoning \| null | No | — | |
+| `conversation` | `string \| { id: string }` \| null | No | — | |
+| `text` | ResponseTextParam | No | — | |
+
+---
+
+## 2. Input Items
+
+### 2.1 EasyInputMessage (`type: "message"`)
+
+| Property | Type | Required | Default | OpenAI Spec Reference |
+|---|---|---|---|---|
+| `type` | `"message"` | **No** | `"message"` | Not in required array |
+| `role` | `"user" \| "assistant" \| "system" \| "developer"` | **Yes** | — | |
+| `content` | `string \| ContentPart[]` | **Yes** | — | String shorthand = single input_text |
+| `phase` | `"commentary" \| "final_answer"` \| null | No | — | |
+
+**Compliance rule C-MSG-01**: Server MUST accept messages without `type` field
+and default to `"message"`.
+
+**Compliance rule C-MSG-02**: Server MUST accept content as a plain string OR an
+array of content parts.
+
+### 2.2 Content Parts (inside message content array)
+
+#### InputTextContent (`type: "input_text"`)
+| Property | Type | Required |
+|---|---|---|
+| `type` | `"input_text"` | **Yes** |
+| `text` | string | **Yes** |
+
+#### InputImageContent (`type: "input_image"`)
+| Property | Type | Required | Default |
+|---|---|---|---|
+| `type` | `"input_image"` | **Yes** | |
+| `image_url` | string \| null | No | |
+| `file_id` | string \| null | No | |
+| `detail` | `"low" \| "high" \| "auto" \| "original"` \| null | **No** | `"auto"` |
+
+**Compliance rule C-IMG-01**: Server MUST accept input_image content parts
+**without** `detail` field. When omitted, the server should default to `"auto"`.
+
+#### InputFileContent (`type: "input_file"`)
+| Property | Type | Required |
+|---|---|---|
+| `type` | `"input_file"` | **Yes** |
+| `file_id` | string \| null | No |
+| `filename` | string | No |
+| `file_data` | string | No |
+
+### 2.3 FunctionCall (`type: "function_call"`)
+
+| Property | Type | Required |
+|---|---|---|
+| `type` | `"function_call"` | **Yes** |
+| `call_id` | string | **Yes** |
+| `name` | string | **Yes** |
+| `arguments` | string (JSON) | **Yes** |
+| `id` | string | No |
+| `status` | enum | No |
+
+### 2.4 FunctionCallOutput (`type: "function_call_output"`)
+
+| Property | Type | Required |
+|---|---|---|
+| `type` | `"function_call_output"` | **Yes** |
+| `call_id` | string (1–64 chars) | **Yes** |
+| `output` | `string \| ContentPart[]` | **Yes** |
+| `id` | string \| null | No |
+| `status` | enum \| null | No |
+
+**Compliance rule C-FCO-01**: `output` is `oneOf: [string, array]`.
+A JSON string value is the common case. An array of content parts (input_text,
+input_image, input_file) is also valid.
+
+### 2.5 ItemReference (`type: "item_reference"`)
+
+| Property | Type | Required | Default |
+|---|---|---|---|
+| `type` | `"item_reference"` | **Yes** | `"item_reference"` |
+| `id` | string | **Yes** | |
+
+> **Note**: The OpenAI OpenAPI spec marks `type` as optional/nullable for
+> `ItemReferenceParam`, but the OpenAI SDK **always serializes `type`** — the
+> base class `ResponseItem.JsonModelWriteCore` unconditionally writes it.
+> This is a spec anomaly, not a real optionality. We keep `type` required.
+
+### 2.6 ComputerCallOutput (`type: "computer_call_output"`)
+
+| Property | Type | Required |
+|---|---|---|
+| `type` | `"computer_call_output"` | **Yes** |
+| `call_id` | string | **Yes** |
+| `output` | ComputerScreenshotImage | **Yes** |
+| `acknowledged_safety_checks` | array \| null | No |
+| `id` | string | No |
+
+### 2.7 Reasoning (`type: "reasoning"`)
+
+| Property | Type | Required |
+|---|---|---|
+| `type` | `"reasoning"` | **Yes** |
+| `id` | string | **Yes** |
+| `summary` | SummaryTextContent[] | **Yes** |
+| `encrypted_content` | string \| null | No |
+
+### 2.8 MCPApprovalResponse (`type: "mcp_approval_response"`)
+
+| Property | Type | Required |
+|---|---|---|
+| `type` | `"mcp_approval_response"` | **Yes** |
+| `approval_request_id` | string | **Yes** |
+| `approve` | boolean | **Yes** |
+| `reason` | string \| null | No |
+| `id` | string \| null | No |
+
+---
+
+## 3. Tool Definitions
+
+### 3.1 FunctionTool (`type: "function"`)
+
+| Property | Type | Required | Default |
+|---|---|---|---|
+| `type` | `"function"` | **Yes** | `"function"` |
+| `name` | string (1–128 chars) | **Yes** | — |
+| `description` | string \| null | No | — |
+| `parameters` | object (JSON Schema) \| null | **No** | — |
+| `strict` | boolean \| null | **No** | — |
+
+**Compliance rule C-FUNC-01**: Server MUST accept function tools **without**
+`strict` field. It is nullable and optional.
+
+**Compliance rule C-FUNC-02**: Server MUST accept function tools **without**
+`parameters` field. It is nullable and optional.
+
+### 3.2 WebSearchPreviewTool (`type: "web_search_preview"`)
+
+| Property | Type | Required |
+|---|---|---|
+| `type` | `"web_search_preview"` | **Yes** |
+| `user_location` | object \| null | No |
+| `search_context_size` | enum | No |
+
+### 3.3 FileSearchTool (`type: "file_search"`)
+
+| Property | Type | Required |
+|---|---|---|
+| `type` | `"file_search"` | **Yes** |
+| `vector_store_ids` | string[] | **Yes** |
+| `max_num_results` | integer | No |
+| `ranking_options` | object | No |
+| `filters` | object \| null | No |
+
+### 3.4 CodeInterpreterTool (`type: "code_interpreter"`)
+
+| Property | Type | Required |
+|---|---|---|
+| `type` | `"code_interpreter"` | **Yes** |
+| `container` | string \| object | **Yes** |
+
+### 3.5 ImageGenerationTool (`type: "image_generation"`)
+
+| Property | Type | Required |
+|---|---|---|
+| `type` | `"image_generation"` | **Yes** |
+| (all other props) | various | No |
+
+### 3.6 MCPTool (`type: "mcp"`)
+
+| Property | Type | Required |
+|---|---|---|
+| `type` | `"mcp"` | **Yes** |
+| `server_label` | string | **Yes** |
+| `server_url` | string | No |
+| `allowed_tools` | array \| object \| null | No |
+| `require_approval` | object \| string \| null | No |
+
+---
+
+## 4. ToolChoice
+
+| Form | Wire Representation | Meaning |
+|---|---|---|
+| String `"none"` | `"none"` | No tools used |
+| String `"auto"` | `"auto"` | Model decides |
+| String `"required"` | `"required"` | Must use a tool |
+| Function object | `{ "type": "function", "name": "..." }` | Force specific function |
+| Type object | `{ "type": "" }` | Force tool type |
+
+---
+
+## 5. Response Object
+
+| Property | Type | Required | Notes |
+|---|---|---|---|
+| `id` | string | **Yes** | Starts with `resp_` (or `caresp_` for our server) |
+| `object` | `"response"` | **Yes** | |
+| `status` | enum | **Yes** | `completed \| failed \| in_progress \| cancelled \| queued \| incomplete` |
+| `model` | string | **Yes** | |
+| `output` | OutputItem[] | **Yes** | |
+| `created_at` | number | **Yes** | Unix timestamp |
+| `error` | object \| null | **Yes** | |
+| `incomplete_details` | object \| null | **Yes** | |
+| `instructions` | string \| null | **Yes** | |
+| `usage` | ResponseUsage | No | |
+| `metadata` | map | No | |
+| `temperature` | number | No | |
+| `top_p` | number | No | |
+| `tools` | Tool[] | No | |
+| `tool_choice` | ToolChoiceParam | No | |
+| `parallel_tool_calls` | boolean | No | |
+
+---
+
+## 6. Output Item Types
+
+### OutputMessage (`type: "message"`)
+| Property | Type | Required |
+|---|---|---|
+| `id` | string | **Yes** |
+| `type` | `"message"` | **Yes** |
+| `role` | `"assistant"` | **Yes** |
+| `status` | enum | **Yes** |
+| `content` | (OutputTextContent \| RefusalContent)[] | **Yes** |
+
+### FunctionToolCall (`type: "function_call"`)
+| Property | Type | Required |
+|---|---|---|
+| `type` | `"function_call"` | **Yes** |
+| `call_id` | string | **Yes** |
+| `name` | string | **Yes** |
+| `arguments` | string (JSON) | **Yes** |
+| `id` | string | No |
+| `status` | enum | No |
+
+### FunctionToolCallOutput (`type: "function_call_output"`)
+| Property | Type | Required |
+|---|---|---|
+| `type` | `"function_call_output"` | **Yes** |
+| `call_id` | string | **Yes** |
+| `output` | string \| array | **Yes** |
+| `id` | string | No |
+| `status` | enum | No |
+
+### WebSearchCall (`type: "web_search_call"`)
+| Property | Type | Required |
+|---|---|---|
+| `id` | string | **Yes** |
+| `type` | `"web_search_call"` | **Yes** |
+| `status` | enum | **Yes** |
+
+### FileSearchCall (`type: "file_search_call"`)
+| Property | Type | Required |
+|---|---|---|
+| `id` | string | **Yes** |
+| `type` | `"file_search_call"` | **Yes** |
+| `status` | enum | **Yes** |
+| `queries` | string[] | **Yes** |
+| `results` | array \| null | No |
+
+### CodeInterpreterCall (`type: "code_interpreter_call"`)
+| Property | Type | Required |
+|---|---|---|
+| `type` | `"code_interpreter_call"` | **Yes** |
+| `id` | string | **Yes** |
+| `status` | enum | **Yes** |
+| `container_id` | string | **Yes** |
+| `code` | string \| null | **Yes** |
+| `outputs` | array \| null | **Yes** |
+
+### ImageGenerationCall (`type: "image_generation_call"`)
+| Property | Type | Required |
+|---|---|---|
+| `type` | `"image_generation_call"` | **Yes** |
+| `id` | string | **Yes** |
+| `status` | enum | **Yes** |
+| `result` | string \| null | **Yes** |
+
+### ComputerCall (`type: "computer_call"`)
+| Property | Type | Required |
+|---|---|---|
+| `type` | `"computer_call"` | **Yes** |
+| `id` | string | **Yes** |
+| `call_id` | string | **Yes** |
+| `pending_safety_checks` | array | **Yes** |
+| `status` | enum | **Yes** |
+
+### ReasoningItem (`type: "reasoning"`)
+| Property | Type | Required |
+|---|---|---|
+| `type` | `"reasoning"` | **Yes** |
+| `id` | string | **Yes** |
+| `summary` | SummaryTextContent[] | **Yes** |
+
+### MCPCall (`type: "mcp_call"`)
+| Property | Type | Required |
+|---|---|---|
+| `type` | `"mcp_call"` | **Yes** |
+| `id` | string | **Yes** |
+| `server_label` | string | **Yes** |
+| `name` | string | **Yes** |
+| `arguments` | string (JSON) | **Yes** |
+
+### MCPApprovalRequest (`type: "mcp_approval_request"`)
+| Property | Type | Required |
+|---|---|---|
+| `type` | `"mcp_approval_request"` | **Yes** |
+| `id` | string | **Yes** |
+| `server_label` | string | **Yes** |
+| `name` | string | **Yes** |
+| `arguments` | string (JSON) | **Yes** |
+
+### MCPListTools (`type: "mcp_list_tools"`)
+| Property | Type | Required |
+|---|---|---|
+| `type` | `"mcp_list_tools"` | **Yes** |
+| `id` | string | **Yes** |
+| `server_label` | string | **Yes** |
+| `tools` | MCPTool[] | **Yes** |
+
+---
+
+## 7. Known Compliance Gaps (to fix in service)
+
+| ID | Gap | OpenAI Spec Says | Our Spec Says | Fix |
+|---|---|---|---|---|
+| GAP-01 | `EasyInputMessage.type` required | Optional (not in required) | Required | Make optional via overlay `not_required` or custom validator default |
+| ~~GAP-02~~ | ~~`ItemReferenceParam.type` required~~ | ~~Optional (nullable)~~ | Required | **Not a real gap** — OpenAI SDK always sends `type`. Spec anomaly. |
+| GAP-03 | `MessageContentInputImageContent.detail` required | Optional (nullable, not in required for Param variant) | Required | Make optional via overlay `not_required` |
+| GAP-04 | `FunctionTool.strict` required | Optional (nullable, not in required) | Required | Make optional via overlay `not_required` |
+| GAP-05 | `FunctionTool.parameters` required | Optional (nullable, not in required) | Required | Make optional via overlay `not_required` |
+
+---
+
+## 8. Test Organization
+
+### Raw JSON Compliance Tests (`test_openai_wire_compliance.py`)
+Send raw JSON that matches exactly what the OpenAI SDK would produce. Tests
+validate the server accepts each payload and correctly deserializes the model.
+
+### SDK Round-Trip Tests (`test_sdk_round_trip.py`)
+Use the OpenAI Python SDK (`openai` package) to construct requests and read
+responses from our server. Tests validate end-to-end compatibility through
+the SDK's serialization and deserialization.
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/interop/__init__.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/interop/__init__.py
new file mode 100644
index 000000000000..9a0454564dbb
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/interop/__init__.py
@@ -0,0 +1,2 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/interop/test_openai_wire_compliance.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/interop/test_openai_wire_compliance.py
new file mode 100644
index 000000000000..e547216b2cf9
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/interop/test_openai_wire_compliance.py
@@ -0,0 +1,754 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Raw JSON wire-format compliance tests.
+
+Tests in this file define the OpenAI wire-format contract.
+When a test fails, FIX THE SERVICE — do not change the test.
+See COMPLIANCE.md for the source-of-truth specification.
+
+Each test sends a JSON payload that is valid per the OpenAI Responses API
+specification and verifies our server accepts it and correctly deserializes
+the model. The JSON payloads match exactly what the OpenAI SDK (or any
+compliant client) would produce.
+"""
+
+from __future__ import annotations
+
+import json
+from typing import Any
+
+from starlette.testclient import TestClient
+
+from azure.ai.agentserver.responses import (
+ CreateResponse,
+ ResponseContext,
+ ResponseEventStream,
+ ResponsesAgentServerHost,
+ get_input_expanded,
+)
+from azure.ai.agentserver.responses.models import (
+ get_tool_choice_expanded,
+)
+
+# ---------------------------------------------------------------------------
+# Test infrastructure
+# ---------------------------------------------------------------------------
+
+# Mutable container to capture the deserialized request from the handler.
+_captured: dict[str, Any] = {}
+
+
+def _capture_handler(request: CreateResponse, context: ResponseContext, cancellation_signal: Any):
+ """Handler that captures the parsed request, then emits a minimal response."""
+ _captured["request"] = request
+
+ async def _events():
+ stream = ResponseEventStream(response_id=context.response_id, model=request.model)
+ yield stream.emit_created()
+ yield stream.emit_in_progress()
+
+ msg = stream.add_output_item_message()
+ yield msg.emit_added()
+ text = msg.add_text_content()
+ yield text.emit_added()
+ yield text.emit_done("ok")
+ yield msg.emit_content_done(text)
+ yield msg.emit_done()
+
+ yield stream.emit_completed()
+
+ return _events()
+
+
+def _build_client() -> TestClient:
+ app = ResponsesAgentServerHost()
+ app.create_handler(_capture_handler)
+ return TestClient(app)
+
+
+def _send_and_capture(json_body: str) -> CreateResponse:
+ """POST raw JSON to /responses and return the captured CreateResponse."""
+ _captured.clear()
+ client = _build_client()
+ resp = client.post(
+ "/responses",
+ content=json_body.encode("utf-8"),
+ headers={"content-type": "application/json"},
+ )
+ assert resp.status_code == 200, f"Expected 200, got {resp.status_code}: {resp.text}"
+ return _captured["request"]
+
+
+def _send_input_and_capture(input_items_json: str) -> list:
+ """Send input items array and return the expanded item list."""
+ json_body = f'{{"model": "test", "input": {input_items_json}}}'
+ request = _send_and_capture(json_body)
+ return get_input_expanded(request)
+
+
+def _send_stream_and_collect(json_body: str) -> list[dict[str, Any]]:
+ """POST raw JSON with stream=True and collect SSE events."""
+ _captured.clear()
+ client = _build_client()
+ body = json.loads(json_body)
+ body["stream"] = True
+ with client.stream(
+ "POST",
+ "/responses",
+ content=json.dumps(body).encode("utf-8"),
+ headers={"content-type": "application/json"},
+ ) as resp:
+ assert resp.status_code == 200
+ events: list[dict[str, Any]] = []
+ current_type: str | None = None
+ current_data: str | None = None
+ for line in resp.iter_lines():
+ if not line:
+ if current_type is not None:
+ events.append(
+ {
+ "type": current_type,
+ "data": json.loads(current_data) if current_data else {},
+ }
+ )
+ current_type = None
+ current_data = None
+ continue
+ if line.startswith("event:"):
+ current_type = line.split(":", 1)[1].strip()
+ elif line.startswith("data:"):
+ current_data = line.split(":", 1)[1].strip()
+ if current_type is not None:
+ events.append(
+ {
+ "type": current_type,
+ "data": json.loads(current_data) if current_data else {},
+ }
+ )
+ return events
+
+
+def _reject_payload(json_body: str) -> int:
+ """POST raw JSON and return the status code (expected non-200)."""
+ client = _build_client()
+ resp = client.post(
+ "/responses",
+ content=json_body.encode("utf-8"),
+ headers={"content-type": "application/json"},
+ )
+ return resp.status_code
+
+
+# ═══════════════════════════════════════════════════════════════════
+# GAP-01: EasyInputMessage — type is OPTIONAL (C-MSG-01)
+# ═══════════════════════════════════════════════════════════════════
+
+
+def test_c_msg_01__message_without_type_accepted_as_message() -> None:
+ """OpenAI spec: EasyInputMessage does NOT require 'type'."""
+ items = _send_input_and_capture("""
+ [{ "role": "user", "content": "Hello without type" }]
+ """)
+ assert len(items) == 1
+ assert items[0].get("type") == "message"
+ assert items[0].get("role") == "user"
+
+
+def test_c_msg_01__message_with_type_also_accepted() -> None:
+ items = _send_input_and_capture("""
+ [{ "type": "message", "role": "user", "content": "With type" }]
+ """)
+ assert len(items) == 1
+ assert items[0].get("role") == "user"
+
+
+def test_c_msg_01__multiple_messages_without_type() -> None:
+ items = _send_input_and_capture("""
+ [
+ { "role": "developer", "content": "System msg" },
+ { "role": "user", "content": "User msg" },
+ { "role": "assistant", "content": "Asst msg" }
+ ]
+ """)
+ assert len(items) == 3
+ assert items[0].get("role") == "developer"
+ assert items[1].get("role") == "user"
+ assert items[2].get("role") == "assistant"
+
+
+# ═══════════════════════════════════════════════════════════════════
+# ItemReferenceParam — type is REQUIRED (SDK always sends it)
+# ═══════════════════════════════════════════════════════════════════
+
+
+def test_item_reference_with_type_accepted() -> None:
+ items = _send_input_and_capture("""
+ [{ "type": "item_reference", "id": "msg_existing_002" }]
+ """)
+ assert len(items) == 1
+ assert items[0].get("type") == "item_reference"
+ assert items[0].get("id") == "msg_existing_002"
+
+
+# ═══════════════════════════════════════════════════════════════════
+# GAP-03: InputImageContent.detail is OPTIONAL (C-IMG-01)
+# ═══════════════════════════════════════════════════════════════════
+
+
+def test_c_img_01__input_image_without_detail_accepted() -> None:
+ items = _send_input_and_capture("""
+ [{
+ "type": "message",
+ "role": "user",
+ "content": [
+ { "type": "input_image", "image_url": "https://example.com/img.png" }
+ ]
+ }]
+ """)
+ assert len(items) == 1
+ assert items[0].get("type") == "message"
+
+
+def test_c_img_01__input_image_with_detail_also_accepted() -> None:
+ items = _send_input_and_capture("""
+ [{
+ "type": "message",
+ "role": "user",
+ "content": [
+ { "type": "input_image", "image_url": "https://example.com/img.png", "detail": "high" }
+ ]
+ }]
+ """)
+ assert len(items) == 1
+
+
+def test_c_img_01__input_image_with_null_detail_accepted() -> None:
+ items = _send_input_and_capture("""
+ [{
+ "type": "message",
+ "role": "user",
+ "content": [
+ { "type": "input_image", "image_url": "https://example.com/img.png", "detail": null }
+ ]
+ }]
+ """)
+ assert len(items) == 1
+
+
+# ═══════════════════════════════════════════════════════════════════
+# GAP-04 & GAP-05: FunctionTool — strict & parameters OPTIONAL
+# ═══════════════════════════════════════════════════════════════════
+
+
+def test_c_func_01__function_tool_without_strict_accepted() -> None:
+ request = _send_and_capture("""
+ {
+ "model": "test",
+ "tools": [{
+ "type": "function",
+ "name": "get_weather",
+ "description": "Get weather",
+ "parameters": { "type": "object", "properties": {} }
+ }]
+ }
+ """)
+ assert request.tools is not None
+ assert len(request.tools) == 1
+ assert request.tools[0].get("type") == "function"
+ assert request.tools[0].get("name") == "get_weather"
+
+
+def test_c_func_02__function_tool_without_parameters_accepted() -> None:
+ request = _send_and_capture("""
+ {
+ "model": "test",
+ "tools": [{
+ "type": "function",
+ "name": "no_params_tool"
+ }]
+ }
+ """)
+ assert request.tools is not None
+ assert len(request.tools) == 1
+ assert request.tools[0].get("name") == "no_params_tool"
+
+
+def test_c_func_01_02__function_tool_minimal_form_accepted() -> None:
+ request = _send_and_capture("""
+ {
+ "model": "test",
+ "tools": [{ "type": "function", "name": "minimal_tool" }]
+ }
+ """)
+ assert request.tools is not None
+ assert len(request.tools) == 1
+ assert request.tools[0].get("name") == "minimal_tool"
+
+
+def test_c_func_01__function_tool_with_strict_null_accepted() -> None:
+ request = _send_and_capture("""
+ {
+ "model": "test",
+ "tools": [{
+ "type": "function",
+ "name": "get_weather",
+ "strict": null,
+ "parameters": { "type": "object", "properties": {} }
+ }]
+ }
+ """)
+ assert request.tools is not None
+ assert len(request.tools) == 1
+
+
+def test_c_func_01__function_tool_with_strict_true_accepted() -> None:
+ request = _send_and_capture("""
+ {
+ "model": "test",
+ "tools": [{
+ "type": "function",
+ "name": "strict_tool",
+ "strict": true,
+ "parameters": { "type": "object", "properties": {} }
+ }]
+ }
+ """)
+ assert request.tools is not None
+ assert len(request.tools) == 1
+
+
+# ═══════════════════════════════════════════════════════════════════
+# INPUT ITEM TYPES — all types recognized by the OpenAI spec
+# ═══════════════════════════════════════════════════════════════════
+
+
+def test_input_message_text_content() -> None:
+ items = _send_input_and_capture("""
+ [{
+ "type": "message",
+ "role": "user",
+ "content": [{ "type": "input_text", "text": "Hello" }]
+ }]
+ """)
+ assert len(items) == 1
+ assert items[0].get("type") == "message"
+ assert items[0].get("role") == "user"
+ content = items[0].get("content", [])
+ assert len(content) == 1
+ assert content[0].get("type") == "input_text"
+ assert content[0].get("text") == "Hello"
+
+
+def test_input_message_string_content() -> None:
+ items = _send_input_and_capture("""
+ [{ "type": "message", "role": "developer", "content": "System prompt" }]
+ """)
+ assert len(items) == 1
+ assert items[0].get("role") == "developer"
+
+
+def test_input_message_multiple_content_parts() -> None:
+ items = _send_input_and_capture("""
+ [{
+ "type": "message",
+ "role": "user",
+ "content": [
+ { "type": "input_text", "text": "Look at this image" },
+ { "type": "input_image", "image_url": "https://example.com/img.png" }
+ ]
+ }]
+ """)
+ assert len(items) == 1
+ content = items[0].get("content", [])
+ assert len(content) == 2
+
+
+def test_input_message_all_roles() -> None:
+ items = _send_input_and_capture("""
+ [
+ { "type": "message", "role": "user", "content": "r1" },
+ { "type": "message", "role": "assistant", "content": "r2" },
+ { "type": "message", "role": "developer", "content": "r3" },
+ { "type": "message", "role": "system", "content": "r4" }
+ ]
+ """)
+ assert len(items) == 4
+ assert items[0].get("role") == "user"
+ assert items[1].get("role") == "assistant"
+ assert items[2].get("role") == "developer"
+ assert items[3].get("role") == "system"
+
+
+def test_input_function_call() -> None:
+ items = _send_input_and_capture("""
+ [{
+ "type": "function_call",
+ "call_id": "call_abc",
+ "name": "get_weather",
+ "arguments": "{\\"city\\":\\"Seattle\\"}"
+ }]
+ """)
+ assert len(items) == 1
+ assert items[0].get("type") == "function_call"
+ assert items[0].get("call_id") == "call_abc"
+ assert items[0].get("name") == "get_weather"
+ assert items[0].get("arguments") == '{"city":"Seattle"}'
+
+
+def test_input_function_call_output_string_output() -> None:
+ items = _send_input_and_capture("""
+ [{
+ "type": "function_call_output",
+ "call_id": "call_abc",
+ "output": "72°F and sunny"
+ }]
+ """)
+ assert len(items) == 1
+ assert items[0].get("type") == "function_call_output"
+ assert items[0].get("call_id") == "call_abc"
+
+
+def test_input_function_call_output_array_output() -> None:
+ """output can be an array of content parts per OpenAI spec."""
+ items = _send_input_and_capture("""
+ [{
+ "type": "function_call_output",
+ "call_id": "call_xyz",
+ "output": [
+ { "type": "input_text", "text": "Result text" }
+ ]
+ }]
+ """)
+ assert len(items) == 1
+ assert items[0].get("type") == "function_call_output"
+
+
+def test_input_reasoning() -> None:
+ items = _send_input_and_capture("""
+ [{
+ "type": "reasoning",
+ "id": "rs_abc",
+ "summary": [
+ { "type": "summary_text", "text": "Thinking step 1" }
+ ]
+ }]
+ """)
+ assert len(items) == 1
+ assert items[0].get("type") == "reasoning"
+ assert items[0].get("id") == "rs_abc"
+
+
+def test_input_computer_call_output() -> None:
+ items = _send_input_and_capture("""
+ [{
+ "type": "computer_call_output",
+ "call_id": "cu_abc",
+ "output": {
+ "type": "computer_screenshot",
+ "image_url": "https://example.com/screenshot.png"
+ }
+ }]
+ """)
+ assert len(items) == 1
+ assert items[0].get("type") == "computer_call_output"
+ assert items[0].get("call_id") == "cu_abc"
+
+
+def test_input_mcp_approval_response() -> None:
+ items = _send_input_and_capture("""
+ [{
+ "type": "mcp_approval_response",
+ "approval_request_id": "mcpr_abc",
+ "approve": true
+ }]
+ """)
+ assert len(items) == 1
+ assert items[0].get("type") == "mcp_approval_response"
+ assert items[0].get("approval_request_id") == "mcpr_abc"
+ assert items[0].get("approve") is True
+
+
+def test_input_mixed_types_all_deserialize() -> None:
+ items = _send_input_and_capture("""
+ [
+ { "role": "user", "content": "Hello" },
+ { "type": "function_call", "call_id": "c1", "name": "fn", "arguments": "{}" },
+ { "type": "function_call_output", "call_id": "c1", "output": "done" },
+ { "type": "item_reference", "id": "ref_001" }
+ ]
+ """)
+ assert len(items) == 4
+ # First item is a message (inferred from role without type)
+ assert items[0].get("role") == "user"
+ assert items[1].get("type") == "function_call"
+ assert items[2].get("type") == "function_call_output"
+ assert items[3].get("type") == "item_reference"
+
+
+# ═══════════════════════════════════════════════════════════════════
+# CREATERESPONSE PROPERTIES — all fields round-trip
+# ═══════════════════════════════════════════════════════════════════
+
+
+def test_create_response_model() -> None:
+ req = _send_and_capture('{"model": "gpt-4o-mini"}')
+ assert req.model == "gpt-4o-mini"
+
+
+def test_create_response_instructions() -> None:
+ req = _send_and_capture('{"model": "test", "instructions": "Be helpful"}')
+ assert req.instructions == "Be helpful"
+
+
+def test_create_response_temperature() -> None:
+ req = _send_and_capture('{"model": "test", "temperature": 0.7}')
+ assert abs(req.temperature - 0.7) < 0.001
+
+
+def test_create_response_top_p() -> None:
+ req = _send_and_capture('{"model": "test", "top_p": 0.9}')
+ assert abs(req.top_p - 0.9) < 0.001
+
+
+def test_create_response_max_output_tokens() -> None:
+ req = _send_and_capture('{"model": "test", "max_output_tokens": 1024}')
+ assert req.max_output_tokens == 1024
+
+
+def test_create_response_previous_response_id() -> None:
+ req = _send_and_capture('{"model": "test", "previous_response_id": "resp_prev_001"}')
+ assert req.previous_response_id == "resp_prev_001"
+
+
+def test_create_response_store() -> None:
+ req = _send_and_capture('{"model": "test", "store": false}')
+ assert req.store is False
+
+
+def test_create_response_metadata() -> None:
+ req = _send_and_capture('{"model": "test", "metadata": {"key": "value"}}')
+ assert req.metadata is not None
+ assert req.metadata.get("key") == "value"
+
+
+def test_create_response_parallel_tool_calls() -> None:
+ req = _send_and_capture('{"model": "test", "parallel_tool_calls": false}')
+ assert req.parallel_tool_calls is False
+
+
+def test_create_response_truncation() -> None:
+ req = _send_and_capture('{"model": "test", "truncation": "auto"}')
+ assert req.truncation is not None
+
+
+def test_create_response_reasoning() -> None:
+ req = _send_and_capture('{"model": "test", "reasoning": {"effort": "high"}}')
+ assert req.reasoning is not None
+
+
+def test_create_response_tool_choice_auto() -> None:
+ req = _send_and_capture('{"model": "test", "tool_choice": "auto"}')
+ tc = get_tool_choice_expanded(req)
+ assert tc is not None
+ assert tc.get("type") == "auto" or tc.get("mode") == "auto"
+
+
+def test_create_response_tool_choice_required() -> None:
+ req = _send_and_capture('{"model": "test", "tool_choice": "required"}')
+ tc = get_tool_choice_expanded(req)
+ assert tc is not None
+
+
+def test_create_response_tool_choice_none() -> None:
+ req = _send_and_capture('{"model": "test", "tool_choice": "none"}')
+ tc = get_tool_choice_expanded(req)
+ assert tc is None
+
+
+def test_create_response_tool_choice_function_object() -> None:
+ req = _send_and_capture("""
+ {"model": "test", "tool_choice": {"type": "function", "name": "get_weather"}}
+ """)
+ tc = get_tool_choice_expanded(req)
+ assert tc is not None
+ assert tc.get("name") == "get_weather"
+
+
+def test_create_response_tools_web_search() -> None:
+ req = _send_and_capture("""
+ {"model": "test", "tools": [{"type": "web_search_preview"}]}
+ """)
+ assert req.tools is not None
+ assert len(req.tools) == 1
+ assert req.tools[0].get("type") == "web_search_preview"
+
+
+def test_create_response_tools_file_search() -> None:
+ req = _send_and_capture("""
+ {"model": "test", "tools": [{"type": "file_search", "vector_store_ids": ["vs_abc"]}]}
+ """)
+ assert req.tools is not None
+ assert len(req.tools) == 1
+ assert req.tools[0].get("type") == "file_search"
+
+
+def test_create_response_tools_code_interpreter() -> None:
+ req = _send_and_capture("""
+ {"model": "test", "tools": [{"type": "code_interpreter"}]}
+ """)
+ assert req.tools is not None
+ assert len(req.tools) == 1
+ assert req.tools[0].get("type") == "code_interpreter"
+
+
+def test_create_response_stream() -> None:
+ events = _send_stream_and_collect('{"model": "test"}')
+ assert len(events) > 0
+ assert events[0]["type"] == "response.created"
+
+
+# ═══════════════════════════════════════════════════════════════════
+# RESPONSE OBJECT — server output readable by OpenAI SDK
+# ═══════════════════════════════════════════════════════════════════
+
+
+def test_response_object_has_required_fields() -> None:
+ """Non-streaming response has all required fields per OpenAI spec."""
+ client = _build_client()
+ resp = client.post(
+ "/responses",
+ content=b'{"model": "gpt-4o"}',
+ headers={"content-type": "application/json"},
+ )
+ assert resp.status_code == 200
+ body = resp.json()
+ assert "id" in body
+ assert body.get("object") == "response"
+ assert body.get("status") in {"completed", "failed", "in_progress", "cancelled", "queued", "incomplete"}
+ assert body.get("model") == "gpt-4o"
+ assert "output" in body
+ assert "created_at" in body
+
+
+# ═══════════════════════════════════════════════════════════════════
+# SHORTHAND NOTATIONS — string | array forms
+# ═══════════════════════════════════════════════════════════════════
+
+
+def test_input_string_shorthand_expands_to_user_message() -> None:
+ req = _send_and_capture('{"model": "test", "input": "Hello world"}')
+ items = get_input_expanded(req)
+ assert len(items) == 1
+ assert items[0].get("role") == "user"
+ content = items[0].get("content", [])
+ assert len(content) == 1
+ assert content[0].get("type") == "input_text"
+ assert content[0].get("text") == "Hello world"
+
+
+def test_input_empty_array_returns_empty() -> None:
+ req = _send_and_capture('{"model": "test", "input": []}')
+ assert get_input_expanded(req) == []
+
+
+def test_input_null_or_absent_returns_empty() -> None:
+ req = _send_and_capture('{"model": "test"}')
+ assert get_input_expanded(req) == []
+
+
+def test_message_content_string_shorthand_expands_to_input_text() -> None:
+ items = _send_input_and_capture("""
+ [{"type": "message", "role": "user", "content": "shorthand"}]
+ """)
+ # Content is stored as the raw value — may be string or expanded
+ # The server keeps the original form; expansion happens via get_content_expanded
+ assert len(items) == 1
+ assert items[0].get("role") == "user"
+
+
+def test_message_content_empty_string_accepted() -> None:
+ items = _send_input_and_capture("""
+ [{"type": "message", "role": "user", "content": ""}]
+ """)
+ assert len(items) == 1
+
+
+# ═══════════════════════════════════════════════════════════════════
+# COMBINED SCENARIO — realistic multi-turn with all shorthands
+# ═══════════════════════════════════════════════════════════════════
+
+
+def test_full_payload_all_shorthands_and_minimal_forms() -> None:
+ """Uses ALL shorthand/minimal forms in one request."""
+ req = _send_and_capture("""
+ {
+ "model": "gpt-4o",
+ "input": "What is the weather?",
+ "instructions": "Be helpful",
+ "tool_choice": "auto",
+ "store": true,
+ "temperature": 0.5,
+ "max_output_tokens": 500,
+ "tools": [
+ { "type": "function", "name": "get_weather" }
+ ]
+ }
+ """)
+ assert req.model == "gpt-4o"
+ assert req.instructions == "Be helpful"
+ assert abs(req.temperature - 0.5) < 0.001
+ assert req.max_output_tokens == 500
+ assert req.store is True
+
+ items = get_input_expanded(req)
+ assert len(items) == 1
+ assert items[0].get("role") == "user"
+
+ tc = get_tool_choice_expanded(req)
+ assert tc is not None
+
+ assert req.tools is not None
+ assert len(req.tools) == 1
+
+
+def test_multi_turn_mixed_shorthand_and_full_form() -> None:
+ items = _send_input_and_capture("""
+ [
+ { "role": "developer", "content": "You are helpful" },
+ {
+ "type": "message",
+ "role": "user",
+ "content": [
+ { "type": "input_text", "text": "Look at this" },
+ { "type": "input_image", "image_url": "https://example.com/img.png" }
+ ]
+ }
+ ]
+ """)
+ assert len(items) == 2
+ assert items[0].get("role") == "developer"
+ assert items[1].get("role") == "user"
+ content = items[1].get("content", [])
+ assert len(content) == 2
+
+
+# ═══════════════════════════════════════════════════════════════════
+# VALIDATION — reject truly invalid inputs
+# ═══════════════════════════════════════════════════════════════════
+
+
+def test_reject_input_as_number() -> None:
+ status = _reject_payload('{"model": "test", "input": 42}')
+ assert status == 400
+
+
+def test_reject_input_as_boolean() -> None:
+ status = _reject_payload('{"model": "test", "input": true}')
+ assert status == 400
+
+
+def test_reject_content_as_number() -> None:
+ status = _reject_payload("""
+ {"model": "test", "input": [{"type": "message", "role": "user", "content": 42}]}
+ """)
+ assert status == 400
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/interop/test_sdk_round_trip.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/interop/test_sdk_round_trip.py
new file mode 100644
index 000000000000..c9004fdec82e
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/interop/test_sdk_round_trip.py
@@ -0,0 +1,795 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""OpenAI Python SDK round-trip tests.
+
+These tests verify end-to-end compatibility between the **openai** Python SDK
+and our Responses API server. Each test:
+
+1. Creates a ``ResponsesAgentServerHost`` with a specific handler
+2. Uses ``starlette.testclient.TestClient`` (an ``httpx.Client`` subclass)
+ as the ``http_client`` for ``openai.OpenAI``
+3. Calls ``client.responses.create()`` through the SDK
+4. Asserts the SDK-parsed ``Response`` object matches expectations
+
+This is the Python equivalent of the SDK round-trip test suite.
+
+When a test fails, FIX THE SERVICE — do not change the test.
+"""
+
+from __future__ import annotations
+
+from typing import Any
+
+import pytest
+from starlette.testclient import TestClient
+
+openai = pytest.importorskip("openai", reason="openai SDK required for round-trip tests")
+
+from openai.types.responses import ( # noqa: E402
+ ResponseCodeInterpreterToolCall,
+ ResponseFileSearchToolCall,
+ ResponseFunctionToolCall,
+ ResponseFunctionToolCallOutputItem,
+ ResponseOutputMessage,
+ ResponseOutputText,
+ ResponseReasoningItem,
+)
+from openai.types.responses.response_output_item import ( # noqa: E402
+ ImageGenerationCall,
+ McpCall,
+ McpListTools,
+)
+
+from azure.ai.agentserver.responses import ( # noqa: E402
+ ResponseEventStream,
+ ResponsesAgentServerHost,
+)
+
+# ---------------------------------------------------------------------------
+# Helpers
+# ---------------------------------------------------------------------------
+
+_captured: dict[str, Any] = {}
+
+
+def _make_sdk_client(handler) -> openai.OpenAI:
+ """Build an OpenAI SDK client backed by our ASGI test server.
+
+ ``TestClient`` (an ``httpx.Client`` subclass) is passed as
+ ``http_client`` so all traffic goes through the ASGI app — no real
+ network calls are made.
+ """
+ app = ResponsesAgentServerHost()
+ app.create_handler(handler)
+ tc = TestClient(app)
+ return openai.OpenAI(
+ api_key="test-key",
+ base_url="http://testserver",
+ http_client=tc,
+ )
+
+
+def _capturing(handler):
+ """Wrap *handler* so the parsed ``CreateResponse`` is captured."""
+ _captured.clear()
+
+ def wrapper(request, context, cancellation_signal):
+ _captured["request"] = request
+ _captured["context"] = context
+ return handler(request, context, cancellation_signal)
+
+ return wrapper
+
+
+# ---------------------------------------------------------------------------
+# Handler factories
+# ---------------------------------------------------------------------------
+# Each factory returns a handler that emits specific output item(s).
+# Handlers follow the (request, context, cancellation_signal) -> AsyncIterator
+# signature required by ResponsesAgentServerHost.
+
+
+def _text_message_handler(text: str = "Hello, world!"):
+ def handler(request, context, cancellation_signal):
+ async def events():
+ s = ResponseEventStream(response_id=context.response_id, model=request.model)
+ yield s.emit_created()
+ for ev in s.output_item_message(text):
+ yield ev
+ yield s.emit_completed()
+
+ return events()
+
+ return handler
+
+
+def _function_call_handler(
+ name: str = "get_weather",
+ call_id: str = "call_abc123",
+ arguments: str = '{"location":"Seattle"}',
+):
+ def handler(request, context, cancellation_signal):
+ async def events():
+ s = ResponseEventStream(response_id=context.response_id, model=request.model)
+ yield s.emit_created()
+ for ev in s.output_item_function_call(name, call_id, arguments):
+ yield ev
+ yield s.emit_completed()
+
+ return events()
+
+ return handler
+
+
+def _function_call_output_handler(
+ call_id: str = "call_abc123",
+ output: str = "72°F and sunny",
+):
+ def handler(request, context, cancellation_signal):
+ async def events():
+ s = ResponseEventStream(response_id=context.response_id, model=request.model)
+ yield s.emit_created()
+ for ev in s.output_item_function_call_output(call_id, output):
+ yield ev
+ yield s.emit_completed()
+
+ return events()
+
+ return handler
+
+
+def _reasoning_handler(summary: str = "Let me think step by step..."):
+ def handler(request, context, cancellation_signal):
+ async def events():
+ s = ResponseEventStream(response_id=context.response_id, model=request.model)
+ yield s.emit_created()
+ for ev in s.output_item_reasoning_item(summary):
+ yield ev
+ yield s.emit_completed()
+
+ return events()
+
+ return handler
+
+
+def _file_search_handler():
+ def handler(request, context, cancellation_signal):
+ async def events():
+ s = ResponseEventStream(response_id=context.response_id, model=request.model)
+ yield s.emit_created()
+ b = s.add_output_item_file_search_call()
+ yield b.emit_added()
+ yield b.emit_in_progress()
+ yield b.emit_searching()
+ yield b.emit_completed()
+ yield b.emit_done()
+ yield s.emit_completed()
+
+ return events()
+
+ return handler
+
+
+def _web_search_handler():
+ """Emit a web_search_call with a valid ``action`` payload.
+
+ The OpenAI SDK requires ``action`` to be a discriminated union member
+ (``ActionSearch``, etc.) so we use the low-level builder and override
+ the item to include a valid search action.
+ """
+
+ def handler(request, context, cancellation_signal):
+ async def events():
+ s = ResponseEventStream(response_id=context.response_id, model=request.model)
+ yield s.emit_created()
+ b = s.add_output_item_web_search_call()
+ # Override the added item to include a valid action.
+ added = b.emit_added()
+ item = added.get("item", {})
+ item["action"] = {"type": "search", "query": "test query"}
+ yield added
+ yield b.emit_searching()
+ yield b.emit_completed()
+ done = b.emit_done()
+ done_item = done.get("item", {})
+ done_item["action"] = {"type": "search", "query": "test query"}
+ yield done
+ yield s.emit_completed()
+
+ return events()
+
+ return handler
+
+
+def _code_interpreter_handler(code: str = "print('hello')"):
+ def handler(request, context, cancellation_signal):
+ async def events():
+ s = ResponseEventStream(response_id=context.response_id, model=request.model)
+ yield s.emit_created()
+ b = s.add_output_item_code_interpreter_call()
+ yield b.emit_added()
+ for ev in b.code(code):
+ yield ev
+ yield b.emit_completed()
+ yield b.emit_done()
+ yield s.emit_completed()
+
+ return events()
+
+ return handler
+
+
+def _image_gen_handler():
+ def handler(request, context, cancellation_signal):
+ async def events():
+ s = ResponseEventStream(response_id=context.response_id, model=request.model)
+ yield s.emit_created()
+ b = s.add_output_item_image_gen_call()
+ yield b.emit_added()
+ yield b.emit_generating()
+ yield b.emit_completed()
+ yield b.emit_done()
+ yield s.emit_completed()
+
+ return events()
+
+ return handler
+
+
+def _mcp_call_handler(
+ server_label: str = "my-server",
+ name: str = "search_docs",
+):
+ def handler(request, context, cancellation_signal):
+ async def events():
+ s = ResponseEventStream(response_id=context.response_id, model=request.model)
+ yield s.emit_created()
+ b = s.add_output_item_mcp_call(server_label, name)
+ yield b.emit_added()
+ for ev in b.arguments('{"query": "test"}'):
+ yield ev
+ yield b.emit_completed()
+ yield b.emit_done()
+ yield s.emit_completed()
+
+ return events()
+
+ return handler
+
+
+def _mcp_list_tools_handler(server_label: str = "my-server"):
+ def handler(request, context, cancellation_signal):
+ async def events():
+ s = ResponseEventStream(response_id=context.response_id, model=request.model)
+ yield s.emit_created()
+ b = s.add_output_item_mcp_list_tools(server_label)
+ yield b.emit_added()
+ yield b.emit_completed()
+ yield b.emit_done()
+ yield s.emit_completed()
+
+ return events()
+
+ return handler
+
+
+def _multiple_items_handler():
+ """Emit a message, a function call, and a reasoning item."""
+
+ def handler(request, context, cancellation_signal):
+ async def events():
+ s = ResponseEventStream(response_id=context.response_id, model=request.model)
+ yield s.emit_created()
+ for ev in s.output_item_message("Here is the result."):
+ yield ev
+ for ev in s.output_item_function_call("lookup", "call_multi", '{"id": 42}'):
+ yield ev
+ for ev in s.output_item_reasoning_item("Analyzing the data..."):
+ yield ev
+ yield s.emit_completed()
+
+ return events()
+
+ return handler
+
+
+# ---------------------------------------------------------------------------
+# Non-streaming round-trip tests — text message
+# ---------------------------------------------------------------------------
+
+
+class TestEmitTextMessage:
+ """SDK parses a single text message response."""
+
+ def test_basic_text(self):
+ client = _make_sdk_client(_text_message_handler("Hello, world!"))
+ resp = client.responses.create(model="test-model", input="hi")
+
+ assert resp.status == "completed"
+ assert resp.model == "test-model"
+ assert resp.object == "response"
+ assert len(resp.output) == 1
+
+ msg = resp.output[0]
+ assert isinstance(msg, ResponseOutputMessage)
+ assert msg.type == "message"
+ assert msg.role == "assistant"
+ assert len(msg.content) == 1
+
+ text = msg.content[0]
+ assert isinstance(text, ResponseOutputText)
+ assert text.type == "output_text"
+ assert text.text == "Hello, world!"
+
+ def test_empty_text(self):
+ client = _make_sdk_client(_text_message_handler(""))
+ resp = client.responses.create(model="test", input="hi")
+ msg = resp.output[0]
+ assert isinstance(msg, ResponseOutputMessage)
+ assert msg.content[0].text == ""
+
+ def test_long_text(self):
+ long = "x" * 10_000
+ client = _make_sdk_client(_text_message_handler(long))
+ resp = client.responses.create(model="test", input="hi")
+ assert resp.output[0].content[0].text == long
+
+ def test_unicode_text(self):
+ text = "Hello 🌍! Ñoño. 日本語テスト"
+ client = _make_sdk_client(_text_message_handler(text))
+ resp = client.responses.create(model="test", input="hi")
+ assert resp.output[0].content[0].text == text
+
+
+# ---------------------------------------------------------------------------
+# Non-streaming round-trip tests — function call
+# ---------------------------------------------------------------------------
+
+
+class TestEmitFunctionCall:
+ """SDK parses a function tool call response."""
+
+ def test_basic_function_call(self):
+ client = _make_sdk_client(_function_call_handler("get_weather", "call_123", '{"city":"NYC"}'))
+ resp = client.responses.create(model="test", input="weather?")
+
+ assert resp.status == "completed"
+ assert len(resp.output) == 1
+
+ fc = resp.output[0]
+ assert isinstance(fc, ResponseFunctionToolCall)
+ assert fc.type == "function_call"
+ assert fc.name == "get_weather"
+ assert fc.call_id == "call_123"
+ assert fc.arguments == '{"city":"NYC"}'
+
+ def test_empty_arguments(self):
+ client = _make_sdk_client(_function_call_handler("ping", "call_empty", ""))
+ resp = client.responses.create(model="test", input="ping")
+ fc = resp.output[0]
+ assert isinstance(fc, ResponseFunctionToolCall)
+ assert fc.arguments == ""
+
+
+# ---------------------------------------------------------------------------
+# Non-streaming round-trip tests — function call output
+# ---------------------------------------------------------------------------
+
+
+class TestEmitFunctionCallOutput:
+ """SDK parses a function_call_output item."""
+
+ def test_basic_output(self):
+ client = _make_sdk_client(_function_call_output_handler("call_abc", "72°F"))
+ resp = client.responses.create(model="test", input="hi")
+
+ assert resp.status == "completed"
+ assert len(resp.output) == 1
+
+ item = resp.output[0]
+ assert isinstance(item, ResponseFunctionToolCallOutputItem)
+ assert item.type == "function_call_output"
+ assert item.call_id == "call_abc"
+ assert item.output == "72°F"
+
+
+# ---------------------------------------------------------------------------
+# Non-streaming round-trip tests — reasoning
+# ---------------------------------------------------------------------------
+
+
+class TestEmitReasoning:
+ """SDK parses a reasoning item response."""
+
+ def test_basic_reasoning(self):
+ client = _make_sdk_client(_reasoning_handler("Step 1: Analyze the problem."))
+ resp = client.responses.create(model="test", input="think")
+
+ assert resp.status == "completed"
+ assert len(resp.output) == 1
+
+ item = resp.output[0]
+ assert isinstance(item, ResponseReasoningItem)
+ assert item.type == "reasoning"
+ assert len(item.summary) == 1
+ assert item.summary[0].text == "Step 1: Analyze the problem."
+
+
+# ---------------------------------------------------------------------------
+# Non-streaming round-trip tests — file search call
+# ---------------------------------------------------------------------------
+
+
+class TestEmitFileSearchCall:
+ """SDK parses a file_search_call item."""
+
+ def test_basic_file_search(self):
+ client = _make_sdk_client(_file_search_handler())
+ resp = client.responses.create(model="test", input="search files")
+
+ assert resp.status == "completed"
+ assert len(resp.output) == 1
+
+ item = resp.output[0]
+ assert isinstance(item, ResponseFileSearchToolCall)
+ assert item.type == "file_search_call"
+ assert item.status == "completed"
+
+
+# ---------------------------------------------------------------------------
+# Non-streaming round-trip tests — web search call
+# ---------------------------------------------------------------------------
+
+
+class TestEmitWebSearchCall:
+ """SDK parses a web_search_call item.
+
+ The web search builder needs a valid ``action`` dict
+ (e.g. ``{"type": "search", "query": "..."}``).
+ """
+
+ def test_basic_web_search(self):
+ client = _make_sdk_client(_web_search_handler())
+ resp = client.responses.create(model="test", input="search web")
+
+ assert resp.status == "completed"
+ assert len(resp.output) == 1
+
+ item = resp.output[0]
+ assert item.type == "web_search_call"
+ assert item.status == "completed"
+
+
+# ---------------------------------------------------------------------------
+# Non-streaming round-trip tests — code interpreter call
+# ---------------------------------------------------------------------------
+
+
+class TestEmitCodeInterpreterCall:
+ """SDK parses a code_interpreter_call item."""
+
+ def test_basic_code_interpreter(self):
+ client = _make_sdk_client(_code_interpreter_handler("print('hi')"))
+ resp = client.responses.create(model="test", input="run code")
+
+ assert resp.status == "completed"
+ assert len(resp.output) == 1
+
+ item = resp.output[0]
+ assert isinstance(item, ResponseCodeInterpreterToolCall)
+ assert item.type == "code_interpreter_call"
+ assert item.code == "print('hi')"
+ assert item.status == "completed"
+
+
+# ---------------------------------------------------------------------------
+# Non-streaming round-trip tests — image generation call
+# ---------------------------------------------------------------------------
+
+
+class TestEmitImageGenCall:
+ """SDK parses an image_generation_call item."""
+
+ def test_basic_image_gen(self):
+ client = _make_sdk_client(_image_gen_handler())
+ resp = client.responses.create(model="test", input="draw cat")
+
+ assert resp.status == "completed"
+ assert len(resp.output) == 1
+
+ item = resp.output[0]
+ assert isinstance(item, ImageGenerationCall)
+ assert item.type == "image_generation_call"
+ assert item.status == "completed"
+
+
+# ---------------------------------------------------------------------------
+# Non-streaming round-trip tests — MCP call
+# ---------------------------------------------------------------------------
+
+
+class TestEmitMcpCall:
+ """SDK parses an mcp_call item."""
+
+ def test_basic_mcp_call(self):
+ client = _make_sdk_client(_mcp_call_handler("tool-server", "do_stuff"))
+ resp = client.responses.create(model="test", input="mcp")
+
+ assert resp.status == "completed"
+ assert len(resp.output) == 1
+
+ item = resp.output[0]
+ assert isinstance(item, McpCall)
+ assert item.type == "mcp_call"
+ assert item.name == "do_stuff"
+ assert item.server_label == "tool-server"
+
+
+# ---------------------------------------------------------------------------
+# Non-streaming round-trip tests — MCP list tools
+# ---------------------------------------------------------------------------
+
+
+class TestEmitMcpListTools:
+ """SDK parses an mcp_list_tools item."""
+
+ def test_basic_list_tools(self):
+ client = _make_sdk_client(_mcp_list_tools_handler("tool-server"))
+ resp = client.responses.create(model="test", input="list")
+
+ assert resp.status == "completed"
+ assert len(resp.output) == 1
+
+ item = resp.output[0]
+ assert isinstance(item, McpListTools)
+ assert item.type == "mcp_list_tools"
+ assert item.server_label == "tool-server"
+
+
+# ---------------------------------------------------------------------------
+# Non-streaming round-trip tests — multiple items
+# ---------------------------------------------------------------------------
+
+
+class TestEmitMultipleItems:
+ """SDK parses a response with multiple output item types."""
+
+ def test_message_function_call_reasoning(self):
+ client = _make_sdk_client(_multiple_items_handler())
+ resp = client.responses.create(model="test", input="multi")
+
+ assert resp.status == "completed"
+ assert len(resp.output) == 3
+
+ # 1. text message
+ msg = resp.output[0]
+ assert isinstance(msg, ResponseOutputMessage)
+ assert msg.content[0].text == "Here is the result."
+
+ # 2. function call
+ fc = resp.output[1]
+ assert isinstance(fc, ResponseFunctionToolCall)
+ assert fc.name == "lookup"
+ assert fc.call_id == "call_multi"
+
+ # 3. reasoning
+ reason = resp.output[2]
+ assert isinstance(reason, ResponseReasoningItem)
+ assert len(reason.summary) == 1
+
+
+# ---------------------------------------------------------------------------
+# Response properties tests
+# ---------------------------------------------------------------------------
+
+
+class TestResponseProperties:
+ """Verify standard Response properties are populated."""
+
+ def test_response_id_present(self):
+ client = _make_sdk_client(_text_message_handler())
+ resp = client.responses.create(model="test", input="hi")
+ assert resp.id is not None
+ assert len(resp.id) > 0
+
+ def test_model_preserved(self):
+ client = _make_sdk_client(_text_message_handler())
+ resp = client.responses.create(model="my-custom-model", input="hi")
+ assert resp.model == "my-custom-model"
+
+ def test_object_type(self):
+ client = _make_sdk_client(_text_message_handler())
+ resp = client.responses.create(model="test", input="hi")
+ assert resp.object == "response"
+
+ def test_created_at_present(self):
+ client = _make_sdk_client(_text_message_handler())
+ resp = client.responses.create(model="test", input="hi")
+ assert resp.created_at is not None
+
+ def test_completed_status(self):
+ client = _make_sdk_client(_text_message_handler())
+ resp = client.responses.create(model="test", input="hi")
+ assert resp.status == "completed"
+
+
+# ---------------------------------------------------------------------------
+# Input round-trip tests
+# ---------------------------------------------------------------------------
+
+
+class TestInputRoundTrip:
+ """Verify that SDK-built requests are correctly parsed by the server."""
+
+ def test_string_input(self):
+ handler = _text_message_handler()
+ client = _make_sdk_client(_capturing(handler))
+ client.responses.create(model="test", input="Hello, server!")
+
+ req = _captured["request"]
+ assert req is not None
+
+ def test_message_input(self):
+ handler = _text_message_handler()
+ client = _make_sdk_client(_capturing(handler))
+ client.responses.create(
+ model="test",
+ input=[{"role": "user", "content": "What is 2+2?"}],
+ )
+ req = _captured["request"]
+ assert req is not None
+
+ def test_multi_turn_input(self):
+ handler = _text_message_handler()
+ client = _make_sdk_client(_capturing(handler))
+ client.responses.create(
+ model="test",
+ input=[
+ {"role": "user", "content": "Hi"},
+ {"role": "assistant", "content": "Hello!"},
+ {"role": "user", "content": "How are you?"},
+ ],
+ )
+ req = _captured["request"]
+ assert req is not None
+
+ def test_model_in_request(self):
+ handler = _text_message_handler()
+ client = _make_sdk_client(_capturing(handler))
+ client.responses.create(model="gpt-4o", input="hi")
+ assert _captured["request"].model == "gpt-4o"
+
+ def test_instructions_in_request(self):
+ handler = _text_message_handler()
+ client = _make_sdk_client(_capturing(handler))
+ client.responses.create(model="test", input="hi", instructions="Be helpful")
+ assert _captured["request"].instructions == "Be helpful"
+
+ def test_temperature_in_request(self):
+ handler = _text_message_handler()
+ client = _make_sdk_client(_capturing(handler))
+ client.responses.create(model="test", input="hi", temperature=0.7)
+ assert _captured["request"].temperature == pytest.approx(0.7)
+
+ def test_tools_in_request(self):
+ handler = _text_message_handler()
+ client = _make_sdk_client(_capturing(handler))
+ client.responses.create(
+ model="test",
+ input="hi",
+ tools=[
+ {
+ "type": "function",
+ "name": "get_weather",
+ "parameters": {
+ "type": "object",
+ "properties": {"city": {"type": "string"}},
+ },
+ }
+ ],
+ )
+ req = _captured["request"]
+ assert req.tools is not None
+ assert len(req.tools) >= 1
+
+ def test_max_output_tokens_in_request(self):
+ handler = _text_message_handler()
+ client = _make_sdk_client(_capturing(handler))
+ client.responses.create(
+ model="test",
+ input="hi",
+ max_output_tokens=1024,
+ )
+ assert _captured["request"].max_output_tokens == 1024
+
+
+# ---------------------------------------------------------------------------
+# Streaming round-trip tests
+# ---------------------------------------------------------------------------
+
+
+class TestStreamingRoundTrip:
+ """Verify SDK streaming integration.
+
+ Streaming through OpenAI SDK + TestClient exercises the full SSE
+ pipeline: server emits events → TestClient → httpx → OpenAI SDK parser.
+ """
+
+ def test_stream_text_message(self):
+ """Stream yields events and the final response has the expected text."""
+ handler = _text_message_handler("Streamed text")
+ app = ResponsesAgentServerHost()
+ app.create_handler(handler)
+ tc = TestClient(app)
+ client = openai.OpenAI(
+ api_key="test-key",
+ base_url="http://testserver",
+ http_client=tc,
+ )
+
+ events_seen: list[str] = []
+ with client.responses.create(
+ model="test",
+ input="hi",
+ stream=True,
+ ) as stream:
+ for event in stream:
+ events_seen.append(event.type)
+
+ # Expect lifecycle events including created, output_item, and completed.
+ assert "response.created" in events_seen
+ assert "response.completed" in events_seen
+ assert any("output_text" in t for t in events_seen)
+
+ def test_stream_function_call(self):
+ """Stream a function call and verify argument events."""
+ handler = _function_call_handler("my_func", "call_s1", '{"x": 1}')
+ app = ResponsesAgentServerHost()
+ app.create_handler(handler)
+ tc = TestClient(app)
+ client = openai.OpenAI(
+ api_key="test-key",
+ base_url="http://testserver",
+ http_client=tc,
+ )
+
+ events_seen: list[str] = []
+ with client.responses.create(
+ model="test",
+ input="do it",
+ stream=True,
+ ) as stream:
+ for event in stream:
+ events_seen.append(event.type)
+
+ assert "response.created" in events_seen
+ assert "response.completed" in events_seen
+ assert any("function_call_arguments" in t for t in events_seen)
+
+ def test_stream_multiple_items(self):
+ """Stream a mix of output items."""
+ handler = _multiple_items_handler()
+ app = ResponsesAgentServerHost()
+ app.create_handler(handler)
+ tc = TestClient(app)
+ client = openai.OpenAI(
+ api_key="test-key",
+ base_url="http://testserver",
+ http_client=tc,
+ )
+
+ events_seen: list[str] = []
+ with client.responses.create(
+ model="test",
+ input="all",
+ stream=True,
+ ) as stream:
+ for event in stream:
+ events_seen.append(event.type)
+
+ # Should see events for message, function call, and reasoning.
+ assert "response.output_item.added" in events_seen
+ assert "response.completed" in events_seen
+ # Multiple output_item.added events (one per item)
+ added_count = events_seen.count("response.output_item.added")
+ assert added_count == 3, f"expected 3 output_item.added, got {added_count}"
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/__init__.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/__init__.py
new file mode 100644
index 000000000000..9a0454564dbb
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/__init__.py
@@ -0,0 +1,2 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_builders.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_builders.py
new file mode 100644
index 000000000000..b3b9119be173
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_builders.py
@@ -0,0 +1,297 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Phase E Part D stream builder parity tests."""
+
+from __future__ import annotations
+
+from azure.ai.agentserver.responses import (
+ OutputItemFunctionCallBuilder,
+ OutputItemFunctionCallOutputBuilder,
+ OutputItemMessageBuilder,
+ ResponseEventStream,
+ TextContentBuilder,
+)
+from azure.ai.agentserver.responses._id_generator import IdGenerator
+
+
+def test_text_content_builder__emits_added_delta_done_events() -> None:
+ stream = ResponseEventStream(response_id="resp_builder_1")
+ stream.emit_created()
+ message = stream.add_output_item_message()
+ message.emit_added()
+ text = message.add_text_content()
+
+ added = text.emit_added()
+ delta = text.emit_delta("hello")
+ done = text.emit_done()
+
+ assert isinstance(text, TextContentBuilder)
+ assert added["type"] == "response.content_part.added"
+ assert delta["type"] == "response.output_text.delta"
+ assert done["type"] == "response.output_text.done"
+ assert done["text"] == "hello"
+
+
+def test_text_content_builder__emit_done_merges_all_delta_fragments() -> None:
+ stream = ResponseEventStream(response_id="resp_builder_1b")
+ stream.emit_created()
+ message = stream.add_output_item_message()
+ message.emit_added()
+ text = message.add_text_content()
+
+ text.emit_added()
+ text.emit_delta("hello")
+ text.emit_delta(" ")
+ text.emit_delta("world")
+ done = text.emit_done()
+
+ assert done["type"] == "response.output_text.done"
+ assert done["text"] == "hello world"
+ assert text.final_text == "hello world"
+
+
+def test_output_item_message_builder__emits_added_content_done_and_done() -> None:
+ stream = ResponseEventStream(response_id="resp_builder_2")
+ stream.emit_created()
+ message = stream.add_output_item_message()
+ text = message.add_text_content()
+
+ added = message.emit_added()
+ text.emit_added()
+ text.emit_delta("alpha")
+ text.emit_done()
+ content_done = message.emit_content_done(text)
+ done = message.emit_done()
+
+ assert isinstance(message, OutputItemMessageBuilder)
+ assert added["type"] == "response.output_item.added"
+ assert content_done["type"] == "response.content_part.done"
+ assert done["type"] == "response.output_item.done"
+ assert done["item"]["type"] == "message"
+ assert done["item"]["content"][0]["text"] == "alpha"
+
+
+def test_output_item_function_call_builder__emits_arguments_and_done_events() -> None:
+ stream = ResponseEventStream(response_id="resp_builder_3")
+ stream.emit_created()
+ function_call = stream.add_output_item_function_call("get_weather", "call_1")
+
+ added = function_call.emit_added()
+ delta = function_call.emit_arguments_delta('{"loc')
+ args_done = function_call.emit_arguments_done('{"location": "Seattle"}')
+ done = function_call.emit_done()
+
+ assert isinstance(function_call, OutputItemFunctionCallBuilder)
+ assert added["type"] == "response.output_item.added"
+ assert delta["type"] == "response.function_call_arguments.delta"
+ assert args_done["type"] == "response.function_call_arguments.done"
+ assert done["type"] == "response.output_item.done"
+ assert done["item"]["name"] == "get_weather"
+ assert done["item"]["call_id"] == "call_1"
+ assert done["item"]["arguments"] == '{"location": "Seattle"}'
+
+
+def test_output_item_function_call_output_builder__emits_added_and_done_events() -> None:
+ stream = ResponseEventStream(response_id="resp_builder_3b")
+ stream.emit_created()
+ function_output = stream.add_output_item_function_call_output("call_1")
+
+ added = function_output.emit_added("partial")
+ done = function_output.emit_done("result")
+
+ assert isinstance(function_output, OutputItemFunctionCallOutputBuilder)
+ assert added["type"] == "response.output_item.added"
+ assert added["item"]["type"] == "function_call_output"
+ assert added["item"]["call_id"] == "call_1"
+ assert done["type"] == "response.output_item.done"
+ assert done["item"]["output"] == "result"
+
+
+def test_output_item_events__item_has_response_id_and_agent_reference() -> None:
+ """B20/B21 — output items carry response_id and agent_reference stamped by with_output_item_defaults."""
+ stream = ResponseEventStream(
+ response_id="resp_builder_3c",
+ agent_reference={"type": "agent_reference", "name": "agent-a"},
+ )
+ stream.emit_created()
+ function_call = stream.add_output_item_function_call("get_weather", "call_2")
+
+ added = function_call.emit_added()
+ done = function_call.emit_done()
+
+ assert added["item"]["response_id"] == "resp_builder_3c"
+ assert added["item"]["agent_reference"]["name"] == "agent-a"
+ assert done["item"]["response_id"] == "resp_builder_3c"
+ assert done["item"]["agent_reference"]["name"] == "agent-a"
+
+
+def test_stream_builders__share_global_sequence_number() -> None:
+ stream = ResponseEventStream(response_id="resp_builder_4")
+ stream.emit_created()
+ stream.emit_in_progress()
+ message = stream.add_output_item_message()
+ event = message.emit_added()
+
+ assert event["sequence_number"] == 2
+
+
+def test_message_builder__output_index_increments_across_factories() -> None:
+ stream = ResponseEventStream(response_id="resp_builder_5")
+ stream.emit_created()
+ message = stream.add_output_item_message()
+ function_call = stream.add_output_item_function_call("fn", "call_1")
+ function_output = stream.add_output_item_function_call_output("call_2")
+
+ assert message.output_index == 0
+ assert function_call.output_index == 1
+ assert function_output.output_index == 2
+
+
+def test_message_builder__emit_done_requires_completed_content() -> None:
+ stream = ResponseEventStream(response_id="resp_builder_6")
+ stream.emit_created()
+ message = stream.add_output_item_message()
+ message.emit_added()
+
+ import pytest
+
+ with pytest.raises(ValueError):
+ message.emit_done()
+
+
+def test_builder_events__include_required_payload_fields_per_event_type() -> None:
+ stream = ResponseEventStream(response_id="resp_builder_7")
+ stream.emit_created()
+
+ code_interpreter = stream.add_output_item_code_interpreter_call()
+ code_delta = code_interpreter.emit_code_delta("print('hi')")
+ code_done = code_interpreter.emit_code_done("print('hi')")
+
+ image_gen = stream.add_output_item_image_gen_call()
+ partial_image = image_gen.emit_partial_image("ZmFrZS1pbWFnZQ==")
+
+ custom_tool = stream.add_output_item_custom_tool_call("call_7", "custom")
+ input_done = custom_tool.emit_input_done('{"ok": true}')
+
+ function_call = stream.add_output_item_function_call("tool_fn", "call_fn_7")
+ args_done = function_call.emit_arguments_done('{"city": "Seattle"}')
+
+ mcp_call = stream.add_output_item_mcp_call("srv", "tool")
+ mcp_args_done = mcp_call.emit_arguments_done('{"arg": 1}')
+
+ message = stream.add_output_item_message()
+ message.emit_added()
+ refusal = message.add_refusal_content()
+ refusal.emit_added()
+ refusal.emit_done("cannot comply")
+ refusal_part_done = message.emit_content_done(refusal)
+
+ reasoning = stream.add_output_item_reasoning_item()
+ reasoning.emit_added()
+ summary = reasoning.add_summary_part()
+ summary_added = summary.emit_added()
+ summary.emit_text_done("short reason")
+ summary_done = summary.emit_done()
+ reasoning.emit_summary_part_done(summary)
+ reasoning_item_done = reasoning.emit_done()
+
+ assert code_delta["type"] == "response.code_interpreter_call_code.delta"
+ assert code_delta["item_id"] == code_interpreter.item_id
+ assert code_delta["delta"] == "print('hi')"
+
+ assert code_done["type"] == "response.code_interpreter_call_code.done"
+ assert code_done["item_id"] == code_interpreter.item_id
+ assert code_done["code"] == "print('hi')"
+
+ assert partial_image["type"] == "response.image_generation_call.partial_image"
+ assert partial_image["item_id"] == image_gen.item_id
+ assert partial_image["partial_image_index"] == 0
+ assert partial_image["partial_image_b64"] == "ZmFrZS1pbWFnZQ=="
+
+ assert input_done["type"] == "response.custom_tool_call_input.done"
+ assert input_done["item_id"] == custom_tool.item_id
+ assert input_done["input"] == '{"ok": true}'
+
+ assert args_done["type"] == "response.function_call_arguments.done"
+ assert args_done["item_id"] == function_call.item_id
+ assert args_done["name"] == "tool_fn"
+ assert args_done["arguments"] == '{"city": "Seattle"}'
+
+ assert mcp_args_done["type"] == "response.mcp_call_arguments.done"
+ assert mcp_args_done["item_id"] == mcp_call.item_id
+ assert mcp_args_done["arguments"] == '{"arg": 1}'
+
+ assert refusal_part_done["type"] == "response.content_part.done"
+ assert refusal_part_done["part"]["type"] == "refusal"
+ assert refusal_part_done["part"]["refusal"] == "cannot comply"
+
+ assert summary_added["type"] == "response.reasoning_summary_part.added"
+ assert summary_added["part"]["type"] == "summary_text"
+ assert summary_added["part"]["text"] == ""
+
+ assert summary_done["type"] == "response.reasoning_summary_part.done"
+ assert summary_done["part"]["type"] == "summary_text"
+ assert summary_done["part"]["text"] == "short reason"
+
+ assert reasoning_item_done["type"] == "response.output_item.done"
+ assert reasoning_item_done["item"]["summary"][0]["type"] == "summary_text"
+ assert reasoning_item_done["item"]["summary"][0]["text"] == "short reason"
+
+
+def test_stream_item_id_generation__uses_expected_shape_and_response_partition_key() -> None:
+ response_id = IdGenerator.new_response_id()
+ stream = ResponseEventStream(response_id=response_id)
+
+ generated_item_ids = [
+ stream.add_output_item_message().item_id,
+ stream.add_output_item_function_call("fn", "call_a").item_id,
+ stream.add_output_item_function_call_output("call_b").item_id,
+ stream.add_output_item_reasoning_item().item_id,
+ stream.add_output_item_file_search_call().item_id,
+ stream.add_output_item_web_search_call().item_id,
+ stream.add_output_item_code_interpreter_call().item_id,
+ stream.add_output_item_image_gen_call().item_id,
+ stream.add_output_item_mcp_call("srv", "tool").item_id,
+ stream.add_output_item_mcp_list_tools("srv").item_id,
+ stream.add_output_item_custom_tool_call("call_c", "custom").item_id,
+ ]
+
+ response_partition_key = IdGenerator.extract_partition_key(response_id)
+ for item_id in generated_item_ids:
+ assert IdGenerator.extract_partition_key(item_id) == response_partition_key
+ body = item_id.split("_", maxsplit=1)[1]
+ assert len(body) == 50
+
+
+def test_response_event_stream__exposes_mutable_response_snapshot_for_lifecycle_events() -> None:
+ stream = ResponseEventStream(response_id="resp_builder_snapshot", model="gpt-4o-mini")
+ stream.response.temperature = 1
+ stream.response.metadata = {"source": "unit-test"}
+
+ created = stream.emit_created()
+
+ assert created["type"] == "response.created"
+ assert created["response"]["id"] == "resp_builder_snapshot"
+ assert created["response"]["model"] == "gpt-4o-mini"
+ assert created["response"]["temperature"] == 1
+ assert created["response"]["metadata"] == {"source": "unit-test"}
+
+
+def test_response_event_stream__tracks_completed_output_items_into_response_output() -> None:
+ stream = ResponseEventStream(response_id="resp_builder_output")
+ stream.emit_created()
+
+ message = stream.add_output_item_message()
+ message.emit_added()
+ text = message.add_text_content()
+ text.emit_added()
+ text.emit_delta("hello")
+ text.emit_done()
+ message.emit_content_done(text)
+ done = message.emit_done()
+
+ assert done["type"] == "response.output_item.done"
+ output_item = stream.response.output[0].as_dict()
+ assert output_item["id"] == message.item_id
+ assert output_item["type"] == "message"
+ assert output_item["content"][0]["text"] == "hello"
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_event_stream_generators.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_event_stream_generators.py
new file mode 100644
index 000000000000..d8a27f3a3c79
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_event_stream_generators.py
@@ -0,0 +1,274 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Unit tests for ResponseEventStream generator convenience methods."""
+
+from __future__ import annotations
+
+import pytest
+
+from azure.ai.agentserver.responses.streaming._event_stream import ResponseEventStream
+
+RESPONSE_ID = "resp_gen_test_12345"
+
+
+def _make_stream(**kwargs) -> ResponseEventStream:
+ return ResponseEventStream(response_id=RESPONSE_ID, **kwargs)
+
+
+def _started_stream(**kwargs) -> ResponseEventStream:
+ """Return a stream that has already emitted created + in_progress events."""
+ stream = _make_stream(**kwargs)
+ stream.emit_created()
+ stream.emit_in_progress()
+ return stream
+
+
+# ---- output_item_message() ----
+
+
+def test_output_item_message_yields_full_lifecycle() -> None:
+ stream = _started_stream()
+ events = list(stream.output_item_message("Hello world"))
+
+ assert len(events) == 6
+ types = [e["type"] for e in events]
+ assert types == [
+ "response.output_item.added",
+ "response.content_part.added",
+ "response.output_text.delta",
+ "response.output_text.done",
+ "response.content_part.done",
+ "response.output_item.done",
+ ]
+
+ # Verify text content in delta event
+ delta_event = events[2]
+ assert delta_event["delta"] == "Hello world"
+
+ # Verify text content in done event
+ done_event = events[3]
+ assert done_event["text"] == "Hello world"
+
+
+# ---- output_item_function_call() ----
+
+
+def test_output_item_function_call_yields_full_lifecycle() -> None:
+ stream = _started_stream()
+ events = list(stream.output_item_function_call("get_weather", "call_abc", '{"city":"Seattle"}'))
+
+ assert len(events) == 4
+ types = [e["type"] for e in events]
+ assert types == [
+ "response.output_item.added",
+ "response.function_call_arguments.delta",
+ "response.function_call_arguments.done",
+ "response.output_item.done",
+ ]
+
+ # Verify function name in added event
+ added_item = events[0]["item"]
+ assert added_item["name"] == "get_weather"
+ assert added_item["call_id"] == "call_abc"
+
+ # Verify arguments in delta
+ assert events[1]["delta"] == '{"city":"Seattle"}'
+
+ # Verify arguments in done
+ assert events[2]["arguments"] == '{"city":"Seattle"}'
+
+
+# ---- output_item_function_call_output() ----
+
+
+def test_output_item_function_call_output_yields_added_and_done() -> None:
+ stream = _started_stream()
+ events = list(stream.output_item_function_call_output("call_abc", "Sunny, 72F"))
+
+ assert len(events) == 2
+ types = [e["type"] for e in events]
+ assert types == [
+ "response.output_item.added",
+ "response.output_item.done",
+ ]
+
+ # Verify output content
+ added_item = events[0]["item"]
+ assert added_item["call_id"] == "call_abc"
+ assert added_item["output"] == "Sunny, 72F"
+
+
+# ---- output_item_reasoning_item() ----
+
+
+def test_output_item_reasoning_item_yields_full_lifecycle() -> None:
+ stream = _started_stream()
+ events = list(stream.output_item_reasoning_item("The user asked about weather"))
+
+ assert len(events) == 6
+ types = [e["type"] for e in events]
+ assert types == [
+ "response.output_item.added",
+ "response.reasoning_summary_part.added",
+ "response.reasoning_summary_text.delta",
+ "response.reasoning_summary_text.done",
+ "response.reasoning_summary_part.done",
+ "response.output_item.done",
+ ]
+
+ # Verify summary text in delta
+ assert events[2]["delta"] == "The user asked about weather"
+
+ # Verify summary text in done
+ assert events[3]["text"] == "The user asked about weather"
+
+
+# ---- Sequence number continuity across generators ----
+
+
+def test_sequence_numbers_are_continuous_across_generators() -> None:
+ """Verify that sequence numbers increase monotonically when chaining generators."""
+ stream = _make_stream()
+ all_events: list[dict] = []
+ all_events.append(stream.emit_created())
+ all_events.append(stream.emit_in_progress())
+ all_events.extend(stream.output_item_message("hi"))
+ all_events.append(stream.emit_completed())
+
+ seq_numbers = [e["sequence_number"] for e in all_events]
+ assert seq_numbers == list(range(len(all_events)))
+
+
+# ---- Async generator variants ----
+
+
+async def _collect(async_iter):
+ """Collect all items from an async iterator."""
+ result = []
+ async for item in async_iter:
+ result.append(item)
+ return result
+
+
+@pytest.mark.asyncio
+async def test_aoutput_item_message_yields_same_as_sync() -> None:
+ stream = _started_stream()
+ sync_events = list(stream.output_item_message("hello async"))
+
+ stream2 = _started_stream()
+ async_events = await _collect(stream2.aoutput_item_message("hello async"))
+
+ assert len(async_events) == len(sync_events)
+ for s, a in zip(sync_events, async_events):
+ assert s["type"] == a["type"]
+
+
+@pytest.mark.asyncio
+async def test_aoutput_item_message_streams_deltas() -> None:
+ """Verify that AsyncIterable[str] input produces one delta per chunk."""
+
+ async def chunks():
+ yield "Hello"
+ yield " world"
+ yield "!"
+
+ stream = _started_stream()
+ events = await _collect(stream.aoutput_item_message(chunks()))
+
+ # added, content_added, delta("Hello"), delta(" world"), delta("!"),
+ # text_done("Hello world!"), content_done, item_done
+ assert len(events) == 8
+ types = [e["type"] for e in events]
+ assert types == [
+ "response.output_item.added",
+ "response.content_part.added",
+ "response.output_text.delta",
+ "response.output_text.delta",
+ "response.output_text.delta",
+ "response.output_text.done",
+ "response.content_part.done",
+ "response.output_item.done",
+ ]
+
+ # Verify individual deltas
+ assert events[2]["delta"] == "Hello"
+ assert events[3]["delta"] == " world"
+ assert events[4]["delta"] == "!"
+
+ # Verify accumulated done text
+ assert events[5]["text"] == "Hello world!"
+
+
+@pytest.mark.asyncio
+async def test_aoutput_item_function_call_yields_same_as_sync() -> None:
+ stream = _started_stream()
+ sync_events = list(stream.output_item_function_call("fn", "call_1", '{"x":1}'))
+
+ stream2 = _started_stream()
+ async_events = await _collect(stream2.aoutput_item_function_call("fn", "call_1", '{"x":1}'))
+
+ assert len(async_events) == len(sync_events)
+ for s, a in zip(sync_events, async_events):
+ assert s["type"] == a["type"]
+
+
+@pytest.mark.asyncio
+async def test_aoutput_item_function_call_streams_arguments() -> None:
+ """Verify streaming arguments via AsyncIterable[str]."""
+
+ async def arg_chunks():
+ yield '{"city":'
+ yield '"Seattle"}'
+
+ stream = _started_stream()
+ events = await _collect(stream.aoutput_item_function_call("get_weather", "call_1", arg_chunks()))
+
+ # added, delta, delta, args_done, item_done
+ assert len(events) == 5
+ assert events[1]["delta"] == '{"city":'
+ assert events[2]["delta"] == '"Seattle"}'
+ assert events[3]["arguments"] == '{"city":"Seattle"}'
+
+
+@pytest.mark.asyncio
+async def test_aoutput_item_function_call_output_yields_same_as_sync() -> None:
+ stream = _started_stream()
+ sync_events = list(stream.output_item_function_call_output("call_1", "result"))
+
+ stream2 = _started_stream()
+ async_events = await _collect(stream2.aoutput_item_function_call_output("call_1", "result"))
+
+ assert len(async_events) == len(sync_events)
+ for s, a in zip(sync_events, async_events):
+ assert s["type"] == a["type"]
+
+
+@pytest.mark.asyncio
+async def test_aoutput_item_reasoning_item_yields_same_as_sync() -> None:
+ stream = _started_stream()
+ sync_events = list(stream.output_item_reasoning_item("thinking..."))
+
+ stream2 = _started_stream()
+ async_events = await _collect(stream2.aoutput_item_reasoning_item("thinking..."))
+
+ assert len(async_events) == len(sync_events)
+ for s, a in zip(sync_events, async_events):
+ assert s["type"] == a["type"]
+
+
+@pytest.mark.asyncio
+async def test_aoutput_item_reasoning_item_streams_deltas() -> None:
+ """Verify streaming reasoning summary via AsyncIterable[str]."""
+
+ async def summary_chunks():
+ yield "Let me "
+ yield "think..."
+
+ stream = _started_stream()
+ events = await _collect(stream.aoutput_item_reasoning_item(summary_chunks()))
+
+ # added, part_added, delta, delta, text_done, part_done, item_done
+ assert len(events) == 7
+ assert events[2]["delta"] == "Let me "
+ assert events[3]["delta"] == "think..."
+ assert events[4]["text"] == "Let me think..."
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_foundry_storage_provider.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_foundry_storage_provider.py
new file mode 100644
index 000000000000..7c2524ade2e6
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_foundry_storage_provider.py
@@ -0,0 +1,682 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Unit tests for FoundryStorageProvider — validates HTTP request construction and
+response deserialization by mocking AsyncPipelineClient responses."""
+
+from __future__ import annotations
+
+import json
+from typing import Any
+from unittest.mock import AsyncMock, MagicMock
+
+import pytest
+
+from azure.ai.agentserver.responses._response_context import IsolationContext
+from azure.ai.agentserver.responses.store._foundry_errors import (
+ FoundryApiError,
+ FoundryBadRequestError,
+ FoundryResourceNotFoundError,
+)
+from azure.ai.agentserver.responses.store._foundry_provider import (
+ _CHAT_ISOLATION_HEADER,
+ _USER_ISOLATION_HEADER,
+ FoundryStorageProvider,
+)
+from azure.ai.agentserver.responses.store._foundry_settings import FoundryStorageSettings
+
+# ---------------------------------------------------------------------------
+# Helpers
+# ---------------------------------------------------------------------------
+
+_BASE_URL = "https://foundry.example.com/storage/"
+_SETTINGS = FoundryStorageSettings(storage_base_url=_BASE_URL)
+
+_RESPONSE_DICT: dict[str, Any] = {
+ "id": "resp_abc123",
+ "object": "response",
+ "status": "completed",
+ "output": [],
+ "model": "gpt-4o",
+ "created_at": 1710000000,
+}
+
+_INPUT_ITEM_DICT: dict[str, Any] = {
+ "id": "item_001",
+ "type": "message",
+ "role": "user",
+ "content": [{"type": "input_text", "text": "hello"}],
+}
+
+_OUTPUT_ITEM_DICT: dict[str, Any] = {
+ "id": "item_out_001",
+ "type": "message",
+ "role": "assistant",
+ "status": "completed",
+ "content": [{"type": "output_text", "text": "hi", "annotations": []}],
+}
+
+
+def _make_credential(token: str = "tok_test") -> Any:
+ """Return a mock async credential that always yields *token*."""
+ token_obj = MagicMock()
+ token_obj.token = token
+ cred = AsyncMock()
+ cred.get_token = AsyncMock(return_value=token_obj)
+ return cred
+
+
+def _make_response(status_code: int, body: Any) -> MagicMock:
+ """Build a mock azure.core.rest.HttpResponse with the given *status_code* and JSON *body*."""
+ content = json.dumps(body).encode("utf-8")
+ resp = MagicMock()
+ resp.status_code = status_code
+ resp.text = MagicMock(return_value=content.decode("utf-8"))
+ return resp
+
+
+def _make_provider(credential: Any, settings: FoundryStorageSettings, response: MagicMock) -> FoundryStorageProvider:
+ """Create a FoundryStorageProvider with a mocked pipeline client."""
+ provider = FoundryStorageProvider.__new__(FoundryStorageProvider)
+ provider._settings = settings
+ mock_client = AsyncMock()
+ mock_client.send_request = AsyncMock(return_value=response)
+ mock_client.close = AsyncMock()
+ provider._client = mock_client
+ return provider
+
+
+# ---------------------------------------------------------------------------
+# Fixtures
+# ---------------------------------------------------------------------------
+
+
+@pytest.fixture()
+def credential() -> Any:
+ return _make_credential()
+
+
+@pytest.fixture()
+def settings() -> FoundryStorageSettings:
+ return _SETTINGS
+
+
+# ===========================================================================
+# create_response
+# ===========================================================================
+
+
+@pytest.mark.asyncio
+async def test_create_response__posts_to_responses_endpoint(credential: Any, settings: FoundryStorageSettings) -> None:
+ provider = _make_provider(credential, settings, _make_response(200, {}))
+ from azure.ai.agentserver.responses.models._generated import ResponseObject
+
+ response = ResponseObject(_RESPONSE_DICT)
+ await provider.create_response(response, None, None)
+
+ request = provider._client.send_request.call_args[0][0]
+ assert request.method == "POST"
+ assert request.url.startswith(f"{_BASE_URL}responses")
+ assert "api-version=v1" in request.url
+
+
+@pytest.mark.asyncio
+async def test_create_response__sends_correct_envelope(credential: Any, settings: FoundryStorageSettings) -> None:
+ provider = _make_provider(credential, settings, _make_response(200, {}))
+ from azure.ai.agentserver.responses.models._generated import ResponseObject
+
+ response = ResponseObject(_RESPONSE_DICT)
+ await provider.create_response(response, [MagicMock(as_dict=lambda: _INPUT_ITEM_DICT)], ["prev_item_1"])
+
+ request = provider._client.send_request.call_args[0][0]
+ payload = json.loads(request.content.decode("utf-8"))
+ assert payload["response"]["id"] == "resp_abc123"
+ assert len(payload["input_items"]) == 1
+ assert payload["history_item_ids"] == ["prev_item_1"]
+
+
+@pytest.mark.asyncio
+async def test_create_response__raises_foundry_api_error_on_500(
+ credential: Any, settings: FoundryStorageSettings
+) -> None:
+ provider = _make_provider(credential, settings, _make_response(500, {"error": {"message": "server fault"}}))
+ from azure.ai.agentserver.responses.models._generated import ResponseObject
+
+ with pytest.raises(FoundryApiError) as exc_info:
+ await provider.create_response(ResponseObject(_RESPONSE_DICT), None, None)
+
+ assert exc_info.value.status_code == 500
+ assert "server fault" in exc_info.value.message
+
+
+# ===========================================================================
+# get_response
+# ===========================================================================
+
+
+@pytest.mark.asyncio
+async def test_get_response__gets_correct_url(credential: Any, settings: FoundryStorageSettings) -> None:
+ provider = _make_provider(credential, settings, _make_response(200, _RESPONSE_DICT))
+
+ await provider.get_response("resp_abc123")
+
+ request = provider._client.send_request.call_args[0][0]
+ assert request.method == "GET"
+ assert "responses/resp_abc123" in request.url
+ assert "api-version=v1" in request.url
+
+
+@pytest.mark.asyncio
+async def test_get_response__returns_deserialized_response(credential: Any, settings: FoundryStorageSettings) -> None:
+ provider = _make_provider(credential, settings, _make_response(200, _RESPONSE_DICT))
+
+ result = await provider.get_response("resp_abc123")
+
+ assert result["id"] == "resp_abc123"
+ assert result["status"] == "completed"
+
+
+@pytest.mark.asyncio
+async def test_get_response__raises_not_found_on_404(credential: Any, settings: FoundryStorageSettings) -> None:
+ provider = _make_provider(credential, settings, _make_response(404, {"error": {"message": "not found"}}))
+
+ with pytest.raises(FoundryResourceNotFoundError) as exc_info:
+ await provider.get_response("missing_id")
+
+ assert "not found" in exc_info.value.message
+
+
+@pytest.mark.asyncio
+async def test_get_response__url_encodes_special_characters(credential: Any, settings: FoundryStorageSettings) -> None:
+ provider = _make_provider(credential, settings, _make_response(200, _RESPONSE_DICT))
+
+ await provider.get_response("id with spaces/slash")
+
+ request = provider._client.send_request.call_args[0][0]
+ assert " " not in request.url
+ assert "id%20with%20spaces%2Fslash" in request.url
+
+
+# ===========================================================================
+# update_response
+# ===========================================================================
+
+
+@pytest.mark.asyncio
+async def test_update_response__posts_to_response_id_url(credential: Any, settings: FoundryStorageSettings) -> None:
+ provider = _make_provider(credential, settings, _make_response(200, {}))
+ from azure.ai.agentserver.responses.models._generated import ResponseObject
+
+ response = ResponseObject(_RESPONSE_DICT)
+ await provider.update_response(response)
+
+ request = provider._client.send_request.call_args[0][0]
+ assert request.method == "POST"
+ assert "responses/resp_abc123" in request.url
+
+
+@pytest.mark.asyncio
+async def test_update_response__sends_serialized_response_body(
+ credential: Any, settings: FoundryStorageSettings
+) -> None:
+ provider = _make_provider(credential, settings, _make_response(200, {}))
+ from azure.ai.agentserver.responses.models._generated import ResponseObject
+
+ response = ResponseObject(_RESPONSE_DICT)
+ await provider.update_response(response)
+
+ request = provider._client.send_request.call_args[0][0]
+ payload = json.loads(request.content.decode("utf-8"))
+ assert payload["id"] == "resp_abc123"
+
+
+@pytest.mark.asyncio
+async def test_update_response__raises_bad_request_on_409(credential: Any, settings: FoundryStorageSettings) -> None:
+ provider = _make_provider(credential, settings, _make_response(409, {"error": {"message": "conflict"}}))
+ from azure.ai.agentserver.responses.models._generated import ResponseObject
+
+ with pytest.raises(FoundryBadRequestError) as exc_info:
+ await provider.update_response(ResponseObject(_RESPONSE_DICT))
+
+ assert "conflict" in exc_info.value.message
+
+
+# ===========================================================================
+# delete_response
+# ===========================================================================
+
+
+@pytest.mark.asyncio
+async def test_delete_response__sends_delete_to_response_url(credential: Any, settings: FoundryStorageSettings) -> None:
+ provider = _make_provider(credential, settings, _make_response(200, {}))
+
+ await provider.delete_response("resp_abc123")
+
+ request = provider._client.send_request.call_args[0][0]
+ assert request.method == "DELETE"
+ assert "responses/resp_abc123" in request.url
+ assert "api-version=v1" in request.url
+
+
+@pytest.mark.asyncio
+async def test_delete_response__raises_not_found_on_404(credential: Any, settings: FoundryStorageSettings) -> None:
+ provider = _make_provider(credential, settings, _make_response(404, {}))
+
+ with pytest.raises(FoundryResourceNotFoundError):
+ await provider.delete_response("ghost_id")
+
+
+# ===========================================================================
+# get_input_items
+# ===========================================================================
+
+
+@pytest.mark.asyncio
+async def test_get_input_items__default_params_in_url(credential: Any, settings: FoundryStorageSettings) -> None:
+ provider = _make_provider(
+ credential, settings, _make_response(200, {"data": [_OUTPUT_ITEM_DICT], "object": "list"})
+ )
+
+ await provider.get_input_items("resp_abc123")
+
+ request = provider._client.send_request.call_args[0][0]
+ assert "responses/resp_abc123/input_items" in request.url
+ assert "limit=20" in request.url
+ assert "order=desc" in request.url
+
+
+@pytest.mark.asyncio
+async def test_get_input_items__ascending_sets_order_asc(credential: Any, settings: FoundryStorageSettings) -> None:
+ provider = _make_provider(credential, settings, _make_response(200, {"data": []}))
+
+ await provider.get_input_items("resp_abc123", ascending=True, limit=5)
+
+ request = provider._client.send_request.call_args[0][0]
+ assert "order=asc" in request.url
+ assert "limit=5" in request.url
+
+
+@pytest.mark.asyncio
+async def test_get_input_items__cursor_params_appended(credential: Any, settings: FoundryStorageSettings) -> None:
+ provider = _make_provider(credential, settings, _make_response(200, {"data": []}))
+
+ await provider.get_input_items("resp_abc123", after="item_cursor_1", before="item_cursor_2")
+
+ request = provider._client.send_request.call_args[0][0]
+ assert "after=item_cursor_1" in request.url
+ assert "before=item_cursor_2" in request.url
+
+
+@pytest.mark.asyncio
+async def test_get_input_items__returns_deserialized_items(credential: Any, settings: FoundryStorageSettings) -> None:
+ paged_body = {"data": [_OUTPUT_ITEM_DICT], "object": "list"}
+ provider = _make_provider(credential, settings, _make_response(200, paged_body))
+
+ items = await provider.get_input_items("resp_abc123")
+
+ assert len(items) == 1
+ assert items[0]["id"] == "item_out_001"
+ assert items[0]["type"] == "message"
+
+
+@pytest.mark.asyncio
+async def test_get_input_items__empty_data_returns_empty_list(
+ credential: Any, settings: FoundryStorageSettings
+) -> None:
+ provider = _make_provider(credential, settings, _make_response(200, {"data": [], "object": "list"}))
+
+ items = await provider.get_input_items("resp_abc123")
+
+ assert items == []
+
+
+@pytest.mark.asyncio
+async def test_get_input_items__cursor_params_omitted_when_none(
+ credential: Any, settings: FoundryStorageSettings
+) -> None:
+ provider = _make_provider(credential, settings, _make_response(200, {"data": []}))
+
+ await provider.get_input_items("resp_abc123", after=None, before=None)
+
+ request = provider._client.send_request.call_args[0][0]
+ assert "after=" not in request.url
+ assert "before=" not in request.url
+
+
+# ===========================================================================
+# get_items
+# ===========================================================================
+
+
+@pytest.mark.asyncio
+async def test_get_items__posts_to_batch_retrieve_endpoint(credential: Any, settings: FoundryStorageSettings) -> None:
+ provider = _make_provider(credential, settings, _make_response(200, [_OUTPUT_ITEM_DICT, None]))
+
+ await provider.get_items(["item_out_001", "missing_id"])
+
+ request = provider._client.send_request.call_args[0][0]
+ assert request.method == "POST"
+ assert "items/batch/retrieve" in request.url
+ assert "api-version=v1" in request.url
+
+
+@pytest.mark.asyncio
+async def test_get_items__sends_item_ids_in_body(credential: Any, settings: FoundryStorageSettings) -> None:
+ provider = _make_provider(credential, settings, _make_response(200, [_OUTPUT_ITEM_DICT]))
+
+ await provider.get_items(["item_out_001"])
+
+ request = provider._client.send_request.call_args[0][0]
+ payload = json.loads(request.content.decode("utf-8"))
+ assert payload["item_ids"] == ["item_out_001"]
+
+
+@pytest.mark.asyncio
+async def test_get_items__returns_none_for_missing_items(credential: Any, settings: FoundryStorageSettings) -> None:
+ provider = _make_provider(credential, settings, _make_response(200, [_OUTPUT_ITEM_DICT, None]))
+
+ items = await provider.get_items(["item_out_001", "missing_id"])
+
+ assert len(items) == 2
+ assert items[0]["id"] == "item_out_001"
+ assert items[1] is None
+
+
+@pytest.mark.asyncio
+async def test_get_items__preserves_input_order(credential: Any, settings: FoundryStorageSettings) -> None:
+ item_a = {**_OUTPUT_ITEM_DICT, "id": "item_a"}
+ item_b = {**_OUTPUT_ITEM_DICT, "id": "item_b"}
+ provider = _make_provider(credential, settings, _make_response(200, [item_b, item_a]))
+
+ items = await provider.get_items(["id_b", "id_a"])
+
+ assert items[0]["id"] == "item_b"
+ assert items[1]["id"] == "item_a"
+
+
+# ===========================================================================
+# get_history_item_ids
+# ===========================================================================
+
+
+@pytest.mark.asyncio
+async def test_get_history_item_ids__gets_to_history_endpoint(
+ credential: Any, settings: FoundryStorageSettings
+) -> None:
+ provider = _make_provider(credential, settings, _make_response(200, ["item_h1", "item_h2"]))
+
+ await provider.get_history_item_ids(None, None, limit=10)
+
+ request = provider._client.send_request.call_args[0][0]
+ assert request.method == "GET"
+ assert "history/item_ids" in request.url
+ assert "api-version=v1" in request.url
+ assert "limit=10" in request.url
+
+
+@pytest.mark.asyncio
+async def test_get_history_item_ids__returns_list_of_strings(credential: Any, settings: FoundryStorageSettings) -> None:
+ provider = _make_provider(credential, settings, _make_response(200, ["item_h1", "item_h2"]))
+
+ ids = await provider.get_history_item_ids(None, None, limit=10)
+
+ assert ids == ["item_h1", "item_h2"]
+
+
+@pytest.mark.asyncio
+async def test_get_history_item_ids__appends_previous_response_id(
+ credential: Any, settings: FoundryStorageSettings
+) -> None:
+ provider = _make_provider(credential, settings, _make_response(200, ["item_h1"]))
+
+ await provider.get_history_item_ids("prev_resp_99", None, limit=5)
+
+ request = provider._client.send_request.call_args[0][0]
+ assert "previous_response_id=prev_resp_99" in request.url
+
+
+@pytest.mark.asyncio
+async def test_get_history_item_ids__appends_conversation_id(credential: Any, settings: FoundryStorageSettings) -> None:
+ provider = _make_provider(credential, settings, _make_response(200, []))
+
+ await provider.get_history_item_ids(None, "conv_42", limit=3)
+
+ request = provider._client.send_request.call_args[0][0]
+ assert "conversation_id=conv_42" in request.url
+
+
+@pytest.mark.asyncio
+async def test_get_history_item_ids__omits_optional_params_when_none(
+ credential: Any, settings: FoundryStorageSettings
+) -> None:
+ provider = _make_provider(credential, settings, _make_response(200, []))
+
+ await provider.get_history_item_ids(None, None, limit=10)
+
+ request = provider._client.send_request.call_args[0][0]
+ assert "previous_response_id" not in request.url
+ assert "conversation_id" not in request.url
+
+
+# ===========================================================================
+# Isolation headers (S-018)
+# ===========================================================================
+
+
+@pytest.mark.asyncio
+async def test_create_response__sends_isolation_headers(credential: Any, settings: FoundryStorageSettings) -> None:
+ provider = _make_provider(credential, settings, _make_response(200, {}))
+ from azure.ai.agentserver.responses.models._generated import ResponseObject
+
+ isolation = IsolationContext(user_key="u_key_1", chat_key="c_key_1")
+ await provider.create_response(ResponseObject(_RESPONSE_DICT), None, None, isolation=isolation)
+
+ request = provider._client.send_request.call_args[0][0]
+ assert request.headers[_USER_ISOLATION_HEADER] == "u_key_1"
+ assert request.headers[_CHAT_ISOLATION_HEADER] == "c_key_1"
+
+
+@pytest.mark.asyncio
+async def test_get_response__sends_isolation_headers(credential: Any, settings: FoundryStorageSettings) -> None:
+ provider = _make_provider(credential, settings, _make_response(200, _RESPONSE_DICT))
+
+ isolation = IsolationContext(user_key="u_key_2", chat_key="c_key_2")
+ await provider.get_response("resp_abc123", isolation=isolation)
+
+ request = provider._client.send_request.call_args[0][0]
+ assert request.headers[_USER_ISOLATION_HEADER] == "u_key_2"
+ assert request.headers[_CHAT_ISOLATION_HEADER] == "c_key_2"
+
+
+@pytest.mark.asyncio
+async def test_update_response__sends_isolation_headers(credential: Any, settings: FoundryStorageSettings) -> None:
+ provider = _make_provider(credential, settings, _make_response(200, {}))
+ from azure.ai.agentserver.responses.models._generated import ResponseObject
+
+ isolation = IsolationContext(user_key="u_key_3", chat_key="c_key_3")
+ await provider.update_response(ResponseObject(_RESPONSE_DICT), isolation=isolation)
+
+ request = provider._client.send_request.call_args[0][0]
+ assert request.headers[_USER_ISOLATION_HEADER] == "u_key_3"
+ assert request.headers[_CHAT_ISOLATION_HEADER] == "c_key_3"
+
+
+@pytest.mark.asyncio
+async def test_delete_response__sends_isolation_headers(credential: Any, settings: FoundryStorageSettings) -> None:
+ provider = _make_provider(credential, settings, _make_response(200, {}))
+
+ isolation = IsolationContext(user_key="u_key_4", chat_key="c_key_4")
+ await provider.delete_response("resp_abc123", isolation=isolation)
+
+ request = provider._client.send_request.call_args[0][0]
+ assert request.headers[_USER_ISOLATION_HEADER] == "u_key_4"
+ assert request.headers[_CHAT_ISOLATION_HEADER] == "c_key_4"
+
+
+@pytest.mark.asyncio
+async def test_get_input_items__sends_isolation_headers(credential: Any, settings: FoundryStorageSettings) -> None:
+ provider = _make_provider(credential, settings, _make_response(200, {"data": []}))
+
+ isolation = IsolationContext(user_key="u_key_5", chat_key="c_key_5")
+ await provider.get_input_items("resp_abc123", isolation=isolation)
+
+ request = provider._client.send_request.call_args[0][0]
+ assert request.headers[_USER_ISOLATION_HEADER] == "u_key_5"
+ assert request.headers[_CHAT_ISOLATION_HEADER] == "c_key_5"
+
+
+@pytest.mark.asyncio
+async def test_get_items__sends_isolation_headers(credential: Any, settings: FoundryStorageSettings) -> None:
+ provider = _make_provider(credential, settings, _make_response(200, [_OUTPUT_ITEM_DICT]))
+
+ isolation = IsolationContext(user_key="u_key_6", chat_key="c_key_6")
+ await provider.get_items(["item_out_001"], isolation=isolation)
+
+ request = provider._client.send_request.call_args[0][0]
+ assert request.headers[_USER_ISOLATION_HEADER] == "u_key_6"
+ assert request.headers[_CHAT_ISOLATION_HEADER] == "c_key_6"
+
+
+@pytest.mark.asyncio
+async def test_get_history_item_ids__sends_isolation_headers(credential: Any, settings: FoundryStorageSettings) -> None:
+ provider = _make_provider(credential, settings, _make_response(200, []))
+
+ isolation = IsolationContext(user_key="u_key_7", chat_key="c_key_7")
+ await provider.get_history_item_ids(None, None, limit=10, isolation=isolation)
+
+ request = provider._client.send_request.call_args[0][0]
+ assert request.headers[_USER_ISOLATION_HEADER] == "u_key_7"
+ assert request.headers[_CHAT_ISOLATION_HEADER] == "c_key_7"
+
+
+@pytest.mark.asyncio
+async def test_isolation_headers__omitted_when_none(credential: Any, settings: FoundryStorageSettings) -> None:
+ """When isolation=None (default), no isolation headers are sent."""
+ provider = _make_provider(credential, settings, _make_response(200, _RESPONSE_DICT))
+
+ await provider.get_response("resp_abc123")
+
+ request = provider._client.send_request.call_args[0][0]
+ assert _USER_ISOLATION_HEADER not in request.headers
+ assert _CHAT_ISOLATION_HEADER not in request.headers
+
+
+@pytest.mark.asyncio
+async def test_isolation_headers__partial_keys_only_sends_present(
+ credential: Any, settings: FoundryStorageSettings
+) -> None:
+ """When only user_key is set, only user header is added."""
+ provider = _make_provider(credential, settings, _make_response(200, _RESPONSE_DICT))
+
+ isolation = IsolationContext(user_key="u_only")
+ await provider.get_response("resp_abc123", isolation=isolation)
+
+ request = provider._client.send_request.call_args[0][0]
+ assert request.headers[_USER_ISOLATION_HEADER] == "u_only"
+ assert _CHAT_ISOLATION_HEADER not in request.headers
+
+
+# ===========================================================================
+# Error mapping
+# ===========================================================================
+
+
+@pytest.mark.asyncio
+async def test_error_mapping__400_raises_bad_request(credential: Any, settings: FoundryStorageSettings) -> None:
+ provider = _make_provider(credential, settings, _make_response(400, {"error": {"message": "invalid input"}}))
+
+ with pytest.raises(FoundryBadRequestError) as exc_info:
+ await provider.get_response("any_id")
+
+ assert "invalid input" in exc_info.value.message
+
+
+@pytest.mark.asyncio
+async def test_error_mapping__generic_status_raises_foundry_api_error(
+ credential: Any, settings: FoundryStorageSettings
+) -> None:
+ provider = _make_provider(credential, settings, _make_response(503, {}))
+
+ with pytest.raises(FoundryApiError) as exc_info:
+ await provider.get_response("any_id")
+
+ assert exc_info.value.status_code == 503
+
+
+@pytest.mark.asyncio
+async def test_error_mapping__error_message_falls_back_for_non_json_body(
+ credential: Any, settings: FoundryStorageSettings
+) -> None:
+ raw = MagicMock()
+ raw.status_code = 502
+ raw.text = MagicMock(return_value="Bad Gateway")
+ provider = _make_provider(credential, settings, raw)
+
+ with pytest.raises(FoundryApiError) as exc_info:
+ await provider.get_response("any_id")
+
+ assert "502" in exc_info.value.message
+
+
+# ===========================================================================
+# HTTP client lifecycle
+# ===========================================================================
+
+
+@pytest.mark.asyncio
+async def test_aclose__closes_pipeline_client() -> None:
+ provider = _make_provider(_make_credential(), _SETTINGS, _make_response(200, {}))
+ await provider.aclose()
+ provider._client.close.assert_awaited_once()
+
+
+@pytest.mark.asyncio
+async def test_async_context_manager__closes_client_on_exit() -> None:
+ provider = _make_provider(_make_credential(), _SETTINGS, _make_response(200, {}))
+ async with provider:
+ pass
+ provider._client.close.assert_awaited_once()
+
+
+# ===========================================================================
+# FoundryStorageSettings
+# ===========================================================================
+
+
+def test_settings__from_env__reads_foundry_project_endpoint(monkeypatch: pytest.MonkeyPatch) -> None:
+ monkeypatch.setenv("FOUNDRY_PROJECT_ENDPOINT", "https://myproject.foundry.azure.com")
+ settings = FoundryStorageSettings.from_env()
+ assert settings.storage_base_url == "https://myproject.foundry.azure.com/storage/"
+
+
+def test_settings__from_env__strips_trailing_slash(monkeypatch: pytest.MonkeyPatch) -> None:
+ monkeypatch.setenv("FOUNDRY_PROJECT_ENDPOINT", "https://myproject.foundry.azure.com/")
+ settings = FoundryStorageSettings.from_env()
+ assert settings.storage_base_url == "https://myproject.foundry.azure.com/storage/"
+
+
+def test_settings__from_env__raises_if_env_var_missing(monkeypatch: pytest.MonkeyPatch) -> None:
+ monkeypatch.delenv("FOUNDRY_PROJECT_ENDPOINT", raising=False)
+ with pytest.raises(EnvironmentError, match="FOUNDRY_PROJECT_ENDPOINT"):
+ FoundryStorageSettings.from_env()
+
+
+def test_settings__from_env__raises_if_not_absolute_url(monkeypatch: pytest.MonkeyPatch) -> None:
+ monkeypatch.setenv("FOUNDRY_PROJECT_ENDPOINT", "just-a-hostname")
+ with pytest.raises(ValueError, match="valid absolute URL"):
+ FoundryStorageSettings.from_env()
+
+
+def test_settings__build_url__includes_api_version() -> None:
+ url = _SETTINGS.build_url("responses/abc")
+ assert url == f"{_BASE_URL}responses/abc?api-version=v1"
+
+
+def test_settings__build_url__appends_extra_params_encoded() -> None:
+ url = _SETTINGS.build_url("responses", limit="10", order="asc")
+ assert "limit=10" in url
+ assert "order=asc" in url
+
+
+def test_settings__build_url__url_encodes_extra_param_values() -> None:
+ url = _SETTINGS.build_url("history/item_ids", conversation_id="conv id/1")
+ assert "conversation_id=conv%20id%2F1" in url
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_generated_payload_validation.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_generated_payload_validation.py
new file mode 100644
index 000000000000..2de5b2219130
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_generated_payload_validation.py
@@ -0,0 +1,108 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Unit tests for generated payload validator integration in parse flow."""
+
+from __future__ import annotations
+
+import pytest
+
+from azure.ai.agentserver.responses.hosting._validation import parse_create_response
+from azure.ai.agentserver.responses.models._generated._validators import validate_CreateResponse
+from azure.ai.agentserver.responses.models.errors import RequestValidationError
+
+# ---------------------------------------------------------------------------
+# parse_create_response integration tests (real validator + real model)
+# ---------------------------------------------------------------------------
+
+
+def test_parse_create_response_rejects_invalid_payload() -> None:
+ """A payload with a wrong-typed field is caught by the generated validator."""
+ with pytest.raises(RequestValidationError) as exc_info:
+ parse_create_response({"model": 123})
+
+ error = exc_info.value
+ assert error.code == "invalid_request"
+ assert error.details is not None
+ assert any(d["param"] == "$.model" for d in error.details)
+
+
+def test_parse_create_response_allows_valid_payload() -> None:
+ parsed = parse_create_response({"model": "gpt-4o"})
+ assert parsed.model == "gpt-4o"
+
+
+def test_parse_create_response_rejects_non_object_body() -> None:
+ with pytest.raises(RequestValidationError) as exc_info:
+ parse_create_response("not-a-dict") # type: ignore[arg-type]
+
+ assert exc_info.value.code == "invalid_request"
+
+
+# ---------------------------------------------------------------------------
+# Generated validator tests (validate_CreateResponse directly)
+# ---------------------------------------------------------------------------
+
+
+def test_generated_create_response_validator_accepts_string_input() -> None:
+ errors = validate_CreateResponse({"input": "hello world"})
+ assert errors == []
+
+
+def test_generated_create_response_validator_accepts_array_input_items() -> None:
+ # ItemMessage requires role + content in addition to type (GAP-01: type is
+ # optional on input, but role/content remain required by the spec).
+ errors = validate_CreateResponse({"input": [{"type": "message", "role": "user", "content": "hello"}]})
+ assert errors == []
+
+
+def test_generated_create_response_validator_rejects_non_string_non_array_input() -> None:
+ errors = validate_CreateResponse({"input": 123})
+ assert any(e["path"] == "$.input" and "Expected one of: string, array" in e["message"] for e in errors)
+
+
+def test_generated_create_response_validator_rejects_non_object_input_item() -> None:
+ errors = validate_CreateResponse({"input": [123]})
+ assert any(e["path"] == "$.input" and "Expected one of: string, array" in e["message"] for e in errors)
+
+
+def test_generated_create_response_validator_rejects_input_item_missing_type() -> None:
+ errors = validate_CreateResponse({"input": [{}]})
+ assert any(e["path"] == "$.input" and "Expected one of: string, array" in e["message"] for e in errors)
+
+
+def test_generated_create_response_validator_rejects_input_item_type_with_wrong_primitive() -> None:
+ errors = validate_CreateResponse({"input": [{"type": 1}]})
+ assert any(e["path"] == "$.input" and "Expected one of: string, array" in e["message"] for e in errors)
+
+
+# Minimal valid payloads per item type, satisfying each schema's required fields.
+_VALID_INPUT_ITEMS: dict[str, dict] = {
+ "message": {"type": "message", "role": "user", "content": "hello"},
+ "item_reference": {"type": "item_reference", "id": "ref_123"},
+ "function_call_output": {"type": "function_call_output", "call_id": "call_123", "output": "result"},
+ "computer_call_output": {
+ "type": "computer_call_output",
+ "call_id": "call_123",
+ "output": {"type": "computer_screenshot"},
+ },
+ "apply_patch_call_output": {"type": "apply_patch_call_output", "call_id": "call_123", "status": "completed"},
+}
+
+
+@pytest.mark.parametrize("item_type", list(_VALID_INPUT_ITEMS))
+def test_generated_create_response_validator_accepts_multiple_input_item_types(item_type: str) -> None:
+ errors = validate_CreateResponse({"input": [_VALID_INPUT_ITEMS[item_type]]})
+ assert errors == []
+
+
+def test_generated_create_response_validator_accepts_mixed_input_item_types() -> None:
+ errors = validate_CreateResponse(
+ {
+ "input": [
+ _VALID_INPUT_ITEMS["message"],
+ _VALID_INPUT_ITEMS["item_reference"],
+ _VALID_INPUT_ITEMS["function_call_output"],
+ ]
+ }
+ )
+ assert errors == []
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_id_generator.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_id_generator.py
new file mode 100644
index 000000000000..f0b47d97d537
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_id_generator.py
@@ -0,0 +1,110 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Unit tests for ID generation behavior."""
+
+from __future__ import annotations
+
+import re
+
+import pytest
+
+from azure.ai.agentserver.responses._id_generator import IdGenerator
+from azure.ai.agentserver.responses.models import _generated as generated_models
+
+
+def test_id_generator__new_id_uses_new_format_shape() -> None:
+ created_id = IdGenerator.new_id("msg")
+
+ assert created_id.startswith("msg_")
+ body = created_id[len("msg_") :]
+ assert len(body) == 50
+
+ partition_key = body[:18]
+ entropy = body[18:]
+
+ assert len(partition_key) == 18
+ assert partition_key.endswith("00")
+ assert re.fullmatch(r"[0-9a-f]{16}00", partition_key) is not None
+ assert len(entropy) == 32
+ assert re.fullmatch(r"[A-Za-z0-9]{32}", entropy) is not None
+
+
+def test_id_generator__new_id_reuses_new_format_partition_key_from_hint() -> None:
+ hint = "caresp_1234567890abcdef00ABCDEFGHIJKLMNOPQRSTUVWXYZ012345"
+
+ created_id = IdGenerator.new_id("fc", hint)
+
+ assert created_id.startswith("fc_1234567890abcdef00")
+
+
+def test_id_generator__new_id_upgrades_legacy_partition_key_from_hint() -> None:
+ legacy_partition_key = "1234567890abcdef"
+ legacy_entropy = "A" * 32
+ legacy_hint = f"msg_{legacy_entropy}{legacy_partition_key}"
+
+ created_id = IdGenerator.new_id("rs", legacy_hint)
+
+ assert created_id.startswith("rs_1234567890abcdef00")
+
+
+def test_id_generator__extract_partition_key_supports_new_and_legacy_formats() -> None:
+ new_format_id = "caresp_1234567890abcdef00ABCDEFGHIJKLMNOPQRSTUVWXYZ012345"
+ legacy_format_id = "msg_AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA1234567890abcdef"
+
+ assert IdGenerator.extract_partition_key(new_format_id) == "1234567890abcdef00"
+ assert IdGenerator.extract_partition_key(legacy_format_id) == "1234567890abcdef"
+
+
+def test_id_generator__extract_partition_key_raises_for_bad_input() -> None:
+ with pytest.raises(ValueError, match="ID must not be null or empty"):
+ IdGenerator.extract_partition_key("")
+
+ with pytest.raises(ValueError, match="has no '_' delimiter"):
+ IdGenerator.extract_partition_key("badid")
+
+ with pytest.raises(ValueError, match="unexpected body length"):
+ IdGenerator.extract_partition_key("msg_short")
+
+
+def test_id_generator__is_valid_reports_compatible_errors() -> None:
+ assert IdGenerator.is_valid("") == (False, "ID must not be null or empty.")
+ assert IdGenerator.is_valid("badid") == (False, "ID 'badid' has no '_' delimiter.")
+ assert IdGenerator.is_valid("_short") == (
+ False,
+ "ID has an empty prefix.",
+ )
+ assert IdGenerator.is_valid("msg_short") == (
+ False,
+ "ID 'msg_short' has unexpected body length 5 (expected 50 or 48).",
+ )
+
+
+def test_id_generator__is_valid_checks_allowed_prefixes() -> None:
+ valid_id = "msg_1234567890abcdef00ABCDEFGHIJKLMNOPQRSTUVWXYZ012345"
+
+ ok, error = IdGenerator.is_valid(valid_id, allowed_prefixes=["msg", "fc"])
+ assert ok is True
+ assert error is None
+
+ ok, error = IdGenerator.is_valid(valid_id, allowed_prefixes=["fc"])
+ assert ok is False
+ assert error == "ID prefix 'msg' is not in the allowed set [fc]."
+
+
+def test_id_generator__convenience_method_uses_caresp_prefix() -> None:
+ created_id = IdGenerator.new_response_id()
+
+ assert created_id.startswith("caresp_")
+ assert len(created_id.split("_", maxsplit=1)[1]) == 50
+
+
+def test_id_generator__new_item_id_dispatches_by_generated_model_type() -> None:
+ item_message = object.__new__(generated_models.ItemMessage)
+ item_reference = object.__new__(generated_models.ItemReferenceParam)
+
+ generated_id = IdGenerator.new_item_id(item_message)
+
+ assert generated_id is not None
+ assert generated_id.startswith("msg_")
+ assert IdGenerator.new_item_id(item_reference) is None
+ assert IdGenerator.new_item_id(object()) is None
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_in_memory_provider_crud.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_in_memory_provider_crud.py
new file mode 100644
index 000000000000..24d3e95c8a0c
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_in_memory_provider_crud.py
@@ -0,0 +1,466 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""CRUD tests for InMemoryResponseProvider.
+
+Covers create, read, update, delete of response envelopes,
+output item storage, history resolution via previous_response_id
+and conversation_id, and defensive-copy isolation.
+"""
+
+from __future__ import annotations
+
+import asyncio
+from typing import Any
+
+import pytest
+
+from azure.ai.agentserver.responses.models import _generated as generated_models
+from azure.ai.agentserver.responses.store._memory import InMemoryResponseProvider
+
+# ---------------------------------------------------------------------------
+# Helpers
+# ---------------------------------------------------------------------------
+
+
+def _response(
+ response_id: str,
+ *,
+ status: str = "completed",
+ output: list[dict[str, Any]] | None = None,
+ conversation_id: str | None = None,
+) -> generated_models.ResponseObject:
+ payload: dict[str, Any] = {
+ "id": response_id,
+ "object": "response",
+ "output": output or [],
+ "store": True,
+ "status": status,
+ }
+ if conversation_id is not None:
+ payload["conversation"] = {"id": conversation_id}
+ return generated_models.ResponseObject(payload)
+
+
+def _input_item(item_id: str, text: str) -> dict[str, Any]:
+ return {
+ "id": item_id,
+ "type": "message",
+ "role": "user",
+ "content": [{"type": "input_text", "text": text}],
+ }
+
+
+def _output_message(item_id: str, text: str) -> dict[str, Any]:
+ return {
+ "id": item_id,
+ "type": "output_message",
+ "role": "assistant",
+ "status": "completed",
+ "content": [{"type": "output_text", "text": text}],
+ }
+
+
+# ===========================================================================
+# Create
+# ===========================================================================
+
+
+def test_create__stores_response_envelope() -> None:
+ provider = InMemoryResponseProvider()
+ asyncio.run(provider.create_response(_response("resp_1"), None, None))
+
+ result = asyncio.run(provider.get_response("resp_1"))
+ assert str(getattr(result, "id")) == "resp_1"
+
+
+def test_create__duplicate_raises_value_error() -> None:
+ provider = InMemoryResponseProvider()
+ asyncio.run(provider.create_response(_response("resp_dup"), None, None))
+
+ with pytest.raises(ValueError, match="already exists"):
+ asyncio.run(provider.create_response(_response("resp_dup"), None, None))
+
+
+def test_create__stores_input_items_in_item_store() -> None:
+ provider = InMemoryResponseProvider()
+ items = [_input_item("in_1", "hello"), _input_item("in_2", "world")]
+ asyncio.run(provider.create_response(_response("resp_in"), items, None))
+
+ fetched = asyncio.run(provider.get_items(["in_1", "in_2"]))
+ assert len(fetched) == 2
+ assert fetched[0]["id"] == "in_1"
+ assert fetched[1]["id"] == "in_2"
+
+
+def test_create__stores_output_items_in_item_store() -> None:
+ provider = InMemoryResponseProvider()
+ resp = _response(
+ "resp_out",
+ output=[_output_message("out_1", "hi"), _output_message("out_2", "there")],
+ )
+ asyncio.run(provider.create_response(resp, None, None))
+
+ fetched = asyncio.run(provider.get_items(["out_1", "out_2"]))
+ assert len(fetched) == 2
+ assert fetched[0]["id"] == "out_1"
+ assert fetched[1]["id"] == "out_2"
+
+
+def test_create__returns_defensive_copy() -> None:
+ """Mutating the returned response must not affect the stored copy."""
+ provider = InMemoryResponseProvider()
+ asyncio.run(provider.create_response(_response("resp_copy"), None, None))
+
+ r1 = asyncio.run(provider.get_response("resp_copy"))
+ r1["status"] = "failed"
+
+ r2 = asyncio.run(provider.get_response("resp_copy"))
+ assert str(getattr(r2, "status")) == "completed"
+
+
+# ===========================================================================
+# Read (get)
+# ===========================================================================
+
+
+def test_get__raises_key_error_for_missing() -> None:
+ provider = InMemoryResponseProvider()
+ with pytest.raises(KeyError, match="not found"):
+ asyncio.run(provider.get_response("nonexistent"))
+
+
+def test_get__raises_key_error_for_deleted() -> None:
+ provider = InMemoryResponseProvider()
+ asyncio.run(provider.create_response(_response("resp_del"), None, None))
+ asyncio.run(provider.delete_response("resp_del"))
+
+ with pytest.raises(KeyError, match="not found"):
+ asyncio.run(provider.get_response("resp_del"))
+
+
+def test_get_items__missing_ids_return_none() -> None:
+ provider = InMemoryResponseProvider()
+ result = asyncio.run(provider.get_items(["no_such_item"]))
+ assert result == [None]
+
+
+# ===========================================================================
+# Update
+# ===========================================================================
+
+
+def test_update__replaces_envelope() -> None:
+ provider = InMemoryResponseProvider()
+ asyncio.run(provider.create_response(_response("resp_upd", status="in_progress"), None, None))
+
+ updated = _response("resp_upd", status="completed")
+ asyncio.run(provider.update_response(updated))
+
+ result = asyncio.run(provider.get_response("resp_upd"))
+ assert str(getattr(result, "status")) == "completed"
+
+
+def test_update__stores_new_output_items() -> None:
+ """Updating a response with new output items must index them in the item store."""
+ provider = InMemoryResponseProvider()
+ asyncio.run(provider.create_response(_response("resp_upd2", status="in_progress"), None, None))
+
+ updated = _response(
+ "resp_upd2",
+ status="completed",
+ output=[_output_message("out_upd_1", "answer")],
+ )
+ asyncio.run(provider.update_response(updated))
+
+ fetched = asyncio.run(provider.get_items(["out_upd_1"]))
+ assert fetched[0] is not None
+ assert fetched[0]["id"] == "out_upd_1"
+
+
+def test_update__raises_key_error_for_missing() -> None:
+ provider = InMemoryResponseProvider()
+ with pytest.raises(KeyError, match="not found"):
+ asyncio.run(provider.update_response(_response("ghost")))
+
+
+def test_update__raises_key_error_for_deleted() -> None:
+ provider = InMemoryResponseProvider()
+ asyncio.run(provider.create_response(_response("resp_d"), None, None))
+ asyncio.run(provider.delete_response("resp_d"))
+
+ with pytest.raises(KeyError, match="not found"):
+ asyncio.run(provider.update_response(_response("resp_d")))
+
+
+# ===========================================================================
+# Delete
+# ===========================================================================
+
+
+def test_delete__marks_entry_as_deleted() -> None:
+ provider = InMemoryResponseProvider()
+ asyncio.run(provider.create_response(_response("resp_del2"), None, None))
+ asyncio.run(provider.delete_response("resp_del2"))
+
+ with pytest.raises(KeyError):
+ asyncio.run(provider.get_response("resp_del2"))
+
+
+def test_delete__raises_key_error_for_missing() -> None:
+ provider = InMemoryResponseProvider()
+ with pytest.raises(KeyError, match="not found"):
+ asyncio.run(provider.delete_response("nonexistent"))
+
+
+def test_delete__double_delete_raises() -> None:
+ provider = InMemoryResponseProvider()
+ asyncio.run(provider.create_response(_response("resp_dd"), None, None))
+ asyncio.run(provider.delete_response("resp_dd"))
+
+ with pytest.raises(KeyError, match="not found"):
+ asyncio.run(provider.delete_response("resp_dd"))
+
+
+# ===========================================================================
+# History resolution — previous_response_id path
+# ===========================================================================
+
+
+def test_history__previous_response_returns_input_and_output_ids() -> None:
+ """get_history_item_ids via previous_response_id must include
+ history + input + output item IDs from the previous response."""
+ provider = InMemoryResponseProvider()
+ resp = _response(
+ "resp_prev",
+ output=[_output_message("out_h1", "reply")],
+ )
+ asyncio.run(
+ provider.create_response(
+ resp,
+ [_input_item("in_h1", "question")],
+ history_item_ids=None,
+ )
+ )
+
+ ids = asyncio.run(provider.get_history_item_ids("resp_prev", None, 100))
+ assert "in_h1" in ids
+ assert "out_h1" in ids
+
+
+def test_history__previous_response_chains_history_ids() -> None:
+ """History chain: resp_1 (with input) → resp_2 (previous_response_id=resp_1)
+ should yield resp_1's history + input + output when queried from resp_2."""
+ provider = InMemoryResponseProvider()
+ resp1 = _response(
+ "resp_chain1",
+ output=[_output_message("out_c1", "first reply")],
+ )
+ asyncio.run(
+ provider.create_response(
+ resp1,
+ [_input_item("in_c1", "first question")],
+ history_item_ids=None,
+ )
+ )
+
+ # Build resp_2 with history referencing resp_1's items
+ history_from_1 = asyncio.run(provider.get_history_item_ids("resp_chain1", None, 100))
+ resp2 = _response(
+ "resp_chain2",
+ output=[_output_message("out_c2", "second reply")],
+ )
+ asyncio.run(
+ provider.create_response(
+ resp2,
+ [_input_item("in_c2", "second question")],
+ history_item_ids=history_from_1,
+ )
+ )
+
+ # Now query history from resp_2's perspective
+ ids = asyncio.run(provider.get_history_item_ids("resp_chain2", None, 100))
+ # Should include: history (in_c1, out_c1) + input (in_c2) + output (out_c2)
+ assert "in_c1" in ids
+ assert "out_c1" in ids
+ assert "in_c2" in ids
+ assert "out_c2" in ids
+
+
+def test_history__items_resolvable_after_chain() -> None:
+ """Full round-trip: create resp_1, then resp_2 referencing resp_1, and
+ verify all history items are resolvable via get_items."""
+ provider = InMemoryResponseProvider()
+ resp1 = _response(
+ "resp_rt1",
+ output=[_output_message("out_rt1", "answer one")],
+ )
+ asyncio.run(
+ provider.create_response(
+ resp1,
+ [_input_item("in_rt1", "question one")],
+ history_item_ids=None,
+ )
+ )
+
+ history_ids = asyncio.run(provider.get_history_item_ids("resp_rt1", None, 100))
+ resp2 = _response("resp_rt2", output=[_output_message("out_rt2", "answer two")])
+ asyncio.run(
+ provider.create_response(
+ resp2,
+ [_input_item("in_rt2", "question two")],
+ history_item_ids=history_ids,
+ )
+ )
+
+ all_ids = asyncio.run(provider.get_history_item_ids("resp_rt2", None, 100))
+ items = asyncio.run(provider.get_items(all_ids))
+ assert all(item is not None for item in items), f"Some history items not found: {all_ids}"
+ resolved_ids = [item["id"] for item in items]
+ assert "in_rt1" in resolved_ids
+ assert "out_rt1" in resolved_ids
+ assert "in_rt2" in resolved_ids
+ assert "out_rt2" in resolved_ids
+
+
+def test_history__deleted_response_excluded() -> None:
+ provider = InMemoryResponseProvider()
+ asyncio.run(
+ provider.create_response(
+ _response("resp_hdel", output=[_output_message("out_hdel", "msg")]),
+ [_input_item("in_hdel", "q")],
+ None,
+ )
+ )
+ asyncio.run(provider.delete_response("resp_hdel"))
+
+ ids = asyncio.run(provider.get_history_item_ids("resp_hdel", None, 100))
+ assert ids == []
+
+
+def test_history__respects_limit() -> None:
+ provider = InMemoryResponseProvider()
+ many_inputs = [_input_item(f"in_lim_{i}", f"msg {i}") for i in range(10)]
+ asyncio.run(provider.create_response(_response("resp_lim"), many_inputs, None))
+
+ ids = asyncio.run(provider.get_history_item_ids("resp_lim", None, 3))
+ assert len(ids) == 3
+
+
+def test_history__zero_limit_returns_empty() -> None:
+ provider = InMemoryResponseProvider()
+ asyncio.run(
+ provider.create_response(
+ _response("resp_z"),
+ [_input_item("in_z", "q")],
+ None,
+ )
+ )
+
+ ids = asyncio.run(provider.get_history_item_ids("resp_z", None, 0))
+ assert ids == []
+
+
+# ===========================================================================
+# History resolution — conversation_id path
+# ===========================================================================
+
+
+def test_history__conversation_id_collects_across_responses() -> None:
+ """All input + output item IDs from responses in a conversation should be returned."""
+ provider = InMemoryResponseProvider()
+
+ resp1 = _response(
+ "resp_cv1",
+ conversation_id="conv_1",
+ output=[_output_message("out_cv1", "reply 1")],
+ )
+ asyncio.run(
+ provider.create_response(
+ resp1,
+ [_input_item("in_cv1", "q1")],
+ None,
+ )
+ )
+
+ resp2 = _response(
+ "resp_cv2",
+ conversation_id="conv_1",
+ output=[_output_message("out_cv2", "reply 2")],
+ )
+ asyncio.run(
+ provider.create_response(
+ resp2,
+ [_input_item("in_cv2", "q2")],
+ None,
+ )
+ )
+
+ ids = asyncio.run(provider.get_history_item_ids(None, "conv_1", 100))
+ assert "in_cv1" in ids
+ assert "out_cv1" in ids
+ assert "in_cv2" in ids
+ assert "out_cv2" in ids
+
+
+def test_history__conversation_excludes_deleted_responses() -> None:
+ provider = InMemoryResponseProvider()
+
+ asyncio.run(
+ provider.create_response(
+ _response("resp_cvd1", conversation_id="conv_d"),
+ [_input_item("in_cvd1", "q1")],
+ None,
+ )
+ )
+ asyncio.run(
+ provider.create_response(
+ _response("resp_cvd2", conversation_id="conv_d"),
+ [_input_item("in_cvd2", "q2")],
+ None,
+ )
+ )
+ asyncio.run(provider.delete_response("resp_cvd1"))
+
+ ids = asyncio.run(provider.get_history_item_ids(None, "conv_d", 100))
+ assert "in_cvd1" not in ids
+ assert "in_cvd2" in ids
+
+
+def test_history__no_previous_no_conversation_returns_empty() -> None:
+ provider = InMemoryResponseProvider()
+ ids = asyncio.run(provider.get_history_item_ids(None, None, 100))
+ assert ids == []
+
+
+# ===========================================================================
+# Output items updated on update_response
+# ===========================================================================
+
+
+def test_update__output_items_reflected_in_history() -> None:
+ """After updating a response with new output, history resolution should
+ include the updated output item IDs."""
+ provider = InMemoryResponseProvider()
+ asyncio.run(
+ provider.create_response(
+ _response("resp_uo", status="in_progress"),
+ [_input_item("in_uo", "question")],
+ None,
+ )
+ )
+
+ # Initially no output
+ ids_before = asyncio.run(provider.get_history_item_ids("resp_uo", None, 100))
+ assert "out_uo" not in ids_before
+
+ # Update adds output
+ updated = _response(
+ "resp_uo",
+ status="completed",
+ output=[_output_message("out_uo", "answer")],
+ )
+ asyncio.run(provider.update_response(updated))
+
+ ids_after = asyncio.run(provider.get_history_item_ids("resp_uo", None, 100))
+ assert "in_uo" in ids_after
+ assert "out_uo" in ids_after
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_input_items_provider_behavior.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_input_items_provider_behavior.py
new file mode 100644
index 000000000000..d5e027594438
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_input_items_provider_behavior.py
@@ -0,0 +1,178 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Unit tests for input-items provider paging and error semantics."""
+
+from __future__ import annotations
+
+import asyncio
+
+import pytest
+
+from azure.ai.agentserver.responses.models import _generated as generated_models
+from azure.ai.agentserver.responses.store._memory import InMemoryResponseProvider
+
+
+def _response(response_id: str, *, store: bool = True) -> generated_models.ResponseObject:
+ return generated_models.ResponseObject(
+ {
+ "id": response_id,
+ "object": "response",
+ "output": [],
+ "store": store,
+ "status": "completed",
+ }
+ )
+
+
+def _item(item_id: str, text: str) -> dict[str, object]:
+ return {
+ "id": item_id,
+ "type": "message",
+ "role": "user",
+ "content": [{"type": "input_text", "text": text}],
+ }
+
+
+def _ids(items: list[object]) -> list[str]:
+ result: list[str] = []
+ for item in items:
+ if isinstance(item, dict):
+ item_id = item.get("id")
+ if isinstance(item_id, str):
+ result.append(item_id)
+ return result
+
+
+def test_provider_input_items__supports_after_before_combination() -> None:
+ provider = InMemoryResponseProvider()
+
+ asyncio.run(
+ provider.create_response(
+ _response("resp_combo"),
+ [
+ _item("msg_001", "one"),
+ _item("msg_002", "two"),
+ _item("msg_003", "three"),
+ _item("msg_004", "four"),
+ _item("msg_005", "five"),
+ ],
+ history_item_ids=None,
+ )
+ )
+
+ items = asyncio.run(
+ provider.get_input_items(
+ "resp_combo",
+ ascending=True,
+ after="msg_002",
+ before="msg_005",
+ )
+ )
+
+ assert _ids(items) == ["msg_003", "msg_004"]
+
+
+def test_provider_input_items__returns_empty_page_after_last_cursor() -> None:
+ provider = InMemoryResponseProvider()
+
+ asyncio.run(
+ provider.create_response(
+ _response("resp_empty"),
+ [
+ _item("msg_001", "one"),
+ _item("msg_002", "two"),
+ ],
+ history_item_ids=None,
+ )
+ )
+
+ items = asyncio.run(provider.get_input_items("resp_empty", ascending=True, after="msg_002"))
+
+ assert items == []
+
+
+def test_provider_input_items__returns_history_only_items_when_current_input_is_empty() -> None:
+ provider = InMemoryResponseProvider()
+
+ asyncio.run(
+ provider.create_response(
+ _response("resp_base"),
+ [
+ _item("msg_hist_001", "history-1"),
+ _item("msg_hist_002", "history-2"),
+ ],
+ history_item_ids=None,
+ )
+ )
+
+ asyncio.run(
+ provider.create_response(
+ _response("resp_history_only"),
+ [],
+ history_item_ids=["msg_hist_001", "msg_hist_002"],
+ )
+ )
+
+ items = asyncio.run(provider.get_input_items("resp_history_only", ascending=True))
+
+ assert _ids(items) == ["msg_hist_001", "msg_hist_002"]
+
+
+def test_provider_input_items__returns_current_only_items_when_no_history() -> None:
+ provider = InMemoryResponseProvider()
+
+ asyncio.run(
+ provider.create_response(
+ _response("resp_current_only"),
+ [
+ _item("msg_curr_001", "current-1"),
+ _item("msg_curr_002", "current-2"),
+ ],
+ history_item_ids=None,
+ )
+ )
+
+ items = asyncio.run(provider.get_input_items("resp_current_only", ascending=True))
+
+ assert _ids(items) == ["msg_curr_001", "msg_curr_002"]
+
+
+def test_provider_input_items__respects_limit_boundaries_1_and_100() -> None:
+ provider = InMemoryResponseProvider()
+
+ asyncio.run(
+ provider.create_response(
+ _response("resp_limits"),
+ [_item(f"msg_{index:03d}", f"item-{index:03d}") for index in range(1, 151)],
+ history_item_ids=None,
+ )
+ )
+
+ one_item = asyncio.run(provider.get_input_items("resp_limits", ascending=True, limit=1))
+ hundred_items = asyncio.run(provider.get_input_items("resp_limits", ascending=True, limit=100))
+
+ assert len(one_item) == 1
+ assert _ids(one_item) == ["msg_001"]
+ assert len(hundred_items) == 100
+ assert _ids(hundred_items)[0] == "msg_001"
+ assert _ids(hundred_items)[-1] == "msg_100"
+
+
+def test_provider_input_items__raises_for_deleted_and_missing_response() -> None:
+ provider = InMemoryResponseProvider()
+
+ asyncio.run(
+ provider.create_response(
+ _response("resp_deleted"),
+ [_item("msg_001", "one")],
+ history_item_ids=None,
+ )
+ )
+
+ asyncio.run(provider.delete_response("resp_deleted"))
+
+ with pytest.raises(ValueError):
+ asyncio.run(provider.get_input_items("resp_deleted"))
+
+ with pytest.raises(KeyError):
+ asyncio.run(provider.get_input_items("resp_missing"))
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_lifecycle_state_machine.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_lifecycle_state_machine.py
new file mode 100644
index 000000000000..e3a7d7ab46fa
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_lifecycle_state_machine.py
@@ -0,0 +1,86 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Unit tests for lifecycle event state machine normalization."""
+
+from __future__ import annotations
+
+import pytest
+
+from azure.ai.agentserver.responses.streaming._state_machine import (
+ LifecycleStateMachineError,
+ normalize_lifecycle_events,
+)
+
+
+def test_lifecycle_state_machine__requires_response_created_as_first_event() -> None:
+ with pytest.raises(LifecycleStateMachineError):
+ normalize_lifecycle_events(
+ response_id="resp_123",
+ events=[
+ {
+ "type": "response.in_progress",
+ "response": {"status": "in_progress"},
+ }
+ ],
+ )
+
+
+def test_lifecycle_state_machine__rejects_multiple_terminal_events() -> None:
+ with pytest.raises(LifecycleStateMachineError):
+ normalize_lifecycle_events(
+ response_id="resp_123",
+ events=[
+ {"type": "response.created", "response": {"status": "queued"}},
+ {"type": "response.completed", "response": {"status": "completed"}},
+ {"type": "response.failed", "response": {"status": "failed"}},
+ ],
+ )
+
+
+def test_lifecycle_state_machine__auto_appends_failed_when_terminal_missing() -> None:
+ normalized = normalize_lifecycle_events(
+ response_id="resp_123",
+ events=[
+ {"type": "response.created", "response": {"status": "queued"}},
+ {"type": "response.in_progress", "response": {"status": "in_progress"}},
+ ],
+ )
+
+ assert normalized[-1]["type"] == "response.failed"
+ assert normalized[-1]["response"]["status"] == "failed"
+
+
+def test_lifecycle_state_machine__rejects_out_of_order_transitions() -> None:
+ with pytest.raises(LifecycleStateMachineError):
+ normalize_lifecycle_events(
+ response_id="resp_123",
+ events=[
+ {"type": "response.created", "response": {"status": "queued"}},
+ {"type": "response.completed", "response": {"status": "completed"}},
+ {"type": "response.in_progress", "response": {"status": "in_progress"}},
+ ],
+ )
+
+
+def test_lifecycle_state_machine__returns_deep_copied_response_snapshots() -> None:
+ original_events = [
+ {
+ "type": "response.created",
+ "response": {
+ "status": "queued",
+ "metadata": {"nested": "before"},
+ },
+ },
+ {
+ "type": "response.completed",
+ "response": {
+ "status": "completed",
+ "metadata": {"nested": "before"},
+ },
+ },
+ ]
+
+ normalized = normalize_lifecycle_events(response_id="resp_123", events=original_events)
+
+ original_events[0]["response"]["metadata"]["nested"] = "after"
+ assert normalized[0]["response"]["metadata"]["nested"] == "before"
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_observability.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_observability.py
new file mode 100644
index 000000000000..d578db78615b
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_observability.py
@@ -0,0 +1,197 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Unit tests for observability helpers."""
+
+from __future__ import annotations
+
+from types import SimpleNamespace
+
+from azure.ai.agentserver.responses.hosting._observability import (
+ InMemoryCreateSpanHook,
+ build_create_otel_attrs,
+ build_create_span_tags,
+ build_platform_server_header,
+ extract_request_id,
+ start_create_span,
+)
+
+
+def test_observability__build_platform_server_header_includes_extra_identity() -> None:
+ value = build_platform_server_header(
+ sdk_name="azure-ai-agentserver-responses",
+ version="0.1.0",
+ runtime="python/3.11",
+ extra="integration-suite",
+ )
+
+ assert value == "azure-ai-agentserver-responses/0.1.0 (python/3.11) integration-suite"
+
+
+def test_observability__start_create_span_records_single_lifecycle_event() -> None:
+ hook = InMemoryCreateSpanHook()
+ span = start_create_span(
+ "create_response",
+ {"service.name": "svc", "gen_ai.operation.name": "create_response"},
+ hook=hook,
+ )
+
+ span.set_tag("gen_ai.response.id", "resp_123")
+ span.end()
+ span.end() # idempotent
+
+ assert len(hook.spans) == 1
+ assert hook.spans[0].name == "create_response"
+ assert hook.spans[0].tags["gen_ai.response.id"] == "resp_123"
+ assert hook.spans[0].ended_at is not None
+
+
+def test_observability__build_create_span_tags_uses_agent_name_and_model() -> None:
+ ctx = SimpleNamespace(
+ response_id="resp_abc",
+ model="gpt-4o-mini",
+ agent_reference={"name": "agent-one", "version": "v1"},
+ conversation_id=None,
+ stream=False,
+ )
+ tags = build_create_span_tags(ctx)
+
+ assert tags["service.name"] == "azure.ai.agentserver"
+ assert tags["gen_ai.operation.name"] == "invoke_agent"
+ assert tags["gen_ai.provider.name"] == "AzureAI Hosted Agents"
+ assert tags["gen_ai.response.id"] == "resp_abc"
+ assert tags["gen_ai.request.model"] == "gpt-4o-mini"
+ assert tags["gen_ai.agent.name"] == "agent-one"
+ assert tags["gen_ai.agent.id"] == "agent-one:v1"
+ assert tags["gen_ai.agent.version"] == "v1"
+
+
+def test_observability__build_create_span_tags_includes_conversation_id() -> None:
+ ctx = SimpleNamespace(
+ response_id="resp_xyz",
+ model="gpt-4o",
+ agent_reference=None,
+ conversation_id="conv_123",
+ stream=False,
+ )
+ tags = build_create_span_tags(ctx)
+
+ assert tags["gen_ai.conversation.id"] == "conv_123"
+ assert "gen_ai.agent.version" not in tags
+
+
+def test_observability__build_create_span_tags_omits_conversation_id_when_absent() -> None:
+ ctx = SimpleNamespace(
+ response_id="resp_xyz",
+ model="gpt-4o",
+ agent_reference=None,
+ conversation_id=None,
+ stream=False,
+ )
+ tags = build_create_span_tags(ctx)
+
+ assert "gen_ai.conversation.id" not in tags
+
+
+def test_observability__build_create_span_tags_omits_agent_version_when_absent() -> None:
+ ctx = SimpleNamespace(
+ response_id="resp_xyz",
+ model="gpt-4o",
+ agent_reference={"name": "my-agent"},
+ conversation_id=None,
+ stream=False,
+ )
+ tags = build_create_span_tags(ctx)
+
+ assert tags["gen_ai.agent.name"] == "my-agent"
+ assert tags["gen_ai.agent.id"] == ""
+ assert "gen_ai.agent.version" not in tags
+
+
+def test_observability__build_create_span_tags_includes_request_id() -> None:
+ ctx = SimpleNamespace(
+ response_id="resp_r",
+ model="gpt-4o",
+ agent_reference=None,
+ conversation_id=None,
+ stream=False,
+ )
+ tags = build_create_span_tags(ctx, request_id="req-1")
+
+ assert tags["request.id"] == "req-1"
+
+
+def test_observability__build_create_span_tags_omits_request_id_when_absent() -> None:
+ ctx = SimpleNamespace(
+ response_id="resp_r",
+ model="gpt-4o",
+ agent_reference=None,
+ conversation_id=None,
+ stream=False,
+ )
+ tags = build_create_span_tags(ctx)
+
+ assert "request.id" not in tags
+
+
+# ---------------------------------------------------------------------------
+# extract_request_id
+# ---------------------------------------------------------------------------
+
+
+def test_observability__extract_request_id_returns_value_from_header() -> None:
+ assert extract_request_id({"x-request-id": "req-abc"}) == "req-abc"
+
+
+def test_observability__extract_request_id_truncates_to_256_chars() -> None:
+ long_id = "z" * 300
+ result = extract_request_id({"x-request-id": long_id})
+ assert result == "z" * 256
+
+
+def test_observability__extract_request_id_returns_none_when_absent() -> None:
+ assert extract_request_id({}) is None
+
+
+# ---------------------------------------------------------------------------
+# build_create_otel_attrs
+# ---------------------------------------------------------------------------
+
+
+def test_observability__build_create_otel_attrs_includes_all_fields() -> None:
+ ctx = SimpleNamespace(
+ response_id="resp_1",
+ model="gpt-4o",
+ agent_reference={"name": "bot", "version": "2.0"},
+ conversation_id="conv_x",
+ stream=False,
+ )
+ attrs = build_create_otel_attrs(ctx, request_id="req-1")
+
+ assert attrs["gen_ai.response.id"] == "resp_1"
+ assert attrs["service.name"] == "azure.ai.agentserver"
+ assert attrs["gen_ai.provider.name"] == "AzureAI Hosted Agents"
+ assert attrs["gen_ai.operation.name"] == "invoke_agent"
+ assert attrs["gen_ai.request.model"] == "gpt-4o"
+ assert attrs["gen_ai.conversation.id"] == "conv_x"
+ assert attrs["gen_ai.agent.name"] == "bot"
+ assert attrs["gen_ai.agent.id"] == "bot:2.0"
+ assert attrs["gen_ai.agent.version"] == "2.0"
+ assert attrs["request.id"] == "req-1"
+
+
+def test_observability__build_create_otel_attrs_omits_optional_fields_when_absent() -> None:
+ ctx = SimpleNamespace(
+ response_id="resp_2",
+ model=None,
+ agent_reference=None,
+ conversation_id=None,
+ stream=False,
+ )
+ attrs = build_create_otel_attrs(ctx, request_id=None)
+
+ assert "gen_ai.conversation.id" not in attrs
+ assert "gen_ai.agent.name" not in attrs
+ assert attrs["gen_ai.agent.id"] == ""
+ assert "gen_ai.agent.version" not in attrs
+ assert "request.id" not in attrs
+ assert attrs["gen_ai.request.model"] == ""
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_options.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_options.py
new file mode 100644
index 000000000000..175598a46eec
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_options.py
@@ -0,0 +1,72 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Unit tests for server options behavior."""
+
+from __future__ import annotations
+
+import pytest
+
+from azure.ai.agentserver.responses._options import ResponsesServerOptions
+
+
+def test_options__defaults_match_public_contract() -> None:
+ options = ResponsesServerOptions()
+
+ assert options.default_fetch_history_count == 100
+ assert options.default_model is None
+ assert options.additional_server_version is None
+ assert options.sse_keep_alive_enabled is False
+
+
+def test_options__environment_values_override_defaults() -> None:
+ options = ResponsesServerOptions.from_env(
+ {
+ "DEFAULT_FETCH_HISTORY_ITEM_COUNT": "42",
+ }
+ )
+
+ assert options.default_fetch_history_count == 42
+
+
+def test_options__sse_keep_alive_set_via_constructor() -> None:
+ """SSE keep-alive is set explicitly via the constructor, not from_env()."""
+ options = ResponsesServerOptions(sse_keep_alive_interval_seconds=12)
+ assert options.sse_keep_alive_interval_seconds == 12
+ assert options.sse_keep_alive_enabled is True
+
+
+def test_options__invalid_boundary_values_fail_fast() -> None:
+ with pytest.raises(ValueError):
+ ResponsesServerOptions(default_fetch_history_count=0)
+
+ with pytest.raises(ValueError):
+ ResponsesServerOptions(sse_keep_alive_interval_seconds=0)
+
+ with pytest.raises(ValueError):
+ ResponsesServerOptions.from_env({"DEFAULT_FETCH_HISTORY_ITEM_COUNT": "-1"})
+
+
+def test_options__spec_environment_variable_names_are_supported() -> None:
+ """Verify the spec-aligned env var names."""
+ options = ResponsesServerOptions.from_env(
+ {
+ "DEFAULT_FETCH_HISTORY_ITEM_COUNT": "55",
+ }
+ )
+
+ assert options.default_fetch_history_count == 55
+ # SSE keep-alive is resolved from AgentConfig, not from_env()
+ assert options.sse_keep_alive_interval_seconds is None
+
+
+def test_options__legacy_environment_variable_names_are_ignored() -> None:
+ """Old prefixed names must NOT be picked up."""
+ options = ResponsesServerOptions.from_env(
+ {
+ "AZURE_AI_RESPONSES_SERVER_DEFAULT_FETCH_HISTORY_ITEM_COUNT": "42",
+ }
+ )
+
+ assert options.default_model is None
+ assert options.default_fetch_history_count == 100
+ assert options.sse_keep_alive_interval_seconds is None
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_response_context_input_items.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_response_context_input_items.py
new file mode 100644
index 000000000000..84b4e0d11557
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_response_context_input_items.py
@@ -0,0 +1,368 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Unit tests for ResponseContext.get_input_items() item-reference resolution."""
+
+from __future__ import annotations
+
+from typing import Any
+from unittest.mock import AsyncMock
+
+import pytest
+
+from azure.ai.agentserver.responses._response_context import IsolationContext, ResponseContext
+from azure.ai.agentserver.responses.models._generated import (
+ CreateResponse,
+ Item,
+ ItemMessage,
+ ItemReferenceParam,
+ MessageContentInputTextContent,
+ MessageRole,
+ OutputItemMessage,
+)
+from azure.ai.agentserver.responses.models._helpers import to_output_item
+from azure.ai.agentserver.responses.models.runtime import ResponseModeFlags
+
+
+def _mode_flags() -> ResponseModeFlags:
+ return ResponseModeFlags(stream=True, store=True, background=False)
+
+
+def _mock_provider(**overrides: Any) -> Any:
+ """Create a mock provider with default stubs."""
+ provider = AsyncMock()
+ provider.get_items = AsyncMock(return_value=overrides.get("get_items_return", []))
+ return provider
+
+
+def _make_request(inp: Any) -> CreateResponse:
+ """Build a minimal CreateResponse with the given input."""
+ return CreateResponse(model="test-model", input=inp)
+
+
+# ------------------------------------------------------------------
+# Basic: no references — items pass through as-is
+# ------------------------------------------------------------------
+
+
+@pytest.mark.asyncio
+async def test_get_input_items__no_references_passes_through() -> None:
+ """Inline items are returned as Item subtypes (ItemMessage)."""
+ msg = ItemMessage(role=MessageRole.USER, content=[MessageContentInputTextContent(text="hello")])
+ request = _make_request([msg])
+ ctx = ResponseContext(
+ response_id="resp_001",
+ mode_flags=_mode_flags(),
+ request=request,
+ input_items=[msg],
+ )
+
+ items = await ctx.get_input_items()
+
+ assert len(items) == 1
+ assert isinstance(items[0], ItemMessage)
+ assert isinstance(items[0], Item)
+ assert items[0].role == MessageRole.USER
+
+
+# ------------------------------------------------------------------
+# Reference resolution: single reference
+# ------------------------------------------------------------------
+
+
+@pytest.mark.asyncio
+async def test_get_input_items__resolves_single_reference() -> None:
+ """A single ItemReferenceParam is resolved and converted to an Item subtype."""
+ ref = ItemReferenceParam(id="item_abc")
+ resolved_item = OutputItemMessage(id="item_abc", role="assistant", content=[], status="completed")
+ provider = _mock_provider(get_items_return=[resolved_item])
+
+ request = _make_request([ref])
+ ctx = ResponseContext(
+ response_id="resp_002",
+ mode_flags=_mode_flags(),
+ request=request,
+ input_items=[ref],
+ provider=provider,
+ )
+
+ items = await ctx.get_input_items()
+
+ assert len(items) == 1
+ # Resolved via to_item(): OutputItemMessage → ItemMessage
+ assert isinstance(items[0], ItemMessage)
+ assert items[0].role == "assistant"
+ provider.get_items.assert_awaited_once_with(["item_abc"], isolation=ctx.isolation)
+
+
+# ------------------------------------------------------------------
+# Reference resolution: mixed inline + references
+# ------------------------------------------------------------------
+
+
+@pytest.mark.asyncio
+async def test_get_input_items__mixed_inline_and_references() -> None:
+ """Inline items and references are interleaved; references are resolved in-place."""
+ inline_msg = ItemMessage(role=MessageRole.USER, content=[MessageContentInputTextContent(text="hi")])
+ ref1 = ItemReferenceParam(id="item_111")
+ ref2 = ItemReferenceParam(id="item_222")
+ resolved1 = OutputItemMessage(id="item_111", role="assistant", content=[], status="completed")
+ resolved2 = OutputItemMessage(id="item_222", role="user", content=[], status="completed")
+ provider = _mock_provider(get_items_return=[resolved1, resolved2])
+
+ request = _make_request([inline_msg, ref1, ref2])
+ ctx = ResponseContext(
+ response_id="resp_003",
+ mode_flags=_mode_flags(),
+ request=request,
+ input_items=[inline_msg, ref1, ref2],
+ provider=provider,
+ )
+
+ items = await ctx.get_input_items()
+
+ # inline passed through as Item, references resolved via to_item()
+ assert len(items) == 3
+ assert isinstance(items[0], ItemMessage)
+ assert isinstance(items[1], ItemMessage) # resolved from OutputItemMessage
+ assert items[1].role == "assistant"
+ assert isinstance(items[2], ItemMessage) # resolved from OutputItemMessage
+ assert items[2].role == "user"
+
+
+# ------------------------------------------------------------------
+# Unresolvable references are dropped
+# ------------------------------------------------------------------
+
+
+@pytest.mark.asyncio
+async def test_get_input_items__unresolvable_references_dropped() -> None:
+ """References that resolve to None are silently dropped."""
+ ref1 = ItemReferenceParam(id="item_exists")
+ ref2 = ItemReferenceParam(id="item_missing")
+ resolved1 = OutputItemMessage(id="item_exists", role="assistant", content=[], status="completed")
+ provider = _mock_provider(get_items_return=[resolved1, None])
+
+ request = _make_request([ref1, ref2])
+ ctx = ResponseContext(
+ response_id="resp_004",
+ mode_flags=_mode_flags(),
+ request=request,
+ input_items=[ref1, ref2],
+ provider=provider,
+ )
+
+ items = await ctx.get_input_items()
+
+ assert len(items) == 1
+ assert isinstance(items[0], ItemMessage) # resolved via to_item()
+
+
+# ------------------------------------------------------------------
+# No provider — references returned as-is (no resolution)
+# ------------------------------------------------------------------
+
+
+@pytest.mark.asyncio
+async def test_get_input_items__no_provider_no_resolution() -> None:
+ """Without a provider, ItemReferenceParam entries are silently dropped (unresolvable)."""
+ inline_msg = ItemMessage(role=MessageRole.USER, content=[MessageContentInputTextContent(text="hi")])
+ ref = ItemReferenceParam(id="item_xyz")
+
+ request = _make_request([inline_msg, ref])
+ ctx = ResponseContext(
+ response_id="resp_005",
+ mode_flags=_mode_flags(),
+ request=request,
+ input_items=[inline_msg, ref],
+ # no provider
+ )
+
+ items = await ctx.get_input_items()
+
+ # inline item returned as Item subtype; reference placeholder is dropped
+ assert len(items) == 1
+ assert isinstance(items[0], ItemMessage)
+
+
+# ------------------------------------------------------------------
+# Caching: second call returns cached result without re-resolving
+# ------------------------------------------------------------------
+
+
+@pytest.mark.asyncio
+async def test_get_input_items__caches_result() -> None:
+ """Calling get_input_items() twice returns the cached result."""
+ ref = ItemReferenceParam(id="item_cache")
+ resolved = OutputItemMessage(id="item_cache", role="assistant", content=[], status="completed")
+ provider = _mock_provider(get_items_return=[resolved])
+
+ request = _make_request([ref])
+ ctx = ResponseContext(
+ response_id="resp_006",
+ mode_flags=_mode_flags(),
+ request=request,
+ input_items=[ref],
+ provider=provider,
+ )
+
+ first = await ctx.get_input_items()
+ second = await ctx.get_input_items()
+
+ assert first is second
+ # Provider should only be called once
+ assert provider.get_items.await_count == 1
+
+
+# ------------------------------------------------------------------
+# String input is expanded to ItemMessage
+# ------------------------------------------------------------------
+
+
+@pytest.mark.asyncio
+async def test_get_input_items__string_input_expanded() -> None:
+ """A plain string input is normalized to an ItemMessage via get_input_expanded."""
+ request = _make_request("Hello world")
+ ctx = ResponseContext(
+ response_id="resp_007",
+ mode_flags=_mode_flags(),
+ request=request,
+ input_items=["Hello world"], # type: ignore[list-item]
+ )
+
+ items = await ctx.get_input_items()
+
+ assert len(items) == 1
+ assert isinstance(items[0], ItemMessage)
+ assert items[0].role == MessageRole.USER
+
+
+# ------------------------------------------------------------------
+# Empty input returns empty tuple
+# ------------------------------------------------------------------
+
+
+@pytest.mark.asyncio
+async def test_get_input_items__empty_input() -> None:
+ """Empty input returns an empty tuple."""
+ request = _make_request([])
+ ctx = ResponseContext(
+ response_id="resp_008",
+ mode_flags=_mode_flags(),
+ request=request,
+ input_items=[],
+ )
+
+ items = await ctx.get_input_items()
+
+ assert items == ()
+
+
+# ------------------------------------------------------------------
+# Isolation context is forwarded to provider
+# ------------------------------------------------------------------
+
+
+@pytest.mark.asyncio
+async def test_get_input_items__forwards_isolation() -> None:
+ """Isolation context is passed through to provider.get_items()."""
+ ref = ItemReferenceParam(id="item_iso")
+ resolved = OutputItemMessage(id="item_iso", role="assistant", content=[], status="completed")
+ provider = _mock_provider(get_items_return=[resolved])
+ isolation = IsolationContext(user_key="user_123", chat_key="chat_456")
+
+ request = _make_request([ref])
+ ctx = ResponseContext(
+ response_id="resp_009",
+ mode_flags=_mode_flags(),
+ request=request,
+ input_items=[ref],
+ provider=provider,
+ isolation=isolation,
+ )
+
+ items = await ctx.get_input_items()
+
+ assert len(items) == 1
+ assert isinstance(items[0], ItemMessage) # resolved via to_item()
+ provider.get_items.assert_awaited_once_with(["item_iso"], isolation=isolation)
+
+
+# ------------------------------------------------------------------
+# All references unresolvable — empty result
+# ------------------------------------------------------------------
+
+
+@pytest.mark.asyncio
+async def test_get_input_items__all_references_unresolvable() -> None:
+ """When all references resolve to None, result is empty."""
+ ref1 = ItemReferenceParam(id="item_gone1")
+ ref2 = ItemReferenceParam(id="item_gone2")
+ provider = _mock_provider(get_items_return=[None, None])
+
+ request = _make_request([ref1, ref2])
+ ctx = ResponseContext(
+ response_id="resp_010",
+ mode_flags=_mode_flags(),
+ request=request,
+ input_items=[ref1, ref2],
+ provider=provider,
+ )
+
+ items = await ctx.get_input_items()
+
+ assert items == ()
+
+
+# ------------------------------------------------------------------
+# Order is preserved: inline, resolved ref, inline
+# ------------------------------------------------------------------
+
+
+@pytest.mark.asyncio
+async def test_get_input_items__preserves_order() -> None:
+ """Order of inline items and resolved references matches input order."""
+ msg1 = ItemMessage(role=MessageRole.USER, content=[MessageContentInputTextContent(text="first")])
+ ref = ItemReferenceParam(id="item_mid")
+ msg2 = ItemMessage(role=MessageRole.USER, content=[MessageContentInputTextContent(text="last")])
+ resolved = OutputItemMessage(id="item_mid", role="assistant", content=[], status="completed")
+ provider = _mock_provider(get_items_return=[resolved])
+
+ request = _make_request([msg1, ref, msg2])
+ ctx = ResponseContext(
+ response_id="resp_011",
+ mode_flags=_mode_flags(),
+ request=request,
+ input_items=[msg1, ref, msg2],
+ provider=provider,
+ )
+
+ items = await ctx.get_input_items()
+
+ assert len(items) == 3
+ assert isinstance(items[0], ItemMessage)
+ assert isinstance(items[1], ItemMessage) # resolved via to_item()
+ assert items[1].role == "assistant"
+ assert isinstance(items[2], ItemMessage)
+
+
+# ------------------------------------------------------------------
+# to_output_item: unit tests for the conversion function
+# ------------------------------------------------------------------
+
+
+def test_to_output_item__converts_item_message() -> None:
+ """ItemMessage is converted to OutputItemMessage with generated ID."""
+ msg = ItemMessage(role=MessageRole.USER, content=[MessageContentInputTextContent(text="hello")])
+ result = to_output_item(msg, "resp_123")
+ assert result is not None
+ assert isinstance(result, OutputItemMessage)
+ assert result.id.startswith("msg_")
+ assert result.status == "completed"
+ assert result.role == MessageRole.USER
+
+
+def test_to_output_item__returns_none_for_reference() -> None:
+ """ItemReferenceParam is non-convertible — returns None."""
+ ref = ItemReferenceParam(id="item_abc")
+ result = to_output_item(ref)
+ assert result is None
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_response_event_stream_builder.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_response_event_stream_builder.py
new file mode 100644
index 000000000000..78f388902273
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_response_event_stream_builder.py
@@ -0,0 +1,246 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Unit tests for the response event stream APIs."""
+
+from __future__ import annotations
+
+import pytest
+
+from azure.ai.agentserver.responses._id_generator import IdGenerator
+from azure.ai.agentserver.responses.models import _generated as generated_models
+from azure.ai.agentserver.responses.streaming._event_stream import ResponseEventStream
+from azure.ai.agentserver.responses.streaming._state_machine import LifecycleStateMachineError
+
+
+def test_event_stream_builder__builds_lifecycle_events() -> None:
+ stream = ResponseEventStream(
+ response_id="resp_builder_12345",
+ agent_reference={"type": "agent_reference", "name": "unit-agent"},
+ model="gpt-4o-mini",
+ )
+
+ events = [
+ stream.emit_created(status="queued"),
+ stream.emit_in_progress(),
+ stream.emit_completed(),
+ ]
+
+ assert [event["type"] for event in events] == [
+ "response.created",
+ "response.in_progress",
+ "response.completed",
+ ]
+ assert [event["sequence_number"] for event in events] == [0, 1, 2]
+ assert all(event["response"]["response_id"] == "resp_builder_12345" for event in events)
+ assert all(event["response"]["agent_reference"]["name"] == "unit-agent" for event in events)
+
+
+def test_event_stream_builder__builds_output_item_events() -> None:
+ stream = ResponseEventStream(response_id="resp_builder_output_12345")
+ message = stream.add_output_item_message()
+ text = message.add_text_content()
+
+ events = [
+ stream.emit_created(status="queued"),
+ stream.emit_in_progress(),
+ message.emit_added(),
+ text.emit_added(),
+ text.emit_delta("hello"),
+ text.emit_done(),
+ message.emit_content_done(text),
+ message.emit_done(),
+ stream.emit_completed(),
+ ]
+
+ event_types = [event["type"] for event in events]
+ assert "response.output_item.added" in event_types
+ assert "response.output_text.delta" in event_types
+ assert "response.output_item.done" in event_types
+
+
+def test_event_stream_builder__output_item_added_returns_event_immediately() -> None:
+ stream = ResponseEventStream(
+ response_id="resp_builder_incremental_12345",
+ agent_reference={"type": "agent_reference", "name": "unit-agent"},
+ model="gpt-4o-mini",
+ )
+ stream.emit_created(status="queued")
+ stream.emit_in_progress()
+ message = stream.add_output_item_message()
+
+ emitted = message.emit_added()
+
+ assert emitted["type"] == "response.output_item.added"
+ assert emitted["output_index"] == 0
+ assert emitted["item"]["id"] == message.item_id
+ assert emitted["item"]["type"] == "message"
+ # B20/B21: response_id and agent_reference must be stamped on output items
+ assert emitted["item"]["response_id"] == "resp_builder_incremental_12345"
+ assert emitted["item"]["agent_reference"] == {"name": "unit-agent", "type": "agent_reference"}
+ assert emitted["sequence_number"] == 2
+
+
+def test_event_stream_builder__rejects_illegal_output_item_sequence() -> None:
+ stream = ResponseEventStream(response_id="resp_builder_bad_12345")
+ stream.emit_created(status="queued")
+ stream.emit_in_progress()
+ message = stream.add_output_item_message()
+
+ with pytest.raises(ValueError):
+ message.emit_done()
+
+
+def test_event_stream_builder__rejects_invalid_global_stream_order() -> None:
+ with pytest.raises(LifecycleStateMachineError):
+ stream = ResponseEventStream(response_id="resp_builder_bad_order_12345")
+ stream.emit_created(status="queued")
+ stream.emit_in_progress()
+ message = stream.add_output_item_message()
+ text = message.add_text_content()
+ message.emit_added()
+ stream.emit_completed()
+ text.emit_added()
+ text.emit_done()
+ message.emit_content_done(text)
+ message.emit_done()
+
+
+def test_event_stream_builder__emit_completed_accepts_usage_and_sets_terminal_fields() -> None:
+ stream = ResponseEventStream(response_id="resp_builder_completed_params")
+ stream.emit_created(status="in_progress")
+
+ message = stream.add_output_item_message()
+ message.emit_added()
+ text = message.add_text_content()
+ text.emit_added()
+ text.emit_delta("hello")
+ text.emit_done()
+ message.emit_content_done(text)
+ message.emit_done()
+
+ usage = {
+ "input_tokens": 1,
+ "input_tokens_details": {"cached_tokens": 0},
+ "output_tokens": 2,
+ "output_tokens_details": {"reasoning_tokens": 0},
+ "total_tokens": 3,
+ }
+
+ completed = stream.emit_completed(usage=usage)
+
+ assert completed["type"] == "response.completed"
+ assert completed["response"]["status"] == "completed"
+ assert completed["response"]["usage"]["total_tokens"] == 3
+ assert isinstance(completed["response"]["completed_at"], int)
+
+
+def test_event_stream_builder__emit_failed_accepts_error_and_usage() -> None:
+ stream = ResponseEventStream(response_id="resp_builder_failed_params")
+ stream.emit_created(status="in_progress")
+
+ usage = {
+ "input_tokens": 4,
+ "input_tokens_details": {"cached_tokens": 0},
+ "output_tokens": 5,
+ "output_tokens_details": {"reasoning_tokens": 0},
+ "total_tokens": 9,
+ }
+
+ failed = stream.emit_failed(code="server_error", message="boom", usage=usage)
+
+ assert failed["type"] == "response.failed"
+ assert failed["response"]["status"] == "failed"
+ assert failed["response"]["error"]["code"] == "server_error"
+ assert failed["response"]["error"]["message"] == "boom"
+ assert failed["response"]["usage"]["total_tokens"] == 9
+ assert failed["response"].get("completed_at") is None
+
+
+def test_event_stream_builder__emit_incomplete_accepts_reason_and_usage() -> None:
+ stream = ResponseEventStream(response_id="resp_builder_incomplete_params")
+ stream.emit_created(status="in_progress")
+
+ usage = {
+ "input_tokens": 2,
+ "input_tokens_details": {"cached_tokens": 0},
+ "output_tokens": 3,
+ "output_tokens_details": {"reasoning_tokens": 0},
+ "total_tokens": 5,
+ }
+
+ incomplete = stream.emit_incomplete(reason="max_output_tokens", usage=usage)
+
+ assert incomplete["type"] == "response.incomplete"
+ assert incomplete["response"]["status"] == "incomplete"
+ assert incomplete["response"]["incomplete_details"]["reason"] == "max_output_tokens"
+ assert incomplete["response"]["usage"]["total_tokens"] == 5
+ assert incomplete["response"].get("completed_at") is None
+
+
+def test_event_stream_builder__add_output_item_generic_emits_added_and_done() -> None:
+ stream = ResponseEventStream(response_id="resp_builder_generic_item")
+ stream.emit_created(status="in_progress")
+
+ item_id = IdGenerator.new_computer_call_output_item_id("resp_builder_generic_item")
+ builder = stream.add_output_item(item_id)
+ added_item = {
+ "id": item_id,
+ "type": "computer_call_output",
+ "call_id": "call_1",
+ "output": {"type": "computer_screenshot", "image_url": "https://example.com/1.png"},
+ "status": "in_progress",
+ }
+ done_item = {
+ "id": item_id,
+ "type": "computer_call_output",
+ "call_id": "call_1",
+ "output": {"type": "computer_screenshot", "image_url": "https://example.com/2.png"},
+ "status": "completed",
+ }
+
+ added = builder.emit_added(added_item)
+ done = builder.emit_done(done_item)
+
+ assert added["type"] == "response.output_item.added"
+ assert added["output_index"] == 0
+ assert done["type"] == "response.output_item.done"
+ assert done["item"]["status"] == "completed"
+
+
+def test_event_stream_builder__constructor_accepts_seed_response() -> None:
+ seed_response = generated_models.ResponseObject(
+ {
+ "id": "resp_builder_seed_response",
+ "object": "response",
+ "output": [],
+ "model": "gpt-4o-mini",
+ "metadata": {"source": "seed"},
+ }
+ )
+
+ stream = ResponseEventStream(response=seed_response)
+ created = stream.emit_created()
+
+ assert created["response"]["id"] == "resp_builder_seed_response"
+ assert created["response"]["model"] == "gpt-4o-mini"
+ assert created["response"]["metadata"] == {"source": "seed"}
+
+
+def test_event_stream_builder__constructor_accepts_request_seed_fields() -> None:
+ request = generated_models.CreateResponse(
+ {
+ "model": "gpt-4o-mini",
+ "background": True,
+ "metadata": {"tag": "seeded"},
+ "previous_response_id": "resp_prev_seed",
+ }
+ )
+
+ stream = ResponseEventStream(response_id="resp_builder_seed_request", request=request)
+ created = stream.emit_created()
+
+ assert created["response"]["id"] == "resp_builder_seed_request"
+ assert created["response"]["model"] == "gpt-4o-mini"
+ assert created["response"]["background"] is True
+ assert created["response"]["previous_response_id"] == "resp_prev_seed"
+ assert created["response"]["metadata"] == {"tag": "seeded"}
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_response_execution.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_response_execution.py
new file mode 100644
index 000000000000..5f8bfcaf9952
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_response_execution.py
@@ -0,0 +1,262 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Unit tests for ResponseExecution fields, properties, apply_event, and build_cancelled_response."""
+
+from __future__ import annotations
+
+import asyncio
+
+import pytest
+
+from azure.ai.agentserver.responses.models.runtime import (
+ ResponseExecution,
+ ResponseModeFlags,
+ build_cancelled_response,
+)
+
+# ---------------------------------------------------------------------------
+# Helpers
+# ---------------------------------------------------------------------------
+
+
+def _make_execution(**kwargs) -> ResponseExecution:
+ defaults = dict(
+ response_id="caresp_test000000000000000000000000",
+ mode_flags=ResponseModeFlags(stream=False, store=True, background=False),
+ )
+ defaults.update(kwargs)
+ return ResponseExecution(**defaults)
+
+
+# ---------------------------------------------------------------------------
+# T1 – transition_to valid
+# ---------------------------------------------------------------------------
+
+
+def test_transition_to_valid() -> None:
+ execution = _make_execution(status="queued")
+ execution.transition_to("in_progress")
+ assert execution.status == "in_progress"
+ assert execution.completed_at is None
+
+
+# ---------------------------------------------------------------------------
+# T2 – transition_to terminal sets completed_at
+# ---------------------------------------------------------------------------
+
+
+def test_transition_to_terminal_sets_completed_at() -> None:
+ execution = _make_execution(status="in_progress")
+ execution.transition_to("completed")
+ assert execution.status == "completed"
+ assert execution.completed_at is not None
+
+
+# ---------------------------------------------------------------------------
+# T3 – transition_to invalid raises ValueError
+# ---------------------------------------------------------------------------
+
+
+def test_transition_invalid_raises() -> None:
+ execution = _make_execution(status="completed")
+ with pytest.raises(ValueError, match="invalid status transition: completed -> in_progress"):
+ execution.transition_to("in_progress")
+
+
+# ---------------------------------------------------------------------------
+# T4 – transition_to same status is a no-op that refreshes updated_at
+# ---------------------------------------------------------------------------
+
+
+def test_transition_same_status_noop() -> None:
+ execution = _make_execution(status="in_progress")
+ before = execution.updated_at
+ execution.transition_to("in_progress")
+ assert execution.status == "in_progress"
+ assert execution.updated_at >= before
+
+
+# ---------------------------------------------------------------------------
+# T5 – replay_enabled is True only for bg+stream+store
+# ---------------------------------------------------------------------------
+
+
+def test_replay_enabled_bg_stream_store() -> None:
+ execution = _make_execution(mode_flags=ResponseModeFlags(stream=True, store=True, background=True))
+ assert execution.replay_enabled is True
+
+
+# ---------------------------------------------------------------------------
+# T6 – replay_enabled is False for non-background
+# ---------------------------------------------------------------------------
+
+
+def test_replay_enabled_false_for_non_bg() -> None:
+ execution = _make_execution(mode_flags=ResponseModeFlags(stream=True, store=True, background=False))
+ assert execution.replay_enabled is False
+
+
+# ---------------------------------------------------------------------------
+# T7 – visible_via_get is True when store=True
+# ---------------------------------------------------------------------------
+
+
+def test_visible_via_get_store_true() -> None:
+ execution = _make_execution(mode_flags=ResponseModeFlags(stream=False, store=True, background=False))
+ assert execution.visible_via_get is True
+
+
+# ---------------------------------------------------------------------------
+# T8 – visible_via_get is False when store=False
+# ---------------------------------------------------------------------------
+
+
+def test_visible_via_get_store_false() -> None:
+ execution = _make_execution(mode_flags=ResponseModeFlags(stream=False, store=False, background=False))
+ assert execution.visible_via_get is False
+
+
+# ---------------------------------------------------------------------------
+# T9 – apply_event with response.completed snapshot updates status and response
+# ---------------------------------------------------------------------------
+
+
+def test_apply_event_response_snapshot_updates_status() -> None:
+ execution = _make_execution(status="in_progress")
+
+ events = [
+ {
+ "type": "response.created",
+ "response": {
+ "id": execution.response_id,
+ "response_id": execution.response_id,
+ "agent_reference": {"name": "test-agent"},
+ "object": "response",
+ "status": "queued",
+ "output": [],
+ },
+ },
+ {
+ "type": "response.completed",
+ "response": {
+ "id": execution.response_id,
+ "response_id": execution.response_id,
+ "agent_reference": {"name": "test-agent"},
+ "object": "response",
+ "status": "completed",
+ "output": [],
+ },
+ },
+ ]
+
+ execution.apply_event(events[-1], events)
+
+ assert execution.status == "completed"
+ assert execution.response is not None
+
+
+# ---------------------------------------------------------------------------
+# T10 – apply_event is a no-op when already cancelled
+# ---------------------------------------------------------------------------
+
+
+def test_apply_event_cancelled_is_noop() -> None:
+ execution = _make_execution(status="cancelled")
+
+ events = [
+ {
+ "type": "response.completed",
+ "response": {
+ "id": execution.response_id,
+ "response_id": execution.response_id,
+ "agent_reference": {},
+ "object": "response",
+ "status": "completed",
+ "output": [],
+ },
+ }
+ ]
+ execution.apply_event(events[0], events)
+
+ assert execution.status == "cancelled"
+ assert execution.response is None
+
+
+# ---------------------------------------------------------------------------
+# T11 – apply_event output_item.added appends item
+# ---------------------------------------------------------------------------
+
+
+def test_apply_event_output_item_added() -> None:
+ from azure.ai.agentserver.responses.models._generated import ResponseObject
+
+ execution = _make_execution(status="in_progress")
+ execution.response = ResponseObject(
+ {
+ "id": execution.response_id,
+ "response_id": execution.response_id,
+ "agent_reference": {},
+ "object": "response",
+ "status": "in_progress",
+ "output": [],
+ }
+ )
+
+ item = {"id": "item_1", "type": "text"}
+ event = {"type": "response.output_item.added", "output_index": 0, "item": item}
+ execution.apply_event(event, [event])
+
+ output = execution.response.get("output", [])
+ assert isinstance(output, list)
+ assert len(output) == 1
+ assert output[0]["id"] == "item_1"
+
+
+# ---------------------------------------------------------------------------
+# T12 – build_cancelled_response
+# ---------------------------------------------------------------------------
+
+
+def test_build_cancelled_response() -> None:
+ response = build_cancelled_response(
+ "caresp_xxx0000000000000000000000000000",
+ {"name": "agent-a"},
+ "gpt-4o",
+ )
+ assert response is not None
+ assert response.get("status") == "cancelled"
+ assert response.get("output") == []
+ assert response.get("id") == "caresp_xxx0000000000000000000000000000"
+
+
+# ---------------------------------------------------------------------------
+# Extra – new fields exist with expected defaults
+# ---------------------------------------------------------------------------
+
+
+def test_new_fields_have_correct_defaults() -> None:
+ execution = _make_execution()
+ assert execution.subject is None
+ assert isinstance(execution.cancel_signal, asyncio.Event)
+ assert execution.input_items == []
+ assert execution.previous_response_id is None
+ assert execution.response_context is None
+
+
+def test_input_items_and_previous_response_id_set() -> None:
+ items = [{"id": "i1", "type": "message"}]
+ execution = _make_execution(
+ input_items=items,
+ previous_response_id="caresp_parent00000000000000000000000",
+ )
+ assert execution.input_items == items
+ assert execution.previous_response_id == "caresp_parent00000000000000000000000"
+
+
+def test_input_items_are_independent_copy() -> None:
+ original = [{"id": "i1"}]
+ execution = _make_execution(input_items=original)
+ original.append({"id": "i2"})
+ # The execution's list is the same reference passed in — plan does not require deep copy at construction
+ # just verify the field is correctly set and is a list
+ assert isinstance(execution.input_items, list)
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_responses_provider_parity.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_responses_provider_parity.py
new file mode 100644
index 000000000000..18aea9d233f2
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_responses_provider_parity.py
@@ -0,0 +1,32 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Parity checks for provider surface method naming."""
+
+from __future__ import annotations
+
+from azure.ai.agentserver.responses.store._base import ResponseProviderProtocol
+from azure.ai.agentserver.responses.store._memory import InMemoryResponseProvider
+
+
+def test_provider_parity__in_memory_class_name_is_canonical() -> None:
+ provider = InMemoryResponseProvider()
+
+ assert isinstance(provider, InMemoryResponseProvider)
+
+
+def test_provider_parity__interface_name_is_responseproviderprotocol() -> None:
+ provider = InMemoryResponseProvider()
+
+ assert isinstance(provider, ResponseProviderProtocol)
+
+
+def test_provider_parity__surface_methods_exist() -> None:
+ provider = InMemoryResponseProvider()
+
+ assert hasattr(provider, "create_response")
+ assert hasattr(provider, "get_response")
+ assert hasattr(provider, "update_response")
+ assert hasattr(provider, "delete_response")
+ assert hasattr(provider, "get_input_items")
+ assert hasattr(provider, "get_items")
+ assert hasattr(provider, "get_history_item_ids")
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_runtime_state.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_runtime_state.py
new file mode 100644
index 000000000000..57ff645d1fd8
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_runtime_state.py
@@ -0,0 +1,252 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Unit tests for _RuntimeState with ResponseExecution values."""
+
+from __future__ import annotations
+
+import pytest
+
+from azure.ai.agentserver.responses.hosting._runtime_state import _RuntimeState
+from azure.ai.agentserver.responses.models._generated import ResponseObject
+from azure.ai.agentserver.responses.models.runtime import ResponseExecution, ResponseModeFlags
+
+# ---------------------------------------------------------------------------
+# Helpers
+# ---------------------------------------------------------------------------
+
+
+def _make_execution(
+ response_id: str,
+ *,
+ store: bool = True,
+ background: bool = False,
+ stream: bool = False,
+ status: str = "queued",
+ input_items: list[dict] | None = None,
+ previous_response_id: str | None = None,
+) -> ResponseExecution:
+ return ResponseExecution(
+ response_id=response_id,
+ mode_flags=ResponseModeFlags(stream=stream, store=store, background=background),
+ status=status, # type: ignore[arg-type]
+ input_items=input_items,
+ previous_response_id=previous_response_id,
+ )
+
+
+# ---------------------------------------------------------------------------
+# T1 – add + get returns the same object
+# ---------------------------------------------------------------------------
+
+
+async def test_add_and_get() -> None:
+ state = _RuntimeState()
+ execution = _make_execution("caresp_aaa0000000000000000000000000000")
+ await state.add(execution)
+ retrieved = await state.get("caresp_aaa0000000000000000000000000000")
+ assert retrieved is execution
+
+
+# ---------------------------------------------------------------------------
+# T2 – get unknown returns None
+# ---------------------------------------------------------------------------
+
+
+async def test_get_nonexistent_returns_none() -> None:
+ state = _RuntimeState()
+ assert await state.get("unknown_id") is None
+
+
+# ---------------------------------------------------------------------------
+# T3 – delete marks deleted; get returns None; is_deleted returns True
+# ---------------------------------------------------------------------------
+
+
+async def test_delete_marks_deleted() -> None:
+ state = _RuntimeState()
+ execution = _make_execution("caresp_bbb0000000000000000000000000000")
+ await state.add(execution)
+
+ result = await state.delete("caresp_bbb0000000000000000000000000000")
+
+ assert result is True
+ assert await state.get("caresp_bbb0000000000000000000000000000") is None
+ assert await state.is_deleted("caresp_bbb0000000000000000000000000000") is True
+
+
+# ---------------------------------------------------------------------------
+# T4 – delete non-existent returns False
+# ---------------------------------------------------------------------------
+
+
+async def test_delete_nonexistent_returns_false() -> None:
+ state = _RuntimeState()
+ assert await state.delete("nonexistent_id") is False
+
+
+# ---------------------------------------------------------------------------
+# T5 – get_input_items single execution (no chain)
+# ---------------------------------------------------------------------------
+
+
+async def test_get_input_items_single() -> None:
+ state = _RuntimeState()
+ items = [{"id": "item_1", "type": "message"}]
+ execution = _make_execution(
+ "caresp_ccc0000000000000000000000000000",
+ input_items=items,
+ previous_response_id=None,
+ )
+ await state.add(execution)
+
+ result = await state.get_input_items("caresp_ccc0000000000000000000000000000")
+ assert result == items
+
+
+# ---------------------------------------------------------------------------
+# T6 – get_input_items chain walk (parent items come first)
+# ---------------------------------------------------------------------------
+
+
+async def test_get_input_items_chain_walk() -> None:
+ state = _RuntimeState()
+ parent_id = "caresp_parent000000000000000000000000"
+ child_id = "caresp_child0000000000000000000000000"
+
+ parent = _make_execution(parent_id, input_items=[{"id": "a"}])
+ child = _make_execution(child_id, input_items=[{"id": "b"}], previous_response_id=parent_id)
+
+ await state.add(parent)
+ await state.add(child)
+
+ result = await state.get_input_items(child_id)
+ ids = [item["id"] for item in result]
+ assert ids == ["a", "b"]
+
+
+# ---------------------------------------------------------------------------
+# T7 – get_input_items on deleted response raises ValueError
+# ---------------------------------------------------------------------------
+
+
+async def test_get_input_items_deleted_raises_value_error() -> None:
+ state = _RuntimeState()
+ execution = _make_execution("caresp_ddd0000000000000000000000000000")
+ await state.add(execution)
+ await state.delete("caresp_ddd0000000000000000000000000000")
+
+ with pytest.raises(ValueError, match="deleted"):
+ await state.get_input_items("caresp_ddd0000000000000000000000000000")
+
+
+# ---------------------------------------------------------------------------
+# T8 – to_snapshot with response set returns dict with required fields
+# ---------------------------------------------------------------------------
+
+
+def test_to_snapshot_with_response() -> None:
+ rid = "caresp_eee0000000000000000000000000000"
+ execution = _make_execution(rid, status="completed")
+ execution.response = ResponseObject(
+ {
+ "id": rid,
+ "response_id": rid,
+ "agent_reference": {"name": "test-agent"},
+ "object": "response",
+ "status": "completed",
+ "output": [],
+ }
+ )
+
+ snapshot = _RuntimeState.to_snapshot(execution)
+
+ assert isinstance(snapshot, dict)
+ assert snapshot["status"] == "completed"
+ assert snapshot["id"] == rid
+ assert snapshot["response_id"] == rid
+
+
+# ---------------------------------------------------------------------------
+# T9 – to_snapshot with no response returns minimal dict for queued state
+# ---------------------------------------------------------------------------
+
+
+def test_to_snapshot_queued_no_response() -> None:
+ rid = "caresp_fff0000000000000000000000000000"
+ execution = _make_execution(rid, status="queued")
+ # execution.response is None
+
+ snapshot = _RuntimeState.to_snapshot(execution)
+
+ assert snapshot["id"] == rid
+ assert snapshot["response_id"] == rid
+ assert snapshot["object"] == "response"
+ assert snapshot["status"] == "queued"
+
+
+# ---------------------------------------------------------------------------
+# Extra: to_snapshot status field overrides response payload status
+# ---------------------------------------------------------------------------
+
+
+def test_to_snapshot_status_matches_execution_status() -> None:
+ """to_snapshot should authoritative-stamp status from execution.status."""
+ rid = "caresp_ggg0000000000000000000000000000"
+ execution = _make_execution(rid, status="in_progress")
+ # Give a response that says completed but execution.status says in_progress
+ execution.response = ResponseObject({"id": rid, "status": "completed", "output": []})
+
+ snapshot = _RuntimeState.to_snapshot(execution)
+
+ assert snapshot["status"] == "in_progress"
+
+
+# ---------------------------------------------------------------------------
+# Extra: to_snapshot injects id/response_id defaults when missing from response
+# ---------------------------------------------------------------------------
+
+
+def test_to_snapshot_injects_defaults_when_response_missing_ids() -> None:
+ rid = "caresp_hhh0000000000000000000000000000"
+ execution = _make_execution(rid, status="completed")
+ # Response without id/response_id
+ execution.response = ResponseObject({"status": "completed", "output": []})
+
+ snapshot = _RuntimeState.to_snapshot(execution)
+
+ assert snapshot["id"] == rid
+ assert snapshot["response_id"] == rid
+ assert snapshot["object"] == "response"
+
+
+# ---------------------------------------------------------------------------
+# Extra: list_records returns all stored executions
+# ---------------------------------------------------------------------------
+
+
+async def test_list_records_returns_all() -> None:
+ state = _RuntimeState()
+ e1 = _make_execution("caresp_iii0000000000000000000000000000")
+ e2 = _make_execution("caresp_jjj0000000000000000000000000000")
+ await state.add(e1)
+ await state.add(e2)
+
+ records = await state.list_records()
+ assert len(records) == 2
+ ids = {r.response_id for r in records}
+ assert ids == {"caresp_iii0000000000000000000000000000", "caresp_jjj0000000000000000000000000000"}
+
+
+# ---------------------------------------------------------------------------
+# T1 (Task 7.1) – _ExecutionRecord is no longer exported from _runtime_state
+# ---------------------------------------------------------------------------
+
+
+def test_import_does_not_expose_execution_record() -> None:
+ """_ExecutionRecord was deleted in Task 7.1; the module must not export it."""
+ import importlib
+
+ mod = importlib.import_module("azure.ai.agentserver.responses.hosting._runtime_state")
+ assert not hasattr(mod, "_ExecutionRecord"), (
+ "_ExecutionRecord should have been removed from _runtime_state in Phase 7 / Task 7.1"
+ )
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_session_and_response_id_resolution.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_session_and_response_id_resolution.py
new file mode 100644
index 000000000000..ee137f237b2c
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_session_and_response_id_resolution.py
@@ -0,0 +1,276 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Unit tests for B38 (Response ID Resolution) and B39 (Session ID Resolution)."""
+
+from __future__ import annotations
+
+import uuid
+
+import pytest
+
+from azure.ai.agentserver.responses._id_generator import IdGenerator
+from azure.ai.agentserver.responses.hosting._request_parsing import (
+ _resolve_identity_fields,
+ _resolve_session_id,
+)
+from azure.ai.agentserver.responses.streaming._internals import (
+ apply_common_defaults,
+)
+
+# ---------------------------------------------------------------------------
+# Minimal stub for parsed CreateResponse
+# ---------------------------------------------------------------------------
+
+
+class _FakeParsed:
+ """Minimal stub matching the CreateResponse model interface."""
+
+ def __init__(self, **kwargs):
+ for key, value in kwargs.items():
+ setattr(self, key, value)
+ if not hasattr(self, "agent_reference"):
+ self.agent_reference = None
+ if not hasattr(self, "conversation"):
+ self.conversation = None
+ if not hasattr(self, "previous_response_id"):
+ self.previous_response_id = None
+
+ def as_dict(self):
+ d = {}
+ for key in ("response_id", "agent_reference", "conversation", "previous_response_id", "agent_session_id"):
+ if hasattr(self, key):
+ d[key] = getattr(self, key)
+ return d
+
+
+# ===================================================================
+# B38: Response ID Resolution
+# ===================================================================
+
+
+class TestResponseIdResolution:
+ """Tests for B38 — x-agent-response-id header override."""
+
+ def test_header_overrides_generated_id(self):
+ """B38: header value is used as response_id when present."""
+ # Generate a valid response_id to use as header value
+ valid_id = IdGenerator.new_response_id()
+ parsed = _FakeParsed()
+ response_id, _ = _resolve_identity_fields(
+ parsed,
+ request_headers={"x-agent-response-id": valid_id},
+ )
+ assert response_id == valid_id
+
+ def test_header_overrides_payload_response_id(self):
+ """B38: header takes precedence over payload response_id."""
+ header_id = IdGenerator.new_response_id()
+ payload_id = IdGenerator.new_response_id()
+ parsed = _FakeParsed(response_id=payload_id)
+ response_id, _ = _resolve_identity_fields(
+ parsed,
+ request_headers={"x-agent-response-id": header_id},
+ )
+ assert response_id == header_id
+
+ def test_empty_header_falls_back_to_generated(self):
+ """B38: empty header causes library to generate a response ID."""
+ parsed = _FakeParsed()
+ response_id, _ = _resolve_identity_fields(
+ parsed,
+ request_headers={"x-agent-response-id": ""},
+ )
+ assert response_id.startswith("caresp_")
+
+ def test_absent_header_falls_back_to_generated(self):
+ """B38: absent header causes library to generate a response ID."""
+ parsed = _FakeParsed()
+ response_id, _ = _resolve_identity_fields(
+ parsed,
+ request_headers={},
+ )
+ assert response_id.startswith("caresp_")
+
+ def test_no_headers_arg_falls_back_to_generated(self):
+ """B38: no headers kwarg causes library to generate a response ID."""
+ parsed = _FakeParsed()
+ response_id, _ = _resolve_identity_fields(parsed)
+ assert response_id.startswith("caresp_")
+
+ def test_payload_response_id_used_when_no_header(self):
+ """B38: payload response_id is used when header is absent."""
+ payload_id = IdGenerator.new_response_id()
+ parsed = _FakeParsed(response_id=payload_id)
+ response_id, _ = _resolve_identity_fields(
+ parsed,
+ request_headers={},
+ )
+ assert response_id == payload_id
+
+ def test_invalid_header_format_raises(self):
+ """B38: malformed header value is rejected by response ID validation."""
+ parsed = _FakeParsed()
+ with pytest.raises(Exception):
+ # "bad-id" doesn't conform to the caresp_ prefix + partition key + entropy format
+ _resolve_identity_fields(
+ parsed,
+ request_headers={"x-agent-response-id": "bad-id"},
+ )
+
+
+# ===================================================================
+# B39: Session ID Resolution
+# ===================================================================
+
+
+class TestSessionIdResolution:
+ """Tests for B39 — agent_session_id priority chain."""
+
+ def test_payload_field_highest_priority(self):
+ """B39 P1: request.agent_session_id payload field wins."""
+ parsed = _FakeParsed(agent_session_id="session-from-payload")
+ result = _resolve_session_id(parsed, {"agent_session_id": "session-from-raw-payload"})
+ assert result == "session-from-payload"
+
+ def test_raw_payload_used_when_model_missing_field(self):
+ """B39 P1: raw payload dict is used if parsed model lacks the field."""
+ parsed = _FakeParsed() # no agent_session_id attr
+ result = _resolve_session_id(parsed, {"agent_session_id": "session-from-raw"})
+ assert result == "session-from-raw"
+
+ def test_env_var_second_priority(self):
+ """B39 P2: env_session_id (from AgentConfig) when no payload field."""
+ parsed = _FakeParsed()
+ result = _resolve_session_id(parsed, {}, env_session_id="env-session-123")
+ assert result == "env-session-123"
+
+ def test_generated_uuid_third_priority(self):
+ """B39 P3: generated UUID when no payload field or env_session_id."""
+ parsed = _FakeParsed()
+ result = _resolve_session_id(parsed, {})
+ # Should be a valid UUID
+ uuid.UUID(result) # raises ValueError if invalid
+
+ def test_payload_overrides_env_var(self):
+ """B39: payload field takes precedence over env_session_id."""
+ parsed = _FakeParsed(agent_session_id="payload-session")
+ result = _resolve_session_id(parsed, {}, env_session_id="env-session")
+ assert result == "payload-session"
+
+ def test_empty_payload_falls_to_env(self):
+ """B39: empty/whitespace payload field falls through to env_session_id."""
+ parsed = _FakeParsed(agent_session_id=" ")
+ result = _resolve_session_id(parsed, {}, env_session_id="env-fallback")
+ assert result == "env-fallback"
+
+ def test_empty_env_falls_to_uuid(self):
+ """B39: empty env_session_id falls through to generated UUID."""
+ parsed = _FakeParsed()
+ result = _resolve_session_id(parsed, {}, env_session_id=" ")
+ uuid.UUID(result)
+
+
+# ===================================================================
+# B39: Session ID stamping on response.* events
+# ===================================================================
+
+
+class TestSessionIdStamping:
+ """Tests for B39 — agent_session_id auto-stamping on response events."""
+
+ def test_session_id_stamped_on_response_created(self):
+ """B39: agent_session_id is stamped on response.created event."""
+ events = [
+ {"type": "response.created", "response": {"id": "resp_1", "status": "in_progress"}},
+ ]
+ apply_common_defaults(
+ events,
+ response_id="resp_1",
+ agent_reference=None,
+ model=None,
+ agent_session_id="test-session",
+ )
+ assert events[0]["response"]["agent_session_id"] == "test-session"
+
+ def test_session_id_stamped_on_response_completed(self):
+ """B39: agent_session_id is stamped on response.completed event."""
+ events = [
+ {"type": "response.completed", "response": {"id": "resp_1", "status": "completed"}},
+ ]
+ apply_common_defaults(
+ events,
+ response_id="resp_1",
+ agent_reference=None,
+ model=None,
+ agent_session_id="completed-session",
+ )
+ assert events[0]["response"]["agent_session_id"] == "completed-session"
+
+ def test_session_id_forcibly_stamped_overrides_handler(self):
+ """B39: agent_session_id is forcibly set, even if handler sets it."""
+ events = [
+ {
+ "type": "response.created",
+ "response": {"id": "resp_1", "agent_session_id": "handler-set"},
+ },
+ ]
+ apply_common_defaults(
+ events,
+ response_id="resp_1",
+ agent_reference=None,
+ model=None,
+ agent_session_id="library-resolved",
+ )
+ # Library-resolved session ID overrides handler-set value
+ assert events[0]["response"]["agent_session_id"] == "library-resolved"
+
+ def test_no_session_id_when_none(self):
+ """B39: no stamping when agent_session_id is None."""
+ events = [
+ {"type": "response.created", "response": {"id": "resp_1"}},
+ ]
+ apply_common_defaults(
+ events,
+ response_id="resp_1",
+ agent_reference=None,
+ model=None,
+ agent_session_id=None,
+ )
+ assert "agent_session_id" not in events[0]["response"]
+
+ def test_non_lifecycle_events_not_stamped(self):
+ """B39: non-response.* events are not stamped."""
+ events = [
+ {"type": "response.output_text.delta", "delta": "hello"},
+ ]
+ apply_common_defaults(
+ events,
+ response_id="resp_1",
+ agent_reference=None,
+ model=None,
+ agent_session_id="should-not-appear",
+ )
+ assert "agent_session_id" not in events[0]
+
+ def test_session_id_stamped_on_all_lifecycle_types(self):
+ """B39: stamped on all response.* lifecycle event types."""
+ lifecycle_types = [
+ "response.queued",
+ "response.created",
+ "response.in_progress",
+ "response.completed",
+ "response.failed",
+ "response.incomplete",
+ ]
+ for event_type in lifecycle_types:
+ events = [{"type": event_type, "response": {"id": "resp_1"}}]
+ apply_common_defaults(
+ events,
+ response_id="resp_1",
+ agent_reference=None,
+ model=None,
+ agent_session_id="all-types-session",
+ )
+ assert events[0]["response"]["agent_session_id"] == "all-types-session", (
+ f"Missing agent_session_id on {event_type}"
+ )
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_sse_writer.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_sse_writer.py
new file mode 100644
index 000000000000..259063f82960
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_sse_writer.py
@@ -0,0 +1,61 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Unit tests for SSE encoding helpers."""
+
+from __future__ import annotations
+
+from azure.ai.agentserver.responses.streaming import _sse
+
+
+class _FakeEvent:
+ def __init__(self, type: str, sequence_number: int, text: str) -> None:
+ self.type = type
+ self.sequence_number = sequence_number
+ self.text = text
+
+
+def test_sse_writer__encodes_event_and_data_lines_with_separator() -> None:
+ event = _FakeEvent(type="response.created", sequence_number=0, text="hello")
+
+ encoded = _sse.encode_sse_event(event) # type: ignore[arg-type]
+ assert encoded.startswith("event: response.created\n")
+ assert "data:" in encoded
+ assert encoded.endswith("\n\n")
+
+
+def test_sse_writer__encodes_multiline_text_as_single_data_line() -> None:
+ event = _FakeEvent(type="response.output_text.delta", sequence_number=1, text="line1\nline2")
+
+ encoded = _sse.encode_sse_event(event) # type: ignore[arg-type]
+ # Spec requires a single data: line with JSON payload — no extra data: lines
+ assert encoded.count("data: ") == 1
+ assert "data: line1" not in encoded
+ assert r"line1\nline2" in encoded
+
+
+def test_sse_writer__keep_alive_comment_frame_format() -> None:
+ keep_alive_frame = _sse.encode_keep_alive_comment() # type: ignore[attr-defined]
+ assert keep_alive_frame == ": keep-alive\n\n"
+
+
+def test_sse_writer__injects_monotonic_sequence_numbers() -> None:
+ import json as _json
+
+ _sse.new_stream_counter()
+
+ first_event = _FakeEvent(type="response.created", sequence_number=-1, text="a")
+ second_event = _FakeEvent(type="response.in_progress", sequence_number=-1, text="b")
+
+ encoded_first = _sse.encode_sse_event(first_event) # type: ignore[arg-type]
+ encoded_second = _sse.encode_sse_event(second_event) # type: ignore[arg-type]
+
+ def _extract_sequence_number(encoded: str) -> int:
+ data_line = next(line for line in encoded.splitlines() if line.startswith("data:"))
+ payload = _json.loads(data_line[len("data:") :].strip())
+ return int(payload["sequence_number"])
+
+ seq_first = _extract_sequence_number(encoded_first)
+ seq_second = _extract_sequence_number(encoded_second)
+
+ assert seq_first == 0, f"first sequence_number must be 0 for a new stream, got {seq_first}"
+ assert seq_second == 1, f"second sequence_number must be 1 for a new stream, got {seq_second}"
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_validation.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_validation.py
new file mode 100644
index 000000000000..3a62ff1cc23e
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_validation.py
@@ -0,0 +1,53 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Unit tests for validation helpers."""
+
+from __future__ import annotations
+
+import pytest
+
+from azure.ai.agentserver.responses.hosting._validation import (
+ parse_create_response,
+ to_api_error_response,
+ validate_create_response,
+)
+from azure.ai.agentserver.responses.models.errors import RequestValidationError
+
+
+class _FakeCreateRequest:
+ def __init__(
+ self,
+ store: bool | None = True,
+ background: bool = False,
+ stream: bool | None = False,
+ stream_options: object | None = None,
+ model: str | None = "gpt-4o-mini",
+ ) -> None:
+ self.store = store
+ self.background = background
+ self.stream = stream
+ self.stream_options = stream_options
+ self.model = model
+
+
+def test_validation__non_object_payload_returns_invalid_request() -> None:
+ with pytest.raises(RequestValidationError) as exc_info:
+ parse_create_response(["not", "an", "object"]) # type: ignore[arg-type]
+
+ assert exc_info.value.code == "invalid_request"
+
+
+def test_validation__cross_field_stream_options_requires_stream_flag() -> None:
+ request = _FakeCreateRequest(stream=False, stream_options={"foo": "bar"})
+
+ with pytest.raises(RequestValidationError) as exc_info:
+ validate_create_response(request) # type: ignore[arg-type]
+
+ assert exc_info.value.param == "stream"
+
+
+def test_validation__unexpected_exception_maps_to_bad_request_category() -> None:
+ error = ValueError("bad payload")
+ envelope = to_api_error_response(error)
+
+ assert envelope.error.type == "invalid_request_error"
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_validator_emitter.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_validator_emitter.py
new file mode 100644
index 000000000000..fd5f91f165e2
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_validator_emitter.py
@@ -0,0 +1,303 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Tests for validator emitter behavior."""
+
+from __future__ import annotations
+
+import re
+from types import ModuleType
+
+from scripts.validator_emitter import build_validator_module
+
+
+def _load_module(code: str) -> ModuleType:
+ module = ModuleType("generated_validators")
+ exec(code, module.__dict__)
+ return module
+
+
+def test_emitter_generates_required_property_check() -> None:
+ schemas = {
+ "CreateResponse": {
+ "type": "object",
+ "required": ["model"],
+ "properties": {"model": {"type": "string"}},
+ }
+ }
+ module = _load_module(build_validator_module(schemas, ["CreateResponse"]))
+ errors = module.validate_CreateResponse({})
+ assert any(e["path"] == "$.model" and "missing" in e["message"].lower() for e in errors)
+
+
+def test_emitter_generates_class_without_schema_definition() -> None:
+ schemas = {
+ "CreateResponse": {
+ "type": "object",
+ "required": ["model"],
+ "properties": {"model": {"type": "string"}},
+ }
+ }
+ code = build_validator_module(schemas, ["CreateResponse"])
+ assert "class CreateResponseValidator" in code
+ assert "\nSCHEMAS =" not in code
+
+
+def test_emitter_uses_generated_enum_values_when_available() -> None:
+ schemas = {
+ "OpenAI.ToolType": {
+ "anyOf": [
+ {"type": "string"},
+ {"type": "string", "enum": ["function", "file_search"]},
+ ]
+ }
+ }
+ code = build_validator_module(schemas, ["OpenAI.ToolType"])
+ assert "_enum_values('ToolType')" in code
+
+
+def test_emitter_deduplicates_string_union_error_message() -> None:
+ schemas = {
+ "OpenAI.InputItemType": {
+ "anyOf": [
+ {"type": "string"},
+ {"type": "string", "enum": ["message", "item_reference"]},
+ ]
+ }
+ }
+
+ module = _load_module(build_validator_module(schemas, ["OpenAI.InputItemType"]))
+ errors = module.validate_OpenAI_InputItemType(123)
+ assert errors
+ assert errors[0]["path"] == "$"
+ assert "InputItemType" in errors[0]["message"]
+ assert "got integer" in errors[0]["message"].lower()
+ assert "string, string" not in errors[0]["message"]
+
+
+def test_emitter_generates_nullable_handling() -> None:
+ schemas = {
+ "CreateResponse": {
+ "type": "object",
+ "properties": {"instructions": {"type": "string", "nullable": True}},
+ }
+ }
+ module = _load_module(build_validator_module(schemas, ["CreateResponse"]))
+ assert module.validate_CreateResponse({"instructions": None}) == []
+
+
+def test_emitter_generates_primitive_type_checks_and_enum_literal() -> None:
+ schemas = {
+ "CreateResponse": {
+ "type": "object",
+ "properties": {
+ "model": {"type": "string", "enum": ["gpt-4o", "gpt-4.1"]},
+ "temperature": {"type": "number"},
+ "stream": {"type": "boolean"},
+ },
+ }
+ }
+ module = _load_module(build_validator_module(schemas, ["CreateResponse"]))
+ errors = module.validate_CreateResponse({"model": "bad", "temperature": "hot", "stream": "yes"})
+ assert any(e["path"] == "$.model" and "allowed" in e["message"].lower() for e in errors)
+ assert any(e["path"] == "$.temperature" and "number" in e["message"].lower() for e in errors)
+ assert any(e["path"] == "$.stream" and "boolean" in e["message"].lower() for e in errors)
+
+
+def test_emitter_generates_nested_delegate_calls() -> None:
+ schemas = {
+ "CreateResponse": {
+ "type": "object",
+ "properties": {"metadata": {"$ref": "#/components/schemas/Metadata"}},
+ },
+ "Metadata": {
+ "type": "object",
+ "required": ["id"],
+ "properties": {"id": {"type": "string"}},
+ },
+ }
+ module = _load_module(build_validator_module(schemas, ["CreateResponse"]))
+ errors = module.validate_CreateResponse({"metadata": {}})
+ assert any(e["path"] == "$.metadata.id" for e in errors)
+
+
+def test_emitter_generates_union_kind_check_for_oneof_anyof() -> None:
+ schemas = {
+ "CreateResponse": {
+ "type": "object",
+ "properties": {
+ "tool_choice": {
+ "anyOf": [
+ {"type": "string"},
+ {"$ref": "#/components/schemas/ToolChoiceParam"},
+ ]
+ }
+ },
+ },
+ "ToolChoiceParam": {
+ "type": "object",
+ "required": ["type"],
+ "properties": {"type": {"type": "string"}},
+ },
+ }
+ module = _load_module(build_validator_module(schemas, ["CreateResponse"]))
+ errors = module.validate_CreateResponse({"tool_choice": 123})
+ assert any(e["path"] == "$.tool_choice" and "expected one of" in e["message"].lower() for e in errors)
+
+
+def test_emitter_validates_create_response_input_property() -> None:
+ schemas = {
+ "CreateResponse": {
+ "type": "object",
+ "properties": {
+ "input": {
+ "anyOf": [
+ {"type": "string"},
+ {
+ "type": "array",
+ "items": {"$ref": "#/components/schemas/InputItem"},
+ },
+ ]
+ }
+ },
+ },
+ "InputItem": {
+ "type": "object",
+ "required": ["type"],
+ "properties": {"type": {"type": "string"}},
+ },
+ }
+
+ module = _load_module(build_validator_module(schemas, ["CreateResponse"]))
+
+ # Invalid input kind should fail the CreateResponse.input union check.
+ invalid_errors = module.validate_CreateResponse({"input": 123})
+ assert any(e["path"] == "$.input" and "expected one of" in e["message"].lower() for e in invalid_errors)
+
+ # Supported input kinds should pass.
+ assert module.validate_CreateResponse({"input": "hello"}) == []
+ assert module.validate_CreateResponse({"input": [{"type": "message"}]}) == []
+
+
+def test_emitter_generates_discriminator_dispatch() -> None:
+ schemas = {
+ "Tool": {
+ "type": "object",
+ "discriminator": {
+ "propertyName": "type",
+ "mapping": {
+ "function": "#/components/schemas/FunctionTool",
+ },
+ },
+ "properties": {"type": {"type": "string"}},
+ },
+ "FunctionTool": {
+ "type": "object",
+ "required": ["name"],
+ "properties": {
+ "type": {"type": "string"},
+ "name": {"type": "string"},
+ },
+ },
+ }
+ module = _load_module(build_validator_module(schemas, ["Tool"]))
+ errors = module.validate_Tool({"type": "function"})
+ assert any(e["path"] == "$.name" and "missing" in e["message"].lower() for e in errors)
+
+
+def test_emitter_generates_default_discriminator_fallback() -> None:
+ """When defaultValue is set on a discriminator, absent 'type' uses the default."""
+ schemas = {
+ "Item": {
+ "type": "object",
+ "required": ["type"],
+ "discriminator": {
+ "propertyName": "type",
+ "defaultValue": "message",
+ "mapping": {
+ "message": "#/components/schemas/ItemMessage",
+ "function_call": "#/components/schemas/ItemFunctionCall",
+ },
+ },
+ "properties": {"type": {"type": "string"}},
+ },
+ "ItemMessage": {
+ "type": "object",
+ "required": ["role"],
+ "properties": {
+ "type": {"type": "string"},
+ "role": {"type": "string"},
+ },
+ },
+ "ItemFunctionCall": {
+ "type": "object",
+ "required": ["name"],
+ "properties": {
+ "type": {"type": "string"},
+ "name": {"type": "string"},
+ },
+ },
+ }
+ module = _load_module(build_validator_module(schemas, ["Item"]))
+
+ # No type → defaults to "message", validates against ItemMessage
+ errors = module.validate_Item({"role": "user"})
+ # Should NOT get "Required discriminator 'type' is missing" or "Required property 'type' is missing"
+ assert not any("type" in e.get("path", "") and "missing" in e.get("message", "").lower() for e in errors)
+ # Should validate as message — role present, so no errors
+ assert errors == []
+
+ # No type, missing required "role" → defaults to "message" but fails message validation
+ errors_no_role = module.validate_Item({"content": "hi"})
+ assert not any("type" in e.get("path", "") and "missing" in e.get("message", "").lower() for e in errors_no_role)
+ assert any(e["path"] == "$.role" and "missing" in e["message"].lower() for e in errors_no_role)
+
+ # Explicit type still works
+ errors_fc = module.validate_Item({"type": "function_call"})
+ assert any(e["path"] == "$.name" and "missing" in e["message"].lower() for e in errors_fc)
+
+
+def test_emitter_generates_array_and_map_checks() -> None:
+ schemas = {
+ "CreateResponse": {
+ "type": "object",
+ "properties": {
+ "tools": {
+ "type": "array",
+ "items": {"$ref": "#/components/schemas/Tool"},
+ },
+ "metadata": {
+ "type": "object",
+ "additionalProperties": {"type": "string"},
+ },
+ },
+ },
+ "Tool": {
+ "type": "object",
+ "required": ["name"],
+ "properties": {"name": {"type": "string"}},
+ },
+ }
+ module = _load_module(build_validator_module(schemas, ["CreateResponse"]))
+ errors = module.validate_CreateResponse({"tools": [{}], "metadata": {"a": 1}})
+ assert any(e["path"] == "$.tools[0].name" for e in errors)
+ assert any(e["path"] == "$.metadata.a" for e in errors)
+
+
+def test_emitter_uses_descriptive_helper_function_names() -> None:
+ schemas = {
+ "CreateResponse": {
+ "type": "object",
+ "properties": {
+ "model": {"type": "string"},
+ "metadata": {
+ "type": "object",
+ "additionalProperties": {"type": "string"},
+ },
+ },
+ }
+ }
+
+ code = build_validator_module(schemas, ["CreateResponse"])
+ assert "_validate_CreateResponse_model" in code
+ assert "_validate_CreateResponse_metadata" in code
+ assert re.search(r"_validate_branch_\d+", code) is None
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_validator_generator_contract.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_validator_generator_contract.py
new file mode 100644
index 000000000000..cdc6953a4533
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_validator_generator_contract.py
@@ -0,0 +1,195 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Tests for validator generator contract behavior."""
+
+from __future__ import annotations
+
+import subprocess
+import sys
+from pathlib import Path
+
+
+def _script_path() -> Path:
+ return Path(__file__).resolve().parents[2] / "scripts" / "generate_validators.py"
+
+
+def _minimal_spec() -> str:
+ return """{
+ "paths": {
+ "/responses": {
+ "post": {
+ "requestBody": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/CreateResponse"
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "components": {
+ "schemas": {
+ "CreateResponse": {
+ "type": "object",
+ "required": ["model"],
+ "properties": {
+ "model": {"type": "string"}
+ }
+ }
+ }
+ }
+}
+"""
+
+
+def test_generator_requires_cli_args() -> None:
+ proc = subprocess.run(
+ [sys.executable, str(_script_path())],
+ capture_output=True,
+ text=True,
+ check=False,
+ )
+ assert proc.returncode != 0
+ assert "--input" in proc.stderr
+ assert "--output" in proc.stderr
+
+
+def test_generated_file_has_autogen_header(tmp_path: Path) -> None:
+ spec_path = tmp_path / "spec.json"
+ out_path = tmp_path / "_validators.py"
+ spec_path.write_text(_minimal_spec(), encoding="utf-8")
+
+ proc = subprocess.run(
+ [
+ sys.executable,
+ str(_script_path()),
+ "--input",
+ str(spec_path),
+ "--output",
+ str(out_path),
+ "--root-schemas",
+ "CreateResponse",
+ ],
+ capture_output=True,
+ text=True,
+ check=False,
+ )
+
+ assert proc.returncode == 0, proc.stderr
+ content = out_path.read_text(encoding="utf-8")
+ assert content.startswith("# pylint: disable=line-too-long,useless-suppression,too-many-lines")
+ assert "# Code generated by Microsoft (R) Python Code Generator." in content
+
+
+def test_generation_is_deterministic_for_same_input(tmp_path: Path) -> None:
+ spec_path = tmp_path / "spec.json"
+ out_path = tmp_path / "_validators.py"
+ spec_path.write_text(_minimal_spec(), encoding="utf-8")
+
+ cmd = [
+ sys.executable,
+ str(_script_path()),
+ "--input",
+ str(spec_path),
+ "--output",
+ str(out_path),
+ "--root-schemas",
+ "CreateResponse",
+ ]
+
+ first = subprocess.run(cmd, capture_output=True, text=True, check=False)
+ assert first.returncode == 0, first.stderr
+ first_output = out_path.read_text(encoding="utf-8")
+
+ second = subprocess.run(cmd, capture_output=True, text=True, check=False)
+ assert second.returncode == 0, second.stderr
+ second_output = out_path.read_text(encoding="utf-8")
+
+ assert first_output == second_output
+
+
+def test_overlay_removes_required_field(tmp_path: Path) -> None:
+ """--overlay required:[] removes all required checks from the named schema."""
+ spec_path = tmp_path / "spec.json"
+ out_path = tmp_path / "_validators.py"
+ overlay_path = tmp_path / "overlay.yaml"
+
+ spec_path.write_text(_minimal_spec(), encoding="utf-8")
+ # overlay removes model from required
+ overlay_path.write_text("schemas:\n CreateResponse:\n required: []\n", encoding="utf-8")
+
+ proc = subprocess.run(
+ [
+ sys.executable,
+ str(_script_path()),
+ "--input",
+ str(spec_path),
+ "--output",
+ str(out_path),
+ "--root-schemas",
+ "CreateResponse",
+ "--overlay",
+ str(overlay_path),
+ ],
+ capture_output=True,
+ text=True,
+ check=False,
+ )
+ assert proc.returncode == 0, proc.stderr
+ content = out_path.read_text(encoding="utf-8")
+ # "Required property 'model'" must not appear when required is cleared
+ assert "Required property 'model'" not in content
+
+
+def test_overlay_not_required_marks_field_nullable(tmp_path: Path) -> None:
+ """--overlay not_required removes a field from required and adds a None guard."""
+ spec_path = tmp_path / "spec.json"
+ out_path = tmp_path / "_validators.py"
+ overlay_path = tmp_path / "overlay.yaml"
+
+ spec_path.write_text(
+ """{
+ "paths": {},
+ "components": {
+ "schemas": {
+ "Item": {
+ "type": "object",
+ "required": ["type", "role"],
+ "properties": {
+ "type": {"type": "string"},
+ "role": {"type": "string"}
+ }
+ }
+ }
+ }
+}""",
+ encoding="utf-8",
+ )
+ overlay_path.write_text("schemas:\n Item:\n not_required:\n - type\n", encoding="utf-8")
+
+ proc = subprocess.run(
+ [
+ sys.executable,
+ str(_script_path()),
+ "--input",
+ str(spec_path),
+ "--output",
+ str(out_path),
+ "--root-schemas",
+ "Item",
+ "--overlay",
+ str(overlay_path),
+ ],
+ capture_output=True,
+ text=True,
+ check=False,
+ )
+ assert proc.returncode == 0, proc.stderr
+ content = out_path.read_text(encoding="utf-8")
+ # 'type' must NOT appear as a required property check
+ assert "Required property 'type'" not in content
+ # 'role' must still be required
+ assert "Required property 'role'" in content
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_validator_generator_e2e.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_validator_generator_e2e.py
new file mode 100644
index 000000000000..adaffd242655
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_validator_generator_e2e.py
@@ -0,0 +1,194 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""End-to-end tests for validator generator CLI output."""
+
+from __future__ import annotations
+
+import importlib.util
+import subprocess
+import sys
+from pathlib import Path
+
+
+def _script_path() -> Path:
+ return Path(__file__).resolve().parents[2] / "scripts" / "generate_validators.py"
+
+
+def _spec() -> str:
+ return """{
+ "paths": {
+ "/responses": {
+ "post": {
+ "requestBody": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/CreateResponse"
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "components": {
+ "schemas": {
+ "CreateResponse": {
+ "type": "object",
+ "required": ["model"],
+ "properties": {
+ "model": {"type": "string"},
+ "metadata": {"$ref": "#/components/schemas/Metadata"}
+ }
+ },
+ "Metadata": {
+ "type": "object",
+ "additionalProperties": {"type": "string"}
+ }
+ }
+ }
+}
+"""
+
+
+def test_generator_emits_valid_python_module(tmp_path: Path) -> None:
+ spec_path = tmp_path / "spec.json"
+ out_path = tmp_path / "_validators.py"
+ spec_path.write_text(_spec(), encoding="utf-8")
+
+ proc = subprocess.run(
+ [
+ sys.executable,
+ str(_script_path()),
+ "--input",
+ str(spec_path),
+ "--output",
+ str(out_path),
+ "--root-schemas",
+ "CreateResponse",
+ ],
+ capture_output=True,
+ text=True,
+ check=False,
+ )
+ assert proc.returncode == 0, proc.stderr
+
+ source = out_path.read_text(encoding="utf-8")
+ compile(source, str(out_path), "exec")
+
+
+def test_generated_module_exposes_expected_validate_functions(tmp_path: Path) -> None:
+ spec_path = tmp_path / "spec.json"
+ out_path = tmp_path / "_validators.py"
+ spec_path.write_text(_spec(), encoding="utf-8")
+
+ proc = subprocess.run(
+ [
+ sys.executable,
+ str(_script_path()),
+ "--input",
+ str(spec_path),
+ "--output",
+ str(out_path),
+ "--root-schemas",
+ "CreateResponse",
+ ],
+ capture_output=True,
+ text=True,
+ check=False,
+ )
+ assert proc.returncode == 0, proc.stderr
+
+ module_name = "generated_validator_module"
+ spec = importlib.util.spec_from_file_location(module_name, out_path)
+ assert spec is not None and spec.loader is not None
+ module = importlib.util.module_from_spec(spec)
+ spec.loader.exec_module(module)
+
+ assert hasattr(module, "validate_CreateResponse")
+
+
+def test_regeneration_overwrites_previous_output_cleanly(tmp_path: Path) -> None:
+ spec_path = tmp_path / "spec.json"
+ out_path = tmp_path / "_validators.py"
+ spec_path.write_text(_spec(), encoding="utf-8")
+
+ out_path.write_text("stale-content", encoding="utf-8")
+
+ proc = subprocess.run(
+ [
+ sys.executable,
+ str(_script_path()),
+ "--input",
+ str(spec_path),
+ "--output",
+ str(out_path),
+ "--root-schemas",
+ "CreateResponse",
+ ],
+ capture_output=True,
+ text=True,
+ check=False,
+ )
+ assert proc.returncode == 0, proc.stderr
+
+ content = out_path.read_text(encoding="utf-8")
+ assert "stale-content" not in content
+ assert content.startswith("# pylint: disable=line-too-long,useless-suppression,too-many-lines")
+
+
+def test_generator_handles_inline_create_response_schema(tmp_path: Path) -> None:
+ spec_path = tmp_path / "spec-inline.json"
+ out_path = tmp_path / "_validators.py"
+ spec_path.write_text(
+ """{
+ "paths": {
+ "/responses": {
+ "post": {
+ "requestBody": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "anyOf": [
+ {
+ "type": "object",
+ "required": ["model"],
+ "properties": {
+ "model": {"type": "string"}
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "components": {
+ "schemas": {}
+ }
+}
+""",
+ encoding="utf-8",
+ )
+
+ proc = subprocess.run(
+ [
+ sys.executable,
+ str(_script_path()),
+ "--input",
+ str(spec_path),
+ "--output",
+ str(out_path),
+ "--root-schemas",
+ "CreateResponse",
+ ],
+ capture_output=True,
+ text=True,
+ check=False,
+ )
+ assert proc.returncode == 0, proc.stderr
+ content = out_path.read_text(encoding="utf-8")
+ assert "def _validate_CreateResponse(" in content
+ assert "class CreateResponseValidator" in content
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_validator_schema_walker.py b/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_validator_schema_walker.py
new file mode 100644
index 000000000000..1d5b27234743
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/tests/unit/test_validator_schema_walker.py
@@ -0,0 +1,230 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+"""Tests for OpenAPI schema walker behavior used by validator generation."""
+
+from __future__ import annotations
+
+from scripts.validator_schema_walker import SchemaWalker, discover_post_request_roots, resolve_ref
+
+
+def test_resolve_ref_extracts_schema_name() -> None:
+ assert resolve_ref("#/components/schemas/CreateResponse") == "CreateResponse"
+
+
+def test_schema_walker_collects_reachable_from_root_schema() -> None:
+ schemas = {
+ "CreateResponse": {
+ "type": "object",
+ "properties": {
+ "metadata": {"$ref": "#/components/schemas/Metadata"},
+ },
+ },
+ "Metadata": {
+ "type": "object",
+ "properties": {"id": {"type": "string"}},
+ },
+ }
+
+ walker = SchemaWalker(schemas)
+ walker.walk("CreateResponse")
+
+ assert "CreateResponse" in walker.reachable
+ assert "Metadata" in walker.reachable
+
+
+def test_schema_walker_discovers_inline_post_request_schema() -> None:
+ spec = {
+ "paths": {
+ "/responses": {
+ "post": {
+ "requestBody": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/CreateResponse",
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ assert discover_post_request_roots(spec) == ["CreateResponse"]
+
+
+def test_schema_walker_handles_oneof_anyof_ref_branches() -> None:
+ schemas = {
+ "CreateResponse": {
+ "type": "object",
+ "properties": {
+ "input": {
+ "oneOf": [
+ {"$ref": "#/components/schemas/InputText"},
+ {"$ref": "#/components/schemas/InputImage"},
+ ]
+ },
+ "tool_choice": {
+ "anyOf": [
+ {"type": "string"},
+ {"$ref": "#/components/schemas/ToolChoiceParam"},
+ ]
+ },
+ },
+ },
+ "InputText": {"type": "string"},
+ "InputImage": {"type": "object", "properties": {"url": {"type": "string"}}},
+ "ToolChoiceParam": {"type": "object", "properties": {"type": {"type": "string"}}},
+ }
+
+ walker = SchemaWalker(schemas)
+ walker.walk("CreateResponse")
+
+ assert "InputText" in walker.reachable
+ assert "InputImage" in walker.reachable
+ assert "ToolChoiceParam" in walker.reachable
+
+
+def test_schema_walker_follows_discriminator_mapping_refs() -> None:
+ """Discriminator mapping targets must be walked so overlay can be applied to them."""
+ schemas = {
+ "Item": {
+ "type": "object",
+ "discriminator": {
+ "propertyName": "type",
+ "mapping": {
+ "message": "#/components/schemas/ItemMessage",
+ "tool_call": "#/components/schemas/ToolCall",
+ },
+ },
+ },
+ "ItemMessage": {
+ "type": "object",
+ "required": ["type", "role", "content"],
+ "properties": {
+ "type": {"type": "string"},
+ "role": {"type": "string"},
+ "content": {"type": "string"},
+ },
+ },
+ "ToolCall": {
+ "type": "object",
+ "required": ["type", "name"],
+ "properties": {
+ "type": {"type": "string"},
+ "name": {"type": "string"},
+ },
+ },
+ }
+
+ walker = SchemaWalker(schemas)
+ walker.walk("Item")
+
+ assert "ItemMessage" in walker.reachable
+ assert "ToolCall" in walker.reachable
+
+
+def test_schema_walker_applies_overlay_required_replacement() -> None:
+ """overlay required: [] removes all required fields from a schema."""
+ schemas = {
+ "CreateResponse": {
+ "type": "object",
+ "required": ["model"],
+ "properties": {"model": {"type": "string"}},
+ }
+ }
+ overlay = {"schemas": {"CreateResponse": {"required": []}}}
+
+ walker = SchemaWalker(schemas, overlay=overlay)
+ walker.walk("CreateResponse")
+
+ assert walker.reachable["CreateResponse"].get("required") == []
+
+
+def test_schema_walker_applies_overlay_not_required() -> None:
+ """overlay not_required removes a field from required and marks it nullable."""
+ schemas = {
+ "ItemMessage": {
+ "type": "object",
+ "required": ["type", "role", "content"],
+ "properties": {
+ "type": {"type": "string"},
+ "role": {"type": "string"},
+ "content": {"type": "string"},
+ },
+ }
+ }
+ overlay = {"schemas": {"ItemMessage": {"not_required": ["type"]}}}
+
+ walker = SchemaWalker(schemas, overlay=overlay)
+ walker.walk("ItemMessage")
+
+ schema = walker.reachable["ItemMessage"]
+ assert "type" not in schema["required"]
+ assert schema["properties"]["type"].get("nullable") is True
+ # role and content remain required
+ assert "role" in schema["required"]
+ assert "content" in schema["required"]
+
+
+def test_schema_walker_applies_overlay_property_constraints() -> None:
+ """overlay properties: merges per-property constraint overrides."""
+ schemas = {
+ "Config": {
+ "type": "object",
+ "properties": {"temperature": {"type": "number"}},
+ }
+ }
+ overlay = {"schemas": {"Config": {"properties": {"temperature": {"minimum": 0, "maximum": 2}}}}}
+
+ walker = SchemaWalker(schemas, overlay=overlay)
+ walker.walk("Config")
+
+ prop = walker.reachable["Config"]["properties"]["temperature"]
+ assert prop["minimum"] == 0
+ assert prop["maximum"] == 2
+
+
+def test_schema_walker_overlay_matches_vendor_prefixed_schema_by_bare_name() -> None:
+ """Overlay keys like 'ItemMessage' must match 'OpenAI.ItemMessage' in the spec."""
+ schemas = {
+ "OpenAI.ItemMessage": {
+ "type": "object",
+ "required": ["type", "role"],
+ "properties": {
+ "type": {"type": "string"},
+ "role": {"type": "string"},
+ },
+ }
+ }
+ overlay = {"schemas": {"ItemMessage": {"not_required": ["type"]}}}
+
+ walker = SchemaWalker(schemas, overlay=overlay)
+ walker.walk("OpenAI.ItemMessage")
+
+ schema = walker.reachable["OpenAI.ItemMessage"]
+ assert "type" not in schema["required"]
+ assert schema["properties"]["type"].get("nullable") is True
+
+
+def test_schema_walker_applies_overlay_default_discriminator() -> None:
+ """Overlay default_discriminator injects defaultValue into the discriminator dict."""
+ schemas = {
+ "OpenAI.Item": {
+ "type": "object",
+ "required": ["type"],
+ "discriminator": {
+ "propertyName": "type",
+ "mapping": {"message": "#/components/schemas/ItemMessage"},
+ },
+ "properties": {"type": {"type": "string"}},
+ }
+ }
+ overlay = {"schemas": {"Item": {"default_discriminator": "message"}}}
+
+ walker = SchemaWalker(schemas, overlay=overlay)
+ walker.walk("OpenAI.Item")
+
+ disc = walker.reachable["OpenAI.Item"]["discriminator"]
+ assert disc["defaultValue"] == "message"
diff --git a/sdk/agentserver/azure-ai-agentserver-responses/type_spec/tsp-location.yaml b/sdk/agentserver/azure-ai-agentserver-responses/type_spec/tsp-location.yaml
new file mode 100644
index 000000000000..f4c8a845e0b5
--- /dev/null
+++ b/sdk/agentserver/azure-ai-agentserver-responses/type_spec/tsp-location.yaml
@@ -0,0 +1,11 @@
+directory: specification/ai-foundry/data-plane/Foundry/src/sdk-service-agentserver-contracts
+commit: d5cf570f983359ce3045f08715ae862455e1d501
+repo: Azure/azure-rest-api-specs
+emitterPackageJsonPath: eng/emitter-package.json
+entrypointFile: client.tsp
+additionalDirectories:
+ - specification/ai-foundry/data-plane/Foundry/src/openai-responses
+ - specification/ai-foundry/data-plane/Foundry/src/openai-conversations
+ - specification/ai-foundry/data-plane/Foundry/src/tools
+ - specification/ai-foundry/data-plane/Foundry/src/common
+ - specification/ai-foundry/data-plane/Foundry/src/memory-stores
diff --git a/sdk/agentserver/ci.yml b/sdk/agentserver/ci.yml
index 330f12c491ae..25459a58f2d1 100644
--- a/sdk/agentserver/ci.yml
+++ b/sdk/agentserver/ci.yml
@@ -3,27 +3,27 @@
trigger:
branches:
include:
- - main
- - hotfix/*
- - release/*
- - restapi*
+ - main
+ - hotfix/*
+ - release/*
+ - restapi*
paths:
include:
- - sdk/agentserver/
- - sdk/core/
+ - sdk/agentserver/
+ - sdk/core/
pr:
branches:
include:
- - main
- - feature/*
- - hotfix/*
- - release/*
- - restapi*
+ - main
+ - feature/*
+ - hotfix/*
+ - release/*
+ - restapi*
paths:
include:
- - sdk/agentserver/
- - sdk/core/
+ - sdk/agentserver/
+ - sdk/core/
extends:
template: /eng/pipelines/templates/stages/archetype-sdk-client.yml
@@ -32,7 +32,7 @@ extends:
TestProxy: true
BuildDocs: true
TestTimeoutInMinutes: 60
- PythonVersionForAnalyze: '3.11'
+ PythonVersionForAnalyze: "3.11"
# The job "Test ubuntu2404_pypy311" in the "python - agentserver" pipeline hangs and eventually times out.
# Disable it until the issue is understood.
MatrixConfigs:
@@ -41,9 +41,11 @@ extends:
Selection: sparse
GenerateVMJobs: true
Artifacts:
- - name: azure-ai-agentserver-invocations
- safeName: azureaiagentserverinvocations
- - name: azure-ai-agentserver-core
- safeName: azureaiagentservercore
- - name: azure-ai-agentserver-githubcopilot
- safeName: azureaiagentservergithubcopilot
+ - name: azure-ai-agentserver-core
+ safeName: azureaiagentservercore
+ - name: azure-ai-agentserver-invocations
+ safeName: azureaiagentserverinvocations
+ - name: azure-ai-agentserver-responses
+ safeName: azureaiagentserverresponses
+ - name: azure-ai-agentserver-githubcopilot
+ safeName: azureaiagentservergithubcopilot