From e417740f7f8a71c2f3414d2c823fac5ed4dcd8f9 Mon Sep 17 00:00:00 2001 From: Casper Nielsen Date: Wed, 28 Jan 2026 17:27:26 +0100 Subject: [PATCH 01/13] feat: add dapr-ext-agent_core Signed-off-by: Casper Nielsen --- ext/dapr-ext-agent-core/LICENSE | 203 ++ ext/dapr-ext-agent-core/README.rst | 22 + .../dapr/ext/agent_core/__init__.py | 14 + .../dapr/ext/agent_core/metadata.py | 277 +++ .../dapr/ext/agent_core/py.typed | 0 .../dapr/ext/agent_core/types.py | 143 ++ .../dapr/ext/agent_core/version.py | 16 + .../schemas/agent-metadata/index.json | 8 + .../schemas/agent-metadata/latest.json | 2018 +++++++++++++++++ .../schemas/agent-metadata/v0.10.6.json | 2018 +++++++++++++++++ .../schemas/agent-metadata/v0.10.7.json | 2018 +++++++++++++++++ .../scripts/generate_schema.py | 114 + ext/dapr-ext-agent-core/setup.cfg | 38 + ext/dapr-ext-agent-core/setup.py | 65 + 14 files changed, 6954 insertions(+) create mode 100644 ext/dapr-ext-agent-core/LICENSE create mode 100644 ext/dapr-ext-agent-core/README.rst create mode 100644 ext/dapr-ext-agent-core/dapr/ext/agent_core/__init__.py create mode 100644 ext/dapr-ext-agent-core/dapr/ext/agent_core/metadata.py create mode 100644 ext/dapr-ext-agent-core/dapr/ext/agent_core/py.typed create mode 100644 ext/dapr-ext-agent-core/dapr/ext/agent_core/types.py create mode 100644 ext/dapr-ext-agent-core/dapr/ext/agent_core/version.py create mode 100644 ext/dapr-ext-agent-core/schemas/agent-metadata/index.json create mode 100644 ext/dapr-ext-agent-core/schemas/agent-metadata/latest.json create mode 100644 ext/dapr-ext-agent-core/schemas/agent-metadata/v0.10.6.json create mode 100644 ext/dapr-ext-agent-core/schemas/agent-metadata/v0.10.7.json create mode 100644 ext/dapr-ext-agent-core/scripts/generate_schema.py create mode 100644 ext/dapr-ext-agent-core/setup.cfg create mode 100644 ext/dapr-ext-agent-core/setup.py diff --git a/ext/dapr-ext-agent-core/LICENSE b/ext/dapr-ext-agent-core/LICENSE new file mode 100644 index 000000000..be033a7fd --- /dev/null +++ b/ext/dapr-ext-agent-core/LICENSE @@ -0,0 +1,203 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2021 The Dapr Authors. + + and others that have contributed code to the public domain. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file
diff --git a/ext/dapr-ext-agent-core/README.rst b/ext/dapr-ext-agent-core/README.rst new file mode 100644 index 000000000..16f73bb10 --- /dev/null +++ b/ext/dapr-ext-agent-core/README.rst @@ -0,0 +1,22 @@ +dapr-ext-agent_core extension +============================== + +|pypi| + +.. |pypi| image:: https://badge.fury.io/py/dapr-ext-agent-core.svg + :target: https://pypi.org/project/dapr-ext-agent-core/ +
+This is the Agent Core extension for Dapr Agents in the Dapr Python SDK.
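+
+A minimal usage sketch, assuming a running Dapr sidecar and an agent instance from a
+supported framework (``dapr-agents`` or ``langgraph``); ``my_agent`` below is a
+placeholder for your own agent object::
+
+    from dapr.ext.agent_core import AgentRegistryAdapter
+
+    # With registry=None, the adapter reads the sidecar metadata and, if a state
+    # store component named "agent-registry" is configured, registers the agent's
+    # metadata in that store under the default team.
+    AgentRegistryAdapter(registry=None, framework="dapr-agents", agent=my_agent)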
+ +Installation +------------ + +:: + + pip install dapr-ext-agent_core + +References +---------- + +* `Dapr `_ +* `Dapr Python-SDK `_ diff --git a/ext/dapr-ext-agent-core/dapr/ext/agent_core/__init__.py b/ext/dapr-ext-agent-core/dapr/ext/agent_core/__init__.py new file mode 100644 index 000000000..e2039e296 --- /dev/null +++ b/ext/dapr-ext-agent-core/dapr/ext/agent_core/__init__.py @@ -0,0 +1,14 @@ +from .types import SupportedFrameworks, AgentMetadataSchema, AgentMetadata, LLMMetadata, PubSubMetadata, ToolMetadata, RegistryMetadata, MemoryMetadata +from .metadata import AgentRegistryAdapter + +__all__ = [ + "SupportedFrameworks", + "AgentMetadataSchema", + "AgentMetadata", + "LLMMetadata", + "PubSubMetadata", + "ToolMetadata", + "RegistryMetadata", + "MemoryMetadata", + "AgentRegistryAdapter", +] \ No newline at end of file diff --git a/ext/dapr-ext-agent-core/dapr/ext/agent_core/metadata.py b/ext/dapr-ext-agent-core/dapr/ext/agent_core/metadata.py new file mode 100644 index 000000000..c2df9e88d --- /dev/null +++ b/ext/dapr-ext-agent-core/dapr/ext/agent_core/metadata.py @@ -0,0 +1,277 @@ +from __future__ import annotations + +from importlib.metadata import PackageNotFoundError, version +import logging +import random +import time +from typing import Any, Callable, Dict, Optional, Sequence + +from dapr.clients import DaprClient +from dapr.clients.grpc._response import ( + GetMetadataResponse, + RegisteredComponents, +) +from dapr.clients.grpc._state import Concurrency, Consistency + +from dapr_agents.agents.configs import ( + AgentRegistryConfig, +) +from dapr_agents.storage.daprstores.stateservice import ( + StateStoreError, + StateStoreService +) + +from dapr.ext.agent_core import SupportedFrameworks, AgentMetadataSchema +import dapr.ext.agent_core.mapping + +logger = logging.getLogger(__name__) + + +class AgentRegistryAdapter: + def __init__(self, registry: Optional[AgentRegistryConfig], framework: str, agent: Any) -> None: + self._registry = registry + + try: + with DaprClient(http_timeout_seconds=10) as _client: + resp: GetMetadataResponse = _client.get_metadata() + self.appid = resp.application_id + if self._registry is None: + components: Sequence[RegisteredComponents] = resp.registered_components + for component in components: + if ( + "state" in component.type + and component.name == "agent-registry" + ): + self._registry = AgentRegistryConfig( + store=StateStoreService(store_name=component.name), + team_name="default", + ) + except TimeoutError: + logger.warning( + "Dapr sidecar not responding; proceeding without auto-configuration." 
+ ) + + if self._registry is None: + return + + self.registry_state: StateStoreService = self._registry.store + self._registry_prefix: str = "agents:" + self._meta: Dict[str, str] = {"contentType": "application/json"} + self._max_etag_attempts: int = 10 + self._save_options: Dict[str, Any] = { + "concurrency": Concurrency.first_write, + "consistency": Consistency.strong, + } + + if not self._can_handle(framework): + raise ValueError(f"Adapter cannot handle framework '{framework}'") + + _metadata = self._extract_metadata(agent) + + # We need to handle some null values here to avoid issues during registration + if _metadata.agent.appid == "": + _metadata.agent.appid = self.appid or "" + + if _metadata.registry: + if _metadata.registry.name is None: + _metadata.registry.name = self._registry.team_name + if _metadata.registry.statestore is None: + _metadata.registry.statestore = self.registry_state.store_name + + self._register(_metadata) + + def _can_handle(self, framework: str) -> bool: + """Check if this adapter can handle the given Agent.""" + + for fw in SupportedFrameworks: + if framework.lower() == fw.value.lower(): + self._framework = fw + return True + return False + + + def _extract_metadata(self, agent: Any) -> AgentMetadataSchema: + """Extract metadata from the given Agent.""" + + try: + schema_version = version("dapr-ext-agent_core") + except PackageNotFoundError: + schema_version = "edge" + + framework_mappers = { + SupportedFrameworks.DAPR_AGENTS: dapr.ext.agent_core.mapping.DaprAgentsMapper().map_agent_metadata, + SupportedFrameworks.LANGGRAPH: dapr.ext.agent_core.mapping.LangGraphMapper().map_agent_metadata, + } + + mapper = framework_mappers.get(self._framework) + if not mapper: + raise ValueError(f"Adapter cannot handle framework '{self._framework}'") + + return mapper(agent=agent, schema_version=schema_version) + + def _register(self, metadata: AgentMetadataSchema) -> None: + """Register the adapter with the given Agent.""" + """ + Upsert this agent's metadata in the team registry. + + Args: + metadata: Additional metadata to store for this agent. + team: Team override; falls back to configured default team. + """ + if not metadata.registry: + raise ValueError("Registry metadata is required for registration") + + self._upsert_agent_entry( + team=metadata.registry.name, + agent_name=metadata.name, + agent_metadata=metadata.model_dump(), + ) + + def _mutate_registry_entry( + self, + *, + team: Optional[str], + mutator: Callable[[Dict[str, Any]], Optional[Dict[str, Any]]], + max_attempts: Optional[int] = None, + ) -> None: + """ + Apply a mutation to the team registry with optimistic concurrency. + + Args: + team: Team identifier. + mutator: Function that returns the updated registry dict (or None for no-op). + max_attempts: Override for concurrency retries; defaults to init value. + + Raises: + StateStoreError: If the mutation fails after retries due to contention. 
+ """ + if not self.registry_state: + raise RuntimeError( + "registry_state must be provided to mutate the agent registry" + ) + + key = f"agents:{team or 'default'}" + self._meta["partitionKey"] = key + attempts = max_attempts or self._max_etag_attempts + + self._ensure_registry_initialized(key=key, meta=self._meta) + + for attempt in range(1, attempts + 1): + logger.debug(f"Mutating registry entry '{key}', attempt {attempt}/{attempts}") + try: + current, etag = self.registry_state.load_with_etag( + key=key, + default={}, + state_metadata=self._meta, + ) + if not isinstance(current, dict): + current = {} + + updated = mutator(dict(current)) + if updated is None: + return + + self.registry_state.save( + key=key, + value=updated, + etag=etag, + state_metadata=self._meta, + state_options=self._save_options, + ) + logger.debug(f"Successfully mutated registry entry '{key}'") + return + except Exception as exc: # noqa: BLE001 + logger.warning( + "Conflict during registry mutation (attempt %d/%d) for '%s': %s", + attempt, + attempts, + key, + exc, + ) + if attempt == attempts: + raise StateStoreError( + f"Failed to mutate agent registry key '{key}' after {attempts} attempts." + ) from exc + # Jittered backoff to reduce thundering herd during contention. + time.sleep(min(1.0 * attempt, 3.0) * (1 + random.uniform(0, 0.25))) + + def _upsert_agent_entry( + self, + *, + team: Optional[str], + agent_name: str, + agent_metadata: Dict[str, Any], + max_attempts: Optional[int] = None, + ) -> None: + """ + Insert/update a single agent record in the team registry. + + Args: + team: Team identifier. + agent_name: Agent name (key). + agent_metadata: Metadata value to write. + max_attempts: Override retry attempts. + """ + + def mutator(current: Dict[str, Any]) -> Optional[Dict[str, Any]]: + if current.get(agent_name) == agent_metadata: + return None + current[agent_name] = agent_metadata + return current + + logger.debug("Upserting agent '%s' in team '%s' registry", agent_name, team or "default") + self._mutate_registry_entry( + team=team, + mutator=mutator, + max_attempts=max_attempts, + ) + + def _remove_agent_entry( + self, + *, + team: Optional[str], + agent_name: str, + max_attempts: Optional[int] = None, + ) -> None: + """ + Delete a single agent record from the team registry. + + Args: + team: Team identifier. + agent_name: Agent name (key). + max_attempts: Override retry attempts. + """ + + def mutator(current: Dict[str, Any]) -> Optional[Dict[str, Any]]: + if agent_name not in current: + return None + del current[agent_name] + return current + + self._mutate_registry_entry( + team=team, + mutator=mutator, + max_attempts=max_attempts, + ) + + def _ensure_registry_initialized(self, *, key: str, meta: Dict[str, str]) -> None: + """ + Ensure a registry document exists to create an ETag for concurrency control. + + Args: + key: Registry document key. + meta: Dapr state metadata to use for the operation. 
+ """ + current, etag = self.registry_state.load_with_etag( # type: ignore[union-attr] + key=key, + default={}, + state_metadata=meta, + ) + if etag is None: + self.registry_state.save( # type: ignore[union-attr] + key=key, + value={}, + etag=None, + state_metadata=meta, + state_options=self._save_options, + ) diff --git a/ext/dapr-ext-agent-core/dapr/ext/agent_core/py.typed b/ext/dapr-ext-agent-core/dapr/ext/agent_core/py.typed new file mode 100644 index 000000000..e69de29bb diff --git a/ext/dapr-ext-agent-core/dapr/ext/agent_core/types.py b/ext/dapr-ext-agent-core/dapr/ext/agent_core/types.py new file mode 100644 index 000000000..33d0b7773 --- /dev/null +++ b/ext/dapr-ext-agent-core/dapr/ext/agent_core/types.py @@ -0,0 +1,143 @@ +from enum import StrEnum +from typing import Any, Dict, List, Optional +from pydantic import BaseModel, Field + +class SupportedFrameworks(StrEnum): + DAPR_AGENTS = "dapr-agents" + LANGGRAPH = "langgraph" + + +class AgentMetadata(BaseModel): + """Metadata about an agent's configuration and capabilities.""" + + appid: str = Field(..., description="Dapr application ID of the agent") + type: str = Field(..., description="Type of the agent (e.g., standalone, durable)") + orchestrator: bool = Field( + False, description="Indicates if the agent is an orchestrator" + ) + role: str = Field(default="", description="Role of the agent") + goal: str = Field(default="", description="High-level objective of the agent") + instructions: Optional[List[str]] = Field( + default=None, description="Instructions for the agent" + ) + statestore: Optional[str] = Field( + default=None, description="Dapr state store component name used by the agent" + ) + system_prompt: Optional[str] = Field( + default=None, description="System prompt guiding the agent's behavior" + ) + + +class PubSubMetadata(BaseModel): + """Pub/Sub configuration information.""" + + name: str = Field(..., description="Pub/Sub component name") + broadcast_topic: Optional[str] = Field( + default=None, description="Pub/Sub topic for broadcasting messages" + ) + agent_topic: Optional[str] = Field( + default=None, description="Pub/Sub topic for direct agent messages" + ) + + +class MemoryMetadata(BaseModel): + """Memory configuration information.""" + + type: str = Field(..., description="Type of memory used by the agent") + statestore: Optional[str] = Field( + default=None, description="Dapr state store component name for memory" + ) + session_id: Optional[str] = Field( + default=None, description="Default session ID for the agent's memory" + ) + + +class LLMMetadata(BaseModel): + """LLM configuration information.""" + + client: str = Field(..., description="LLM client used by the agent") + provider: str = Field(..., description="LLM provider used by the agent") + api: str = Field(default="unknown", description="API type used by the LLM client") + model: str = Field(default="unknown", description="Model name or identifier") + component_name: Optional[str] = Field( + default=None, description="Dapr component name for the LLM client" + ) + base_url: Optional[str] = Field( + default=None, description="Base URL for the LLM API if applicable" + ) + azure_endpoint: Optional[str] = Field( + default=None, description="Azure endpoint if using Azure OpenAI" + ) + azure_deployment: Optional[str] = Field( + default=None, description="Azure deployment name if using Azure OpenAI" + ) + prompt_template: Optional[str] = Field( + default=None, description="Prompt template used by the agent" + ) + + +class ToolMetadata(BaseModel): + 
"""Metadata about a tool available to the agent.""" + + tool_name: str = Field(..., description="Name of the tool") + tool_description: str = Field( + ..., description="Description of the tool's functionality" + ) + tool_args: str = Field(..., description="Arguments for the tool") + + +class RegistryMetadata(BaseModel): + """Registry configuration information.""" + + statestore: Optional[str] = Field( + None, description="Name of the statestore component for the registry" + ) + name: Optional[str] = Field(default=None, description="Name of the team registry") + + +class AgentMetadataSchema(BaseModel): + """Schema for agent metadata including schema version.""" + + schema_version: str = Field( + ..., + description="Version of the schema used for the agent metadata.", + ) + agent: AgentMetadata = Field( + ..., description="Agent configuration and capabilities" + ) + name: str = Field(..., description="Name of the agent") + registered_at: str = Field(..., description="ISO 8601 timestamp of registration") + pubsub: Optional[PubSubMetadata] = Field( + None, description="Pub/sub configuration if enabled" + ) + memory: Optional[MemoryMetadata] = Field( + None, description="Memory configuration if enabled" + ) + llm: Optional[LLMMetadata] = Field(None, description="LLM configuration") + registry: Optional[RegistryMetadata] = Field( + None, description="Registry configuration" + ) + tools: Optional[List[ToolMetadata]] = Field(None, description="Available tools") + max_iterations: Optional[int] = Field( + None, description="Maximum iterations for agent execution" + ) + tool_choice: Optional[str] = Field(None, description="Tool choice strategy") + agent_metadata: Optional[Dict[str, Any]] = Field( + None, description="Additional metadata about the agent" + ) + + @classmethod + def export_json_schema(cls, version: str) -> Dict[str, Any]: + """ + Export the JSON schema with version information. + + Args: + version: The dapr-agents version for this schema + + Returns: + JSON schema dictionary with metadata + """ + schema = cls.model_json_schema() + schema["$schema"] = "https://json-schema.org/draft/2020-12/schema" + schema["version"] = version + return schema \ No newline at end of file diff --git a/ext/dapr-ext-agent-core/dapr/ext/agent_core/version.py b/ext/dapr-ext-agent-core/dapr/ext/agent_core/version.py new file mode 100644 index 000000000..b81f0d988 --- /dev/null +++ b/ext/dapr-ext-agent-core/dapr/ext/agent_core/version.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- + +""" +Copyright 2025 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+""" + +__version__ = '1.17.0.dev' diff --git a/ext/dapr-ext-agent-core/schemas/agent-metadata/index.json b/ext/dapr-ext-agent-core/schemas/agent-metadata/index.json new file mode 100644 index 000000000..394fae28e --- /dev/null +++ b/ext/dapr-ext-agent-core/schemas/agent-metadata/index.json @@ -0,0 +1,8 @@ +{ + "current_version": "0.10.7", + "schema_url": "https://raw.githubusercontent.com/dapr/python-sdk/main/ext/dapr-ext-agent-core/schemas/agent-metadata/v0.10.7.json", + "available_versions": [ + "v0.10.7", + "v0.10.6" + ] +} \ No newline at end of file diff --git a/ext/dapr-ext-agent-core/schemas/agent-metadata/latest.json b/ext/dapr-ext-agent-core/schemas/agent-metadata/latest.json new file mode 100644 index 000000000..7dea02627 --- /dev/null +++ b/ext/dapr-ext-agent-core/schemas/agent-metadata/latest.json @@ -0,0 +1,2018 @@ +{ + "$defs": { + "AgentMetadata": { + "description": "Metadata about an agent's configuration and capabilities.", + "properties": { + "appid": { + "description": "Dapr application ID of the agent", + "title": "Appid", + "type": "string" + }, + "type": { + "description": "Type of the agent (e.g., standalone, durable)", + "title": "Type", + "type": "string" + }, + "orchestrator": { + "default": false, + "description": "Indicates if the agent is an orchestrator", + "title": "Orchestrator", + "type": "boolean" + }, + "role": { + "default": "", + "description": "Role of the agent", + "title": "Role", + "type": "string" + }, + "goal": { + "default": "", + "description": "High-level objective of the agent", + "title": "Goal", + "type": "string" + }, + "name": { + "default": "", + "description": "Namememory of the agent", + "title": "Name", + "type": "string" + }, + "instructions": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Instructions for the agent", + "title": "Instructions" + }, + "statestore": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Dapr state store component name used by the agent", + "title": "Statestore" + }, + "system_prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "System prompt guiding the agent's behavior", + "title": "System Prompt" + } + }, + "required": [ + "appid", + "type" + ], + "title": "AgentMetadata", + "type": "object" + }, + "AzureOpenAIModelConfig": { + "properties": { + "api_key": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "API key to authenticate the Azure OpenAI API", + "title": "Api Key" + }, + "azure_ad_token": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Azure Active Directory token for authentication", + "title": "Azure Ad Token" + }, + "organization": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Azure organization associated with the OpenAI resource", + "title": "Organization" + }, + "project": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Azure project associated with the OpenAI resource", + "title": "Project" + }, + "api_version": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "2024-07-01-preview", + "description": "API version for Azure OpenAI models", + "title": "Api Version" + }, + "azure_endpoint": { + 
"anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Azure endpoint for Azure OpenAI models", + "title": "Azure Endpoint" + }, + "azure_deployment": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Azure deployment for Azure OpenAI models", + "title": "Azure Deployment" + }, + "azure_client_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Client ID for Managed Identity authentication.", + "title": "Azure Client Id" + }, + "type": { + "const": "azure_openai", + "default": "azure_openai", + "description": "Type of the model, must always be 'azure_openai'", + "title": "Type", + "type": "string" + } + }, + "title": "AzureOpenAIModelConfig", + "type": "object" + }, + "HFHubChatCompletionParams": { + "description": "Specific settings for Hugging Face Hub Chat Completion endpoint.", + "properties": { + "model": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The model to use for chat-completion. Can be a model ID or a URL to a deployed Inference Endpoint.", + "title": "Model" + }, + "frequency_penalty": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0.0, + "description": "Penalizes new tokens based on their existing frequency in the text so far.", + "title": "Frequency Penalty" + }, + "logit_bias": { + "anyOf": [ + { + "additionalProperties": { + "type": "number" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Modify the likelihood of specified tokens appearing in the completion.", + "title": "Logit Bias" + }, + "logprobs": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": false, + "description": "Whether to return log probabilities of the output tokens or not.", + "title": "Logprobs" + }, + "max_tokens": { + "anyOf": [ + { + "minimum": 1, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": 100, + "description": "Maximum number of tokens allowed in the response.", + "title": "Max Tokens" + }, + "n": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "UNUSED. Included for compatibility.", + "title": "N" + }, + "presence_penalty": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0.0, + "description": "Penalizes new tokens based on their presence in the text so far.", + "title": "Presence Penalty" + }, + "response_format": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Grammar constraints. 
Can be either a JSONSchema or a regex.", + "title": "Response Format" + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Seed for reproducible control flow.", + "title": "Seed" + }, + "stop": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Up to four strings which trigger the end of the response.", + "title": "Stop" + }, + "stream": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": false, + "description": "Enable realtime streaming of responses.", + "title": "Stream" + }, + "stream_options": { + "anyOf": [ + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Options for streaming completions.", + "title": "Stream Options" + }, + "temperature": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": 1.0, + "description": "Controls randomness of the generations.", + "title": "Temperature" + }, + "top_logprobs": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Number of most likely tokens to return at each position.", + "title": "Top Logprobs" + }, + "top_p": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Fraction of the most likely next words to sample from.", + "title": "Top P" + }, + "tool_choice": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The tool to use for the completion. Defaults to 'auto'.", + "title": "Tool Choice" + }, + "tool_prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "A prompt to be appended before the tools.", + "title": "Tool Prompt" + }, + "tools": { + "anyOf": [ + { + "items": { + "additionalProperties": true, + "type": "object" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "A list of tools the model may call.", + "title": "Tools" + } + }, + "title": "HFHubChatCompletionParams", + "type": "object" + }, + "HFHubModelConfig": { + "properties": { + "model": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Model ID on Hugging Face Hub or a URL to a deployed endpoint. If not set, a recommended model may be chosen by your wrapper.", + "title": "Model" + }, + "hf_provider": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "auto", + "description": "Inference provider to use. Defaults to automatic selection based on available providers. Ignored if a custom endpoint URL is provided.", + "title": "Hf Provider" + }, + "token": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Hugging Face access token for authentication. If None, uses the locally saved token. Set to False to skip sending a token. Mutually exclusive with api_key.", + "title": "Token" + }, + "api_key": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Alias for token. 
Use only one of token or api_key.", + "title": "Api Key" + }, + "base_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Custom endpoint URL for inference. Used for private deployments or TGI endpoints. Cannot be set if 'model' is a Hub ID.", + "title": "Base Url" + }, + "timeout": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Maximum seconds to wait for a response. If None, waits indefinitely. Useful for slow model loading.", + "title": "Timeout" + }, + "headers": { + "anyOf": [ + { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "description": "Extra HTTP headers to send with requests. Overrides defaults like authorization and user-agent.", + "title": "Headers" + }, + "cookies": { + "anyOf": [ + { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "description": "Extra cookies to send with requests.", + "title": "Cookies" + }, + "proxies": { + "anyOf": [ + {}, + { + "type": "null" + } + ], + "default": null, + "description": "Proxy settings for HTTP requests. Use standard requests format.", + "title": "Proxies" + }, + "bill_to": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Billing account for requests. Only used for enterprise/organization billing.", + "title": "Bill To" + }, + "type": { + "const": "huggingface", + "default": "huggingface", + "description": "Type of the model, must always be 'huggingface'", + "title": "Type", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the model available through Hugging Face", + "title": "Name", + "type": "string" + } + }, + "title": "HFHubModelConfig", + "type": "object" + }, + "LLMMetadata": { + "description": "LLM configuration information.", + "properties": { + "client": { + "description": "LLM client used by the agent", + "title": "Client", + "type": "string" + }, + "provider": { + "description": "LLM provider used by the agent", + "title": "Provider", + "type": "string" + }, + "api": { + "default": "unknown", + "description": "API type used by the LLM client", + "title": "Api", + "type": "string" + }, + "model": { + "default": "unknown", + "description": "Model name or identifier", + "title": "Model", + "type": "string" + }, + "component_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Dapr component name for the LLM client", + "title": "Component Name" + }, + "base_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Base URL for the LLM API if applicable", + "title": "Base Url" + }, + "azure_endpoint": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Azure endpoint if using Azure OpenAI", + "title": "Azure Endpoint" + }, + "azure_deployment": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Azure deployment name if using Azure OpenAI", + "title": "Azure Deployment" + }, + "prompt_template": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Prompt template used by the agent", + "title": "Prompt Template" + }, + "prompty": { + "anyOf": [ + { + "$ref": "#/$defs/Prompty" + }, + { + "type": "null" + } + 
], + "default": null, + "description": "Prompty template name if used" + } + }, + "required": [ + "client", + "provider" + ], + "title": "LLMMetadata", + "type": "object" + }, + "MemoryMetadata": { + "description": "Memory configuration information.", + "properties": { + "type": { + "description": "Type of memory used by the agent", + "title": "Type", + "type": "string" + }, + "statestore": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Dapr state store component name for memory", + "title": "Statestore" + }, + "session_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Default session ID for the agent's memory", + "title": "Session Id" + } + }, + "required": [ + "type" + ], + "title": "MemoryMetadata", + "type": "object" + }, + "NVIDIAChatCompletionParams": { + "description": "Specific settings for the Chat Completion endpoint.", + "properties": { + "model": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "ID of the model to use", + "title": "Model" + }, + "temperature": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": 0.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0, + "description": "Sampling temperature", + "title": "Temperature" + }, + "max_tokens": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Maximum number of tokens to generate. Can be None or a positive integer.", + "title": "Max Tokens" + }, + "top_p": { + "anyOf": [ + { + "maximum": 1.0, + "minimum": 0.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 1.0, + "description": "Nucleus sampling probability mass", + "title": "Top P" + }, + "frequency_penalty": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": -2.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0.0, + "description": "Frequency penalty", + "title": "Frequency Penalty" + }, + "presence_penalty": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": -2.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0.0, + "description": "Presence penalty", + "title": "Presence Penalty" + }, + "stop": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Stop sequences", + "title": "Stop" + }, + "stream": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": false, + "description": "Whether to stream responses", + "title": "Stream" + }, + "logit_bias": { + "anyOf": [ + { + "additionalProperties": { + "type": "number" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Modify likelihood of specified tokens", + "title": "Logit Bias" + }, + "logprobs": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": false, + "description": "Whether to return log probabilities", + "title": "Logprobs" + }, + "top_logprobs": { + "anyOf": [ + { + "maximum": 20, + "minimum": 0, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Number of top log probabilities to return", + "title": "Top Logprobs" + }, + "n": { + "anyOf": [ + { + "maximum": 128, + "minimum": 1, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": 1, + "description": "Number of chat completion choices to generate", + "title": "N" 
+ }, + "tools": { + "anyOf": [ + { + "items": { + "additionalProperties": true, + "type": "object" + }, + "maxItems": 64, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "List of tools the model may call", + "title": "Tools" + }, + "tool_choice": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Controls which tool is called", + "title": "Tool Choice" + } + }, + "title": "NVIDIAChatCompletionParams", + "type": "object" + }, + "NVIDIAModelConfig": { + "properties": { + "base_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "https://integrate.api.nvidia.com/v1", + "description": "Base URL for the NVIDIA API", + "title": "Base Url" + }, + "api_key": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "API key to authenticate the NVIDIA API", + "title": "Api Key" + }, + "type": { + "const": "nvidia", + "default": "nvidia", + "description": "Type of the model, must always be 'nvidia'", + "title": "Type", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the model available through NVIDIA", + "title": "Name", + "type": "string" + } + }, + "title": "NVIDIAModelConfig", + "type": "object" + }, + "OpenAIChatCompletionParams": { + "description": "Specific settings for the Chat Completion endpoint.", + "properties": { + "model": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "ID of the model to use", + "title": "Model" + }, + "temperature": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": 0.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0, + "description": "Sampling temperature", + "title": "Temperature" + }, + "max_tokens": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Maximum number of tokens to generate. 
Can be None or a positive integer.", + "title": "Max Tokens" + }, + "top_p": { + "anyOf": [ + { + "maximum": 1.0, + "minimum": 0.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 1.0, + "description": "Nucleus sampling probability mass", + "title": "Top P" + }, + "frequency_penalty": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": -2.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0.0, + "description": "Frequency penalty", + "title": "Frequency Penalty" + }, + "presence_penalty": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": -2.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0.0, + "description": "Presence penalty", + "title": "Presence Penalty" + }, + "stop": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Stop sequences", + "title": "Stop" + }, + "stream": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": false, + "description": "Whether to stream responses", + "title": "Stream" + }, + "logit_bias": { + "anyOf": [ + { + "additionalProperties": { + "type": "number" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Modify likelihood of specified tokens", + "title": "Logit Bias" + }, + "logprobs": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": false, + "description": "Whether to return log probabilities", + "title": "Logprobs" + }, + "top_logprobs": { + "anyOf": [ + { + "maximum": 20, + "minimum": 0, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Number of top log probabilities to return", + "title": "Top Logprobs" + }, + "n": { + "anyOf": [ + { + "maximum": 128, + "minimum": 1, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": 1, + "description": "Number of chat completion choices to generate", + "title": "N" + }, + "response_format": { + "anyOf": [ + { + "additionalProperties": { + "enum": [ + "text", + "json_object" + ], + "type": "string" + }, + "propertyNames": { + "const": "type" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Format of the response", + "title": "Response Format" + }, + "tools": { + "anyOf": [ + { + "items": { + "additionalProperties": true, + "type": "object" + }, + "maxItems": 64, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "List of tools the model may call", + "title": "Tools" + }, + "tool_choice": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Controls which tool is called", + "title": "Tool Choice" + }, + "function_call": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Controls which function is called", + "title": "Function Call" + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Seed for deterministic sampling", + "title": "Seed" + }, + "user": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Unique identifier representing the end-user", + "title": "User" + } + }, + "title": "OpenAIChatCompletionParams", + "type": 
"object" + }, + "OpenAIModelConfig": { + "properties": { + "base_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Base URL for the OpenAI API", + "title": "Base Url" + }, + "api_key": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "API key to authenticate the OpenAI API", + "title": "Api Key" + }, + "organization": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Organization name for OpenAI", + "title": "Organization" + }, + "project": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "OpenAI project name.", + "title": "Project" + }, + "type": { + "const": "openai", + "default": "openai", + "description": "Type of the model, must always be 'openai'", + "title": "Type", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the OpenAI model", + "title": "Name", + "type": "string" + } + }, + "title": "OpenAIModelConfig", + "type": "object" + }, + "OpenAITextCompletionParams": { + "description": "Specific configs for the text completions endpoint.", + "properties": { + "model": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "ID of the model to use", + "title": "Model" + }, + "temperature": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": 0.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0, + "description": "Sampling temperature", + "title": "Temperature" + }, + "max_tokens": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Maximum number of tokens to generate. 
Can be None or a positive integer.", + "title": "Max Tokens" + }, + "top_p": { + "anyOf": [ + { + "maximum": 1.0, + "minimum": 0.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 1.0, + "description": "Nucleus sampling probability mass", + "title": "Top P" + }, + "frequency_penalty": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": -2.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0.0, + "description": "Frequency penalty", + "title": "Frequency Penalty" + }, + "presence_penalty": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": -2.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0.0, + "description": "Presence penalty", + "title": "Presence Penalty" + }, + "stop": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Stop sequences", + "title": "Stop" + }, + "stream": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": false, + "description": "Whether to stream responses", + "title": "Stream" + }, + "best_of": { + "anyOf": [ + { + "minimum": 1, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Number of best completions to generate", + "title": "Best Of" + }, + "echo": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": false, + "description": "Whether to echo the prompt", + "title": "Echo" + }, + "logprobs": { + "anyOf": [ + { + "maximum": 5, + "minimum": 0, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Include log probabilities", + "title": "Logprobs" + }, + "suffix": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Suffix to append to the prompt", + "title": "Suffix" + } + }, + "title": "OpenAITextCompletionParams", + "type": "object" + }, + "Prompty": { + "description": "A class to handle loading and formatting of Prompty templates for language models workflows.", + "properties": { + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "", + "description": "Name of the Prompty file.", + "title": "Name" + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "", + "description": "Description of the Prompty file.", + "title": "Description" + }, + "version": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "1.0", + "description": "Version of the Prompty.", + "title": "Version" + }, + "authors": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": [], + "description": "List of authors for the Prompty.", + "title": "Authors" + }, + "tags": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": [], + "description": "Tags to categorize the Prompty.", + "title": "Tags" + }, + "model": { + "$ref": "#/$defs/PromptyModelConfig", + "description": "Model configuration. Can be either OpenAI or Azure OpenAI." + }, + "inputs": { + "additionalProperties": true, + "default": {}, + "description": "Input parameters for the Prompty. 
These define the expected inputs.", + "title": "Inputs", + "type": "object" + }, + "sample": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Sample input or the path to a sample file for testing the Prompty.", + "title": "Sample" + }, + "outputs": { + "anyOf": [ + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": {}, + "description": "Optional outputs for the Prompty. Defines expected output format.", + "title": "Outputs" + }, + "content": { + "description": "The prompt messages defined in the Prompty file.", + "title": "Content", + "type": "string" + } + }, + "required": [ + "model", + "content" + ], + "title": "Prompty", + "type": "object" + }, + "PromptyModelConfig": { + "properties": { + "api": { + "default": "chat", + "description": "The API to use, either 'chat' or 'completion'", + "enum": [ + "chat", + "completion" + ], + "title": "Api", + "type": "string" + }, + "configuration": { + "anyOf": [ + { + "$ref": "#/$defs/OpenAIModelConfig" + }, + { + "$ref": "#/$defs/AzureOpenAIModelConfig" + }, + { + "$ref": "#/$defs/HFHubModelConfig" + }, + { + "$ref": "#/$defs/NVIDIAModelConfig" + } + ], + "description": "Model configuration settings", + "title": "Configuration" + }, + "parameters": { + "anyOf": [ + { + "$ref": "#/$defs/OpenAITextCompletionParams" + }, + { + "$ref": "#/$defs/OpenAIChatCompletionParams" + }, + { + "$ref": "#/$defs/HFHubChatCompletionParams" + }, + { + "$ref": "#/$defs/NVIDIAChatCompletionParams" + } + ], + "description": "Parameters for the model request", + "title": "Parameters" + }, + "response": { + "default": "first", + "description": "Determines if full response or just the first one is returned", + "enum": [ + "first", + "full" + ], + "title": "Response", + "type": "string" + } + }, + "required": [ + "configuration", + "parameters" + ], + "title": "PromptyModelConfig", + "type": "object" + }, + "PubSubMetadata": { + "description": "Pub/Sub configuration information.", + "properties": { + "agent_name": { + "description": "Pub/Sub topic the agent subscribes to", + "title": "Agent Name", + "type": "string" + }, + "name": { + "description": "Pub/Sub component name", + "title": "Name", + "type": "string" + }, + "broadcast_topic": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Pub/Sub topic for broadcasting messages", + "title": "Broadcast Topic" + }, + "agent_topic": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Pub/Sub topic for direct agent messages", + "title": "Agent Topic" + } + }, + "required": [ + "agent_name", + "name" + ], + "title": "PubSubMetadata", + "type": "object" + }, + "RegistryMetadata": { + "description": "Registry configuration information.", + "properties": { + "statestore": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Name of the statestore component for the registry", + "title": "Statestore" + }, + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Name of the team registry", + "title": "Name" + } + }, + "title": "RegistryMetadata", + "type": "object" + }, + "ToolMetadata": { + "description": "Metadata about a tool available to the agent.", + "properties": { + "tool_name": { + "description": "Name of the tool", + 
"title": "Tool Name", + "type": "string" + }, + "tool_description": { + "description": "Description of the tool's functionality", + "title": "Tool Description", + "type": "string" + }, + "tool_args": { + "description": "Arguments for the tool", + "title": "Tool Args", + "type": "string" + } + }, + "required": [ + "tool_name", + "tool_description", + "tool_args" + ], + "title": "ToolMetadata", + "type": "object" + } + }, + "description": "Schema for agent metadata including schema version.", + "properties": { + "schema_version": { + "description": "Version of the schema used for the agent metadata.", + "title": "Schema Version", + "type": "string" + }, + "agent": { + "$ref": "#/$defs/AgentMetadata", + "description": "Agent configuration and capabilities" + }, + "name": { + "description": "Name of the agent", + "title": "Name", + "type": "string" + }, + "registered_at": { + "description": "ISO 8601 timestamp of registration", + "title": "Registered At", + "type": "string" + }, + "pubsub": { + "anyOf": [ + { + "$ref": "#/$defs/PubSubMetadata" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Pub/sub configuration if enabled" + }, + "memory": { + "anyOf": [ + { + "$ref": "#/$defs/MemoryMetadata" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Memory configuration if enabled" + }, + "llm": { + "anyOf": [ + { + "$ref": "#/$defs/LLMMetadata" + }, + { + "type": "null" + } + ], + "default": null, + "description": "LLM configuration" + }, + "registry": { + "anyOf": [ + { + "$ref": "#/$defs/RegistryMetadata" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Registry configuration" + }, + "tools": { + "anyOf": [ + { + "items": { + "$ref": "#/$defs/ToolMetadata" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Available tools", + "title": "Tools" + }, + "max_iterations": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Maximum iterations for agent execution", + "title": "Max Iterations" + }, + "tool_choice": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Tool choice strategy", + "title": "Tool Choice" + }, + "agent_metadata": { + "anyOf": [ + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Additional metadata about the agent", + "title": "Agent Metadata" + } + }, + "required": [ + "schema_version", + "agent", + "name", + "registered_at" + ], + "title": "AgentMetadataSchema", + "type": "object", + "$schema": "https://json-schema.org/draft/2020-12/schema", + "version": "0.10.7" +} \ No newline at end of file diff --git a/ext/dapr-ext-agent-core/schemas/agent-metadata/v0.10.6.json b/ext/dapr-ext-agent-core/schemas/agent-metadata/v0.10.6.json new file mode 100644 index 000000000..b9cbb0bb1 --- /dev/null +++ b/ext/dapr-ext-agent-core/schemas/agent-metadata/v0.10.6.json @@ -0,0 +1,2018 @@ +{ + "$defs": { + "AgentMetadata": { + "description": "Metadata about an agent's configuration and capabilities.", + "properties": { + "appid": { + "description": "Dapr application ID of the agent", + "title": "Appid", + "type": "string" + }, + "type": { + "description": "Type of the agent (e.g., standalone, durable)", + "title": "Type", + "type": "string" + }, + "orchestrator": { + "default": false, + "description": "Indicates if the agent is an orchestrator", + "title": "Orchestrator", + "type": "boolean" + }, + 
"role": { + "default": "", + "description": "Role of the agent", + "title": "Role", + "type": "string" + }, + "goal": { + "default": "", + "description": "High-level objective of the agent", + "title": "Goal", + "type": "string" + }, + "name": { + "default": "", + "description": "Namememory of the agent", + "title": "Name", + "type": "string" + }, + "instructions": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Instructions for the agent", + "title": "Instructions" + }, + "statestore": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Dapr state store component name used by the agent", + "title": "Statestore" + }, + "system_prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "System prompt guiding the agent's behavior", + "title": "System Prompt" + } + }, + "required": [ + "appid", + "type" + ], + "title": "AgentMetadata", + "type": "object" + }, + "AzureOpenAIModelConfig": { + "properties": { + "api_key": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "API key to authenticate the Azure OpenAI API", + "title": "Api Key" + }, + "azure_ad_token": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Azure Active Directory token for authentication", + "title": "Azure Ad Token" + }, + "organization": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Azure organization associated with the OpenAI resource", + "title": "Organization" + }, + "project": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Azure project associated with the OpenAI resource", + "title": "Project" + }, + "api_version": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "2024-07-01-preview", + "description": "API version for Azure OpenAI models", + "title": "Api Version" + }, + "azure_endpoint": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Azure endpoint for Azure OpenAI models", + "title": "Azure Endpoint" + }, + "azure_deployment": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Azure deployment for Azure OpenAI models", + "title": "Azure Deployment" + }, + "azure_client_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Client ID for Managed Identity authentication.", + "title": "Azure Client Id" + }, + "type": { + "const": "azure_openai", + "default": "azure_openai", + "description": "Type of the model, must always be 'azure_openai'", + "title": "Type", + "type": "string" + } + }, + "title": "AzureOpenAIModelConfig", + "type": "object" + }, + "HFHubChatCompletionParams": { + "description": "Specific settings for Hugging Face Hub Chat Completion endpoint.", + "properties": { + "model": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The model to use for chat-completion. 
Can be a model ID or a URL to a deployed Inference Endpoint.", + "title": "Model" + }, + "frequency_penalty": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0.0, + "description": "Penalizes new tokens based on their existing frequency in the text so far.", + "title": "Frequency Penalty" + }, + "logit_bias": { + "anyOf": [ + { + "additionalProperties": { + "type": "number" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Modify the likelihood of specified tokens appearing in the completion.", + "title": "Logit Bias" + }, + "logprobs": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": false, + "description": "Whether to return log probabilities of the output tokens or not.", + "title": "Logprobs" + }, + "max_tokens": { + "anyOf": [ + { + "minimum": 1, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": 100, + "description": "Maximum number of tokens allowed in the response.", + "title": "Max Tokens" + }, + "n": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "UNUSED. Included for compatibility.", + "title": "N" + }, + "presence_penalty": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0.0, + "description": "Penalizes new tokens based on their presence in the text so far.", + "title": "Presence Penalty" + }, + "response_format": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Grammar constraints. Can be either a JSONSchema or a regex.", + "title": "Response Format" + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Seed for reproducible control flow.", + "title": "Seed" + }, + "stop": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Up to four strings which trigger the end of the response.", + "title": "Stop" + }, + "stream": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": false, + "description": "Enable realtime streaming of responses.", + "title": "Stream" + }, + "stream_options": { + "anyOf": [ + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Options for streaming completions.", + "title": "Stream Options" + }, + "temperature": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": 1.0, + "description": "Controls randomness of the generations.", + "title": "Temperature" + }, + "top_logprobs": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Number of most likely tokens to return at each position.", + "title": "Top Logprobs" + }, + "top_p": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Fraction of the most likely next words to sample from.", + "title": "Top P" + }, + "tool_choice": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The tool to use for the completion. 
Defaults to 'auto'.", + "title": "Tool Choice" + }, + "tool_prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "A prompt to be appended before the tools.", + "title": "Tool Prompt" + }, + "tools": { + "anyOf": [ + { + "items": { + "additionalProperties": true, + "type": "object" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "A list of tools the model may call.", + "title": "Tools" + } + }, + "title": "HFHubChatCompletionParams", + "type": "object" + }, + "HFHubModelConfig": { + "properties": { + "model": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Model ID on Hugging Face Hub or a URL to a deployed endpoint. If not set, a recommended model may be chosen by your wrapper.", + "title": "Model" + }, + "hf_provider": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "auto", + "description": "Inference provider to use. Defaults to automatic selection based on available providers. Ignored if a custom endpoint URL is provided.", + "title": "Hf Provider" + }, + "token": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Hugging Face access token for authentication. If None, uses the locally saved token. Set to False to skip sending a token. Mutually exclusive with api_key.", + "title": "Token" + }, + "api_key": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Alias for token. Use only one of token or api_key.", + "title": "Api Key" + }, + "base_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Custom endpoint URL for inference. Used for private deployments or TGI endpoints. Cannot be set if 'model' is a Hub ID.", + "title": "Base Url" + }, + "timeout": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Maximum seconds to wait for a response. If None, waits indefinitely. Useful for slow model loading.", + "title": "Timeout" + }, + "headers": { + "anyOf": [ + { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "description": "Extra HTTP headers to send with requests. Overrides defaults like authorization and user-agent.", + "title": "Headers" + }, + "cookies": { + "anyOf": [ + { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "description": "Extra cookies to send with requests.", + "title": "Cookies" + }, + "proxies": { + "anyOf": [ + {}, + { + "type": "null" + } + ], + "default": null, + "description": "Proxy settings for HTTP requests. Use standard requests format.", + "title": "Proxies" + }, + "bill_to": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Billing account for requests. 
Only used for enterprise/organization billing.", + "title": "Bill To" + }, + "type": { + "const": "huggingface", + "default": "huggingface", + "description": "Type of the model, must always be 'huggingface'", + "title": "Type", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the model available through Hugging Face", + "title": "Name", + "type": "string" + } + }, + "title": "HFHubModelConfig", + "type": "object" + }, + "LLMMetadata": { + "description": "LLM configuration information.", + "properties": { + "client": { + "description": "LLM client used by the agent", + "title": "Client", + "type": "string" + }, + "provider": { + "description": "LLM provider used by the agent", + "title": "Provider", + "type": "string" + }, + "api": { + "default": "unknown", + "description": "API type used by the LLM client", + "title": "Api", + "type": "string" + }, + "model": { + "default": "unknown", + "description": "Model name or identifier", + "title": "Model", + "type": "string" + }, + "component_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Dapr component name for the LLM client", + "title": "Component Name" + }, + "base_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Base URL for the LLM API if applicable", + "title": "Base Url" + }, + "azure_endpoint": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Azure endpoint if using Azure OpenAI", + "title": "Azure Endpoint" + }, + "azure_deployment": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Azure deployment name if using Azure OpenAI", + "title": "Azure Deployment" + }, + "prompt_template": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Prompt template used by the agent", + "title": "Prompt Template" + }, + "prompty": { + "anyOf": [ + { + "$ref": "#/$defs/Prompty" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Prompty template name if used" + } + }, + "required": [ + "client", + "provider" + ], + "title": "LLMMetadata", + "type": "object" + }, + "MemoryMetadata": { + "description": "Memory configuration information.", + "properties": { + "type": { + "description": "Type of memory used by the agent", + "title": "Type", + "type": "string" + }, + "statestore": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Dapr state store component name for memory", + "title": "Statestore" + }, + "session_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Default session ID for the agent's memory", + "title": "Session Id" + } + }, + "required": [ + "type" + ], + "title": "MemoryMetadata", + "type": "object" + }, + "NVIDIAChatCompletionParams": { + "description": "Specific settings for the Chat Completion endpoint.", + "properties": { + "model": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "ID of the model to use", + "title": "Model" + }, + "temperature": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": 0.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0, + "description": "Sampling temperature", + "title": "Temperature" + }, + "max_tokens": { + "anyOf": [ + { + "type": "integer" + }, + 
{ + "type": "null" + } + ], + "default": null, + "description": "Maximum number of tokens to generate. Can be None or a positive integer.", + "title": "Max Tokens" + }, + "top_p": { + "anyOf": [ + { + "maximum": 1.0, + "minimum": 0.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 1.0, + "description": "Nucleus sampling probability mass", + "title": "Top P" + }, + "frequency_penalty": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": -2.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0.0, + "description": "Frequency penalty", + "title": "Frequency Penalty" + }, + "presence_penalty": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": -2.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0.0, + "description": "Presence penalty", + "title": "Presence Penalty" + }, + "stop": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Stop sequences", + "title": "Stop" + }, + "stream": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": false, + "description": "Whether to stream responses", + "title": "Stream" + }, + "logit_bias": { + "anyOf": [ + { + "additionalProperties": { + "type": "number" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Modify likelihood of specified tokens", + "title": "Logit Bias" + }, + "logprobs": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": false, + "description": "Whether to return log probabilities", + "title": "Logprobs" + }, + "top_logprobs": { + "anyOf": [ + { + "maximum": 20, + "minimum": 0, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Number of top log probabilities to return", + "title": "Top Logprobs" + }, + "n": { + "anyOf": [ + { + "maximum": 128, + "minimum": 1, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": 1, + "description": "Number of chat completion choices to generate", + "title": "N" + }, + "tools": { + "anyOf": [ + { + "items": { + "additionalProperties": true, + "type": "object" + }, + "maxItems": 64, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "List of tools the model may call", + "title": "Tools" + }, + "tool_choice": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Controls which tool is called", + "title": "Tool Choice" + } + }, + "title": "NVIDIAChatCompletionParams", + "type": "object" + }, + "NVIDIAModelConfig": { + "properties": { + "base_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "https://integrate.api.nvidia.com/v1", + "description": "Base URL for the NVIDIA API", + "title": "Base Url" + }, + "api_key": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "API key to authenticate the NVIDIA API", + "title": "Api Key" + }, + "type": { + "const": "nvidia", + "default": "nvidia", + "description": "Type of the model, must always be 'nvidia'", + "title": "Type", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the model available through NVIDIA", + "title": "Name", + "type": "string" + } + }, + "title": "NVIDIAModelConfig", + "type": "object" + }, + "OpenAIChatCompletionParams": { + 
"description": "Specific settings for the Chat Completion endpoint.", + "properties": { + "model": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "ID of the model to use", + "title": "Model" + }, + "temperature": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": 0.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0, + "description": "Sampling temperature", + "title": "Temperature" + }, + "max_tokens": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Maximum number of tokens to generate. Can be None or a positive integer.", + "title": "Max Tokens" + }, + "top_p": { + "anyOf": [ + { + "maximum": 1.0, + "minimum": 0.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 1.0, + "description": "Nucleus sampling probability mass", + "title": "Top P" + }, + "frequency_penalty": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": -2.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0.0, + "description": "Frequency penalty", + "title": "Frequency Penalty" + }, + "presence_penalty": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": -2.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0.0, + "description": "Presence penalty", + "title": "Presence Penalty" + }, + "stop": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Stop sequences", + "title": "Stop" + }, + "stream": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": false, + "description": "Whether to stream responses", + "title": "Stream" + }, + "logit_bias": { + "anyOf": [ + { + "additionalProperties": { + "type": "number" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Modify likelihood of specified tokens", + "title": "Logit Bias" + }, + "logprobs": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": false, + "description": "Whether to return log probabilities", + "title": "Logprobs" + }, + "top_logprobs": { + "anyOf": [ + { + "maximum": 20, + "minimum": 0, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Number of top log probabilities to return", + "title": "Top Logprobs" + }, + "n": { + "anyOf": [ + { + "maximum": 128, + "minimum": 1, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": 1, + "description": "Number of chat completion choices to generate", + "title": "N" + }, + "response_format": { + "anyOf": [ + { + "additionalProperties": { + "enum": [ + "text", + "json_object" + ], + "type": "string" + }, + "propertyNames": { + "const": "type" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Format of the response", + "title": "Response Format" + }, + "tools": { + "anyOf": [ + { + "items": { + "additionalProperties": true, + "type": "object" + }, + "maxItems": 64, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "List of tools the model may call", + "title": "Tools" + }, + "tool_choice": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Controls which tool is called", + "title": "Tool Choice" + }, + "function_call": { + "anyOf": [ + { + "type": 
"string" + }, + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Controls which function is called", + "title": "Function Call" + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Seed for deterministic sampling", + "title": "Seed" + }, + "user": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Unique identifier representing the end-user", + "title": "User" + } + }, + "title": "OpenAIChatCompletionParams", + "type": "object" + }, + "OpenAIModelConfig": { + "properties": { + "base_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Base URL for the OpenAI API", + "title": "Base Url" + }, + "api_key": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "API key to authenticate the OpenAI API", + "title": "Api Key" + }, + "organization": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Organization name for OpenAI", + "title": "Organization" + }, + "project": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "OpenAI project name.", + "title": "Project" + }, + "type": { + "const": "openai", + "default": "openai", + "description": "Type of the model, must always be 'openai'", + "title": "Type", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the OpenAI model", + "title": "Name", + "type": "string" + } + }, + "title": "OpenAIModelConfig", + "type": "object" + }, + "OpenAITextCompletionParams": { + "description": "Specific configs for the text completions endpoint.", + "properties": { + "model": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "ID of the model to use", + "title": "Model" + }, + "temperature": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": 0.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0, + "description": "Sampling temperature", + "title": "Temperature" + }, + "max_tokens": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Maximum number of tokens to generate. 
Can be None or a positive integer.", + "title": "Max Tokens" + }, + "top_p": { + "anyOf": [ + { + "maximum": 1.0, + "minimum": 0.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 1.0, + "description": "Nucleus sampling probability mass", + "title": "Top P" + }, + "frequency_penalty": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": -2.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0.0, + "description": "Frequency penalty", + "title": "Frequency Penalty" + }, + "presence_penalty": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": -2.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0.0, + "description": "Presence penalty", + "title": "Presence Penalty" + }, + "stop": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Stop sequences", + "title": "Stop" + }, + "stream": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": false, + "description": "Whether to stream responses", + "title": "Stream" + }, + "best_of": { + "anyOf": [ + { + "minimum": 1, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Number of best completions to generate", + "title": "Best Of" + }, + "echo": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": false, + "description": "Whether to echo the prompt", + "title": "Echo" + }, + "logprobs": { + "anyOf": [ + { + "maximum": 5, + "minimum": 0, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Include log probabilities", + "title": "Logprobs" + }, + "suffix": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Suffix to append to the prompt", + "title": "Suffix" + } + }, + "title": "OpenAITextCompletionParams", + "type": "object" + }, + "Prompty": { + "description": "A class to handle loading and formatting of Prompty templates for language models workflows.", + "properties": { + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "", + "description": "Name of the Prompty file.", + "title": "Name" + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "", + "description": "Description of the Prompty file.", + "title": "Description" + }, + "version": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "1.0", + "description": "Version of the Prompty.", + "title": "Version" + }, + "authors": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": [], + "description": "List of authors for the Prompty.", + "title": "Authors" + }, + "tags": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": [], + "description": "Tags to categorize the Prompty.", + "title": "Tags" + }, + "model": { + "$ref": "#/$defs/PromptyModelConfig", + "description": "Model configuration. Can be either OpenAI or Azure OpenAI." + }, + "inputs": { + "additionalProperties": true, + "default": {}, + "description": "Input parameters for the Prompty. 
These define the expected inputs.", + "title": "Inputs", + "type": "object" + }, + "sample": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Sample input or the path to a sample file for testing the Prompty.", + "title": "Sample" + }, + "outputs": { + "anyOf": [ + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": {}, + "description": "Optional outputs for the Prompty. Defines expected output format.", + "title": "Outputs" + }, + "content": { + "description": "The prompt messages defined in the Prompty file.", + "title": "Content", + "type": "string" + } + }, + "required": [ + "model", + "content" + ], + "title": "Prompty", + "type": "object" + }, + "PromptyModelConfig": { + "properties": { + "api": { + "default": "chat", + "description": "The API to use, either 'chat' or 'completion'", + "enum": [ + "chat", + "completion" + ], + "title": "Api", + "type": "string" + }, + "configuration": { + "anyOf": [ + { + "$ref": "#/$defs/OpenAIModelConfig" + }, + { + "$ref": "#/$defs/AzureOpenAIModelConfig" + }, + { + "$ref": "#/$defs/HFHubModelConfig" + }, + { + "$ref": "#/$defs/NVIDIAModelConfig" + } + ], + "description": "Model configuration settings", + "title": "Configuration" + }, + "parameters": { + "anyOf": [ + { + "$ref": "#/$defs/OpenAITextCompletionParams" + }, + { + "$ref": "#/$defs/OpenAIChatCompletionParams" + }, + { + "$ref": "#/$defs/HFHubChatCompletionParams" + }, + { + "$ref": "#/$defs/NVIDIAChatCompletionParams" + } + ], + "description": "Parameters for the model request", + "title": "Parameters" + }, + "response": { + "default": "first", + "description": "Determines if full response or just the first one is returned", + "enum": [ + "first", + "full" + ], + "title": "Response", + "type": "string" + } + }, + "required": [ + "configuration", + "parameters" + ], + "title": "PromptyModelConfig", + "type": "object" + }, + "PubSubMetadata": { + "description": "Pub/Sub configuration information.", + "properties": { + "agent_name": { + "description": "Pub/Sub topic the agent subscribes to", + "title": "Agent Name", + "type": "string" + }, + "name": { + "description": "Pub/Sub component name", + "title": "Name", + "type": "string" + }, + "broadcast_topic": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Pub/Sub topic for broadcasting messages", + "title": "Broadcast Topic" + }, + "agent_topic": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Pub/Sub topic for direct agent messages", + "title": "Agent Topic" + } + }, + "required": [ + "agent_name", + "name" + ], + "title": "PubSubMetadata", + "type": "object" + }, + "RegistryMetadata": { + "description": "Registry configuration information.", + "properties": { + "statestore": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Name of the statestore component for the registry", + "title": "Statestore" + }, + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Name of the team registry", + "title": "Name" + } + }, + "title": "RegistryMetadata", + "type": "object" + }, + "ToolMetadata": { + "description": "Metadata about a tool available to the agent.", + "properties": { + "tool_name": { + "description": "Name of the tool", + 
"title": "Tool Name", + "type": "string" + }, + "tool_description": { + "description": "Description of the tool's functionality", + "title": "Tool Description", + "type": "string" + }, + "tool_args": { + "description": "Arguments for the tool", + "title": "Tool Args", + "type": "string" + } + }, + "required": [ + "tool_name", + "tool_description", + "tool_args" + ], + "title": "ToolMetadata", + "type": "object" + } + }, + "description": "Schema for agent metadata including schema version.", + "properties": { + "schema_version": { + "description": "Version of the schema used for the agent metadata.", + "title": "Schema Version", + "type": "string" + }, + "agent": { + "$ref": "#/$defs/AgentMetadata", + "description": "Agent configuration and capabilities" + }, + "name": { + "description": "Name of the agent", + "title": "Name", + "type": "string" + }, + "registered_at": { + "description": "ISO 8601 timestamp of registration", + "title": "Registered At", + "type": "string" + }, + "pubsub": { + "anyOf": [ + { + "$ref": "#/$defs/PubSubMetadata" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Pub/sub configuration if enabled" + }, + "memory": { + "anyOf": [ + { + "$ref": "#/$defs/MemoryMetadata" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Memory configuration if enabled" + }, + "llm": { + "anyOf": [ + { + "$ref": "#/$defs/LLMMetadata" + }, + { + "type": "null" + } + ], + "default": null, + "description": "LLM configuration" + }, + "registry": { + "anyOf": [ + { + "$ref": "#/$defs/RegistryMetadata" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Registry configuration" + }, + "tools": { + "anyOf": [ + { + "items": { + "$ref": "#/$defs/ToolMetadata" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Available tools", + "title": "Tools" + }, + "max_iterations": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Maximum iterations for agent execution", + "title": "Max Iterations" + }, + "tool_choice": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Tool choice strategy", + "title": "Tool Choice" + }, + "agent_metadata": { + "anyOf": [ + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Additional metadata about the agent", + "title": "Agent Metadata" + } + }, + "required": [ + "schema_version", + "agent", + "name", + "registered_at" + ], + "title": "AgentMetadataSchema", + "type": "object", + "$schema": "https://json-schema.org/draft/2020-12/schema", + "version": "0.10.6" +} \ No newline at end of file diff --git a/ext/dapr-ext-agent-core/schemas/agent-metadata/v0.10.7.json b/ext/dapr-ext-agent-core/schemas/agent-metadata/v0.10.7.json new file mode 100644 index 000000000..7dea02627 --- /dev/null +++ b/ext/dapr-ext-agent-core/schemas/agent-metadata/v0.10.7.json @@ -0,0 +1,2018 @@ +{ + "$defs": { + "AgentMetadata": { + "description": "Metadata about an agent's configuration and capabilities.", + "properties": { + "appid": { + "description": "Dapr application ID of the agent", + "title": "Appid", + "type": "string" + }, + "type": { + "description": "Type of the agent (e.g., standalone, durable)", + "title": "Type", + "type": "string" + }, + "orchestrator": { + "default": false, + "description": "Indicates if the agent is an orchestrator", + "title": "Orchestrator", + "type": "boolean" + }, + 
"role": { + "default": "", + "description": "Role of the agent", + "title": "Role", + "type": "string" + }, + "goal": { + "default": "", + "description": "High-level objective of the agent", + "title": "Goal", + "type": "string" + }, + "name": { + "default": "", + "description": "Namememory of the agent", + "title": "Name", + "type": "string" + }, + "instructions": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Instructions for the agent", + "title": "Instructions" + }, + "statestore": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Dapr state store component name used by the agent", + "title": "Statestore" + }, + "system_prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "System prompt guiding the agent's behavior", + "title": "System Prompt" + } + }, + "required": [ + "appid", + "type" + ], + "title": "AgentMetadata", + "type": "object" + }, + "AzureOpenAIModelConfig": { + "properties": { + "api_key": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "API key to authenticate the Azure OpenAI API", + "title": "Api Key" + }, + "azure_ad_token": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Azure Active Directory token for authentication", + "title": "Azure Ad Token" + }, + "organization": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Azure organization associated with the OpenAI resource", + "title": "Organization" + }, + "project": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Azure project associated with the OpenAI resource", + "title": "Project" + }, + "api_version": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "2024-07-01-preview", + "description": "API version for Azure OpenAI models", + "title": "Api Version" + }, + "azure_endpoint": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Azure endpoint for Azure OpenAI models", + "title": "Azure Endpoint" + }, + "azure_deployment": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Azure deployment for Azure OpenAI models", + "title": "Azure Deployment" + }, + "azure_client_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Client ID for Managed Identity authentication.", + "title": "Azure Client Id" + }, + "type": { + "const": "azure_openai", + "default": "azure_openai", + "description": "Type of the model, must always be 'azure_openai'", + "title": "Type", + "type": "string" + } + }, + "title": "AzureOpenAIModelConfig", + "type": "object" + }, + "HFHubChatCompletionParams": { + "description": "Specific settings for Hugging Face Hub Chat Completion endpoint.", + "properties": { + "model": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The model to use for chat-completion. 
Can be a model ID or a URL to a deployed Inference Endpoint.", + "title": "Model" + }, + "frequency_penalty": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0.0, + "description": "Penalizes new tokens based on their existing frequency in the text so far.", + "title": "Frequency Penalty" + }, + "logit_bias": { + "anyOf": [ + { + "additionalProperties": { + "type": "number" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Modify the likelihood of specified tokens appearing in the completion.", + "title": "Logit Bias" + }, + "logprobs": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": false, + "description": "Whether to return log probabilities of the output tokens or not.", + "title": "Logprobs" + }, + "max_tokens": { + "anyOf": [ + { + "minimum": 1, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": 100, + "description": "Maximum number of tokens allowed in the response.", + "title": "Max Tokens" + }, + "n": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "UNUSED. Included for compatibility.", + "title": "N" + }, + "presence_penalty": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0.0, + "description": "Penalizes new tokens based on their presence in the text so far.", + "title": "Presence Penalty" + }, + "response_format": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Grammar constraints. Can be either a JSONSchema or a regex.", + "title": "Response Format" + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Seed for reproducible control flow.", + "title": "Seed" + }, + "stop": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Up to four strings which trigger the end of the response.", + "title": "Stop" + }, + "stream": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": false, + "description": "Enable realtime streaming of responses.", + "title": "Stream" + }, + "stream_options": { + "anyOf": [ + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Options for streaming completions.", + "title": "Stream Options" + }, + "temperature": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": 1.0, + "description": "Controls randomness of the generations.", + "title": "Temperature" + }, + "top_logprobs": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Number of most likely tokens to return at each position.", + "title": "Top Logprobs" + }, + "top_p": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Fraction of the most likely next words to sample from.", + "title": "Top P" + }, + "tool_choice": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The tool to use for the completion. 
Defaults to 'auto'.", + "title": "Tool Choice" + }, + "tool_prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "A prompt to be appended before the tools.", + "title": "Tool Prompt" + }, + "tools": { + "anyOf": [ + { + "items": { + "additionalProperties": true, + "type": "object" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "A list of tools the model may call.", + "title": "Tools" + } + }, + "title": "HFHubChatCompletionParams", + "type": "object" + }, + "HFHubModelConfig": { + "properties": { + "model": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Model ID on Hugging Face Hub or a URL to a deployed endpoint. If not set, a recommended model may be chosen by your wrapper.", + "title": "Model" + }, + "hf_provider": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "auto", + "description": "Inference provider to use. Defaults to automatic selection based on available providers. Ignored if a custom endpoint URL is provided.", + "title": "Hf Provider" + }, + "token": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Hugging Face access token for authentication. If None, uses the locally saved token. Set to False to skip sending a token. Mutually exclusive with api_key.", + "title": "Token" + }, + "api_key": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Alias for token. Use only one of token or api_key.", + "title": "Api Key" + }, + "base_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Custom endpoint URL for inference. Used for private deployments or TGI endpoints. Cannot be set if 'model' is a Hub ID.", + "title": "Base Url" + }, + "timeout": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Maximum seconds to wait for a response. If None, waits indefinitely. Useful for slow model loading.", + "title": "Timeout" + }, + "headers": { + "anyOf": [ + { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "description": "Extra HTTP headers to send with requests. Overrides defaults like authorization and user-agent.", + "title": "Headers" + }, + "cookies": { + "anyOf": [ + { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "description": "Extra cookies to send with requests.", + "title": "Cookies" + }, + "proxies": { + "anyOf": [ + {}, + { + "type": "null" + } + ], + "default": null, + "description": "Proxy settings for HTTP requests. Use standard requests format.", + "title": "Proxies" + }, + "bill_to": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Billing account for requests. 
Only used for enterprise/organization billing.", + "title": "Bill To" + }, + "type": { + "const": "huggingface", + "default": "huggingface", + "description": "Type of the model, must always be 'huggingface'", + "title": "Type", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the model available through Hugging Face", + "title": "Name", + "type": "string" + } + }, + "title": "HFHubModelConfig", + "type": "object" + }, + "LLMMetadata": { + "description": "LLM configuration information.", + "properties": { + "client": { + "description": "LLM client used by the agent", + "title": "Client", + "type": "string" + }, + "provider": { + "description": "LLM provider used by the agent", + "title": "Provider", + "type": "string" + }, + "api": { + "default": "unknown", + "description": "API type used by the LLM client", + "title": "Api", + "type": "string" + }, + "model": { + "default": "unknown", + "description": "Model name or identifier", + "title": "Model", + "type": "string" + }, + "component_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Dapr component name for the LLM client", + "title": "Component Name" + }, + "base_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Base URL for the LLM API if applicable", + "title": "Base Url" + }, + "azure_endpoint": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Azure endpoint if using Azure OpenAI", + "title": "Azure Endpoint" + }, + "azure_deployment": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Azure deployment name if using Azure OpenAI", + "title": "Azure Deployment" + }, + "prompt_template": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Prompt template used by the agent", + "title": "Prompt Template" + }, + "prompty": { + "anyOf": [ + { + "$ref": "#/$defs/Prompty" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Prompty template name if used" + } + }, + "required": [ + "client", + "provider" + ], + "title": "LLMMetadata", + "type": "object" + }, + "MemoryMetadata": { + "description": "Memory configuration information.", + "properties": { + "type": { + "description": "Type of memory used by the agent", + "title": "Type", + "type": "string" + }, + "statestore": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Dapr state store component name for memory", + "title": "Statestore" + }, + "session_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Default session ID for the agent's memory", + "title": "Session Id" + } + }, + "required": [ + "type" + ], + "title": "MemoryMetadata", + "type": "object" + }, + "NVIDIAChatCompletionParams": { + "description": "Specific settings for the Chat Completion endpoint.", + "properties": { + "model": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "ID of the model to use", + "title": "Model" + }, + "temperature": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": 0.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0, + "description": "Sampling temperature", + "title": "Temperature" + }, + "max_tokens": { + "anyOf": [ + { + "type": "integer" + }, + 
{ + "type": "null" + } + ], + "default": null, + "description": "Maximum number of tokens to generate. Can be None or a positive integer.", + "title": "Max Tokens" + }, + "top_p": { + "anyOf": [ + { + "maximum": 1.0, + "minimum": 0.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 1.0, + "description": "Nucleus sampling probability mass", + "title": "Top P" + }, + "frequency_penalty": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": -2.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0.0, + "description": "Frequency penalty", + "title": "Frequency Penalty" + }, + "presence_penalty": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": -2.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0.0, + "description": "Presence penalty", + "title": "Presence Penalty" + }, + "stop": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Stop sequences", + "title": "Stop" + }, + "stream": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": false, + "description": "Whether to stream responses", + "title": "Stream" + }, + "logit_bias": { + "anyOf": [ + { + "additionalProperties": { + "type": "number" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Modify likelihood of specified tokens", + "title": "Logit Bias" + }, + "logprobs": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": false, + "description": "Whether to return log probabilities", + "title": "Logprobs" + }, + "top_logprobs": { + "anyOf": [ + { + "maximum": 20, + "minimum": 0, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Number of top log probabilities to return", + "title": "Top Logprobs" + }, + "n": { + "anyOf": [ + { + "maximum": 128, + "minimum": 1, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": 1, + "description": "Number of chat completion choices to generate", + "title": "N" + }, + "tools": { + "anyOf": [ + { + "items": { + "additionalProperties": true, + "type": "object" + }, + "maxItems": 64, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "List of tools the model may call", + "title": "Tools" + }, + "tool_choice": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Controls which tool is called", + "title": "Tool Choice" + } + }, + "title": "NVIDIAChatCompletionParams", + "type": "object" + }, + "NVIDIAModelConfig": { + "properties": { + "base_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "https://integrate.api.nvidia.com/v1", + "description": "Base URL for the NVIDIA API", + "title": "Base Url" + }, + "api_key": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "API key to authenticate the NVIDIA API", + "title": "Api Key" + }, + "type": { + "const": "nvidia", + "default": "nvidia", + "description": "Type of the model, must always be 'nvidia'", + "title": "Type", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the model available through NVIDIA", + "title": "Name", + "type": "string" + } + }, + "title": "NVIDIAModelConfig", + "type": "object" + }, + "OpenAIChatCompletionParams": { + 
"description": "Specific settings for the Chat Completion endpoint.", + "properties": { + "model": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "ID of the model to use", + "title": "Model" + }, + "temperature": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": 0.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0, + "description": "Sampling temperature", + "title": "Temperature" + }, + "max_tokens": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Maximum number of tokens to generate. Can be None or a positive integer.", + "title": "Max Tokens" + }, + "top_p": { + "anyOf": [ + { + "maximum": 1.0, + "minimum": 0.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 1.0, + "description": "Nucleus sampling probability mass", + "title": "Top P" + }, + "frequency_penalty": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": -2.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0.0, + "description": "Frequency penalty", + "title": "Frequency Penalty" + }, + "presence_penalty": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": -2.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0.0, + "description": "Presence penalty", + "title": "Presence Penalty" + }, + "stop": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Stop sequences", + "title": "Stop" + }, + "stream": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": false, + "description": "Whether to stream responses", + "title": "Stream" + }, + "logit_bias": { + "anyOf": [ + { + "additionalProperties": { + "type": "number" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Modify likelihood of specified tokens", + "title": "Logit Bias" + }, + "logprobs": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": false, + "description": "Whether to return log probabilities", + "title": "Logprobs" + }, + "top_logprobs": { + "anyOf": [ + { + "maximum": 20, + "minimum": 0, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Number of top log probabilities to return", + "title": "Top Logprobs" + }, + "n": { + "anyOf": [ + { + "maximum": 128, + "minimum": 1, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": 1, + "description": "Number of chat completion choices to generate", + "title": "N" + }, + "response_format": { + "anyOf": [ + { + "additionalProperties": { + "enum": [ + "text", + "json_object" + ], + "type": "string" + }, + "propertyNames": { + "const": "type" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Format of the response", + "title": "Response Format" + }, + "tools": { + "anyOf": [ + { + "items": { + "additionalProperties": true, + "type": "object" + }, + "maxItems": 64, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "List of tools the model may call", + "title": "Tools" + }, + "tool_choice": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Controls which tool is called", + "title": "Tool Choice" + }, + "function_call": { + "anyOf": [ + { + "type": 
"string" + }, + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Controls which function is called", + "title": "Function Call" + }, + "seed": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Seed for deterministic sampling", + "title": "Seed" + }, + "user": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Unique identifier representing the end-user", + "title": "User" + } + }, + "title": "OpenAIChatCompletionParams", + "type": "object" + }, + "OpenAIModelConfig": { + "properties": { + "base_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Base URL for the OpenAI API", + "title": "Base Url" + }, + "api_key": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "API key to authenticate the OpenAI API", + "title": "Api Key" + }, + "organization": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Organization name for OpenAI", + "title": "Organization" + }, + "project": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "OpenAI project name.", + "title": "Project" + }, + "type": { + "const": "openai", + "default": "openai", + "description": "Type of the model, must always be 'openai'", + "title": "Type", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the OpenAI model", + "title": "Name", + "type": "string" + } + }, + "title": "OpenAIModelConfig", + "type": "object" + }, + "OpenAITextCompletionParams": { + "description": "Specific configs for the text completions endpoint.", + "properties": { + "model": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "ID of the model to use", + "title": "Model" + }, + "temperature": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": 0.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0, + "description": "Sampling temperature", + "title": "Temperature" + }, + "max_tokens": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Maximum number of tokens to generate. 
Can be None or a positive integer.", + "title": "Max Tokens" + }, + "top_p": { + "anyOf": [ + { + "maximum": 1.0, + "minimum": 0.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 1.0, + "description": "Nucleus sampling probability mass", + "title": "Top P" + }, + "frequency_penalty": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": -2.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0.0, + "description": "Frequency penalty", + "title": "Frequency Penalty" + }, + "presence_penalty": { + "anyOf": [ + { + "maximum": 2.0, + "minimum": -2.0, + "type": "number" + }, + { + "type": "null" + } + ], + "default": 0.0, + "description": "Presence penalty", + "title": "Presence Penalty" + }, + "stop": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Stop sequences", + "title": "Stop" + }, + "stream": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": false, + "description": "Whether to stream responses", + "title": "Stream" + }, + "best_of": { + "anyOf": [ + { + "minimum": 1, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Number of best completions to generate", + "title": "Best Of" + }, + "echo": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "default": false, + "description": "Whether to echo the prompt", + "title": "Echo" + }, + "logprobs": { + "anyOf": [ + { + "maximum": 5, + "minimum": 0, + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Include log probabilities", + "title": "Logprobs" + }, + "suffix": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Suffix to append to the prompt", + "title": "Suffix" + } + }, + "title": "OpenAITextCompletionParams", + "type": "object" + }, + "Prompty": { + "description": "A class to handle loading and formatting of Prompty templates for language models workflows.", + "properties": { + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "", + "description": "Name of the Prompty file.", + "title": "Name" + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "", + "description": "Description of the Prompty file.", + "title": "Description" + }, + "version": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": "1.0", + "description": "Version of the Prompty.", + "title": "Version" + }, + "authors": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": [], + "description": "List of authors for the Prompty.", + "title": "Authors" + }, + "tags": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": [], + "description": "Tags to categorize the Prompty.", + "title": "Tags" + }, + "model": { + "$ref": "#/$defs/PromptyModelConfig", + "description": "Model configuration. Can be either OpenAI or Azure OpenAI." + }, + "inputs": { + "additionalProperties": true, + "default": {}, + "description": "Input parameters for the Prompty. 
These define the expected inputs.", + "title": "Inputs", + "type": "object" + }, + "sample": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Sample input or the path to a sample file for testing the Prompty.", + "title": "Sample" + }, + "outputs": { + "anyOf": [ + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": {}, + "description": "Optional outputs for the Prompty. Defines expected output format.", + "title": "Outputs" + }, + "content": { + "description": "The prompt messages defined in the Prompty file.", + "title": "Content", + "type": "string" + } + }, + "required": [ + "model", + "content" + ], + "title": "Prompty", + "type": "object" + }, + "PromptyModelConfig": { + "properties": { + "api": { + "default": "chat", + "description": "The API to use, either 'chat' or 'completion'", + "enum": [ + "chat", + "completion" + ], + "title": "Api", + "type": "string" + }, + "configuration": { + "anyOf": [ + { + "$ref": "#/$defs/OpenAIModelConfig" + }, + { + "$ref": "#/$defs/AzureOpenAIModelConfig" + }, + { + "$ref": "#/$defs/HFHubModelConfig" + }, + { + "$ref": "#/$defs/NVIDIAModelConfig" + } + ], + "description": "Model configuration settings", + "title": "Configuration" + }, + "parameters": { + "anyOf": [ + { + "$ref": "#/$defs/OpenAITextCompletionParams" + }, + { + "$ref": "#/$defs/OpenAIChatCompletionParams" + }, + { + "$ref": "#/$defs/HFHubChatCompletionParams" + }, + { + "$ref": "#/$defs/NVIDIAChatCompletionParams" + } + ], + "description": "Parameters for the model request", + "title": "Parameters" + }, + "response": { + "default": "first", + "description": "Determines if full response or just the first one is returned", + "enum": [ + "first", + "full" + ], + "title": "Response", + "type": "string" + } + }, + "required": [ + "configuration", + "parameters" + ], + "title": "PromptyModelConfig", + "type": "object" + }, + "PubSubMetadata": { + "description": "Pub/Sub configuration information.", + "properties": { + "agent_name": { + "description": "Pub/Sub topic the agent subscribes to", + "title": "Agent Name", + "type": "string" + }, + "name": { + "description": "Pub/Sub component name", + "title": "Name", + "type": "string" + }, + "broadcast_topic": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Pub/Sub topic for broadcasting messages", + "title": "Broadcast Topic" + }, + "agent_topic": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Pub/Sub topic for direct agent messages", + "title": "Agent Topic" + } + }, + "required": [ + "agent_name", + "name" + ], + "title": "PubSubMetadata", + "type": "object" + }, + "RegistryMetadata": { + "description": "Registry configuration information.", + "properties": { + "statestore": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Name of the statestore component for the registry", + "title": "Statestore" + }, + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Name of the team registry", + "title": "Name" + } + }, + "title": "RegistryMetadata", + "type": "object" + }, + "ToolMetadata": { + "description": "Metadata about a tool available to the agent.", + "properties": { + "tool_name": { + "description": "Name of the tool", + 
"title": "Tool Name", + "type": "string" + }, + "tool_description": { + "description": "Description of the tool's functionality", + "title": "Tool Description", + "type": "string" + }, + "tool_args": { + "description": "Arguments for the tool", + "title": "Tool Args", + "type": "string" + } + }, + "required": [ + "tool_name", + "tool_description", + "tool_args" + ], + "title": "ToolMetadata", + "type": "object" + } + }, + "description": "Schema for agent metadata including schema version.", + "properties": { + "schema_version": { + "description": "Version of the schema used for the agent metadata.", + "title": "Schema Version", + "type": "string" + }, + "agent": { + "$ref": "#/$defs/AgentMetadata", + "description": "Agent configuration and capabilities" + }, + "name": { + "description": "Name of the agent", + "title": "Name", + "type": "string" + }, + "registered_at": { + "description": "ISO 8601 timestamp of registration", + "title": "Registered At", + "type": "string" + }, + "pubsub": { + "anyOf": [ + { + "$ref": "#/$defs/PubSubMetadata" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Pub/sub configuration if enabled" + }, + "memory": { + "anyOf": [ + { + "$ref": "#/$defs/MemoryMetadata" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Memory configuration if enabled" + }, + "llm": { + "anyOf": [ + { + "$ref": "#/$defs/LLMMetadata" + }, + { + "type": "null" + } + ], + "default": null, + "description": "LLM configuration" + }, + "registry": { + "anyOf": [ + { + "$ref": "#/$defs/RegistryMetadata" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Registry configuration" + }, + "tools": { + "anyOf": [ + { + "items": { + "$ref": "#/$defs/ToolMetadata" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Available tools", + "title": "Tools" + }, + "max_iterations": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Maximum iterations for agent execution", + "title": "Max Iterations" + }, + "tool_choice": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Tool choice strategy", + "title": "Tool Choice" + }, + "agent_metadata": { + "anyOf": [ + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Additional metadata about the agent", + "title": "Agent Metadata" + } + }, + "required": [ + "schema_version", + "agent", + "name", + "registered_at" + ], + "title": "AgentMetadataSchema", + "type": "object", + "$schema": "https://json-schema.org/draft/2020-12/schema", + "version": "0.10.7" +} \ No newline at end of file diff --git a/ext/dapr-ext-agent-core/scripts/generate_schema.py b/ext/dapr-ext-agent-core/scripts/generate_schema.py new file mode 100644 index 000000000..127595761 --- /dev/null +++ b/ext/dapr-ext-agent-core/scripts/generate_schema.py @@ -0,0 +1,114 @@ +import argparse +import json +from pathlib import Path +from importlib.metadata import version, PackageNotFoundError +from typing import Any, Optional + +from dapr.ext.agent_core import AgentMetadataSchema + + +def get_auto_version() -> str: + """Get current package version automatically.""" + try: + return version("dapr-agents") + except PackageNotFoundError: + return "0.0.0.dev0" + + +def generate_schema(output_dir: Path, schema_version: Optional[str] = None): + """ + Generate versioned schema files. 
+ + Args: + output_dir: Directory to output schema files + schema_version: Specific version to use. If None, auto-detects from package. + """ + # Use provided version or auto-detect + current_version = schema_version or get_auto_version() + + print(f"Generating schema for version: {current_version}") + schema_dir = output_dir / "agent-metadata" + + # Export schema + schema: dict[Any, Any] = AgentMetadataSchema.export_json_schema(current_version) + + # Write versioned file + version_file = schema_dir / f"v{current_version}.json" + with open(version_file, "w") as f: + json.dump(schema, f, indent=2) + print(f"✓ Generated {version_file}") + + # Write latest.json + latest_file = schema_dir / "latest.json" + with open(latest_file, "w") as f: + json.dump(schema, f, indent=2) + print(f"✓ Generated {latest_file}") + + # Write index with all versions + index: dict[Any, Any] = { + "current_version": current_version, + "schema_url": f"https://raw.githubusercontent.com/dapr/python-sdk/main/ext/dapr-ext-agent-core/schemas/agent-metadata/v{current_version}.json", + "available_versions": sorted( + [f.stem for f in schema_dir.glob("v*.json")], reverse=True + ), + } + + index_file = schema_dir / "index.json" + with open(index_file, "w") as f: + json.dump(index, f, indent=2) + print(f"✓ Generated {index_file}") + print(f"\nSchema generation complete for version {current_version}") + + +def main(): + """Main entry point with CLI argument parsing.""" + parser = argparse.ArgumentParser( + description="Generate JSON schema files for agent metadata", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + # Auto-detect version from installed package + python scripts/generate_schema.py + + # Generate schema for specific version + python scripts/generate_schema.py --version 1.0.0 + + # Generate for pre-release + python scripts/generate_schema.py --version 1.1.0-rc1 + + # Custom output directory + python scripts/generate_schema.py --version 1.0.0 --output ./custom-schemas + """, + ) + + parser.add_argument( + "--version", + "-v", + type=str, + default=None, + help="Specific version to use for schema generation. If not provided, auto-detects from installed package.", + ) + + parser.add_argument( + "--output", + "-o", + type=Path, + default=None, + help="Output directory for schemas. 
Defaults to 'schemas' in repo root.", + ) + + args = parser.parse_args() + + # Determine output directory + if args.output: + schemas_dir = args.output + else: + repo_root = Path(__file__).parent.parent + schemas_dir = repo_root / "schemas" + + # Generate schemas + generate_schema(schemas_dir, schema_version=args.version) + + +if __name__ == "__main__": + main() diff --git a/ext/dapr-ext-agent-core/setup.cfg b/ext/dapr-ext-agent-core/setup.cfg new file mode 100644 index 000000000..ba1e96e18 --- /dev/null +++ b/ext/dapr-ext-agent-core/setup.cfg @@ -0,0 +1,38 @@ +[metadata] +url = https://dapr.io/ +author = Dapr Authors +author_email = daprweb@microsoft.com +license = Apache +license_file = LICENSE +classifiers = + Development Status :: 5 - Production/Stable + Intended Audience :: Developers + License :: OSI Approved :: Apache Software License + Operating System :: OS Independent + Programming Language :: Python + Programming Language :: Python :: 3.11 + Programming Language :: Python :: 3.12 + Programming Language :: Python :: 3.13 + Programming Language :: Python :: 3.14 +project_urls = + Documentation = https://github.com/dapr/docs + Source = https://github.com/dapr/python-sdk + +[options] +python_requires = >=3.10 +packages = find_namespace: +include_package_data = True +install_requires = + dapr >= 1.17.0.dev + dapr-agents >= 0.10.7 + +[options.packages.find] +include = + dapr.* + +exclude = + tests + +[options.package_data] +dapr.ext.agent_core = + py.typed diff --git a/ext/dapr-ext-agent-core/setup.py b/ext/dapr-ext-agent-core/setup.py new file mode 100644 index 000000000..d23aed9ff --- /dev/null +++ b/ext/dapr-ext-agent-core/setup.py @@ -0,0 +1,65 @@ +# -*- coding: utf-8 -*- + +""" +Copyright 2025 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + +import os + +from setuptools import setup + +# Load version in dapr package. +version_info = {} +with open('dapr/ext/agent_core/version.py') as fp: + exec(fp.read(), version_info) +__version__ = version_info['__version__'] + + +def is_release(): + return '.dev' not in __version__ + + +name = 'dapr-ext-agent_core' +version = __version__ +description = 'The official release of Dapr Python SDK Agent Core Extension.' +long_description = """ +This is the core extension for Dapr Python SDK Agent integrations. +Dapr is a portable, serverless, event-driven runtime that makes it easy for developers to +build resilient, stateless and stateful microservices that run on the cloud and edge and +embraces the diversity of languages and developer frameworks. + +Dapr codifies the best practices for building microservice applications into open, +independent, building blocks that enable you to build portable applications with the language +and framework of your choice. Each building block is independent and you can use one, some, +or all of them in your application. 
+""".lstrip() + +# Get build number from GITHUB_RUN_NUMBER environment variable +build_number = os.environ.get('GITHUB_RUN_NUMBER', '0') + +if not is_release(): + name += '-dev' + version = f'{__version__}{build_number}' + description = ( + 'The developmental release for the Dapr Session Manager extension for Strands Agents' + ) + long_description = 'This is the developmental release for the Dapr Session Manager extension for Strands Agents' + +print(f'package name: {name}, version: {version}', flush=True) + + +setup( + name=name, + version=version, + description=description, + long_description=long_description, +) From 49cba9f678b7a61ead1d65a343241a0e939f5e04 Mon Sep 17 00:00:00 2001 From: Casper Nielsen Date: Wed, 28 Jan 2026 17:28:53 +0100 Subject: [PATCH 02/13] feat: include dapr-agents mapper & init of LangGraph mapper Signed-off-by: Casper Nielsen --- examples/langgraph-checkpointer/agent.py | 1 + .../components/agent-registry.yaml | 14 ++++ .../dapr/ext/agent_core/mapping/__init__.py | 7 ++ .../ext/agent_core/mapping/dapr_agents.py | 74 +++++++++++++++++ .../dapr/ext/agent_core/mapping/langgraph.py | 82 +++++++++++++++++++ .../dapr/ext/langgraph/dapr_checkpointer.py | 10 +++ ext/dapr-ext-langgraph/setup.cfg | 1 + 7 files changed, 189 insertions(+) create mode 100644 examples/langgraph-checkpointer/components/agent-registry.yaml create mode 100644 ext/dapr-ext-agent-core/dapr/ext/agent_core/mapping/__init__.py create mode 100644 ext/dapr-ext-agent-core/dapr/ext/agent_core/mapping/dapr_agents.py create mode 100644 ext/dapr-ext-agent-core/dapr/ext/agent_core/mapping/langgraph.py diff --git a/examples/langgraph-checkpointer/agent.py b/examples/langgraph-checkpointer/agent.py index 8ea98ae82..9f4c90006 100644 --- a/examples/langgraph-checkpointer/agent.py +++ b/examples/langgraph-checkpointer/agent.py @@ -52,6 +52,7 @@ def assistant(state: MessagesState): memory = DaprCheckpointer(store_name='statestore', key_prefix='dapr') react_graph_memory = builder.compile(checkpointer=memory) +memory.set_agent(react_graph_memory) config = {'configurable': {'thread_id': '1'}} diff --git a/examples/langgraph-checkpointer/components/agent-registry.yaml b/examples/langgraph-checkpointer/components/agent-registry.yaml new file mode 100644 index 000000000..b6c1fe4d4 --- /dev/null +++ b/examples/langgraph-checkpointer/components/agent-registry.yaml @@ -0,0 +1,14 @@ +apiVersion: dapr.io/v1alpha1 +kind: Component +metadata: + name: agent-registry +spec: + type: state.redis + version: v1 + metadata: + - name: redisHost + value: localhost:6379 + - name: redisPassword + value: "" + - name: keyPrefix + value: none \ No newline at end of file diff --git a/ext/dapr-ext-agent-core/dapr/ext/agent_core/mapping/__init__.py b/ext/dapr-ext-agent-core/dapr/ext/agent_core/mapping/__init__.py new file mode 100644 index 000000000..6aee51ba4 --- /dev/null +++ b/ext/dapr-ext-agent-core/dapr/ext/agent_core/mapping/__init__.py @@ -0,0 +1,7 @@ +from .dapr_agents import DaprAgentsMapper +from .langgraph import LangGraphMapper + +__all__ = [ + "DaprAgentsMapper", + "LangGraphMapper", +] \ No newline at end of file diff --git a/ext/dapr-ext-agent-core/dapr/ext/agent_core/mapping/dapr_agents.py b/ext/dapr-ext-agent-core/dapr/ext/agent_core/mapping/dapr_agents.py new file mode 100644 index 000000000..0e4c5f787 --- /dev/null +++ b/ext/dapr-ext-agent-core/dapr/ext/agent_core/mapping/dapr_agents.py @@ -0,0 +1,74 @@ + +from datetime import datetime, timezone +import json +import logging +from typing import Any +from 
dapr.ext.agent_core.types import AgentMetadata, AgentMetadataSchema, LLMMetadata, MemoryMetadata, PubSubMetadata, RegistryMetadata, ToolMetadata + +logger = logging.getLogger(__name__) + + +class DaprAgentsMapper: + def __init__(self) -> None: + pass + + def map_agent_metadata(self, agent: Any, schema_version: str) -> AgentMetadataSchema: + profile = getattr(agent, "profile", None) + memory = getattr(agent, "memory", None) + pubsub = getattr(agent, "pubsub", None) + llm = getattr(agent, "llm", None) + registry = getattr(agent, "_registry", None) + execution = getattr(agent, "execution", None) + + return AgentMetadataSchema( + schema_version=schema_version, + agent=AgentMetadata( + appid=getattr(agent, "appid", ""), + type=type(agent).__name__, + orchestrator=False, + role=getattr(profile, "role", "") if profile else "", + goal=getattr(profile, "goal", "") if profile else "", + instructions=getattr(profile, "instructions", None) if profile else [], + statestore=getattr(memory, "store_name", "") if memory else "", + system_prompt=getattr(profile, "system_prompt", "") if profile else "", + ), + name=getattr(agent, "name", ""), + registered_at=datetime.now(timezone.utc).isoformat(), + pubsub=PubSubMetadata( + name=getattr(pubsub, "pubsub_name", "") if pubsub else "", + broadcast_topic=getattr(pubsub, "broadcast_topic", None) if pubsub else None, + agent_topic=getattr(pubsub, "agent_topic", None) if pubsub else None, + ), + memory=MemoryMetadata( + type=type(memory).__name__ if memory else "", + session_id=getattr(memory, "session_id", None) if memory else None, + statestore=getattr(memory, "store_name", None) if memory else None, + ), + llm=LLMMetadata( + client=type(llm).__name__ if llm else "", + provider=getattr(llm, "provider", "unknown") if llm else "unknown", + api=getattr(llm, "api", "unknown") if llm else "unknown", + model=getattr(llm, "model", "unknown") if llm else "unknown", + component_name=getattr(llm, "component_name", None) if llm else None, + base_url=getattr(llm, "base_url", None) if llm else None, + azure_endpoint=getattr(llm, "azure_endpoint", None) if llm else None, + azure_deployment=getattr(llm, "azure_deployment", None) if llm else None, + prompt_template=type(getattr(llm, "prompt_template", None)).__name__ if llm and getattr(llm, "prompt_template", None) else None, + ), + registry=RegistryMetadata( + statestore=getattr(getattr(registry, "store", None), "store_name", None) if registry else None, + name=getattr(registry, "team_name", None) if registry else None, + ), + tools=[ + ToolMetadata( + tool_name=getattr(tool, "name", ""), + tool_description=getattr(tool, "description", ""), + tool_args=json.dumps(getattr(tool, "args_schema", {})) + if hasattr(tool, "args_schema") else "{}", + ) + for tool in getattr(agent, "tools", []) + ], + max_iterations=getattr(execution, "max_iterations", None) if execution else None, + tool_choice=getattr(execution, "tool_choice", None) if execution else None, + agent_metadata=getattr(agent, "agent_metadata", None), + ) diff --git a/ext/dapr-ext-agent-core/dapr/ext/agent_core/mapping/langgraph.py b/ext/dapr-ext-agent-core/dapr/ext/agent_core/mapping/langgraph.py new file mode 100644 index 000000000..55d34f9cc --- /dev/null +++ b/ext/dapr-ext-agent-core/dapr/ext/agent_core/mapping/langgraph.py @@ -0,0 +1,82 @@ + +from datetime import datetime, timezone +import json +import logging +from typing import TYPE_CHECKING, Any, Dict, Optional +from dapr.ext.agent_core.types import AgentMetadata, AgentMetadataSchema, LLMMetadata, MemoryMetadata, 
PubSubMetadata, RegistryMetadata, ToolMetadata + +if TYPE_CHECKING: + from dapr.ext.langgraph import DaprCheckpointer + +logger = logging.getLogger(__name__) + +class LangGraphMapper: + def __init__(self) -> None: + pass + + def map_agent_metadata(self, agent: Any, schema_version: str) -> AgentMetadataSchema: + + logger.info(f"LangGraph log vars: {vars(agent)}") + print(f"LangGraph print vars: {vars(agent)}") + logger.info(f"LangGraph log dir: {dir(agent)}") + print(f"LangGraph print dir: {dir(agent)}") + + introspected_vars: Dict[str, Any] = vars(agent) # type: ignore + introspected_dir = dir(agent) + + checkpointer: Optional["DaprCheckpointer"] = introspected_vars.get("checkpointer", None) # type: ignore + tools = introspected_vars.get("tools", []) # type: ignore + print(f"LangGraph tools: {tools}") + + return AgentMetadataSchema( + schema_version=schema_version, + agent=AgentMetadata( + appid="", + type=type(agent).__name__, + orchestrator=False, + role="", + goal="", + instructions=[], + statestore=checkpointer.store_name if checkpointer else None, + system_prompt="", + ), + name=agent.get_name() if hasattr(agent, "get_name") else "", + registered_at=datetime.now(timezone.utc).isoformat(), + pubsub=PubSubMetadata( + name="", + broadcast_topic=None, + agent_topic=None, + ), + memory=MemoryMetadata( + type="DaprCheckpointer", + session_id=None, + statestore=checkpointer.store_name if checkpointer else None, + ), + llm=LLMMetadata( + client="", + provider="unknown", + api="unknown", + model="unknown", + component_name=None, + base_url=None, + azure_endpoint=None, + azure_deployment=None, + prompt_template=None, + ), + registry=RegistryMetadata( + statestore=None, + name=None, + ), + tools=[ + ToolMetadata( + tool_name="", + tool_description="", + tool_args=json.dumps({}) + if hasattr(tool, "args_schema") else "{}", + ) + for tool in getattr(agent, "tools", []) + ], + max_iterations=None, + tool_choice=None, + agent_metadata=None, + ) diff --git a/ext/dapr-ext-langgraph/dapr/ext/langgraph/dapr_checkpointer.py b/ext/dapr-ext-langgraph/dapr/ext/langgraph/dapr_checkpointer.py index 6d2614d90..c64e1b027 100644 --- a/ext/dapr-ext-langgraph/dapr/ext/langgraph/dapr_checkpointer.py +++ b/ext/dapr-ext-langgraph/dapr/ext/langgraph/dapr_checkpointer.py @@ -22,6 +22,7 @@ from ulid import ULID from dapr.clients import DaprClient +from dapr.ext.agent_core import AgentRegistryAdapter from langgraph.checkpoint.base import ( WRITES_IDX_MAP, BaseCheckpointSaver, @@ -48,6 +49,15 @@ def __init__(self, store_name: str, key_prefix: str): self.client = DaprClient() self._key_cache: Dict[str, str] = {} + + def set_agent(self, agent: Any) -> None: + self.registry_adapter = AgentRegistryAdapter( + registry=None, + framework='langgraph', + agent=agent, + ) + + # helper: construct Dapr key for a thread def _get_key(self, config: RunnableConfig) -> str: thread_id = None diff --git a/ext/dapr-ext-langgraph/setup.cfg b/ext/dapr-ext-langgraph/setup.cfg index 5a252a797..e7f0b94f8 100644 --- a/ext/dapr-ext-langgraph/setup.cfg +++ b/ext/dapr-ext-langgraph/setup.cfg @@ -25,6 +25,7 @@ packages = find_namespace: include_package_data = True install_requires = dapr >= 1.17.0.dev + dapr.ext.agent_core >= 1.17.0.dev langgraph >= 0.3.6 langchain >= 0.1.17 python-ulid >= 3.0.0 From 0e536850c0e0e5292882211e87d571bc338c66e7 Mon Sep 17 00:00:00 2001 From: Casper Nielsen Date: Mon, 2 Feb 2026 11:07:16 +0100 Subject: [PATCH 03/13] chore: rename folder Signed-off-by: Casper Nielsen --- .../schemas/agent-metadata/index.json | 8 - 
.../schemas/agent-metadata/latest.json | 2018 ----------------- .../schemas/agent-metadata/v0.10.6.json | 2018 ----------------- .../schemas/agent-metadata/v0.10.7.json | 2018 ----------------- .../LICENSE | 0 .../README.rst | 0 .../dapr/ext/agent_core/__init__.py | 0 .../dapr/ext/agent_core/mapping/__init__.py | 0 .../ext/agent_core/mapping/dapr_agents.py | 0 .../dapr/ext/agent_core/mapping/langgraph.py | 0 .../dapr/ext/agent_core/metadata.py | 0 .../dapr/ext/agent_core/py.typed | 0 .../dapr/ext/agent_core/types.py | 0 .../dapr/ext/agent_core/version.py | 0 .../schemas/agent-metadata/index.json | 7 + .../schemas/agent-metadata/latest.json | 461 ++++ .../schemas/agent-metadata/v1.17.0.dev.json | 461 ++++ .../scripts/generate_schema.py | 4 +- .../setup.cfg | 3 +- .../setup.py | 0 20 files changed, 933 insertions(+), 6065 deletions(-) delete mode 100644 ext/dapr-ext-agent-core/schemas/agent-metadata/index.json delete mode 100644 ext/dapr-ext-agent-core/schemas/agent-metadata/latest.json delete mode 100644 ext/dapr-ext-agent-core/schemas/agent-metadata/v0.10.6.json delete mode 100644 ext/dapr-ext-agent-core/schemas/agent-metadata/v0.10.7.json rename ext/{dapr-ext-agent-core => dapr-ext-agent_core}/LICENSE (100%) rename ext/{dapr-ext-agent-core => dapr-ext-agent_core}/README.rst (100%) rename ext/{dapr-ext-agent-core => dapr-ext-agent_core}/dapr/ext/agent_core/__init__.py (100%) rename ext/{dapr-ext-agent-core => dapr-ext-agent_core}/dapr/ext/agent_core/mapping/__init__.py (100%) rename ext/{dapr-ext-agent-core => dapr-ext-agent_core}/dapr/ext/agent_core/mapping/dapr_agents.py (100%) rename ext/{dapr-ext-agent-core => dapr-ext-agent_core}/dapr/ext/agent_core/mapping/langgraph.py (100%) rename ext/{dapr-ext-agent-core => dapr-ext-agent_core}/dapr/ext/agent_core/metadata.py (100%) rename ext/{dapr-ext-agent-core => dapr-ext-agent_core}/dapr/ext/agent_core/py.typed (100%) rename ext/{dapr-ext-agent-core => dapr-ext-agent_core}/dapr/ext/agent_core/types.py (100%) rename ext/{dapr-ext-agent-core => dapr-ext-agent_core}/dapr/ext/agent_core/version.py (100%) create mode 100644 ext/dapr-ext-agent_core/schemas/agent-metadata/index.json create mode 100644 ext/dapr-ext-agent_core/schemas/agent-metadata/latest.json create mode 100644 ext/dapr-ext-agent_core/schemas/agent-metadata/v1.17.0.dev.json rename ext/{dapr-ext-agent-core => dapr-ext-agent_core}/scripts/generate_schema.py (96%) rename ext/{dapr-ext-agent-core => dapr-ext-agent_core}/setup.cfg (94%) rename ext/{dapr-ext-agent-core => dapr-ext-agent_core}/setup.py (100%) diff --git a/ext/dapr-ext-agent-core/schemas/agent-metadata/index.json b/ext/dapr-ext-agent-core/schemas/agent-metadata/index.json deleted file mode 100644 index 394fae28e..000000000 --- a/ext/dapr-ext-agent-core/schemas/agent-metadata/index.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "current_version": "0.10.7", - "schema_url": "https://raw.githubusercontent.com/dapr/python-sdk/main/ext/dapr-ext-agent-core/schemas/agent-metadata/v0.10.7.json", - "available_versions": [ - "v0.10.7", - "v0.10.6" - ] -} \ No newline at end of file diff --git a/ext/dapr-ext-agent-core/schemas/agent-metadata/latest.json b/ext/dapr-ext-agent-core/schemas/agent-metadata/latest.json deleted file mode 100644 index 7dea02627..000000000 --- a/ext/dapr-ext-agent-core/schemas/agent-metadata/latest.json +++ /dev/null @@ -1,2018 +0,0 @@ -{ - "$defs": { - "AgentMetadata": { - "description": "Metadata about an agent's configuration and capabilities.", - "properties": { - "appid": { - "description": "Dapr application ID 
of the agent", - "title": "Appid", - "type": "string" - }, - "type": { - "description": "Type of the agent (e.g., standalone, durable)", - "title": "Type", - "type": "string" - }, - "orchestrator": { - "default": false, - "description": "Indicates if the agent is an orchestrator", - "title": "Orchestrator", - "type": "boolean" - }, - "role": { - "default": "", - "description": "Role of the agent", - "title": "Role", - "type": "string" - }, - "goal": { - "default": "", - "description": "High-level objective of the agent", - "title": "Goal", - "type": "string" - }, - "name": { - "default": "", - "description": "Namememory of the agent", - "title": "Name", - "type": "string" - }, - "instructions": { - "anyOf": [ - { - "items": { - "type": "string" - }, - "type": "array" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Instructions for the agent", - "title": "Instructions" - }, - "statestore": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Dapr state store component name used by the agent", - "title": "Statestore" - }, - "system_prompt": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "System prompt guiding the agent's behavior", - "title": "System Prompt" - } - }, - "required": [ - "appid", - "type" - ], - "title": "AgentMetadata", - "type": "object" - }, - "AzureOpenAIModelConfig": { - "properties": { - "api_key": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "API key to authenticate the Azure OpenAI API", - "title": "Api Key" - }, - "azure_ad_token": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Azure Active Directory token for authentication", - "title": "Azure Ad Token" - }, - "organization": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Azure organization associated with the OpenAI resource", - "title": "Organization" - }, - "project": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Azure project associated with the OpenAI resource", - "title": "Project" - }, - "api_version": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": "2024-07-01-preview", - "description": "API version for Azure OpenAI models", - "title": "Api Version" - }, - "azure_endpoint": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Azure endpoint for Azure OpenAI models", - "title": "Azure Endpoint" - }, - "azure_deployment": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Azure deployment for Azure OpenAI models", - "title": "Azure Deployment" - }, - "azure_client_id": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Client ID for Managed Identity authentication.", - "title": "Azure Client Id" - }, - "type": { - "const": "azure_openai", - "default": "azure_openai", - "description": "Type of the model, must always be 'azure_openai'", - "title": "Type", - "type": "string" - } - }, - "title": "AzureOpenAIModelConfig", - "type": "object" - }, - "HFHubChatCompletionParams": { - "description": "Specific settings for Hugging Face Hub Chat Completion endpoint.", - "properties": { - "model": { - "anyOf": [ - { - "type": 
"string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "The model to use for chat-completion. Can be a model ID or a URL to a deployed Inference Endpoint.", - "title": "Model" - }, - "frequency_penalty": { - "anyOf": [ - { - "type": "number" - }, - { - "type": "null" - } - ], - "default": 0.0, - "description": "Penalizes new tokens based on their existing frequency in the text so far.", - "title": "Frequency Penalty" - }, - "logit_bias": { - "anyOf": [ - { - "additionalProperties": { - "type": "number" - }, - "type": "object" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Modify the likelihood of specified tokens appearing in the completion.", - "title": "Logit Bias" - }, - "logprobs": { - "anyOf": [ - { - "type": "boolean" - }, - { - "type": "null" - } - ], - "default": false, - "description": "Whether to return log probabilities of the output tokens or not.", - "title": "Logprobs" - }, - "max_tokens": { - "anyOf": [ - { - "minimum": 1, - "type": "integer" - }, - { - "type": "null" - } - ], - "default": 100, - "description": "Maximum number of tokens allowed in the response.", - "title": "Max Tokens" - }, - "n": { - "anyOf": [ - { - "type": "integer" - }, - { - "type": "null" - } - ], - "default": null, - "description": "UNUSED. Included for compatibility.", - "title": "N" - }, - "presence_penalty": { - "anyOf": [ - { - "type": "number" - }, - { - "type": "null" - } - ], - "default": 0.0, - "description": "Penalizes new tokens based on their presence in the text so far.", - "title": "Presence Penalty" - }, - "response_format": { - "anyOf": [ - { - "type": "string" - }, - { - "additionalProperties": true, - "type": "object" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Grammar constraints. 
Can be either a JSONSchema or a regex.", - "title": "Response Format" - }, - "seed": { - "anyOf": [ - { - "type": "integer" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Seed for reproducible control flow.", - "title": "Seed" - }, - "stop": { - "anyOf": [ - { - "type": "string" - }, - { - "items": { - "type": "string" - }, - "type": "array" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Up to four strings which trigger the end of the response.", - "title": "Stop" - }, - "stream": { - "anyOf": [ - { - "type": "boolean" - }, - { - "type": "null" - } - ], - "default": false, - "description": "Enable realtime streaming of responses.", - "title": "Stream" - }, - "stream_options": { - "anyOf": [ - { - "additionalProperties": true, - "type": "object" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Options for streaming completions.", - "title": "Stream Options" - }, - "temperature": { - "anyOf": [ - { - "type": "number" - }, - { - "type": "null" - } - ], - "default": 1.0, - "description": "Controls randomness of the generations.", - "title": "Temperature" - }, - "top_logprobs": { - "anyOf": [ - { - "type": "integer" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Number of most likely tokens to return at each position.", - "title": "Top Logprobs" - }, - "top_p": { - "anyOf": [ - { - "type": "number" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Fraction of the most likely next words to sample from.", - "title": "Top P" - }, - "tool_choice": { - "anyOf": [ - { - "type": "string" - }, - { - "additionalProperties": true, - "type": "object" - }, - { - "type": "null" - } - ], - "default": null, - "description": "The tool to use for the completion. Defaults to 'auto'.", - "title": "Tool Choice" - }, - "tool_prompt": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "A prompt to be appended before the tools.", - "title": "Tool Prompt" - }, - "tools": { - "anyOf": [ - { - "items": { - "additionalProperties": true, - "type": "object" - }, - "type": "array" - }, - { - "type": "null" - } - ], - "default": null, - "description": "A list of tools the model may call.", - "title": "Tools" - } - }, - "title": "HFHubChatCompletionParams", - "type": "object" - }, - "HFHubModelConfig": { - "properties": { - "model": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Model ID on Hugging Face Hub or a URL to a deployed endpoint. If not set, a recommended model may be chosen by your wrapper.", - "title": "Model" - }, - "hf_provider": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": "auto", - "description": "Inference provider to use. Defaults to automatic selection based on available providers. Ignored if a custom endpoint URL is provided.", - "title": "Hf Provider" - }, - "token": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "boolean" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Hugging Face access token for authentication. If None, uses the locally saved token. Set to False to skip sending a token. Mutually exclusive with api_key.", - "title": "Token" - }, - "api_key": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "boolean" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Alias for token. 
Use only one of token or api_key.", - "title": "Api Key" - }, - "base_url": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Custom endpoint URL for inference. Used for private deployments or TGI endpoints. Cannot be set if 'model' is a Hub ID.", - "title": "Base Url" - }, - "timeout": { - "anyOf": [ - { - "type": "number" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Maximum seconds to wait for a response. If None, waits indefinitely. Useful for slow model loading.", - "title": "Timeout" - }, - "headers": { - "anyOf": [ - { - "additionalProperties": { - "type": "string" - }, - "type": "object" - }, - { - "type": "null" - } - ], - "description": "Extra HTTP headers to send with requests. Overrides defaults like authorization and user-agent.", - "title": "Headers" - }, - "cookies": { - "anyOf": [ - { - "additionalProperties": { - "type": "string" - }, - "type": "object" - }, - { - "type": "null" - } - ], - "description": "Extra cookies to send with requests.", - "title": "Cookies" - }, - "proxies": { - "anyOf": [ - {}, - { - "type": "null" - } - ], - "default": null, - "description": "Proxy settings for HTTP requests. Use standard requests format.", - "title": "Proxies" - }, - "bill_to": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Billing account for requests. Only used for enterprise/organization billing.", - "title": "Bill To" - }, - "type": { - "const": "huggingface", - "default": "huggingface", - "description": "Type of the model, must always be 'huggingface'", - "title": "Type", - "type": "string" - }, - "name": { - "default": "", - "description": "Name of the model available through Hugging Face", - "title": "Name", - "type": "string" - } - }, - "title": "HFHubModelConfig", - "type": "object" - }, - "LLMMetadata": { - "description": "LLM configuration information.", - "properties": { - "client": { - "description": "LLM client used by the agent", - "title": "Client", - "type": "string" - }, - "provider": { - "description": "LLM provider used by the agent", - "title": "Provider", - "type": "string" - }, - "api": { - "default": "unknown", - "description": "API type used by the LLM client", - "title": "Api", - "type": "string" - }, - "model": { - "default": "unknown", - "description": "Model name or identifier", - "title": "Model", - "type": "string" - }, - "component_name": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Dapr component name for the LLM client", - "title": "Component Name" - }, - "base_url": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Base URL for the LLM API if applicable", - "title": "Base Url" - }, - "azure_endpoint": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Azure endpoint if using Azure OpenAI", - "title": "Azure Endpoint" - }, - "azure_deployment": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Azure deployment name if using Azure OpenAI", - "title": "Azure Deployment" - }, - "prompt_template": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Prompt template used by the agent", - "title": "Prompt Template" - }, - "prompty": { - "anyOf": [ - { - "$ref": "#/$defs/Prompty" - }, - { - "type": "null" - } - 
], - "default": null, - "description": "Prompty template name if used" - } - }, - "required": [ - "client", - "provider" - ], - "title": "LLMMetadata", - "type": "object" - }, - "MemoryMetadata": { - "description": "Memory configuration information.", - "properties": { - "type": { - "description": "Type of memory used by the agent", - "title": "Type", - "type": "string" - }, - "statestore": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Dapr state store component name for memory", - "title": "Statestore" - }, - "session_id": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Default session ID for the agent's memory", - "title": "Session Id" - } - }, - "required": [ - "type" - ], - "title": "MemoryMetadata", - "type": "object" - }, - "NVIDIAChatCompletionParams": { - "description": "Specific settings for the Chat Completion endpoint.", - "properties": { - "model": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "ID of the model to use", - "title": "Model" - }, - "temperature": { - "anyOf": [ - { - "maximum": 2.0, - "minimum": 0.0, - "type": "number" - }, - { - "type": "null" - } - ], - "default": 0, - "description": "Sampling temperature", - "title": "Temperature" - }, - "max_tokens": { - "anyOf": [ - { - "type": "integer" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Maximum number of tokens to generate. Can be None or a positive integer.", - "title": "Max Tokens" - }, - "top_p": { - "anyOf": [ - { - "maximum": 1.0, - "minimum": 0.0, - "type": "number" - }, - { - "type": "null" - } - ], - "default": 1.0, - "description": "Nucleus sampling probability mass", - "title": "Top P" - }, - "frequency_penalty": { - "anyOf": [ - { - "maximum": 2.0, - "minimum": -2.0, - "type": "number" - }, - { - "type": "null" - } - ], - "default": 0.0, - "description": "Frequency penalty", - "title": "Frequency Penalty" - }, - "presence_penalty": { - "anyOf": [ - { - "maximum": 2.0, - "minimum": -2.0, - "type": "number" - }, - { - "type": "null" - } - ], - "default": 0.0, - "description": "Presence penalty", - "title": "Presence Penalty" - }, - "stop": { - "anyOf": [ - { - "type": "string" - }, - { - "items": { - "type": "string" - }, - "type": "array" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Stop sequences", - "title": "Stop" - }, - "stream": { - "anyOf": [ - { - "type": "boolean" - }, - { - "type": "null" - } - ], - "default": false, - "description": "Whether to stream responses", - "title": "Stream" - }, - "logit_bias": { - "anyOf": [ - { - "additionalProperties": { - "type": "number" - }, - "type": "object" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Modify likelihood of specified tokens", - "title": "Logit Bias" - }, - "logprobs": { - "anyOf": [ - { - "type": "boolean" - }, - { - "type": "null" - } - ], - "default": false, - "description": "Whether to return log probabilities", - "title": "Logprobs" - }, - "top_logprobs": { - "anyOf": [ - { - "maximum": 20, - "minimum": 0, - "type": "integer" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Number of top log probabilities to return", - "title": "Top Logprobs" - }, - "n": { - "anyOf": [ - { - "maximum": 128, - "minimum": 1, - "type": "integer" - }, - { - "type": "null" - } - ], - "default": 1, - "description": "Number of chat completion choices to generate", - "title": "N" 
- }, - "tools": { - "anyOf": [ - { - "items": { - "additionalProperties": true, - "type": "object" - }, - "maxItems": 64, - "type": "array" - }, - { - "type": "null" - } - ], - "default": null, - "description": "List of tools the model may call", - "title": "Tools" - }, - "tool_choice": { - "anyOf": [ - { - "type": "string" - }, - { - "additionalProperties": true, - "type": "object" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Controls which tool is called", - "title": "Tool Choice" - } - }, - "title": "NVIDIAChatCompletionParams", - "type": "object" - }, - "NVIDIAModelConfig": { - "properties": { - "base_url": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": "https://integrate.api.nvidia.com/v1", - "description": "Base URL for the NVIDIA API", - "title": "Base Url" - }, - "api_key": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "API key to authenticate the NVIDIA API", - "title": "Api Key" - }, - "type": { - "const": "nvidia", - "default": "nvidia", - "description": "Type of the model, must always be 'nvidia'", - "title": "Type", - "type": "string" - }, - "name": { - "default": "", - "description": "Name of the model available through NVIDIA", - "title": "Name", - "type": "string" - } - }, - "title": "NVIDIAModelConfig", - "type": "object" - }, - "OpenAIChatCompletionParams": { - "description": "Specific settings for the Chat Completion endpoint.", - "properties": { - "model": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "ID of the model to use", - "title": "Model" - }, - "temperature": { - "anyOf": [ - { - "maximum": 2.0, - "minimum": 0.0, - "type": "number" - }, - { - "type": "null" - } - ], - "default": 0, - "description": "Sampling temperature", - "title": "Temperature" - }, - "max_tokens": { - "anyOf": [ - { - "type": "integer" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Maximum number of tokens to generate. 
Can be None or a positive integer.", - "title": "Max Tokens" - }, - "top_p": { - "anyOf": [ - { - "maximum": 1.0, - "minimum": 0.0, - "type": "number" - }, - { - "type": "null" - } - ], - "default": 1.0, - "description": "Nucleus sampling probability mass", - "title": "Top P" - }, - "frequency_penalty": { - "anyOf": [ - { - "maximum": 2.0, - "minimum": -2.0, - "type": "number" - }, - { - "type": "null" - } - ], - "default": 0.0, - "description": "Frequency penalty", - "title": "Frequency Penalty" - }, - "presence_penalty": { - "anyOf": [ - { - "maximum": 2.0, - "minimum": -2.0, - "type": "number" - }, - { - "type": "null" - } - ], - "default": 0.0, - "description": "Presence penalty", - "title": "Presence Penalty" - }, - "stop": { - "anyOf": [ - { - "type": "string" - }, - { - "items": { - "type": "string" - }, - "type": "array" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Stop sequences", - "title": "Stop" - }, - "stream": { - "anyOf": [ - { - "type": "boolean" - }, - { - "type": "null" - } - ], - "default": false, - "description": "Whether to stream responses", - "title": "Stream" - }, - "logit_bias": { - "anyOf": [ - { - "additionalProperties": { - "type": "number" - }, - "type": "object" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Modify likelihood of specified tokens", - "title": "Logit Bias" - }, - "logprobs": { - "anyOf": [ - { - "type": "boolean" - }, - { - "type": "null" - } - ], - "default": false, - "description": "Whether to return log probabilities", - "title": "Logprobs" - }, - "top_logprobs": { - "anyOf": [ - { - "maximum": 20, - "minimum": 0, - "type": "integer" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Number of top log probabilities to return", - "title": "Top Logprobs" - }, - "n": { - "anyOf": [ - { - "maximum": 128, - "minimum": 1, - "type": "integer" - }, - { - "type": "null" - } - ], - "default": 1, - "description": "Number of chat completion choices to generate", - "title": "N" - }, - "response_format": { - "anyOf": [ - { - "additionalProperties": { - "enum": [ - "text", - "json_object" - ], - "type": "string" - }, - "propertyNames": { - "const": "type" - }, - "type": "object" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Format of the response", - "title": "Response Format" - }, - "tools": { - "anyOf": [ - { - "items": { - "additionalProperties": true, - "type": "object" - }, - "maxItems": 64, - "type": "array" - }, - { - "type": "null" - } - ], - "default": null, - "description": "List of tools the model may call", - "title": "Tools" - }, - "tool_choice": { - "anyOf": [ - { - "type": "string" - }, - { - "additionalProperties": true, - "type": "object" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Controls which tool is called", - "title": "Tool Choice" - }, - "function_call": { - "anyOf": [ - { - "type": "string" - }, - { - "additionalProperties": true, - "type": "object" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Controls which function is called", - "title": "Function Call" - }, - "seed": { - "anyOf": [ - { - "type": "integer" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Seed for deterministic sampling", - "title": "Seed" - }, - "user": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Unique identifier representing the end-user", - "title": "User" - } - }, - "title": "OpenAIChatCompletionParams", - "type": 
"object" - }, - "OpenAIModelConfig": { - "properties": { - "base_url": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Base URL for the OpenAI API", - "title": "Base Url" - }, - "api_key": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "API key to authenticate the OpenAI API", - "title": "Api Key" - }, - "organization": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Organization name for OpenAI", - "title": "Organization" - }, - "project": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "OpenAI project name.", - "title": "Project" - }, - "type": { - "const": "openai", - "default": "openai", - "description": "Type of the model, must always be 'openai'", - "title": "Type", - "type": "string" - }, - "name": { - "default": "", - "description": "Name of the OpenAI model", - "title": "Name", - "type": "string" - } - }, - "title": "OpenAIModelConfig", - "type": "object" - }, - "OpenAITextCompletionParams": { - "description": "Specific configs for the text completions endpoint.", - "properties": { - "model": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "ID of the model to use", - "title": "Model" - }, - "temperature": { - "anyOf": [ - { - "maximum": 2.0, - "minimum": 0.0, - "type": "number" - }, - { - "type": "null" - } - ], - "default": 0, - "description": "Sampling temperature", - "title": "Temperature" - }, - "max_tokens": { - "anyOf": [ - { - "type": "integer" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Maximum number of tokens to generate. 
Can be None or a positive integer.", - "title": "Max Tokens" - }, - "top_p": { - "anyOf": [ - { - "maximum": 1.0, - "minimum": 0.0, - "type": "number" - }, - { - "type": "null" - } - ], - "default": 1.0, - "description": "Nucleus sampling probability mass", - "title": "Top P" - }, - "frequency_penalty": { - "anyOf": [ - { - "maximum": 2.0, - "minimum": -2.0, - "type": "number" - }, - { - "type": "null" - } - ], - "default": 0.0, - "description": "Frequency penalty", - "title": "Frequency Penalty" - }, - "presence_penalty": { - "anyOf": [ - { - "maximum": 2.0, - "minimum": -2.0, - "type": "number" - }, - { - "type": "null" - } - ], - "default": 0.0, - "description": "Presence penalty", - "title": "Presence Penalty" - }, - "stop": { - "anyOf": [ - { - "type": "string" - }, - { - "items": { - "type": "string" - }, - "type": "array" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Stop sequences", - "title": "Stop" - }, - "stream": { - "anyOf": [ - { - "type": "boolean" - }, - { - "type": "null" - } - ], - "default": false, - "description": "Whether to stream responses", - "title": "Stream" - }, - "best_of": { - "anyOf": [ - { - "minimum": 1, - "type": "integer" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Number of best completions to generate", - "title": "Best Of" - }, - "echo": { - "anyOf": [ - { - "type": "boolean" - }, - { - "type": "null" - } - ], - "default": false, - "description": "Whether to echo the prompt", - "title": "Echo" - }, - "logprobs": { - "anyOf": [ - { - "maximum": 5, - "minimum": 0, - "type": "integer" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Include log probabilities", - "title": "Logprobs" - }, - "suffix": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Suffix to append to the prompt", - "title": "Suffix" - } - }, - "title": "OpenAITextCompletionParams", - "type": "object" - }, - "Prompty": { - "description": "A class to handle loading and formatting of Prompty templates for language models workflows.", - "properties": { - "name": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": "", - "description": "Name of the Prompty file.", - "title": "Name" - }, - "description": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": "", - "description": "Description of the Prompty file.", - "title": "Description" - }, - "version": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": "1.0", - "description": "Version of the Prompty.", - "title": "Version" - }, - "authors": { - "anyOf": [ - { - "items": { - "type": "string" - }, - "type": "array" - }, - { - "type": "null" - } - ], - "default": [], - "description": "List of authors for the Prompty.", - "title": "Authors" - }, - "tags": { - "anyOf": [ - { - "items": { - "type": "string" - }, - "type": "array" - }, - { - "type": "null" - } - ], - "default": [], - "description": "Tags to categorize the Prompty.", - "title": "Tags" - }, - "model": { - "$ref": "#/$defs/PromptyModelConfig", - "description": "Model configuration. Can be either OpenAI or Azure OpenAI." - }, - "inputs": { - "additionalProperties": true, - "default": {}, - "description": "Input parameters for the Prompty. 
These define the expected inputs.", - "title": "Inputs", - "type": "object" - }, - "sample": { - "anyOf": [ - { - "type": "string" - }, - { - "additionalProperties": true, - "type": "object" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Sample input or the path to a sample file for testing the Prompty.", - "title": "Sample" - }, - "outputs": { - "anyOf": [ - { - "additionalProperties": true, - "type": "object" - }, - { - "type": "null" - } - ], - "default": {}, - "description": "Optional outputs for the Prompty. Defines expected output format.", - "title": "Outputs" - }, - "content": { - "description": "The prompt messages defined in the Prompty file.", - "title": "Content", - "type": "string" - } - }, - "required": [ - "model", - "content" - ], - "title": "Prompty", - "type": "object" - }, - "PromptyModelConfig": { - "properties": { - "api": { - "default": "chat", - "description": "The API to use, either 'chat' or 'completion'", - "enum": [ - "chat", - "completion" - ], - "title": "Api", - "type": "string" - }, - "configuration": { - "anyOf": [ - { - "$ref": "#/$defs/OpenAIModelConfig" - }, - { - "$ref": "#/$defs/AzureOpenAIModelConfig" - }, - { - "$ref": "#/$defs/HFHubModelConfig" - }, - { - "$ref": "#/$defs/NVIDIAModelConfig" - } - ], - "description": "Model configuration settings", - "title": "Configuration" - }, - "parameters": { - "anyOf": [ - { - "$ref": "#/$defs/OpenAITextCompletionParams" - }, - { - "$ref": "#/$defs/OpenAIChatCompletionParams" - }, - { - "$ref": "#/$defs/HFHubChatCompletionParams" - }, - { - "$ref": "#/$defs/NVIDIAChatCompletionParams" - } - ], - "description": "Parameters for the model request", - "title": "Parameters" - }, - "response": { - "default": "first", - "description": "Determines if full response or just the first one is returned", - "enum": [ - "first", - "full" - ], - "title": "Response", - "type": "string" - } - }, - "required": [ - "configuration", - "parameters" - ], - "title": "PromptyModelConfig", - "type": "object" - }, - "PubSubMetadata": { - "description": "Pub/Sub configuration information.", - "properties": { - "agent_name": { - "description": "Pub/Sub topic the agent subscribes to", - "title": "Agent Name", - "type": "string" - }, - "name": { - "description": "Pub/Sub component name", - "title": "Name", - "type": "string" - }, - "broadcast_topic": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Pub/Sub topic for broadcasting messages", - "title": "Broadcast Topic" - }, - "agent_topic": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Pub/Sub topic for direct agent messages", - "title": "Agent Topic" - } - }, - "required": [ - "agent_name", - "name" - ], - "title": "PubSubMetadata", - "type": "object" - }, - "RegistryMetadata": { - "description": "Registry configuration information.", - "properties": { - "statestore": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Name of the statestore component for the registry", - "title": "Statestore" - }, - "name": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Name of the team registry", - "title": "Name" - } - }, - "title": "RegistryMetadata", - "type": "object" - }, - "ToolMetadata": { - "description": "Metadata about a tool available to the agent.", - "properties": { - "tool_name": { - "description": "Name of the tool", - 
"title": "Tool Name", - "type": "string" - }, - "tool_description": { - "description": "Description of the tool's functionality", - "title": "Tool Description", - "type": "string" - }, - "tool_args": { - "description": "Arguments for the tool", - "title": "Tool Args", - "type": "string" - } - }, - "required": [ - "tool_name", - "tool_description", - "tool_args" - ], - "title": "ToolMetadata", - "type": "object" - } - }, - "description": "Schema for agent metadata including schema version.", - "properties": { - "schema_version": { - "description": "Version of the schema used for the agent metadata.", - "title": "Schema Version", - "type": "string" - }, - "agent": { - "$ref": "#/$defs/AgentMetadata", - "description": "Agent configuration and capabilities" - }, - "name": { - "description": "Name of the agent", - "title": "Name", - "type": "string" - }, - "registered_at": { - "description": "ISO 8601 timestamp of registration", - "title": "Registered At", - "type": "string" - }, - "pubsub": { - "anyOf": [ - { - "$ref": "#/$defs/PubSubMetadata" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Pub/sub configuration if enabled" - }, - "memory": { - "anyOf": [ - { - "$ref": "#/$defs/MemoryMetadata" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Memory configuration if enabled" - }, - "llm": { - "anyOf": [ - { - "$ref": "#/$defs/LLMMetadata" - }, - { - "type": "null" - } - ], - "default": null, - "description": "LLM configuration" - }, - "registry": { - "anyOf": [ - { - "$ref": "#/$defs/RegistryMetadata" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Registry configuration" - }, - "tools": { - "anyOf": [ - { - "items": { - "$ref": "#/$defs/ToolMetadata" - }, - "type": "array" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Available tools", - "title": "Tools" - }, - "max_iterations": { - "anyOf": [ - { - "type": "integer" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Maximum iterations for agent execution", - "title": "Max Iterations" - }, - "tool_choice": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Tool choice strategy", - "title": "Tool Choice" - }, - "agent_metadata": { - "anyOf": [ - { - "additionalProperties": true, - "type": "object" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Additional metadata about the agent", - "title": "Agent Metadata" - } - }, - "required": [ - "schema_version", - "agent", - "name", - "registered_at" - ], - "title": "AgentMetadataSchema", - "type": "object", - "$schema": "https://json-schema.org/draft/2020-12/schema", - "version": "0.10.7" -} \ No newline at end of file diff --git a/ext/dapr-ext-agent-core/schemas/agent-metadata/v0.10.6.json b/ext/dapr-ext-agent-core/schemas/agent-metadata/v0.10.6.json deleted file mode 100644 index b9cbb0bb1..000000000 --- a/ext/dapr-ext-agent-core/schemas/agent-metadata/v0.10.6.json +++ /dev/null @@ -1,2018 +0,0 @@ -{ - "$defs": { - "AgentMetadata": { - "description": "Metadata about an agent's configuration and capabilities.", - "properties": { - "appid": { - "description": "Dapr application ID of the agent", - "title": "Appid", - "type": "string" - }, - "type": { - "description": "Type of the agent (e.g., standalone, durable)", - "title": "Type", - "type": "string" - }, - "orchestrator": { - "default": false, - "description": "Indicates if the agent is an orchestrator", - "title": "Orchestrator", - "type": "boolean" - 
}, - "role": { - "default": "", - "description": "Role of the agent", - "title": "Role", - "type": "string" - }, - "goal": { - "default": "", - "description": "High-level objective of the agent", - "title": "Goal", - "type": "string" - }, - "name": { - "default": "", - "description": "Namememory of the agent", - "title": "Name", - "type": "string" - }, - "instructions": { - "anyOf": [ - { - "items": { - "type": "string" - }, - "type": "array" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Instructions for the agent", - "title": "Instructions" - }, - "statestore": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Dapr state store component name used by the agent", - "title": "Statestore" - }, - "system_prompt": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "System prompt guiding the agent's behavior", - "title": "System Prompt" - } - }, - "required": [ - "appid", - "type" - ], - "title": "AgentMetadata", - "type": "object" - }, - "AzureOpenAIModelConfig": { - "properties": { - "api_key": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "API key to authenticate the Azure OpenAI API", - "title": "Api Key" - }, - "azure_ad_token": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Azure Active Directory token for authentication", - "title": "Azure Ad Token" - }, - "organization": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Azure organization associated with the OpenAI resource", - "title": "Organization" - }, - "project": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Azure project associated with the OpenAI resource", - "title": "Project" - }, - "api_version": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": "2024-07-01-preview", - "description": "API version for Azure OpenAI models", - "title": "Api Version" - }, - "azure_endpoint": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Azure endpoint for Azure OpenAI models", - "title": "Azure Endpoint" - }, - "azure_deployment": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Azure deployment for Azure OpenAI models", - "title": "Azure Deployment" - }, - "azure_client_id": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Client ID for Managed Identity authentication.", - "title": "Azure Client Id" - }, - "type": { - "const": "azure_openai", - "default": "azure_openai", - "description": "Type of the model, must always be 'azure_openai'", - "title": "Type", - "type": "string" - } - }, - "title": "AzureOpenAIModelConfig", - "type": "object" - }, - "HFHubChatCompletionParams": { - "description": "Specific settings for Hugging Face Hub Chat Completion endpoint.", - "properties": { - "model": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "The model to use for chat-completion. 
Can be a model ID or a URL to a deployed Inference Endpoint.", - "title": "Model" - }, - "frequency_penalty": { - "anyOf": [ - { - "type": "number" - }, - { - "type": "null" - } - ], - "default": 0.0, - "description": "Penalizes new tokens based on their existing frequency in the text so far.", - "title": "Frequency Penalty" - }, - "logit_bias": { - "anyOf": [ - { - "additionalProperties": { - "type": "number" - }, - "type": "object" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Modify the likelihood of specified tokens appearing in the completion.", - "title": "Logit Bias" - }, - "logprobs": { - "anyOf": [ - { - "type": "boolean" - }, - { - "type": "null" - } - ], - "default": false, - "description": "Whether to return log probabilities of the output tokens or not.", - "title": "Logprobs" - }, - "max_tokens": { - "anyOf": [ - { - "minimum": 1, - "type": "integer" - }, - { - "type": "null" - } - ], - "default": 100, - "description": "Maximum number of tokens allowed in the response.", - "title": "Max Tokens" - }, - "n": { - "anyOf": [ - { - "type": "integer" - }, - { - "type": "null" - } - ], - "default": null, - "description": "UNUSED. Included for compatibility.", - "title": "N" - }, - "presence_penalty": { - "anyOf": [ - { - "type": "number" - }, - { - "type": "null" - } - ], - "default": 0.0, - "description": "Penalizes new tokens based on their presence in the text so far.", - "title": "Presence Penalty" - }, - "response_format": { - "anyOf": [ - { - "type": "string" - }, - { - "additionalProperties": true, - "type": "object" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Grammar constraints. Can be either a JSONSchema or a regex.", - "title": "Response Format" - }, - "seed": { - "anyOf": [ - { - "type": "integer" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Seed for reproducible control flow.", - "title": "Seed" - }, - "stop": { - "anyOf": [ - { - "type": "string" - }, - { - "items": { - "type": "string" - }, - "type": "array" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Up to four strings which trigger the end of the response.", - "title": "Stop" - }, - "stream": { - "anyOf": [ - { - "type": "boolean" - }, - { - "type": "null" - } - ], - "default": false, - "description": "Enable realtime streaming of responses.", - "title": "Stream" - }, - "stream_options": { - "anyOf": [ - { - "additionalProperties": true, - "type": "object" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Options for streaming completions.", - "title": "Stream Options" - }, - "temperature": { - "anyOf": [ - { - "type": "number" - }, - { - "type": "null" - } - ], - "default": 1.0, - "description": "Controls randomness of the generations.", - "title": "Temperature" - }, - "top_logprobs": { - "anyOf": [ - { - "type": "integer" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Number of most likely tokens to return at each position.", - "title": "Top Logprobs" - }, - "top_p": { - "anyOf": [ - { - "type": "number" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Fraction of the most likely next words to sample from.", - "title": "Top P" - }, - "tool_choice": { - "anyOf": [ - { - "type": "string" - }, - { - "additionalProperties": true, - "type": "object" - }, - { - "type": "null" - } - ], - "default": null, - "description": "The tool to use for the completion. 
Defaults to 'auto'.", - "title": "Tool Choice" - }, - "tool_prompt": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "A prompt to be appended before the tools.", - "title": "Tool Prompt" - }, - "tools": { - "anyOf": [ - { - "items": { - "additionalProperties": true, - "type": "object" - }, - "type": "array" - }, - { - "type": "null" - } - ], - "default": null, - "description": "A list of tools the model may call.", - "title": "Tools" - } - }, - "title": "HFHubChatCompletionParams", - "type": "object" - }, - "HFHubModelConfig": { - "properties": { - "model": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Model ID on Hugging Face Hub or a URL to a deployed endpoint. If not set, a recommended model may be chosen by your wrapper.", - "title": "Model" - }, - "hf_provider": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": "auto", - "description": "Inference provider to use. Defaults to automatic selection based on available providers. Ignored if a custom endpoint URL is provided.", - "title": "Hf Provider" - }, - "token": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "boolean" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Hugging Face access token for authentication. If None, uses the locally saved token. Set to False to skip sending a token. Mutually exclusive with api_key.", - "title": "Token" - }, - "api_key": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "boolean" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Alias for token. Use only one of token or api_key.", - "title": "Api Key" - }, - "base_url": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Custom endpoint URL for inference. Used for private deployments or TGI endpoints. Cannot be set if 'model' is a Hub ID.", - "title": "Base Url" - }, - "timeout": { - "anyOf": [ - { - "type": "number" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Maximum seconds to wait for a response. If None, waits indefinitely. Useful for slow model loading.", - "title": "Timeout" - }, - "headers": { - "anyOf": [ - { - "additionalProperties": { - "type": "string" - }, - "type": "object" - }, - { - "type": "null" - } - ], - "description": "Extra HTTP headers to send with requests. Overrides defaults like authorization and user-agent.", - "title": "Headers" - }, - "cookies": { - "anyOf": [ - { - "additionalProperties": { - "type": "string" - }, - "type": "object" - }, - { - "type": "null" - } - ], - "description": "Extra cookies to send with requests.", - "title": "Cookies" - }, - "proxies": { - "anyOf": [ - {}, - { - "type": "null" - } - ], - "default": null, - "description": "Proxy settings for HTTP requests. Use standard requests format.", - "title": "Proxies" - }, - "bill_to": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Billing account for requests. 
Only used for enterprise/organization billing.", - "title": "Bill To" - }, - "type": { - "const": "huggingface", - "default": "huggingface", - "description": "Type of the model, must always be 'huggingface'", - "title": "Type", - "type": "string" - }, - "name": { - "default": "", - "description": "Name of the model available through Hugging Face", - "title": "Name", - "type": "string" - } - }, - "title": "HFHubModelConfig", - "type": "object" - }, - "LLMMetadata": { - "description": "LLM configuration information.", - "properties": { - "client": { - "description": "LLM client used by the agent", - "title": "Client", - "type": "string" - }, - "provider": { - "description": "LLM provider used by the agent", - "title": "Provider", - "type": "string" - }, - "api": { - "default": "unknown", - "description": "API type used by the LLM client", - "title": "Api", - "type": "string" - }, - "model": { - "default": "unknown", - "description": "Model name or identifier", - "title": "Model", - "type": "string" - }, - "component_name": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Dapr component name for the LLM client", - "title": "Component Name" - }, - "base_url": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Base URL for the LLM API if applicable", - "title": "Base Url" - }, - "azure_endpoint": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Azure endpoint if using Azure OpenAI", - "title": "Azure Endpoint" - }, - "azure_deployment": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Azure deployment name if using Azure OpenAI", - "title": "Azure Deployment" - }, - "prompt_template": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Prompt template used by the agent", - "title": "Prompt Template" - }, - "prompty": { - "anyOf": [ - { - "$ref": "#/$defs/Prompty" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Prompty template name if used" - } - }, - "required": [ - "client", - "provider" - ], - "title": "LLMMetadata", - "type": "object" - }, - "MemoryMetadata": { - "description": "Memory configuration information.", - "properties": { - "type": { - "description": "Type of memory used by the agent", - "title": "Type", - "type": "string" - }, - "statestore": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Dapr state store component name for memory", - "title": "Statestore" - }, - "session_id": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Default session ID for the agent's memory", - "title": "Session Id" - } - }, - "required": [ - "type" - ], - "title": "MemoryMetadata", - "type": "object" - }, - "NVIDIAChatCompletionParams": { - "description": "Specific settings for the Chat Completion endpoint.", - "properties": { - "model": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "ID of the model to use", - "title": "Model" - }, - "temperature": { - "anyOf": [ - { - "maximum": 2.0, - "minimum": 0.0, - "type": "number" - }, - { - "type": "null" - } - ], - "default": 0, - "description": "Sampling temperature", - "title": "Temperature" - }, - "max_tokens": { - "anyOf": [ - { - "type": "integer" - }, - 
{ - "type": "null" - } - ], - "default": null, - "description": "Maximum number of tokens to generate. Can be None or a positive integer.", - "title": "Max Tokens" - }, - "top_p": { - "anyOf": [ - { - "maximum": 1.0, - "minimum": 0.0, - "type": "number" - }, - { - "type": "null" - } - ], - "default": 1.0, - "description": "Nucleus sampling probability mass", - "title": "Top P" - }, - "frequency_penalty": { - "anyOf": [ - { - "maximum": 2.0, - "minimum": -2.0, - "type": "number" - }, - { - "type": "null" - } - ], - "default": 0.0, - "description": "Frequency penalty", - "title": "Frequency Penalty" - }, - "presence_penalty": { - "anyOf": [ - { - "maximum": 2.0, - "minimum": -2.0, - "type": "number" - }, - { - "type": "null" - } - ], - "default": 0.0, - "description": "Presence penalty", - "title": "Presence Penalty" - }, - "stop": { - "anyOf": [ - { - "type": "string" - }, - { - "items": { - "type": "string" - }, - "type": "array" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Stop sequences", - "title": "Stop" - }, - "stream": { - "anyOf": [ - { - "type": "boolean" - }, - { - "type": "null" - } - ], - "default": false, - "description": "Whether to stream responses", - "title": "Stream" - }, - "logit_bias": { - "anyOf": [ - { - "additionalProperties": { - "type": "number" - }, - "type": "object" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Modify likelihood of specified tokens", - "title": "Logit Bias" - }, - "logprobs": { - "anyOf": [ - { - "type": "boolean" - }, - { - "type": "null" - } - ], - "default": false, - "description": "Whether to return log probabilities", - "title": "Logprobs" - }, - "top_logprobs": { - "anyOf": [ - { - "maximum": 20, - "minimum": 0, - "type": "integer" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Number of top log probabilities to return", - "title": "Top Logprobs" - }, - "n": { - "anyOf": [ - { - "maximum": 128, - "minimum": 1, - "type": "integer" - }, - { - "type": "null" - } - ], - "default": 1, - "description": "Number of chat completion choices to generate", - "title": "N" - }, - "tools": { - "anyOf": [ - { - "items": { - "additionalProperties": true, - "type": "object" - }, - "maxItems": 64, - "type": "array" - }, - { - "type": "null" - } - ], - "default": null, - "description": "List of tools the model may call", - "title": "Tools" - }, - "tool_choice": { - "anyOf": [ - { - "type": "string" - }, - { - "additionalProperties": true, - "type": "object" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Controls which tool is called", - "title": "Tool Choice" - } - }, - "title": "NVIDIAChatCompletionParams", - "type": "object" - }, - "NVIDIAModelConfig": { - "properties": { - "base_url": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": "https://integrate.api.nvidia.com/v1", - "description": "Base URL for the NVIDIA API", - "title": "Base Url" - }, - "api_key": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "API key to authenticate the NVIDIA API", - "title": "Api Key" - }, - "type": { - "const": "nvidia", - "default": "nvidia", - "description": "Type of the model, must always be 'nvidia'", - "title": "Type", - "type": "string" - }, - "name": { - "default": "", - "description": "Name of the model available through NVIDIA", - "title": "Name", - "type": "string" - } - }, - "title": "NVIDIAModelConfig", - "type": "object" - }, - "OpenAIChatCompletionParams": { - 
"description": "Specific settings for the Chat Completion endpoint.", - "properties": { - "model": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "ID of the model to use", - "title": "Model" - }, - "temperature": { - "anyOf": [ - { - "maximum": 2.0, - "minimum": 0.0, - "type": "number" - }, - { - "type": "null" - } - ], - "default": 0, - "description": "Sampling temperature", - "title": "Temperature" - }, - "max_tokens": { - "anyOf": [ - { - "type": "integer" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Maximum number of tokens to generate. Can be None or a positive integer.", - "title": "Max Tokens" - }, - "top_p": { - "anyOf": [ - { - "maximum": 1.0, - "minimum": 0.0, - "type": "number" - }, - { - "type": "null" - } - ], - "default": 1.0, - "description": "Nucleus sampling probability mass", - "title": "Top P" - }, - "frequency_penalty": { - "anyOf": [ - { - "maximum": 2.0, - "minimum": -2.0, - "type": "number" - }, - { - "type": "null" - } - ], - "default": 0.0, - "description": "Frequency penalty", - "title": "Frequency Penalty" - }, - "presence_penalty": { - "anyOf": [ - { - "maximum": 2.0, - "minimum": -2.0, - "type": "number" - }, - { - "type": "null" - } - ], - "default": 0.0, - "description": "Presence penalty", - "title": "Presence Penalty" - }, - "stop": { - "anyOf": [ - { - "type": "string" - }, - { - "items": { - "type": "string" - }, - "type": "array" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Stop sequences", - "title": "Stop" - }, - "stream": { - "anyOf": [ - { - "type": "boolean" - }, - { - "type": "null" - } - ], - "default": false, - "description": "Whether to stream responses", - "title": "Stream" - }, - "logit_bias": { - "anyOf": [ - { - "additionalProperties": { - "type": "number" - }, - "type": "object" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Modify likelihood of specified tokens", - "title": "Logit Bias" - }, - "logprobs": { - "anyOf": [ - { - "type": "boolean" - }, - { - "type": "null" - } - ], - "default": false, - "description": "Whether to return log probabilities", - "title": "Logprobs" - }, - "top_logprobs": { - "anyOf": [ - { - "maximum": 20, - "minimum": 0, - "type": "integer" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Number of top log probabilities to return", - "title": "Top Logprobs" - }, - "n": { - "anyOf": [ - { - "maximum": 128, - "minimum": 1, - "type": "integer" - }, - { - "type": "null" - } - ], - "default": 1, - "description": "Number of chat completion choices to generate", - "title": "N" - }, - "response_format": { - "anyOf": [ - { - "additionalProperties": { - "enum": [ - "text", - "json_object" - ], - "type": "string" - }, - "propertyNames": { - "const": "type" - }, - "type": "object" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Format of the response", - "title": "Response Format" - }, - "tools": { - "anyOf": [ - { - "items": { - "additionalProperties": true, - "type": "object" - }, - "maxItems": 64, - "type": "array" - }, - { - "type": "null" - } - ], - "default": null, - "description": "List of tools the model may call", - "title": "Tools" - }, - "tool_choice": { - "anyOf": [ - { - "type": "string" - }, - { - "additionalProperties": true, - "type": "object" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Controls which tool is called", - "title": "Tool Choice" - }, - "function_call": { - "anyOf": [ - { - "type": 
"string" - }, - { - "additionalProperties": true, - "type": "object" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Controls which function is called", - "title": "Function Call" - }, - "seed": { - "anyOf": [ - { - "type": "integer" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Seed for deterministic sampling", - "title": "Seed" - }, - "user": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Unique identifier representing the end-user", - "title": "User" - } - }, - "title": "OpenAIChatCompletionParams", - "type": "object" - }, - "OpenAIModelConfig": { - "properties": { - "base_url": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Base URL for the OpenAI API", - "title": "Base Url" - }, - "api_key": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "API key to authenticate the OpenAI API", - "title": "Api Key" - }, - "organization": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Organization name for OpenAI", - "title": "Organization" - }, - "project": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "OpenAI project name.", - "title": "Project" - }, - "type": { - "const": "openai", - "default": "openai", - "description": "Type of the model, must always be 'openai'", - "title": "Type", - "type": "string" - }, - "name": { - "default": "", - "description": "Name of the OpenAI model", - "title": "Name", - "type": "string" - } - }, - "title": "OpenAIModelConfig", - "type": "object" - }, - "OpenAITextCompletionParams": { - "description": "Specific configs for the text completions endpoint.", - "properties": { - "model": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "ID of the model to use", - "title": "Model" - }, - "temperature": { - "anyOf": [ - { - "maximum": 2.0, - "minimum": 0.0, - "type": "number" - }, - { - "type": "null" - } - ], - "default": 0, - "description": "Sampling temperature", - "title": "Temperature" - }, - "max_tokens": { - "anyOf": [ - { - "type": "integer" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Maximum number of tokens to generate. 
Can be None or a positive integer.", - "title": "Max Tokens" - }, - "top_p": { - "anyOf": [ - { - "maximum": 1.0, - "minimum": 0.0, - "type": "number" - }, - { - "type": "null" - } - ], - "default": 1.0, - "description": "Nucleus sampling probability mass", - "title": "Top P" - }, - "frequency_penalty": { - "anyOf": [ - { - "maximum": 2.0, - "minimum": -2.0, - "type": "number" - }, - { - "type": "null" - } - ], - "default": 0.0, - "description": "Frequency penalty", - "title": "Frequency Penalty" - }, - "presence_penalty": { - "anyOf": [ - { - "maximum": 2.0, - "minimum": -2.0, - "type": "number" - }, - { - "type": "null" - } - ], - "default": 0.0, - "description": "Presence penalty", - "title": "Presence Penalty" - }, - "stop": { - "anyOf": [ - { - "type": "string" - }, - { - "items": { - "type": "string" - }, - "type": "array" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Stop sequences", - "title": "Stop" - }, - "stream": { - "anyOf": [ - { - "type": "boolean" - }, - { - "type": "null" - } - ], - "default": false, - "description": "Whether to stream responses", - "title": "Stream" - }, - "best_of": { - "anyOf": [ - { - "minimum": 1, - "type": "integer" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Number of best completions to generate", - "title": "Best Of" - }, - "echo": { - "anyOf": [ - { - "type": "boolean" - }, - { - "type": "null" - } - ], - "default": false, - "description": "Whether to echo the prompt", - "title": "Echo" - }, - "logprobs": { - "anyOf": [ - { - "maximum": 5, - "minimum": 0, - "type": "integer" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Include log probabilities", - "title": "Logprobs" - }, - "suffix": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Suffix to append to the prompt", - "title": "Suffix" - } - }, - "title": "OpenAITextCompletionParams", - "type": "object" - }, - "Prompty": { - "description": "A class to handle loading and formatting of Prompty templates for language models workflows.", - "properties": { - "name": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": "", - "description": "Name of the Prompty file.", - "title": "Name" - }, - "description": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": "", - "description": "Description of the Prompty file.", - "title": "Description" - }, - "version": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": "1.0", - "description": "Version of the Prompty.", - "title": "Version" - }, - "authors": { - "anyOf": [ - { - "items": { - "type": "string" - }, - "type": "array" - }, - { - "type": "null" - } - ], - "default": [], - "description": "List of authors for the Prompty.", - "title": "Authors" - }, - "tags": { - "anyOf": [ - { - "items": { - "type": "string" - }, - "type": "array" - }, - { - "type": "null" - } - ], - "default": [], - "description": "Tags to categorize the Prompty.", - "title": "Tags" - }, - "model": { - "$ref": "#/$defs/PromptyModelConfig", - "description": "Model configuration. Can be either OpenAI or Azure OpenAI." - }, - "inputs": { - "additionalProperties": true, - "default": {}, - "description": "Input parameters for the Prompty. 
These define the expected inputs.", - "title": "Inputs", - "type": "object" - }, - "sample": { - "anyOf": [ - { - "type": "string" - }, - { - "additionalProperties": true, - "type": "object" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Sample input or the path to a sample file for testing the Prompty.", - "title": "Sample" - }, - "outputs": { - "anyOf": [ - { - "additionalProperties": true, - "type": "object" - }, - { - "type": "null" - } - ], - "default": {}, - "description": "Optional outputs for the Prompty. Defines expected output format.", - "title": "Outputs" - }, - "content": { - "description": "The prompt messages defined in the Prompty file.", - "title": "Content", - "type": "string" - } - }, - "required": [ - "model", - "content" - ], - "title": "Prompty", - "type": "object" - }, - "PromptyModelConfig": { - "properties": { - "api": { - "default": "chat", - "description": "The API to use, either 'chat' or 'completion'", - "enum": [ - "chat", - "completion" - ], - "title": "Api", - "type": "string" - }, - "configuration": { - "anyOf": [ - { - "$ref": "#/$defs/OpenAIModelConfig" - }, - { - "$ref": "#/$defs/AzureOpenAIModelConfig" - }, - { - "$ref": "#/$defs/HFHubModelConfig" - }, - { - "$ref": "#/$defs/NVIDIAModelConfig" - } - ], - "description": "Model configuration settings", - "title": "Configuration" - }, - "parameters": { - "anyOf": [ - { - "$ref": "#/$defs/OpenAITextCompletionParams" - }, - { - "$ref": "#/$defs/OpenAIChatCompletionParams" - }, - { - "$ref": "#/$defs/HFHubChatCompletionParams" - }, - { - "$ref": "#/$defs/NVIDIAChatCompletionParams" - } - ], - "description": "Parameters for the model request", - "title": "Parameters" - }, - "response": { - "default": "first", - "description": "Determines if full response or just the first one is returned", - "enum": [ - "first", - "full" - ], - "title": "Response", - "type": "string" - } - }, - "required": [ - "configuration", - "parameters" - ], - "title": "PromptyModelConfig", - "type": "object" - }, - "PubSubMetadata": { - "description": "Pub/Sub configuration information.", - "properties": { - "agent_name": { - "description": "Pub/Sub topic the agent subscribes to", - "title": "Agent Name", - "type": "string" - }, - "name": { - "description": "Pub/Sub component name", - "title": "Name", - "type": "string" - }, - "broadcast_topic": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Pub/Sub topic for broadcasting messages", - "title": "Broadcast Topic" - }, - "agent_topic": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Pub/Sub topic for direct agent messages", - "title": "Agent Topic" - } - }, - "required": [ - "agent_name", - "name" - ], - "title": "PubSubMetadata", - "type": "object" - }, - "RegistryMetadata": { - "description": "Registry configuration information.", - "properties": { - "statestore": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Name of the statestore component for the registry", - "title": "Statestore" - }, - "name": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Name of the team registry", - "title": "Name" - } - }, - "title": "RegistryMetadata", - "type": "object" - }, - "ToolMetadata": { - "description": "Metadata about a tool available to the agent.", - "properties": { - "tool_name": { - "description": "Name of the tool", - 
"title": "Tool Name", - "type": "string" - }, - "tool_description": { - "description": "Description of the tool's functionality", - "title": "Tool Description", - "type": "string" - }, - "tool_args": { - "description": "Arguments for the tool", - "title": "Tool Args", - "type": "string" - } - }, - "required": [ - "tool_name", - "tool_description", - "tool_args" - ], - "title": "ToolMetadata", - "type": "object" - } - }, - "description": "Schema for agent metadata including schema version.", - "properties": { - "schema_version": { - "description": "Version of the schema used for the agent metadata.", - "title": "Schema Version", - "type": "string" - }, - "agent": { - "$ref": "#/$defs/AgentMetadata", - "description": "Agent configuration and capabilities" - }, - "name": { - "description": "Name of the agent", - "title": "Name", - "type": "string" - }, - "registered_at": { - "description": "ISO 8601 timestamp of registration", - "title": "Registered At", - "type": "string" - }, - "pubsub": { - "anyOf": [ - { - "$ref": "#/$defs/PubSubMetadata" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Pub/sub configuration if enabled" - }, - "memory": { - "anyOf": [ - { - "$ref": "#/$defs/MemoryMetadata" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Memory configuration if enabled" - }, - "llm": { - "anyOf": [ - { - "$ref": "#/$defs/LLMMetadata" - }, - { - "type": "null" - } - ], - "default": null, - "description": "LLM configuration" - }, - "registry": { - "anyOf": [ - { - "$ref": "#/$defs/RegistryMetadata" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Registry configuration" - }, - "tools": { - "anyOf": [ - { - "items": { - "$ref": "#/$defs/ToolMetadata" - }, - "type": "array" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Available tools", - "title": "Tools" - }, - "max_iterations": { - "anyOf": [ - { - "type": "integer" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Maximum iterations for agent execution", - "title": "Max Iterations" - }, - "tool_choice": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Tool choice strategy", - "title": "Tool Choice" - }, - "agent_metadata": { - "anyOf": [ - { - "additionalProperties": true, - "type": "object" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Additional metadata about the agent", - "title": "Agent Metadata" - } - }, - "required": [ - "schema_version", - "agent", - "name", - "registered_at" - ], - "title": "AgentMetadataSchema", - "type": "object", - "$schema": "https://json-schema.org/draft/2020-12/schema", - "version": "0.10.6" -} \ No newline at end of file diff --git a/ext/dapr-ext-agent-core/schemas/agent-metadata/v0.10.7.json b/ext/dapr-ext-agent-core/schemas/agent-metadata/v0.10.7.json deleted file mode 100644 index 7dea02627..000000000 --- a/ext/dapr-ext-agent-core/schemas/agent-metadata/v0.10.7.json +++ /dev/null @@ -1,2018 +0,0 @@ -{ - "$defs": { - "AgentMetadata": { - "description": "Metadata about an agent's configuration and capabilities.", - "properties": { - "appid": { - "description": "Dapr application ID of the agent", - "title": "Appid", - "type": "string" - }, - "type": { - "description": "Type of the agent (e.g., standalone, durable)", - "title": "Type", - "type": "string" - }, - "orchestrator": { - "default": false, - "description": "Indicates if the agent is an orchestrator", - "title": "Orchestrator", - "type": "boolean" - 
}, - "role": { - "default": "", - "description": "Role of the agent", - "title": "Role", - "type": "string" - }, - "goal": { - "default": "", - "description": "High-level objective of the agent", - "title": "Goal", - "type": "string" - }, - "name": { - "default": "", - "description": "Namememory of the agent", - "title": "Name", - "type": "string" - }, - "instructions": { - "anyOf": [ - { - "items": { - "type": "string" - }, - "type": "array" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Instructions for the agent", - "title": "Instructions" - }, - "statestore": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Dapr state store component name used by the agent", - "title": "Statestore" - }, - "system_prompt": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "System prompt guiding the agent's behavior", - "title": "System Prompt" - } - }, - "required": [ - "appid", - "type" - ], - "title": "AgentMetadata", - "type": "object" - }, - "AzureOpenAIModelConfig": { - "properties": { - "api_key": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "API key to authenticate the Azure OpenAI API", - "title": "Api Key" - }, - "azure_ad_token": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Azure Active Directory token for authentication", - "title": "Azure Ad Token" - }, - "organization": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Azure organization associated with the OpenAI resource", - "title": "Organization" - }, - "project": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Azure project associated with the OpenAI resource", - "title": "Project" - }, - "api_version": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": "2024-07-01-preview", - "description": "API version for Azure OpenAI models", - "title": "Api Version" - }, - "azure_endpoint": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Azure endpoint for Azure OpenAI models", - "title": "Azure Endpoint" - }, - "azure_deployment": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Azure deployment for Azure OpenAI models", - "title": "Azure Deployment" - }, - "azure_client_id": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Client ID for Managed Identity authentication.", - "title": "Azure Client Id" - }, - "type": { - "const": "azure_openai", - "default": "azure_openai", - "description": "Type of the model, must always be 'azure_openai'", - "title": "Type", - "type": "string" - } - }, - "title": "AzureOpenAIModelConfig", - "type": "object" - }, - "HFHubChatCompletionParams": { - "description": "Specific settings for Hugging Face Hub Chat Completion endpoint.", - "properties": { - "model": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "The model to use for chat-completion. 
Can be a model ID or a URL to a deployed Inference Endpoint.", - "title": "Model" - }, - "frequency_penalty": { - "anyOf": [ - { - "type": "number" - }, - { - "type": "null" - } - ], - "default": 0.0, - "description": "Penalizes new tokens based on their existing frequency in the text so far.", - "title": "Frequency Penalty" - }, - "logit_bias": { - "anyOf": [ - { - "additionalProperties": { - "type": "number" - }, - "type": "object" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Modify the likelihood of specified tokens appearing in the completion.", - "title": "Logit Bias" - }, - "logprobs": { - "anyOf": [ - { - "type": "boolean" - }, - { - "type": "null" - } - ], - "default": false, - "description": "Whether to return log probabilities of the output tokens or not.", - "title": "Logprobs" - }, - "max_tokens": { - "anyOf": [ - { - "minimum": 1, - "type": "integer" - }, - { - "type": "null" - } - ], - "default": 100, - "description": "Maximum number of tokens allowed in the response.", - "title": "Max Tokens" - }, - "n": { - "anyOf": [ - { - "type": "integer" - }, - { - "type": "null" - } - ], - "default": null, - "description": "UNUSED. Included for compatibility.", - "title": "N" - }, - "presence_penalty": { - "anyOf": [ - { - "type": "number" - }, - { - "type": "null" - } - ], - "default": 0.0, - "description": "Penalizes new tokens based on their presence in the text so far.", - "title": "Presence Penalty" - }, - "response_format": { - "anyOf": [ - { - "type": "string" - }, - { - "additionalProperties": true, - "type": "object" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Grammar constraints. Can be either a JSONSchema or a regex.", - "title": "Response Format" - }, - "seed": { - "anyOf": [ - { - "type": "integer" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Seed for reproducible control flow.", - "title": "Seed" - }, - "stop": { - "anyOf": [ - { - "type": "string" - }, - { - "items": { - "type": "string" - }, - "type": "array" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Up to four strings which trigger the end of the response.", - "title": "Stop" - }, - "stream": { - "anyOf": [ - { - "type": "boolean" - }, - { - "type": "null" - } - ], - "default": false, - "description": "Enable realtime streaming of responses.", - "title": "Stream" - }, - "stream_options": { - "anyOf": [ - { - "additionalProperties": true, - "type": "object" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Options for streaming completions.", - "title": "Stream Options" - }, - "temperature": { - "anyOf": [ - { - "type": "number" - }, - { - "type": "null" - } - ], - "default": 1.0, - "description": "Controls randomness of the generations.", - "title": "Temperature" - }, - "top_logprobs": { - "anyOf": [ - { - "type": "integer" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Number of most likely tokens to return at each position.", - "title": "Top Logprobs" - }, - "top_p": { - "anyOf": [ - { - "type": "number" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Fraction of the most likely next words to sample from.", - "title": "Top P" - }, - "tool_choice": { - "anyOf": [ - { - "type": "string" - }, - { - "additionalProperties": true, - "type": "object" - }, - { - "type": "null" - } - ], - "default": null, - "description": "The tool to use for the completion. 
Defaults to 'auto'.", - "title": "Tool Choice" - }, - "tool_prompt": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "A prompt to be appended before the tools.", - "title": "Tool Prompt" - }, - "tools": { - "anyOf": [ - { - "items": { - "additionalProperties": true, - "type": "object" - }, - "type": "array" - }, - { - "type": "null" - } - ], - "default": null, - "description": "A list of tools the model may call.", - "title": "Tools" - } - }, - "title": "HFHubChatCompletionParams", - "type": "object" - }, - "HFHubModelConfig": { - "properties": { - "model": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Model ID on Hugging Face Hub or a URL to a deployed endpoint. If not set, a recommended model may be chosen by your wrapper.", - "title": "Model" - }, - "hf_provider": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": "auto", - "description": "Inference provider to use. Defaults to automatic selection based on available providers. Ignored if a custom endpoint URL is provided.", - "title": "Hf Provider" - }, - "token": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "boolean" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Hugging Face access token for authentication. If None, uses the locally saved token. Set to False to skip sending a token. Mutually exclusive with api_key.", - "title": "Token" - }, - "api_key": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "boolean" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Alias for token. Use only one of token or api_key.", - "title": "Api Key" - }, - "base_url": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Custom endpoint URL for inference. Used for private deployments or TGI endpoints. Cannot be set if 'model' is a Hub ID.", - "title": "Base Url" - }, - "timeout": { - "anyOf": [ - { - "type": "number" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Maximum seconds to wait for a response. If None, waits indefinitely. Useful for slow model loading.", - "title": "Timeout" - }, - "headers": { - "anyOf": [ - { - "additionalProperties": { - "type": "string" - }, - "type": "object" - }, - { - "type": "null" - } - ], - "description": "Extra HTTP headers to send with requests. Overrides defaults like authorization and user-agent.", - "title": "Headers" - }, - "cookies": { - "anyOf": [ - { - "additionalProperties": { - "type": "string" - }, - "type": "object" - }, - { - "type": "null" - } - ], - "description": "Extra cookies to send with requests.", - "title": "Cookies" - }, - "proxies": { - "anyOf": [ - {}, - { - "type": "null" - } - ], - "default": null, - "description": "Proxy settings for HTTP requests. Use standard requests format.", - "title": "Proxies" - }, - "bill_to": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Billing account for requests. 
Only used for enterprise/organization billing.", - "title": "Bill To" - }, - "type": { - "const": "huggingface", - "default": "huggingface", - "description": "Type of the model, must always be 'huggingface'", - "title": "Type", - "type": "string" - }, - "name": { - "default": "", - "description": "Name of the model available through Hugging Face", - "title": "Name", - "type": "string" - } - }, - "title": "HFHubModelConfig", - "type": "object" - }, - "LLMMetadata": { - "description": "LLM configuration information.", - "properties": { - "client": { - "description": "LLM client used by the agent", - "title": "Client", - "type": "string" - }, - "provider": { - "description": "LLM provider used by the agent", - "title": "Provider", - "type": "string" - }, - "api": { - "default": "unknown", - "description": "API type used by the LLM client", - "title": "Api", - "type": "string" - }, - "model": { - "default": "unknown", - "description": "Model name or identifier", - "title": "Model", - "type": "string" - }, - "component_name": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Dapr component name for the LLM client", - "title": "Component Name" - }, - "base_url": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Base URL for the LLM API if applicable", - "title": "Base Url" - }, - "azure_endpoint": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Azure endpoint if using Azure OpenAI", - "title": "Azure Endpoint" - }, - "azure_deployment": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Azure deployment name if using Azure OpenAI", - "title": "Azure Deployment" - }, - "prompt_template": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Prompt template used by the agent", - "title": "Prompt Template" - }, - "prompty": { - "anyOf": [ - { - "$ref": "#/$defs/Prompty" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Prompty template name if used" - } - }, - "required": [ - "client", - "provider" - ], - "title": "LLMMetadata", - "type": "object" - }, - "MemoryMetadata": { - "description": "Memory configuration information.", - "properties": { - "type": { - "description": "Type of memory used by the agent", - "title": "Type", - "type": "string" - }, - "statestore": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Dapr state store component name for memory", - "title": "Statestore" - }, - "session_id": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Default session ID for the agent's memory", - "title": "Session Id" - } - }, - "required": [ - "type" - ], - "title": "MemoryMetadata", - "type": "object" - }, - "NVIDIAChatCompletionParams": { - "description": "Specific settings for the Chat Completion endpoint.", - "properties": { - "model": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "ID of the model to use", - "title": "Model" - }, - "temperature": { - "anyOf": [ - { - "maximum": 2.0, - "minimum": 0.0, - "type": "number" - }, - { - "type": "null" - } - ], - "default": 0, - "description": "Sampling temperature", - "title": "Temperature" - }, - "max_tokens": { - "anyOf": [ - { - "type": "integer" - }, - 
{ - "type": "null" - } - ], - "default": null, - "description": "Maximum number of tokens to generate. Can be None or a positive integer.", - "title": "Max Tokens" - }, - "top_p": { - "anyOf": [ - { - "maximum": 1.0, - "minimum": 0.0, - "type": "number" - }, - { - "type": "null" - } - ], - "default": 1.0, - "description": "Nucleus sampling probability mass", - "title": "Top P" - }, - "frequency_penalty": { - "anyOf": [ - { - "maximum": 2.0, - "minimum": -2.0, - "type": "number" - }, - { - "type": "null" - } - ], - "default": 0.0, - "description": "Frequency penalty", - "title": "Frequency Penalty" - }, - "presence_penalty": { - "anyOf": [ - { - "maximum": 2.0, - "minimum": -2.0, - "type": "number" - }, - { - "type": "null" - } - ], - "default": 0.0, - "description": "Presence penalty", - "title": "Presence Penalty" - }, - "stop": { - "anyOf": [ - { - "type": "string" - }, - { - "items": { - "type": "string" - }, - "type": "array" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Stop sequences", - "title": "Stop" - }, - "stream": { - "anyOf": [ - { - "type": "boolean" - }, - { - "type": "null" - } - ], - "default": false, - "description": "Whether to stream responses", - "title": "Stream" - }, - "logit_bias": { - "anyOf": [ - { - "additionalProperties": { - "type": "number" - }, - "type": "object" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Modify likelihood of specified tokens", - "title": "Logit Bias" - }, - "logprobs": { - "anyOf": [ - { - "type": "boolean" - }, - { - "type": "null" - } - ], - "default": false, - "description": "Whether to return log probabilities", - "title": "Logprobs" - }, - "top_logprobs": { - "anyOf": [ - { - "maximum": 20, - "minimum": 0, - "type": "integer" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Number of top log probabilities to return", - "title": "Top Logprobs" - }, - "n": { - "anyOf": [ - { - "maximum": 128, - "minimum": 1, - "type": "integer" - }, - { - "type": "null" - } - ], - "default": 1, - "description": "Number of chat completion choices to generate", - "title": "N" - }, - "tools": { - "anyOf": [ - { - "items": { - "additionalProperties": true, - "type": "object" - }, - "maxItems": 64, - "type": "array" - }, - { - "type": "null" - } - ], - "default": null, - "description": "List of tools the model may call", - "title": "Tools" - }, - "tool_choice": { - "anyOf": [ - { - "type": "string" - }, - { - "additionalProperties": true, - "type": "object" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Controls which tool is called", - "title": "Tool Choice" - } - }, - "title": "NVIDIAChatCompletionParams", - "type": "object" - }, - "NVIDIAModelConfig": { - "properties": { - "base_url": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": "https://integrate.api.nvidia.com/v1", - "description": "Base URL for the NVIDIA API", - "title": "Base Url" - }, - "api_key": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "API key to authenticate the NVIDIA API", - "title": "Api Key" - }, - "type": { - "const": "nvidia", - "default": "nvidia", - "description": "Type of the model, must always be 'nvidia'", - "title": "Type", - "type": "string" - }, - "name": { - "default": "", - "description": "Name of the model available through NVIDIA", - "title": "Name", - "type": "string" - } - }, - "title": "NVIDIAModelConfig", - "type": "object" - }, - "OpenAIChatCompletionParams": { - 
"description": "Specific settings for the Chat Completion endpoint.", - "properties": { - "model": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "ID of the model to use", - "title": "Model" - }, - "temperature": { - "anyOf": [ - { - "maximum": 2.0, - "minimum": 0.0, - "type": "number" - }, - { - "type": "null" - } - ], - "default": 0, - "description": "Sampling temperature", - "title": "Temperature" - }, - "max_tokens": { - "anyOf": [ - { - "type": "integer" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Maximum number of tokens to generate. Can be None or a positive integer.", - "title": "Max Tokens" - }, - "top_p": { - "anyOf": [ - { - "maximum": 1.0, - "minimum": 0.0, - "type": "number" - }, - { - "type": "null" - } - ], - "default": 1.0, - "description": "Nucleus sampling probability mass", - "title": "Top P" - }, - "frequency_penalty": { - "anyOf": [ - { - "maximum": 2.0, - "minimum": -2.0, - "type": "number" - }, - { - "type": "null" - } - ], - "default": 0.0, - "description": "Frequency penalty", - "title": "Frequency Penalty" - }, - "presence_penalty": { - "anyOf": [ - { - "maximum": 2.0, - "minimum": -2.0, - "type": "number" - }, - { - "type": "null" - } - ], - "default": 0.0, - "description": "Presence penalty", - "title": "Presence Penalty" - }, - "stop": { - "anyOf": [ - { - "type": "string" - }, - { - "items": { - "type": "string" - }, - "type": "array" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Stop sequences", - "title": "Stop" - }, - "stream": { - "anyOf": [ - { - "type": "boolean" - }, - { - "type": "null" - } - ], - "default": false, - "description": "Whether to stream responses", - "title": "Stream" - }, - "logit_bias": { - "anyOf": [ - { - "additionalProperties": { - "type": "number" - }, - "type": "object" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Modify likelihood of specified tokens", - "title": "Logit Bias" - }, - "logprobs": { - "anyOf": [ - { - "type": "boolean" - }, - { - "type": "null" - } - ], - "default": false, - "description": "Whether to return log probabilities", - "title": "Logprobs" - }, - "top_logprobs": { - "anyOf": [ - { - "maximum": 20, - "minimum": 0, - "type": "integer" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Number of top log probabilities to return", - "title": "Top Logprobs" - }, - "n": { - "anyOf": [ - { - "maximum": 128, - "minimum": 1, - "type": "integer" - }, - { - "type": "null" - } - ], - "default": 1, - "description": "Number of chat completion choices to generate", - "title": "N" - }, - "response_format": { - "anyOf": [ - { - "additionalProperties": { - "enum": [ - "text", - "json_object" - ], - "type": "string" - }, - "propertyNames": { - "const": "type" - }, - "type": "object" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Format of the response", - "title": "Response Format" - }, - "tools": { - "anyOf": [ - { - "items": { - "additionalProperties": true, - "type": "object" - }, - "maxItems": 64, - "type": "array" - }, - { - "type": "null" - } - ], - "default": null, - "description": "List of tools the model may call", - "title": "Tools" - }, - "tool_choice": { - "anyOf": [ - { - "type": "string" - }, - { - "additionalProperties": true, - "type": "object" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Controls which tool is called", - "title": "Tool Choice" - }, - "function_call": { - "anyOf": [ - { - "type": 
"string" - }, - { - "additionalProperties": true, - "type": "object" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Controls which function is called", - "title": "Function Call" - }, - "seed": { - "anyOf": [ - { - "type": "integer" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Seed for deterministic sampling", - "title": "Seed" - }, - "user": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Unique identifier representing the end-user", - "title": "User" - } - }, - "title": "OpenAIChatCompletionParams", - "type": "object" - }, - "OpenAIModelConfig": { - "properties": { - "base_url": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Base URL for the OpenAI API", - "title": "Base Url" - }, - "api_key": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "API key to authenticate the OpenAI API", - "title": "Api Key" - }, - "organization": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Organization name for OpenAI", - "title": "Organization" - }, - "project": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "OpenAI project name.", - "title": "Project" - }, - "type": { - "const": "openai", - "default": "openai", - "description": "Type of the model, must always be 'openai'", - "title": "Type", - "type": "string" - }, - "name": { - "default": "", - "description": "Name of the OpenAI model", - "title": "Name", - "type": "string" - } - }, - "title": "OpenAIModelConfig", - "type": "object" - }, - "OpenAITextCompletionParams": { - "description": "Specific configs for the text completions endpoint.", - "properties": { - "model": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "ID of the model to use", - "title": "Model" - }, - "temperature": { - "anyOf": [ - { - "maximum": 2.0, - "minimum": 0.0, - "type": "number" - }, - { - "type": "null" - } - ], - "default": 0, - "description": "Sampling temperature", - "title": "Temperature" - }, - "max_tokens": { - "anyOf": [ - { - "type": "integer" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Maximum number of tokens to generate. 
Can be None or a positive integer.", - "title": "Max Tokens" - }, - "top_p": { - "anyOf": [ - { - "maximum": 1.0, - "minimum": 0.0, - "type": "number" - }, - { - "type": "null" - } - ], - "default": 1.0, - "description": "Nucleus sampling probability mass", - "title": "Top P" - }, - "frequency_penalty": { - "anyOf": [ - { - "maximum": 2.0, - "minimum": -2.0, - "type": "number" - }, - { - "type": "null" - } - ], - "default": 0.0, - "description": "Frequency penalty", - "title": "Frequency Penalty" - }, - "presence_penalty": { - "anyOf": [ - { - "maximum": 2.0, - "minimum": -2.0, - "type": "number" - }, - { - "type": "null" - } - ], - "default": 0.0, - "description": "Presence penalty", - "title": "Presence Penalty" - }, - "stop": { - "anyOf": [ - { - "type": "string" - }, - { - "items": { - "type": "string" - }, - "type": "array" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Stop sequences", - "title": "Stop" - }, - "stream": { - "anyOf": [ - { - "type": "boolean" - }, - { - "type": "null" - } - ], - "default": false, - "description": "Whether to stream responses", - "title": "Stream" - }, - "best_of": { - "anyOf": [ - { - "minimum": 1, - "type": "integer" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Number of best completions to generate", - "title": "Best Of" - }, - "echo": { - "anyOf": [ - { - "type": "boolean" - }, - { - "type": "null" - } - ], - "default": false, - "description": "Whether to echo the prompt", - "title": "Echo" - }, - "logprobs": { - "anyOf": [ - { - "maximum": 5, - "minimum": 0, - "type": "integer" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Include log probabilities", - "title": "Logprobs" - }, - "suffix": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Suffix to append to the prompt", - "title": "Suffix" - } - }, - "title": "OpenAITextCompletionParams", - "type": "object" - }, - "Prompty": { - "description": "A class to handle loading and formatting of Prompty templates for language models workflows.", - "properties": { - "name": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": "", - "description": "Name of the Prompty file.", - "title": "Name" - }, - "description": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": "", - "description": "Description of the Prompty file.", - "title": "Description" - }, - "version": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": "1.0", - "description": "Version of the Prompty.", - "title": "Version" - }, - "authors": { - "anyOf": [ - { - "items": { - "type": "string" - }, - "type": "array" - }, - { - "type": "null" - } - ], - "default": [], - "description": "List of authors for the Prompty.", - "title": "Authors" - }, - "tags": { - "anyOf": [ - { - "items": { - "type": "string" - }, - "type": "array" - }, - { - "type": "null" - } - ], - "default": [], - "description": "Tags to categorize the Prompty.", - "title": "Tags" - }, - "model": { - "$ref": "#/$defs/PromptyModelConfig", - "description": "Model configuration. Can be either OpenAI or Azure OpenAI." - }, - "inputs": { - "additionalProperties": true, - "default": {}, - "description": "Input parameters for the Prompty. 
These define the expected inputs.", - "title": "Inputs", - "type": "object" - }, - "sample": { - "anyOf": [ - { - "type": "string" - }, - { - "additionalProperties": true, - "type": "object" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Sample input or the path to a sample file for testing the Prompty.", - "title": "Sample" - }, - "outputs": { - "anyOf": [ - { - "additionalProperties": true, - "type": "object" - }, - { - "type": "null" - } - ], - "default": {}, - "description": "Optional outputs for the Prompty. Defines expected output format.", - "title": "Outputs" - }, - "content": { - "description": "The prompt messages defined in the Prompty file.", - "title": "Content", - "type": "string" - } - }, - "required": [ - "model", - "content" - ], - "title": "Prompty", - "type": "object" - }, - "PromptyModelConfig": { - "properties": { - "api": { - "default": "chat", - "description": "The API to use, either 'chat' or 'completion'", - "enum": [ - "chat", - "completion" - ], - "title": "Api", - "type": "string" - }, - "configuration": { - "anyOf": [ - { - "$ref": "#/$defs/OpenAIModelConfig" - }, - { - "$ref": "#/$defs/AzureOpenAIModelConfig" - }, - { - "$ref": "#/$defs/HFHubModelConfig" - }, - { - "$ref": "#/$defs/NVIDIAModelConfig" - } - ], - "description": "Model configuration settings", - "title": "Configuration" - }, - "parameters": { - "anyOf": [ - { - "$ref": "#/$defs/OpenAITextCompletionParams" - }, - { - "$ref": "#/$defs/OpenAIChatCompletionParams" - }, - { - "$ref": "#/$defs/HFHubChatCompletionParams" - }, - { - "$ref": "#/$defs/NVIDIAChatCompletionParams" - } - ], - "description": "Parameters for the model request", - "title": "Parameters" - }, - "response": { - "default": "first", - "description": "Determines if full response or just the first one is returned", - "enum": [ - "first", - "full" - ], - "title": "Response", - "type": "string" - } - }, - "required": [ - "configuration", - "parameters" - ], - "title": "PromptyModelConfig", - "type": "object" - }, - "PubSubMetadata": { - "description": "Pub/Sub configuration information.", - "properties": { - "agent_name": { - "description": "Pub/Sub topic the agent subscribes to", - "title": "Agent Name", - "type": "string" - }, - "name": { - "description": "Pub/Sub component name", - "title": "Name", - "type": "string" - }, - "broadcast_topic": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Pub/Sub topic for broadcasting messages", - "title": "Broadcast Topic" - }, - "agent_topic": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Pub/Sub topic for direct agent messages", - "title": "Agent Topic" - } - }, - "required": [ - "agent_name", - "name" - ], - "title": "PubSubMetadata", - "type": "object" - }, - "RegistryMetadata": { - "description": "Registry configuration information.", - "properties": { - "statestore": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Name of the statestore component for the registry", - "title": "Statestore" - }, - "name": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Name of the team registry", - "title": "Name" - } - }, - "title": "RegistryMetadata", - "type": "object" - }, - "ToolMetadata": { - "description": "Metadata about a tool available to the agent.", - "properties": { - "tool_name": { - "description": "Name of the tool", - 
"title": "Tool Name", - "type": "string" - }, - "tool_description": { - "description": "Description of the tool's functionality", - "title": "Tool Description", - "type": "string" - }, - "tool_args": { - "description": "Arguments for the tool", - "title": "Tool Args", - "type": "string" - } - }, - "required": [ - "tool_name", - "tool_description", - "tool_args" - ], - "title": "ToolMetadata", - "type": "object" - } - }, - "description": "Schema for agent metadata including schema version.", - "properties": { - "schema_version": { - "description": "Version of the schema used for the agent metadata.", - "title": "Schema Version", - "type": "string" - }, - "agent": { - "$ref": "#/$defs/AgentMetadata", - "description": "Agent configuration and capabilities" - }, - "name": { - "description": "Name of the agent", - "title": "Name", - "type": "string" - }, - "registered_at": { - "description": "ISO 8601 timestamp of registration", - "title": "Registered At", - "type": "string" - }, - "pubsub": { - "anyOf": [ - { - "$ref": "#/$defs/PubSubMetadata" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Pub/sub configuration if enabled" - }, - "memory": { - "anyOf": [ - { - "$ref": "#/$defs/MemoryMetadata" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Memory configuration if enabled" - }, - "llm": { - "anyOf": [ - { - "$ref": "#/$defs/LLMMetadata" - }, - { - "type": "null" - } - ], - "default": null, - "description": "LLM configuration" - }, - "registry": { - "anyOf": [ - { - "$ref": "#/$defs/RegistryMetadata" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Registry configuration" - }, - "tools": { - "anyOf": [ - { - "items": { - "$ref": "#/$defs/ToolMetadata" - }, - "type": "array" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Available tools", - "title": "Tools" - }, - "max_iterations": { - "anyOf": [ - { - "type": "integer" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Maximum iterations for agent execution", - "title": "Max Iterations" - }, - "tool_choice": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Tool choice strategy", - "title": "Tool Choice" - }, - "agent_metadata": { - "anyOf": [ - { - "additionalProperties": true, - "type": "object" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Additional metadata about the agent", - "title": "Agent Metadata" - } - }, - "required": [ - "schema_version", - "agent", - "name", - "registered_at" - ], - "title": "AgentMetadataSchema", - "type": "object", - "$schema": "https://json-schema.org/draft/2020-12/schema", - "version": "0.10.7" -} \ No newline at end of file diff --git a/ext/dapr-ext-agent-core/LICENSE b/ext/dapr-ext-agent_core/LICENSE similarity index 100% rename from ext/dapr-ext-agent-core/LICENSE rename to ext/dapr-ext-agent_core/LICENSE diff --git a/ext/dapr-ext-agent-core/README.rst b/ext/dapr-ext-agent_core/README.rst similarity index 100% rename from ext/dapr-ext-agent-core/README.rst rename to ext/dapr-ext-agent_core/README.rst diff --git a/ext/dapr-ext-agent-core/dapr/ext/agent_core/__init__.py b/ext/dapr-ext-agent_core/dapr/ext/agent_core/__init__.py similarity index 100% rename from ext/dapr-ext-agent-core/dapr/ext/agent_core/__init__.py rename to ext/dapr-ext-agent_core/dapr/ext/agent_core/__init__.py diff --git a/ext/dapr-ext-agent-core/dapr/ext/agent_core/mapping/__init__.py 
b/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/__init__.py similarity index 100% rename from ext/dapr-ext-agent-core/dapr/ext/agent_core/mapping/__init__.py rename to ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/__init__.py diff --git a/ext/dapr-ext-agent-core/dapr/ext/agent_core/mapping/dapr_agents.py b/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/dapr_agents.py similarity index 100% rename from ext/dapr-ext-agent-core/dapr/ext/agent_core/mapping/dapr_agents.py rename to ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/dapr_agents.py diff --git a/ext/dapr-ext-agent-core/dapr/ext/agent_core/mapping/langgraph.py b/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/langgraph.py similarity index 100% rename from ext/dapr-ext-agent-core/dapr/ext/agent_core/mapping/langgraph.py rename to ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/langgraph.py diff --git a/ext/dapr-ext-agent-core/dapr/ext/agent_core/metadata.py b/ext/dapr-ext-agent_core/dapr/ext/agent_core/metadata.py similarity index 100% rename from ext/dapr-ext-agent-core/dapr/ext/agent_core/metadata.py rename to ext/dapr-ext-agent_core/dapr/ext/agent_core/metadata.py diff --git a/ext/dapr-ext-agent-core/dapr/ext/agent_core/py.typed b/ext/dapr-ext-agent_core/dapr/ext/agent_core/py.typed similarity index 100% rename from ext/dapr-ext-agent-core/dapr/ext/agent_core/py.typed rename to ext/dapr-ext-agent_core/dapr/ext/agent_core/py.typed diff --git a/ext/dapr-ext-agent-core/dapr/ext/agent_core/types.py b/ext/dapr-ext-agent_core/dapr/ext/agent_core/types.py similarity index 100% rename from ext/dapr-ext-agent-core/dapr/ext/agent_core/types.py rename to ext/dapr-ext-agent_core/dapr/ext/agent_core/types.py diff --git a/ext/dapr-ext-agent-core/dapr/ext/agent_core/version.py b/ext/dapr-ext-agent_core/dapr/ext/agent_core/version.py similarity index 100% rename from ext/dapr-ext-agent-core/dapr/ext/agent_core/version.py rename to ext/dapr-ext-agent_core/dapr/ext/agent_core/version.py diff --git a/ext/dapr-ext-agent_core/schemas/agent-metadata/index.json b/ext/dapr-ext-agent_core/schemas/agent-metadata/index.json new file mode 100644 index 000000000..aa27e2363 --- /dev/null +++ b/ext/dapr-ext-agent_core/schemas/agent-metadata/index.json @@ -0,0 +1,7 @@ +{ + "current_version": "1.17.0.dev", + "schema_url": "https://raw.githubusercontent.com/dapr/python-sdk/main/ext/dapr-ext-agent_core/schemas/agent-metadata/v1.17.0.dev.json", + "available_versions": [ + "v1.17.0.dev" + ] +} \ No newline at end of file diff --git a/ext/dapr-ext-agent_core/schemas/agent-metadata/latest.json b/ext/dapr-ext-agent_core/schemas/agent-metadata/latest.json new file mode 100644 index 000000000..2e56fd9a6 --- /dev/null +++ b/ext/dapr-ext-agent_core/schemas/agent-metadata/latest.json @@ -0,0 +1,461 @@ +{ + "$defs": { + "AgentMetadata": { + "description": "Metadata about an agent's configuration and capabilities.", + "properties": { + "appid": { + "description": "Dapr application ID of the agent", + "title": "Appid", + "type": "string" + }, + "type": { + "description": "Type of the agent (e.g., standalone, durable)", + "title": "Type", + "type": "string" + }, + "orchestrator": { + "default": false, + "description": "Indicates if the agent is an orchestrator", + "title": "Orchestrator", + "type": "boolean" + }, + "role": { + "default": "", + "description": "Role of the agent", + "title": "Role", + "type": "string" + }, + "goal": { + "default": "", + "description": "High-level objective of the agent", + "title": "Goal", + "type": "string" + }, + 
"instructions": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Instructions for the agent", + "title": "Instructions" + }, + "statestore": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Dapr state store component name used by the agent", + "title": "Statestore" + }, + "system_prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "System prompt guiding the agent's behavior", + "title": "System Prompt" + } + }, + "required": [ + "appid", + "type" + ], + "title": "AgentMetadata", + "type": "object" + }, + "LLMMetadata": { + "description": "LLM configuration information.", + "properties": { + "client": { + "description": "LLM client used by the agent", + "title": "Client", + "type": "string" + }, + "provider": { + "description": "LLM provider used by the agent", + "title": "Provider", + "type": "string" + }, + "api": { + "default": "unknown", + "description": "API type used by the LLM client", + "title": "Api", + "type": "string" + }, + "model": { + "default": "unknown", + "description": "Model name or identifier", + "title": "Model", + "type": "string" + }, + "component_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Dapr component name for the LLM client", + "title": "Component Name" + }, + "base_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Base URL for the LLM API if applicable", + "title": "Base Url" + }, + "azure_endpoint": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Azure endpoint if using Azure OpenAI", + "title": "Azure Endpoint" + }, + "azure_deployment": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Azure deployment name if using Azure OpenAI", + "title": "Azure Deployment" + }, + "prompt_template": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Prompt template used by the agent", + "title": "Prompt Template" + } + }, + "required": [ + "client", + "provider" + ], + "title": "LLMMetadata", + "type": "object" + }, + "MemoryMetadata": { + "description": "Memory configuration information.", + "properties": { + "type": { + "description": "Type of memory used by the agent", + "title": "Type", + "type": "string" + }, + "statestore": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Dapr state store component name for memory", + "title": "Statestore" + }, + "session_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Default session ID for the agent's memory", + "title": "Session Id" + } + }, + "required": [ + "type" + ], + "title": "MemoryMetadata", + "type": "object" + }, + "PubSubMetadata": { + "description": "Pub/Sub configuration information.", + "properties": { + "name": { + "description": "Pub/Sub component name", + "title": "Name", + "type": "string" + }, + "broadcast_topic": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Pub/Sub topic for broadcasting messages", + "title": "Broadcast Topic" + }, + "agent_topic": { + "anyOf": [ + { + "type": "string" + 
}, + { + "type": "null" + } + ], + "default": null, + "description": "Pub/Sub topic for direct agent messages", + "title": "Agent Topic" + } + }, + "required": [ + "name" + ], + "title": "PubSubMetadata", + "type": "object" + }, + "RegistryMetadata": { + "description": "Registry configuration information.", + "properties": { + "statestore": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Name of the statestore component for the registry", + "title": "Statestore" + }, + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Name of the team registry", + "title": "Name" + } + }, + "title": "RegistryMetadata", + "type": "object" + }, + "ToolMetadata": { + "description": "Metadata about a tool available to the agent.", + "properties": { + "tool_name": { + "description": "Name of the tool", + "title": "Tool Name", + "type": "string" + }, + "tool_description": { + "description": "Description of the tool's functionality", + "title": "Tool Description", + "type": "string" + }, + "tool_args": { + "description": "Arguments for the tool", + "title": "Tool Args", + "type": "string" + } + }, + "required": [ + "tool_name", + "tool_description", + "tool_args" + ], + "title": "ToolMetadata", + "type": "object" + } + }, + "description": "Schema for agent metadata including schema version.", + "properties": { + "schema_version": { + "description": "Version of the schema used for the agent metadata.", + "title": "Schema Version", + "type": "string" + }, + "agent": { + "$ref": "#/$defs/AgentMetadata", + "description": "Agent configuration and capabilities" + }, + "name": { + "description": "Name of the agent", + "title": "Name", + "type": "string" + }, + "registered_at": { + "description": "ISO 8601 timestamp of registration", + "title": "Registered At", + "type": "string" + }, + "pubsub": { + "anyOf": [ + { + "$ref": "#/$defs/PubSubMetadata" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Pub/sub configuration if enabled" + }, + "memory": { + "anyOf": [ + { + "$ref": "#/$defs/MemoryMetadata" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Memory configuration if enabled" + }, + "llm": { + "anyOf": [ + { + "$ref": "#/$defs/LLMMetadata" + }, + { + "type": "null" + } + ], + "default": null, + "description": "LLM configuration" + }, + "registry": { + "anyOf": [ + { + "$ref": "#/$defs/RegistryMetadata" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Registry configuration" + }, + "tools": { + "anyOf": [ + { + "items": { + "$ref": "#/$defs/ToolMetadata" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Available tools", + "title": "Tools" + }, + "max_iterations": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Maximum iterations for agent execution", + "title": "Max Iterations" + }, + "tool_choice": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Tool choice strategy", + "title": "Tool Choice" + }, + "agent_metadata": { + "anyOf": [ + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Additional metadata about the agent", + "title": "Agent Metadata" + } + }, + "required": [ + "schema_version", + "agent", + "name", + "registered_at" + ], + "title": "AgentMetadataSchema", + "type": 
"object", + "$schema": "https://json-schema.org/draft/2020-12/schema", + "version": "1.17.0.dev" +} \ No newline at end of file diff --git a/ext/dapr-ext-agent_core/schemas/agent-metadata/v1.17.0.dev.json b/ext/dapr-ext-agent_core/schemas/agent-metadata/v1.17.0.dev.json new file mode 100644 index 000000000..2e56fd9a6 --- /dev/null +++ b/ext/dapr-ext-agent_core/schemas/agent-metadata/v1.17.0.dev.json @@ -0,0 +1,461 @@ +{ + "$defs": { + "AgentMetadata": { + "description": "Metadata about an agent's configuration and capabilities.", + "properties": { + "appid": { + "description": "Dapr application ID of the agent", + "title": "Appid", + "type": "string" + }, + "type": { + "description": "Type of the agent (e.g., standalone, durable)", + "title": "Type", + "type": "string" + }, + "orchestrator": { + "default": false, + "description": "Indicates if the agent is an orchestrator", + "title": "Orchestrator", + "type": "boolean" + }, + "role": { + "default": "", + "description": "Role of the agent", + "title": "Role", + "type": "string" + }, + "goal": { + "default": "", + "description": "High-level objective of the agent", + "title": "Goal", + "type": "string" + }, + "instructions": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Instructions for the agent", + "title": "Instructions" + }, + "statestore": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Dapr state store component name used by the agent", + "title": "Statestore" + }, + "system_prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "System prompt guiding the agent's behavior", + "title": "System Prompt" + } + }, + "required": [ + "appid", + "type" + ], + "title": "AgentMetadata", + "type": "object" + }, + "LLMMetadata": { + "description": "LLM configuration information.", + "properties": { + "client": { + "description": "LLM client used by the agent", + "title": "Client", + "type": "string" + }, + "provider": { + "description": "LLM provider used by the agent", + "title": "Provider", + "type": "string" + }, + "api": { + "default": "unknown", + "description": "API type used by the LLM client", + "title": "Api", + "type": "string" + }, + "model": { + "default": "unknown", + "description": "Model name or identifier", + "title": "Model", + "type": "string" + }, + "component_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Dapr component name for the LLM client", + "title": "Component Name" + }, + "base_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Base URL for the LLM API if applicable", + "title": "Base Url" + }, + "azure_endpoint": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Azure endpoint if using Azure OpenAI", + "title": "Azure Endpoint" + }, + "azure_deployment": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Azure deployment name if using Azure OpenAI", + "title": "Azure Deployment" + }, + "prompt_template": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Prompt template used by the agent", + "title": "Prompt Template" + } + }, + "required": [ + "client", + "provider" + ], + "title": 
"LLMMetadata", + "type": "object" + }, + "MemoryMetadata": { + "description": "Memory configuration information.", + "properties": { + "type": { + "description": "Type of memory used by the agent", + "title": "Type", + "type": "string" + }, + "statestore": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Dapr state store component name for memory", + "title": "Statestore" + }, + "session_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Default session ID for the agent's memory", + "title": "Session Id" + } + }, + "required": [ + "type" + ], + "title": "MemoryMetadata", + "type": "object" + }, + "PubSubMetadata": { + "description": "Pub/Sub configuration information.", + "properties": { + "name": { + "description": "Pub/Sub component name", + "title": "Name", + "type": "string" + }, + "broadcast_topic": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Pub/Sub topic for broadcasting messages", + "title": "Broadcast Topic" + }, + "agent_topic": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Pub/Sub topic for direct agent messages", + "title": "Agent Topic" + } + }, + "required": [ + "name" + ], + "title": "PubSubMetadata", + "type": "object" + }, + "RegistryMetadata": { + "description": "Registry configuration information.", + "properties": { + "statestore": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Name of the statestore component for the registry", + "title": "Statestore" + }, + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Name of the team registry", + "title": "Name" + } + }, + "title": "RegistryMetadata", + "type": "object" + }, + "ToolMetadata": { + "description": "Metadata about a tool available to the agent.", + "properties": { + "tool_name": { + "description": "Name of the tool", + "title": "Tool Name", + "type": "string" + }, + "tool_description": { + "description": "Description of the tool's functionality", + "title": "Tool Description", + "type": "string" + }, + "tool_args": { + "description": "Arguments for the tool", + "title": "Tool Args", + "type": "string" + } + }, + "required": [ + "tool_name", + "tool_description", + "tool_args" + ], + "title": "ToolMetadata", + "type": "object" + } + }, + "description": "Schema for agent metadata including schema version.", + "properties": { + "schema_version": { + "description": "Version of the schema used for the agent metadata.", + "title": "Schema Version", + "type": "string" + }, + "agent": { + "$ref": "#/$defs/AgentMetadata", + "description": "Agent configuration and capabilities" + }, + "name": { + "description": "Name of the agent", + "title": "Name", + "type": "string" + }, + "registered_at": { + "description": "ISO 8601 timestamp of registration", + "title": "Registered At", + "type": "string" + }, + "pubsub": { + "anyOf": [ + { + "$ref": "#/$defs/PubSubMetadata" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Pub/sub configuration if enabled" + }, + "memory": { + "anyOf": [ + { + "$ref": "#/$defs/MemoryMetadata" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Memory configuration if enabled" + }, + "llm": { + "anyOf": [ + { + "$ref": "#/$defs/LLMMetadata" + }, + { + "type": "null" + } + ], + 
"default": null, + "description": "LLM configuration" + }, + "registry": { + "anyOf": [ + { + "$ref": "#/$defs/RegistryMetadata" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Registry configuration" + }, + "tools": { + "anyOf": [ + { + "items": { + "$ref": "#/$defs/ToolMetadata" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Available tools", + "title": "Tools" + }, + "max_iterations": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Maximum iterations for agent execution", + "title": "Max Iterations" + }, + "tool_choice": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Tool choice strategy", + "title": "Tool Choice" + }, + "agent_metadata": { + "anyOf": [ + { + "additionalProperties": true, + "type": "object" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Additional metadata about the agent", + "title": "Agent Metadata" + } + }, + "required": [ + "schema_version", + "agent", + "name", + "registered_at" + ], + "title": "AgentMetadataSchema", + "type": "object", + "$schema": "https://json-schema.org/draft/2020-12/schema", + "version": "1.17.0.dev" +} \ No newline at end of file diff --git a/ext/dapr-ext-agent-core/scripts/generate_schema.py b/ext/dapr-ext-agent_core/scripts/generate_schema.py similarity index 96% rename from ext/dapr-ext-agent-core/scripts/generate_schema.py rename to ext/dapr-ext-agent_core/scripts/generate_schema.py index 127595761..a6e0cce6e 100644 --- a/ext/dapr-ext-agent-core/scripts/generate_schema.py +++ b/ext/dapr-ext-agent_core/scripts/generate_schema.py @@ -10,7 +10,7 @@ def get_auto_version() -> str: """Get current package version automatically.""" try: - return version("dapr-agents") + return version("dapr-ext-agent_core") except PackageNotFoundError: return "0.0.0.dev0" @@ -47,7 +47,7 @@ def generate_schema(output_dir: Path, schema_version: Optional[str] = None): # Write index with all versions index: dict[Any, Any] = { "current_version": current_version, - "schema_url": f"https://raw.githubusercontent.com/dapr/python-sdk/main/ext/dapr-ext-agent-core/schemas/agent-metadata/v{current_version}.json", + "schema_url": f"https://raw.githubusercontent.com/dapr/python-sdk/main/ext/dapr-ext-agent_core/schemas/agent-metadata/v{current_version}.json", "available_versions": sorted( [f.stem for f in schema_dir.glob("v*.json")], reverse=True ), diff --git a/ext/dapr-ext-agent-core/setup.cfg b/ext/dapr-ext-agent_core/setup.cfg similarity index 94% rename from ext/dapr-ext-agent-core/setup.cfg rename to ext/dapr-ext-agent_core/setup.cfg index ba1e96e18..e0c746814 100644 --- a/ext/dapr-ext-agent-core/setup.cfg +++ b/ext/dapr-ext-agent_core/setup.cfg @@ -19,12 +19,13 @@ project_urls = Source = https://github.com/dapr/python-sdk [options] -python_requires = >=3.10 +python_requires = >=3.11 packages = find_namespace: include_package_data = True install_requires = dapr >= 1.17.0.dev dapr-agents >= 0.10.7 + pydantic >= 2.12.5 [options.packages.find] include = diff --git a/ext/dapr-ext-agent-core/setup.py b/ext/dapr-ext-agent_core/setup.py similarity index 100% rename from ext/dapr-ext-agent-core/setup.py rename to ext/dapr-ext-agent_core/setup.py From cacefae871336247e9a3c7dac41ed82d957e681a Mon Sep 17 00:00:00 2001 From: Casper Nielsen Date: Mon, 2 Feb 2026 16:28:07 +0100 Subject: [PATCH 04/13] feat: support extracting metadata from langgraph Signed-off-by: 
Casper Nielsen --- .../dapr/ext/agent_core/mapping/langgraph.py | 120 ++++++++++++++---- .../dapr/ext/agent_core/metadata.py | 2 +- ext/dapr-ext-agent_core/setup.cfg | 1 + 3 files changed, 95 insertions(+), 28 deletions(-) diff --git a/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/langgraph.py b/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/langgraph.py index 55d34f9cc..478b4a342 100644 --- a/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/langgraph.py +++ b/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/langgraph.py @@ -1,9 +1,9 @@ from datetime import datetime, timezone -import json import logging from typing import TYPE_CHECKING, Any, Dict, Optional from dapr.ext.agent_core.types import AgentMetadata, AgentMetadataSchema, LLMMetadata, MemoryMetadata, PubSubMetadata, RegistryMetadata, ToolMetadata +from langgraph.pregel._read import PregelNode if TYPE_CHECKING: from dapr.ext.langgraph import DaprCheckpointer @@ -14,19 +14,86 @@ class LangGraphMapper: def __init__(self) -> None: pass - def map_agent_metadata(self, agent: Any, schema_version: str) -> AgentMetadataSchema: + def _extract_provider(self, module_name: str) -> str: + """Extract provider name from module path.""" + module_lower = module_name.lower() + if 'openai' in module_lower and 'azure' not in module_lower: + return 'openai' + elif 'azure' in module_lower: + return 'azure_openai' + elif 'anthropic' in module_lower: + return 'anthropic' + elif 'ollama' in module_lower: + return 'ollama' + elif 'google' in module_lower or 'gemini' in module_lower: + return 'google' + elif 'cohere' in module_lower: + return 'cohere' + return 'unknown' - logger.info(f"LangGraph log vars: {vars(agent)}") - print(f"LangGraph print vars: {vars(agent)}") - logger.info(f"LangGraph log dir: {dir(agent)}") - print(f"LangGraph print dir: {dir(agent)}") + def map_agent_metadata(self, agent: Any, schema_version: str) -> AgentMetadataSchema: introspected_vars: Dict[str, Any] = vars(agent) # type: ignore - introspected_dir = dir(agent) + + nodes: Dict[str, object] = introspected_vars.get("nodes", {}) # type: ignore + + tools: list[Dict[str, Any]] = [] + llm_metadata: Optional[Dict[str, Any]] = None + system_prompt: Optional[str] = None + + for node_name, obj in nodes.items(): # type: ignore + if node_name == "__start__": + # We don't want to process the start node + continue + + if isinstance(obj, PregelNode): + node_vars = vars(obj) + if "bound" in node_vars.keys(): + bound = node_vars["bound"] + + # Check if it's a ToolNode + if hasattr(bound, "_tools_by_name"): + tools_by_name = getattr(bound, "_tools_by_name", {}) + tools.extend([ + { + "name": name, + "description": getattr(tool, "description", ""), + "args_schema": getattr(tool, "args_schema", {}), # TODO: See if we can extract the pydantic model + } + for name, tool in tools_by_name.items() + ]) + + # Check if it's an assistant RunnableCallable + elif type(bound).__name__ == "RunnableCallable": + logger.info(f"Node '{node_name}' is a RunnableCallable") + + func = getattr(bound, "func", None) + if func and hasattr(func, "__globals__"): + func_globals = func.__globals__ + + for _, global_value in func_globals.items(): + var_type = type(global_value).__name__ + var_module = type(global_value).__module__ + + if 'chat' in var_type.lower(): + model = getattr(global_value, 'model_name', None) or \ + getattr(global_value, 'model', None) + + if model and not llm_metadata: + llm_metadata = { + 'client': var_type, + 'provider': self._extract_provider(var_module), + 'model': model, + 
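+                                    # base_url is read defensively below; not every chat client exposes it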
'base_url': getattr(global_value, 'base_url', None), + } + + # Look for system message + elif var_type == 'SystemMessage': + content = getattr(global_value, 'content', None) + if content and not system_prompt: + system_prompt = content checkpointer: Optional["DaprCheckpointer"] = introspected_vars.get("checkpointer", None) # type: ignore - tools = introspected_vars.get("tools", []) # type: ignore - print(f"LangGraph tools: {tools}") return AgentMetadataSchema( schema_version=schema_version, @@ -34,10 +101,10 @@ def map_agent_metadata(self, agent: Any, schema_version: str) -> AgentMetadataSc appid="", type=type(agent).__name__, orchestrator=False, - role="", - goal="", + role="Assistant", + goal=system_prompt or "", instructions=[], - statestore=checkpointer.store_name if checkpointer else None, + statestore=checkpointer.store_name if checkpointer else None, # type: ignore system_prompt="", ), name=agent.get_name() if hasattr(agent, "get_name") else "", @@ -50,17 +117,17 @@ def map_agent_metadata(self, agent: Any, schema_version: str) -> AgentMetadataSc memory=MemoryMetadata( type="DaprCheckpointer", session_id=None, - statestore=checkpointer.store_name if checkpointer else None, + statestore=checkpointer.store_name if checkpointer else None, # type: ignore ), llm=LLMMetadata( - client="", - provider="unknown", - api="unknown", - model="unknown", + client=llm_metadata.get('client', '') if llm_metadata else '', + provider=llm_metadata.get('provider', 'unknown') if llm_metadata else 'unknown', + api="chat", + model=llm_metadata.get('model', 'unknown') if llm_metadata else 'unknown', component_name=None, - base_url=None, - azure_endpoint=None, - azure_deployment=None, + base_url=llm_metadata.get('base_url') if llm_metadata else None, + azure_endpoint=llm_metadata.get('azure_endpoint') if llm_metadata else None, + azure_deployment=llm_metadata.get('azure_deployment') if llm_metadata else None, prompt_template=None, ), registry=RegistryMetadata( @@ -69,14 +136,13 @@ def map_agent_metadata(self, agent: Any, schema_version: str) -> AgentMetadataSc ), tools=[ ToolMetadata( - tool_name="", - tool_description="", - tool_args=json.dumps({}) - if hasattr(tool, "args_schema") else "{}", + tool_name=tool.get("name", ""), + tool_description=tool.get("description", ""), + tool_args="", ) - for tool in getattr(agent, "tools", []) + for tool in tools ], - max_iterations=None, - tool_choice=None, + max_iterations=1, + tool_choice="auto", agent_metadata=None, ) diff --git a/ext/dapr-ext-agent_core/dapr/ext/agent_core/metadata.py b/ext/dapr-ext-agent_core/dapr/ext/agent_core/metadata.py index c2df9e88d..7b6017292 100644 --- a/ext/dapr-ext-agent_core/dapr/ext/agent_core/metadata.py +++ b/ext/dapr-ext-agent_core/dapr/ext/agent_core/metadata.py @@ -262,7 +262,7 @@ def _ensure_registry_initialized(self, *, key: str, meta: Dict[str, str]) -> Non key: Registry document key. meta: Dapr state metadata to use for the operation. 
""" - current, etag = self.registry_state.load_with_etag( # type: ignore[union-attr] + _, etag = self.registry_state.load_with_etag( # type: ignore[union-attr] key=key, default={}, state_metadata=meta, diff --git a/ext/dapr-ext-agent_core/setup.cfg b/ext/dapr-ext-agent_core/setup.cfg index e0c746814..bdd5fc193 100644 --- a/ext/dapr-ext-agent_core/setup.cfg +++ b/ext/dapr-ext-agent_core/setup.cfg @@ -26,6 +26,7 @@ install_requires = dapr >= 1.17.0.dev dapr-agents >= 0.10.7 pydantic >= 2.12.5 + langgraph >= 0.3.6 [options.packages.find] include = From 7ade3b1bf6056ad602246e5af7835928fb65c342 Mon Sep 17 00:00:00 2001 From: Casper Nielsen Date: Mon, 2 Feb 2026 16:54:39 +0100 Subject: [PATCH 05/13] feat: extract agent from gc references to automatically infer the agent object instead of passing Signed-off-by: Casper Nielsen --- examples/langgraph-checkpointer/agent.py | 1 - .../dapr/ext/agent_core/__init__.py | 3 + .../dapr/ext/agent_core/introspection.py | 75 +++++++++++++++++++ .../dapr/ext/agent_core/metadata.py | 24 ++++++ .../dapr/ext/langgraph/dapr_checkpointer.py | 15 ++-- 5 files changed, 108 insertions(+), 10 deletions(-) create mode 100644 ext/dapr-ext-agent_core/dapr/ext/agent_core/introspection.py diff --git a/examples/langgraph-checkpointer/agent.py b/examples/langgraph-checkpointer/agent.py index 9f4c90006..8ea98ae82 100644 --- a/examples/langgraph-checkpointer/agent.py +++ b/examples/langgraph-checkpointer/agent.py @@ -52,7 +52,6 @@ def assistant(state: MessagesState): memory = DaprCheckpointer(store_name='statestore', key_prefix='dapr') react_graph_memory = builder.compile(checkpointer=memory) -memory.set_agent(react_graph_memory) config = {'configurable': {'thread_id': '1'}} diff --git a/ext/dapr-ext-agent_core/dapr/ext/agent_core/__init__.py b/ext/dapr-ext-agent_core/dapr/ext/agent_core/__init__.py index e2039e296..bcfb6fccb 100644 --- a/ext/dapr-ext-agent_core/dapr/ext/agent_core/__init__.py +++ b/ext/dapr-ext-agent_core/dapr/ext/agent_core/__init__.py @@ -1,5 +1,6 @@ from .types import SupportedFrameworks, AgentMetadataSchema, AgentMetadata, LLMMetadata, PubSubMetadata, ToolMetadata, RegistryMetadata, MemoryMetadata from .metadata import AgentRegistryAdapter +from .introspection import find_agent_in_stack, detect_framework __all__ = [ "SupportedFrameworks", @@ -11,4 +12,6 @@ "RegistryMetadata", "MemoryMetadata", "AgentRegistryAdapter", + "find_agent_in_stack", + "detect_framework", ] \ No newline at end of file diff --git a/ext/dapr-ext-agent_core/dapr/ext/agent_core/introspection.py b/ext/dapr-ext-agent_core/dapr/ext/agent_core/introspection.py new file mode 100644 index 000000000..45c5f764c --- /dev/null +++ b/ext/dapr-ext-agent_core/dapr/ext/agent_core/introspection.py @@ -0,0 +1,75 @@ +""" +Copyright 2026 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + +import gc +import inspect +from typing import Any, Optional + + +def find_agent_in_stack() -> Optional[Any]: + """ + Walk up the call stack to find an agent/graph object. 
+ + Currently supports: + - LangGraph: CompiledStateGraph or SyncPregelLoop + + Returns: + The agent/graph object if found, None otherwise. + """ + # First, try to find in the stack + for frame_info in inspect.stack(): + frame_locals = frame_info.frame.f_locals + + # Look for 'self' in frame locals + if 'self' in frame_locals: + obj = frame_locals['self'] + obj_type = type(obj).__name__ + + # LangGraph support - CompiledStateGraph + if obj_type == 'CompiledStateGraph': + return obj + + # If we found a checkpointer, use gc to find the graph that references it + if obj_type == 'DaprCheckpointer': + # Use garbage collector to find objects referencing this checkpointer + referrers = gc.get_referrers(obj) + for ref in referrers: + ref_type = type(ref).__name__ + if ref_type == 'CompiledStateGraph': + return ref + + return None + + +def detect_framework(agent: Any) -> Optional[str]: + """ + Detect the framework type from an agent object. + + Args: + agent: The agent object to inspect. + + Returns: + Framework name string if detected, None otherwise. + """ + agent_type = type(agent).__name__ + agent_module = type(agent).__module__ + + # LangGraph + if agent_type == 'CompiledStateGraph' or 'langgraph' in agent_module: + return 'langgraph' + + # Dapr Agents + if 'dapr_agents' in agent_module: + return 'dapr_agents' + + return None diff --git a/ext/dapr-ext-agent_core/dapr/ext/agent_core/metadata.py b/ext/dapr-ext-agent_core/dapr/ext/agent_core/metadata.py index 7b6017292..052af8871 100644 --- a/ext/dapr-ext-agent_core/dapr/ext/agent_core/metadata.py +++ b/ext/dapr-ext-agent_core/dapr/ext/agent_core/metadata.py @@ -22,12 +22,36 @@ ) from dapr.ext.agent_core import SupportedFrameworks, AgentMetadataSchema +from dapr.ext.agent_core.introspection import find_agent_in_stack, detect_framework import dapr.ext.agent_core.mapping logger = logging.getLogger(__name__) class AgentRegistryAdapter: + @classmethod + def create_from_stack( + cls, registry: Optional[AgentRegistryConfig] = None + ) -> Optional['AgentRegistryAdapter']: + """ + Auto-detect and create an AgentRegistryAdapter by walking the call stack. + + Args: + registry: Optional registry configuration. If None, will attempt auto-discovery. + + Returns: + AgentRegistryAdapter instance if agent found, None otherwise. 
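+
+        Example (sketch; assumes a supported agent, such as a compiled LangGraph
+        graph, is somewhere on the current call stack):
+
+            adapter = AgentRegistryAdapter.create_from_stack(registry=None)
+            # adapter is None when no supported agent or framework is detected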
+ """ + agent = find_agent_in_stack() + if not agent: + return None + + framework = detect_framework(agent) + if not framework: + return None + + return cls(registry=registry, framework=framework, agent=agent) + def __init__(self, registry: Optional[AgentRegistryConfig], framework: str, agent: Any) -> None: self._registry = registry diff --git a/ext/dapr-ext-langgraph/dapr/ext/langgraph/dapr_checkpointer.py b/ext/dapr-ext-langgraph/dapr/ext/langgraph/dapr_checkpointer.py index c64e1b027..67b5cbffb 100644 --- a/ext/dapr-ext-langgraph/dapr/ext/langgraph/dapr_checkpointer.py +++ b/ext/dapr-ext-langgraph/dapr/ext/langgraph/dapr_checkpointer.py @@ -48,15 +48,8 @@ def __init__(self, store_name: str, key_prefix: str): self.serde = JsonPlusSerializer() self.client = DaprClient() self._key_cache: Dict[str, str] = {} - - - def set_agent(self, agent: Any) -> None: - self.registry_adapter = AgentRegistryAdapter( - registry=None, - framework='langgraph', - agent=agent, - ) - + self.registry_adapter: Optional[AgentRegistryAdapter] = None + self._registry_initialized = False # helper: construct Dapr key for a thread def _get_key(self, config: RunnableConfig) -> str: @@ -80,6 +73,10 @@ def put( metadata: CheckpointMetadata, new_versions: ChannelVersions, ) -> RunnableConfig: + if not self._registry_initialized: + self.registry_adapter = AgentRegistryAdapter.create_from_stack(registry=None) + self._registry_initialized = True + thread_id = config['configurable']['thread_id'] checkpoint_ns = config['configurable'].get('checkpoint_ns', '') config_checkpoint_id = config['configurable'].get('checkpoint_id', '') From 13bcc9dd53c50ae1c030fdac3005caad2665b4fe Mon Sep 17 00:00:00 2001 From: Casper Nielsen Date: Mon, 2 Feb 2026 20:52:43 +0100 Subject: [PATCH 06/13] feat(strands): add strands metadata shim Signed-off-by: Casper Nielsen --- .../dapr/ext/agent_core/introspection.py | 9 ++ .../dapr/ext/agent_core/mapping/__init__.py | 2 + .../dapr/ext/agent_core/mapping/strands.py | 71 ++++++++++ .../dapr/ext/agent_core/metadata.py | 1 + .../dapr/ext/agent_core/types.py | 1 + .../tests/test_strands_metadata.py | 121 ++++++++++++++++++ .../dapr/ext/strands/dapr_session_manager.py | 15 +++ 7 files changed, 220 insertions(+) create mode 100644 ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/strands.py create mode 100644 ext/dapr-ext-agent_core/tests/test_strands_metadata.py diff --git a/ext/dapr-ext-agent_core/dapr/ext/agent_core/introspection.py b/ext/dapr-ext-agent_core/dapr/ext/agent_core/introspection.py index 45c5f764c..560c8c91f 100644 --- a/ext/dapr-ext-agent_core/dapr/ext/agent_core/introspection.py +++ b/ext/dapr-ext-agent_core/dapr/ext/agent_core/introspection.py @@ -22,6 +22,7 @@ def find_agent_in_stack() -> Optional[Any]: Currently supports: - LangGraph: CompiledStateGraph or SyncPregelLoop + - Strands: DaprSessionManager Returns: The agent/graph object if found, None otherwise. 
@@ -39,6 +40,10 @@ def find_agent_in_stack() -> Optional[Any]: if obj_type == 'CompiledStateGraph': return obj + # Strands support - DaprSessionManager + if obj_type == 'DaprSessionManager': + return obj + # If we found a checkpointer, use gc to find the graph that references it if obj_type == 'DaprCheckpointer': # Use garbage collector to find objects referencing this checkpointer @@ -72,4 +77,8 @@ def detect_framework(agent: Any) -> Optional[str]: if 'dapr_agents' in agent_module: return 'dapr_agents' + # Strands + if agent_type == 'DaprSessionManager' or 'strands' in agent_module: + return 'strands' + return None diff --git a/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/__init__.py b/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/__init__.py index 6aee51ba4..d3de8e13f 100644 --- a/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/__init__.py +++ b/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/__init__.py @@ -1,7 +1,9 @@ from .dapr_agents import DaprAgentsMapper from .langgraph import LangGraphMapper +from .strands import StrandsMapper __all__ = [ "DaprAgentsMapper", "LangGraphMapper", + "StrandsMapper", ] \ No newline at end of file diff --git a/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/strands.py b/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/strands.py new file mode 100644 index 000000000..3376b864f --- /dev/null +++ b/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/strands.py @@ -0,0 +1,71 @@ +from datetime import datetime, timezone +import logging +from typing import TYPE_CHECKING, Any, Optional +from dapr.ext.agent_core.types import ( + AgentMetadata, + AgentMetadataSchema, + MemoryMetadata, + RegistryMetadata, +) + +if TYPE_CHECKING: + from dapr.ext.strands import DaprSessionManager + +logger = logging.getLogger(__name__) + + +class StrandsMapper: + def __init__(self) -> None: + pass + + def map_agent_metadata(self, agent: Any, schema_version: str) -> AgentMetadataSchema: + """ + Map Strands DaprSessionManager to AgentMetadataSchema. 
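+        Only session-level details (state store name and session id) can be
+        introspected from the session manager, so LLM, tool, and pub/sub
+        metadata are left unset.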
+ + Args: + agent: The DaprSessionManager instance + schema_version: Version of the schema + + Returns: + AgentMetadataSchema with extracted metadata + """ + session_manager: "DaprSessionManager" = agent + + # Extract state store name + state_store_name = getattr(session_manager, '_state_store_name', None) + session_id = getattr(session_manager, '_session_id', None) + + return AgentMetadataSchema( + schema_version=schema_version, + agent=AgentMetadata( + appid="", + type="Strands", + orchestrator=False, + role="Session Manager", + goal="Manages multi-agent sessions with distributed state storage", + instructions=[], + statestore=state_store_name, + system_prompt=None, + ), + name=f"strands-session-{session_id}" if session_id else "strands-session", + registered_at=datetime.now(timezone.utc).isoformat(), + pubsub=None, + memory=MemoryMetadata( + type="DaprSessionManager", + session_id=session_id, + statestore=state_store_name, + ), + llm=None, + registry=RegistryMetadata( + statestore=None, + name=None, + ), + tools=None, + max_iterations=None, + tool_choice=None, + agent_metadata={ + "framework": "strands", + "session_id": session_id, + "state_store": state_store_name, + }, + ) diff --git a/ext/dapr-ext-agent_core/dapr/ext/agent_core/metadata.py b/ext/dapr-ext-agent_core/dapr/ext/agent_core/metadata.py index 052af8871..a9844dd41 100644 --- a/ext/dapr-ext-agent_core/dapr/ext/agent_core/metadata.py +++ b/ext/dapr-ext-agent_core/dapr/ext/agent_core/metadata.py @@ -125,6 +125,7 @@ def _extract_metadata(self, agent: Any) -> AgentMetadataSchema: framework_mappers = { SupportedFrameworks.DAPR_AGENTS: dapr.ext.agent_core.mapping.DaprAgentsMapper().map_agent_metadata, SupportedFrameworks.LANGGRAPH: dapr.ext.agent_core.mapping.LangGraphMapper().map_agent_metadata, + SupportedFrameworks.STRANDS: dapr.ext.agent_core.mapping.StrandsMapper().map_agent_metadata, } mapper = framework_mappers.get(self._framework) diff --git a/ext/dapr-ext-agent_core/dapr/ext/agent_core/types.py b/ext/dapr-ext-agent_core/dapr/ext/agent_core/types.py index 33d0b7773..4a1b5f398 100644 --- a/ext/dapr-ext-agent_core/dapr/ext/agent_core/types.py +++ b/ext/dapr-ext-agent_core/dapr/ext/agent_core/types.py @@ -5,6 +5,7 @@ class SupportedFrameworks(StrEnum): DAPR_AGENTS = "dapr-agents" LANGGRAPH = "langgraph" + STRANDS = "strands" class AgentMetadata(BaseModel): diff --git a/ext/dapr-ext-agent_core/tests/test_strands_metadata.py b/ext/dapr-ext-agent_core/tests/test_strands_metadata.py new file mode 100644 index 000000000..b22c09176 --- /dev/null +++ b/ext/dapr-ext-agent_core/tests/test_strands_metadata.py @@ -0,0 +1,121 @@ +#!/usr/bin/env python3 +""" +Test script to verify Strands metadata integration with agent registry. 
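+Covers the SupportedFrameworks enum, StrandsMapper construction, metadata
+extraction from a mocked DaprSessionManager, and framework detection.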
+""" + +import sys +import os + +# Add the extensions to the path +sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'ext/dapr-ext-agent_core')) +sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'ext/dapr-ext-strands')) + +from dapr.ext.agent_core.types import SupportedFrameworks +from dapr.ext.agent_core.introspection import detect_framework +from dapr.ext.agent_core.mapping.strands import StrandsMapper + + +def test_supported_frameworks(): + """Test that STRANDS framework is in the enum.""" + print("✓ Testing SupportedFrameworks enum...") + assert hasattr(SupportedFrameworks, 'STRANDS'), "STRANDS not in SupportedFrameworks" + assert SupportedFrameworks.STRANDS.value == 'strands', "STRANDS value incorrect" + print(" ✓ STRANDS framework is supported") + return True + + +def test_mapper_instantiation(): + """Test that StrandsMapper can be instantiated.""" + print("✓ Testing StrandsMapper instantiation...") + mapper = StrandsMapper() + assert mapper is not None, "Failed to instantiate StrandsMapper" + print(" ✓ StrandsMapper instantiated successfully") + return True + + +def test_mapper_metadata_extraction(): + """Test that StrandsMapper can extract metadata from a mock object.""" + print("✓ Testing StrandsMapper metadata extraction...") + + # Create a mock DaprSessionManager-like object + class MockSessionManager: + def __init__(self): + self._state_store_name = "test-statestore" + self._session_id = "test-session-123" + + mock_manager = MockSessionManager() + mapper = StrandsMapper() + + metadata = mapper.map_agent_metadata(mock_manager, schema_version="1.0.0") + + assert metadata.schema_version == "1.0.0", "Schema version mismatch" + assert metadata.agent.type == "Strands", "Agent type mismatch" + assert metadata.agent.role == "Session Manager", "Agent role mismatch" + assert metadata.memory.type == "DaprSessionManager", "Memory type mismatch" + assert metadata.memory.session_id == "test-session-123", "Session ID mismatch" + assert metadata.memory.statestore == "test-statestore", "State store mismatch" + assert metadata.name == "strands-session-test-session-123", "Agent name mismatch" + + print(" ✓ Metadata extraction successful") + print(f" - Agent type: {metadata.agent.type}") + print(f" - Agent name: {metadata.name}") + print(f" - Memory type: {metadata.memory.type}") + print(f" - Session ID: {metadata.memory.session_id}") + print(f" - State store: {metadata.memory.statestore}") + return True + + +def test_framework_detection(): + """Test that detect_framework can identify a Strands object.""" + print("✓ Testing framework detection...") + + class MockSessionManager: + def __init__(self): + self._state_store_name = "test-statestore" + self._session_id = "test-session-123" + + MockSessionManager.__name__ = "DaprSessionManager" + mock_manager = MockSessionManager() + + framework = detect_framework(mock_manager) + assert framework == "strands", f"Expected 'strands', got '{framework}'" + + print(" ✓ Framework detection successful") + print(f" - Detected framework: {framework}") + return True + + +def main(): + """Run all tests.""" + print("\n" + "="*60) + print("Testing Strands Metadata Integration") + print("="*60 + "\n") + + tests = [ + test_supported_frameworks, + test_mapper_instantiation, + test_mapper_metadata_extraction, + test_framework_detection, + ] + + passed = 0 + failed = 0 + + for test in tests: + try: + if test(): + passed += 1 + except Exception as e: + print(f" ✗ Test failed: {e}") + failed += 1 + print() + + print("="*60) + print(f"Tests completed: 
{passed} passed, {failed} failed") + print("="*60 + "\n") + + return 0 if failed == 0 else 1 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/ext/dapr-ext-strands/dapr/ext/strands/dapr_session_manager.py b/ext/dapr-ext-strands/dapr/ext/strands/dapr_session_manager.py index c9a98ebdf..279d2562b 100644 --- a/ext/dapr-ext-strands/dapr/ext/strands/dapr_session_manager.py +++ b/ext/dapr-ext-strands/dapr/ext/strands/dapr_session_manager.py @@ -66,8 +66,12 @@ def __init__( self._ttl = ttl self._consistency = consistency self._owns_client = False + self._session_id = session_id super().__init__(session_id=session_id, session_repository=self) + + # Register with agent registry after initialization + self._register_with_agent_registry() @classmethod def from_address( @@ -549,3 +553,14 @@ def close(self) -> None: """Close the Dapr client if owned by this manager.""" if self._owns_client: self._dapr_client.close() + + def _register_with_agent_registry(self) -> None: + """Register this session manager with the agent registry.""" + try: + from dapr.ext.agent_core import AgentRegistryAdapter + + AgentRegistryAdapter.create_from_stack(registry=None) + except ImportError: + logger.debug("dapr-ext-agent_core not available, skipping registry registration") + except Exception as e: + logger.warning(f"Failed to register with agent registry: {e}") From 2fa0277117c2870aa4aff4bf96fd81a985dc796e Mon Sep 17 00:00:00 2001 From: Casper Nielsen Date: Mon, 2 Feb 2026 21:01:27 +0100 Subject: [PATCH 07/13] test: add comprehensive test suite for langgraph and strands Signed-off-by: Casper Nielsen --- ext/dapr-ext-agent_core/tests/__init__.py | 0 .../tests/test_dapr_agents_metadata.py | 276 ++++++++++++++++++ .../tests/test_introspection.py | 101 +++++++ .../tests/test_langgraph_metadata.py | 181 ++++++++++++ .../tests/test_strands_metadata.py | 262 ++++++++++------- ext/dapr-ext-agent_core/tests/test_types.py | 258 ++++++++++++++++ 6 files changed, 966 insertions(+), 112 deletions(-) create mode 100644 ext/dapr-ext-agent_core/tests/__init__.py create mode 100644 ext/dapr-ext-agent_core/tests/test_dapr_agents_metadata.py create mode 100644 ext/dapr-ext-agent_core/tests/test_introspection.py create mode 100644 ext/dapr-ext-agent_core/tests/test_langgraph_metadata.py create mode 100644 ext/dapr-ext-agent_core/tests/test_types.py diff --git a/ext/dapr-ext-agent_core/tests/__init__.py b/ext/dapr-ext-agent_core/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/ext/dapr-ext-agent_core/tests/test_dapr_agents_metadata.py b/ext/dapr-ext-agent_core/tests/test_dapr_agents_metadata.py new file mode 100644 index 000000000..13d7861b0 --- /dev/null +++ b/ext/dapr-ext-agent_core/tests/test_dapr_agents_metadata.py @@ -0,0 +1,276 @@ +# -*- coding: utf-8 -*- + +""" +Copyright 2026 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+""" + +import unittest + +from dapr.ext.agent_core.mapping.dapr_agents import DaprAgentsMapper + + +class MockProfile: + """Mock agent profile for testing.""" + + def __init__(self, role='assistant', goal='Help users', system_prompt='You are helpful.'): + self.role = role + self.goal = goal + self.instructions = ['Instruction 1', 'Instruction 2'] + self.system_prompt = system_prompt + + +class MockMemory: + """Mock memory for testing.""" + + def __init__(self, store_name='memory-store', session_id='session-1'): + self.store_name = store_name + self.session_id = session_id + + +class MockPubSub: + """Mock pubsub for testing.""" + + def __init__(self, pubsub_name='pubsub-component'): + self.pubsub_name = pubsub_name + self.broadcast_topic = 'broadcast' + self.agent_topic = 'agent-topic' + + +class MockLLM: + """Mock LLM for testing.""" + + def __init__(self): + self.provider = 'openai' + self.api = 'chat' + self.model = 'gpt-4' + self.component_name = None + self.base_url = None + self.azure_endpoint = None + self.azure_deployment = None + self.prompt_template = None + + +class MockTool: + """Mock tool for testing.""" + + def __init__(self, name='search', description='Search tool'): + self.name = name + self.description = description + self.args_schema = {'query': 'string'} + + +class MockRegistry: + """Mock registry for testing.""" + + def __init__(self): + self.store = MockRegistryStore() + self.team_name = 'team-alpha' + + +class MockRegistryStore: + """Mock registry store for testing.""" + + def __init__(self): + self.store_name = 'registry-store' + + +class MockExecution: + """Mock execution config for testing.""" + + def __init__(self): + self.max_iterations = 10 + self.tool_choice = 'auto' + + +class MockDaprAgent: + """Mock Dapr agent for testing.""" + + def __init__( + self, + name='test-agent', + appid='test-app', + profile=None, + memory=None, + pubsub=None, + llm=None, + tools=None, + registry=None, + execution=None, + ): + self.name = name + self.appid = appid + self.profile = profile + self.memory = memory + self.pubsub = pubsub + self.llm = llm + self.tools = tools or [] + self._registry = registry + self.execution = execution + self.agent_metadata = {'custom_key': 'custom_value'} + + +class DaprAgentsMapperTest(unittest.TestCase): + """Tests for DaprAgentsMapper metadata extraction.""" + + def test_mapper_instantiation(self): + """Test that DaprAgentsMapper can be instantiated.""" + mapper = DaprAgentsMapper() + self.assertIsNotNone(mapper) + + def test_minimal_agent_metadata_extraction(self): + """Test metadata extraction with minimal agent configuration.""" + agent = MockDaprAgent(name='minimal-agent', appid='app-1') + mapper = DaprAgentsMapper() + + metadata = mapper.map_agent_metadata(agent, schema_version='1.0.0') + + self.assertEqual(metadata.schema_version, '1.0.0') + self.assertEqual(metadata.name, 'minimal-agent') + self.assertEqual(metadata.agent.appid, 'app-1') + self.assertEqual(metadata.agent.type, 'MockDaprAgent') + + def test_full_agent_metadata_extraction(self): + """Test metadata extraction with full agent configuration.""" + agent = MockDaprAgent( + name='full-agent', + appid='app-2', + profile=MockProfile(role='coordinator', goal='Coordinate tasks'), + memory=MockMemory(store_name='mem-store', session_id='sess-1'), + pubsub=MockPubSub(pubsub_name='ps-component'), + llm=MockLLM(), + tools=[MockTool(name='tool1', description='Tool 1')], + registry=MockRegistry(), + execution=MockExecution(), + ) + mapper = DaprAgentsMapper() + + metadata = 
mapper.map_agent_metadata(agent, schema_version='1.0.0') + + # Agent metadata + self.assertEqual(metadata.agent.role, 'coordinator') + self.assertEqual(metadata.agent.goal, 'Coordinate tasks') + self.assertEqual(metadata.agent.statestore, 'mem-store') + self.assertEqual(metadata.agent.system_prompt, 'You are helpful.') + + # Memory metadata + self.assertEqual(metadata.memory.type, 'MockMemory') + self.assertEqual(metadata.memory.statestore, 'mem-store') + self.assertEqual(metadata.memory.session_id, 'sess-1') + + # PubSub metadata + self.assertEqual(metadata.pubsub.name, 'ps-component') + self.assertEqual(metadata.pubsub.broadcast_topic, 'broadcast') + self.assertEqual(metadata.pubsub.agent_topic, 'agent-topic') + + # LLM metadata + self.assertEqual(metadata.llm.provider, 'openai') + self.assertEqual(metadata.llm.model, 'gpt-4') + + # Tools + self.assertEqual(len(metadata.tools), 1) + self.assertEqual(metadata.tools[0].tool_name, 'tool1') + + # Registry + self.assertEqual(metadata.registry.name, 'team-alpha') + self.assertEqual(metadata.registry.statestore, 'registry-store') + + # Execution + self.assertEqual(metadata.max_iterations, 10) + self.assertEqual(metadata.tool_choice, 'auto') + + def test_agent_metadata_field(self): + """Test agent_metadata field is preserved.""" + agent = MockDaprAgent() + mapper = DaprAgentsMapper() + + metadata = mapper.map_agent_metadata(agent, schema_version='1.0.0') + + self.assertEqual(metadata.agent_metadata, {'custom_key': 'custom_value'}) + + def test_profile_instructions_extraction(self): + """Test profile instructions are extracted.""" + agent = MockDaprAgent(profile=MockProfile()) + mapper = DaprAgentsMapper() + + metadata = mapper.map_agent_metadata(agent, schema_version='1.0.0') + + self.assertEqual(metadata.agent.instructions, ['Instruction 1', 'Instruction 2']) + + def test_registered_at_is_set(self): + """Test registered_at timestamp is set.""" + agent = MockDaprAgent() + mapper = DaprAgentsMapper() + + metadata = mapper.map_agent_metadata(agent, schema_version='1.0.0') + + self.assertIsNotNone(metadata.registered_at) + self.assertIn('T', metadata.registered_at) + + def test_empty_tools_list(self): + """Test empty tools list is handled.""" + agent = MockDaprAgent(tools=[]) + mapper = DaprAgentsMapper() + + metadata = mapper.map_agent_metadata(agent, schema_version='1.0.0') + + self.assertEqual(metadata.tools, []) + + def test_multiple_tools(self): + """Test multiple tools are extracted.""" + tools = [ + MockTool(name='tool1', description='First tool'), + MockTool(name='tool2', description='Second tool'), + ] + agent = MockDaprAgent(tools=tools) + mapper = DaprAgentsMapper() + + metadata = mapper.map_agent_metadata(agent, schema_version='1.0.0') + + self.assertEqual(len(metadata.tools), 2) + tool_names = [t.tool_name for t in metadata.tools] + self.assertIn('tool1', tool_names) + self.assertIn('tool2', tool_names) + + def test_none_profile(self): + """Test handling of None profile.""" + agent = MockDaprAgent(profile=None) + mapper = DaprAgentsMapper() + + metadata = mapper.map_agent_metadata(agent, schema_version='1.0.0') + + self.assertEqual(metadata.agent.role, '') + self.assertEqual(metadata.agent.goal, '') + self.assertEqual(metadata.agent.system_prompt, '') + + def test_none_memory(self): + """Test handling of None memory.""" + agent = MockDaprAgent(memory=None) + mapper = DaprAgentsMapper() + + metadata = mapper.map_agent_metadata(agent, schema_version='1.0.0') + + self.assertEqual(metadata.memory.type, '') + 
self.assertIsNone(metadata.memory.statestore) + + def test_none_llm(self): + """Test handling of None LLM.""" + agent = MockDaprAgent(llm=None) + mapper = DaprAgentsMapper() + + metadata = mapper.map_agent_metadata(agent, schema_version='1.0.0') + + self.assertEqual(metadata.llm.client, '') + self.assertEqual(metadata.llm.provider, 'unknown') + + +if __name__ == '__main__': + unittest.main() diff --git a/ext/dapr-ext-agent_core/tests/test_introspection.py b/ext/dapr-ext-agent_core/tests/test_introspection.py new file mode 100644 index 000000000..1b715a38e --- /dev/null +++ b/ext/dapr-ext-agent_core/tests/test_introspection.py @@ -0,0 +1,101 @@ +# -*- coding: utf-8 -*- + +""" +Copyright 2026 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + +import unittest + +from dapr.ext.agent_core.introspection import detect_framework + + +class DetectFrameworkTest(unittest.TestCase): + """Tests for detect_framework function.""" + + def test_detect_langgraph_by_class_name(self): + """Test detection of LangGraph by CompiledStateGraph class name.""" + + class CompiledStateGraph: + pass + + agent = CompiledStateGraph() + result = detect_framework(agent) + self.assertEqual(result, 'langgraph') + + def test_detect_langgraph_by_module(self): + """Test detection of LangGraph by module path.""" + + class MockGraph: + pass + + MockGraph.__module__ = 'langgraph.graph.state' + agent = MockGraph() + result = detect_framework(agent) + self.assertEqual(result, 'langgraph') + + def test_detect_dapr_agents_by_module(self): + """Test detection of dapr-agents by module path.""" + + class MockAgent: + pass + + MockAgent.__module__ = 'dapr_agents.agents.base' + agent = MockAgent() + result = detect_framework(agent) + self.assertEqual(result, 'dapr_agents') + + def test_detect_strands_by_class_name(self): + """Test detection of Strands by DaprSessionManager class name.""" + + class DaprSessionManager: + pass + + agent = DaprSessionManager() + result = detect_framework(agent) + self.assertEqual(result, 'strands') + + def test_detect_strands_by_module(self): + """Test detection of Strands by module path.""" + + class MockSessionManager: + pass + + MockSessionManager.__module__ = 'strands.session.manager' + agent = MockSessionManager() + result = detect_framework(agent) + self.assertEqual(result, 'strands') + + def test_detect_unknown_framework(self): + """Test detection returns None for unknown frameworks.""" + + class UnknownAgent: + pass + + UnknownAgent.__module__ = 'some.unknown.module' + agent = UnknownAgent() + result = detect_framework(agent) + self.assertIsNone(result) + + def test_detect_builtin_object(self): + """Test detection returns None for builtin objects.""" + result = detect_framework('string') + self.assertIsNone(result) + + result = detect_framework(42) + self.assertIsNone(result) + + result = detect_framework([1, 2, 3]) + self.assertIsNone(result) + + +if __name__ == '__main__': + unittest.main() diff --git a/ext/dapr-ext-agent_core/tests/test_langgraph_metadata.py 
b/ext/dapr-ext-agent_core/tests/test_langgraph_metadata.py new file mode 100644 index 000000000..8c966ca66 --- /dev/null +++ b/ext/dapr-ext-agent_core/tests/test_langgraph_metadata.py @@ -0,0 +1,181 @@ +# -*- coding: utf-8 -*- + +""" +Copyright 2026 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + +import unittest +from unittest import mock + +from dapr.ext.agent_core.mapping.langgraph import LangGraphMapper + + +class MockCheckpointer: + """Mock DaprCheckpointer for testing.""" + + def __init__(self, store_name='test-store'): + self.store_name = store_name + + +class MockCompiledStateGraph: + """Mock CompiledStateGraph for testing.""" + + def __init__( + self, + name='test-graph', + checkpointer=None, + nodes=None, + ): + self._name = name + self.checkpointer = checkpointer + self.nodes = nodes or {} + + def get_name(self): + return self._name + + +class LangGraphMapperTest(unittest.TestCase): + """Tests for LangGraphMapper metadata extraction.""" + + def test_mapper_instantiation(self): + """Test that LangGraphMapper can be instantiated.""" + mapper = LangGraphMapper() + self.assertIsNotNone(mapper) + + @mock.patch('dapr.ext.agent_core.mapping.langgraph.PregelNode') + def test_basic_metadata_extraction(self, mock_pregel_node): + """Test basic metadata extraction from a mock graph.""" + checkpointer = MockCheckpointer(store_name='my-store') + graph = MockCompiledStateGraph( + name='my-graph', + checkpointer=checkpointer, + nodes={'__start__': None}, + ) + + mapper = LangGraphMapper() + metadata = mapper.map_agent_metadata(graph, schema_version='1.0.0') + + self.assertEqual(metadata.schema_version, '1.0.0') + self.assertEqual(metadata.agent.type, 'MockCompiledStateGraph') + self.assertEqual(metadata.name, 'my-graph') + self.assertEqual(metadata.memory.type, 'DaprCheckpointer') + self.assertEqual(metadata.memory.statestore, 'my-store') + + @mock.patch('dapr.ext.agent_core.mapping.langgraph.PregelNode') + def test_metadata_without_checkpointer(self, mock_pregel_node): + """Test metadata extraction without a checkpointer.""" + graph = MockCompiledStateGraph( + name='no-checkpointer-graph', + checkpointer=None, + nodes={}, + ) + + mapper = LangGraphMapper() + metadata = mapper.map_agent_metadata(graph, schema_version='1.0.0') + + self.assertIsNone(metadata.memory.statestore) + self.assertIsNone(metadata.agent.statestore) + + @mock.patch('dapr.ext.agent_core.mapping.langgraph.PregelNode') + def test_metadata_agent_role_defaults(self, mock_pregel_node): + """Test agent metadata default values.""" + graph = MockCompiledStateGraph(name='test') + + mapper = LangGraphMapper() + metadata = mapper.map_agent_metadata(graph, schema_version='1.0.0') + + self.assertEqual(metadata.agent.role, 'Assistant') + self.assertFalse(metadata.agent.orchestrator) + self.assertEqual(metadata.agent.appid, '') + + @mock.patch('dapr.ext.agent_core.mapping.langgraph.PregelNode') + def test_metadata_llm_defaults(self, mock_pregel_node): + """Test LLM metadata defaults when no LLM is detected.""" + graph = 
MockCompiledStateGraph(name='test') + + mapper = LangGraphMapper() + metadata = mapper.map_agent_metadata(graph, schema_version='1.0.0') + + self.assertEqual(metadata.llm.client, '') + self.assertEqual(metadata.llm.provider, 'unknown') + self.assertEqual(metadata.llm.model, 'unknown') + + @mock.patch('dapr.ext.agent_core.mapping.langgraph.PregelNode') + def test_metadata_pubsub_defaults(self, mock_pregel_node): + """Test PubSub metadata defaults.""" + graph = MockCompiledStateGraph(name='test') + + mapper = LangGraphMapper() + metadata = mapper.map_agent_metadata(graph, schema_version='1.0.0') + + self.assertEqual(metadata.pubsub.name, '') + self.assertIsNone(metadata.pubsub.broadcast_topic) + self.assertIsNone(metadata.pubsub.agent_topic) + + @mock.patch('dapr.ext.agent_core.mapping.langgraph.PregelNode') + def test_metadata_registered_at_is_set(self, mock_pregel_node): + """Test registered_at timestamp is set.""" + graph = MockCompiledStateGraph(name='test') + + mapper = LangGraphMapper() + metadata = mapper.map_agent_metadata(graph, schema_version='1.0.0') + + self.assertIsNotNone(metadata.registered_at) + self.assertIn('T', metadata.registered_at) + + +class LangGraphProviderExtractionTest(unittest.TestCase): + """Tests for LLM provider extraction.""" + + def test_extract_openai_provider(self): + """Test OpenAI provider extraction.""" + mapper = LangGraphMapper() + self.assertEqual(mapper._extract_provider('langchain_openai.chat'), 'openai') + + def test_extract_azure_provider(self): + """Test Azure OpenAI provider extraction.""" + mapper = LangGraphMapper() + self.assertEqual(mapper._extract_provider('langchain.azure_openai'), 'azure_openai') + + def test_extract_anthropic_provider(self): + """Test Anthropic provider extraction.""" + mapper = LangGraphMapper() + self.assertEqual(mapper._extract_provider('langchain_anthropic.chat'), 'anthropic') + + def test_extract_ollama_provider(self): + """Test Ollama provider extraction.""" + mapper = LangGraphMapper() + self.assertEqual(mapper._extract_provider('langchain_ollama.llms'), 'ollama') + + def test_extract_google_provider(self): + """Test Google provider extraction.""" + mapper = LangGraphMapper() + self.assertEqual(mapper._extract_provider('langchain_google.genai'), 'google') + + def test_extract_gemini_provider(self): + """Test Gemini provider extraction.""" + mapper = LangGraphMapper() + self.assertEqual(mapper._extract_provider('langchain_gemini.chat'), 'google') + + def test_extract_cohere_provider(self): + """Test Cohere provider extraction.""" + mapper = LangGraphMapper() + self.assertEqual(mapper._extract_provider('langchain_cohere.chat'), 'cohere') + + def test_extract_unknown_provider(self): + """Test unknown provider returns 'unknown'.""" + mapper = LangGraphMapper() + self.assertEqual(mapper._extract_provider('some.unknown.module'), 'unknown') + + +if __name__ == '__main__': + unittest.main() diff --git a/ext/dapr-ext-agent_core/tests/test_strands_metadata.py b/ext/dapr-ext-agent_core/tests/test_strands_metadata.py index b22c09176..3df90c4ea 100644 --- a/ext/dapr-ext-agent_core/tests/test_strands_metadata.py +++ b/ext/dapr-ext-agent_core/tests/test_strands_metadata.py @@ -1,121 +1,159 @@ -#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + """ -Test script to verify Strands metadata integration with agent registry. +Copyright 2026 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. """ -import sys -import os - -# Add the extensions to the path -sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'ext/dapr-ext-agent_core')) -sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'ext/dapr-ext-strands')) +import unittest from dapr.ext.agent_core.types import SupportedFrameworks from dapr.ext.agent_core.introspection import detect_framework from dapr.ext.agent_core.mapping.strands import StrandsMapper -def test_supported_frameworks(): - """Test that STRANDS framework is in the enum.""" - print("✓ Testing SupportedFrameworks enum...") - assert hasattr(SupportedFrameworks, 'STRANDS'), "STRANDS not in SupportedFrameworks" - assert SupportedFrameworks.STRANDS.value == 'strands', "STRANDS value incorrect" - print(" ✓ STRANDS framework is supported") - return True - - -def test_mapper_instantiation(): - """Test that StrandsMapper can be instantiated.""" - print("✓ Testing StrandsMapper instantiation...") - mapper = StrandsMapper() - assert mapper is not None, "Failed to instantiate StrandsMapper" - print(" ✓ StrandsMapper instantiated successfully") - return True - - -def test_mapper_metadata_extraction(): - """Test that StrandsMapper can extract metadata from a mock object.""" - print("✓ Testing StrandsMapper metadata extraction...") - - # Create a mock DaprSessionManager-like object - class MockSessionManager: - def __init__(self): - self._state_store_name = "test-statestore" - self._session_id = "test-session-123" - - mock_manager = MockSessionManager() - mapper = StrandsMapper() - - metadata = mapper.map_agent_metadata(mock_manager, schema_version="1.0.0") - - assert metadata.schema_version == "1.0.0", "Schema version mismatch" - assert metadata.agent.type == "Strands", "Agent type mismatch" - assert metadata.agent.role == "Session Manager", "Agent role mismatch" - assert metadata.memory.type == "DaprSessionManager", "Memory type mismatch" - assert metadata.memory.session_id == "test-session-123", "Session ID mismatch" - assert metadata.memory.statestore == "test-statestore", "State store mismatch" - assert metadata.name == "strands-session-test-session-123", "Agent name mismatch" - - print(" ✓ Metadata extraction successful") - print(f" - Agent type: {metadata.agent.type}") - print(f" - Agent name: {metadata.name}") - print(f" - Memory type: {metadata.memory.type}") - print(f" - Session ID: {metadata.memory.session_id}") - print(f" - State store: {metadata.memory.statestore}") - return True - - -def test_framework_detection(): - """Test that detect_framework can identify a Strands object.""" - print("✓ Testing framework detection...") - - class MockSessionManager: - def __init__(self): - self._state_store_name = "test-statestore" - self._session_id = "test-session-123" - - MockSessionManager.__name__ = "DaprSessionManager" - mock_manager = MockSessionManager() - - framework = detect_framework(mock_manager) - assert framework == "strands", f"Expected 'strands', got '{framework}'" - - print(" ✓ Framework detection successful") - print(f" - Detected framework: {framework}") - return True - - -def main(): - """Run all tests.""" - print("\n" + "="*60) - print("Testing Strands 
Metadata Integration") - print("="*60 + "\n") - - tests = [ - test_supported_frameworks, - test_mapper_instantiation, - test_mapper_metadata_extraction, - test_framework_detection, - ] - - passed = 0 - failed = 0 - - for test in tests: - try: - if test(): - passed += 1 - except Exception as e: - print(f" ✗ Test failed: {e}") - failed += 1 - print() - - print("="*60) - print(f"Tests completed: {passed} passed, {failed} failed") - print("="*60 + "\n") - - return 0 if failed == 0 else 1 - - -if __name__ == "__main__": - sys.exit(main()) +class MockSessionManager: + """Mock DaprSessionManager for testing.""" + + def __init__(self, state_store_name='test-statestore', session_id='test-session-123'): + self._state_store_name = state_store_name + self._session_id = session_id + + +class StrandsMapperTest(unittest.TestCase): + """Tests for StrandsMapper metadata extraction.""" + + def test_mapper_instantiation(self): + """Test that StrandsMapper can be instantiated.""" + mapper = StrandsMapper() + self.assertIsNotNone(mapper) + + def test_metadata_extraction_basic(self): + """Test basic metadata extraction from a mock session manager.""" + mock_manager = MockSessionManager() + mapper = StrandsMapper() + + metadata = mapper.map_agent_metadata(mock_manager, schema_version='1.0.0') + + self.assertEqual(metadata.schema_version, '1.0.0') + self.assertEqual(metadata.agent.type, 'Strands') + self.assertEqual(metadata.agent.role, 'Session Manager') + self.assertEqual(metadata.agent.orchestrator, False) + self.assertEqual( + metadata.agent.goal, 'Manages multi-agent sessions with distributed state storage' + ) + + def test_metadata_memory_extraction(self): + """Test memory metadata extraction.""" + mock_manager = MockSessionManager( + state_store_name='custom-store', session_id='session-456' + ) + mapper = StrandsMapper() + + metadata = mapper.map_agent_metadata(mock_manager, schema_version='1.0.0') + + self.assertEqual(metadata.memory.type, 'DaprSessionManager') + self.assertEqual(metadata.memory.session_id, 'session-456') + self.assertEqual(metadata.memory.statestore, 'custom-store') + + def test_metadata_name_generation(self): + """Test agent name generation with session ID.""" + mock_manager = MockSessionManager(session_id='my-session') + mapper = StrandsMapper() + + metadata = mapper.map_agent_metadata(mock_manager, schema_version='1.0.0') + + self.assertEqual(metadata.name, 'strands-session-my-session') + + def test_metadata_name_without_session_id(self): + """Test agent name generation without session ID.""" + mock_manager = MockSessionManager(session_id=None) + mapper = StrandsMapper() + + metadata = mapper.map_agent_metadata(mock_manager, schema_version='1.0.0') + + self.assertEqual(metadata.name, 'strands-session') + + def test_metadata_agent_metadata_field(self): + """Test agent_metadata field contains framework info.""" + mock_manager = MockSessionManager( + state_store_name='store1', session_id='sess1' + ) + mapper = StrandsMapper() + + metadata = mapper.map_agent_metadata(mock_manager, schema_version='1.0.0') + + self.assertEqual(metadata.agent_metadata['framework'], 'strands') + self.assertEqual(metadata.agent_metadata['session_id'], 'sess1') + self.assertEqual(metadata.agent_metadata['state_store'], 'store1') + + def test_metadata_registry_defaults(self): + """Test registry metadata has correct defaults.""" + mock_manager = MockSessionManager() + mapper = StrandsMapper() + + metadata = mapper.map_agent_metadata(mock_manager, schema_version='1.0.0') + + self.assertIsNotNone(metadata.registry) 
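+        # StrandsMapper always emits a RegistryMetadata object, with statestore and name left unset.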
+ self.assertIsNone(metadata.registry.statestore) + self.assertIsNone(metadata.registry.name) + + def test_metadata_optional_fields_are_none(self): + """Test optional fields are None when not applicable.""" + mock_manager = MockSessionManager() + mapper = StrandsMapper() + + metadata = mapper.map_agent_metadata(mock_manager, schema_version='1.0.0') + + self.assertIsNone(metadata.pubsub) + self.assertIsNone(metadata.llm) + self.assertIsNone(metadata.tools) + self.assertIsNone(metadata.max_iterations) + self.assertIsNone(metadata.tool_choice) + + def test_metadata_registered_at_is_set(self): + """Test registered_at timestamp is set.""" + mock_manager = MockSessionManager() + mapper = StrandsMapper() + + metadata = mapper.map_agent_metadata(mock_manager, schema_version='1.0.0') + + self.assertIsNotNone(metadata.registered_at) + self.assertIn('T', metadata.registered_at) + + +class StrandsFrameworkDetectionTest(unittest.TestCase): + """Tests for framework detection with Strands objects.""" + + def test_detect_framework_by_class_name(self): + """Test detection by DaprSessionManager class name.""" + + class DaprSessionManager: + pass + + mock = DaprSessionManager() + framework = detect_framework(mock) + self.assertEqual(framework, 'strands') + + def test_detect_framework_by_module(self): + """Test detection by strands module path.""" + + class MockAgent: + pass + + MockAgent.__module__ = 'strands.session.manager' + mock = MockAgent() + framework = detect_framework(mock) + self.assertEqual(framework, 'strands') + + +if __name__ == '__main__': + unittest.main() diff --git a/ext/dapr-ext-agent_core/tests/test_types.py b/ext/dapr-ext-agent_core/tests/test_types.py new file mode 100644 index 000000000..515f71804 --- /dev/null +++ b/ext/dapr-ext-agent_core/tests/test_types.py @@ -0,0 +1,258 @@ +# -*- coding: utf-8 -*- + +""" +Copyright 2026 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+""" + +import unittest + +from dapr.ext.agent_core.types import ( + SupportedFrameworks, + AgentMetadata, + AgentMetadataSchema, + LLMMetadata, + PubSubMetadata, + ToolMetadata, + RegistryMetadata, + MemoryMetadata, +) + + +class SupportedFrameworksTest(unittest.TestCase): + """Tests for SupportedFrameworks enum.""" + + def test_dapr_agents_value(self): + """Test DAPR_AGENTS enum value.""" + self.assertEqual(SupportedFrameworks.DAPR_AGENTS.value, 'dapr-agents') + + def test_langgraph_value(self): + """Test LANGGRAPH enum value.""" + self.assertEqual(SupportedFrameworks.LANGGRAPH.value, 'langgraph') + + def test_strands_value(self): + """Test STRANDS enum value.""" + self.assertEqual(SupportedFrameworks.STRANDS.value, 'strands') + + def test_all_frameworks_present(self): + """Test all expected frameworks are present.""" + framework_values = [f.value for f in SupportedFrameworks] + self.assertIn('dapr-agents', framework_values) + self.assertIn('langgraph', framework_values) + self.assertIn('strands', framework_values) + + +class AgentMetadataTest(unittest.TestCase): + """Tests for AgentMetadata model.""" + + def test_minimal_agent_metadata(self): + """Test AgentMetadata with minimal required fields.""" + agent = AgentMetadata(appid='test-app', type='standalone') + self.assertEqual(agent.appid, 'test-app') + self.assertEqual(agent.type, 'standalone') + self.assertEqual(agent.orchestrator, False) + self.assertEqual(agent.role, '') + self.assertEqual(agent.goal, '') + + def test_full_agent_metadata(self): + """Test AgentMetadata with all fields populated.""" + agent = AgentMetadata( + appid='test-app', + type='durable', + orchestrator=True, + role='coordinator', + goal='Manage tasks', + instructions=['Step 1', 'Step 2'], + statestore='agent-store', + system_prompt='You are a helpful assistant.', + ) + self.assertEqual(agent.appid, 'test-app') + self.assertEqual(agent.type, 'durable') + self.assertTrue(agent.orchestrator) + self.assertEqual(agent.role, 'coordinator') + self.assertEqual(agent.goal, 'Manage tasks') + self.assertEqual(agent.instructions, ['Step 1', 'Step 2']) + self.assertEqual(agent.statestore, 'agent-store') + self.assertEqual(agent.system_prompt, 'You are a helpful assistant.') + + +class LLMMetadataTest(unittest.TestCase): + """Tests for LLMMetadata model.""" + + def test_minimal_llm_metadata(self): + """Test LLMMetadata with minimal required fields.""" + llm = LLMMetadata(client='OpenAI', provider='openai') + self.assertEqual(llm.client, 'OpenAI') + self.assertEqual(llm.provider, 'openai') + self.assertEqual(llm.api, 'unknown') + self.assertEqual(llm.model, 'unknown') + + def test_azure_llm_metadata(self): + """Test LLMMetadata with Azure-specific fields.""" + llm = LLMMetadata( + client='AzureOpenAI', + provider='azure_openai', + api='chat', + model='gpt-4', + azure_endpoint='https://myresource.openai.azure.com', + azure_deployment='gpt-4-deployment', + ) + self.assertEqual(llm.azure_endpoint, 'https://myresource.openai.azure.com') + self.assertEqual(llm.azure_deployment, 'gpt-4-deployment') + + +class PubSubMetadataTest(unittest.TestCase): + """Tests for PubSubMetadata model.""" + + def test_minimal_pubsub_metadata(self): + """Test PubSubMetadata with minimal required fields.""" + pubsub = PubSubMetadata(name='pubsub-component') + self.assertEqual(pubsub.name, 'pubsub-component') + self.assertIsNone(pubsub.broadcast_topic) + self.assertIsNone(pubsub.agent_topic) + + def test_full_pubsub_metadata(self): + """Test PubSubMetadata with all fields.""" + pubsub = PubSubMetadata( 
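+            # Only `name` is required; broadcast_topic and agent_topic default to None when omitted.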
+ name='pubsub-component', + broadcast_topic='broadcast', + agent_topic='agent-topic', + ) + self.assertEqual(pubsub.broadcast_topic, 'broadcast') + self.assertEqual(pubsub.agent_topic, 'agent-topic') + + +class ToolMetadataTest(unittest.TestCase): + """Tests for ToolMetadata model.""" + + def test_tool_metadata(self): + """Test ToolMetadata creation.""" + tool = ToolMetadata( + tool_name='search', + tool_description='Search the web', + tool_args='{"query": "string"}', + ) + self.assertEqual(tool.tool_name, 'search') + self.assertEqual(tool.tool_description, 'Search the web') + self.assertEqual(tool.tool_args, '{"query": "string"}') + + +class MemoryMetadataTest(unittest.TestCase): + """Tests for MemoryMetadata model.""" + + def test_minimal_memory_metadata(self): + """Test MemoryMetadata with minimal required fields.""" + memory = MemoryMetadata(type='DaprCheckpointer') + self.assertEqual(memory.type, 'DaprCheckpointer') + self.assertIsNone(memory.statestore) + self.assertIsNone(memory.session_id) + + def test_full_memory_metadata(self): + """Test MemoryMetadata with all fields.""" + memory = MemoryMetadata( + type='DaprSessionManager', + statestore='session-store', + session_id='session-123', + ) + self.assertEqual(memory.type, 'DaprSessionManager') + self.assertEqual(memory.statestore, 'session-store') + self.assertEqual(memory.session_id, 'session-123') + + +class RegistryMetadataTest(unittest.TestCase): + """Tests for RegistryMetadata model.""" + + def test_empty_registry_metadata(self): + """Test RegistryMetadata with defaults.""" + registry = RegistryMetadata() + self.assertIsNone(registry.statestore) + self.assertIsNone(registry.name) + + def test_full_registry_metadata(self): + """Test RegistryMetadata with all fields.""" + registry = RegistryMetadata(statestore='registry-store', name='team-alpha') + self.assertEqual(registry.statestore, 'registry-store') + self.assertEqual(registry.name, 'team-alpha') + + +class AgentMetadataSchemaTest(unittest.TestCase): + """Tests for AgentMetadataSchema model.""" + + def test_minimal_schema(self): + """Test AgentMetadataSchema with minimal required fields.""" + schema = AgentMetadataSchema( + schema_version='1.0.0', + agent=AgentMetadata(appid='test-app', type='standalone'), + name='test-agent', + registered_at='2026-01-01T00:00:00Z', + ) + self.assertEqual(schema.schema_version, '1.0.0') + self.assertEqual(schema.agent.appid, 'test-app') + self.assertEqual(schema.name, 'test-agent') + self.assertIsNone(schema.pubsub) + self.assertIsNone(schema.memory) + self.assertIsNone(schema.llm) + + def test_full_schema(self): + """Test AgentMetadataSchema with all fields.""" + schema = AgentMetadataSchema( + schema_version='1.0.0', + agent=AgentMetadata(appid='test-app', type='durable'), + name='full-agent', + registered_at='2026-01-01T00:00:00Z', + pubsub=PubSubMetadata(name='pubsub'), + memory=MemoryMetadata(type='DaprCheckpointer'), + llm=LLMMetadata(client='OpenAI', provider='openai'), + registry=RegistryMetadata(name='team-1'), + tools=[ + ToolMetadata( + tool_name='search', tool_description='Search tool', tool_args='{}' + ) + ], + max_iterations=10, + tool_choice='auto', + agent_metadata={'custom': 'data'}, + ) + self.assertEqual(schema.name, 'full-agent') + self.assertEqual(schema.pubsub.name, 'pubsub') + self.assertEqual(schema.memory.type, 'DaprCheckpointer') + self.assertEqual(schema.llm.client, 'OpenAI') + self.assertEqual(len(schema.tools), 1) + self.assertEqual(schema.max_iterations, 10) + self.assertEqual(schema.agent_metadata['custom'], 
'data') + + def test_export_json_schema(self): + """Test export_json_schema method.""" + json_schema = AgentMetadataSchema.export_json_schema(version='1.0.0') + + self.assertIn('$schema', json_schema) + self.assertEqual(json_schema['$schema'], 'https://json-schema.org/draft/2020-12/schema') + self.assertEqual(json_schema['version'], '1.0.0') + self.assertIn('properties', json_schema) + + def test_model_dump(self): + """Test model_dump produces valid dictionary.""" + schema = AgentMetadataSchema( + schema_version='1.0.0', + agent=AgentMetadata(appid='test-app', type='standalone'), + name='test-agent', + registered_at='2026-01-01T00:00:00Z', + ) + data = schema.model_dump() + + self.assertIsInstance(data, dict) + self.assertEqual(data['schema_version'], '1.0.0') + self.assertEqual(data['name'], 'test-agent') + self.assertIn('agent', data) + + +if __name__ == '__main__': + unittest.main() From 44ce2a0287515379760a064cf4619f64d63a7dd6 Mon Sep 17 00:00:00 2001 From: Casper Nielsen Date: Tue, 3 Feb 2026 13:24:01 +0100 Subject: [PATCH 08/13] feat: add strands example & streamline metadata extractor Signed-off-by: Casper Nielsen --- examples/langgraph-checkpointer/dapr-llm.yaml | 12 + examples/strands-agent/README.md | 168 ++++++++ examples/strands-agent/agent.py | 127 ++++++ .../components/agent-registry.yaml | 14 + .../components/conversation.yaml | 8 + .../strands-agent/components/statestore.yaml | 14 + examples/strands-agent/dapr-llm.yaml | 12 + examples/strands-agent/requirements.txt | 3 + .../dapr/ext/agent_core/introspection.py | 20 +- .../dapr/ext/agent_core/mapping/__init__.py | 2 + .../dapr/ext/agent_core/mapping/base.py | 69 ++++ .../ext/agent_core/mapping/dapr_agents.py | 3 +- .../dapr/ext/agent_core/mapping/langgraph.py | 20 +- .../dapr/ext/agent_core/mapping/strands.py | 384 +++++++++++++++++- ext/dapr-ext-agent_core/setup.cfg | 1 + .../tests/test_base_mapper.py | 85 ++++ .../tests/test_introspection.py | 4 +- .../tests/test_langgraph_metadata.py | 6 +- .../tests/test_strands_mapper.py | 184 +++++++++ .../tests/test_strands_metadata.py | 13 +- .../dapr/ext/strands/dapr_session_manager.py | 47 ++- 21 files changed, 1136 insertions(+), 60 deletions(-) create mode 100644 examples/langgraph-checkpointer/dapr-llm.yaml create mode 100644 examples/strands-agent/README.md create mode 100644 examples/strands-agent/agent.py create mode 100644 examples/strands-agent/components/agent-registry.yaml create mode 100644 examples/strands-agent/components/conversation.yaml create mode 100644 examples/strands-agent/components/statestore.yaml create mode 100644 examples/strands-agent/dapr-llm.yaml create mode 100644 examples/strands-agent/requirements.txt create mode 100644 ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/base.py create mode 100644 ext/dapr-ext-agent_core/tests/test_base_mapper.py create mode 100644 ext/dapr-ext-agent_core/tests/test_strands_mapper.py diff --git a/examples/langgraph-checkpointer/dapr-llm.yaml b/examples/langgraph-checkpointer/dapr-llm.yaml new file mode 100644 index 000000000..9bc532409 --- /dev/null +++ b/examples/langgraph-checkpointer/dapr-llm.yaml @@ -0,0 +1,12 @@ +# https://docs.dapr.io/developing-applications/local-development/multi-app-dapr-run/multi-app-template/#template-properties +version: 1 +common: + resourcesPath: ./components + logLevel: info + appLogDestination: console + daprdLogDestination: console + +apps: +- appID: langgraph + appDirPath: ./ + command: ["python3", "agent.py"] \ No newline at end of file diff --git 
a/examples/strands-agent/README.md b/examples/strands-agent/README.md
new file mode 100644
index 000000000..6e271cccc
--- /dev/null
+++ b/examples/strands-agent/README.md
@@ -0,0 +1,168 @@
+# Dapr For Agents - Strands Agent with Persistent Session Storage
+
+This example adds Dapr-backed session persistence to the Strands Agent SDK, storing agent sessions in a distributed Dapr state store.
+
+## Overview
+
+This example demonstrates how to use a **real Strands Agent** from the Strands Agent SDK together with `DaprSessionManager` for distributed session persistence. The example shows:
+
+- Creating a Strands Agent with the official Strands SDK
+- Using DaprSessionManager for distributed session storage across restarts
+- Tool integration (weather checking example)
+- Conversation history persistence and restoration
+- Seamless LLM integration through Strands model providers
+
+**Note:** This uses the actual [Strands Agent SDK](https://strandsagents.com/), not just session types.
+
+## Pre-requisites
+
+- [Dapr CLI and initialized environment](https://docs.dapr.io/getting-started)
+- [Install Python 3.10+](https://www.python.org/downloads/)
+- OpenAI API key (or configure a different model provider)
+
+## Install Dependencies
+
+```sh
+pip3 install -r requirements.txt
+```
+
+Set your API key:
+```sh
+export OPENAI_API_KEY=your-key-here
+```
+
+## Run the Example
+
+Run the following command in a terminal/command prompt:
+
+```sh
+dapr run --app-id strands-agent --resources-path ./components -- python3 agent.py
+```
+
+### What to Expect
+
+The example will:
+
+1. Create a Strands Agent with:
+   - GPT-4o model via OpenAIModel provider
+   - A `get_weather` tool function
+   - System prompt and agent metadata
+   - DaprSessionManager for session persistence
+2. Process user queries:
+   - "What's the weather in San Francisco?" → Agent uses get_weather tool
+   - "How about New York?" → Agent continues conversation with context
+3. Persist all conversation state to the Dapr state store
+4. On subsequent runs, automatically restore the full conversation history
+
+Run the example again to see the conversation resume from where it left off!
+
+### Example Output
+
+**First run:**
+```
+📂 Using session: assistant-session-1
+✅ Created Strands Agent: weather-assistant
+   Model: OpenAIModel(model='gpt-4o')
+   Tools: ['get_weather']
+   Session Manager: DaprSessionManager
+
+🆕 Starting fresh conversation
+
+👤 USER: What's the weather in San Francisco?
+🤖 ASSISTANT: The weather in San Francisco is sunny and 72°F
+
+👤 USER: How about New York?
+🤖 ASSISTANT: Let me check that for you. The weather in New York is sunny and 72°F
+
+✅ Conversation complete!
+🔄 Run again to resume the conversation with full history from Dapr state store.
+```
+
+**Second run (conversation resumes):**
+```
+📂 Using session: assistant-session-1
+✅ Created Strands Agent: weather-assistant
+   Model: OpenAIModel(model='gpt-4o')
+   Tools: ['get_weather']
+   Session Manager: DaprSessionManager
+
+💬 Resuming conversation with 4 previous messages
+────────────────────────────────────────────────────────────
+👤 USER: What's the weather in San Francisco?
+🤖 ASSISTANT: The weather in San Francisco is sunny and 72°F
+👤 USER: How about New York?
+🤖 ASSISTANT: Let me check that for you. The weather in New York is sunny and 72°F
+────────────────────────────────────────────────────────────
+
+👤 USER: What's the weather in San Francisco?
+🤖 ASSISTANT: I just checked that - it's still sunny and 72°F in San Francisco!
+```
+
+## Key Features
+
+### Real Strands Agent
+- Uses the official Strands Agent SDK from strandsagents.com
+- Full agent capabilities: tools, system prompts, state management
+- Multiple LLM provider support (Anthropic, OpenAI, Bedrock, etc.)
+
+### Distributed Session Persistence
+- DaprSessionManager stores all conversation state in Dapr state stores
+- Works with any Dapr state store: Redis, PostgreSQL, MongoDB, Cosmos DB, etc.
+- Automatic state synchronization and conversation restoration across application restarts
+- Full message history maintained
+- Support for TTL and consistency levels
+
+### Tool Integration
+- Define Python functions as tools
+- Agent automatically calls tools when needed
+- Tool results integrated into conversation flow
+
+### LLM Provider Flexibility
+- Easy to swap model providers (AnthropicModel, OpenAIModel, etc.)
+- Configure model parameters (temperature, max tokens, etc.)
+- Strands handles all LLM interactions
+
+## Customization
+
+You can customize the session manager with:
+
+```python
+session_manager = DaprSessionManager(
+    session_id='my-session',
+    state_store_name='statestore',
+    dapr_client=dapr_client,
+    ttl=3600,  # Optional: TTL in seconds
+    consistency='strong',  # Optional: 'eventual' or 'strong'
+)
+```
+
+## Configuration
+
+### State Store
+
+The example uses a Redis state store component. You can modify [components/statestore.yaml](./components/statestore.yaml) to use a different state store backend supported by Dapr.
+
+### Conversation Provider
+
+The agent in this example calls OpenAI directly through the Strands `OpenAIModel` provider, so the only required LLM configuration is the API key:
+
+```bash
+export OPENAI_API_KEY="your-api-key"
+```
+
+A sample `echo` conversation component (which simply echoes back its input) is also included in `components/conversation.yaml`. To experiment with Dapr's conversation API, replace it with a real conversation component (e.g., OpenAI, Anthropic) and set the required API keys as environment variables. See [examples/conversation](../conversation/) for more conversation component examples.
+
+## Learn More
+
+- [Dapr State Management](https://docs.dapr.io/developing-applications/building-blocks/state-management/)
+- [Strands Agents SDK](https://strandsagents.com/)
+- [Dapr Python SDK](https://github.com/dapr/python-sdk)
diff --git a/examples/strands-agent/agent.py b/examples/strands-agent/agent.py
new file mode 100644
index 000000000..40c12edf3
--- /dev/null
+++ b/examples/strands-agent/agent.py
@@ -0,0 +1,127 @@
+"""
+Example demonstrating a Strands Agent with DaprSessionManager for persistent session storage.
+
+This example shows how to:
+- Create a Strands Agent with the Strands Agent SDK
+- Use DaprSessionManager for distributed session persistence
+- Leverage LLM providers through Strands
+- Maintain conversation history across restarts
+"""
+
+import os
+from strands import Agent, tool
+from strands.models import OpenAIModel
+from dapr.ext.strands import DaprSessionManager
+from dapr.clients import DaprClient
+
+
+@tool
+def get_weather(location: str) -> str:
+    """Get the current weather for a location.
+
+    Args:
+        location: The city and state, e.g.
"San Francisco, CA" + + Returns: + A description of the current weather + """ + # Mock implementation for demonstration + return f"The weather in {location} is sunny and 72°F" + + +def run_agent_conversation(): + """Run a Strands Agent with Dapr session persistence.""" + + # Check for OpenAI API key + openai_api_key = os.getenv('OPENAI_API_KEY') + if not openai_api_key: + print("❌ Error: OPENAI_API_KEY environment variable not set") + print("💡 Set it with: export OPENAI_API_KEY=your-key-here") + return + + session_id = 'assistant-session-1' + agent_id = 'weather-assistant' + + with DaprClient() as dapr_client: + # Create DaprSessionManager for distributed session storage + session_manager = DaprSessionManager( + session_id=session_id, + state_store_name='statestore', + dapr_client=dapr_client + ) + + # Create a Strands Agent with DaprSessionManager + agent = Agent( + # LLM model configuration - API key read from OPENAI_API_KEY env var + model=OpenAIModel(model_id='gpt-4o'), + + # System prompt + system_prompt=( + "You are a helpful weather assistant. " + "You can check the weather for any location. " + "Be concise and friendly in your responses." + ), + + # Tools available to the agent + tools=[get_weather], + + # Agent metadata + agent_id=agent_id, + name='Weather Assistant', + description='An AI assistant that helps users check the weather', + + # State management - custom state dict + state={ + 'role': 'Weather Assistant', + 'goal': 'Help users get weather information', + 'instructions': ['Be concise', 'Be friendly', 'Always use the get_weather tool'], + 'max_iterations': 5, + }, + + # Session manager for persistent storage + # This enables the agent to resume conversations across restarts + session_manager=session_manager, + ) + + # User queries + queries = [ + "What's the weather in San Francisco?", + "How about New York?", + ] + + for query in queries: + print(f"👤 USER: {query}") + + try: + # Run the agent - this will: + # 1. Add the user message to the conversation + # 2. Call the LLM to generate a response + # 3. Execute any tool calls if needed + # 4. 
Persist everything through the session manager + import asyncio + response = asyncio.run(agent.invoke_async(query)) + + # Extract the final response text + if hasattr(response, 'content'): + content = response.content + elif hasattr(response, 'text'): + content = response.text + else: + content = str(response) + + print(f"🤖 ASSISTANT: {content}") + + except Exception as e: + print(f"❌ Error: {e}") + print("💡 Tip: Make sure you have OPENAI_API_KEY set in your environment.") + print(" Or switch to a different model provider (Anthropic, Bedrock, etc.)") + break + + print() + + print("✅ Conversation complete!") + print("🔄 Run again to resume the conversation with full history from Dapr state store.") + + +if __name__ == '__main__': + run_agent_conversation() \ No newline at end of file diff --git a/examples/strands-agent/components/agent-registry.yaml b/examples/strands-agent/components/agent-registry.yaml new file mode 100644 index 000000000..b6c1fe4d4 --- /dev/null +++ b/examples/strands-agent/components/agent-registry.yaml @@ -0,0 +1,14 @@ +apiVersion: dapr.io/v1alpha1 +kind: Component +metadata: + name: agent-registry +spec: + type: state.redis + version: v1 + metadata: + - name: redisHost + value: localhost:6379 + - name: redisPassword + value: "" + - name: keyPrefix + value: none \ No newline at end of file diff --git a/examples/strands-agent/components/conversation.yaml b/examples/strands-agent/components/conversation.yaml new file mode 100644 index 000000000..674ba38e6 --- /dev/null +++ b/examples/strands-agent/components/conversation.yaml @@ -0,0 +1,8 @@ +apiVersion: dapr.io/v1alpha1 +kind: Component +metadata: + name: echo +spec: + type: conversation.echo + version: v1 + metadata: [] diff --git a/examples/strands-agent/components/statestore.yaml b/examples/strands-agent/components/statestore.yaml new file mode 100644 index 000000000..2f676bff8 --- /dev/null +++ b/examples/strands-agent/components/statestore.yaml @@ -0,0 +1,14 @@ +apiVersion: dapr.io/v1alpha1 +kind: Component +metadata: + name: statestore +spec: + type: state.redis + version: v1 + metadata: + - name: redisHost + value: localhost:6379 + - name: redisPassword + value: "" + - name: actorStateStore + value: "true" diff --git a/examples/strands-agent/dapr-llm.yaml b/examples/strands-agent/dapr-llm.yaml new file mode 100644 index 000000000..d2916cf76 --- /dev/null +++ b/examples/strands-agent/dapr-llm.yaml @@ -0,0 +1,12 @@ +# https://docs.dapr.io/developing-applications/local-development/multi-app-dapr-run/multi-app-template/#template-properties +version: 1 +common: + resourcesPath: ./components + logLevel: info + appLogDestination: console + daprdLogDestination: console + +apps: +- appID: strands-agent + appDirPath: ./ + command: ["python3", "agent.py"] \ No newline at end of file diff --git a/examples/strands-agent/requirements.txt b/examples/strands-agent/requirements.txt new file mode 100644 index 000000000..e98a79e6d --- /dev/null +++ b/examples/strands-agent/requirements.txt @@ -0,0 +1,3 @@ +dapr>=1.15.0 +dapr-ext-strands>=0.1.0 +strands-agents>=1.24.0 diff --git a/ext/dapr-ext-agent_core/dapr/ext/agent_core/introspection.py b/ext/dapr-ext-agent_core/dapr/ext/agent_core/introspection.py index 560c8c91f..daeeecea7 100644 --- a/ext/dapr-ext-agent_core/dapr/ext/agent_core/introspection.py +++ b/ext/dapr-ext-agent_core/dapr/ext/agent_core/introspection.py @@ -13,8 +13,11 @@ import gc import inspect +import logging from typing import Any, Optional +logger = logging.getLogger(__name__) + def find_agent_in_stack() -> 
Optional[Any]: """ @@ -41,8 +44,17 @@ def find_agent_in_stack() -> Optional[Any]: return obj # Strands support - DaprSessionManager + # Use gc to find the Agent that owns this session manager (similar to LangGraph checkpointer) if obj_type == 'DaprSessionManager': - return obj + referrers = gc.get_referrers(obj) + for ref in referrers: + ref_type = type(ref).__name__ + ref_module = type(ref).__module__ + # Look for Strands Agent that owns this session manager + if ref_type == 'Agent' and 'strands' in ref_module: + return ref + # Don't register bare DaprSessionManager - only register when Agent exists + return None # If we found a checkpointer, use gc to find the graph that references it if obj_type == 'DaprCheckpointer': @@ -77,8 +89,10 @@ def detect_framework(agent: Any) -> Optional[str]: if 'dapr_agents' in agent_module: return 'dapr_agents' - # Strands - if agent_type == 'DaprSessionManager' or 'strands' in agent_module: + # Strands - detect both Agent class and DaprSessionManager + if agent_type == 'Agent' and 'strands' in agent_module: + return 'strands' + if agent_type == 'DaprSessionManager': return 'strands' return None diff --git a/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/__init__.py b/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/__init__.py index d3de8e13f..086005e79 100644 --- a/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/__init__.py +++ b/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/__init__.py @@ -1,8 +1,10 @@ +from .base import BaseAgentMapper from .dapr_agents import DaprAgentsMapper from .langgraph import LangGraphMapper from .strands import StrandsMapper __all__ = [ + "BaseAgentMapper", "DaprAgentsMapper", "LangGraphMapper", "StrandsMapper", diff --git a/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/base.py b/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/base.py new file mode 100644 index 000000000..a04568773 --- /dev/null +++ b/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/base.py @@ -0,0 +1,69 @@ +""" +Copyright 2026 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + +from abc import ABC, abstractmethod +from typing import Any +from dapr.ext.agent_core.types import AgentMetadataSchema + + +class BaseAgentMapper(ABC): + """Abstract base class for agent metadata mappers. + + Provides common functionality for extracting metadata from different + agent frameworks (Strands, LangGraph, Dapr Agents). + """ + + @staticmethod + def _extract_provider(module_name: str) -> str: + """Extract provider name from module path. 
+ + Args: + module_name: Python module name (e.g., 'langchain_openai.chat_models') + + Returns: + Provider identifier (e.g., 'openai', 'anthropic', 'azure_openai') + """ + module_lower = module_name.lower() + + # Check more specific providers first + if 'vertexai' in module_lower: + return 'vertexai' + elif 'bedrock' in module_lower: + return 'bedrock' + elif 'azure' in module_lower: + return 'azure_openai' + elif 'openai' in module_lower: + return 'openai' + elif 'anthropic' in module_lower: + return 'anthropic' + elif 'ollama' in module_lower: + return 'ollama' + elif 'google' in module_lower or 'gemini' in module_lower: + return 'google' + elif 'cohere' in module_lower: + return 'cohere' + + return 'unknown' + + @abstractmethod + def map_agent_metadata(self, agent: Any, schema_version: str) -> AgentMetadataSchema: + """Map agent to standardized metadata schema. + + Args: + agent: Framework-specific agent instance + schema_version: Schema version to use + + Returns: + AgentMetadataSchema with extracted metadata + """ + pass diff --git a/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/dapr_agents.py b/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/dapr_agents.py index 0e4c5f787..59b26ef51 100644 --- a/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/dapr_agents.py +++ b/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/dapr_agents.py @@ -3,12 +3,13 @@ import json import logging from typing import Any +from dapr.ext.agent_core.mapping.base import BaseAgentMapper from dapr.ext.agent_core.types import AgentMetadata, AgentMetadataSchema, LLMMetadata, MemoryMetadata, PubSubMetadata, RegistryMetadata, ToolMetadata logger = logging.getLogger(__name__) -class DaprAgentsMapper: +class DaprAgentsMapper(BaseAgentMapper): def __init__(self) -> None: pass diff --git a/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/langgraph.py b/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/langgraph.py index 478b4a342..4f2942d0c 100644 --- a/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/langgraph.py +++ b/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/langgraph.py @@ -2,6 +2,7 @@ from datetime import datetime, timezone import logging from typing import TYPE_CHECKING, Any, Dict, Optional +from dapr.ext.agent_core.mapping.base import BaseAgentMapper from dapr.ext.agent_core.types import AgentMetadata, AgentMetadataSchema, LLMMetadata, MemoryMetadata, PubSubMetadata, RegistryMetadata, ToolMetadata from langgraph.pregel._read import PregelNode @@ -10,27 +11,10 @@ logger = logging.getLogger(__name__) -class LangGraphMapper: +class LangGraphMapper(BaseAgentMapper): def __init__(self) -> None: pass - def _extract_provider(self, module_name: str) -> str: - """Extract provider name from module path.""" - module_lower = module_name.lower() - if 'openai' in module_lower and 'azure' not in module_lower: - return 'openai' - elif 'azure' in module_lower: - return 'azure_openai' - elif 'anthropic' in module_lower: - return 'anthropic' - elif 'ollama' in module_lower: - return 'ollama' - elif 'google' in module_lower or 'gemini' in module_lower: - return 'google' - elif 'cohere' in module_lower: - return 'cohere' - return 'unknown' - def map_agent_metadata(self, agent: Any, schema_version: str) -> AgentMetadataSchema: introspected_vars: Dict[str, Any] = vars(agent) # type: ignore diff --git a/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/strands.py b/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/strands.py index 3376b864f..14ac53c23 100644 --- 
a/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/strands.py +++ b/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/strands.py @@ -1,53 +1,398 @@ from datetime import datetime, timezone import logging -from typing import TYPE_CHECKING, Any, Optional +from typing import TYPE_CHECKING, Any, Dict, List, Optional +from dapr.ext.agent_core.mapping.base import BaseAgentMapper from dapr.ext.agent_core.types import ( AgentMetadata, AgentMetadataSchema, + LLMMetadata, MemoryMetadata, RegistryMetadata, + ToolMetadata, ) if TYPE_CHECKING: from dapr.ext.strands import DaprSessionManager + from strands.types.session import SessionAgent logger = logging.getLogger(__name__) -class StrandsMapper: +class StrandsMapper(BaseAgentMapper): def __init__(self) -> None: pass + + def _is_strands_agent(self, agent: Any) -> bool: + """Check if agent is an actual Strands Agent (not just session manager).""" + agent_type = type(agent).__name__ + agent_module = type(agent).__module__ + return agent_type == 'Agent' and 'strands' in agent_module + + def _extract_from_strands_agent(self, agent: Any) -> Dict[str, Any]: + """Extract metadata from a real Strands Agent. + + Args: + agent: A strands.Agent instance + + Returns: + Dictionary with extracted metadata + """ + metadata = {} + + # Basic agent info + metadata['agent_id'] = getattr(agent, 'agent_id', None) or getattr(agent, 'name', 'agent') + metadata['name'] = getattr(agent, 'name', None) + metadata['description'] = getattr(agent, 'description', None) + + # System prompt + system_prompt = getattr(agent, 'system_prompt', None) + if system_prompt: + if isinstance(system_prompt, str): + metadata['system_prompt'] = system_prompt + elif isinstance(system_prompt, list): + # Join list of content blocks + metadata['system_prompt'] = ' '.join(str(block) for block in system_prompt) + + # Agent state (custom metadata) + state = getattr(agent, 'state', {}) + try: + if state: + # Convert to dict if it's JSONSerializableDict (which doesn't support .get(key, default)) + state_dict = dict(state) if hasattr(state, '__iter__') else {} + metadata['role'] = state_dict.get('role') or metadata.get('name', 'Agent') + metadata['goal'] = state_dict.get('goal') or metadata.get('description', '') + metadata['instructions'] = state_dict.get('instructions') or [] + metadata['max_iterations'] = state_dict.get('max_iterations') + else: + metadata['role'] = metadata.get('name', 'Agent') + metadata['goal'] = metadata.get('description', '') + metadata['instructions'] = [] + except Exception: + metadata['role'] = metadata.get('name', 'Agent') + metadata['goal'] = metadata.get('description', '') + metadata['instructions'] = [] + + # LLM Model + model = getattr(agent, 'model', None) + if model: + model_type = type(model).__name__ + + # Check if model has a config + config = getattr(model, 'config', None) + + # Try multiple possible attribute names for the model ID + model_id = getattr(model, 'model_id', None) or getattr(model, 'model', None) or getattr(model, 'name', None) + if config: + if isinstance(config, dict) and 'model_id' in config: + model_id = config['model_id'] + elif hasattr(config, 'model'): + model_id = config.model + elif hasattr(config, 'model_id'): + model_id = config.model_id + + if model_id is None: + model_id = 'unknown' + + # Extract provider from model module or type + model_module = type(model).__module__ + provider = self._extract_provider(model_module) + + # Fallback: parse from model type name if provider still unknown + if provider == 'unknown': + provider = 
model_type.replace('Model', '').lower() + + metadata['llm'] = { + 'provider': provider, + 'model': str(model_id), + 'model_type': model_type, + } + + # Tools - extract from tool_registry.registry + # In Strands, tools are stored in agent.tool_registry.registry as AgentTool objects + tools_metadata = [] + tool_registry = getattr(agent, 'tool_registry', None) + if tool_registry and hasattr(tool_registry, 'registry'): + registry_dict = tool_registry.registry + if isinstance(registry_dict, dict): + for tool_name, agent_tool in registry_dict.items(): + tool_info = { + 'name': tool_name, + 'description': getattr(agent_tool, 'description', '') or getattr(agent_tool, '__doc__', ''), + } + tools_metadata.append(tool_info) + + metadata['tools'] = tools_metadata + + # Session manager - stored as _session_manager in Strands Agent + session_manager = getattr(agent, '_session_manager', None) + if session_manager and type(session_manager).__name__ == 'DaprSessionManager': + metadata['session_manager'] = session_manager + metadata['state_store'] = getattr(session_manager, 'state_store_name', None) + metadata['session_id'] = getattr(session_manager, '_session_id', None) + + return metadata + + def _get_session_agent(self, session_manager: "DaprSessionManager", session_id: str) -> Optional["SessionAgent"]: + """ + Get the primary SessionAgent from a session. + + Reads the session manifest to find agents, and returns the first one. + + Args: + session_manager: The DaprSessionManager instance + session_id: Session ID to read from + + Returns: + SessionAgent if found, None otherwise + """ + try: + # Use the session manager's method to get the manifest key + manifest_key = session_manager._get_manifest_key(session_id) + logger.info(f"Reading manifest from key: {manifest_key}") + manifest = session_manager._read_state(manifest_key) + + logger.info(f"Manifest content: {manifest}") + + if not manifest or 'agents' not in manifest or not manifest['agents']: + logger.warning(f"No agents found in session {session_id} - session may not have agents yet. Use fallback metadata.") + return None + + # Get the first agent (primary agent) + agent_id = manifest['agents'][0] + logger.info(f"Found agent '{agent_id}' in session {session_id}") + session_agent = session_manager.read_agent(session_id, agent_id) + + if session_agent: + logger.info(f"Successfully loaded SessionAgent '{agent_id}' with state keys: {list(session_agent.state.keys()) if session_agent.state else []}") + else: + logger.warning(f"read_agent returned None for agent_id '{agent_id}'") + + return session_agent + + except Exception as e: + logger.error(f"Failed to read SessionAgent from session {session_id}: {e}", exc_info=True) + return None + + def _extract_llm_metadata(self, agent_state: Dict[str, Any]) -> Optional[LLMMetadata]: + """ + Extract LLM metadata from SessionAgent state. + + Looks for llm_component, conversation_provider, or llm_config in state. 
+ + Args: + agent_state: The SessionAgent.state dictionary + + Returns: + LLMMetadata if LLM configuration found, None otherwise + """ + # Check for various LLM configuration keys + llm_component = agent_state.get('llm_component') or agent_state.get('conversation_provider') + llm_config = agent_state.get('llm_config', {}) + + if not llm_component and not llm_config: + return None + + # Extract LLM details + provider = llm_config.get('provider', 'unknown') + model = llm_config.get('model', 'unknown') + + return LLMMetadata( + client="dapr_conversation", + provider=provider, + api="conversation", + model=model, + component_name=llm_component, + ) + + def _extract_tools_metadata(self, agent_state: Dict[str, Any]) -> List[ToolMetadata]: + """ + Extract tools metadata from SessionAgent state. + + Args: + agent_state: The SessionAgent.state dictionary + + Returns: + List of ToolMetadata (empty list if no tools configured) + """ + tools_list = agent_state.get('tools') + + if not tools_list or not isinstance(tools_list, list): + return [] + + tool_metadata_list: List[ToolMetadata] = [] + for tool in tools_list: + if isinstance(tool, dict): + tool_metadata_list.append(ToolMetadata( + tool_name=str(tool.get('name', 'unknown')), + tool_description=str(tool.get('description', '')), + tool_args=str(tool.get('args', {})), + )) + elif hasattr(tool, 'name'): + # Handle tool objects + tool_metadata_list.append(ToolMetadata( + tool_name=str(getattr(tool, 'name', 'unknown')), + tool_description=str(getattr(tool, 'description', '')), + tool_args=str(getattr(tool, 'args', {})), + )) + + return tool_metadata_list def map_agent_metadata(self, agent: Any, schema_version: str) -> AgentMetadataSchema: """ - Map Strands DaprSessionManager to AgentMetadataSchema. + Map Strands Agent or DaprSessionManager to AgentMetadataSchema. + + Handles two cases: + 1. Real Strands Agent (strands.Agent) - extracts from agent properties + 2. DaprSessionManager - extracts from stored SessionAgent in state Args: - agent: The DaprSessionManager instance + agent: Either a strands.Agent or DaprSessionManager instance schema_version: Version of the schema Returns: AgentMetadataSchema with extracted metadata """ + + # Case 1: Real Strands Agent from the SDK + if self._is_strands_agent(agent): + extracted = self._extract_from_strands_agent(agent) + + agent_id = extracted.get('agent_id', 'agent') + agent_name = extracted.get('name', agent_id) + session_id = extracted.get('session_id') + + # Build full agent name + if session_id: + full_name = f"strands-{session_id}-{agent_id}" + else: + full_name = f"strands-{agent_id}" + + # Extract LLM metadata + llm_info = extracted.get('llm') + if llm_info: + llm_metadata = LLMMetadata( + client=llm_info.get('model_type', 'unknown'), # OpenAIModel, AnthropicModel, etc. + provider=llm_info.get('provider', 'unknown'), # openai, anthropic, etc. + api="chat", # Strands uses chat-based APIs + model=llm_info.get('model', 'unknown'), # gpt-4o, claude-3-opus, etc. 
+ component_name=None, + ) + else: + llm_metadata = None + + # Extract tools metadata + tools_list = extracted.get('tools', []) + tools_metadata = [] + for tool in tools_list: + if isinstance(tool, dict): + tools_metadata.append(ToolMetadata( + tool_name=tool.get('name', 'unknown'), + tool_description=tool.get('description', ''), + tool_args='', + )) + + # Get session manager info for memory and statestore + state_store_name = extracted.get('state_store') + session_id_value = extracted.get('session_id') + + # Determine memory type and populate session info + has_session_manager = extracted.get('session_manager') is not None + memory_type = "DaprSessionManager" if has_session_manager else "InMemory" + + return AgentMetadataSchema( + schema_version=schema_version, + agent=AgentMetadata( + appid="", + type="Strands", + orchestrator=False, + role=extracted.get('role', agent_name), + goal=extracted.get('goal', ''), + instructions=extracted.get('instructions') if extracted.get('instructions') else None, + statestore=state_store_name, # Set from session manager + system_prompt=extracted.get('system_prompt'), + ), + name=full_name, + registered_at=datetime.now(timezone.utc).isoformat(), + pubsub=None, + memory=MemoryMetadata( + type=memory_type, + session_id=session_id_value, # Set session_id + statestore=state_store_name, # Set statestore + ), + llm=llm_metadata, + tools=tools_metadata, # Already a list, could be empty [] + tool_choice="auto" if tools_metadata else None, # Set tool_choice based on whether tools exist + max_iterations=extracted.get('max_iterations'), + registry=RegistryMetadata( + name=None, + team="default", + ), + agent_metadata={ + 'framework': 'strands', + 'agent_id': agent_id, + 'session_id': session_id_value, + 'state_store': state_store_name, + }, + ) + + # Case 2: DaprSessionManager (legacy approach) session_manager: "DaprSessionManager" = agent - # Extract state store name - state_store_name = getattr(session_manager, '_state_store_name', None) + # Extract session manager info + state_store_name = getattr(session_manager, 'state_store_name', None) session_id = getattr(session_manager, '_session_id', None) + # Try to get the primary SessionAgent + session_agent = self._get_session_agent(session_manager, session_id) if session_id else None + + # Extract agent-specific metadata if SessionAgent exists + if session_agent: + agent_id = session_agent.agent_id + agent_state = session_agent.state or {} + + # Extract from state + system_prompt = agent_state.get('system_prompt') + role = str(agent_state.get('role', agent_id)) + goal = str(agent_state.get('goal', '')) + instructions_raw = agent_state.get('instructions', []) + instructions = list(instructions_raw) if isinstance(instructions_raw, list) else [] + tool_choice = agent_state.get('tool_choice') + max_iterations = agent_state.get('max_iterations') + + # Extract LLM metadata + llm_metadata = self._extract_llm_metadata(agent_state) + + # Extract tools metadata + tools_metadata = self._extract_tools_metadata(agent_state) + + agent_name = f"strands-{session_id}-{agent_id}" + else: + # Fallback when no SessionAgent found + agent_id = None + agent_state = {} + system_prompt = None + role = "Session Manager" + goal = "Manages multi-agent sessions with distributed state storage" + instructions = [] + tool_choice = None + max_iterations = None + llm_metadata = None + tools_metadata = [] + agent_name = f"strands-session-{session_id}" if session_id else "strands-session" + return AgentMetadataSchema( schema_version=schema_version, 
agent=AgentMetadata( appid="", type="Strands", orchestrator=False, - role="Session Manager", - goal="Manages multi-agent sessions with distributed state storage", - instructions=[], + role=role, + goal=goal, + instructions=instructions if instructions else None, statestore=state_store_name, - system_prompt=None, + system_prompt=system_prompt, ), - name=f"strands-session-{session_id}" if session_id else "strands-session", + name=agent_name, registered_at=datetime.now(timezone.utc).isoformat(), pubsub=None, memory=MemoryMetadata( @@ -55,17 +400,18 @@ def map_agent_metadata(self, agent: Any, schema_version: str) -> AgentMetadataSc session_id=session_id, statestore=state_store_name, ), - llm=None, + llm=llm_metadata, + tools=tools_metadata, # Already a list, could be empty [] + tool_choice=tool_choice, + max_iterations=max_iterations, registry=RegistryMetadata( - statestore=None, name=None, + team="default", ), - tools=None, - max_iterations=None, - tool_choice=None, agent_metadata={ - "framework": "strands", - "session_id": session_id, - "state_store": state_store_name, + 'framework': 'strands', + 'session_id': session_id, + 'agent_id': agent_id, + 'state_store': state_store_name, }, ) diff --git a/ext/dapr-ext-agent_core/setup.cfg b/ext/dapr-ext-agent_core/setup.cfg index bdd5fc193..665a43379 100644 --- a/ext/dapr-ext-agent_core/setup.cfg +++ b/ext/dapr-ext-agent_core/setup.cfg @@ -27,6 +27,7 @@ install_requires = dapr-agents >= 0.10.7 pydantic >= 2.12.5 langgraph >= 0.3.6 + strands-agents >= 1.24.0 [options.packages.find] include = diff --git a/ext/dapr-ext-agent_core/tests/test_base_mapper.py b/ext/dapr-ext-agent_core/tests/test_base_mapper.py new file mode 100644 index 000000000..8471ecf2a --- /dev/null +++ b/ext/dapr-ext-agent_core/tests/test_base_mapper.py @@ -0,0 +1,85 @@ +""" +Copyright 2026 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+""" + +import unittest +from dapr.ext.agent_core.mapping import BaseAgentMapper + + +class TestBaseAgentMapper(unittest.TestCase): + """Tests for BaseAgentMapper shared functionality.""" + + def test_extract_provider_openai(self): + """Test provider extraction for OpenAI modules.""" + self.assertEqual(BaseAgentMapper._extract_provider('langchain_openai.chat_models'), 'openai') + self.assertEqual(BaseAgentMapper._extract_provider('openai.resources'), 'openai') + self.assertEqual(BaseAgentMapper._extract_provider('strands.models.openai'), 'openai') + + def test_extract_provider_azure_openai(self): + """Test provider extraction for Azure OpenAI modules.""" + self.assertEqual(BaseAgentMapper._extract_provider('langchain_openai.azure'), 'azure_openai') + self.assertEqual(BaseAgentMapper._extract_provider('azure.openai'), 'azure_openai') + self.assertEqual(BaseAgentMapper._extract_provider('AZURE_OPENAI'), 'azure_openai') + + def test_extract_provider_anthropic(self): + """Test provider extraction for Anthropic modules.""" + self.assertEqual(BaseAgentMapper._extract_provider('langchain_anthropic.chat_models'), 'anthropic') + self.assertEqual(BaseAgentMapper._extract_provider('anthropic.client'), 'anthropic') + self.assertEqual(BaseAgentMapper._extract_provider('strands.models.anthropic'), 'anthropic') + + def test_extract_provider_ollama(self): + """Test provider extraction for Ollama modules.""" + self.assertEqual(BaseAgentMapper._extract_provider('langchain_ollama'), 'ollama') + self.assertEqual(BaseAgentMapper._extract_provider('ollama.client'), 'ollama') + + def test_extract_provider_google(self): + """Test provider extraction for Google/Gemini modules.""" + self.assertEqual(BaseAgentMapper._extract_provider('langchain_google_genai'), 'google') + self.assertEqual(BaseAgentMapper._extract_provider('google.generativeai'), 'google') + self.assertEqual(BaseAgentMapper._extract_provider('gemini.client'), 'google') + + def test_extract_provider_cohere(self): + """Test provider extraction for Cohere modules.""" + self.assertEqual(BaseAgentMapper._extract_provider('langchain_cohere'), 'cohere') + self.assertEqual(BaseAgentMapper._extract_provider('cohere.client'), 'cohere') + + def test_extract_provider_bedrock(self): + """Test provider extraction for AWS Bedrock modules.""" + self.assertEqual(BaseAgentMapper._extract_provider('langchain_aws.bedrock'), 'bedrock') + self.assertEqual(BaseAgentMapper._extract_provider('bedrock.client'), 'bedrock') + + def test_extract_provider_vertexai(self): + """Test provider extraction for Vertex AI modules.""" + self.assertEqual(BaseAgentMapper._extract_provider('langchain_google_vertexai'), 'vertexai') + self.assertEqual(BaseAgentMapper._extract_provider('vertexai.client'), 'vertexai') + + def test_extract_provider_unknown(self): + """Test provider extraction for unknown modules.""" + self.assertEqual(BaseAgentMapper._extract_provider('some.random.module'), 'unknown') + self.assertEqual(BaseAgentMapper._extract_provider('custom_llm'), 'unknown') + self.assertEqual(BaseAgentMapper._extract_provider(''), 'unknown') + + def test_extract_provider_case_insensitive(self): + """Test that provider extraction is case-insensitive.""" + self.assertEqual(BaseAgentMapper._extract_provider('OPENAI.CLIENT'), 'openai') + self.assertEqual(BaseAgentMapper._extract_provider('Anthropic.Client'), 'anthropic') + self.assertEqual(BaseAgentMapper._extract_provider('OlLaMa'), 'ollama') + + def test_extract_provider_priority_azure_over_openai(self): + """Test that Azure OpenAI takes 
priority when both keywords present.""" + # Azure should be detected before OpenAI + self.assertEqual(BaseAgentMapper._extract_provider('azure.openai.client'), 'azure_openai') + self.assertEqual(BaseAgentMapper._extract_provider('openai.azure.wrapper'), 'azure_openai') + + +if __name__ == '__main__': + unittest.main() diff --git a/ext/dapr-ext-agent_core/tests/test_introspection.py b/ext/dapr-ext-agent_core/tests/test_introspection.py index 1b715a38e..9ad18ddd5 100644 --- a/ext/dapr-ext-agent_core/tests/test_introspection.py +++ b/ext/dapr-ext-agent_core/tests/test_introspection.py @@ -69,7 +69,9 @@ def test_detect_strands_by_module(self): class MockSessionManager: pass - MockSessionManager.__module__ = 'strands.session.manager' + # Use actual type name that detection looks for + MockSessionManager.__module__ = 'dapr.ext.strands' + MockSessionManager.__name__ = 'DaprSessionManager' agent = MockSessionManager() result = detect_framework(agent) self.assertEqual(result, 'strands') diff --git a/ext/dapr-ext-agent_core/tests/test_langgraph_metadata.py b/ext/dapr-ext-agent_core/tests/test_langgraph_metadata.py index 8c966ca66..287865613 100644 --- a/ext/dapr-ext-agent_core/tests/test_langgraph_metadata.py +++ b/ext/dapr-ext-agent_core/tests/test_langgraph_metadata.py @@ -22,8 +22,8 @@ class MockCheckpointer: """Mock DaprCheckpointer for testing.""" - def __init__(self, store_name='test-store'): - self.store_name = store_name + def __init__(self, state_store_name='test-store'): + self.state_store_name = state_store_name class MockCompiledStateGraph: @@ -54,7 +54,7 @@ def test_mapper_instantiation(self): @mock.patch('dapr.ext.agent_core.mapping.langgraph.PregelNode') def test_basic_metadata_extraction(self, mock_pregel_node): """Test basic metadata extraction from a mock graph.""" - checkpointer = MockCheckpointer(store_name='my-store') + checkpointer = MockCheckpointer(state_store_name='my-store') graph = MockCompiledStateGraph( name='my-graph', checkpointer=checkpointer, diff --git a/ext/dapr-ext-agent_core/tests/test_strands_mapper.py b/ext/dapr-ext-agent_core/tests/test_strands_mapper.py new file mode 100644 index 000000000..78967fc34 --- /dev/null +++ b/ext/dapr-ext-agent_core/tests/test_strands_mapper.py @@ -0,0 +1,184 @@ +""" +Copyright 2026 The Dapr Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+""" + +import json +import time +import unittest +from unittest import mock + +from dapr.ext.agent_core.mapping.strands import StrandsMapper + + +def make_mock_session_manager(session_id='test-session', state_store='statestore'): + """Create a mock DaprSessionManager for testing.""" + mock_manager = mock.Mock() + mock_manager._session_id = session_id + mock_manager._state_store_name = state_store + mock_manager.state_store_name = state_store # Public property + return mock_manager + + +def make_mock_session_agent(agent_id='test-agent', state=None): + """Create a mock SessionAgent for testing.""" + mock_agent = mock.Mock() + mock_agent.agent_id = agent_id + mock_agent.state = state or {} + mock_agent.conversation_manager_state = {} + mock_agent.created_at = time.time() + return mock_agent + + +class TestStrandsMapper(unittest.TestCase): + def setUp(self): + self.mapper = StrandsMapper() + + def test_map_agent_metadata_no_session_agent(self): + """Test mapping when no SessionAgent exists in the session.""" + session_manager = make_mock_session_manager() + session_manager._read_state.return_value = None + + result = self.mapper.map_agent_metadata(session_manager, "edge") + + # Should use fallback values + self.assertEqual(result.schema_version, "edge") + self.assertEqual(result.agent.role, "Session Manager") + self.assertEqual(result.agent.type, "Strands") + self.assertIsNone(result.llm) + self.assertEqual(result.tools, []) # Empty list, not None + self.assertIsNone(result.tool_choice) + self.assertIsNone(result.max_iterations) + self.assertEqual(result.name, "strands-session-test-session") + + def test_map_agent_metadata_with_basic_agent(self): + """Test mapping with a basic SessionAgent.""" + session_manager = make_mock_session_manager() + + # Mock manifest + session_manager._read_state.return_value = {'agents': ['assistant']} + + # Mock agent + agent_state = { + 'system_prompt': 'You are a helpful assistant', + 'role': 'AI Assistant', + 'goal': 'Help users', + } + mock_agent = make_mock_session_agent('assistant', agent_state) + session_manager.read_agent.return_value = mock_agent + + result = self.mapper.map_agent_metadata(session_manager, "edge") + + # Should extract from SessionAgent + self.assertEqual(result.agent.role, "AI Assistant") + self.assertEqual(result.agent.goal, "Help users") + self.assertEqual(result.agent.system_prompt, "You are a helpful assistant") + self.assertEqual(result.name, "strands-test-session-assistant") + self.assertEqual(result.agent_metadata['agent_id'], 'assistant') + + def test_map_agent_metadata_with_llm_config(self): + """Test mapping with LLM configuration.""" + session_manager = make_mock_session_manager() + session_manager._read_state.return_value = {'agents': ['assistant']} + + agent_state = { + 'system_prompt': 'You are helpful', + 'conversation_provider': 'openai', + 'llm_config': { + 'provider': 'openai', + 'model': 'gpt-4', + } + } + mock_agent = make_mock_session_agent('assistant', agent_state) + session_manager.read_agent.return_value = mock_agent + + result = self.mapper.map_agent_metadata(session_manager, "edge") + + # Should extract LLM metadata + self.assertIsNotNone(result.llm) + self.assertEqual(result.llm.client, "dapr_conversation") + self.assertEqual(result.llm.provider, "openai") + self.assertEqual(result.llm.model, "gpt-4") + self.assertEqual(result.llm.component_name, "openai") + + def test_map_agent_metadata_with_tools(self): + """Test mapping with tools configuration.""" + session_manager = make_mock_session_manager() + 
session_manager._read_state.return_value = {'agents': ['assistant']} + + agent_state = { + 'tools': [ + {'name': 'calculator', 'description': 'Calculate math', 'args': {'x': 'int', 'y': 'int'}}, + {'name': 'search', 'description': 'Search web', 'args': {'query': 'str'}}, + ], + 'tool_choice': 'auto', + } + mock_agent = make_mock_session_agent('assistant', agent_state) + session_manager.read_agent.return_value = mock_agent + + result = self.mapper.map_agent_metadata(session_manager, "edge") + + # Should extract tools metadata + self.assertIsNotNone(result.tools) + self.assertEqual(len(result.tools), 2) + self.assertEqual(result.tools[0].tool_name, 'calculator') + self.assertEqual(result.tools[0].tool_description, 'Calculate math') + self.assertEqual(result.tools[1].tool_name, 'search') + self.assertEqual(result.tool_choice, 'auto') + + def test_map_agent_metadata_with_instructions(self): + """Test mapping with instructions.""" + session_manager = make_mock_session_manager() + session_manager._read_state.return_value = {'agents': ['assistant']} + + agent_state = { + 'instructions': ['Be concise', 'Be helpful', 'Be friendly'], + 'max_iterations': 10, + } + mock_agent = make_mock_session_agent('assistant', agent_state) + session_manager.read_agent.return_value = mock_agent + + result = self.mapper.map_agent_metadata(session_manager, "edge") + + # Should extract instructions and max_iterations + self.assertIsNotNone(result.agent.instructions) + self.assertEqual(len(result.agent.instructions), 3) + self.assertEqual(result.agent.instructions[0], 'Be concise') + self.assertEqual(result.max_iterations, 10) + + def test_extract_llm_metadata_no_config(self): + """Test LLM metadata extraction with no configuration.""" + result = self.mapper._extract_llm_metadata({}) + self.assertIsNone(result) + + def test_extract_tools_metadata_no_tools(self): + """Test tools metadata extraction with no tools.""" + result = self.mapper._extract_tools_metadata({}) + self.assertEqual(result, []) # Empty list, not None + + def test_extract_tools_metadata_with_tool_objects(self): + """Test tools metadata extraction with tool objects.""" + mock_tool = mock.Mock() + mock_tool.name = 'my_tool' + mock_tool.description = 'My tool description' + mock_tool.args = {'param': 'value'} + + agent_state = {'tools': [mock_tool]} + result = self.mapper._extract_tools_metadata(agent_state) + + self.assertIsNotNone(result) + self.assertEqual(len(result), 1) + self.assertEqual(result[0].tool_name, 'my_tool') + self.assertEqual(result[0].tool_description, 'My tool description') + + +if __name__ == '__main__': + unittest.main() diff --git a/ext/dapr-ext-agent_core/tests/test_strands_metadata.py b/ext/dapr-ext-agent_core/tests/test_strands_metadata.py index 3df90c4ea..1c852722c 100644 --- a/ext/dapr-ext-agent_core/tests/test_strands_metadata.py +++ b/ext/dapr-ext-agent_core/tests/test_strands_metadata.py @@ -26,6 +26,10 @@ class MockSessionManager: def __init__(self, state_store_name='test-statestore', session_id='test-session-123'): self._state_store_name = state_store_name self._session_id = session_id + + @property + def state_store_name(self) -> str: + return self._state_store_name class StrandsMapperTest(unittest.TestCase): @@ -71,6 +75,7 @@ def test_metadata_name_generation(self): metadata = mapper.map_agent_metadata(mock_manager, schema_version='1.0.0') + # Fallback path generates different name self.assertEqual(metadata.name, 'strands-session-my-session') def test_metadata_name_without_session_id(self): @@ -94,6 +99,8 @@ def 
test_metadata_agent_metadata_field(self): self.assertEqual(metadata.agent_metadata['framework'], 'strands') self.assertEqual(metadata.agent_metadata['session_id'], 'sess1') self.assertEqual(metadata.agent_metadata['state_store'], 'store1') + # agent_id is None in fallback path when no SessionAgent exists + self.assertIsNone(metadata.agent_metadata['agent_id']) def test_metadata_registry_defaults(self): """Test registry metadata has correct defaults.""" @@ -115,7 +122,7 @@ def test_metadata_optional_fields_are_none(self): self.assertIsNone(metadata.pubsub) self.assertIsNone(metadata.llm) - self.assertIsNone(metadata.tools) + self.assertEqual(metadata.tools, []) # Empty list, not None self.assertIsNone(metadata.max_iterations) self.assertIsNone(metadata.tool_choice) @@ -149,7 +156,9 @@ def test_detect_framework_by_module(self): class MockAgent: pass - MockAgent.__module__ = 'strands.session.manager' + # Use actual type name and module that detection looks for + MockAgent.__module__ = 'strands.agent' + MockAgent.__name__ = 'Agent' mock = MockAgent() framework = detect_framework(mock) self.assertEqual(framework, 'strands') diff --git a/ext/dapr-ext-strands/dapr/ext/strands/dapr_session_manager.py b/ext/dapr-ext-strands/dapr/ext/strands/dapr_session_manager.py index 279d2562b..40c59aaa6 100644 --- a/ext/dapr-ext-strands/dapr/ext/strands/dapr_session_manager.py +++ b/ext/dapr-ext-strands/dapr/ext/strands/dapr_session_manager.py @@ -67,11 +67,37 @@ def __init__( self._consistency = consistency self._owns_client = False self._session_id = session_id + self._registry_initialized = False super().__init__(session_id=session_id, session_repository=self) + + @property + def state_store_name(self) -> str: + """Get the Dapr state store name. - # Register with agent registry after initialization - self._register_with_agent_registry() + Returns: + Name of the Dapr state store component. + """ + return self._state_store_name + + def _register_with_agent_registry(self) -> Optional[Any]: + """Register or update this session manager in the agent registry. + + Returns: + AgentRegistryAdapter if registration succeeded, None otherwise. + """ + try: + from dapr.ext.agent_core.metadata import AgentRegistryAdapter + + return AgentRegistryAdapter.create_from_stack(registry=None) + + except ImportError: + # agent_core extension not installed, skip registration + pass + except Exception as e: + logger.debug(f"Agent registry registration skipped: {e}") + + return None @classmethod def from_address( @@ -223,6 +249,12 @@ def _write_state(self, key: str, data: Dict[str, Any]) -> None: Raises: SessionException: If write fails. 
""" + # Register with agent registry on first write (similar to LangGraph's put()) + if not self._registry_initialized: + registry_adapter = self._register_with_agent_registry() + if registry_adapter: + self._registry_initialized = True + try: content = json.dumps(data, ensure_ascii=False) self._dapr_client.save_state( @@ -553,14 +585,3 @@ def close(self) -> None: """Close the Dapr client if owned by this manager.""" if self._owns_client: self._dapr_client.close() - - def _register_with_agent_registry(self) -> None: - """Register this session manager with the agent registry.""" - try: - from dapr.ext.agent_core import AgentRegistryAdapter - - AgentRegistryAdapter.create_from_stack(registry=None) - except ImportError: - logger.debug("dapr-ext-agent_core not available, skipping registry registration") - except Exception as e: - logger.warning(f"Failed to register with agent registry: {e}") From e465671799971a9c47c599d87bf8f2bf08f5cfa2 Mon Sep 17 00:00:00 2001 From: Casper Nielsen Date: Tue, 3 Feb 2026 14:35:29 +0100 Subject: [PATCH 09/13] fix: naming for langgraph & remove irrelevant comments Signed-off-by: Casper Nielsen --- examples/langgraph-checkpointer/agent.py | 2 +- examples/strands-agent/agent.py | 23 ---------------- .../dapr/ext/agent_core/mapping/langgraph.py | 4 +-- .../dapr/ext/langgraph/dapr_checkpointer.py | 26 +++++++++---------- 4 files changed, 16 insertions(+), 39 deletions(-) diff --git a/examples/langgraph-checkpointer/agent.py b/examples/langgraph-checkpointer/agent.py index 8ea98ae82..8f29e3108 100644 --- a/examples/langgraph-checkpointer/agent.py +++ b/examples/langgraph-checkpointer/agent.py @@ -50,7 +50,7 @@ def assistant(state: MessagesState): ) builder.add_edge('tools', 'assistant') -memory = DaprCheckpointer(store_name='statestore', key_prefix='dapr') +memory = DaprCheckpointer(state_store_name='statestore', key_prefix='dapr') react_graph_memory = builder.compile(checkpointer=memory) config = {'configurable': {'thread_id': '1'}} diff --git a/examples/strands-agent/agent.py b/examples/strands-agent/agent.py index 40c12edf3..cad8f78e1 100644 --- a/examples/strands-agent/agent.py +++ b/examples/strands-agent/agent.py @@ -25,14 +25,12 @@ def get_weather(location: str) -> str: Returns: A description of the current weather """ - # Mock implementation for demonstration return f"The weather in {location} is sunny and 72°F" def run_agent_conversation(): """Run a Strands Agent with Dapr session persistence.""" - # Check for OpenAI API key openai_api_key = os.getenv('OPENAI_API_KEY') if not openai_api_key: print("❌ Error: OPENAI_API_KEY environment variable not set") @@ -43,47 +41,32 @@ def run_agent_conversation(): agent_id = 'weather-assistant' with DaprClient() as dapr_client: - # Create DaprSessionManager for distributed session storage session_manager = DaprSessionManager( session_id=session_id, state_store_name='statestore', dapr_client=dapr_client ) - # Create a Strands Agent with DaprSessionManager agent = Agent( - # LLM model configuration - API key read from OPENAI_API_KEY env var model=OpenAIModel(model_id='gpt-4o'), - - # System prompt system_prompt=( "You are a helpful weather assistant. " "You can check the weather for any location. " "Be concise and friendly in your responses." 
), - - # Tools available to the agent tools=[get_weather], - - # Agent metadata agent_id=agent_id, name='Weather Assistant', description='An AI assistant that helps users check the weather', - - # State management - custom state dict state={ 'role': 'Weather Assistant', 'goal': 'Help users get weather information', 'instructions': ['Be concise', 'Be friendly', 'Always use the get_weather tool'], 'max_iterations': 5, }, - - # Session manager for persistent storage - # This enables the agent to resume conversations across restarts session_manager=session_manager, ) - # User queries queries = [ "What's the weather in San Francisco?", "How about New York?", @@ -93,15 +76,9 @@ def run_agent_conversation(): print(f"👤 USER: {query}") try: - # Run the agent - this will: - # 1. Add the user message to the conversation - # 2. Call the LLM to generate a response - # 3. Execute any tool calls if needed - # 4. Persist everything through the session manager import asyncio response = asyncio.run(agent.invoke_async(query)) - # Extract the final response text if hasattr(response, 'content'): content = response.content elif hasattr(response, 'text'): diff --git a/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/langgraph.py b/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/langgraph.py index 4f2942d0c..3013c17de 100644 --- a/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/langgraph.py +++ b/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/langgraph.py @@ -88,7 +88,7 @@ def map_agent_metadata(self, agent: Any, schema_version: str) -> AgentMetadataSc role="Assistant", goal=system_prompt or "", instructions=[], - statestore=checkpointer.store_name if checkpointer else None, # type: ignore + statestore=checkpointer.state_store_name if checkpointer else None, # type: ignore system_prompt="", ), name=agent.get_name() if hasattr(agent, "get_name") else "", @@ -101,7 +101,7 @@ def map_agent_metadata(self, agent: Any, schema_version: str) -> AgentMetadataSc memory=MemoryMetadata( type="DaprCheckpointer", session_id=None, - statestore=checkpointer.store_name if checkpointer else None, # type: ignore + statestore=checkpointer.state_store_name if checkpointer else None, # type: ignore ), llm=LLMMetadata( client=llm_metadata.get('client', '') if llm_metadata else '', diff --git a/ext/dapr-ext-langgraph/dapr/ext/langgraph/dapr_checkpointer.py b/ext/dapr-ext-langgraph/dapr/ext/langgraph/dapr_checkpointer.py index 67b5cbffb..df2c471b0 100644 --- a/ext/dapr-ext-langgraph/dapr/ext/langgraph/dapr_checkpointer.py +++ b/ext/dapr-ext-langgraph/dapr/ext/langgraph/dapr_checkpointer.py @@ -42,8 +42,8 @@ class DaprCheckpointer(BaseCheckpointSaver[Checkpoint]): REGISTRY_KEY = 'dapr_checkpoint_registry' - def __init__(self, store_name: str, key_prefix: str): - self.store_name = store_name + def __init__(self, state_store_name: str, key_prefix: str): + self.state_store_name = state_store_name self.key_prefix = key_prefix self.serde = JsonPlusSerializer() self.client = DaprClient() @@ -141,14 +141,14 @@ def put( ) _, data = self.serde.dumps_typed(checkpoint_data) - self.client.save_state(store_name=self.store_name, key=checkpoint_key, value=data) + self.client.save_state(store_name=self.state_store_name, key=checkpoint_key, value=data) latest_pointer_key = ( f'checkpoint_latest:{storage_safe_thread_id}:{storage_safe_checkpoint_ns}' ) self.client.save_state( - store_name=self.store_name, key=latest_pointer_key, value=checkpoint_key + store_name=self.state_store_name, key=latest_pointer_key, value=checkpoint_key ) return 
next_config @@ -190,7 +190,7 @@ def put_writes( thread_id=thread_id, checkpoint_ns=checkpoint_ns, checkpoint_id=checkpoint_id ) - self.client.save_state(store_name=self.store_name, key=key, value=json.dumps(write_obj)) + self.client.save_state(store_name=self.state_store_name, key=key, value=json.dumps(write_obj)) checkpoint_key = self._make_safe_checkpoint_key( thread_id=thread_id, checkpoint_ns=checkpoint_ns, checkpoint_id=checkpoint_id @@ -201,11 +201,11 @@ def put_writes( ) self.client.save_state( - store_name=self.store_name, key=latest_pointer_key, value=checkpoint_key + store_name=self.state_store_name, key=latest_pointer_key, value=checkpoint_key ) def list(self, config: RunnableConfig) -> list[CheckpointTuple]: - reg_resp = self.client.get_state(store_name=self.store_name, key=self.REGISTRY_KEY) + reg_resp = self.client.get_state(store_name=self.state_store_name, key=self.REGISTRY_KEY) if not reg_resp.data: return [] @@ -213,7 +213,7 @@ def list(self, config: RunnableConfig) -> list[CheckpointTuple]: checkpoints: list[CheckpointTuple] = [] for key in keys: - cp_resp = self.client.get_state(store_name=self.store_name, key=key) + cp_resp = self.client.get_state(store_name=self.state_store_name, key=key) if not cp_resp.data: continue @@ -236,9 +236,9 @@ def list(self, config: RunnableConfig) -> list[CheckpointTuple]: def delete_thread(self, config: RunnableConfig) -> None: key = self._get_key(config) - self.client.delete_state(store_name=self.store_name, key=key) + self.client.delete_state(store_name=self.state_store_name, key=key) - reg_resp = self.client.get_state(store_name=self.store_name, key=self.REGISTRY_KEY) + reg_resp = self.client.get_state(store_name=self.state_store_name, key=self.REGISTRY_KEY) if not reg_resp.data: return @@ -247,7 +247,7 @@ def delete_thread(self, config: RunnableConfig) -> None: if key in registry: registry.remove(key) self.client.save_state( - store_name=self.store_name, + store_name=self.state_store_name, key=self.REGISTRY_KEY, value=json.dumps(registry), ) @@ -268,13 +268,13 @@ def get_tuple(self, config: RunnableConfig) -> Optional[CheckpointTuple]: ) # First we extract the latest checkpoint key - checkpoint_key = self.client.get_state(store_name=self.store_name, key=key) + checkpoint_key = self.client.get_state(store_name=self.state_store_name, key=key) if not checkpoint_key.data: return None # To then derive the checkpoint data checkpoint_data = self.client.get_state( - store_name=self.store_name, + store_name=self.state_store_name, # checkpoint_key.data can either be str or bytes key=checkpoint_key.data.decode() if isinstance(checkpoint_key.data, bytes) From 924f1e5bae92019b0b07b61f0ee4fe25c2fad9f4 Mon Sep 17 00:00:00 2001 From: Casper Nielsen Date: Tue, 3 Feb 2026 14:43:13 +0100 Subject: [PATCH 10/13] chore(format): ruff Signed-off-by: Casper Nielsen --- examples/strands-agent/agent.py | 63 ++++++++++++++++----------------- 1 file changed, 31 insertions(+), 32 deletions(-) diff --git a/examples/strands-agent/agent.py b/examples/strands-agent/agent.py index cad8f78e1..5a356cc04 100644 --- a/examples/strands-agent/agent.py +++ b/examples/strands-agent/agent.py @@ -18,41 +18,39 @@ @tool def get_weather(location: str) -> str: """Get the current weather for a location. - + Args: location: The city and state, e.g. 
"San Francisco, CA" - + Returns: A description of the current weather """ - return f"The weather in {location} is sunny and 72°F" + return f'The weather in {location} is sunny and 72°F' def run_agent_conversation(): """Run a Strands Agent with Dapr session persistence.""" - + openai_api_key = os.getenv('OPENAI_API_KEY') if not openai_api_key: - print("❌ Error: OPENAI_API_KEY environment variable not set") - print("💡 Set it with: export OPENAI_API_KEY=your-key-here") + print('❌ Error: OPENAI_API_KEY environment variable not set') + print('💡 Set it with: export OPENAI_API_KEY=your-key-here') return - + session_id = 'assistant-session-1' agent_id = 'weather-assistant' - + with DaprClient() as dapr_client: session_manager = DaprSessionManager( - session_id=session_id, - state_store_name='statestore', - dapr_client=dapr_client + session_id=session_id, state_store_name='statestore', dapr_client=dapr_client ) - + agent = Agent( model=OpenAIModel(model_id='gpt-4o'), system_prompt=( - "You are a helpful weather assistant. " - "You can check the weather for any location. " - "Be concise and friendly in your responses." + 'You are a helpful weather assistant. ' + 'You can check the weather for any location. ' + 'Be concise and friendly in your responses.' ), tools=[get_weather], agent_id=agent_id, @@ -66,39 +64,40 @@ def run_agent_conversation(): }, session_manager=session_manager, ) - + queries = [ "What's the weather in San Francisco?", - "How about New York?", + 'How about New York?', ] - + for query in queries: - print(f"👤 USER: {query}") - + print(f'👤 USER: {query}') + try: import asyncio + response = asyncio.run(agent.invoke_async(query)) - + if hasattr(response, 'content'): content = response.content elif hasattr(response, 'text'): content = response.text else: content = str(response) - - print(f"🤖 ASSISTANT: {content}") - + + print(f'🤖 ASSISTANT: {content}') + except Exception as e: - print(f"❌ Error: {e}") - print("💡 Tip: Make sure you have OPENAI_API_KEY set in your environment.") - print(" Or switch to a different model provider (Anthropic, Bedrock, etc.)") + print(f'❌ Error: {e}') + print('💡 Tip: Make sure you have OPENAI_API_KEY set in your environment.') + print(' Or switch to a different model provider (Anthropic, Bedrock, etc.)') break - + print() - - print("✅ Conversation complete!") - print("🔄 Run again to resume the conversation with full history from Dapr state store.") + + print('✅ Conversation complete!') + print('🔄 Run again to resume the conversation with full history from Dapr state store.') if __name__ == '__main__': - run_agent_conversation() \ No newline at end of file + run_agent_conversation() From 256de6c43ef050c937f9ac2df9d08b720b04728b Mon Sep 17 00:00:00 2001 From: Casper Nielsen Date: Tue, 3 Feb 2026 15:24:39 +0100 Subject: [PATCH 11/13] fix: comment out depending on agent_core as there's no pkg yet Signed-off-by: Casper Nielsen --- ext/dapr-ext-langgraph/setup.cfg | 2 +- ext/dapr-ext-strands/setup.cfg | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/ext/dapr-ext-langgraph/setup.cfg b/ext/dapr-ext-langgraph/setup.cfg index e7f0b94f8..cc1562278 100644 --- a/ext/dapr-ext-langgraph/setup.cfg +++ b/ext/dapr-ext-langgraph/setup.cfg @@ -25,7 +25,7 @@ packages = find_namespace: include_package_data = True install_requires = dapr >= 1.17.0.dev - dapr.ext.agent_core >= 1.17.0.dev + #dapr.ext.agent_core >= 1.17.0.dev langgraph >= 0.3.6 langchain >= 0.1.17 python-ulid >= 3.0.0 diff --git a/ext/dapr-ext-strands/setup.cfg b/ext/dapr-ext-strands/setup.cfg index 
5ccd58357..2eddc3f1c 100644 --- a/ext/dapr-ext-strands/setup.cfg +++ b/ext/dapr-ext-strands/setup.cfg @@ -25,6 +25,7 @@ packages = find_namespace: include_package_data = True install_requires = dapr >= 1.17.0.dev + #dapr.ext.agent_core >= 1.17.0.dev strands-agents strands-agents-tools python-ulid >= 3.0.0 From 4ac3b317b24d246caa42e9ed64f72da29945b7f2 Mon Sep 17 00:00:00 2001 From: Casper Nielsen Date: Tue, 3 Feb 2026 15:32:31 +0100 Subject: [PATCH 12/13] chore(format): ruff Signed-off-by: Casper Nielsen --- .../dapr/ext/agent_core/__init__.py | 35 ++- .../dapr/ext/agent_core/introspection.py | 26 +- .../dapr/ext/agent_core/mapping/__init__.py | 10 +- .../dapr/ext/agent_core/mapping/base.py | 18 +- .../ext/agent_core/mapping/dapr_agents.py | 92 ++++--- .../dapr/ext/agent_core/mapping/langgraph.py | 104 ++++---- .../dapr/ext/agent_core/mapping/strands.py | 229 ++++++++++-------- .../dapr/ext/agent_core/metadata.py | 59 ++--- .../dapr/ext/agent_core/types.py | 103 ++++---- .../scripts/generate_schema.py | 52 ++-- .../tests/test_base_mapper.py | 12 +- .../tests/test_strands_mapper.py | 76 +++--- .../tests/test_strands_metadata.py | 10 +- ext/dapr-ext-agent_core/tests/test_types.py | 4 +- .../dapr/ext/langgraph/dapr_checkpointer.py | 6 +- .../dapr/ext/strands/dapr_session_manager.py | 18 +- 16 files changed, 447 insertions(+), 407 deletions(-) diff --git a/ext/dapr-ext-agent_core/dapr/ext/agent_core/__init__.py b/ext/dapr-ext-agent_core/dapr/ext/agent_core/__init__.py index bcfb6fccb..08949a29a 100644 --- a/ext/dapr-ext-agent_core/dapr/ext/agent_core/__init__.py +++ b/ext/dapr-ext-agent_core/dapr/ext/agent_core/__init__.py @@ -1,17 +1,26 @@ -from .types import SupportedFrameworks, AgentMetadataSchema, AgentMetadata, LLMMetadata, PubSubMetadata, ToolMetadata, RegistryMetadata, MemoryMetadata +from .types import ( + SupportedFrameworks, + AgentMetadataSchema, + AgentMetadata, + LLMMetadata, + PubSubMetadata, + ToolMetadata, + RegistryMetadata, + MemoryMetadata, +) from .metadata import AgentRegistryAdapter from .introspection import find_agent_in_stack, detect_framework __all__ = [ - "SupportedFrameworks", - "AgentMetadataSchema", - "AgentMetadata", - "LLMMetadata", - "PubSubMetadata", - "ToolMetadata", - "RegistryMetadata", - "MemoryMetadata", - "AgentRegistryAdapter", - "find_agent_in_stack", - "detect_framework", -] \ No newline at end of file + 'SupportedFrameworks', + 'AgentMetadataSchema', + 'AgentMetadata', + 'LLMMetadata', + 'PubSubMetadata', + 'ToolMetadata', + 'RegistryMetadata', + 'MemoryMetadata', + 'AgentRegistryAdapter', + 'find_agent_in_stack', + 'detect_framework', +] diff --git a/ext/dapr-ext-agent_core/dapr/ext/agent_core/introspection.py b/ext/dapr-ext-agent_core/dapr/ext/agent_core/introspection.py index daeeecea7..f6219dd0f 100644 --- a/ext/dapr-ext-agent_core/dapr/ext/agent_core/introspection.py +++ b/ext/dapr-ext-agent_core/dapr/ext/agent_core/introspection.py @@ -22,27 +22,27 @@ def find_agent_in_stack() -> Optional[Any]: """ Walk up the call stack to find an agent/graph object. - + Currently supports: - LangGraph: CompiledStateGraph or SyncPregelLoop - Strands: DaprSessionManager - + Returns: The agent/graph object if found, None otherwise. 
""" # First, try to find in the stack for frame_info in inspect.stack(): frame_locals = frame_info.frame.f_locals - + # Look for 'self' in frame locals if 'self' in frame_locals: obj = frame_locals['self'] obj_type = type(obj).__name__ - + # LangGraph support - CompiledStateGraph if obj_type == 'CompiledStateGraph': return obj - + # Strands support - DaprSessionManager # Use gc to find the Agent that owns this session manager (similar to LangGraph checkpointer) if obj_type == 'DaprSessionManager': @@ -55,7 +55,7 @@ def find_agent_in_stack() -> Optional[Any]: return ref # Don't register bare DaprSessionManager - only register when Agent exists return None - + # If we found a checkpointer, use gc to find the graph that references it if obj_type == 'DaprCheckpointer': # Use garbage collector to find objects referencing this checkpointer @@ -64,35 +64,35 @@ def find_agent_in_stack() -> Optional[Any]: ref_type = type(ref).__name__ if ref_type == 'CompiledStateGraph': return ref - + return None def detect_framework(agent: Any) -> Optional[str]: """ Detect the framework type from an agent object. - + Args: agent: The agent object to inspect. - + Returns: Framework name string if detected, None otherwise. """ agent_type = type(agent).__name__ agent_module = type(agent).__module__ - + # LangGraph if agent_type == 'CompiledStateGraph' or 'langgraph' in agent_module: return 'langgraph' - + # Dapr Agents if 'dapr_agents' in agent_module: return 'dapr_agents' - + # Strands - detect both Agent class and DaprSessionManager if agent_type == 'Agent' and 'strands' in agent_module: return 'strands' if agent_type == 'DaprSessionManager': return 'strands' - + return None diff --git a/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/__init__.py b/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/__init__.py index 086005e79..18d4498b0 100644 --- a/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/__init__.py +++ b/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/__init__.py @@ -4,8 +4,8 @@ from .strands import StrandsMapper __all__ = [ - "BaseAgentMapper", - "DaprAgentsMapper", - "LangGraphMapper", - "StrandsMapper", -] \ No newline at end of file + 'BaseAgentMapper', + 'DaprAgentsMapper', + 'LangGraphMapper', + 'StrandsMapper', +] diff --git a/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/base.py b/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/base.py index a04568773..1d2d7c2ea 100644 --- a/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/base.py +++ b/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/base.py @@ -18,23 +18,23 @@ class BaseAgentMapper(ABC): """Abstract base class for agent metadata mappers. - + Provides common functionality for extracting metadata from different agent frameworks (Strands, LangGraph, Dapr Agents). """ - + @staticmethod def _extract_provider(module_name: str) -> str: """Extract provider name from module path. - + Args: module_name: Python module name (e.g., 'langchain_openai.chat_models') - + Returns: Provider identifier (e.g., 'openai', 'anthropic', 'azure_openai') """ module_lower = module_name.lower() - + # Check more specific providers first if 'vertexai' in module_lower: return 'vertexai' @@ -52,17 +52,17 @@ def _extract_provider(module_name: str) -> str: return 'google' elif 'cohere' in module_lower: return 'cohere' - + return 'unknown' - + @abstractmethod def map_agent_metadata(self, agent: Any, schema_version: str) -> AgentMetadataSchema: """Map agent to standardized metadata schema. 
- + Args: agent: Framework-specific agent instance schema_version: Schema version to use - + Returns: AgentMetadataSchema with extracted metadata """ diff --git a/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/dapr_agents.py b/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/dapr_agents.py index 59b26ef51..6ac37933c 100644 --- a/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/dapr_agents.py +++ b/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/dapr_agents.py @@ -1,10 +1,17 @@ - from datetime import datetime, timezone import json import logging from typing import Any from dapr.ext.agent_core.mapping.base import BaseAgentMapper -from dapr.ext.agent_core.types import AgentMetadata, AgentMetadataSchema, LLMMetadata, MemoryMetadata, PubSubMetadata, RegistryMetadata, ToolMetadata +from dapr.ext.agent_core.types import ( + AgentMetadata, + AgentMetadataSchema, + LLMMetadata, + MemoryMetadata, + PubSubMetadata, + RegistryMetadata, + ToolMetadata, +) logger = logging.getLogger(__name__) @@ -14,62 +21,67 @@ def __init__(self) -> None: pass def map_agent_metadata(self, agent: Any, schema_version: str) -> AgentMetadataSchema: - profile = getattr(agent, "profile", None) - memory = getattr(agent, "memory", None) - pubsub = getattr(agent, "pubsub", None) - llm = getattr(agent, "llm", None) - registry = getattr(agent, "_registry", None) - execution = getattr(agent, "execution", None) + profile = getattr(agent, 'profile', None) + memory = getattr(agent, 'memory', None) + pubsub = getattr(agent, 'pubsub', None) + llm = getattr(agent, 'llm', None) + registry = getattr(agent, '_registry', None) + execution = getattr(agent, 'execution', None) return AgentMetadataSchema( schema_version=schema_version, agent=AgentMetadata( - appid=getattr(agent, "appid", ""), + appid=getattr(agent, 'appid', ''), type=type(agent).__name__, orchestrator=False, - role=getattr(profile, "role", "") if profile else "", - goal=getattr(profile, "goal", "") if profile else "", - instructions=getattr(profile, "instructions", None) if profile else [], - statestore=getattr(memory, "store_name", "") if memory else "", - system_prompt=getattr(profile, "system_prompt", "") if profile else "", + role=getattr(profile, 'role', '') if profile else '', + goal=getattr(profile, 'goal', '') if profile else '', + instructions=getattr(profile, 'instructions', None) if profile else [], + statestore=getattr(memory, 'store_name', '') if memory else '', + system_prompt=getattr(profile, 'system_prompt', '') if profile else '', ), - name=getattr(agent, "name", ""), + name=getattr(agent, 'name', ''), registered_at=datetime.now(timezone.utc).isoformat(), pubsub=PubSubMetadata( - name=getattr(pubsub, "pubsub_name", "") if pubsub else "", - broadcast_topic=getattr(pubsub, "broadcast_topic", None) if pubsub else None, - agent_topic=getattr(pubsub, "agent_topic", None) if pubsub else None, + name=getattr(pubsub, 'pubsub_name', '') if pubsub else '', + broadcast_topic=getattr(pubsub, 'broadcast_topic', None) if pubsub else None, + agent_topic=getattr(pubsub, 'agent_topic', None) if pubsub else None, ), memory=MemoryMetadata( - type=type(memory).__name__ if memory else "", - session_id=getattr(memory, "session_id", None) if memory else None, - statestore=getattr(memory, "store_name", None) if memory else None, + type=type(memory).__name__ if memory else '', + session_id=getattr(memory, 'session_id', None) if memory else None, + statestore=getattr(memory, 'store_name', None) if memory else None, ), llm=LLMMetadata( - client=type(llm).__name__ if llm 
else "", - provider=getattr(llm, "provider", "unknown") if llm else "unknown", - api=getattr(llm, "api", "unknown") if llm else "unknown", - model=getattr(llm, "model", "unknown") if llm else "unknown", - component_name=getattr(llm, "component_name", None) if llm else None, - base_url=getattr(llm, "base_url", None) if llm else None, - azure_endpoint=getattr(llm, "azure_endpoint", None) if llm else None, - azure_deployment=getattr(llm, "azure_deployment", None) if llm else None, - prompt_template=type(getattr(llm, "prompt_template", None)).__name__ if llm and getattr(llm, "prompt_template", None) else None, + client=type(llm).__name__ if llm else '', + provider=getattr(llm, 'provider', 'unknown') if llm else 'unknown', + api=getattr(llm, 'api', 'unknown') if llm else 'unknown', + model=getattr(llm, 'model', 'unknown') if llm else 'unknown', + component_name=getattr(llm, 'component_name', None) if llm else None, + base_url=getattr(llm, 'base_url', None) if llm else None, + azure_endpoint=getattr(llm, 'azure_endpoint', None) if llm else None, + azure_deployment=getattr(llm, 'azure_deployment', None) if llm else None, + prompt_template=type(getattr(llm, 'prompt_template', None)).__name__ + if llm and getattr(llm, 'prompt_template', None) + else None, ), registry=RegistryMetadata( - statestore=getattr(getattr(registry, "store", None), "store_name", None) if registry else None, - name=getattr(registry, "team_name", None) if registry else None, + statestore=getattr(getattr(registry, 'store', None), 'store_name', None) + if registry + else None, + name=getattr(registry, 'team_name', None) if registry else None, ), tools=[ ToolMetadata( - tool_name=getattr(tool, "name", ""), - tool_description=getattr(tool, "description", ""), - tool_args=json.dumps(getattr(tool, "args_schema", {})) - if hasattr(tool, "args_schema") else "{}", + tool_name=getattr(tool, 'name', ''), + tool_description=getattr(tool, 'description', ''), + tool_args=json.dumps(getattr(tool, 'args_schema', {})) + if hasattr(tool, 'args_schema') + else '{}', ) - for tool in getattr(agent, "tools", []) + for tool in getattr(agent, 'tools', []) ], - max_iterations=getattr(execution, "max_iterations", None) if execution else None, - tool_choice=getattr(execution, "tool_choice", None) if execution else None, - agent_metadata=getattr(agent, "agent_metadata", None), + max_iterations=getattr(execution, 'max_iterations', None) if execution else None, + tool_choice=getattr(execution, 'tool_choice', None) if execution else None, + agent_metadata=getattr(agent, 'agent_metadata', None), ) diff --git a/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/langgraph.py b/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/langgraph.py index 3013c17de..9de2cbd94 100644 --- a/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/langgraph.py +++ b/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/langgraph.py @@ -1,9 +1,16 @@ - from datetime import datetime, timezone import logging from typing import TYPE_CHECKING, Any, Dict, Optional from dapr.ext.agent_core.mapping.base import BaseAgentMapper -from dapr.ext.agent_core.types import AgentMetadata, AgentMetadataSchema, LLMMetadata, MemoryMetadata, PubSubMetadata, RegistryMetadata, ToolMetadata +from dapr.ext.agent_core.types import ( + AgentMetadata, + AgentMetadataSchema, + LLMMetadata, + MemoryMetadata, + PubSubMetadata, + RegistryMetadata, + ToolMetadata, +) from langgraph.pregel._read import PregelNode if TYPE_CHECKING: @@ -11,58 +18,63 @@ logger = logging.getLogger(__name__) + class 
LangGraphMapper(BaseAgentMapper): def __init__(self) -> None: pass def map_agent_metadata(self, agent: Any, schema_version: str) -> AgentMetadataSchema: + introspected_vars: Dict[str, Any] = vars(agent) # type: ignore - introspected_vars: Dict[str, Any] = vars(agent) # type: ignore - - nodes: Dict[str, object] = introspected_vars.get("nodes", {}) # type: ignore + nodes: Dict[str, object] = introspected_vars.get('nodes', {}) # type: ignore tools: list[Dict[str, Any]] = [] llm_metadata: Optional[Dict[str, Any]] = None system_prompt: Optional[str] = None - for node_name, obj in nodes.items(): # type: ignore - if node_name == "__start__": + for node_name, obj in nodes.items(): # type: ignore + if node_name == '__start__': # We don't want to process the start node continue if isinstance(obj, PregelNode): node_vars = vars(obj) - if "bound" in node_vars.keys(): - bound = node_vars["bound"] - + if 'bound' in node_vars.keys(): + bound = node_vars['bound'] + # Check if it's a ToolNode - if hasattr(bound, "_tools_by_name"): - tools_by_name = getattr(bound, "_tools_by_name", {}) - tools.extend([ - { - "name": name, - "description": getattr(tool, "description", ""), - "args_schema": getattr(tool, "args_schema", {}), # TODO: See if we can extract the pydantic model - } - for name, tool in tools_by_name.items() - ]) - + if hasattr(bound, '_tools_by_name'): + tools_by_name = getattr(bound, '_tools_by_name', {}) + tools.extend( + [ + { + 'name': name, + 'description': getattr(tool, 'description', ''), + 'args_schema': getattr( + tool, 'args_schema', {} + ), # TODO: See if we can extract the pydantic model + } + for name, tool in tools_by_name.items() + ] + ) + # Check if it's an assistant RunnableCallable - elif type(bound).__name__ == "RunnableCallable": + elif type(bound).__name__ == 'RunnableCallable': logger.info(f"Node '{node_name}' is a RunnableCallable") - - func = getattr(bound, "func", None) - if func and hasattr(func, "__globals__"): + + func = getattr(bound, 'func', None) + if func and hasattr(func, '__globals__'): func_globals = func.__globals__ - + for _, global_value in func_globals.items(): var_type = type(global_value).__name__ var_module = type(global_value).__module__ - + if 'chat' in var_type.lower(): - model = getattr(global_value, 'model_name', None) or \ - getattr(global_value, 'model', None) - + model = getattr(global_value, 'model_name', None) or getattr( + global_value, 'model', None + ) + if model and not llm_metadata: llm_metadata = { 'client': var_type, @@ -70,43 +82,43 @@ def map_agent_metadata(self, agent: Any, schema_version: str) -> AgentMetadataSc 'model': model, 'base_url': getattr(global_value, 'base_url', None), } - + # Look for system message elif var_type == 'SystemMessage': content = getattr(global_value, 'content', None) if content and not system_prompt: system_prompt = content - checkpointer: Optional["DaprCheckpointer"] = introspected_vars.get("checkpointer", None) # type: ignore + checkpointer: Optional['DaprCheckpointer'] = introspected_vars.get('checkpointer', None) # type: ignore return AgentMetadataSchema( schema_version=schema_version, agent=AgentMetadata( - appid="", + appid='', type=type(agent).__name__, orchestrator=False, - role="Assistant", - goal=system_prompt or "", + role='Assistant', + goal=system_prompt or '', instructions=[], - statestore=checkpointer.state_store_name if checkpointer else None, # type: ignore - system_prompt="", + statestore=checkpointer.state_store_name if checkpointer else None, # type: ignore + system_prompt='', ), - 
name=agent.get_name() if hasattr(agent, "get_name") else "", + name=agent.get_name() if hasattr(agent, 'get_name') else '', registered_at=datetime.now(timezone.utc).isoformat(), pubsub=PubSubMetadata( - name="", + name='', broadcast_topic=None, agent_topic=None, ), memory=MemoryMetadata( - type="DaprCheckpointer", + type='DaprCheckpointer', session_id=None, - statestore=checkpointer.state_store_name if checkpointer else None, # type: ignore + statestore=checkpointer.state_store_name if checkpointer else None, # type: ignore ), llm=LLMMetadata( client=llm_metadata.get('client', '') if llm_metadata else '', provider=llm_metadata.get('provider', 'unknown') if llm_metadata else 'unknown', - api="chat", + api='chat', model=llm_metadata.get('model', 'unknown') if llm_metadata else 'unknown', component_name=None, base_url=llm_metadata.get('base_url') if llm_metadata else None, @@ -120,13 +132,13 @@ def map_agent_metadata(self, agent: Any, schema_version: str) -> AgentMetadataSc ), tools=[ ToolMetadata( - tool_name=tool.get("name", ""), - tool_description=tool.get("description", ""), - tool_args="", + tool_name=tool.get('name', ''), + tool_description=tool.get('description', ''), + tool_args='', ) for tool in tools ], max_iterations=1, - tool_choice="auto", + tool_choice='auto', agent_metadata=None, ) diff --git a/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/strands.py b/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/strands.py index 14ac53c23..57658522e 100644 --- a/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/strands.py +++ b/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/strands.py @@ -21,29 +21,29 @@ class StrandsMapper(BaseAgentMapper): def __init__(self) -> None: pass - + def _is_strands_agent(self, agent: Any) -> bool: """Check if agent is an actual Strands Agent (not just session manager).""" agent_type = type(agent).__name__ agent_module = type(agent).__module__ return agent_type == 'Agent' and 'strands' in agent_module - + def _extract_from_strands_agent(self, agent: Any) -> Dict[str, Any]: """Extract metadata from a real Strands Agent. 
- + Args: agent: A strands.Agent instance - + Returns: Dictionary with extracted metadata """ metadata = {} - + # Basic agent info metadata['agent_id'] = getattr(agent, 'agent_id', None) or getattr(agent, 'name', 'agent') metadata['name'] = getattr(agent, 'name', None) metadata['description'] = getattr(agent, 'description', None) - + # System prompt system_prompt = getattr(agent, 'system_prompt', None) if system_prompt: @@ -52,7 +52,7 @@ def _extract_from_strands_agent(self, agent: Any) -> Dict[str, Any]: elif isinstance(system_prompt, list): # Join list of content blocks metadata['system_prompt'] = ' '.join(str(block) for block in system_prompt) - + # Agent state (custom metadata) state = getattr(agent, 'state', {}) try: @@ -71,17 +71,21 @@ def _extract_from_strands_agent(self, agent: Any) -> Dict[str, Any]: metadata['role'] = metadata.get('name', 'Agent') metadata['goal'] = metadata.get('description', '') metadata['instructions'] = [] - + # LLM Model model = getattr(agent, 'model', None) if model: model_type = type(model).__name__ - + # Check if model has a config config = getattr(model, 'config', None) - + # Try multiple possible attribute names for the model ID - model_id = getattr(model, 'model_id', None) or getattr(model, 'model', None) or getattr(model, 'name', None) + model_id = ( + getattr(model, 'model_id', None) + or getattr(model, 'model', None) + or getattr(model, 'name', None) + ) if config: if isinstance(config, dict) and 'model_id' in config: model_id = config['model_id'] @@ -89,24 +93,24 @@ def _extract_from_strands_agent(self, agent: Any) -> Dict[str, Any]: model_id = config.model elif hasattr(config, 'model_id'): model_id = config.model_id - + if model_id is None: model_id = 'unknown' - + # Extract provider from model module or type model_module = type(model).__module__ provider = self._extract_provider(model_module) - + # Fallback: parse from model type name if provider still unknown if provider == 'unknown': provider = model_type.replace('Model', '').lower() - + metadata['llm'] = { 'provider': provider, 'model': str(model_id), 'model_type': model_type, } - + # Tools - extract from tool_registry.registry # In Strands, tools are stored in agent.tool_registry.registry as AgentTool objects tools_metadata = [] @@ -117,89 +121,98 @@ def _extract_from_strands_agent(self, agent: Any) -> Dict[str, Any]: for tool_name, agent_tool in registry_dict.items(): tool_info = { 'name': tool_name, - 'description': getattr(agent_tool, 'description', '') or getattr(agent_tool, '__doc__', ''), + 'description': getattr(agent_tool, 'description', '') + or getattr(agent_tool, '__doc__', ''), } tools_metadata.append(tool_info) - + metadata['tools'] = tools_metadata - + # Session manager - stored as _session_manager in Strands Agent session_manager = getattr(agent, '_session_manager', None) if session_manager and type(session_manager).__name__ == 'DaprSessionManager': metadata['session_manager'] = session_manager metadata['state_store'] = getattr(session_manager, 'state_store_name', None) metadata['session_id'] = getattr(session_manager, '_session_id', None) - + return metadata - def _get_session_agent(self, session_manager: "DaprSessionManager", session_id: str) -> Optional["SessionAgent"]: + def _get_session_agent( + self, session_manager: 'DaprSessionManager', session_id: str + ) -> Optional['SessionAgent']: """ Get the primary SessionAgent from a session. - + Reads the session manifest to find agents, and returns the first one. 
- + Args: session_manager: The DaprSessionManager instance session_id: Session ID to read from - + Returns: SessionAgent if found, None otherwise """ try: # Use the session manager's method to get the manifest key manifest_key = session_manager._get_manifest_key(session_id) - logger.info(f"Reading manifest from key: {manifest_key}") + logger.info(f'Reading manifest from key: {manifest_key}') manifest = session_manager._read_state(manifest_key) - - logger.info(f"Manifest content: {manifest}") - + + logger.info(f'Manifest content: {manifest}') + if not manifest or 'agents' not in manifest or not manifest['agents']: - logger.warning(f"No agents found in session {session_id} - session may not have agents yet. Use fallback metadata.") + logger.warning( + f'No agents found in session {session_id} - session may not have agents yet. Use fallback metadata.' + ) return None - + # Get the first agent (primary agent) agent_id = manifest['agents'][0] logger.info(f"Found agent '{agent_id}' in session {session_id}") session_agent = session_manager.read_agent(session_id, agent_id) - + if session_agent: - logger.info(f"Successfully loaded SessionAgent '{agent_id}' with state keys: {list(session_agent.state.keys()) if session_agent.state else []}") + logger.info( + f"Successfully loaded SessionAgent '{agent_id}' with state keys: {list(session_agent.state.keys()) if session_agent.state else []}" + ) else: logger.warning(f"read_agent returned None for agent_id '{agent_id}'") - + return session_agent - + except Exception as e: - logger.error(f"Failed to read SessionAgent from session {session_id}: {e}", exc_info=True) + logger.error( + f'Failed to read SessionAgent from session {session_id}: {e}', exc_info=True + ) return None def _extract_llm_metadata(self, agent_state: Dict[str, Any]) -> Optional[LLMMetadata]: """ Extract LLM metadata from SessionAgent state. - + Looks for llm_component, conversation_provider, or llm_config in state. - + Args: agent_state: The SessionAgent.state dictionary - + Returns: LLMMetadata if LLM configuration found, None otherwise """ # Check for various LLM configuration keys llm_component = agent_state.get('llm_component') or agent_state.get('conversation_provider') llm_config = agent_state.get('llm_config', {}) - + if not llm_component and not llm_config: return None - + # Extract LLM details provider = llm_config.get('provider', 'unknown') model = llm_config.get('model', 'unknown') - + return LLMMetadata( - client="dapr_conversation", + client='dapr_conversation', provider=provider, - api="conversation", + api='conversation', model=model, component_name=llm_component, ) @@ -207,107 +220,117 @@ def _extract_llm_metadata(self, agent_state: Dict[str, Any]) -> Optional[LLMMeta def _extract_tools_metadata(self, agent_state: Dict[str, Any]) -> List[ToolMetadata]: """ Extract tools metadata from SessionAgent state. 
- + Args: agent_state: The SessionAgent.state dictionary - + Returns: List of ToolMetadata (empty list if no tools configured) """ tools_list = agent_state.get('tools') - + if not tools_list or not isinstance(tools_list, list): return [] - + tool_metadata_list: List[ToolMetadata] = [] for tool in tools_list: if isinstance(tool, dict): - tool_metadata_list.append(ToolMetadata( - tool_name=str(tool.get('name', 'unknown')), - tool_description=str(tool.get('description', '')), - tool_args=str(tool.get('args', {})), - )) + tool_metadata_list.append( + ToolMetadata( + tool_name=str(tool.get('name', 'unknown')), + tool_description=str(tool.get('description', '')), + tool_args=str(tool.get('args', {})), + ) + ) elif hasattr(tool, 'name'): # Handle tool objects - tool_metadata_list.append(ToolMetadata( - tool_name=str(getattr(tool, 'name', 'unknown')), - tool_description=str(getattr(tool, 'description', '')), - tool_args=str(getattr(tool, 'args', {})), - )) - + tool_metadata_list.append( + ToolMetadata( + tool_name=str(getattr(tool, 'name', 'unknown')), + tool_description=str(getattr(tool, 'description', '')), + tool_args=str(getattr(tool, 'args', {})), + ) + ) + return tool_metadata_list def map_agent_metadata(self, agent: Any, schema_version: str) -> AgentMetadataSchema: """ Map Strands Agent or DaprSessionManager to AgentMetadataSchema. - + Handles two cases: 1. Real Strands Agent (strands.Agent) - extracts from agent properties 2. DaprSessionManager - extracts from stored SessionAgent in state - + Args: agent: Either a strands.Agent or DaprSessionManager instance schema_version: Version of the schema - + Returns: AgentMetadataSchema with extracted metadata """ - + # Case 1: Real Strands Agent from the SDK if self._is_strands_agent(agent): extracted = self._extract_from_strands_agent(agent) - + agent_id = extracted.get('agent_id', 'agent') agent_name = extracted.get('name', agent_id) session_id = extracted.get('session_id') - + # Build full agent name if session_id: - full_name = f"strands-{session_id}-{agent_id}" + full_name = f'strands-{session_id}-{agent_id}' else: - full_name = f"strands-{agent_id}" - + full_name = f'strands-{agent_id}' + # Extract LLM metadata llm_info = extracted.get('llm') if llm_info: llm_metadata = LLMMetadata( - client=llm_info.get('model_type', 'unknown'), # OpenAIModel, AnthropicModel, etc. + client=llm_info.get( + 'model_type', 'unknown' + ), # OpenAIModel, AnthropicModel, etc. provider=llm_info.get('provider', 'unknown'), # openai, anthropic, etc. - api="chat", # Strands uses chat-based APIs + api='chat', # Strands uses chat-based APIs model=llm_info.get('model', 'unknown'), # gpt-4o, claude-3-opus, etc. 
component_name=None, ) else: llm_metadata = None - + # Extract tools metadata tools_list = extracted.get('tools', []) tools_metadata = [] for tool in tools_list: if isinstance(tool, dict): - tools_metadata.append(ToolMetadata( - tool_name=tool.get('name', 'unknown'), - tool_description=tool.get('description', ''), - tool_args='', - )) - + tools_metadata.append( + ToolMetadata( + tool_name=tool.get('name', 'unknown'), + tool_description=tool.get('description', ''), + tool_args='', + ) + ) + # Get session manager info for memory and statestore state_store_name = extracted.get('state_store') session_id_value = extracted.get('session_id') - + # Determine memory type and populate session info has_session_manager = extracted.get('session_manager') is not None - memory_type = "DaprSessionManager" if has_session_manager else "InMemory" - + memory_type = 'DaprSessionManager' if has_session_manager else 'InMemory' + return AgentMetadataSchema( schema_version=schema_version, agent=AgentMetadata( - appid="", - type="Strands", + appid='', + type='Strands', orchestrator=False, role=extracted.get('role', agent_name), goal=extracted.get('goal', ''), - instructions=extracted.get('instructions') if extracted.get('instructions') else None, + instructions=extracted.get('instructions') + if extracted.get('instructions') + else None, statestore=state_store_name, # Set from session manager system_prompt=extracted.get('system_prompt'), ), @@ -321,11 +344,13 @@ def map_agent_metadata(self, agent: Any, schema_version: str) -> AgentMetadataSc ), llm=llm_metadata, tools=tools_metadata, # Already a list, could be empty [] - tool_choice="auto" if tools_metadata else None, # Set tool_choice based on whether tools exist + tool_choice='auto' + if tools_metadata + else None, # Set tool_choice based on whether tools exist max_iterations=extracted.get('max_iterations'), registry=RegistryMetadata( name=None, - team="default", + team='default', ), agent_metadata={ 'framework': 'strands', @@ -334,22 +359,22 @@ def map_agent_metadata(self, agent: Any, schema_version: str) -> AgentMetadataSc 'state_store': state_store_name, }, ) - + # Case 2: DaprSessionManager (legacy approach) - session_manager: "DaprSessionManager" = agent - + session_manager: 'DaprSessionManager' = agent + # Extract session manager info state_store_name = getattr(session_manager, 'state_store_name', None) session_id = getattr(session_manager, '_session_id', None) - + # Try to get the primary SessionAgent session_agent = self._get_session_agent(session_manager, session_id) if session_id else None - + # Extract agent-specific metadata if SessionAgent exists if session_agent: agent_id = session_agent.agent_id agent_state = session_agent.state or {} - + # Extract from state system_prompt = agent_state.get('system_prompt') role = str(agent_state.get('role', agent_id)) @@ -358,33 +383,33 @@ def map_agent_metadata(self, agent: Any, schema_version: str) -> AgentMetadataSc instructions = list(instructions_raw) if isinstance(instructions_raw, list) else [] tool_choice = agent_state.get('tool_choice') max_iterations = agent_state.get('max_iterations') - + # Extract LLM metadata llm_metadata = self._extract_llm_metadata(agent_state) - + # Extract tools metadata tools_metadata = self._extract_tools_metadata(agent_state) - - agent_name = f"strands-{session_id}-{agent_id}" + + agent_name = f'strands-{session_id}-{agent_id}' else: # Fallback when no SessionAgent found agent_id = None agent_state = {} system_prompt = None - role = "Session Manager" - goal = "Manages 
multi-agent sessions with distributed state storage" + role = 'Session Manager' + goal = 'Manages multi-agent sessions with distributed state storage' instructions = [] tool_choice = None max_iterations = None llm_metadata = None tools_metadata = [] - agent_name = f"strands-session-{session_id}" if session_id else "strands-session" - + agent_name = f'strands-session-{session_id}' if session_id else 'strands-session' + return AgentMetadataSchema( schema_version=schema_version, agent=AgentMetadata( - appid="", - type="Strands", + appid='', + type='Strands', orchestrator=False, role=role, goal=goal, @@ -396,7 +421,7 @@ def map_agent_metadata(self, agent: Any, schema_version: str) -> AgentMetadataSc registered_at=datetime.now(timezone.utc).isoformat(), pubsub=None, memory=MemoryMetadata( - type="DaprSessionManager", + type='DaprSessionManager', session_id=session_id, statestore=state_store_name, ), @@ -406,7 +431,7 @@ def map_agent_metadata(self, agent: Any, schema_version: str) -> AgentMetadataSc max_iterations=max_iterations, registry=RegistryMetadata( name=None, - team="default", + team='default', ), agent_metadata={ 'framework': 'strands', diff --git a/ext/dapr-ext-agent_core/dapr/ext/agent_core/metadata.py b/ext/dapr-ext-agent_core/dapr/ext/agent_core/metadata.py index a9844dd41..7816854f8 100644 --- a/ext/dapr-ext-agent_core/dapr/ext/agent_core/metadata.py +++ b/ext/dapr-ext-agent_core/dapr/ext/agent_core/metadata.py @@ -16,10 +16,7 @@ from dapr_agents.agents.configs import ( AgentRegistryConfig, ) -from dapr_agents.storage.daprstores.stateservice import ( - StateStoreError, - StateStoreService -) +from dapr_agents.storage.daprstores.stateservice import StateStoreError, StateStoreService from dapr.ext.agent_core import SupportedFrameworks, AgentMetadataSchema from dapr.ext.agent_core.introspection import find_agent_in_stack, detect_framework @@ -35,23 +32,23 @@ def create_from_stack( ) -> Optional['AgentRegistryAdapter']: """ Auto-detect and create an AgentRegistryAdapter by walking the call stack. - + Args: registry: Optional registry configuration. If None, will attempt auto-discovery. - + Returns: AgentRegistryAdapter instance if agent found, None otherwise. """ agent = find_agent_in_stack() if not agent: return None - + framework = detect_framework(agent) if not framework: return None - + return cls(registry=registry, framework=framework, agent=agent) - + def __init__(self, registry: Optional[AgentRegistryConfig], framework: str, agent: Any) -> None: self._registry = registry @@ -62,39 +59,34 @@ def __init__(self, registry: Optional[AgentRegistryConfig], framework: str, agen if self._registry is None: components: Sequence[RegisteredComponents] = resp.registered_components for component in components: - if ( - "state" in component.type - and component.name == "agent-registry" - ): + if 'state' in component.type and component.name == 'agent-registry': self._registry = AgentRegistryConfig( store=StateStoreService(store_name=component.name), - team_name="default", + team_name='default', ) except TimeoutError: - logger.warning( - "Dapr sidecar not responding; proceeding without auto-configuration." 
- ) + logger.warning('Dapr sidecar not responding; proceeding without auto-configuration.') if self._registry is None: return self.registry_state: StateStoreService = self._registry.store - self._registry_prefix: str = "agents:" - self._meta: Dict[str, str] = {"contentType": "application/json"} + self._registry_prefix: str = 'agents:' + self._meta: Dict[str, str] = {'contentType': 'application/json'} self._max_etag_attempts: int = 10 self._save_options: Dict[str, Any] = { - "concurrency": Concurrency.first_write, - "consistency": Consistency.strong, + 'concurrency': Concurrency.first_write, + 'consistency': Consistency.strong, } if not self._can_handle(framework): raise ValueError(f"Adapter cannot handle framework '{framework}'") - + _metadata = self._extract_metadata(agent) # We need to handle some null values here to avoid issues during registration - if _metadata.agent.appid == "": - _metadata.agent.appid = self.appid or "" + if _metadata.agent.appid == '': + _metadata.agent.appid = self.appid or '' if _metadata.registry: if _metadata.registry.name is None: @@ -106,21 +98,20 @@ def __init__(self, registry: Optional[AgentRegistryConfig], framework: str, agen def _can_handle(self, framework: str) -> bool: """Check if this adapter can handle the given Agent.""" - + for fw in SupportedFrameworks: if framework.lower() == fw.value.lower(): self._framework = fw return True return False - def _extract_metadata(self, agent: Any) -> AgentMetadataSchema: """Extract metadata from the given Agent.""" try: - schema_version = version("dapr-ext-agent_core") + schema_version = version('dapr-ext-agent_core') except PackageNotFoundError: - schema_version = "edge" + schema_version = 'edge' framework_mappers = { SupportedFrameworks.DAPR_AGENTS: dapr.ext.agent_core.mapping.DaprAgentsMapper().map_agent_metadata, @@ -144,7 +135,7 @@ def _register(self, metadata: AgentMetadataSchema) -> None: team: Team override; falls back to configured default team. """ if not metadata.registry: - raise ValueError("Registry metadata is required for registration") + raise ValueError('Registry metadata is required for registration') self._upsert_agent_entry( team=metadata.registry.name, @@ -171,12 +162,10 @@ def _mutate_registry_entry( StateStoreError: If the mutation fails after retries due to contention. 
""" if not self.registry_state: - raise RuntimeError( - "registry_state must be provided to mutate the agent registry" - ) + raise RuntimeError('registry_state must be provided to mutate the agent registry') - key = f"agents:{team or 'default'}" - self._meta["partitionKey"] = key + key = f'agents:{team or "default"}' + self._meta['partitionKey'] = key attempts = max_attempts or self._max_etag_attempts self._ensure_registry_initialized(key=key, meta=self._meta) @@ -244,7 +233,7 @@ def mutator(current: Dict[str, Any]) -> Optional[Dict[str, Any]]: current[agent_name] = agent_metadata return current - logger.debug("Upserting agent '%s' in team '%s' registry", agent_name, team or "default") + logger.debug("Upserting agent '%s' in team '%s' registry", agent_name, team or 'default') self._mutate_registry_entry( team=team, mutator=mutator, diff --git a/ext/dapr-ext-agent_core/dapr/ext/agent_core/types.py b/ext/dapr-ext-agent_core/dapr/ext/agent_core/types.py index 4a1b5f398..ed10db487 100644 --- a/ext/dapr-ext-agent_core/dapr/ext/agent_core/types.py +++ b/ext/dapr-ext-agent_core/dapr/ext/agent_core/types.py @@ -2,27 +2,26 @@ from typing import Any, Dict, List, Optional from pydantic import BaseModel, Field + class SupportedFrameworks(StrEnum): - DAPR_AGENTS = "dapr-agents" - LANGGRAPH = "langgraph" - STRANDS = "strands" + DAPR_AGENTS = 'dapr-agents' + LANGGRAPH = 'langgraph' + STRANDS = 'strands' class AgentMetadata(BaseModel): """Metadata about an agent's configuration and capabilities.""" - appid: str = Field(..., description="Dapr application ID of the agent") - type: str = Field(..., description="Type of the agent (e.g., standalone, durable)") - orchestrator: bool = Field( - False, description="Indicates if the agent is an orchestrator" - ) - role: str = Field(default="", description="Role of the agent") - goal: str = Field(default="", description="High-level objective of the agent") + appid: str = Field(..., description='Dapr application ID of the agent') + type: str = Field(..., description='Type of the agent (e.g., standalone, durable)') + orchestrator: bool = Field(False, description='Indicates if the agent is an orchestrator') + role: str = Field(default='', description='Role of the agent') + goal: str = Field(default='', description='High-level objective of the agent') instructions: Optional[List[str]] = Field( - default=None, description="Instructions for the agent" + default=None, description='Instructions for the agent' ) statestore: Optional[str] = Field( - default=None, description="Dapr state store component name used by the agent" + default=None, description='Dapr state store component name used by the agent' ) system_prompt: Optional[str] = Field( default=None, description="System prompt guiding the agent's behavior" @@ -32,21 +31,21 @@ class AgentMetadata(BaseModel): class PubSubMetadata(BaseModel): """Pub/Sub configuration information.""" - name: str = Field(..., description="Pub/Sub component name") + name: str = Field(..., description='Pub/Sub component name') broadcast_topic: Optional[str] = Field( - default=None, description="Pub/Sub topic for broadcasting messages" + default=None, description='Pub/Sub topic for broadcasting messages' ) agent_topic: Optional[str] = Field( - default=None, description="Pub/Sub topic for direct agent messages" + default=None, description='Pub/Sub topic for direct agent messages' ) class MemoryMetadata(BaseModel): """Memory configuration information.""" - type: str = Field(..., description="Type of memory used by the agent") + type: str = 
Field(..., description='Type of memory used by the agent') statestore: Optional[str] = Field( - default=None, description="Dapr state store component name for memory" + default=None, description='Dapr state store component name for memory' ) session_id: Optional[str] = Field( default=None, description="Default session ID for the agent's memory" @@ -56,44 +55,42 @@ class MemoryMetadata(BaseModel): class LLMMetadata(BaseModel): """LLM configuration information.""" - client: str = Field(..., description="LLM client used by the agent") - provider: str = Field(..., description="LLM provider used by the agent") - api: str = Field(default="unknown", description="API type used by the LLM client") - model: str = Field(default="unknown", description="Model name or identifier") + client: str = Field(..., description='LLM client used by the agent') + provider: str = Field(..., description='LLM provider used by the agent') + api: str = Field(default='unknown', description='API type used by the LLM client') + model: str = Field(default='unknown', description='Model name or identifier') component_name: Optional[str] = Field( - default=None, description="Dapr component name for the LLM client" + default=None, description='Dapr component name for the LLM client' ) base_url: Optional[str] = Field( - default=None, description="Base URL for the LLM API if applicable" + default=None, description='Base URL for the LLM API if applicable' ) azure_endpoint: Optional[str] = Field( - default=None, description="Azure endpoint if using Azure OpenAI" + default=None, description='Azure endpoint if using Azure OpenAI' ) azure_deployment: Optional[str] = Field( - default=None, description="Azure deployment name if using Azure OpenAI" + default=None, description='Azure deployment name if using Azure OpenAI' ) prompt_template: Optional[str] = Field( - default=None, description="Prompt template used by the agent" + default=None, description='Prompt template used by the agent' ) class ToolMetadata(BaseModel): """Metadata about a tool available to the agent.""" - tool_name: str = Field(..., description="Name of the tool") - tool_description: str = Field( - ..., description="Description of the tool's functionality" - ) - tool_args: str = Field(..., description="Arguments for the tool") + tool_name: str = Field(..., description='Name of the tool') + tool_description: str = Field(..., description="Description of the tool's functionality") + tool_args: str = Field(..., description='Arguments for the tool') class RegistryMetadata(BaseModel): """Registry configuration information.""" statestore: Optional[str] = Field( - None, description="Name of the statestore component for the registry" + None, description='Name of the statestore component for the registry' ) - name: Optional[str] = Field(default=None, description="Name of the team registry") + name: Optional[str] = Field(default=None, description='Name of the team registry') class AgentMetadataSchema(BaseModel): @@ -101,30 +98,22 @@ class AgentMetadataSchema(BaseModel): schema_version: str = Field( ..., - description="Version of the schema used for the agent metadata.", - ) - agent: AgentMetadata = Field( - ..., description="Agent configuration and capabilities" - ) - name: str = Field(..., description="Name of the agent") - registered_at: str = Field(..., description="ISO 8601 timestamp of registration") - pubsub: Optional[PubSubMetadata] = Field( - None, description="Pub/sub configuration if enabled" - ) - memory: Optional[MemoryMetadata] = Field( - None, description="Memory 
configuration if enabled" - ) - llm: Optional[LLMMetadata] = Field(None, description="LLM configuration") - registry: Optional[RegistryMetadata] = Field( - None, description="Registry configuration" - ) - tools: Optional[List[ToolMetadata]] = Field(None, description="Available tools") + description='Version of the schema used for the agent metadata.', + ) + agent: AgentMetadata = Field(..., description='Agent configuration and capabilities') + name: str = Field(..., description='Name of the agent') + registered_at: str = Field(..., description='ISO 8601 timestamp of registration') + pubsub: Optional[PubSubMetadata] = Field(None, description='Pub/sub configuration if enabled') + memory: Optional[MemoryMetadata] = Field(None, description='Memory configuration if enabled') + llm: Optional[LLMMetadata] = Field(None, description='LLM configuration') + registry: Optional[RegistryMetadata] = Field(None, description='Registry configuration') + tools: Optional[List[ToolMetadata]] = Field(None, description='Available tools') max_iterations: Optional[int] = Field( - None, description="Maximum iterations for agent execution" + None, description='Maximum iterations for agent execution' ) - tool_choice: Optional[str] = Field(None, description="Tool choice strategy") + tool_choice: Optional[str] = Field(None, description='Tool choice strategy') agent_metadata: Optional[Dict[str, Any]] = Field( - None, description="Additional metadata about the agent" + None, description='Additional metadata about the agent' ) @classmethod @@ -139,6 +128,6 @@ def export_json_schema(cls, version: str) -> Dict[str, Any]: JSON schema dictionary with metadata """ schema = cls.model_json_schema() - schema["$schema"] = "https://json-schema.org/draft/2020-12/schema" - schema["version"] = version - return schema \ No newline at end of file + schema['$schema'] = 'https://json-schema.org/draft/2020-12/schema' + schema['version'] = version + return schema diff --git a/ext/dapr-ext-agent_core/scripts/generate_schema.py b/ext/dapr-ext-agent_core/scripts/generate_schema.py index a6e0cce6e..6c475075a 100644 --- a/ext/dapr-ext-agent_core/scripts/generate_schema.py +++ b/ext/dapr-ext-agent_core/scripts/generate_schema.py @@ -10,9 +10,9 @@ def get_auto_version() -> str: """Get current package version automatically.""" try: - return version("dapr-ext-agent_core") + return version('dapr-ext-agent_core') except PackageNotFoundError: - return "0.0.0.dev0" + return '0.0.0.dev0' def generate_schema(output_dir: Path, schema_version: Optional[str] = None): @@ -26,44 +26,42 @@ def generate_schema(output_dir: Path, schema_version: Optional[str] = None): # Use provided version or auto-detect current_version = schema_version or get_auto_version() - print(f"Generating schema for version: {current_version}") - schema_dir = output_dir / "agent-metadata" + print(f'Generating schema for version: {current_version}') + schema_dir = output_dir / 'agent-metadata' # Export schema schema: dict[Any, Any] = AgentMetadataSchema.export_json_schema(current_version) # Write versioned file - version_file = schema_dir / f"v{current_version}.json" - with open(version_file, "w") as f: + version_file = schema_dir / f'v{current_version}.json' + with open(version_file, 'w') as f: json.dump(schema, f, indent=2) - print(f"✓ Generated {version_file}") + print(f'✓ Generated {version_file}') # Write latest.json - latest_file = schema_dir / "latest.json" - with open(latest_file, "w") as f: + latest_file = schema_dir / 'latest.json' + with open(latest_file, 'w') as f: 
json.dump(schema, f, indent=2) - print(f"✓ Generated {latest_file}") + print(f'✓ Generated {latest_file}') # Write index with all versions index: dict[Any, Any] = { - "current_version": current_version, - "schema_url": f"https://raw.githubusercontent.com/dapr/python-sdk/main/ext/dapr-ext-agent_core/schemas/agent-metadata/v{current_version}.json", - "available_versions": sorted( - [f.stem for f in schema_dir.glob("v*.json")], reverse=True - ), + 'current_version': current_version, + 'schema_url': f'https://raw.githubusercontent.com/dapr/python-sdk/main/ext/dapr-ext-agent_core/schemas/agent-metadata/v{current_version}.json', + 'available_versions': sorted([f.stem for f in schema_dir.glob('v*.json')], reverse=True), } - index_file = schema_dir / "index.json" - with open(index_file, "w") as f: + index_file = schema_dir / 'index.json' + with open(index_file, 'w') as f: json.dump(index, f, indent=2) - print(f"✓ Generated {index_file}") - print(f"\nSchema generation complete for version {current_version}") + print(f'✓ Generated {index_file}') + print(f'\nSchema generation complete for version {current_version}') def main(): """Main entry point with CLI argument parsing.""" parser = argparse.ArgumentParser( - description="Generate JSON schema files for agent metadata", + description='Generate JSON schema files for agent metadata', formatter_class=argparse.RawDescriptionHelpFormatter, epilog=""" Examples: @@ -82,16 +80,16 @@ def main(): ) parser.add_argument( - "--version", - "-v", + '--version', + '-v', type=str, default=None, - help="Specific version to use for schema generation. If not provided, auto-detects from installed package.", + help='Specific version to use for schema generation. If not provided, auto-detects from installed package.', ) parser.add_argument( - "--output", - "-o", + '--output', + '-o', type=Path, default=None, help="Output directory for schemas. 
Defaults to 'schemas' in repo root.", @@ -104,11 +102,11 @@ def main(): schemas_dir = args.output else: repo_root = Path(__file__).parent.parent - schemas_dir = repo_root / "schemas" + schemas_dir = repo_root / 'schemas' # Generate schemas generate_schema(schemas_dir, schema_version=args.version) -if __name__ == "__main__": +if __name__ == '__main__': main() diff --git a/ext/dapr-ext-agent_core/tests/test_base_mapper.py b/ext/dapr-ext-agent_core/tests/test_base_mapper.py index 8471ecf2a..f55c25544 100644 --- a/ext/dapr-ext-agent_core/tests/test_base_mapper.py +++ b/ext/dapr-ext-agent_core/tests/test_base_mapper.py @@ -20,19 +20,25 @@ class TestBaseAgentMapper(unittest.TestCase): def test_extract_provider_openai(self): """Test provider extraction for OpenAI modules.""" - self.assertEqual(BaseAgentMapper._extract_provider('langchain_openai.chat_models'), 'openai') + self.assertEqual( + BaseAgentMapper._extract_provider('langchain_openai.chat_models'), 'openai' + ) self.assertEqual(BaseAgentMapper._extract_provider('openai.resources'), 'openai') self.assertEqual(BaseAgentMapper._extract_provider('strands.models.openai'), 'openai') def test_extract_provider_azure_openai(self): """Test provider extraction for Azure OpenAI modules.""" - self.assertEqual(BaseAgentMapper._extract_provider('langchain_openai.azure'), 'azure_openai') + self.assertEqual( + BaseAgentMapper._extract_provider('langchain_openai.azure'), 'azure_openai' + ) self.assertEqual(BaseAgentMapper._extract_provider('azure.openai'), 'azure_openai') self.assertEqual(BaseAgentMapper._extract_provider('AZURE_OPENAI'), 'azure_openai') def test_extract_provider_anthropic(self): """Test provider extraction for Anthropic modules.""" - self.assertEqual(BaseAgentMapper._extract_provider('langchain_anthropic.chat_models'), 'anthropic') + self.assertEqual( + BaseAgentMapper._extract_provider('langchain_anthropic.chat_models'), 'anthropic' + ) self.assertEqual(BaseAgentMapper._extract_provider('anthropic.client'), 'anthropic') self.assertEqual(BaseAgentMapper._extract_provider('strands.models.anthropic'), 'anthropic') diff --git a/ext/dapr-ext-agent_core/tests/test_strands_mapper.py b/ext/dapr-ext-agent_core/tests/test_strands_mapper.py index 78967fc34..f838cbe1c 100644 --- a/ext/dapr-ext-agent_core/tests/test_strands_mapper.py +++ b/ext/dapr-ext-agent_core/tests/test_strands_mapper.py @@ -46,26 +46,26 @@ def test_map_agent_metadata_no_session_agent(self): """Test mapping when no SessionAgent exists in the session.""" session_manager = make_mock_session_manager() session_manager._read_state.return_value = None - - result = self.mapper.map_agent_metadata(session_manager, "edge") - + + result = self.mapper.map_agent_metadata(session_manager, 'edge') + # Should use fallback values - self.assertEqual(result.schema_version, "edge") - self.assertEqual(result.agent.role, "Session Manager") - self.assertEqual(result.agent.type, "Strands") + self.assertEqual(result.schema_version, 'edge') + self.assertEqual(result.agent.role, 'Session Manager') + self.assertEqual(result.agent.type, 'Strands') self.assertIsNone(result.llm) self.assertEqual(result.tools, []) # Empty list, not None self.assertIsNone(result.tool_choice) self.assertIsNone(result.max_iterations) - self.assertEqual(result.name, "strands-session-test-session") + self.assertEqual(result.name, 'strands-session-test-session') def test_map_agent_metadata_with_basic_agent(self): """Test mapping with a basic SessionAgent.""" session_manager = make_mock_session_manager() - + # Mock manifest 
session_manager._read_state.return_value = {'agents': ['assistant']} - + # Mock agent agent_state = { 'system_prompt': 'You are a helpful assistant', @@ -74,58 +74,62 @@ def test_map_agent_metadata_with_basic_agent(self): } mock_agent = make_mock_session_agent('assistant', agent_state) session_manager.read_agent.return_value = mock_agent - - result = self.mapper.map_agent_metadata(session_manager, "edge") - + + result = self.mapper.map_agent_metadata(session_manager, 'edge') + # Should extract from SessionAgent - self.assertEqual(result.agent.role, "AI Assistant") - self.assertEqual(result.agent.goal, "Help users") - self.assertEqual(result.agent.system_prompt, "You are a helpful assistant") - self.assertEqual(result.name, "strands-test-session-assistant") + self.assertEqual(result.agent.role, 'AI Assistant') + self.assertEqual(result.agent.goal, 'Help users') + self.assertEqual(result.agent.system_prompt, 'You are a helpful assistant') + self.assertEqual(result.name, 'strands-test-session-assistant') self.assertEqual(result.agent_metadata['agent_id'], 'assistant') def test_map_agent_metadata_with_llm_config(self): """Test mapping with LLM configuration.""" session_manager = make_mock_session_manager() session_manager._read_state.return_value = {'agents': ['assistant']} - + agent_state = { 'system_prompt': 'You are helpful', 'conversation_provider': 'openai', 'llm_config': { 'provider': 'openai', 'model': 'gpt-4', - } + }, } mock_agent = make_mock_session_agent('assistant', agent_state) session_manager.read_agent.return_value = mock_agent - - result = self.mapper.map_agent_metadata(session_manager, "edge") - + + result = self.mapper.map_agent_metadata(session_manager, 'edge') + # Should extract LLM metadata self.assertIsNotNone(result.llm) - self.assertEqual(result.llm.client, "dapr_conversation") - self.assertEqual(result.llm.provider, "openai") - self.assertEqual(result.llm.model, "gpt-4") - self.assertEqual(result.llm.component_name, "openai") + self.assertEqual(result.llm.client, 'dapr_conversation') + self.assertEqual(result.llm.provider, 'openai') + self.assertEqual(result.llm.model, 'gpt-4') + self.assertEqual(result.llm.component_name, 'openai') def test_map_agent_metadata_with_tools(self): """Test mapping with tools configuration.""" session_manager = make_mock_session_manager() session_manager._read_state.return_value = {'agents': ['assistant']} - + agent_state = { 'tools': [ - {'name': 'calculator', 'description': 'Calculate math', 'args': {'x': 'int', 'y': 'int'}}, + { + 'name': 'calculator', + 'description': 'Calculate math', + 'args': {'x': 'int', 'y': 'int'}, + }, {'name': 'search', 'description': 'Search web', 'args': {'query': 'str'}}, ], 'tool_choice': 'auto', } mock_agent = make_mock_session_agent('assistant', agent_state) session_manager.read_agent.return_value = mock_agent - - result = self.mapper.map_agent_metadata(session_manager, "edge") - + + result = self.mapper.map_agent_metadata(session_manager, 'edge') + # Should extract tools metadata self.assertIsNotNone(result.tools) self.assertEqual(len(result.tools), 2) @@ -138,16 +142,16 @@ def test_map_agent_metadata_with_instructions(self): """Test mapping with instructions.""" session_manager = make_mock_session_manager() session_manager._read_state.return_value = {'agents': ['assistant']} - + agent_state = { 'instructions': ['Be concise', 'Be helpful', 'Be friendly'], 'max_iterations': 10, } mock_agent = make_mock_session_agent('assistant', agent_state) session_manager.read_agent.return_value = mock_agent - - result = 
self.mapper.map_agent_metadata(session_manager, "edge") - + + result = self.mapper.map_agent_metadata(session_manager, 'edge') + # Should extract instructions and max_iterations self.assertIsNotNone(result.agent.instructions) self.assertEqual(len(result.agent.instructions), 3) @@ -170,10 +174,10 @@ def test_extract_tools_metadata_with_tool_objects(self): mock_tool.name = 'my_tool' mock_tool.description = 'My tool description' mock_tool.args = {'param': 'value'} - + agent_state = {'tools': [mock_tool]} result = self.mapper._extract_tools_metadata(agent_state) - + self.assertIsNotNone(result) self.assertEqual(len(result), 1) self.assertEqual(result[0].tool_name, 'my_tool') diff --git a/ext/dapr-ext-agent_core/tests/test_strands_metadata.py b/ext/dapr-ext-agent_core/tests/test_strands_metadata.py index 1c852722c..b2fa0a117 100644 --- a/ext/dapr-ext-agent_core/tests/test_strands_metadata.py +++ b/ext/dapr-ext-agent_core/tests/test_strands_metadata.py @@ -26,7 +26,7 @@ class MockSessionManager: def __init__(self, state_store_name='test-statestore', session_id='test-session-123'): self._state_store_name = state_store_name self._session_id = session_id - + @property def state_store_name(self) -> str: return self._state_store_name @@ -57,9 +57,7 @@ def test_metadata_extraction_basic(self): def test_metadata_memory_extraction(self): """Test memory metadata extraction.""" - mock_manager = MockSessionManager( - state_store_name='custom-store', session_id='session-456' - ) + mock_manager = MockSessionManager(state_store_name='custom-store', session_id='session-456') mapper = StrandsMapper() metadata = mapper.map_agent_metadata(mock_manager, schema_version='1.0.0') @@ -89,9 +87,7 @@ def test_metadata_name_without_session_id(self): def test_metadata_agent_metadata_field(self): """Test agent_metadata field contains framework info.""" - mock_manager = MockSessionManager( - state_store_name='store1', session_id='sess1' - ) + mock_manager = MockSessionManager(state_store_name='store1', session_id='sess1') mapper = StrandsMapper() metadata = mapper.map_agent_metadata(mock_manager, schema_version='1.0.0') diff --git a/ext/dapr-ext-agent_core/tests/test_types.py b/ext/dapr-ext-agent_core/tests/test_types.py index 515f71804..b0a76c8bb 100644 --- a/ext/dapr-ext-agent_core/tests/test_types.py +++ b/ext/dapr-ext-agent_core/tests/test_types.py @@ -213,9 +213,7 @@ def test_full_schema(self): llm=LLMMetadata(client='OpenAI', provider='openai'), registry=RegistryMetadata(name='team-1'), tools=[ - ToolMetadata( - tool_name='search', tool_description='Search tool', tool_args='{}' - ) + ToolMetadata(tool_name='search', tool_description='Search tool', tool_args='{}') ], max_iterations=10, tool_choice='auto', diff --git a/ext/dapr-ext-langgraph/dapr/ext/langgraph/dapr_checkpointer.py b/ext/dapr-ext-langgraph/dapr/ext/langgraph/dapr_checkpointer.py index df2c471b0..0c6d126e8 100644 --- a/ext/dapr-ext-langgraph/dapr/ext/langgraph/dapr_checkpointer.py +++ b/ext/dapr-ext-langgraph/dapr/ext/langgraph/dapr_checkpointer.py @@ -76,7 +76,7 @@ def put( if not self._registry_initialized: self.registry_adapter = AgentRegistryAdapter.create_from_stack(registry=None) self._registry_initialized = True - + thread_id = config['configurable']['thread_id'] checkpoint_ns = config['configurable'].get('checkpoint_ns', '') config_checkpoint_id = config['configurable'].get('checkpoint_id', '') @@ -190,7 +190,9 @@ def put_writes( thread_id=thread_id, checkpoint_ns=checkpoint_ns, checkpoint_id=checkpoint_id ) - 
self.client.save_state(store_name=self.state_store_name, key=key, value=json.dumps(write_obj)) + self.client.save_state( + store_name=self.state_store_name, key=key, value=json.dumps(write_obj) + ) checkpoint_key = self._make_safe_checkpoint_key( thread_id=thread_id, checkpoint_ns=checkpoint_ns, checkpoint_id=checkpoint_id diff --git a/ext/dapr-ext-strands/dapr/ext/strands/dapr_session_manager.py b/ext/dapr-ext-strands/dapr/ext/strands/dapr_session_manager.py index 40c59aaa6..bb3a33de1 100644 --- a/ext/dapr-ext-strands/dapr/ext/strands/dapr_session_manager.py +++ b/ext/dapr-ext-strands/dapr/ext/strands/dapr_session_manager.py @@ -70,33 +70,33 @@ def __init__( self._registry_initialized = False super().__init__(session_id=session_id, session_repository=self) - + @property def state_store_name(self) -> str: """Get the Dapr state store name. - + Returns: Name of the Dapr state store component. """ return self._state_store_name - + def _register_with_agent_registry(self) -> Optional[Any]: """Register or update this session manager in the agent registry. - + Returns: AgentRegistryAdapter if registration succeeded, None otherwise. """ try: from dapr.ext.agent_core.metadata import AgentRegistryAdapter - + return AgentRegistryAdapter.create_from_stack(registry=None) - + except ImportError: # agent_core extension not installed, skip registration pass except Exception as e: - logger.debug(f"Agent registry registration skipped: {e}") - + logger.debug(f'Agent registry registration skipped: {e}') + return None @classmethod @@ -254,7 +254,7 @@ def _write_state(self, key: str, data: Dict[str, Any]) -> None: registry_adapter = self._register_with_agent_registry() if registry_adapter: self._registry_initialized = True - + try: content = json.dumps(data, ensure_ascii=False) self._dapr_client.save_state( From 08cb9adcd1898e985426ed69a2771a3508ab3658 Mon Sep 17 00:00:00 2001 From: Casper Nielsen Date: Tue, 3 Feb 2026 16:28:59 +0100 Subject: [PATCH 13/13] fix: correctly ref dapr_agents & formatting Signed-off-by: Casper Nielsen --- examples/strands-agent/agent.py | 4 +++- .../dapr/ext/agent_core/__init__.py | 12 +++++------ .../dapr/ext/agent_core/introspection.py | 20 +++++++++++++++++++ .../dapr/ext/agent_core/mapping/base.py | 1 + .../ext/agent_core/mapping/dapr_agents.py | 16 ++++++++++++++- .../dapr/ext/agent_core/mapping/langgraph.py | 3 ++- .../dapr/ext/agent_core/mapping/strands.py | 3 ++- .../dapr/ext/agent_core/metadata.py | 19 +++++++++--------- .../dapr/ext/agent_core/types.py | 3 ++- .../scripts/generate_schema.py | 8 ++++---- .../tests/test_base_mapper.py | 1 + .../tests/test_strands_mapper.py | 1 - .../tests/test_strands_metadata.py | 1 - ext/dapr-ext-agent_core/tests/test_types.py | 6 +++--- .../dapr/ext/langgraph/dapr_checkpointer.py | 2 +- 15 files changed, 69 insertions(+), 31 deletions(-) diff --git a/examples/strands-agent/agent.py b/examples/strands-agent/agent.py index 5a356cc04..5203225f8 100644 --- a/examples/strands-agent/agent.py +++ b/examples/strands-agent/agent.py @@ -9,9 +9,11 @@ """ import os + +from dapr.ext.strands import DaprSessionManager from strands import Agent, tool from strands.models import OpenAIModel -from dapr.ext.strands import DaprSessionManager + from dapr.clients import DaprClient diff --git a/ext/dapr-ext-agent_core/dapr/ext/agent_core/__init__.py b/ext/dapr-ext-agent_core/dapr/ext/agent_core/__init__.py index 08949a29a..87d6d7f19 100644 --- a/ext/dapr-ext-agent_core/dapr/ext/agent_core/__init__.py +++ b/ext/dapr-ext-agent_core/dapr/ext/agent_core/__init__.py 
@@ -1,15 +1,15 @@ +from .introspection import detect_framework, find_agent_in_stack +from .metadata import AgentRegistryAdapter from .types import ( - SupportedFrameworks, - AgentMetadataSchema, AgentMetadata, + AgentMetadataSchema, LLMMetadata, + MemoryMetadata, PubSubMetadata, - ToolMetadata, RegistryMetadata, - MemoryMetadata, + SupportedFrameworks, + ToolMetadata, ) -from .metadata import AgentRegistryAdapter -from .introspection import find_agent_in_stack, detect_framework __all__ = [ 'SupportedFrameworks', diff --git a/ext/dapr-ext-agent_core/dapr/ext/agent_core/introspection.py b/ext/dapr-ext-agent_core/dapr/ext/agent_core/introspection.py index f6219dd0f..dc04e0296 100644 --- a/ext/dapr-ext-agent_core/dapr/ext/agent_core/introspection.py +++ b/ext/dapr-ext-agent_core/dapr/ext/agent_core/introspection.py @@ -26,6 +26,7 @@ def find_agent_in_stack() -> Optional[Any]: Currently supports: - LangGraph: CompiledStateGraph or SyncPregelLoop - Strands: DaprSessionManager + - Dapr Agents: Any object from dapr_agents module Returns: The agent/graph object if found, None otherwise. @@ -38,11 +39,30 @@ def find_agent_in_stack() -> Optional[Any]: if 'self' in frame_locals: obj = frame_locals['self'] obj_type = type(obj).__name__ + obj_module = type(obj).__module__ # LangGraph support - CompiledStateGraph if obj_type == 'CompiledStateGraph': return obj + # Dapr Agents support - any agent from dapr_agents module + # Use GC to find the actual derived agent that may reference this object + if 'dapr_agents' in obj_module: + # If this is a base class (DaprInfra, AgentBase), use GC to find derived agent + if obj_type in ('DaprInfra', 'AgentBase'): + referrers = gc.get_referrers(obj) + for ref in referrers: + ref_type = type(ref).__name__ + ref_module = type(ref).__module__ + # Look for derived agent classes that own this base object + if 'dapr_agents' in ref_module and ref_type not in ( + 'DaprInfra', + 'AgentBase', + ): + return ref + # Return the object directly if it's already a derived class + return obj + # Strands support - DaprSessionManager # Use gc to find the Agent that owns this session manager (similar to LangGraph checkpointer) if obj_type == 'DaprSessionManager': diff --git a/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/base.py b/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/base.py index 1d2d7c2ea..a4ff81f76 100644 --- a/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/base.py +++ b/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/base.py @@ -13,6 +13,7 @@ from abc import ABC, abstractmethod from typing import Any + from dapr.ext.agent_core.types import AgentMetadataSchema diff --git a/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/dapr_agents.py b/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/dapr_agents.py index 6ac37933c..d3dbd5411 100644 --- a/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/dapr_agents.py +++ b/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/dapr_agents.py @@ -1,7 +1,9 @@ -from datetime import datetime, timezone +import gc import json import logging +from datetime import datetime, timezone from typing import Any + from dapr.ext.agent_core.mapping.base import BaseAgentMapper from dapr.ext.agent_core.types import ( AgentMetadata, @@ -21,6 +23,18 @@ def __init__(self) -> None: pass def map_agent_metadata(self, agent: Any, schema_version: str) -> AgentMetadataSchema: + # If we received a base class (DaprInfra, AgentBase), try to find the actual derived agent via GC + agent_type = type(agent).__name__ + if agent_type in ('DaprInfra', 
'AgentBase'): + referrers = gc.get_referrers(agent) + for ref in referrers: + ref_type = type(ref).__name__ + ref_module = type(ref).__module__ + # Look for derived agent classes in dapr_agents module + if 'dapr_agents' in ref_module and ref_type not in ('DaprInfra', 'AgentBase'): + agent = ref + break + profile = getattr(agent, 'profile', None) memory = getattr(agent, 'memory', None) pubsub = getattr(agent, 'pubsub', None) diff --git a/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/langgraph.py b/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/langgraph.py index 9de2cbd94..08ddce4d2 100644 --- a/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/langgraph.py +++ b/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/langgraph.py @@ -1,6 +1,7 @@ -from datetime import datetime, timezone import logging +from datetime import datetime, timezone from typing import TYPE_CHECKING, Any, Dict, Optional + from dapr.ext.agent_core.mapping.base import BaseAgentMapper from dapr.ext.agent_core.types import ( AgentMetadata, diff --git a/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/strands.py b/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/strands.py index 57658522e..56ede1b19 100644 --- a/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/strands.py +++ b/ext/dapr-ext-agent_core/dapr/ext/agent_core/mapping/strands.py @@ -1,6 +1,7 @@ -from datetime import datetime, timezone import logging +from datetime import datetime, timezone from typing import TYPE_CHECKING, Any, Dict, List, Optional + from dapr.ext.agent_core.mapping.base import BaseAgentMapper from dapr.ext.agent_core.types import ( AgentMetadata, diff --git a/ext/dapr-ext-agent_core/dapr/ext/agent_core/metadata.py b/ext/dapr-ext-agent_core/dapr/ext/agent_core/metadata.py index 7816854f8..c554fc4e8 100644 --- a/ext/dapr-ext-agent_core/dapr/ext/agent_core/metadata.py +++ b/ext/dapr-ext-agent_core/dapr/ext/agent_core/metadata.py @@ -1,11 +1,19 @@ from __future__ import annotations -from importlib.metadata import PackageNotFoundError, version import logging import random import time +from importlib.metadata import PackageNotFoundError, version from typing import Any, Callable, Dict, Optional, Sequence +import dapr.ext.agent_core.mapping +from dapr.ext.agent_core.introspection import detect_framework, find_agent_in_stack +from dapr.ext.agent_core.types import AgentMetadataSchema, SupportedFrameworks +from dapr_agents.agents.configs import ( + AgentRegistryConfig, +) +from dapr_agents.storage.daprstores.stateservice import StateStoreError, StateStoreService + from dapr.clients import DaprClient from dapr.clients.grpc._response import ( GetMetadataResponse, @@ -13,15 +21,6 @@ ) from dapr.clients.grpc._state import Concurrency, Consistency -from dapr_agents.agents.configs import ( - AgentRegistryConfig, -) -from dapr_agents.storage.daprstores.stateservice import StateStoreError, StateStoreService - -from dapr.ext.agent_core import SupportedFrameworks, AgentMetadataSchema -from dapr.ext.agent_core.introspection import find_agent_in_stack, detect_framework -import dapr.ext.agent_core.mapping - logger = logging.getLogger(__name__) diff --git a/ext/dapr-ext-agent_core/dapr/ext/agent_core/types.py b/ext/dapr-ext-agent_core/dapr/ext/agent_core/types.py index ed10db487..6c79d0229 100644 --- a/ext/dapr-ext-agent_core/dapr/ext/agent_core/types.py +++ b/ext/dapr-ext-agent_core/dapr/ext/agent_core/types.py @@ -1,10 +1,11 @@ from enum import StrEnum from typing import Any, Dict, List, Optional + from pydantic import BaseModel, Field class 
SupportedFrameworks(StrEnum): - DAPR_AGENTS = 'dapr-agents' + DAPR_AGENTS = 'dapr_agents' LANGGRAPH = 'langgraph' STRANDS = 'strands' diff --git a/ext/dapr-ext-agent_core/scripts/generate_schema.py b/ext/dapr-ext-agent_core/scripts/generate_schema.py index 6c475075a..8b8f194fd 100644 --- a/ext/dapr-ext-agent_core/scripts/generate_schema.py +++ b/ext/dapr-ext-agent_core/scripts/generate_schema.py @@ -1,7 +1,7 @@ import argparse import json +from importlib.metadata import PackageNotFoundError, version from pathlib import Path -from importlib.metadata import version, PackageNotFoundError from typing import Any, Optional from dapr.ext.agent_core import AgentMetadataSchema @@ -67,13 +67,13 @@ def main(): Examples: # Auto-detect version from installed package python scripts/generate_schema.py - + # Generate schema for specific version python scripts/generate_schema.py --version 1.0.0 - + # Generate for pre-release python scripts/generate_schema.py --version 1.1.0-rc1 - + # Custom output directory python scripts/generate_schema.py --version 1.0.0 --output ./custom-schemas """, diff --git a/ext/dapr-ext-agent_core/tests/test_base_mapper.py b/ext/dapr-ext-agent_core/tests/test_base_mapper.py index f55c25544..4f0035881 100644 --- a/ext/dapr-ext-agent_core/tests/test_base_mapper.py +++ b/ext/dapr-ext-agent_core/tests/test_base_mapper.py @@ -12,6 +12,7 @@ """ import unittest + from dapr.ext.agent_core.mapping import BaseAgentMapper diff --git a/ext/dapr-ext-agent_core/tests/test_strands_mapper.py b/ext/dapr-ext-agent_core/tests/test_strands_mapper.py index f838cbe1c..2a2588c27 100644 --- a/ext/dapr-ext-agent_core/tests/test_strands_mapper.py +++ b/ext/dapr-ext-agent_core/tests/test_strands_mapper.py @@ -11,7 +11,6 @@ limitations under the License. """ -import json import time import unittest from unittest import mock diff --git a/ext/dapr-ext-agent_core/tests/test_strands_metadata.py b/ext/dapr-ext-agent_core/tests/test_strands_metadata.py index b2fa0a117..1aa5fab24 100644 --- a/ext/dapr-ext-agent_core/tests/test_strands_metadata.py +++ b/ext/dapr-ext-agent_core/tests/test_strands_metadata.py @@ -15,7 +15,6 @@ import unittest -from dapr.ext.agent_core.types import SupportedFrameworks from dapr.ext.agent_core.introspection import detect_framework from dapr.ext.agent_core.mapping.strands import StrandsMapper diff --git a/ext/dapr-ext-agent_core/tests/test_types.py b/ext/dapr-ext-agent_core/tests/test_types.py index b0a76c8bb..c2d0eef0e 100644 --- a/ext/dapr-ext-agent_core/tests/test_types.py +++ b/ext/dapr-ext-agent_core/tests/test_types.py @@ -16,14 +16,14 @@ import unittest from dapr.ext.agent_core.types import ( - SupportedFrameworks, AgentMetadata, AgentMetadataSchema, LLMMetadata, + MemoryMetadata, PubSubMetadata, - ToolMetadata, RegistryMetadata, - MemoryMetadata, + SupportedFrameworks, + ToolMetadata, ) diff --git a/ext/dapr-ext-langgraph/dapr/ext/langgraph/dapr_checkpointer.py b/ext/dapr-ext-langgraph/dapr/ext/langgraph/dapr_checkpointer.py index 0c6d126e8..f14b9670f 100644 --- a/ext/dapr-ext-langgraph/dapr/ext/langgraph/dapr_checkpointer.py +++ b/ext/dapr-ext-langgraph/dapr/ext/langgraph/dapr_checkpointer.py @@ -17,12 +17,12 @@ from typing import Any, Dict, List, Optional, Sequence, Tuple, cast import msgpack +from dapr.ext.agent_core import AgentRegistryAdapter from langchain_core.messages import AIMessage, HumanMessage, ToolMessage from langchain_core.runnables import RunnableConfig from ulid import ULID from dapr.clients import DaprClient -from dapr.ext.agent_core import AgentRegistryAdapter 
from langgraph.checkpoint.base import ( WRITES_IDX_MAP, BaseCheckpointSaver,