client.get_root() -> object
-
-# Echo
-
-Methods:
-
-- client.echo.send(\*\*params) -> object
-
# Agents
Types:
```python
-from agentex.types import AcpType, Agent, AgentRpcRequest, AgentListResponse
+from agentex.types import (
+ AcpType,
+ Agent,
+ AgentRpcParams,
+ AgentRpcRequest,
+ AgentRpcResponse,
+ AgentRpcResult,
+ DataDelta,
+ TaskMessageContent,
+ TaskMessageDelta,
+ TaskMessageUpdate,
+ TextDelta,
+ ToolRequestDelta,
+ ToolResponseDelta,
+ AgentListResponse,
+)
```
Methods:
-- client.agents.retrieve(agent_id) -> Agent
-- client.agents.list(\*\*params) -> AgentListResponse
-- client.agents.delete(agent_id) -> Agent
-- client.agents.rpc(agent_id, \*\*params) -> object
-
-## Name
-
-Methods:
-
-- client.agents.name.retrieve(agent_name) -> Agent
-- client.agents.name.delete(agent_name) -> Agent
-- client.agents.name.rpc(agent_name, \*\*params) -> object
+- client.agents.retrieve(agent_id) -> Agent
+- client.agents.list(\*\*params) -> AgentListResponse
+- client.agents.delete(agent_id) -> Agent
+- client.agents.delete_by_name(agent_name) -> Agent
+- client.agents.retrieve_by_name(agent_name) -> Agent
+- client.agents.rpc(agent_id, \*\*params) -> AgentRpcResponse
+- client.agents.rpc_by_name(agent_name, \*\*params) -> AgentRpcResponse
# Tasks
@@ -43,18 +41,13 @@ from agentex.types import Task, TaskListResponse
Methods:
-- client.tasks.retrieve(task_id) -> Task
-- client.tasks.list() -> TaskListResponse
-- client.tasks.delete(task_id) -> Task
-- client.tasks.stream_events(task_id) -> object
-
-## Name
-
-Methods:
-
-- client.tasks.name.retrieve(task_name) -> Task
-- client.tasks.name.delete(task_name) -> Task
-- client.tasks.name.stream_events(task_name) -> object
+- client.tasks.retrieve(task_id) -> Task
+- client.tasks.list() -> TaskListResponse
+- client.tasks.delete(task_id) -> Task
+- client.tasks.delete_by_name(task_name) -> Task
+- client.tasks.retrieve_by_name(task_name) -> Task
+- client.tasks.stream_events(task_id) -> object
+- client.tasks.stream_events_by_name(task_name) -> object
# Messages
@@ -65,7 +58,6 @@ from agentex.types import (
DataContent,
MessageAuthor,
MessageStyle,
- StreamingStatus,
TaskMessage,
TextContent,
ToolRequestContent,
diff --git a/examples/tutorials/00_sync/000_hello_acp/.dockerignore b/examples/tutorials/00_sync/000_hello_acp/.dockerignore
new file mode 100644
index 000000000..4521d8465
--- /dev/null
+++ b/examples/tutorials/00_sync/000_hello_acp/.dockerignore
@@ -0,0 +1,46 @@
+# Python
+__pycache__/
+*.py[cod]
+*$py.class
+*.so
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# Environments
+.env**
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# IDE
+.idea/
+.vscode/
+*.swp
+*.swo
+
+# Git
+.git
+.gitignore
+
+# Misc
+.DS_Store
+
+# CodeArtifact configuration
+.codeartifact-pip-conf
\ No newline at end of file
diff --git a/examples/tutorials/00_sync/000_hello_acp/Dockerfile b/examples/tutorials/00_sync/000_hello_acp/Dockerfile
new file mode 100644
index 000000000..34f07ab19
--- /dev/null
+++ b/examples/tutorials/00_sync/000_hello_acp/Dockerfile
@@ -0,0 +1,45 @@
+# syntax=docker/dockerfile:1.3
+FROM python:3.12-slim
+COPY --from=ghcr.io/astral-sh/uv:0.6.4 /uv /uvx /bin/
+
+# Install system dependencies
+RUN apt-get update && apt-get install -y \
+ htop \
+ vim \
+ curl \
+ tar \
+ python3-dev \
+ postgresql-client \
+ build-essential \
+ libpq-dev \
+ gcc \
+ cmake \
+ netcat-openbsd \
+ && apt-get clean \
+ && rm -rf /var/lib/apt/lists/*
+
+RUN uv pip install --system --upgrade pip setuptools wheel
+
+ENV UV_HTTP_TIMEOUT=1000
+
+# Copy just the requirements file to optimize caching
+COPY 000_hello_acp/requirements.txt /app/requirements.txt
+
+WORKDIR /app/
+
+# Install the required Python packages
+RUN --mount=type=secret,id=codeartifact-pip-conf,target=/etc/pip.conf \
+ export UV_INDEX_URL=$(grep -E "^index-url" /etc/pip.conf | cut -d'=' -f2- | xargs) && \
+ export UV_EXTRA_INDEX_URL=$(grep -E "^extra-index-url" /etc/pip.conf | cut -d'=' -f2- | xargs || echo "") && \
+ uv pip install --system -r requirements.txt
+
+# Copy the project code
+COPY 000_hello_acp/project /app/project
+
+WORKDIR /app/project
+
+# Set environment variables
+ENV PYTHONPATH=/app
+
+# Run the agent using uvicorn
+CMD ["uvicorn", "acp:acp", "--host", "0.0.0.0", "--port", "8000"]
\ No newline at end of file
diff --git a/examples/tutorials/00_sync/000_hello_acp/README.md b/examples/tutorials/00_sync/000_hello_acp/README.md
new file mode 100644
index 000000000..87631a8a1
--- /dev/null
+++ b/examples/tutorials/00_sync/000_hello_acp/README.md
@@ -0,0 +1,128 @@
+# s000-hello-acp - AgentEx Starter Template
+
+This is a generic starter template for building agents with the AgentEx framework. It provides a basic implementation of the Agent 2 Client Protocol (ACP) to help you get started quickly.
+
+## What You'll Learn
+
+- **Tasks**: A task is a grouping mechanism for related messages. Think of it as a conversation thread or a session.
+- **Messages**: Messages are communication objects within a task. They can contain text, data, or instructions.
+- **ACP Events**: The agent responds to four main events:
+ - `task_received`: When a new task is created
+ - `task_message_received`: When a message is sent within a task
+ - `task_approved`: When a task is approved
+ - `task_canceled`: When a task is canceled
+
+## Running the Agent
+
+1. Run the agent locally:
+```bash
+agentex agents run --manifest manifest.yaml
+```
+
+The agent will start on port 8000 and respond to any message you send it with a simple greeting.
+
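+The handler that produces that greeting lives in `project/acp.py`. Trimmed to its essentials, it looks roughly like this (the full file is shown later in this change; the simplified return annotation is for readability only):
+
+```python
+from agentex.lib.sdk.fastacp.fastacp import FastACP
+from agentex.lib.types.acp import SendMessageParams
+from agentex.types.task_message_content import TextContent
+
+# Create a synchronous ACP server
+acp = FastACP.create(acp_type="sync")
+
+
+@acp.on_message_send
+async def handle_message_send(params: SendMessageParams) -> TextContent:
+    # Echo a canned greeting back to the client; later tutorials make this intelligent
+    return TextContent(
+        author="agent",
+        content=f"Hello! This is what I heard you say: {params.content.content}",
+    )
+```
+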
+## What's Inside
+
+This template:
+- Sets up a basic ACP server
+- Handles incoming messages with a simple canned greeting
+- Provides a foundation for building more complex agents
+
+## Next Steps
+
+For more advanced agent development, check out the AgentEx tutorials:
+
+- **Tutorials 00-08**: Learn about building synchronous agents with ACP
+- **Tutorials 09-10**: Learn how to use Temporal to power asynchronous agents
+ - Tutorial 09: Basic Temporal workflow setup
+ - Tutorial 10: Advanced Temporal patterns and best practices
+
+These tutorials will help you understand:
+- How to handle long-running tasks
+- Implementing state machines
+- Managing complex workflows
+- Best practices for async agent development
+
+## The Manifest File
+
+The `manifest.yaml` file is your agent's configuration file. It defines:
+- How your agent should be built and packaged
+- What files are included in your agent's Docker image
+- Your agent's name and description
+- Local development settings (like the port your agent runs on)
+
+This file is essential for both local development and deployment of your agent.
+
+## Project Structure
+
+```
+000_hello_acp/
+├── project/ # Your agent's code
+│ ├── __init__.py
+│ └── acp.py # ACP server and event handlers
+├── Dockerfile # Container definition
+├── manifest.yaml # Deployment config
+└── requirements.txt # Dependencies
+```
+
+## Development
+
+1. **Customize Event Handlers**
+ - Modify the handlers in `acp.py` to implement your agent's logic
+ - Add your own tools and capabilities
+ - Implement custom state management
+
+2. **Add Dependencies**
+ - Add required packages to `requirements.txt`
+ - Update the manifest with any needed credentials
+
+## Local Development
+
+1. **Install AgentEx**
+```bash
+cd agentex-py
+uv venv
+source .venv/bin/activate
+uv sync
+```
+
+2. **Deploy your agent**
+```bash
+# From this directory
+export ENVIRONMENT=development && agentex agents create --manifest manifest.yaml
+```
+
+3. **Interact with your agent**
+
+Option 0: CLI (deprecated - to be replaced once a new CLI is implemented - please use the web UI for now!)
+```bash
+# Submit a task via CLI
+agentex tasks submit --agent {{ agent_name }} --task "Your task here"
+```
+
+Option 1: Web UI
+```bash
+# Start the local web interface
+cd agentex-web
+make dev
+
+# Then open http://localhost:3000 in your browser to chat with your agent
+```
+
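+Option 2: Python SDK (a minimal sketch; the `api_key` and `base_url` values mirror the notebook in the streaming tutorial and assume a local AgentEx gateway on port 5003)
+```python
+from agentex import Agentex
+from agentex.types import TaskMessage, TextContent
+
+client = Agentex(api_key="random", base_url="http://localhost:5003")
+
+# Send a message to the agent by name and wait for the full response
+rpc_response = client.agents.rpc_by_name(
+    agent_name="s000-hello-acp",
+    method="message/send",
+    params={
+        "content": {"type": "text", "author": "user", "content": "Hello!"},
+        "stream": False,
+    },
+)
+
+# The result is a list of task messages; print any text content
+if rpc_response and rpc_response.result:
+    for message in rpc_response.result:
+        if isinstance(message, TaskMessage) and isinstance(message.content, TextContent):
+            print(message.content.content)
+```
+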
+## Development Tips
+
+1. **Local Testing**
+- Set environment variables in `project/.env` for any required credentials, or set them in `manifest.yaml`.
+
+### To build the agent Docker image locally (normally not necessary):
+
+1. First, set up CodeArtifact authentication:
+```bash
+../../setup-build-codeartifact.sh
+```
+
+2. Build the agent image:
+```bash
+agentex agents build --manifest manifest.yaml --secret 'id=codeartifact-pip-conf,src=.codeartifact-pip-conf'
+```
+
diff --git a/examples/tutorials/00_sync/000_hello_acp/manifest.yaml b/examples/tutorials/00_sync/000_hello_acp/manifest.yaml
new file mode 100644
index 000000000..72398ea07
--- /dev/null
+++ b/examples/tutorials/00_sync/000_hello_acp/manifest.yaml
@@ -0,0 +1,122 @@
+# Agent Manifest Configuration
+# ---------------------------
+# This file defines how your agent should be built and deployed.
+
+# Build Configuration
+# ------------------
+# The build config defines what gets packaged into your agent's Docker image.
+# This same configuration is used whether building locally or remotely.
+#
+# When building:
+# 1. All files from include_paths are collected into a build context
+# 2. The context is filtered by dockerignore rules
+# 3. The Dockerfile uses this context to build your agent's image
+# 4. The image is pushed to a registry and used to run your agent
+build:
+ context:
+ # Root directory for the build context
+ root: ../ # Keep this as the default root
+
+ # Paths to include in the Docker build context
+ # Must include:
+ # - agentex-py: The core framework (needed for agent execution)
+ # - Your agent's directory (your custom agent code)
+ # These paths are collected and sent to the Docker daemon for building
+ include_paths:
+ - 000_hello_acp
+
+ # Path to your agent's Dockerfile
+ # This defines how your agent's image is built from the context
+ # Relative to the root directory
+ dockerfile: 000_hello_acp/Dockerfile
+
+ # Path to your agent's .dockerignore
+ # Filters unnecessary files from the build context
+ # Helps keep build context small and builds fast
+ dockerignore: 000_hello_acp/.dockerignore
+
+
+# Local Development Configuration
+# -----------------------------
+# Only used when running the agent locally
+local_development:
+ agent:
+ port: 8000 # Port where your local ACP server is running
+ host_address: host.docker.internal # Host address for Docker networking (host.docker.internal for Docker, localhost for direct)
+
+ # File paths for local development (relative to this manifest.yaml)
+ paths:
+ # Path to ACP server file
+ # Examples:
+ # project/acp.py (standard)
+ # src/server.py (custom structure)
+ # ../shared/acp.py (shared across projects)
+ # /absolute/path/acp.py (absolute path)
+ acp: project/acp.py
+
+
+# Agent Configuration
+# -----------------
+agent:
+ # Unique name for your agent
+ # Used for task routing and monitoring
+ name: s000-hello-acp
+
+ # Type of ACP to use
+ # sync: Simple synchronous ACP implementation
+ # agentic: Advanced ACP with sub-types "base" or "temporal" (requires config)
+ acp_type: sync
+
+ # Description of what your agent does
+ # Helps with documentation and discovery
+ description: An AgentEx agent that just says hello and acknowledges the user's message
+
+ # Temporal workflow configuration
+ # Set enabled: true to use Temporal workflows for long-running tasks
+ temporal:
+ enabled: false
+
+ # Optional: Credentials mapping
+ # Maps Kubernetes secrets to environment variables
+ # Common credentials include:
+ # credentials:
+ # - env_var_name: OPENAI_API_KEY
+ # secret_name: openai-api-key
+ # secret_key: api-key
+
+ # Optional: Set Environment variables for running your agent locally as well
+ # as for deployment later on
+ # env:
+ # - name: OPENAI_BASE_URL
+ # value: "https://api.openai.com/v1"
+ # - name: ACCOUNT_ID
+ # value: "your_account_id_here"
+
+
+# Deployment Configuration
+# -----------------------
+# Configuration for deploying your agent to Kubernetes clusters
+deployment:
+ # Container image configuration
+ image:
+ repository: "" # Update with your container registry
+ tag: "latest" # Default tag, should be versioned in production
+
+ # Global deployment settings that apply to all clusters
+ # These can be overridden in cluster-specific files (deploy/*.yaml)
+ global:
+ agent:
+ name: "s000-hello-acp"
+ description: "An AgentEx agent that just says hello and acknowledges the user's message"
+
+ # Default replica count
+ replicaCount: 1
+
+ # Default resource requirements
+ resources:
+ requests:
+ cpu: "500m"
+ memory: "1Gi"
+ limits:
+ cpu: "1000m"
+ memory: "2Gi"
\ No newline at end of file
diff --git a/examples/tutorials/00_sync/000_hello_acp/project/__init__.py b/examples/tutorials/00_sync/000_hello_acp/project/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/examples/tutorials/00_sync/000_hello_acp/project/acp.py b/examples/tutorials/00_sync/000_hello_acp/project/acp.py
new file mode 100644
index 000000000..0c7b66450
--- /dev/null
+++ b/examples/tutorials/00_sync/000_hello_acp/project/acp.py
@@ -0,0 +1,28 @@
+from typing import AsyncGenerator, Union
+from agentex.lib.sdk.fastacp.fastacp import FastACP
+from agentex.lib.types.acp import SendMessageParams
+
+from agentex.lib.types.task_message_updates import TaskMessageUpdate
+from agentex.types.task_message import TaskMessageContent
+from agentex.types.task_message_content import TextContent
+from agentex.lib.utils.logging import make_logger
+
+logger = make_logger(__name__)
+
+
+# Create an ACP server
+acp = FastACP.create(
+ acp_type="sync",
+)
+
+
+@acp.on_message_send
+async def handle_message_send(
+ params: SendMessageParams
+) -> Union[TaskMessageContent, AsyncGenerator[TaskMessageUpdate, None]]:
+ """Default message handler with streaming support"""
+ return TextContent(
+ author="agent",
+ content=f"Hello! I've received your message. Here's a generic response, but in future tutorials we'll see how you can get me to intelligently respond to your message. This is what I heard you say: {params.content.content}",
+ )
+
diff --git a/examples/tutorials/00_sync/000_hello_acp/requirements.txt b/examples/tutorials/00_sync/000_hello_acp/requirements.txt
new file mode 100644
index 000000000..2c2ebf15b
--- /dev/null
+++ b/examples/tutorials/00_sync/000_hello_acp/requirements.txt
@@ -0,0 +1,5 @@
+# AgentEx Python SDK
+agentex-py>=0.0.4,<0.1.0
+
+# Scale GenAI Platform Python SDK
+scale-gp
\ No newline at end of file
diff --git a/examples/tutorials/00_sync/010_multiturn/.dockerignore b/examples/tutorials/00_sync/010_multiturn/.dockerignore
new file mode 100644
index 000000000..4521d8465
--- /dev/null
+++ b/examples/tutorials/00_sync/010_multiturn/.dockerignore
@@ -0,0 +1,46 @@
+# Python
+__pycache__/
+*.py[cod]
+*$py.class
+*.so
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# Environments
+.env**
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# IDE
+.idea/
+.vscode/
+*.swp
+*.swo
+
+# Git
+.git
+.gitignore
+
+# Misc
+.DS_Store
+
+# CodeArtifact configuration
+.codeartifact-pip-conf
\ No newline at end of file
diff --git a/examples/tutorials/00_sync/010_multiturn/Dockerfile b/examples/tutorials/00_sync/010_multiturn/Dockerfile
new file mode 100644
index 000000000..29f524911
--- /dev/null
+++ b/examples/tutorials/00_sync/010_multiturn/Dockerfile
@@ -0,0 +1,45 @@
+# syntax=docker/dockerfile:1.3
+FROM python:3.12-slim
+COPY --from=ghcr.io/astral-sh/uv:0.6.4 /uv /uvx /bin/
+
+# Install system dependencies
+RUN apt-get update && apt-get install -y \
+ htop \
+ vim \
+ curl \
+ tar \
+ python3-dev \
+ postgresql-client \
+ build-essential \
+ libpq-dev \
+ gcc \
+ cmake \
+ netcat-openbsd \
+ && apt-get clean \
+ && rm -rf /var/lib/apt/lists/*
+
+RUN uv pip install --system --upgrade pip setuptools wheel
+
+ENV UV_HTTP_TIMEOUT=1000
+
+# Copy just the requirements file to optimize caching
+COPY 010_multiturn/requirements.txt /app/requirements.txt
+
+WORKDIR /app/
+
+# Install the required Python packages
+RUN --mount=type=secret,id=codeartifact-pip-conf,target=/etc/pip.conf \
+ export UV_INDEX_URL=$(grep -E "^index-url" /etc/pip.conf | cut -d'=' -f2- | xargs) && \
+ export UV_EXTRA_INDEX_URL=$(grep -E "^extra-index-url" /etc/pip.conf | cut -d'=' -f2- | xargs || echo "") && \
+ uv pip install --system -r requirements.txt
+
+# Copy the project code
+COPY 010_multiturn/project /app/project
+
+WORKDIR /app/project
+
+# Set environment variables
+ENV PYTHONPATH=/app
+
+# Run the agent using uvicorn
+CMD ["uvicorn", "acp:acp", "--host", "0.0.0.0", "--port", "8000"]
\ No newline at end of file
diff --git a/examples/tutorials/00_sync/010_multiturn/README.md b/examples/tutorials/00_sync/010_multiturn/README.md
new file mode 100644
index 000000000..a23f76531
--- /dev/null
+++ b/examples/tutorials/00_sync/010_multiturn/README.md
@@ -0,0 +1,127 @@
+# s010-multiturn - AgentEx Starter Template
+
+This tutorial builds on the AgentEx starter template and shows how to handle a basic multi-turn conversation (without streaming) over the Agent 2 Client Protocol (ACP).
+
+## What You'll Learn
+
+- **Tasks**: A task is a grouping mechanism for related messages. Think of it as a conversation thread or a session.
+- **Messages**: Messages are communication objects within a task. They can contain text, data, or instructions.
+- **ACP Events**: The agent responds to four main events:
+ - `task_received`: When a new task is created
+ - `task_message_received`: When a message is sent within a task
+ - `task_approved`: When a task is approved
+ - `task_canceled`: When a task is canceled
+
+## Running the Agent
+
+1. Run the agent locally:
+```bash
+agentex agents run --manifest manifest.yaml
+```
+
+The agent will start on port 8000 and respond to each message with an LLM-generated reply.
+
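+Under the hood, the handler in `project/acp.py` keeps per-task state between turns using the `adk.state` helpers. A minimal sketch of that pattern (the helper function name here is illustrative, not part of the framework):
+
+```python
+from agentex.lib import adk
+from agentex.lib.utils.model_utils import BaseModel
+
+
+class StateModel(BaseModel):
+    system_prompt: str
+    model: str
+
+
+async def load_or_create_state(params) -> StateModel:
+    # Reuse the task's existing state if present, otherwise create it
+    task_state = await adk.state.get_by_task_and_agent(task_id=params.task.id, agent_id=params.agent.id)
+    if not task_state:
+        state = StateModel(system_prompt="You are a helpful assistant.", model="gpt-4o-mini")
+        await adk.state.create(task_id=params.task.id, agent_id=params.agent.id, state=state)
+        return state
+    return StateModel.model_validate(task_state.state)
+```
+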
+## What's Inside
+
+This template:
+- Sets up a basic ACP server
+- Handles incoming messages by calling an LLM with the task's message history
+- Provides a foundation for building more complex agents
+
+## Next Steps
+
+For more advanced agent development, check out the AgentEx tutorials:
+
+- **Tutorials 00-08**: Learn about building synchronous agents with ACP
+- **Tutorials 09-10**: Learn how to use Temporal to power asynchronous agents
+ - Tutorial 09: Basic Temporal workflow setup
+ - Tutorial 10: Advanced Temporal patterns and best practices
+
+These tutorials will help you understand:
+- How to handle long-running tasks
+- Implementing state machines
+- Managing complex workflows
+- Best practices for async agent development
+
+## The Manifest File
+
+The `manifest.yaml` file is your agent's configuration file. It defines:
+- How your agent should be built and packaged
+- What files are included in your agent's Docker image
+- Your agent's name and description
+- Local development settings (like the port your agent runs on)
+
+This file is essential for both local development and deployment of your agent.
+
+## Project Structure
+
+```
+010_multiturn/
+├── project/ # Your agent's code
+│ ├── __init__.py
+│ └── acp.py # ACP server and event handlers
+├── Dockerfile # Container definition
+├── manifest.yaml # Deployment config
+└── requirements.txt # Dependencies
+```
+
+## Development
+
+1. **Customize Event Handlers**
+ - Modify the handlers in `acp.py` to implement your agent's logic
+ - Add your own tools and capabilities
+ - Implement custom state management
+
+2. **Add Dependencies**
+ - Add required packages to `requirements.txt`
+ - Update the manifest with any needed credentials
+
+## Local Development
+
+1. **Install AgentEx**
+```bash
+cd agentex-py
+uv venv
+source .venv/bin/activate
+uv sync
+```
+
+2. **Deploy your agent**
+```bash
+# From this directory
+export ENVIRONMENT=development && agentex agents create --manifest manifest.yaml
+```
+
+3. **Interact with your agent**
+
+Option 0: CLI (deprecated - to be replaced once a new CLI is implemented - please use the web UI for now!)
+```bash
+# Submit a task via CLI
+agentex tasks submit --agent {{ agent_name }} --task "Your task here"
+```
+
+Option 1: Web UI
+```bash
+# Start the local web interface
+cd agentex-web
+make dev
+
+# Then open http://localhost:3000 in your browser to chat with your agent
+```
+
+## Development Tips
+
+1. **Local Testing**
+- Set environment variables in `project/.env` for any required credentials, or set them in `manifest.yaml`.
+
+### To build the agent Docker image locally (normally not necessary):
+
+1. First, set up CodeArtifact authentication:
+```bash
+../../setup-build-codeartifact.sh
+```
+
+2. Build the agent image:
+```bash
+agentex agents build --manifest manifest.yaml --secret 'id=codeartifact-pip-conf,src=.codeartifact-pip-conf'
+```
\ No newline at end of file
diff --git a/examples/tutorials/00_sync/010_multiturn/manifest.yaml b/examples/tutorials/00_sync/010_multiturn/manifest.yaml
new file mode 100644
index 000000000..a68ab6bed
--- /dev/null
+++ b/examples/tutorials/00_sync/010_multiturn/manifest.yaml
@@ -0,0 +1,118 @@
+# Agent Manifest Configuration
+# ---------------------------
+# This file defines how your agent should be built and deployed.
+
+# Build Configuration
+# ------------------
+# The build config defines what gets packaged into your agent's Docker image.
+# This same configuration is used whether building locally or remotely.
+#
+# When building:
+# 1. All files from include_paths are collected into a build context
+# 2. The context is filtered by dockerignore rules
+# 3. The Dockerfile uses this context to build your agent's image
+# 4. The image is pushed to a registry and used to run your agent
+build:
+ context:
+ # Root directory for the build context
+ root: ../ # Keep this as the default root
+
+ # Paths to include in the Docker build context
+ # Must include:
+ # - agentex-py: The core framework (needed for agent execution)
+ # - Your agent's directory (your custom agent code)
+ # These paths are collected and sent to the Docker daemon for building
+ include_paths:
+ - 010_multiturn
+
+ # Path to your agent's Dockerfile
+ # This defines how your agent's image is built from the context
+ # Relative to the root directory
+ dockerfile: 010_multiturn/Dockerfile
+
+ # Path to your agent's .dockerignore
+ # Filters unnecessary files from the build context
+ # Helps keep build context small and builds fast
+ dockerignore: 010_multiturn/.dockerignore
+
+
+# Local Development Configuration
+# -----------------------------
+# Only used when running the agent locally
+local_development:
+ agent:
+ port: 8000 # Port where your local ACP server is running
+ host_address: host.docker.internal # Host address for Docker networking (host.docker.internal for Docker, localhost for direct)
+
+ # File paths for local development (relative to this manifest.yaml)
+ paths:
+ # Path to ACP server file
+ # Examples:
+ # project/acp.py (standard)
+ # src/server.py (custom structure)
+ # ../shared/acp.py (shared across projects)
+ # /absolute/path/acp.py (absolute path)
+ acp: project/acp.py
+
+
+# Agent Configuration
+# -----------------
+agent:
+ acp_type: sync
+ # Unique name for your agent
+ # Used for task routing and monitoring
+ name: s010-multiturn
+
+ # Description of what your agent does
+ # Helps with documentation and discovery
+  description: An AgentEx agent that does multiturn chat
+
+ # Temporal workflow configuration
+ # Set enabled: true to use Temporal workflows for long-running tasks
+ temporal:
+ enabled: false
+
+ # Optional: Credentials mapping
+ # Maps Kubernetes secrets to environment variables
+ # Common credentials include:
+ # credentials:
+ # - env_var_name: OPENAI_API_KEY
+ # secret_name: openai-api-key
+ # secret_key: api-key
+
+ # Optional: Set Environment variables for running your agent locally as well
+ # as for deployment later on
+ # env:
+ # - name: OPENAI_BASE_URL
+ # value: "https://api.openai.com/v1"
+ # - name: ACCOUNT_ID
+ # value: "your_account_id_here"
+
+
+# Deployment Configuration
+# -----------------------
+# Configuration for deploying your agent to Kubernetes clusters
+deployment:
+ # Container image configuration
+ image:
+ repository: "" # Update with your container registry
+ tag: "latest" # Default tag, should be versioned in production
+
+ # Global deployment settings that apply to all clusters
+ # These can be overridden in cluster-specific files (deploy/*.yaml)
+ global:
+ agent:
+ name: "s010-multiturn"
+ description: "An AgentEx agent"
+
+ # Default replica count
+ replicaCount: 1
+
+ # Default resource requirements
+ resources:
+ requests:
+ cpu: "500m"
+ memory: "1Gi"
+ limits:
+ cpu: "1000m"
+ memory: "2Gi"
\ No newline at end of file
diff --git a/examples/tutorials/00_sync/010_multiturn/project/__init__.py b/examples/tutorials/00_sync/010_multiturn/project/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/examples/tutorials/00_sync/010_multiturn/project/acp.py b/examples/tutorials/00_sync/010_multiturn/project/acp.py
new file mode 100644
index 000000000..0067cec30
--- /dev/null
+++ b/examples/tutorials/00_sync/010_multiturn/project/acp.py
@@ -0,0 +1,119 @@
+import os
+from typing import AsyncGenerator, Union
+
+from agentex.lib import adk
+from agentex.lib.sdk.fastacp.fastacp import FastACP
+from agentex.lib.types.acp import SendMessageParams
+from agentex.lib.types.llm_messages import AssistantMessage, LLMConfig, SystemMessage, UserMessage
+from agentex.lib.types.task_message_updates import TaskMessageUpdate
+from agentex.types.task_message import TaskMessageContent
+from agentex.types.task_message_content import TextContent
+from agentex.lib.utils.model_utils import BaseModel
+
+# Create an ACP server
+acp = FastACP.create(
+ acp_type="sync",
+)
+
+
+class StateModel(BaseModel):
+ system_prompt: str
+ model: str
+
+
+# Note: The return of this handler is required to be persisted by the Agentex Server
+@acp.on_message_send
+async def handle_message_send(
+ params: SendMessageParams
+) -> Union[TaskMessageContent, AsyncGenerator[TaskMessageUpdate, None]]:
+ """
+ In this tutorial, we'll see how to handle a basic multi-turn conversation without streaming.
+ """
+ #########################################################
+ # 0. Validate the message.
+ #########################################################
+
+ if params.content.type != "text":
+ raise ValueError(f"Expected text message, got {params.content.type}")
+
+ if params.content.author != "user":
+ raise ValueError(f"Expected user message, got {params.content.author}")
+
+ if not os.environ.get("OPENAI_API_KEY"):
+ return TextContent(
+ author="agent",
+ content="Hey, sorry I'm unable to respond to your message because you're running this example without an OpenAI API key. Please set the OPENAI_API_KEY environment variable to run this example. Do this by either by adding a .env file to the project/ directory or by setting the environment variable in your terminal.",
+ )
+
+ #########################################################
+ # 1. Initialize the state. Using state is optional, but it's a good way to store information between turns.
+ #########################################################
+
+ # Try to retrieve the state. If it doesn't exist, create it.
+ task_state = await adk.state.get_by_task_and_agent(task_id=params.task.id, agent_id=params.agent.id)
+
+ if not task_state:
+ # If the state doesn't exist, create it.
+ state = StateModel(system_prompt="You are a helpful assistant that can answer questions.", model="gpt-4o-mini")
+ task_state = await adk.state.create(task_id=params.task.id, agent_id=params.agent.id, state=state)
+ else:
+ state = StateModel.model_validate(task_state.state)
+
+ #########################################################
+ # 2. Fetch our message history.
+ #########################################################
+
+ task_messages = await adk.messages.list(task_id=params.task.id)
+
+ #########################################################
+ # 3. Convert task messages to LLM messages.
+ #########################################################
+
+ # This might seem duplicative, but the split between TaskMessage and LLMMessage is intentional and important.
+
+ llm_messages = [
+ SystemMessage(content=state.system_prompt),
+ *[
+ UserMessage(content=message.content.content) if message.content.author == "user" else AssistantMessage(content=message.content.content)
+ for message in task_messages
+ if message.content.type == "text"
+ ]
+ ]
+
+ # TaskMessages are messages that are sent between an Agent and a Client. They are fundamentally decoupled from messages sent to the LLM. This is because you may want to send additional metadata to allow the client to render the message on the UI differently.
+
+ # LLMMessages are OpenAI-compatible messages that are sent to the LLM, and are used to track the state of a conversation with a model.
+
+ # In simple scenarios your conversion logic will just look like this. However, in complex scenarios where you are leveraging the flexibility of the TaskMessage type to send non-LLM-specific metadata, you should write custom conversion logic.
+
+ # Some complex scenarios include:
+ # - Taking a markdown document output by an LLM, postprocessing it into a JSON object to clearly denote title, content, and footers. This can be sent as a DataContent TaskMessage to the client and converted back to markdown here to send back to the LLM.
+    # - If using multiple LLMs (like in an actor-critic framework), you may want to send DataContent that denotes which LLM generated which part of the output and write conversion logic to split the TaskMessage history into multiple LLM conversations.
+    # - If using multiple LLMs, but one LLM's output should not be sent to the user (i.e. a critic model), you can leverage the State as an internal storage mechanism to store the critic model's conversation history. This is a powerful and flexible way to handle complex scenarios.
+
+ #########################################################
+ # 4. Call an LLM to respond to the user's message.
+ #########################################################
+
+ # Call an LLM to respond to the user's message
+ chat_completion = await adk.providers.litellm.chat_completion(
+ llm_config=LLMConfig(model=state.model, messages=llm_messages),
+ trace_id=params.task.id,
+ )
+
+ #########################################################
+ # 5. Return the agent response to the client.
+ #########################################################
+
+ # The Agentex server automatically commits input and output messages to the database so you don't need to do this yourself, simply process the input content and return the output content.
+
+ # Return the agent response to the client
+ if chat_completion.choices[0].message:
+ content_str = chat_completion.choices[0].message.content or ""
+ else:
+ content_str = ""
+
+ return TextContent(
+ author="agent",
+ content=content_str
+ )
diff --git a/examples/tutorials/00_sync/010_multiturn/requirements.txt b/examples/tutorials/00_sync/010_multiturn/requirements.txt
new file mode 100644
index 000000000..d12ad3951
--- /dev/null
+++ b/examples/tutorials/00_sync/010_multiturn/requirements.txt
@@ -0,0 +1,4 @@
+agentex-py>=0.0.4,<0.1.0
+
+# Scale GenAI Platform Python SDK
+scale-gp
\ No newline at end of file
diff --git a/examples/tutorials/00_sync/020_streaming/.dockerignore b/examples/tutorials/00_sync/020_streaming/.dockerignore
new file mode 100644
index 000000000..e2a6524b2
--- /dev/null
+++ b/examples/tutorials/00_sync/020_streaming/.dockerignore
@@ -0,0 +1,47 @@
+# Python
+__pycache__/
+*.py[cod]
+*$py.class
+*.so
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# Environments
+.env**
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# IDE
+.idea/
+.vscode/
+*.swp
+*.swo
+
+# Git
+.git
+.gitignore
+
+# Misc
+.DS_Store
+
+# CodeArtifact configuration
+.codeartifact-pip-conf
+
diff --git a/examples/tutorials/00_sync/020_streaming/Dockerfile b/examples/tutorials/00_sync/020_streaming/Dockerfile
new file mode 100644
index 000000000..99958ebc5
--- /dev/null
+++ b/examples/tutorials/00_sync/020_streaming/Dockerfile
@@ -0,0 +1,44 @@
+# syntax=docker/dockerfile:1.3
+FROM python:3.12-slim
+COPY --from=ghcr.io/astral-sh/uv:0.6.4 /uv /uvx /bin/
+
+# Install system dependencies
+RUN apt-get update && apt-get install -y \
+ htop \
+ vim \
+ curl \
+ tar \
+ python3-dev \
+ postgresql-client \
+ build-essential \
+ libpq-dev \
+ gcc \
+ cmake \
+ netcat-openbsd \
+ && apt-get clean \
+ && rm -rf /var/lib/apt/lists/*
+
+RUN uv pip install --system --upgrade pip setuptools wheel
+
+ENV UV_HTTP_TIMEOUT=1000
+
+# Copy just the requirements file to optimize caching
+COPY 020_streaming/requirements.txt /app/requirements.txt
+
+WORKDIR /app/
+
+# Install the required Python packages
+RUN --mount=type=secret,id=codeartifact-pip-conf,target=/etc/pip.conf \
+ export UV_INDEX_URL=$(grep -E "^index-url" /etc/pip.conf | cut -d'=' -f2- | xargs) && \
+ export UV_EXTRA_INDEX_URL=$(grep -E "^extra-index-url" /etc/pip.conf | cut -d'=' -f2- | xargs || echo "") && \
+ uv pip install --system -r requirements.txt
+# Copy the project code
+COPY 020_streaming/project /app/project
+
+WORKDIR /app/project
+
+# Set environment variables
+ENV PYTHONPATH=/app
+
+# Run the agent using uvicorn
+CMD ["uvicorn", "acp:acp", "--host", "0.0.0.0", "--port", "8000"]
\ No newline at end of file
diff --git a/examples/tutorials/00_sync/020_streaming/README.md b/examples/tutorials/00_sync/020_streaming/README.md
new file mode 100644
index 000000000..920acf28b
--- /dev/null
+++ b/examples/tutorials/00_sync/020_streaming/README.md
@@ -0,0 +1,129 @@
+# s020-streaming - AgentEx Starter Template
+
+This tutorial builds on the AgentEx starter template and shows how to stream responses from a multi-turn chat agent over the Agent 2 Client Protocol (ACP).
+
+## What You'll Learn
+
+- **Tasks**: A task is a grouping mechanism for related messages. Think of it as a conversation thread or a session.
+- **Messages**: Messages are communication objects within a task. They can contain text, data, or instructions.
+- **ACP Events**: The agent responds to four main events:
+ - `task_received`: When a new task is created
+ - `task_message_received`: When a message is sent within a task
+ - `task_approved`: When a task is approved
+ - `task_canceled`: When a task is canceled
+
+## Running the Agent
+
+1. Run the agent locally:
+```bash
+agentex agents run --manifest manifest.yaml
+```
+
+The agent will start on port 8000 and stream its LLM responses back to the client as they are generated.
+
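+The streaming handler lives in `project/acp.py`. Stripped of the state and history handling covered in the previous tutorial, it looks roughly like this (the hard-coded model and single-message history are simplifications):
+
+```python
+from agentex.lib import adk
+from agentex.lib.sdk.fastacp.fastacp import FastACP
+from agentex.lib.types.acp import SendMessageParams
+from agentex.lib.types.llm_messages import LLMConfig, UserMessage
+from agentex.lib.types.task_message_updates import StreamTaskMessageDelta, StreamTaskMessageDone, TextDelta
+
+acp = FastACP.create(acp_type="sync")
+
+
+@acp.on_message_send
+async def handle_message_send(params: SendMessageParams):
+    # Stream the LLM response back to the client as text deltas
+    async for chunk in adk.providers.litellm.chat_completion_stream(
+        llm_config=LLMConfig(
+            model="gpt-4o-mini",
+            messages=[UserMessage(content=params.content.content)],
+            stream=True,
+        ),
+        trace_id=params.task.id,
+    ):
+        if chunk and chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content:
+            yield StreamTaskMessageDelta(index=0, delta=TextDelta(text_delta=chunk.choices[0].delta.content))
+
+    # Tell the client the streamed message is complete
+    yield StreamTaskMessageDone(index=0)
+```
+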
+## What's Inside
+
+This template:
+- Sets up a basic ACP server
+- Handles incoming messages by streaming an LLM response back to the client
+- Provides a foundation for building more complex agents
+
+## Next Steps
+
+For more advanced agent development, check out the AgentEx tutorials:
+
+- **Tutorials 00-08**: Learn about building synchronous agents with ACP
+- **Tutorials 09-10**: Learn how to use Temporal to power asynchronous agents
+ - Tutorial 09: Basic Temporal workflow setup
+ - Tutorial 10: Advanced Temporal patterns and best practices
+
+These tutorials will help you understand:
+- How to handle long-running tasks
+- Implementing state machines
+- Managing complex workflows
+- Best practices for async agent development
+
+## The Manifest File
+
+The `manifest.yaml` file is your agent's configuration file. It defines:
+- How your agent should be built and packaged
+- What files are included in your agent's Docker image
+- Your agent's name and description
+- Local development settings (like the port your agent runs on)
+
+This file is essential for both local development and deployment of your agent.
+
+## Project Structure
+
+```
+020_streaming/
+├── project/ # Your agent's code
+│ ├── __init__.py
+│ └── acp.py # ACP server and event handlers
+├── Dockerfile # Container definition
+├── manifest.yaml # Deployment config
+└── requirements.txt # Dependencies
+```
+
+## Development
+
+1. **Customize Event Handlers**
+ - Modify the handlers in `acp.py` to implement your agent's logic
+ - Add your own tools and capabilities
+ - Implement custom state management
+
+2. **Add Dependencies**
+ - Add required packages to `requirements.txt`
+ - Update the manifest with any needed credentials
+
+## Local Development
+
+1. **Install AgentEx**
+```bash
+cd agentex-py
+uv venv
+source .venv/bin/activate
+uv sync
+```
+
+2. **Deploy your agent**
+```bash
+# From this directory
+export ENVIRONMENT=development && agentex agents create --manifest manifest.yaml
+```
+
+3. **Interact with your agent**
+
+Option 0: CLI (deprecated - to be replaced once a new CLI is implemented - please use the web UI for now!)
+```bash
+# Submit a task via CLI
+agentex tasks submit --agent {{ agent_name }} --task "Your task here"
+```
+
+Option 1: Web UI
+```bash
+# Start the local web interface
+cd agentex-web
+make dev
+
+# Then open http://localhost:3000 in your browser to chat with your agent
+```
+
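+Option 2: Python SDK (streaming). A condensed version of the included `test.ipynb`, assuming a local AgentEx gateway at `http://localhost:5003`:
+```python
+import json
+
+from agentex import Agentex
+from agentex.types import AgentRpcResponse
+from agentex.types.agent_rpc_result import StreamTaskMessageDelta
+from agentex.types.text_delta import TextDelta
+
+client = Agentex(api_key="random", base_url="http://localhost:5003")
+
+with client.agents.with_streaming_response.rpc_by_name(
+    agent_name="s020-streaming",
+    method="message/send",
+    params={
+        "content": {"type": "text", "author": "user", "content": "Hello what can you do?"},
+        "stream": True,
+    },
+) as response:
+    # Each line of the response body is a JSON-encoded AgentRpcResponse chunk
+    for streaming_response_str in response.iter_text():
+        for chunk in streaming_response_str.split("\n"):
+            if not chunk:
+                continue
+            result = AgentRpcResponse.model_validate(json.loads(chunk)).result
+            if isinstance(result, StreamTaskMessageDelta) and isinstance(result.delta, TextDelta):
+                print(result.delta.text_delta, end="", flush=True)
+```
+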
+## Development Tips
+
+1. **Local Testing**
+- Set environment variables in `project/.env` for any required credentials, or set them in `manifest.yaml`.
+
+### To build the agent Docker image locally (normally not necessary):
+
+1. First, set up CodeArtifact authentication:
+```bash
+../../setup-build-codeartifact.sh
+```
+
+2. Build the agent image:
+```bash
+agentex agents build --manifest manifest.yaml --secret 'id=codeartifact-pip-conf,src=.codeartifact-pip-conf'
+```
+
+
diff --git a/examples/tutorials/00_sync/020_streaming/manifest.yaml b/examples/tutorials/00_sync/020_streaming/manifest.yaml
new file mode 100644
index 000000000..71460af04
--- /dev/null
+++ b/examples/tutorials/00_sync/020_streaming/manifest.yaml
@@ -0,0 +1,119 @@
+# Agent Manifest Configuration
+# ---------------------------
+# This file defines how your agent should be built and deployed.
+
+# Build Configuration
+# ------------------
+# The build config defines what gets packaged into your agent's Docker image.
+# This same configuration is used whether building locally or remotely.
+#
+# When building:
+# 1. All files from include_paths are collected into a build context
+# 2. The context is filtered by dockerignore rules
+# 3. The Dockerfile uses this context to build your agent's image
+# 4. The image is pushed to a registry and used to run your agent
+build:
+ context:
+ # Root directory for the build context
+ root: ../ # Keep this as the default root
+
+ # Paths to include in the Docker build context
+ # Must include:
+ # - agentex-py: The core framework (needed for agent execution)
+ # - Your agent's directory (your custom agent code)
+ # These paths are collected and sent to the Docker daemon for building
+
+ include_paths:
+ - 020_streaming
+
+ # Path to your agent's Dockerfile
+ # This defines how your agent's image is built from the context
+ # Relative to the root directory
+ dockerfile: 020_streaming/Dockerfile
+
+ # Path to your agent's .dockerignore
+ # Filters unnecessary files from the build context
+ # Helps keep build context small and builds fast
+ dockerignore: 020_streaming/.dockerignore
+
+
+# Local Development Configuration
+# -----------------------------
+# Only used when running the agent locally
+local_development:
+ agent:
+ port: 8000 # Port where your local ACP server is running
+ host_address: host.docker.internal # Host address for Docker networking (host.docker.internal for Docker, localhost for direct)
+
+ # File paths for local development (relative to this manifest.yaml)
+ paths:
+ # Path to ACP server file
+ # Examples:
+ # project/acp.py (standard)
+ # src/server.py (custom structure)
+ # ../shared/acp.py (shared across projects)
+ # /absolute/path/acp.py (absolute path)
+ acp: project/acp.py
+
+
+# Agent Configuration
+# -----------------
+agent:
+ acp_type: sync
+ # Unique name for your agent
+ # Used for task routing and monitoring
+ name: s020-streaming
+
+ # Description of what your agent does
+ # Helps with documentation and discovery
+ description: An AgentEx agent that does multiturn streaming chat
+
+ # Temporal workflow configuration
+ # Set enabled: true to use Temporal workflows for long-running tasks
+ temporal:
+ enabled: false
+
+ # Optional: Credentials mapping
+ # Maps Kubernetes secrets to environment variables
+ # Common credentials include:
+ # credentials:
+ # - env_var_name: OPENAI_API_KEY
+ # secret_name: openai-api-key
+ # secret_key: api-key
+
+ # Optional: Set Environment variables for running your agent locally as well
+ # as for deployment later on
+ # env:
+ # - name: OPENAI_BASE_URL
+ # value: "https://api.openai.com/v1"
+ # - name: ACCOUNT_ID
+ # value: "your_account_id_here"
+
+
+# Deployment Configuration
+# -----------------------
+# Configuration for deploying your agent to Kubernetes clusters
+deployment:
+ # Container image configuration
+ image:
+ repository: "" # Update with your container registry
+ tag: "latest" # Default tag, should be versioned in production
+
+ # Global deployment settings that apply to all clusters
+ # These can be overridden in cluster-specific files (deploy/*.yaml)
+ global:
+ agent:
+ name: "s020-streaming"
+ description: "An AgentEx agent that does multiturn streaming chat"
+
+ # Default replica count
+ replicaCount: 1
+
+ # Default resource requirements
+ resources:
+ requests:
+ cpu: "500m"
+ memory: "1Gi"
+ limits:
+ cpu: "1000m"
+ memory: "2Gi"
\ No newline at end of file
diff --git a/examples/tutorials/00_sync/020_streaming/project/__init__.py b/examples/tutorials/00_sync/020_streaming/project/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/examples/tutorials/00_sync/020_streaming/project/acp.py b/examples/tutorials/00_sync/020_streaming/project/acp.py
new file mode 100644
index 000000000..787f2daed
--- /dev/null
+++ b/examples/tutorials/00_sync/020_streaming/project/acp.py
@@ -0,0 +1,98 @@
+import os
+from typing import AsyncGenerator, Union
+
+from agentex.lib import adk
+from agentex.lib.sdk.fastacp.fastacp import FastACP
+from agentex.lib.types.acp import SendMessageParams
+from agentex.lib.types.llm_messages import AssistantMessage, LLMConfig, SystemMessage, UserMessage
+from agentex.lib.types.task_message_updates import StreamTaskMessageDelta, StreamTaskMessageDone, StreamTaskMessageFull, TaskMessageUpdate, TextDelta
+from agentex.lib.utils.model_utils import BaseModel
+from agentex.types.task_message_content import TaskMessageContent, TextContent
+
+# Create an ACP server
+acp = FastACP.create(
+ acp_type="sync",
+)
+
+
+class StateModel(BaseModel):
+ system_prompt: str
+ model: str
+
+
+# Note: The return of this handler is required to be persisted by the Agentex Server
+@acp.on_message_send
+async def handle_message_send(
+ params: SendMessageParams
+) -> Union[TaskMessageContent, AsyncGenerator[TaskMessageUpdate, None]]:
+ """
+    In this tutorial, we'll see how to handle a basic multi-turn conversation with streaming.
+ """
+ #########################################################
+    # 1-3. These steps are all the same as the multiturn tutorial.
+ #########################################################
+
+ if not params.content:
+ return
+
+ if params.content.type != "text":
+ raise ValueError(f"Expected text message, got {params.content.type}")
+
+ if params.content.author != "user":
+ raise ValueError(f"Expected user message, got {params.content.author}")
+
+ if not os.environ.get("OPENAI_API_KEY"):
+ yield StreamTaskMessageFull(
+ index=0,
+ type="full",
+ content=TextContent(
+ author="agent",
+ content="Hey, sorry I'm unable to respond to your message because you're running this example without an OpenAI API key. Please set the OPENAI_API_KEY environment variable to run this example. Do this by either by adding a .env file to the project/ directory or by setting the environment variable in your terminal.",
+ ),
+        )
+        return  # No API key, so stop before trying to call the LLM
+
+ # Try to retrieve the state. If it doesn't exist, create it.
+ task_state = await adk.state.get_by_task_and_agent(task_id=params.task.id, agent_id=params.agent.id)
+
+ if not task_state:
+ # If the state doesn't exist, create it.
+ state = StateModel(system_prompt="You are a helpful assistant that can answer questions.", model="gpt-4o-mini")
+ task_state = await adk.state.create(task_id=params.task.id, agent_id=params.agent.id, state=state)
+ else:
+ state = StateModel.model_validate(task_state.state)
+
+ task_messages = await adk.messages.list(task_id=params.task.id)
+
+ llm_messages = [
+ SystemMessage(content=state.system_prompt),
+ *[
+ UserMessage(content=message.content.content) if message.content.author == "user" else AssistantMessage(content=message.content.content)
+ for message in task_messages
+ if message.content and message.content.type == "text"
+ ]
+ ]
+
+ #########################################################
+ # 4. Call an LLM to respond to the user's message and stream the response to the client.
+ #########################################################
+
+ # Call an LLM to respond to the user's message
+
+ print(f"Calling LLM with model {state.model} and messages {llm_messages}")
+
+ # The Agentex server automatically commits input and output messages to the database so you don't need to do this yourself, simply process the input content and return the output content.
+
+ message_index = 0
+ async for chunk in adk.providers.litellm.chat_completion_stream(
+ llm_config=LLMConfig(model=state.model, messages=llm_messages, stream=True),
+ trace_id=params.task.id,
+ ):
+ if chunk and chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content:
+ yield StreamTaskMessageDelta(
+ index=message_index,
+ delta=TextDelta(text_delta=chunk.choices[0].delta.content or ""),
+ )
+
+ yield StreamTaskMessageDone(
+ index=message_index,
+ )
diff --git a/examples/tutorials/00_sync/020_streaming/requirements.txt b/examples/tutorials/00_sync/020_streaming/requirements.txt
new file mode 100644
index 000000000..e9b9640aa
--- /dev/null
+++ b/examples/tutorials/00_sync/020_streaming/requirements.txt
@@ -0,0 +1,4 @@
+agentex-py>=0.0.4,<0.1.0
+
+# Scale GenAI Platform Python SDK
+scale-gp
diff --git a/examples/tutorials/00_sync/020_streaming/test.ipynb b/examples/tutorials/00_sync/020_streaming/test.ipynb
new file mode 100644
index 000000000..386043ed6
--- /dev/null
+++ b/examples/tutorials/00_sync/020_streaming/test.ipynb
@@ -0,0 +1,193 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "id": "d1c309d6",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "AGENT_NAME = \"s020-streaming\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "id": "36834357",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from agentex import Agentex\n",
+ "\n",
+ "client = Agentex(api_key=\"random\", base_url=\"http://localhost:5003\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "id": "9f6e6ef0",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# # (Optional) Create a new task. If you don't create a new task, each message will be sent to a new task. The server will create the task for you.\n",
+ "\n",
+ "# import uuid\n",
+ "\n",
+ "# TASK_ID = str(uuid.uuid4())[:8]\n",
+ "\n",
+ "# rpc_response = client.agents.rpc_by_name(\n",
+ "# agent_name=AGENT_NAME,\n",
+ "# method=\"task/create\",\n",
+ "# params={\n",
+ "# \"name\": f\"{TASK_ID}-task\",\n",
+ "# \"params\": {}\n",
+ "# }\n",
+ "# )\n",
+ "\n",
+ "# task = rpc_response.result\n",
+ "# print(task)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 18,
+ "id": "b03b0d37",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Hello! I can assist you with a variety of tasks, including:\n",
+ "\n",
+ "1. **Answering Questions**: I can provide information on a wide range of topics, including science, history, technology, and more.\n",
+ "2. **Explaining Concepts**: If you need help understanding a concept or topic, I can provide explanations and clarifications.\n",
+ "3. **Writing Assistance**: I can help with writing tasks, such as drafting emails, essays, or creative pieces.\n",
+ "4. **Language Support**: I can assist with grammar, vocabulary, and writing in different languages.\n",
+ "5. **Providing Recommendations**: Whether it's books, movies, or recipes, I can suggest options based on your preferences.\n",
+ "6. **Problem-Solving**: I can help you think through problems or brainstorm ideas.\n",
+ "\n",
+ "If there's something specific you need help with, feel free to ask!\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Test non streaming response\n",
+ "from agentex.types import TaskMessage, TextContent\n",
+ "\n",
+ "rpc_response = client.agents.rpc_by_name(\n",
+ " agent_name=AGENT_NAME,\n",
+ " method=\"message/send\",\n",
+ " params={\n",
+ " \"content\": {\"type\": \"text\", \"author\": \"user\", \"content\": \"Hello what can you do?\"},\n",
+ " \"stream\": False\n",
+ " }\n",
+ ")\n",
+ "\n",
+ "# # Extract and print just the text content from the response\n",
+ "# # The response is expected to be a dict with a \"result\" key containing a list of message dicts\n",
+ "if rpc_response and rpc_response.result:\n",
+ " for message in rpc_response.result:\n",
+ " if isinstance(message, TaskMessage):\n",
+ " content = message.content\n",
+ " if isinstance(content, TextContent):\n",
+ " text = content.content\n",
+ " print(text)\n",
+ " else:\n",
+ " print(\"No text content found in response.\")\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 19,
+ "id": "79688331",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Hello! I can help with a variety of tasks, including:\n",
+ "\n",
+ "1. **Answering Questions**: I can provide information on a wide range of topics, from historical facts to scientific concepts.\n",
+ "\n",
+ "2. **Providing Explanations**: If you're looking to understand a specific topic better, I can explain it in detail.\n",
+ "\n",
+ "3. **Offering Writing Assistance**: I can help with writing prompts, editing text, or generating ideas for essays, articles, and more.\n",
+ "\n",
+ "4. **Solving Problems**: I can assist with math problems, logic puzzles, and more.\n",
+ "\n",
+ "5. **Learning Support**: I can help with study tips, summarizing information, and creating study guides.\n",
+ "\n",
+ "6. **Conversational Practice**: If you want to practice a language or just have a chat, I’m here for that too!\n",
+ "\n",
+ "7. **Recommendation Systems**: I can recommend books, movies, or other media based on your interests.\n",
+ "\n",
+ "Feel free to ask me anything specific you need help with!"
+ ]
+ }
+ ],
+ "source": [
+ "# Test streaming response\n",
+ "import json\n",
+ "from agentex.types import AgentRpcResponse\n",
+ "from agentex.types.agent_rpc_result import StreamTaskMessageDelta, StreamTaskMessageFull\n",
+ "from agentex.types.text_delta import TextDelta\n",
+ "\n",
+ "with client.agents.with_streaming_response.rpc_by_name(\n",
+ " agent_name=AGENT_NAME,\n",
+ " method=\"message/send\",\n",
+ " params={\n",
+ " \"content\": {\"type\": \"text\", \"author\": \"user\", \"content\": \"Hello what can you do?\"},\n",
+ " \"stream\": True\n",
+ " }\n",
+ ") as response:\n",
+ " for streaming_response_str in response.iter_text():\n",
+ " chunk_rpc_responses = streaming_response_str.split(\"\\n\")\n",
+ " for chunk_rpc_response in chunk_rpc_responses:\n",
+ " if chunk_rpc_response:\n",
+ " chunk_rpc_response_dict = json.loads(chunk_rpc_response)\n",
+ " chunk_rpc_response = AgentRpcResponse.model_validate(chunk_rpc_response_dict)\n",
+ " result = chunk_rpc_response.result\n",
+ " # Print only the text deltas as they arrive or any full messages\n",
+ " if isinstance(result, StreamTaskMessageDelta):\n",
+ " delta = result.delta\n",
+ " if isinstance(delta, TextDelta):\n",
+ " print(delta.text_delta, end=\"\", flush=True)\n",
+ " elif isinstance(result, StreamTaskMessageFull):\n",
+ " content = result.content\n",
+ " if isinstance(content, TextContent):\n",
+ " print(content.content)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "4ffb663c",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.12.9"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/examples/tutorials/00_sync/030_hello_oldowan/.dockerignore b/examples/tutorials/00_sync/030_hello_oldowan/.dockerignore
new file mode 100644
index 000000000..c3620f1bc
--- /dev/null
+++ b/examples/tutorials/00_sync/030_hello_oldowan/.dockerignore
@@ -0,0 +1,46 @@
+# Python
+__pycache__/
+*.py[cod]
+*$py.class
+*.so
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# Environments
+.env**
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# IDE
+.idea/
+.vscode/
+*.swp
+*.swo
+
+# Git
+.git
+.gitignore
+
+# Misc
+.DS_Store
+
+# CodeArtifact configuration
+.codeartifact-pip-conf
\ No newline at end of file
diff --git a/examples/tutorials/00_sync/030_hello_oldowan/Dockerfile b/examples/tutorials/00_sync/030_hello_oldowan/Dockerfile
new file mode 100644
index 000000000..0ad11f331
--- /dev/null
+++ b/examples/tutorials/00_sync/030_hello_oldowan/Dockerfile
@@ -0,0 +1,45 @@
+# syntax=docker/dockerfile:1.3
+FROM python:3.12-slim
+COPY --from=ghcr.io/astral-sh/uv:0.6.4 /uv /uvx /bin/
+
+# Install system dependencies
+RUN apt-get update && apt-get install -y \
+ htop \
+ vim \
+ curl \
+ tar \
+ python3-dev \
+ postgresql-client \
+ build-essential \
+ libpq-dev \
+ gcc \
+ cmake \
+ netcat-openbsd \
+ && apt-get clean \
+ && rm -rf /var/lib/apt/lists/*
+
+RUN uv pip install --system --upgrade pip setuptools wheel
+
+ENV UV_HTTP_TIMEOUT=1000
+
+# Copy just the requirements file to optimize caching
+COPY 030_hello_oldowan/requirements.txt /app/requirements.txt
+
+WORKDIR /app/
+
+# Install the required Python packages
+RUN --mount=type=secret,id=codeartifact-pip-conf,target=/etc/pip.conf \
+ export UV_INDEX_URL=$(grep -E "^index-url" /etc/pip.conf | cut -d'=' -f2- | xargs) && \
+ export UV_EXTRA_INDEX_URL=$(grep -E "^extra-index-url" /etc/pip.conf | cut -d'=' -f2- | xargs || echo "") && \
+ uv pip install --system -r requirements.txt
+
+# Copy the project code
+COPY 030_hello_oldowan/project /app/project
+
+WORKDIR /app/project
+
+# Set environment variables
+ENV PYTHONPATH=/app
+
+# Run the agent using uvicorn
+CMD ["uvicorn", "acp:acp", "--host", "0.0.0.0", "--port", "8000"]
\ No newline at end of file
diff --git a/examples/tutorials/00_sync/030_hello_oldowan/README.md b/examples/tutorials/00_sync/030_hello_oldowan/README.md
new file mode 100644
index 000000000..756539ef6
--- /dev/null
+++ b/examples/tutorials/00_sync/030_hello_oldowan/README.md
@@ -0,0 +1,17 @@
+# Hello Oldowan Agent
+
+This is a simple example agent that demonstrates the basics of the Agent 2 Client Protocol (ACP) and the AgentEx framework with an integration to oldowan.
+
+## For Development
+Navigate to `tutorials/00_sync/030_hello_oldowan`
+
+```bash
+# Generate CodeArtifact configuration for building (run from repo root)
+./setup-build-codeartifact.sh
+
+# Set up local development environment
+uv venv --python 3.12
+source .venv/bin/activate
+
+uv pip install -r requirements.txt --prerelease=allow
+```
diff --git a/examples/tutorials/00_sync/030_hello_oldowan/manifest.yaml b/examples/tutorials/00_sync/030_hello_oldowan/manifest.yaml
new file mode 100644
index 000000000..bcbd04e51
--- /dev/null
+++ b/examples/tutorials/00_sync/030_hello_oldowan/manifest.yaml
@@ -0,0 +1,115 @@
+# Agent Manifest Configuration
+# ---------------------------
+# This file defines how your agent should be built and deployed.
+
+# Build Configuration
+# ------------------
+# The build config defines what gets packaged into your agent's Docker image.
+# This same configuration is used whether building locally or remotely.
+#
+# When building:
+# 1. All files from include_paths are collected into a build context
+# 2. The context is filtered by dockerignore rules
+# 3. The Dockerfile uses this context to build your agent's image
+# 4. The image is pushed to a registry and used to run your agent
+build:
+ context:
+ # Root directory for the build context
+ root: ../ # Keep this as the default root
+
+ # Paths to include in the Docker build context
+ # Must include:
+ # - agentex-py: The core framework (needed for agent execution)
+ # - Your agent's directory (your custom agent code)
+ # These paths are collected and sent to the Docker daemon for building
+ include_paths:
+ - 030_hello_oldowan
+
+ # Path to your agent's Dockerfile
+ # This defines how your agent's image is built from the context
+ # Relative to the root directory
+ dockerfile: 030_hello_oldowan/Dockerfile
+
+ # Path to your agent's .dockerignore
+ # Filters unnecessary files from the build context
+ # Helps keep build context small and builds fast
+ dockerignore: 030_hello_oldowan/.dockerignore
+
+# Local Development Configuration
+# -----------------------------
+# Only used when running the agent locally
+local_development:
+ agent:
+ port: 8000 # Port where your local ACP server is running
+ host_address: host.docker.internal # Host address for Docker networking (host.docker.internal for Docker, localhost for direct)
+
+ # File paths for local development (relative to this manifest.yaml)
+ paths:
+ # Path to ACP server file
+ # Examples:
+ # project/acp.py (standard)
+ # src/server.py (custom structure)
+ # ../shared/acp.py (shared across projects)
+ # /absolute/path/acp.py (absolute path)
+ acp: project/acp.py
+
+# Agent Configuration
+# -----------------
+agent:
+ acp_type: sync
+ # Unique name for your agent
+ # Used for task routing and monitoring
+ name: s030-hello-oldowan
+
+ # Description of what your agent does
+ # Helps with documentation and discovery
+  description: An AgentEx agent that does multiturn streaming chat with tools using oldowan
+
+ # Temporal workflow configuration
+ # Set enabled: true to use Temporal workflows for long-running tasks
+ temporal:
+ enabled: false
+
+ # Optional: Credentials mapping
+ # Maps Kubernetes secrets to environment variables
+ # Common credentials include:
+ # credentials:
+ # - env_var_name: OPENAI_API_KEY
+ # secret_name: openai-api-key
+ # secret_key: api-key
+
+ # Optional: Set Environment variables for running your agent locally as well
+ # as for deployment later on
+ # env:
+ # - name: OPENAI_BASE_URL
+ # value: "https://api.openai.com/v1"
+ # - name: ACCOUNT_ID
+ # value: "your_account_id_here"
+
+# Deployment Configuration
+# -----------------------
+# Configuration for deploying your agent to Kubernetes clusters
+deployment:
+ # Container image configuration
+ image:
+ repository: "" # Update with your container registry
+ tag: "latest" # Default tag, should be versioned in production
+
+ # Global deployment settings that apply to all clusters
+ # These can be overridden in cluster-specific files (deploy/*.yaml)
+ global:
+ agent:
+ name: "s030-hello-oldowan"
+      description: "An AgentEx agent that does multiturn streaming chat with tools using oldowan"
+
+ # Default replica count
+ replicaCount: 1
+
+ # Default resource requirements
+ resources:
+ requests:
+ cpu: "500m"
+ memory: "1Gi"
+ limits:
+ cpu: "1000m"
+ memory: "2Gi"
diff --git a/examples/tutorials/00_sync/030_hello_oldowan/project/__init__.py b/examples/tutorials/00_sync/030_hello_oldowan/project/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/examples/tutorials/00_sync/030_hello_oldowan/project/acp.py b/examples/tutorials/00_sync/030_hello_oldowan/project/acp.py
new file mode 100644
index 000000000..f6f6795a9
--- /dev/null
+++ b/examples/tutorials/00_sync/030_hello_oldowan/project/acp.py
@@ -0,0 +1,512 @@
+import os
+import json
+from typing import AsyncGenerator, Callable, List, Union, Dict
+from functools import partial
+
+from agentex.lib import adk
+from agentex.lib.sdk.fastacp.fastacp import FastACP
+from agentex.lib.types.task_message_updates import (
+ StreamTaskMessageDelta,
+ StreamTaskMessageFull,
+ TaskMessageUpdate,
+ TextDelta,
+ ToolRequestDelta,
+ ToolResponseDelta,
+)
+from agentex.lib.types.acp import SendMessageParams
+from agentex.lib.types.llm_messages import Message, UserMessage
+from agentex.lib.utils.model_utils import BaseModel
+from agentex.types.span import Span
+from agentex.types.task_message_content import TaskMessageContent, TextContent, ToolRequestContent, ToolResponseContent
+
+
+from oldowan.tools.internal import google_search
+from oldowan.completions import (
+ ToolMessage,
+ ChatCompletionMessage,
+ simple_agent_acompletion,
+ ChoiceDelta,
+)
+
+assert os.environ.get("SGP_API_KEY") is not None, "SGP_API_KEY is not set"
+assert os.environ.get("SGP_ACCOUNT_ID") is not None, "SGP_ACCOUNT_ID is not set"
+
+
+def think(thinking_str: str):
+ """
+ Use the tool to think about something. It will not obtain new information or change the database, but just append the thought to the log. Use it when complex reasoning or some cache memory is needed.
+ Args:
+ thinking_str: A thought to think about.
+ """
+ return
+
+
+TOOL_DICT = {
+ "google_search": google_search,
+ "think": think,
+}
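+
+# A hypothetical sketch of how another tool could be registered (get_time is illustrative only
+# and not part of this tutorial): define a plain function with a docstring, add it to
+# TOOL_DICT, and include its name in the SimpleAgentCompletionConfig.tools list used in
+# handle_message_send below, e.g.
+#
+#   def get_time() -> str:
+#       """Return the current UTC time as an ISO 8601 string."""
+#       from datetime import datetime, timezone
+#       return datetime.now(timezone.utc).isoformat()
+#
+#   TOOL_DICT["get_time"] = get_time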
+
+# Create an ACP server
+acp = FastACP.create(
+ acp_type="sync",
+)
+
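+# A tool request and its response share the same tool_call_id, so tool responses are keyed by
+# tool_call_id + TOOL_RESPONSE_ID_SUFFIX below to give them their own task message index.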
+TOOL_RESPONSE_ID_SUFFIX = "_response"
+
+
+class SimpleAgentCompletionConfig(BaseModel):
+ model: str
+ tools: List[str]
+ max_tokens: int
+ stream: bool
+
+
+class StateModel(BaseModel):
+ turn_number: int # The number of turns the agent has taken
+ messages: List[Message] # The messages the agent has seen
+    simple_agent_completion_config: (
+        SimpleAgentCompletionConfig  # The configuration for the agent completion call
+    )
+
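+# The state above is persisted with adk.state and re-loaded on every turn (see
+# handle_message_send below): `messages` accumulates the full conversation history and
+# `turn_number` increments once per user message.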
+
+def convert_choice_delta_to_stream_task_message_deltas(
+ choice_delta: ChoiceDelta, idx: int
+) -> List[StreamTaskMessageDelta]:
+ """
+ This function converts a ChoiceDelta to a list of StreamTaskMessageDelta objects.
+ Args:
+ choice_delta: The ChoiceDelta to convert.
+        idx: The index of the task message that these deltas belong to.
+ Returns:
+ A list of StreamTaskMessageDelta objects.
+ """
+ # these are tool requests
+ deltas = []
+ if (
+ choice_delta.tool_calls is not None
+ and choice_delta.tool_calls[0].function.name is not None
+ ):
+ for tool_call in choice_delta.tool_calls:
+ deltas.append(
+ StreamTaskMessageDelta(
+ index=idx,
+ delta=ToolRequestDelta(
+ type="tool_request",
+ tool_call_id=tool_call.id,
+ name=tool_call.function.name,
+ arguments_delta=tool_call.function.arguments,
+ ),
+ )
+ )
+ # pass
+ # These are tool responses
+ elif choice_delta.role == "tool":
+ deltas.append(
+ StreamTaskMessageDelta(
+ index=idx,
+ delta=ToolResponseDelta(
+ type="tool_response",
+ tool_call_id=choice_delta.tool_call_id,
+ name=choice_delta.name,
+ content_delta=choice_delta.content,
+ ),
+ )
+ )
+
+ # These are assistant messages
+ elif choice_delta.content is not None:
+ deltas.append(
+ StreamTaskMessageDelta(
+ index=idx,
+ delta=TextDelta(
+ type="text",
+ text_delta=choice_delta.content,
+ ),
+ )
+ )
+
+ return deltas
+
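+# For illustration: a streamed assistant text chunk such as ChoiceDelta(role="assistant",
+# content="Hel") maps to StreamTaskMessageDelta(index=i, delta=TextDelta(type="text",
+# text_delta="Hel")), while a tool-call chunk maps to a ToolRequestDelta carrying the
+# tool_call_id, name, and an arguments_delta fragment (ChoiceDelta fields are assumed from how
+# they are accessed above).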
+
+def convert_choice_delta_to_message_content(
+ choice_delta: ChoiceDelta,
+) -> TaskMessageContent:
+ """
+ This function converts a ChoiceDelta to a TaskMessageContent object.
+ Args:
+ choice_delta: The ChoiceDelta to convert.
+ Returns:
+ A TaskMessageContent object.
+ """
+ # This converts a ChoiceDelta to a TaskMessage which will instantiate "the box" to send to client
+ if choice_delta.tool_calls is not None:
+        # since we are streaming we can assume we only need to create a message for the first tool call
+ return ToolRequestContent(
+ author="agent",
+ name=choice_delta.tool_calls[0].function.name,
+ tool_call_id=choice_delta.tool_calls[0].id,
+ arguments={}, # have to start this empty since we are streaming
+ )
+ elif choice_delta.role == "tool":
+        return ToolResponseContent(
+            author="agent",
+            tool_call_id=choice_delta.tool_call_id,
+ name=choice_delta.name,
+ content="", # starting empty because we add to it
+ )
+ elif choice_delta.role == "assistant":
+ return TextContent(
+ author="agent",
+ content="", # starting empty because we add to it
+ )
+ raise ValueError(
+ f"Unknown role: {choice_delta.role}. Failed to convert to TaskMessage"
+ )
+
+
+def convert_oldowan_message_to_stream_task_message_full(
+ id_to_task_message_idx: Dict[str, int],
+ oldowan_message: Union[ChatCompletionMessage, ToolMessage],
+) -> List[StreamTaskMessageFull]:
+ """
+ This function converts an Oldowan message to a list of StreamTaskMessageFull objects.
+ Args:
+        id_to_task_message_idx: A mapping from message, tool-call, or tool-response id to the stream index of its task message.
+ oldowan_message: The Oldowan message to convert.
+ Returns:
+ A list of StreamTaskMessageFull objects.
+ """
+
+ fulls = []
+ if isinstance(oldowan_message, ChatCompletionMessage):
+ # First create all tool calls
+ if oldowan_message.tool_calls is not None:
+ for tool_call in oldowan_message.tool_calls:
+ fulls.append(
+ StreamTaskMessageFull(
+ index=id_to_task_message_idx[tool_call.id],
+ content=ToolRequestContent(
+ author="agent",
+ name=tool_call.function.name,
+ arguments=json.loads(tool_call.function.arguments),
+ tool_call_id=tool_call.id,
+ ),
+ )
+ )
+
+ # Create the assistant messages
+ if oldowan_message.content is not None:
+ fulls.append(
+ StreamTaskMessageFull(
+ index=id_to_task_message_idx[oldowan_message.id],
+ content=TextContent(
+ author="agent",
+ content=oldowan_message.content,
+ ),
+ )
+ )
+
+ # Finally create the tool responses
+ elif isinstance(oldowan_message, ToolMessage):
+ fulls.append(
+ StreamTaskMessageFull(
+ index=id_to_task_message_idx[
+ oldowan_message.tool_call_id + TOOL_RESPONSE_ID_SUFFIX
+ ],
+ content=ToolResponseContent(
+ author="agent",
+ tool_call_id=oldowan_message.tool_call_id,
+ name=oldowan_message.name,
+ content=oldowan_message.content,
+ ),
+ )
+ )
+
+ return fulls
+
+
+def get_oldowan_message_ids(
+ oldowan_message: Union[ChatCompletionMessage, ToolMessage],
+) -> List[str]:
+ """
+ This function gets the ids of the oldowan message.
+ Args:
+ oldowan_message: The Oldowan message to get the ids of.
+ Returns:
+ A list of ids.
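+
+    For example, an assistant message with text content and one tool call yields
+    [message.id, tool_call.id], while a ToolMessage yields
+    [tool_call_id + TOOL_RESPONSE_ID_SUFFIX].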
+ """
+ message_ids = []
+ if isinstance(oldowan_message, ChatCompletionMessage):
+ # check that there is content
+ if oldowan_message.content is not None:
+ message_ids.append(oldowan_message.id)
+
+ # check if there are tool calls
+ if oldowan_message.tool_calls is not None:
+ for tool_call in oldowan_message.tool_calls:
+ message_ids.append(tool_call.id)
+
+ elif isinstance(oldowan_message, ToolMessage):
+ message_ids.append(oldowan_message.tool_call_id + TOOL_RESPONSE_ID_SUFFIX)
+
+ return message_ids
+
+
+# This will eventually become adk.providers.oldowan.stream_agent_sync
+async def stream_oldowan_agent_sync(
+ messages: List[Message],
+ task_id: str,
+ span: Span,
+ simple_agent_acompletion_fn: Callable,
+) -> AsyncGenerator[TaskMessageUpdate, None]:
+ """
+ Stream an Oldowan agent response to the client.
+ Args:
+ messages: The messages to send to the agent.
+ task_id: The task id.
+        span: The span to use for tracing.
+        simple_agent_acompletion_fn: The (partially applied) oldowan completion function to call with the messages.
+ Returns:
+ AsyncGenerator[TaskMessageUpdate, None]: A generator of task message updates.
+ """
+ response_stream = await simple_agent_acompletion_fn(messages=messages)
+
+ # This is used to create the current TaskMessage object
+ cur_task_message_id = None
+ cur_idx = 0
+
+ # This maps id either from message object, tool_call, or tool_response to the TaskMessage object
+ id_to_task_message_idx = {}
+
+ # These are messages that have already been sent in "full"
+ persisted_messages = []
+ events = []
+
+ # These are ChoiceDelta objects
+ async for event in response_stream:
+ if event.role is not None:
+            # if a tool call is made, check whether it's a new tool_call_id
+ if (
+ event.tool_calls is not None
+ and event.tool_calls[0].id is not None
+ and event.tool_calls[0].id not in id_to_task_message_idx
+ ):
+ print(f"Role changed: {event.role}")
+ print(f"Tool call id changed: {event.tool_calls[0].id}")
+ cur_task_message_id = event.tool_calls[0].id
+ id_to_task_message_idx[event.tool_calls[0].id] = cur_idx
+ cur_idx += 1
+
+ # id_to_task_message[event.tool_calls[0].id] = await adk.messages.create(
+ # task_id=task_id,
+ # content=convert_choice_delta_to_message_content(event),
+ # )
+ # print(f"Created new task message: {id_to_task_message[event.tool_calls[0].id]}")
+
+            # If this is a tool response, check whether a task message has already been started for this tool_call_id's response
+ elif event.role == "tool" and (
+ event.tool_call_id + TOOL_RESPONSE_ID_SUFFIX
+ not in id_to_task_message_idx
+ ):
+ print(f"Role changed: {event.role}")
+ print(
+ f"Tool Response id: {event.tool_call_id + TOOL_RESPONSE_ID_SUFFIX}"
+ )
+ cur_task_message_id = event.tool_call_id + TOOL_RESPONSE_ID_SUFFIX
+ id_to_task_message_idx[event.tool_call_id + TOOL_RESPONSE_ID_SUFFIX] = (
+ cur_idx
+ )
+ cur_idx += 1
+ # id_to_task_message[event.tool_call_id + TOOL_RESPONSE_ID_SUFFIX] = await adk.messages.create(
+ # task_id=task_id,
+ # content=convert_choice_delta_to_message_content(event),
+ # )
+ # print(f"Created new task message: {id_to_task_message[event.tool_call_id + TOOL_RESPONSE_ID_SUFFIX]}")
+
+ elif (
+ event.role == "assistant"
+ and event.content is not None
+ and event.id not in id_to_task_message_idx
+ ): # this is an assistant message
+ print(f"Role is: {event.role}")
+ assert hasattr(
+ event, "id"
+ ), "Event does not have an id, please upgrade to latest oldowan"
+ print(f"Event id: {event.id}")
+ cur_task_message_id = event.id
+ id_to_task_message_idx[event.id] = cur_idx
+ cur_idx += 1
+ # update the current role and task message
+ # id_to_task_message[event.id] = await adk.messages.create(
+ # task_id=task_id,
+ # content=convert_choice_delta_to_message_content(event),
+ # trace_id=task_id,
+ # )
+ # print(f"Created new task message: {id_to_task_message[event.id]}")
+
+ # Now we can create the items to stream
+ # NOTE: key assumption is that ChoiceDeltaToolCall can only apply to one tool call at a time.
+ for task_message_delta in convert_choice_delta_to_stream_task_message_deltas(
+ event, idx=id_to_task_message_idx[cur_task_message_id]
+ ):
+ yield task_message_delta
+
+ events.append(event)
+
+    # An oldowan message can be completed either before or after its corresponding task
+    # message(s) have been created, because tool response messages are added to
+    # `response_stream.messages` immediately but are streamed one after the other.
+    # For each oldowan message that has not been persisted yet, persist it once all of its
+    # task messages have been created.
+ for idx, oldowan_message in enumerate(response_stream.messages):
+ if oldowan_message not in persisted_messages and all(
+ [
+ id in id_to_task_message_idx
+ for id in get_oldowan_message_ids(oldowan_message)
+ ]
+ ):
+ async with adk.tracing.span(
+ trace_id=task_id,
+ parent_id=span.id,
+ name=f"Message {idx}",
+ input=messages
+ + response_stream.messages[:idx], # input messages to this message
+ ) as message_span:
+ message_span.output = oldowan_message
+
+ # Send the full messages now that they are done
+ for (
+ stream_task_message_full
+ ) in convert_oldowan_message_to_stream_task_message_full(
+ id_to_task_message_idx=id_to_task_message_idx,
+ oldowan_message=oldowan_message,
+ ):
+ yield stream_task_message_full
+
+ print(f"Persisted message: {oldowan_message}")
+ persisted_messages.append(oldowan_message)
+
+ # Stream the last object
+ async with adk.tracing.span(
+ trace_id=task_id,
+ parent_id=span.id,
+ name=f"Message {len(response_stream.messages)}",
+ input=messages + response_stream.messages[:-1],
+ ) as message_span:
+ message_span.output = response_stream.messages[-1]
+
+ # Persist the last message to the DB
+ for stream_task_message_full in convert_oldowan_message_to_stream_task_message_full(
+ id_to_task_message_idx=id_to_task_message_idx,
+ oldowan_message=response_stream.messages[-1],
+ ):
+ yield stream_task_message_full
+ print(f"Persisted message: {response_stream.messages[-1]}")
+ persisted_messages.append(response_stream.messages[-1])
+
+ # Aggregate the messages and store the output
+ messages = response_stream.messages
+ span.output = messages
+
+
+# Note: The return of this handler is required to be persisted by the Agentex Server
+@acp.on_message_send
+async def handle_message_send(
+ params: SendMessageParams
+) -> Union[TaskMessageContent, AsyncGenerator[TaskMessageUpdate, None]]:
+ """
+    In this tutorial, we'll see how to handle a multi-turn streaming conversation with tool use, using oldowan to call the LLM.
+ """
+ #########################################################
+ # 1-3. These steps are all the same as the hello acp tutorial.
+ #########################################################
+
+ if params.content.type != "text":
+ raise ValueError(f"Expected text message, got {params.content.type}")
+
+ if params.content.author != "user":
+ raise ValueError(f"Expected user message, got {params.content.author}")
+
+ if not os.environ.get("OPENAI_API_KEY"):
+ yield StreamTaskMessageFull(
+ index=0,
+ content=TextContent(
+ author="agent",
+ content="Hey, sorry I'm unable to respond to your message because you're running this example without an OpenAI API key. Please set the OPENAI_API_KEY environment variable to run this example. Do this by either by adding a .env file to the project/ directory or by setting the environment variable in your terminal.",
+ ),
+        )
+        # Without an API key we cannot call the LLM below, so stop here.
+        return
+
+ # Try to retrieve the state. If it doesn't exist, create it.
+ task_state = await adk.state.get_by_task_and_agent(
+ task_id=params.task.id, agent_id=params.agent.id
+ )
+
+ if not task_state:
+ # If the state doesn't exist, create it.
+ state = StateModel(
+ simple_agent_completion_config=SimpleAgentCompletionConfig(
+ model="openai/gpt-4o",
+ tools=["google_search", "think"],
+ max_tokens=8192,
+ stream=True,
+ ),
+ messages=[],
+ turn_number=0,
+ )
+ assert all(
+ [tool in TOOL_DICT for tool in state.simple_agent_completion_config.tools]
+ ), f"Invalid tool: {state.simple_agent_completion_config.tools}"
+ task_state = await adk.state.create(
+ task_id=params.task.id, agent_id=params.agent.id, state=state
+ )
+ else:
+ state = StateModel.model_validate(task_state.state)
+
+ messages = state.messages
+
+ #########################################################
+ # 4. Call an LLM to respond to the user's message and stream the response to the client.
+ #########################################################
+ print(
+        f"Calling LLM with config {state.simple_agent_completion_config.model_dump_json()} and messages {messages}"
+ )
+
+ # Add the user's message to the conversation history
+ state.messages.append(UserMessage(content=params.content.content))
+
+    # The Agentex server automatically commits input and output messages to the database, so you don't need to do this yourself; simply process the input content and return the output content.
+ async with adk.tracing.span(
+ trace_id=params.task.id,
+ name=f"Turn {state.turn_number}",
+ input=state,
+ ) as span:
+ simple_agent_completion_fn = partial(
+ simple_agent_acompletion,
+ model=state.simple_agent_completion_config.model,
+ tools=[
+ TOOL_DICT[tool] for tool in state.simple_agent_completion_config.tools
+ ],
+ max_tokens=state.simple_agent_completion_config.max_tokens,
+ stream=state.simple_agent_completion_config.stream,
+ )
+ # Stream the response and collect the generated messages
+ async for chunk in stream_oldowan_agent_sync(
+ messages=messages,
+ task_id=params.task.id,
+ span=span,
+ simple_agent_acompletion_fn=simple_agent_completion_fn,
+ ):
+ yield chunk
+
+ # The generated messages are accessible from the span output
+ state.messages.extend(span.output)
+
+ state.turn_number += 1
+
+ # Update the state with the new messages
+ await adk.state.update(
+ task_id=params.task.id,
+ agent_id=params.agent.id,
+ state_id=task_state.id,
+ state=state,
+ trace_id=params.task.id,
+ )
diff --git a/examples/tutorials/00_sync/030_hello_oldowan/requirements.txt b/examples/tutorials/00_sync/030_hello_oldowan/requirements.txt
new file mode 100644
index 000000000..1077299aa
--- /dev/null
+++ b/examples/tutorials/00_sync/030_hello_oldowan/requirements.txt
@@ -0,0 +1,6 @@
+# Install agentex-py from local path
+agentex-py>=0.0.4,<0.1.0
+
+# Scale GenAI Platform Python SDK
+scale-gp
+scale-oldowan>=0.3.17
\ No newline at end of file
diff --git a/examples/tutorials/10_agentic/00_base/000_hello_acp/.dockerignore b/examples/tutorials/10_agentic/00_base/000_hello_acp/.dockerignore
new file mode 100644
index 000000000..4521d8465
--- /dev/null
+++ b/examples/tutorials/10_agentic/00_base/000_hello_acp/.dockerignore
@@ -0,0 +1,46 @@
+# Python
+__pycache__/
+*.py[cod]
+*$py.class
+*.so
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# Environments
+.env**
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# IDE
+.idea/
+.vscode/
+*.swp
+*.swo
+
+# Git
+.git
+.gitignore
+
+# Misc
+.DS_Store
+
+# CodeArtifact configuration
+.codeartifact-pip-conf
\ No newline at end of file
diff --git a/examples/tutorials/10_agentic/00_base/000_hello_acp/Dockerfile b/examples/tutorials/10_agentic/00_base/000_hello_acp/Dockerfile
new file mode 100644
index 000000000..34f07ab19
--- /dev/null
+++ b/examples/tutorials/10_agentic/00_base/000_hello_acp/Dockerfile
@@ -0,0 +1,45 @@
+# syntax=docker/dockerfile:1.3
+FROM python:3.12-slim
+COPY --from=ghcr.io/astral-sh/uv:0.6.4 /uv /uvx /bin/
+
+# Install system dependencies
+RUN apt-get update && apt-get install -y \
+ htop \
+ vim \
+ curl \
+ tar \
+ python3-dev \
+ postgresql-client \
+ build-essential \
+ libpq-dev \
+ gcc \
+ cmake \
+ netcat-openbsd \
+ && apt-get clean \
+ && rm -rf /var/lib/apt/lists/*
+
+RUN uv pip install --system --upgrade pip setuptools wheel
+
+ENV UV_HTTP_TIMEOUT=1000
+
+# Copy just the requirements file to optimize caching
+COPY 000_hello_acp/requirements.txt /app/requirements.txt
+
+WORKDIR /app/
+
+# Install the required Python packages
+RUN --mount=type=secret,id=codeartifact-pip-conf,target=/etc/pip.conf \
+ export UV_INDEX_URL=$(grep -E "^index-url" /etc/pip.conf | cut -d'=' -f2- | xargs) && \
+ export UV_EXTRA_INDEX_URL=$(grep -E "^extra-index-url" /etc/pip.conf | cut -d'=' -f2- | xargs || echo "") && \
+ uv pip install --system -r requirements.txt
+
+# Copy the project code
+COPY 000_hello_acp/project /app/project
+
+WORKDIR /app/project
+
+# Set environment variables
+ENV PYTHONPATH=/app
+
+# Run the agent using uvicorn
+CMD ["uvicorn", "acp:acp", "--host", "0.0.0.0", "--port", "8000"]
\ No newline at end of file
diff --git a/examples/tutorials/10_agentic/00_base/000_hello_acp/manifest.yaml b/examples/tutorials/10_agentic/00_base/000_hello_acp/manifest.yaml
new file mode 100644
index 000000000..ca5f5df36
--- /dev/null
+++ b/examples/tutorials/10_agentic/00_base/000_hello_acp/manifest.yaml
@@ -0,0 +1,122 @@
+# Agent Manifest Configuration
+# ---------------------------
+# This file defines how your agent should be built and deployed.
+
+# Build Configuration
+# ------------------
+# The build config defines what gets packaged into your agent's Docker image.
+# This same configuration is used whether building locally or remotely.
+#
+# When building:
+# 1. All files from include_paths are collected into a build context
+# 2. The context is filtered by dockerignore rules
+# 3. The Dockerfile uses this context to build your agent's image
+# 4. The image is pushed to a registry and used to run your agent
+build:
+ context:
+ # Root directory for the build context
+ root: ../ # Keep this as the default root
+
+ # Paths to include in the Docker build context
+ # Must include:
+ # - agentex-py: The core framework (needed for agent execution)
+ # - Your agent's directory (your custom agent code)
+ # These paths are collected and sent to the Docker daemon for building
+ include_paths:
+ - 000_hello_acp
+
+ # Path to your agent's Dockerfile
+ # This defines how your agent's image is built from the context
+ # Relative to the root directory
+ dockerfile: 000_hello_acp/Dockerfile
+
+ # Path to your agent's .dockerignore
+ # Filters unnecessary files from the build context
+ # Helps keep build context small and builds fast
+ dockerignore: 000_hello_acp/.dockerignore
+
+
+# Local Development Configuration
+# -----------------------------
+# Only used when running the agent locally
+local_development:
+ agent:
+ port: 8000 # Port where your local ACP server is running
+ host_address: host.docker.internal # Host address for Docker networking (host.docker.internal for Docker, localhost for direct)
+
+ # File paths for local development (relative to this manifest.yaml)
+ paths:
+ # Path to ACP server file
+ # Examples:
+ # project/acp.py (standard)
+ # src/server.py (custom structure)
+ # ../shared/acp.py (shared across projects)
+ # /absolute/path/acp.py (absolute path)
+ acp: project/acp.py
+
+
+# Agent Configuration
+# -----------------
+agent:
+ # Unique name for your agent
+ # Used for task routing and monitoring
+ name: ab000-hello-acp
+
+ # Type of ACP to use
+ # sync: Simple synchronous ACP implementation
+ # agentic: Advanced ACP with sub-types "base" or "temporal" (requires config)
+ acp_type: agentic
+
+ # Description of what your agent does
+ # Helps with documentation and discovery
+ description: An AgentEx agent that is not intelligent. It just shows how to implement the base agentic ACP type.
+
+ # Temporal workflow configuration
+ # Set enabled: true to use Temporal workflows for long-running tasks
+ temporal:
+ enabled: false
+
+ # Optional: Credentials mapping
+ # Maps Kubernetes secrets to environment variables
+ # Common credentials include:
+ # credentials:
+ # - env_var_name: OPENAI_API_KEY
+ # secret_name: openai-api-key
+ # secret_key: api-key
+
+ # Optional: Set Environment variables for running your agent locally as well
+ # as for deployment later on
+ # env:
+ # - name: OPENAI_BASE_URL
+ # value: "https://api.openai.com/v1"
+ # - name: ACCOUNT_ID
+ # value: "your_account_id_here"
+
+
+# Deployment Configuration
+# -----------------------
+# Configuration for deploying your agent to Kubernetes clusters
+deployment:
+ # Container image configuration
+ image:
+ repository: "" # Update with your container registry
+ tag: "latest" # Default tag, should be versioned in production
+
+ # Global deployment settings that apply to all clusters
+ # These can be overridden in cluster-specific files (deploy/*.yaml)
+ global:
+ agent:
+ name: "ab000-hello-acp"
+ description: "An AgentEx agent that is not intelligent. It just shows how to implement the base agentic ACP type."
+
+ # Default replica count
+ replicaCount: 1
+
+ # Default resource requirements
+ resources:
+ requests:
+ cpu: "500m"
+ memory: "1Gi"
+ limits:
+ cpu: "1000m"
+ memory: "2Gi"
\ No newline at end of file
diff --git a/examples/tutorials/10_agentic/00_base/000_hello_acp/project/__init__.py b/examples/tutorials/10_agentic/00_base/000_hello_acp/project/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/examples/tutorials/10_agentic/00_base/000_hello_acp/project/acp.py b/examples/tutorials/10_agentic/00_base/000_hello_acp/project/acp.py
new file mode 100644
index 000000000..069dc0b35
--- /dev/null
+++ b/examples/tutorials/10_agentic/00_base/000_hello_acp/project/acp.py
@@ -0,0 +1,75 @@
+import json
+from agentex.lib import adk
+from agentex.lib.sdk.fastacp.fastacp import FastACP
+from agentex.lib.types.fastacp import AgenticACPConfig
+from agentex.lib.types.acp import CancelTaskParams, CreateTaskParams, SendEventParams
+
+from agentex.types.text_content import TextContent
+from agentex.lib.utils.logging import make_logger
+
+logger = make_logger(__name__)
+
+
+# Create an ACP server with base configuration
+# This sets up the core server that will handle task creation, events, and cancellation
+acp = FastACP.create(
+ acp_type="agentic",
+ config=AgenticACPConfig(
+ type="base",
+ ),
+)
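+# The `acp` object is the ASGI app that serves this agent: the tutorial Dockerfile runs it with
+# `uvicorn acp:acp --host 0.0.0.0 --port 8000`, and the manifest's local_development.paths.acp
+# entry points at this file for local runs.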
+
+@acp.on_task_create
+async def handle_task_create(params: CreateTaskParams):
+ # This handler is called first whenever a new task is created.
+ # It's a good place to initialize any state or resources needed for the task.
+
+ #########################################################
+ # 1. (👋) Do task initialization here.
+ #########################################################
+
+ # Acknowledge that the task has been created.
+ await adk.messages.create(
+ task_id=params.task.id,
+ content=TextContent(
+ author="agent",
+ content=f"Hello! I've received your task. Normally you can do some state initialization here, or just pass and do nothing until you get your first event. For now I'm just acknowledging that I've received a task with the following params:\n\n{json.dumps(params.params, indent=2)}.\n\nYou should only see this message once, when the task is created. All subsequent events will be handled by the `on_task_event_send` handler.",
+ ),
+ )
+
+@acp.on_task_event_send
+async def handle_event_send(params: SendEventParams):
+ # This handler is called whenever a new event (like a message) is sent to the task
+
+ #########################################################
+ # 2. (👋) Echo back the client's message to show it in the UI.
+ #########################################################
+
+ # This is not done by default so the agent developer has full control over what is shown to the user.
+ if params.event.content:
+ await adk.messages.create(task_id=params.task.id, content=params.event.content)
+
+ #########################################################
+ # 3. (👋) Send a simple response message.
+ #########################################################
+
+ # In future tutorials, this is where we'll add more sophisticated response logic.
+ await adk.messages.create(
+ task_id=params.task.id,
+ content=TextContent(
+ author="agent",
+            content="Hello! I've received your message. I can't respond right now, but in future tutorials we'll see how you can get me to intelligently respond to your message.",
+ ),
+ )
+
+@acp.on_task_cancel
+async def handle_task_cancel(params: CancelTaskParams):
+ # This handler is called when a task is cancelled.
+ # It's useful for cleaning up any resources or state associated with the task.
+
+ #########################################################
+ # 4. (👋) Do task cleanup here.
+ #########################################################
+
+ # This is mostly for durable workflows that are cancellable like Temporal, but we will leave it here for demonstration purposes.
+ logger.info(f"Hello! I've received task cancel for task {params.task.id}: {params.task}. This isn't necessary for this example, but it's good to know that it's available.")
diff --git a/examples/tutorials/10_agentic/00_base/000_hello_acp/requirements.txt b/examples/tutorials/10_agentic/00_base/000_hello_acp/requirements.txt
new file mode 100644
index 000000000..d12ad3951
--- /dev/null
+++ b/examples/tutorials/10_agentic/00_base/000_hello_acp/requirements.txt
@@ -0,0 +1,4 @@
+agentex-py>=0.0.4,<0.1.0
+
+# Scale GenAI Platform Python SDK
+scale-gp
\ No newline at end of file
diff --git a/examples/tutorials/10_agentic/00_base/010_multiturn/.dockerignore b/examples/tutorials/10_agentic/00_base/010_multiturn/.dockerignore
new file mode 100644
index 000000000..4521d8465
--- /dev/null
+++ b/examples/tutorials/10_agentic/00_base/010_multiturn/.dockerignore
@@ -0,0 +1,46 @@
+# Python
+__pycache__/
+*.py[cod]
+*$py.class
+*.so
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# Environments
+.env**
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# IDE
+.idea/
+.vscode/
+*.swp
+*.swo
+
+# Git
+.git
+.gitignore
+
+# Misc
+.DS_Store
+
+# CodeArtifact configuration
+.codeartifact-pip-conf
\ No newline at end of file
diff --git a/examples/tutorials/10_agentic/00_base/010_multiturn/Dockerfile b/examples/tutorials/10_agentic/00_base/010_multiturn/Dockerfile
new file mode 100644
index 000000000..29f524911
--- /dev/null
+++ b/examples/tutorials/10_agentic/00_base/010_multiturn/Dockerfile
@@ -0,0 +1,45 @@
+# syntax=docker/dockerfile:1.3
+FROM python:3.12-slim
+COPY --from=ghcr.io/astral-sh/uv:0.6.4 /uv /uvx /bin/
+
+# Install system dependencies
+RUN apt-get update && apt-get install -y \
+ htop \
+ vim \
+ curl \
+ tar \
+ python3-dev \
+ postgresql-client \
+ build-essential \
+ libpq-dev \
+ gcc \
+ cmake \
+ netcat-openbsd \
+ && apt-get clean \
+ && rm -rf /var/lib/apt/lists/*
+
+RUN uv pip install --system --upgrade pip setuptools wheel
+
+ENV UV_HTTP_TIMEOUT=1000
+
+# Copy just the requirements file to optimize caching
+COPY 010_multiturn/requirements.txt /app/requirements.txt
+
+WORKDIR /app/
+
+# Install the required Python packages
+RUN --mount=type=secret,id=codeartifact-pip-conf,target=/etc/pip.conf \
+ export UV_INDEX_URL=$(grep -E "^index-url" /etc/pip.conf | cut -d'=' -f2- | xargs) && \
+ export UV_EXTRA_INDEX_URL=$(grep -E "^extra-index-url" /etc/pip.conf | cut -d'=' -f2- | xargs || echo "") && \
+ uv pip install --system -r requirements.txt
+
+# Copy the project code
+COPY 010_multiturn/project /app/project
+
+WORKDIR /app/project
+
+# Set environment variables
+ENV PYTHONPATH=/app
+
+# Run the agent using uvicorn
+CMD ["uvicorn", "acp:acp", "--host", "0.0.0.0", "--port", "8000"]
\ No newline at end of file
diff --git a/examples/tutorials/10_agentic/00_base/010_multiturn/README.md b/examples/tutorials/10_agentic/00_base/010_multiturn/README.md
new file mode 100644
index 000000000..30e18800c
--- /dev/null
+++ b/examples/tutorials/10_agentic/00_base/010_multiturn/README.md
@@ -0,0 +1,21 @@
+# [Agentic] (Base) Multiturn
+
+This is a simple AgentEx agent that holds a multi-turn conversation with an LLM, showing how to manage message history with task state using the base agentic ACP type.
+
+## Building the Agent
+
+To build the agent Docker image locally:
+
+1. First, set up CodeArtifact authentication:
+```bash
+../../../setup-build-codeartifact.sh
+```
+
+2. Build the agent image:
+```bash
+agentex agents build --manifest manifest.yaml --secret 'id=codeartifact-pip-conf,src=.codeartifact-pip-conf'
+```
+
+## Official Documentation
+
+[000 Hello Base Agentic](https://agentex.scale.com/docs/tutorials/agentic/000_hello_base_agentic)
diff --git a/examples/tutorials/10_agentic/00_base/010_multiturn/manifest.yaml b/examples/tutorials/10_agentic/00_base/010_multiturn/manifest.yaml
new file mode 100644
index 000000000..4ad60c950
--- /dev/null
+++ b/examples/tutorials/10_agentic/00_base/010_multiturn/manifest.yaml
@@ -0,0 +1,122 @@
+# Agent Manifest Configuration
+# ---------------------------
+# This file defines how your agent should be built and deployed.
+
+# Build Configuration
+# ------------------
+# The build config defines what gets packaged into your agent's Docker image.
+# This same configuration is used whether building locally or remotely.
+#
+# When building:
+# 1. All files from include_paths are collected into a build context
+# 2. The context is filtered by dockerignore rules
+# 3. The Dockerfile uses this context to build your agent's image
+# 4. The image is pushed to a registry and used to run your agent
+build:
+ context:
+ # Root directory for the build context
+ root: ../ # Keep this as the default root
+
+ # Paths to include in the Docker build context
+ # Must include:
+ # - agentex-py: The core framework (needed for agent execution)
+ # - Your agent's directory (your custom agent code)
+ # These paths are collected and sent to the Docker daemon for building
+ include_paths:
+ - 010_multiturn
+
+ # Path to your agent's Dockerfile
+ # This defines how your agent's image is built from the context
+ # Relative to the root directory
+ dockerfile: 010_multiturn/Dockerfile
+
+ # Path to your agent's .dockerignore
+ # Filters unnecessary files from the build context
+ # Helps keep build context small and builds fast
+ dockerignore: 010_multiturn/.dockerignore
+
+
+# Local Development Configuration
+# -----------------------------
+# Only used when running the agent locally
+local_development:
+ agent:
+ port: 8000 # Port where your local ACP server is running
+ host_address: host.docker.internal # Host address for Docker networking (host.docker.internal for Docker, localhost for direct)
+
+ # File paths for local development (relative to this manifest.yaml)
+ paths:
+ # Path to ACP server file
+ # Examples:
+ # project/acp.py (standard)
+ # src/server.py (custom structure)
+ # ../shared/acp.py (shared across projects)
+ # /absolute/path/acp.py (absolute path)
+ acp: project/acp.py
+
+
+# Agent Configuration
+# -----------------
+agent:
+ # Unique name for your agent
+ # Used for task routing and monitoring
+ name: ab010-multiturn
+
+ # Type of ACP to use
+ # sync: Simple synchronous ACP implementation
+ # agentic: Advanced ACP with sub-types "base" or "temporal" (requires config)
+ acp_type: agentic
+
+ # Description of what your agent does
+ # Helps with documentation and discovery
+ description: An AgentEx agent that echoes back the user's message
+
+ # Temporal workflow configuration
+ # Set enabled: true to use Temporal workflows for long-running tasks
+ temporal:
+ enabled: false
+
+ # Optional: Credentials mapping
+ # Maps Kubernetes secrets to environment variables
+ # Common credentials include:
+ # credentials:
+ # - env_var_name: OPENAI_API_KEY
+ # secret_name: openai-api-key
+ # secret_key: api-key
+
+ # Optional: Set Environment variables for running your agent locally as well
+ # as for deployment later on
+ # env:
+ # - name: OPENAI_BASE_URL
+ # value: "https://api.openai.com/v1"
+ # - name: ACCOUNT_ID
+ # value: "your_account_id_here"
+
+
+# Deployment Configuration
+# -----------------------
+# Configuration for deploying your agent to Kubernetes clusters
+deployment:
+ # Container image configuration
+ image:
+ repository: "" # Update with your container registry
+ tag: "latest" # Default tag, should be versioned in production
+
+ # Global deployment settings that apply to all clusters
+ # These can be overridden in cluster-specific files (deploy/*.yaml)
+ global:
+ agent:
+ name: "ab010-multiturn"
+ description: "An AgentEx agent that echoes back the user's message"
+
+ # Default replica count
+ replicaCount: 1
+
+ # Default resource requirements
+ resources:
+ requests:
+ cpu: "500m"
+ memory: "1Gi"
+ limits:
+ cpu: "1000m"
+ memory: "2Gi"
\ No newline at end of file
diff --git a/examples/tutorials/10_agentic/00_base/010_multiturn/project/__init__.py b/examples/tutorials/10_agentic/00_base/010_multiturn/project/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/examples/tutorials/10_agentic/00_base/010_multiturn/project/acp.py b/examples/tutorials/10_agentic/00_base/010_multiturn/project/acp.py
new file mode 100644
index 000000000..e6d6f8cc9
--- /dev/null
+++ b/examples/tutorials/10_agentic/00_base/010_multiturn/project/acp.py
@@ -0,0 +1,153 @@
+import os
+from typing import List
+
+from agentex.lib import adk
+from agentex.lib.core.tracing.tracing_processor_manager import (
+ add_tracing_processor_config,
+)
+from agentex.lib.sdk.fastacp.fastacp import FastACP
+from agentex.lib.types.acp import CancelTaskParams, CreateTaskParams, SendEventParams
+from agentex.lib.types.fastacp import AgenticACPConfig
+from agentex.lib.types.llm_messages import (
+ AssistantMessage,
+ LLMConfig,
+ Message,
+ SystemMessage,
+ UserMessage,
+)
+from agentex.lib.types.tracing import SGPTracingProcessorConfig
+from agentex.lib.utils.logging import make_logger
+from agentex.lib.utils.model_utils import BaseModel
+from agentex.types.text_content import TextContent
+
+logger = make_logger(__name__)
+
+# Add a tracing processor
+add_tracing_processor_config(SGPTracingProcessorConfig(
+ sgp_api_key=os.environ.get("SCALE_GP_API_KEY", ""),
+ sgp_account_id=os.environ.get("SCALE_GP_ACCOUNT_ID", "")
+))
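+# Traces produced by the adk calls below (which pass trace_id=params.task.id) are exported to
+# the Scale GenAI Platform using these credentials.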
+
+# Create an ACP server
+
+# !!! Warning: Because "Agentic" ACPs are designed to be fully asynchronous, race conditions can occur if parallel events are sent. It is highly recommended to use the "temporal" type in the AgenticACPConfig instead to handle complex use cases. The "base" ACP is only designed to be used for simple use cases and for learning purposes.
+acp = FastACP.create(
+ acp_type="agentic",
+ config=AgenticACPConfig(type="base"),
+)
+
+class StateModel(BaseModel):
+ messages: List[Message]
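+    # Persisted per task via adk.state: handle_task_create seeds it with a system message, and
+    # handle_event_send re-loads it, appends the new user and assistant messages, and writes it
+    # back, which is what makes the conversation multi-turn.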
+
+
+@acp.on_task_create
+async def handle_task_create(params: CreateTaskParams):
+ # Upon task creation, we initialize the task state with a system message.
+ # This will be fetched by the `on_task_event_send` handler when each event is sent.
+
+ #########################################################
+ # 1. Initialize the task state.
+ #########################################################
+
+ state = StateModel(messages=[SystemMessage(content="You are a helpful assistant that can answer questions.")])
+ await adk.state.create(task_id=params.task.id, agent_id=params.agent.id, state=state)
+
+@acp.on_task_event_send
+async def handle_event_send(params: SendEventParams):
+ # !!! Warning: Because "Agentic" ACPs are designed to be fully asynchronous, race conditions can occur if parallel events are sent. It is highly recommended to use the "temporal" type in the AgenticACPConfig instead to handle complex use cases. The "base" ACP is only designed to be used for simple use cases and for learning purposes.
+
+ #########################################################
+ # 2. Validate the event content.
+ #########################################################
+ if not params.event.content:
+ return
+
+ if params.event.content.type != "text":
+ raise ValueError(f"Expected text message, got {params.event.content.type}")
+
+ if params.event.content.author != "user":
+ raise ValueError(f"Expected user message, got {params.event.content.author}")
+
+ #########################################################
+ # 3. Echo back the user's message so it shows up in the UI.
+ #########################################################
+
+ await adk.messages.create(
+ task_id=params.task.id,
+ trace_id=params.task.id,
+ content=params.event.content,
+ )
+
+ #########################################################
+ # 4. (👋) If the OpenAI API key is not set, send a message to the user to let them know.
+ #########################################################
+
+ if not os.environ.get("OPENAI_API_KEY"):
+ await adk.messages.create(
+ task_id=params.task.id,
+ trace_id=params.task.id,
+ content=TextContent(
+ author="agent",
+ content="Hey, sorry I'm unable to respond to your message because you're running this example without an OpenAI API key. Please set the OPENAI_API_KEY environment variable to run this example. Do this by either by adding a .env file to the project/ directory or by setting the environment variable in your terminal.",
+ ),
+        )
+        # Without an API key we cannot call the LLM below, so stop here.
+        return
+
+ #########################################################
+ # 5. (👋) Retrieve the task state.
+ #########################################################
+
+ task_state = await adk.state.get_by_task_and_agent(task_id=params.task.id, agent_id=params.agent.id)
+ state = StateModel.model_validate(task_state.state)
+
+ #########################################################
+ # 6. (👋) Add the new user message to the message history
+ #########################################################
+
+ state.messages.append(UserMessage(content=params.event.content.content))
+
+ #########################################################
+ # 7. (👋) Call an LLM to respond to the user's message
+ #########################################################
+
+ # Call an LLM to respond to the user's message
+ chat_completion = await adk.providers.litellm.chat_completion(
+ llm_config=LLMConfig(model="gpt-4o-mini", messages=state.messages),
+ trace_id=params.task.id,
+ )
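+    # Note: this is the non-streaming completion call, so the client only sees the reply once
+    # it is complete. The 020_streaming tutorial covers the streaming variants
+    # (chat_completion_stream / chat_completion_stream_auto_send).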
+ state.messages.append(AssistantMessage(content=chat_completion.choices[0].message.content))
+
+ #########################################################
+ # 8. (👋) Send agent response to client
+ #########################################################
+
+ if chat_completion.choices[0].message:
+ content_str = chat_completion.choices[0].message.content or ""
+ else:
+ content_str = ""
+
+ await adk.messages.create(
+ task_id=params.task.id,
+ trace_id=params.task.id,
+ content=TextContent(
+ author="agent",
+ content=content_str,
+ ),
+ )
+
+ #########################################################
+ # 9. (👋) Store the messages in the task state for the next turn
+ #########################################################
+
+ await adk.state.update(
+ state_id=task_state.id,
+ task_id=params.task.id,
+ agent_id=params.agent.id,
+ state=state,
+ trace_id=params.task.id,
+ )
+
+@acp.on_task_cancel
+async def handle_task_cancel(params: CancelTaskParams):
+ """Default task cancel handler"""
+ logger.info(f"Task canceled: {params.task}")
+
diff --git a/examples/tutorials/10_agentic/00_base/010_multiturn/requirements.txt b/examples/tutorials/10_agentic/00_base/010_multiturn/requirements.txt
new file mode 100644
index 000000000..d12ad3951
--- /dev/null
+++ b/examples/tutorials/10_agentic/00_base/010_multiturn/requirements.txt
@@ -0,0 +1,4 @@
+agentex-py>=0.0.4,<0.1.0
+
+# Scale GenAI Platform Python SDK
+scale-gp
\ No newline at end of file
diff --git a/examples/tutorials/10_agentic/00_base/020_streaming/.dockerignore b/examples/tutorials/10_agentic/00_base/020_streaming/.dockerignore
new file mode 100644
index 000000000..4521d8465
--- /dev/null
+++ b/examples/tutorials/10_agentic/00_base/020_streaming/.dockerignore
@@ -0,0 +1,46 @@
+# Python
+__pycache__/
+*.py[cod]
+*$py.class
+*.so
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# Environments
+.env**
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# IDE
+.idea/
+.vscode/
+*.swp
+*.swo
+
+# Git
+.git
+.gitignore
+
+# Misc
+.DS_Store
+
+# CodeArtifact configuration
+.codeartifact-pip-conf
\ No newline at end of file
diff --git a/examples/tutorials/10_agentic/00_base/020_streaming/Dockerfile b/examples/tutorials/10_agentic/00_base/020_streaming/Dockerfile
new file mode 100644
index 000000000..26ceaf8aa
--- /dev/null
+++ b/examples/tutorials/10_agentic/00_base/020_streaming/Dockerfile
@@ -0,0 +1,45 @@
+# syntax=docker/dockerfile:1.3
+FROM python:3.12-slim
+COPY --from=ghcr.io/astral-sh/uv:0.6.4 /uv /uvx /bin/
+
+# Install system dependencies
+RUN apt-get update && apt-get install -y \
+ htop \
+ vim \
+ curl \
+ tar \
+ python3-dev \
+ postgresql-client \
+ build-essential \
+ libpq-dev \
+ gcc \
+ cmake \
+ netcat-openbsd \
+ && apt-get clean \
+ && rm -rf /var/lib/apt/lists/*
+
+RUN uv pip install --system --upgrade pip setuptools wheel
+
+ENV UV_HTTP_TIMEOUT=1000
+
+# Copy just the requirements file to optimize caching
+COPY 020_streaming/requirements.txt /app/requirements.txt
+
+WORKDIR /app/
+
+# Install the required Python packages
+RUN --mount=type=secret,id=codeartifact-pip-conf,target=/etc/pip.conf \
+ export UV_INDEX_URL=$(grep -E "^index-url" /etc/pip.conf | cut -d'=' -f2- | xargs) && \
+ export UV_EXTRA_INDEX_URL=$(grep -E "^extra-index-url" /etc/pip.conf | cut -d'=' -f2- | xargs || echo "") && \
+ uv pip install --system -r requirements.txt
+
+# Copy the project code
+COPY 020_streaming/project /app/project
+
+WORKDIR /app/project
+
+# Set environment variables
+ENV PYTHONPATH=/app
+
+# Run the agent using uvicorn
+CMD ["uvicorn", "acp:acp", "--host", "0.0.0.0", "--port", "8000"]
\ No newline at end of file
diff --git a/examples/tutorials/10_agentic/00_base/020_streaming/README.md b/examples/tutorials/10_agentic/00_base/020_streaming/README.md
new file mode 100644
index 000000000..5ca587b3b
--- /dev/null
+++ b/examples/tutorials/10_agentic/00_base/020_streaming/README.md
@@ -0,0 +1,127 @@
+# ab020-streaming - AgentEx Starter Template
+
+This is a generic starter template for building agents with the AgentEx framework. It provides a basic implementation of the Agent 2 Client Protocol (ACP) to help you get started quickly.
+
+## What You'll Learn
+
+- **Tasks**: A task is a grouping mechanism for related messages. Think of it as a conversation thread or a session.
+- **Messages**: Messages are communication objects within a task. They can contain text, data, or instructions.
+- **ACP Handlers**: The agent implements handlers for the main ACP events:
+  - `on_task_create`: Called when a new task is created
+  - `on_task_event_send`: Called when an event (such as a message) is sent within a task
+  - `on_task_cancel`: Called when a task is canceled
+
+
+## Running the Agent
+
+1. Run the agent locally:
+```bash
+agentex agents run --manifest manifest.yaml
+```
+
+The agent will start on port 8000 and print messages whenever it receives any of the ACP events.
+
+## What's Inside
+
+This template:
+- Sets up a basic ACP server
+- Handles each of the required ACP events with simple print statements
+- Provides a foundation for building more complex agents
+
+## Next Steps
+
+For more advanced agent development, check out the AgentEx tutorials:
+
+- **Tutorials 00-08**: Learn about building synchronous agents with ACP
+- **Tutorials 09-10**: Learn how to use Temporal to power asynchronous agents
+ - Tutorial 09: Basic Temporal workflow setup
+ - Tutorial 10: Advanced Temporal patterns and best practices
+
+These tutorials will help you understand:
+- How to handle long-running tasks
+- Implementing state machines
+- Managing complex workflows
+- Best practices for async agent development
+
+## The Manifest File
+
+The `manifest.yaml` file is your agent's configuration file. It defines:
+- How your agent should be built and packaged
+- What files are included in your agent's Docker image
+- Your agent's name and description
+- Local development settings (like the port your agent runs on)
+
+This file is essential for both local development and deployment of your agent.
+
+## Project Structure
+
+```
+020_streaming/
+├── project/ # Your agent's code
+│ ├── __init__.py
+│ └── acp.py # ACP server and event handlers
+├── Dockerfile # Container definition
+├── manifest.yaml # Deployment config
+└── requirements.txt # Dependencies
+```
+
+## Development
+
+1. **Customize Event Handlers**
+ - Modify the handlers in `acp.py` to implement your agent's logic
+ - Add your own tools and capabilities
+ - Implement custom state management
+
+2. **Add Dependencies**
+ - Add required packages to `requirements.txt`
+ - Update the manifest with any needed credentials
+
+## Local Development
+
+1. **Install AgentEx**
+```bash
+cd agentex-py
+uv venv
+source .venv/bin/activate
+uv sync
+```
+
+2. **Deploy your agent**
+```bash
+# From this directory
+export ENVIRONMENT=development && agentex agents create --manifest manifest.yaml
+```
+
+3. **Interact with your agent**
+
+Option 0: CLI (deprecated - to be replaced once a new CLI is implemented - please use the web UI for now!)
+```bash
+# Submit a task via CLI
+agentex tasks submit --agent {{ agent_name }} --task "Your task here"
+```
+
+Option 1: Web UI
+```bash
+# Start the local web interface
+cd agentex-web
+make dev
+
+# Then open http://localhost:3000 in your browser to chat with your agent
+```
+
+## Development Tips
+
+1. **Local Testing**
+- Set environment variables in project/.env for any required credentials, or set them in the manifest.yaml.
+
+### To build the agent Docker image locally (normally not necessary):
+
+1. First, set up CodeArtifact authentication:
+```bash
+../../setup-build-codeartifact.sh
+```
+
+2. Build the agent image:
+```bash
+agentex agents build --manifest manifest.yaml --secret 'id=codeartifact-pip-conf,src=.codeartifact-pip-conf'
+```
\ No newline at end of file
diff --git a/examples/tutorials/10_agentic/00_base/020_streaming/manifest.yaml b/examples/tutorials/10_agentic/00_base/020_streaming/manifest.yaml
new file mode 100644
index 000000000..3280ce483
--- /dev/null
+++ b/examples/tutorials/10_agentic/00_base/020_streaming/manifest.yaml
@@ -0,0 +1,119 @@
+# Agent Manifest Configuration
+# ---------------------------
+# This file defines how your agent should be built and deployed.
+
+# Build Configuration
+# ------------------
+# The build config defines what gets packaged into your agent's Docker image.
+# This same configuration is used whether building locally or remotely.
+#
+# When building:
+# 1. All files from include_paths are collected into a build context
+# 2. The context is filtered by dockerignore rules
+# 3. The Dockerfile uses this context to build your agent's image
+# 4. The image is pushed to a registry and used to run your agent
+build:
+ context:
+ # Root directory for the build context
+ root: ../ # Keep this as the default root
+
+ # Paths to include in the Docker build context
+ # Must include:
+ # - agentex-py: The core framework (needed for agent execution)
+ # - Your agent's directory (your custom agent code)
+ # These paths are collected and sent to the Docker daemon for building
+ include_paths:
+ - 020_streaming
+
+ # Path to your agent's Dockerfile
+ # This defines how your agent's image is built from the context
+ # Relative to the root directory
+ dockerfile: 020_streaming/Dockerfile
+
+ # Path to your agent's .dockerignore
+ # Filters unnecessary files from the build context
+ # Helps keep build context small and builds fast
+ dockerignore: 020_streaming/.dockerignore
+
+
+# Local Development Configuration
+# -----------------------------
+# Only used when running the agent locally
+local_development:
+ agent:
+ port: 8000 # Port where your local ACP server is running
+ host_address: host.docker.internal # Host address for Docker networking (host.docker.internal for Docker, localhost for direct)
+
+ # File paths for local development (relative to this manifest.yaml)
+ paths:
+ # Path to ACP server file
+ # Examples:
+ # project/acp.py (standard)
+ # src/server.py (custom structure)
+ # ../shared/acp.py (shared across projects)
+ # /absolute/path/acp.py (absolute path)
+ acp: project/acp.py
+
+
+# Agent Configuration
+# -----------------
+agent:
+ acp_type: agentic
+
+ # Unique name for your agent
+ # Used for task routing and monitoring
+ name: ab020-streaming
+
+ # Description of what your agent does
+ # Helps with documentation and discovery
+ description: A multiturn AgentEx agent that streams outputs
+
+ # Temporal workflow configuration
+ # Set enabled: true to use Temporal workflows for long-running tasks
+ temporal:
+ enabled: false
+
+ # Optional: Credentials mapping
+ # Maps Kubernetes secrets to environment variables
+ # Common credentials include:
+ # credentials:
+ # - env_var_name: OPENAI_API_KEY
+ # secret_name: openai-api-key
+ # secret_key: api-key
+
+ # Optional: Set Environment variables for running your agent locally as well
+ # as for deployment later on
+ # env:
+ # - name: OPENAI_BASE_URL
+ # value: "https://api.openai.com/v1"
+ # - name: ACCOUNT_ID
+ # value: "your_account_id_here"
+
+
+# Deployment Configuration
+# -----------------------
+# Configuration for deploying your agent to Kubernetes clusters
+deployment:
+ # Container image configuration
+ image:
+ repository: "" # Update with your container registry
+ tag: "latest" # Default tag, should be versioned in production
+
+ # Global deployment settings that apply to all clusters
+ # These can be overridden in cluster-specific files (deploy/*.yaml)
+ global:
+ agent:
+ name: "ab020-streaming"
+ description: "A multiturn AgentEx agent that streams outputs"
+
+ # Default replica count
+ replicaCount: 1
+
+ # Default resource requirements
+ resources:
+ requests:
+ cpu: "500m"
+ memory: "1Gi"
+ limits:
+ cpu: "1000m"
+ memory: "2Gi"
diff --git a/examples/tutorials/10_agentic/00_base/020_streaming/project/__init__.py b/examples/tutorials/10_agentic/00_base/020_streaming/project/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/examples/tutorials/10_agentic/00_base/020_streaming/project/acp.py b/examples/tutorials/10_agentic/00_base/020_streaming/project/acp.py
new file mode 100644
index 000000000..4e6c698b3
--- /dev/null
+++ b/examples/tutorials/10_agentic/00_base/020_streaming/project/acp.py
@@ -0,0 +1,130 @@
+import os
+from typing import List
+
+from agentex.lib import adk
+from agentex.lib.sdk.fastacp.fastacp import FastACP
+from agentex.lib.types.acp import CancelTaskParams, CreateTaskParams, SendEventParams
+from agentex.lib.types.fastacp import AgenticACPConfig
+from agentex.lib.types.llm_messages import AssistantMessage, LLMConfig, Message, SystemMessage, UserMessage
+from agentex.lib.utils.logging import make_logger
+from agentex.lib.utils.model_utils import BaseModel
+from agentex.types.text_content import TextContent
+
+logger = make_logger(__name__)
+
+
+# Create an ACP server
+
+# !!! Warning: Because "Agentic" ACPs are designed to be fully asynchronous, race conditions can occur if parallel events are sent. It is highly recommended to use the "temporal" type in the AgenticACPConfig instead to handle complex use cases. The "base" ACP is only designed to be used for simple use cases and for learning purposes.
+acp = FastACP.create(
+ acp_type="agentic",
+ config=AgenticACPConfig(type="base"),
+)
+
+class StateModel(BaseModel):
+ messages: List[Message]
+
+
+@acp.on_task_create
+async def handle_task_create(params: CreateTaskParams):
+ # Upon task creation, we initialize the task state with a system message.
+ # This will be fetched by the `on_task_event_send` handler when each event is sent.
+
+ #########################################################
+ # 1. Initialize the task state.
+ #########################################################
+
+ state = StateModel(messages=[SystemMessage(content="You are a helpful assistant that can answer questions.")])
+ await adk.state.create(task_id=params.task.id, agent_id=params.agent.id, state=state)
+
+@acp.on_task_event_send
+async def handle_event_send(params: SendEventParams):
+ # !!! Warning: Because "Agentic" ACPs are designed to be fully asynchronous, race conditions can occur if parallel events are sent. It is highly recommended to use the "temporal" type in the AgenticACPConfig instead to handle complex use cases. The "base" ACP is only designed to be used for simple use cases and for learning purposes.
+
+ #########################################################
+ # 2. Validate the event content.
+ #########################################################
+ if not params.event.content:
+ return
+
+ if params.event.content.type != "text":
+ raise ValueError(f"Expected text message, got {params.event.content.type}")
+
+ if params.event.content.author != "user":
+ raise ValueError(f"Expected user message, got {params.event.content.author}")
+
+ #########################################################
+ # 3. Echo back the user's message.
+ #########################################################
+
+ await adk.messages.create(
+ task_id=params.task.id,
+ trace_id=params.task.id,
+ content=params.event.content,
+ )
+
+ #########################################################
+ # 4. If the OpenAI API key is not set, send a message to the user to let them know.
+ #########################################################
+
+ if not os.environ.get("OPENAI_API_KEY"):
+ await adk.messages.create(
+ task_id=params.task.id,
+ trace_id=params.task.id,
+ content=TextContent(
+ author="agent",
+                content="Hey, sorry I'm unable to respond to your message because you're running this example without an OpenAI API key. Please set the OPENAI_API_KEY environment variable to run this example. Do this either by adding a .env file to the project/ directory or by setting the environment variable in your terminal.",
+ ),
+ )
+
+ #########################################################
+ # 5. Retrieve the task state.
+ #########################################################
+
+ task_state = await adk.state.get_by_task_and_agent(task_id=params.task.id, agent_id=params.agent.id)
+ state = StateModel.model_validate(task_state.state)
+
+ #########################################################
+ # 6. Add the new user message to the message history
+ #########################################################
+
+ state.messages.append(UserMessage(content=params.event.content.content))
+
+ #########################################################
+ # 7. (👋) Call an LLM to respond to the user's message
+ #########################################################
+
+    # When using the streaming version of chat completion, you can use either the `chat_completion_stream_auto_send` method or the `chat_completion_stream` method. Here is the difference:
+
+    # `chat_completion_stream_auto_send` - This is the "managed version" of the streaming method. It will automatically send the response to the client as an agent TaskMessage.
+
+    # `chat_completion_stream` - This is the "unmanaged version" of the streaming method. It returns a generator of chat completion chunks that you can handle however you like, such as sending them to the client as an agent message or storing them in the task state.
+
+ # Here we use the `chat_completion_stream_auto_send` method.
+ #########################################################
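+    # (👋) For reference, the unmanaged variant would look something like the sketch
+    # below (kept as a comment; the parameters are assumed to mirror the managed call
+    # used next, so check the ADK reference for the exact signature):
+    #
+    #     async for chunk in adk.providers.litellm.chat_completion_stream(
+    #         llm_config=LLMConfig(model="gpt-4o-mini", messages=state.messages, stream=True),
+    #         trace_id=params.task.id,
+    #     ):
+    #         ...  # forward each chunk to the client, or accumulate it yourself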
+
+ task_message = await adk.providers.litellm.chat_completion_stream_auto_send(
+ task_id=params.task.id,
+ llm_config=LLMConfig(model="gpt-4o-mini", messages=state.messages, stream=True),
+ trace_id=params.task.id,
+ )
+
+ state.messages.append(AssistantMessage(content=task_message.content.content))
+
+ #########################################################
+ # 8. Store the messages in the task state for the next turn
+ #########################################################
+
+ await adk.state.update(
+ state_id=task_state.id,
+ task_id=params.task.id,
+ agent_id=params.agent.id,
+ state=state,
+ trace_id=params.task.id,
+ )
+
+@acp.on_task_cancel
+async def handle_task_cancel(params: CancelTaskParams):
+ """Default task cancel handler"""
+ logger.info(f"Task canceled: {params.task}")
+
diff --git a/examples/tutorials/10_agentic/00_base/020_streaming/requirements.txt b/examples/tutorials/10_agentic/00_base/020_streaming/requirements.txt
new file mode 100644
index 000000000..2c2ebf15b
--- /dev/null
+++ b/examples/tutorials/10_agentic/00_base/020_streaming/requirements.txt
@@ -0,0 +1,5 @@
+# AgentEx Python SDK
+agentex-py>=0.0.4,<0.1.0
+
+# Scale GenAI Platform Python SDK
+scale-gp
\ No newline at end of file
diff --git a/examples/tutorials/10_agentic/00_base/030_tracing/.dockerignore b/examples/tutorials/10_agentic/00_base/030_tracing/.dockerignore
new file mode 100644
index 000000000..4521d8465
--- /dev/null
+++ b/examples/tutorials/10_agentic/00_base/030_tracing/.dockerignore
@@ -0,0 +1,46 @@
+# Python
+__pycache__/
+*.py[cod]
+*$py.class
+*.so
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# Environments
+.env**
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# IDE
+.idea/
+.vscode/
+*.swp
+*.swo
+
+# Git
+.git
+.gitignore
+
+# Misc
+.DS_Store
+
+# CodeArtifact configuration
+.codeartifact-pip-conf
\ No newline at end of file
diff --git a/examples/tutorials/10_agentic/00_base/030_tracing/Dockerfile b/examples/tutorials/10_agentic/00_base/030_tracing/Dockerfile
new file mode 100644
index 000000000..84ecb735d
--- /dev/null
+++ b/examples/tutorials/10_agentic/00_base/030_tracing/Dockerfile
@@ -0,0 +1,45 @@
+# syntax=docker/dockerfile:1.3
+FROM python:3.12-slim
+COPY --from=ghcr.io/astral-sh/uv:0.6.4 /uv /uvx /bin/
+
+# Install system dependencies
+RUN apt-get update && apt-get install -y \
+ htop \
+ vim \
+ curl \
+ tar \
+ python3-dev \
+ postgresql-client \
+ build-essential \
+ libpq-dev \
+ gcc \
+ cmake \
+ netcat-openbsd \
+ && apt-get clean \
+ && rm -rf /var/lib/apt/lists/*
+
+RUN uv pip install --system --upgrade pip setuptools wheel
+
+ENV UV_HTTP_TIMEOUT=1000
+
+# Copy just the requirements file to optimize caching
+COPY 030_tracing/requirements.txt /app/requirements.txt
+
+WORKDIR /app/
+
+# Install the required Python packages
+RUN --mount=type=secret,id=codeartifact-pip-conf,target=/etc/pip.conf \
+ export UV_INDEX_URL=$(grep -E "^index-url" /etc/pip.conf | cut -d'=' -f2- | xargs) && \
+ export UV_EXTRA_INDEX_URL=$(grep -E "^extra-index-url" /etc/pip.conf | cut -d'=' -f2- | xargs || echo "") && \
+ uv pip install --system -r requirements.txt
+
+# Copy the project code
+COPY 030_tracing/project /app/project
+
+WORKDIR /app/project
+
+# Set environment variables
+ENV PYTHONPATH=/app
+
+# Run the agent using uvicorn
+CMD ["uvicorn", "acp:acp", "--host", "0.0.0.0", "--port", "8000"]
\ No newline at end of file
diff --git a/examples/tutorials/10_agentic/00_base/030_tracing/README.md b/examples/tutorials/10_agentic/00_base/030_tracing/README.md
new file mode 100644
index 000000000..936e2ef44
--- /dev/null
+++ b/examples/tutorials/10_agentic/00_base/030_tracing/README.md
@@ -0,0 +1,141 @@
+# ab030-tracing - AgentEx Starter Template
+
+This is a generic starter template for building agents with the AgentEx framework. It provides a basic implementation of the Agent 2 Client Protocol (ACP) to help you get started quickly.
+
+## What You'll Learn
+
+- **Tasks**: A task is a grouping mechanism for related messages. Think of it as a conversation thread or a session.
+- **Messages**: Messages are communication objects within a task. They can contain text, data, or instructions.
+- **ACP Events**: The agent responds to four main events:
+ - `task_received`: When a new task is created
+ - `task_message_received`: When a message is sent within a task
+ - `task_approved`: When a task is approved
+ - `task_canceled`: When a task is canceled
+
+## Building the Agent
+
+To build the agent Docker image locally:
+
+1. First, set up CodeArtifact authentication:
+```bash
+../../../setup-build-codeartifact.sh
+```
+
+2. Build the agent image:
+```bash
+agentex agents build --manifest manifest.yaml --secret 'id=codeartifact-pip-conf,src=.codeartifact-pip-conf'
+```
+
+## Running the Agent
+
+1. Run the agent locally:
+```bash
+agentex agents run --manifest manifest.yaml
+```
+
+The agent will start on port 8000 and print messages whenever it receives any of the ACP events.
+
+## What's Inside
+
+This template:
+- Sets up a basic ACP server
+- Handles each of the required ACP events with simple print statements (a minimal sketch is shown below)
+- Provides a foundation for building more complex agents
+
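+A minimal handler registration looks roughly like this (a simplified sketch; see `project/acp.py` for the full example):
+
+```python
+from agentex.lib.sdk.fastacp.fastacp import FastACP
+from agentex.lib.types.acp import CreateTaskParams
+from agentex.lib.types.fastacp import AgenticACPConfig
+
+# Create the ACP server and register a handler for task creation
+acp = FastACP.create(acp_type="agentic", config=AgenticACPConfig(type="base"))
+
+@acp.on_task_create
+async def handle_task_create(params: CreateTaskParams):
+    print(f"Task created: {params.task.id}")
+```
+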
+## Next Steps
+
+For more advanced agent development, check out the AgentEx tutorials:
+
+- **Tutorials 00-08**: Learn about building synchronous agents with ACP
+- **Tutorials 09-10**: Learn how to use Temporal to power asynchronous agents
+ - Tutorial 09: Basic Temporal workflow setup
+ - Tutorial 10: Advanced Temporal patterns and best practices
+
+These tutorials will help you understand:
+- How to handle long-running tasks
+- Implementing state machines
+- Managing complex workflows
+- Best practices for async agent development
+
+## The Manifest File
+
+The `manifest.yaml` file is your agent's configuration file. It defines:
+- How your agent should be built and packaged
+- What files are included in your agent's Docker image
+- Your agent's name and description
+- Local development settings (like the port your agent runs on)
+
+This file is essential for both local development and deployment of your agent.
+
+## Project Structure
+
+```
+030_tracing/
+├── project/ # Your agent's code
+│ ├── __init__.py
+│ └── acp.py # ACP server and event handlers
+├── Dockerfile # Container definition
+├── manifest.yaml # Deployment config
+└── requirements.txt # Dependencies
+```
+
+## Development
+
+1. **Customize Event Handlers**
+ - Modify the handlers in `acp.py` to implement your agent's logic
+ - Add your own tools and capabilities
+ - Implement custom state management
+
+2. **Add Dependencies**
+ - Add required packages to `requirements.txt`
+ - Update the manifest with any needed credentials
+
+## Local Development
+
+1. **Install AgentEx**
+```bash
+cd agentex-py
+uv venv
+source .venv/bin/activate
+uv sync
+```
+
+2. **Deploy your agent**
+```bash
+# From this directory
+export ENVIRONMENT=development && agentex agents create --manifest manifest.yaml
+```
+
+3. **Interact with your agent**
+
+Option 0: CLI (deprecated - to be replaced once a new CLI is implemented - please use the web UI for now!)
+```bash
+# Submit a task via CLI
+agentex tasks submit --agent {{ agent_name }} --task "Your task here"
+```
+
+Option 1: Web UI
+```bash
+# Start the local web interface
+cd agentex-web
+make dev
+
+# Then open http://localhost:3000 in your browser to chat with your agent
+```
+
+## Development Tips
+
+1. **Local Testing**
+- Set environment variables in `project/.env` for any required credentials, or set them in `manifest.yaml`
+
+### To build the agent Docker image locally (normally not necessary):
+
+1. First, set up CodeArtifact authentication:
+```bash
+../../setup-build-codeartifact.sh
+```
+
+2. Build the agent image:
+```bash
+agentex agents build --manifest manifest.yaml --secret 'id=codeartifact-pip-conf,src=.codeartifact-pip-conf'
+```
\ No newline at end of file
diff --git a/examples/tutorials/10_agentic/00_base/030_tracing/manifest.yaml b/examples/tutorials/10_agentic/00_base/030_tracing/manifest.yaml
new file mode 100644
index 000000000..73a4052a6
--- /dev/null
+++ b/examples/tutorials/10_agentic/00_base/030_tracing/manifest.yaml
@@ -0,0 +1,119 @@
+# Agent Manifest Configuration
+# ---------------------------
+# This file defines how your agent should be built and deployed.
+
+# Build Configuration
+# ------------------
+# The build config defines what gets packaged into your agent's Docker image.
+# This same configuration is used whether building locally or remotely.
+#
+# When building:
+# 1. All files from include_paths are collected into a build context
+# 2. The context is filtered by dockerignore rules
+# 3. The Dockerfile uses this context to build your agent's image
+# 4. The image is pushed to a registry and used to run your agent
+build:
+ context:
+ # Root directory for the build context
+ root: ../ # Keep this as the default root
+
+ # Paths to include in the Docker build context
+ # Must include:
+ # - agentex-py: The core framework (needed for agent execution)
+ # - Your agent's directory (your custom agent code)
+ # These paths are collected and sent to the Docker daemon for building
+ include_paths:
+ - 030_tracing
+
+ # Path to your agent's Dockerfile
+ # This defines how your agent's image is built from the context
+ # Relative to the root directory
+ dockerfile: 030_tracing/Dockerfile
+
+ # Path to your agent's .dockerignore
+ # Filters unnecessary files from the build context
+ # Helps keep build context small and builds fast
+ dockerignore: 030_tracing/.dockerignore
+
+
+# Local Development Configuration
+# -----------------------------
+# Only used when running the agent locally
+local_development:
+ agent:
+ port: 8000 # Port where your local ACP server is running
+ host_address: host.docker.internal # Host address for Docker networking (host.docker.internal for Docker, localhost for direct)
+
+ # File paths for local development (relative to this manifest.yaml)
+ paths:
+ # Path to ACP server file
+ # Examples:
+ # project/acp.py (standard)
+ # src/server.py (custom structure)
+ # ../shared/acp.py (shared across projects)
+ # /absolute/path/acp.py (absolute path)
+ acp: project/acp.py
+
+
+# Agent Configuration
+# -----------------
+agent:
+ acp_type: agentic
+
+ # Unique name for your agent
+ # Used for task routing and monitoring
+ name: ab030-tracing
+
+ # Description of what your agent does
+ # Helps with documentation and discovery
+ description: An AgentEx agent that demonstrates how to do hierarchical and custom tracing
+
+ # Temporal workflow configuration
+ # Set enabled: true to use Temporal workflows for long-running tasks
+ temporal:
+ enabled: false
+
+ # Optional: Credentials mapping
+ # Maps Kubernetes secrets to environment variables
+ # Common credentials include:
+ # credentials:
+ # - env_var_name: OPENAI_API_KEY
+ # secret_name: openai-api-key
+ # secret_key: api-key
+
+ # Optional: Set Environment variables for running your agent locally as well
+ # as for deployment later on
+ # env:
+ # - name: OPENAI_BASE_URL
+ # value: "https://api.openai.com/v1"
+ # - name: ACCOUNT_ID
+ # value: "your_account_id_here"
+
+
+# Deployment Configuration
+# -----------------------
+# Configuration for deploying your agent to Kubernetes clusters
+deployment:
+ # Container image configuration
+ image:
+ repository: "" # Update with your container registry
+ tag: "latest" # Default tag, should be versioned in production
+
+ # Global deployment settings that apply to all clusters
+ # These can be overridden in cluster-specific files (deploy/*.yaml)
+ global:
+ agent:
+ name: "ab030-tracing"
+ description: "An AgentEx agent that demonstrates how to do hierarchical and custom tracing"
+
+ # Default replica count
+ replicaCount: 1
+
+ # Default resource requirements
+ resources:
+ requests:
+ cpu: "500m"
+ memory: "1Gi"
+ limits:
+ cpu: "1000m"
+ memory: "2Gi"
\ No newline at end of file
diff --git a/examples/tutorials/10_agentic/00_base/030_tracing/project/__init__.py b/examples/tutorials/10_agentic/00_base/030_tracing/project/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/examples/tutorials/10_agentic/00_base/030_tracing/project/acp.py b/examples/tutorials/10_agentic/00_base/030_tracing/project/acp.py
new file mode 100644
index 000000000..04e626cf4
--- /dev/null
+++ b/examples/tutorials/10_agentic/00_base/030_tracing/project/acp.py
@@ -0,0 +1,152 @@
+import os
+from typing import List
+
+from agentex.lib import adk
+from agentex.lib.sdk.fastacp.fastacp import FastACP
+from agentex.lib.types.acp import CancelTaskParams, CreateTaskParams, SendEventParams
+from agentex.lib.types.fastacp import AgenticACPConfig
+from agentex.lib.types.llm_messages import AssistantMessage, LLMConfig, Message, SystemMessage, UserMessage
+from agentex.lib.utils.logging import make_logger
+from agentex.lib.utils.model_utils import BaseModel
+from agentex.types.text_content import TextContent
+
+logger = make_logger(__name__)
+
+
+# Create an ACP server
+
+# !!! Warning: Because "Agentic" ACPs are designed to be fully asynchronous, race conditions can occur if parallel events are sent. It is highly recommended to use the "temporal" type in the AgenticACPConfig instead to handle complex use cases. The "base" ACP is only designed to be used for simple use cases and for learning purposes.
+acp = FastACP.create(
+ acp_type="agentic",
+ config=AgenticACPConfig(type="base"),
+)
+
+class StateModel(BaseModel):
+ messages: List[Message]
+ turn_number: int
+
+
+@acp.on_task_create
+async def handle_task_create(params: CreateTaskParams):
+ # Upon task creation, we initialize the task state with a system message.
+ # This will be fetched by the `on_task_event_send` handler when each event is sent.
+
+ #########################################################
+ # 1. Initialize the task state.
+ #########################################################
+
+ state = StateModel(
+ messages=[SystemMessage(content="You are a helpful assistant that can answer questions.")],
+ turn_number=0,
+ )
+ await adk.state.create(task_id=params.task.id, agent_id=params.agent.id, state=state)
+
+@acp.on_task_event_send
+async def handle_event_send(params: SendEventParams):
+ # !!! Warning: Because "Agentic" ACPs are designed to be fully asynchronous, race conditions can occur if parallel events are sent. It is highly recommended to use the "temporal" type in the AgenticACPConfig instead to handle complex use cases. The "base" ACP is only designed to be used for simple use cases and for learning purposes.
+
+ #########################################################
+ # 2. Validate the event content.
+ #########################################################
+ if not params.event.content:
+ return
+
+ if params.event.content.type != "text":
+ raise ValueError(f"Expected text message, got {params.event.content.type}")
+
+ if params.event.content.author != "user":
+ raise ValueError(f"Expected user message, got {params.event.content.author}")
+
+ #########################################################
+ # 3. Retrieve the task state.
+ #########################################################
+
+ task_state = await adk.state.get_by_task_and_agent(task_id=params.task.id, agent_id=params.agent.id)
+ state = StateModel.model_validate(task_state.state)
+ state.turn_number += 1
+
+ # Add the new user message to the message history
+ state.messages.append(UserMessage(content=params.event.content.content))
+
+ #########################################################
+ # 4. (👋) Create a tracing span.
+ #########################################################
+
+    # Create a tracing span. All of the Agentex ADK methods are "auto-traced", but by default they show up as a flat list associated with a single trace id (which is usually just set to the task id).
+    # If you want a hierarchical trace, create spans in your business logic and pass the span id to the ADK methods. Traces will then be grouped under parent spans for better readability.
+    # Even if you're not building a hierarchy, you can use this same mechanism to create a custom span that is associated with your trace by trace ID.
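+    # (👋) For example, a child span nested under this turn's span could be created by
+    # passing the parent span's id, roughly like the sketch below (the `parent_id`
+    # argument is used the same way in the 040_other_sdks tutorial in this repo):
+    #
+    #     async with adk.tracing.span(
+    #         trace_id=params.task.id,
+    #         name="my_custom_step",
+    #         parent_id=span.id,
+    #     ) as child_span:
+    #         ...  # do some work, then optionally set child_span.output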
+
+ async with adk.tracing.span(
+ trace_id=params.task.id,
+ name=f"Turn {state.turn_number}",
+ input=state
+ ) as span:
+
+ #########################################################
+ # 5. Echo back the user's message so it shows up in the UI.
+ #########################################################
+
+ # (👋) Notice that we pass the parent_span_id to the ADK methods to create a hierarchical trace.
+ await adk.messages.create(
+ task_id=params.task.id,
+ trace_id=params.task.id,
+ content=params.event.content,
+ parent_span_id=span.id if span else None,
+ )
+
+ #########################################################
+ # 6. If the OpenAI API key is not set, send a message to the user to let them know.
+ #########################################################
+
+ # (👋) Notice that we pass the parent_span_id to the ADK methods to create a hierarchical trace.
+ if not os.environ.get("OPENAI_API_KEY"):
+ await adk.messages.create(
+ task_id=params.task.id,
+ trace_id=params.task.id,
+ content=TextContent(
+ author="agent",
+                    content="Hey, sorry I'm unable to respond to your message because you're running this example without an OpenAI API key. Please set the OPENAI_API_KEY environment variable to run this example. Do this either by adding a .env file to the project/ directory or by setting the environment variable in your terminal.",
+ ),
+ parent_span_id=span.id if span else None,
+ )
+
+        #########################################################
+        # 7. Call an LLM to respond to the user's message
+        #########################################################
+
+        # (👋) Notice that we pass the parent_span_id to the ADK methods to create a hierarchical trace.
+        task_message = await adk.providers.litellm.chat_completion_stream_auto_send(
+            task_id=params.task.id,
+            llm_config=LLMConfig(model="gpt-4o-mini", messages=state.messages, stream=True),
+            trace_id=params.task.id,
+            parent_span_id=span.id if span else None,
+        )
+
+        state.messages.append(AssistantMessage(content=task_message.content.content))
+
+        #########################################################
+        # 8. Store the messages in the task state for the next turn
+        #########################################################
+
+        # (👋) Notice that we pass the parent_span_id to the ADK methods to create a hierarchical trace.
+        await adk.state.update(
+            state_id=task_state.id,
+            task_id=params.task.id,
+            agent_id=params.agent.id,
+            state=state,
+            trace_id=params.task.id,
+            parent_span_id=span.id if span else None,
+        )
+
+        #########################################################
+        # 9. (👋) Set the span output to the state for the next turn
+        #########################################################
+
+        # (👋) You can store an arbitrary pydantic model or dictionary in the span output. The idea of a span is that it easily allows you to compare the input and output of a span to see what the wrapped function did.
+        # In this case, the state is comprehensive and expressive, so we simply store the updated state that resulted from this turn.
+        span.output = state
+
+@acp.on_task_cancel
+async def handle_task_cancel(params: CancelTaskParams):
+ """Default task cancel handler"""
+ logger.info(f"Task canceled: {params.task}")
diff --git a/examples/tutorials/10_agentic/00_base/030_tracing/requirements.txt b/examples/tutorials/10_agentic/00_base/030_tracing/requirements.txt
new file mode 100644
index 000000000..d12ad3951
--- /dev/null
+++ b/examples/tutorials/10_agentic/00_base/030_tracing/requirements.txt
@@ -0,0 +1,4 @@
+agentex-py>=0.0.4,<0.1.0
+
+# Scale GenAI Platform Python SDK
+scale-gp
\ No newline at end of file
diff --git a/examples/tutorials/10_agentic/00_base/040_other_sdks/.dockerignore b/examples/tutorials/10_agentic/00_base/040_other_sdks/.dockerignore
new file mode 100644
index 000000000..4521d8465
--- /dev/null
+++ b/examples/tutorials/10_agentic/00_base/040_other_sdks/.dockerignore
@@ -0,0 +1,46 @@
+# Python
+__pycache__/
+*.py[cod]
+*$py.class
+*.so
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# Environments
+.env**
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# IDE
+.idea/
+.vscode/
+*.swp
+*.swo
+
+# Git
+.git
+.gitignore
+
+# Misc
+.DS_Store
+
+# CodeArtifact configuration
+.codeartifact-pip-conf
\ No newline at end of file
diff --git a/examples/tutorials/10_agentic/00_base/040_other_sdks/Dockerfile b/examples/tutorials/10_agentic/00_base/040_other_sdks/Dockerfile
new file mode 100644
index 000000000..ae6ab7ff3
--- /dev/null
+++ b/examples/tutorials/10_agentic/00_base/040_other_sdks/Dockerfile
@@ -0,0 +1,45 @@
+# syntax=docker/dockerfile:1.3
+FROM python:3.12-slim
+COPY --from=ghcr.io/astral-sh/uv:0.6.4 /uv /uvx /bin/
+
+# Install system dependencies
+RUN apt-get update && apt-get install -y \
+ htop \
+ vim \
+ curl \
+ tar \
+ python3-dev \
+ postgresql-client \
+ build-essential \
+ libpq-dev \
+ gcc \
+ cmake \
+ netcat-openbsd \
+ && apt-get clean \
+ && rm -rf /var/lib/apt/lists/*
+
+RUN uv pip install --system --upgrade pip setuptools wheel
+
+ENV UV_HTTP_TIMEOUT=1000
+
+# Copy just the requirements file to optimize caching
+COPY 040_other_sdks/requirements.txt /app/requirements.txt
+
+WORKDIR /app/
+
+# Install the required Python packages
+RUN --mount=type=secret,id=codeartifact-pip-conf,target=/etc/pip.conf \
+ export UV_INDEX_URL=$(grep -E "^index-url" /etc/pip.conf | cut -d'=' -f2- | xargs) && \
+ export UV_EXTRA_INDEX_URL=$(grep -E "^extra-index-url" /etc/pip.conf | cut -d'=' -f2- | xargs || echo "") && \
+ uv pip install --system -r requirements.txt
+
+# Copy the project code
+COPY 040_other_sdks/project /app/project
+
+WORKDIR /app/project
+
+# Set environment variables
+ENV PYTHONPATH=/app
+
+# Run the agent using uvicorn
+CMD ["uvicorn", "acp:acp", "--host", "0.0.0.0", "--port", "8000"]
\ No newline at end of file
diff --git a/examples/tutorials/10_agentic/00_base/040_other_sdks/README.md b/examples/tutorials/10_agentic/00_base/040_other_sdks/README.md
new file mode 100644
index 000000000..2596a8c7e
--- /dev/null
+++ b/examples/tutorials/10_agentic/00_base/040_other_sdks/README.md
@@ -0,0 +1,127 @@
+# ab040-other-sdks - AgentEx Starter Template
+
+This is a generic starter template for building agents with the AgentEx framework. It provides a basic implementation of the Agent 2 Client Protocol (ACP) to help you get started quickly.
+
+## What You'll Learn
+
+- **Tasks**: A task is a grouping mechanism for related messages. Think of it as a conversation thread or a session.
+- **Messages**: Messages are communication objects within a task. They can contain text, data, or instructions.
+- **ACP Events**: The agent responds to four main events:
+ - `task_received`: When a new task is created
+ - `task_message_received`: When a message is sent within a task
+ - `task_approved`: When a task is approved
+ - `task_canceled`: When a task is canceled
+
+## Running the Agent
+
+1. Run the agent locally:
+```bash
+agentex agents run --manifest manifest.yaml
+```
+
+The agent will start on port 8000 and print messages whenever it receives any of the ACP events.
+
+## What's Inside
+
+This template:
+- Sets up a basic ACP server
+- Handles each of the required ACP events with simple print statements
+- Provides a foundation for building more complex agents
+
+## Next Steps
+
+For more advanced agent development, check out the AgentEx tutorials:
+
+- **Tutorials 00-08**: Learn about building synchronous agents with ACP
+- **Tutorials 09-10**: Learn how to use Temporal to power asynchronous agents
+ - Tutorial 09: Basic Temporal workflow setup
+ - Tutorial 10: Advanced Temporal patterns and best practices
+
+These tutorials will help you understand:
+- How to handle long-running tasks
+- Implementing state machines
+- Managing complex workflows
+- Best practices for async agent development
+
+## The Manifest File
+
+The `manifest.yaml` file is your agent's configuration file. It defines:
+- How your agent should be built and packaged
+- What files are included in your agent's Docker image
+- Your agent's name and description
+- Local development settings (like the port your agent runs on)
+
+This file is essential for both local development and deployment of your agent.
+
+## Project Structure
+
+```
+040_other_sdks/
+├── project/ # Your agent's code
+│ ├── __init__.py
+│ └── acp.py # ACP server and event handlers
+├── Dockerfile # Container definition
+├── manifest.yaml # Deployment config
+└── requirements.txt # Dependencies
+```
+
+## Development
+
+1. **Customize Event Handlers**
+ - Modify the handlers in `acp.py` to implement your agent's logic
+ - Add your own tools and capabilities
+ - Implement custom state management
+
+2. **Add Dependencies**
+ - Add required packages to `requirements.txt`
+ - Update the manifest with any needed credentials
+
+## Local Development
+
+1. **Install AgentEx**
+```bash
+cd agentex-py
+uv venv
+source .venv/bin/activate
+uv sync
+```
+
+2. **Deploy your agent**
+```bash
+# From this directory
+export ENVIRONMENT=development && agentex agents create --manifest manifest.yaml
+```
+
+3. **Interact with your agent**
+
+Option 0: CLI (deprecated - to be replaced once a new CLI is implemented - please use the web UI for now!)
+```bash
+# Submit a task via CLI
+agentex tasks submit --agent {{ agent_name }} --task "Your task here"
+```
+
+Option 1: Web UI
+```bash
+# Start the local web interface
+cd agentex-web
+make dev
+
+# Then open http://localhost:3000 in your browser to chat with your agent
+```
+
+## Development Tips
+
+1. **Local Testing**
+- Set environment variables in `project/.env` for any required credentials, or set them in `manifest.yaml`
+
+### To build the agent Docker image locally (normally not necessary):
+
+1. First, set up CodeArtifact authentication:
+```bash
+../../setup-build-codeartifact.sh
+```
+
+2. Build the agent image:
+```bash
+agentex agents build --manifest manifest.yaml --secret 'id=codeartifact-pip-conf,src=.codeartifact-pip-conf'
+```
\ No newline at end of file
diff --git a/examples/tutorials/10_agentic/00_base/040_other_sdks/manifest.yaml b/examples/tutorials/10_agentic/00_base/040_other_sdks/manifest.yaml
new file mode 100644
index 000000000..8695ab7d4
--- /dev/null
+++ b/examples/tutorials/10_agentic/00_base/040_other_sdks/manifest.yaml
@@ -0,0 +1,119 @@
+# Agent Manifest Configuration
+# ---------------------------
+# This file defines how your agent should be built and deployed.
+
+# Build Configuration
+# ------------------
+# The build config defines what gets packaged into your agent's Docker image.
+# This same configuration is used whether building locally or remotely.
+#
+# When building:
+# 1. All files from include_paths are collected into a build context
+# 2. The context is filtered by dockerignore rules
+# 3. The Dockerfile uses this context to build your agent's image
+# 4. The image is pushed to a registry and used to run your agent
+build:
+ context:
+ # Root directory for the build context
+ root: ../ # Keep this as the default root
+
+ # Paths to include in the Docker build context
+ # Must include:
+ # - agentex-py: The core framework (needed for agent execution)
+ # - Your agent's directory (your custom agent code)
+ # These paths are collected and sent to the Docker daemon for building
+ include_paths:
+ - 040_other_sdks
+
+ # Path to your agent's Dockerfile
+ # This defines how your agent's image is built from the context
+ # Relative to the root directory
+ dockerfile: 040_other_sdks/Dockerfile
+
+ # Path to your agent's .dockerignore
+ # Filters unnecessary files from the build context
+ # Helps keep build context small and builds fast
+ dockerignore: 040_other_sdks/.dockerignore
+
+
+# Local Development Configuration
+# -----------------------------
+# Only used when running the agent locally
+local_development:
+ agent:
+ port: 8000 # Port where your local ACP server is running
+ host_address: host.docker.internal # Host address for Docker networking (host.docker.internal for Docker, localhost for direct)
+
+ # File paths for local development (relative to this manifest.yaml)
+ paths:
+ # Path to ACP server file
+ # Examples:
+ # project/acp.py (standard)
+ # src/server.py (custom structure)
+ # ../shared/acp.py (shared across projects)
+ # /absolute/path/acp.py (absolute path)
+ acp: project/acp.py
+
+
+# Agent Configuration
+# -----------------
+agent:
+ acp_type: agentic
+
+ # Unique name for your agent
+ # Used for task routing and monitoring
+ name: ab040-other-sdks
+
+ # Description of what your agent does
+ # Helps with documentation and discovery
+  description: An AgentEx agent that uses other SDKs to show the flexibility that agents are just code
+
+ # Temporal workflow configuration
+ # Set enabled: true to use Temporal workflows for long-running tasks
+ temporal:
+ enabled: false
+
+ # Optional: Credentials mapping
+ # Maps Kubernetes secrets to environment variables
+ # Common credentials include:
+ # credentials:
+ # - env_var_name: OPENAI_API_KEY
+ # secret_name: openai-api-key
+ # secret_key: api-key
+
+ # Optional: Set Environment variables for running your agent locally as well
+ # as for deployment later on
+ # env:
+ # - name: OPENAI_BASE_URL
+ # value: "https://api.openai.com/v1"
+ # - name: ACCOUNT_ID
+ # value: "your_account_id_here"
+
+
+# Deployment Configuration
+# -----------------------
+# Configuration for deploying your agent to Kubernetes clusters
+deployment:
+ # Container image configuration
+ image:
+ repository: "" # Update with your container registry
+ tag: "latest" # Default tag, should be versioned in production
+
+ # Global deployment settings that apply to all clusters
+ # These can be overridden in cluster-specific files (deploy/*.yaml)
+ global:
+ agent:
+ name: "ab040-other-sdks"
+      description: "An AgentEx agent that uses other SDKs to show the flexibility that agents are just code"
+
+ # Default replica count
+ replicaCount: 1
+
+ # Default resource requirements
+ resources:
+ requests:
+ cpu: "500m"
+ memory: "1Gi"
+ limits:
+ cpu: "1000m"
+ memory: "2Gi"
\ No newline at end of file
diff --git a/examples/tutorials/10_agentic/00_base/040_other_sdks/project/__init__.py b/examples/tutorials/10_agentic/00_base/040_other_sdks/project/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/examples/tutorials/10_agentic/00_base/040_other_sdks/project/acp.py b/examples/tutorials/10_agentic/00_base/040_other_sdks/project/acp.py
new file mode 100644
index 000000000..9b149630c
--- /dev/null
+++ b/examples/tutorials/10_agentic/00_base/040_other_sdks/project/acp.py
@@ -0,0 +1,376 @@
+import os
+from typing import Dict, List, Optional
+from contextlib import AsyncExitStack, asynccontextmanager
+import json
+
+from agentex.lib import adk
+from agentex.lib.core.services.adk.streaming import StreamingTaskMessageContext
+from agentex.lib.sdk.fastacp.fastacp import FastACP
+from agentex.lib.types.acp import CancelTaskParams, CreateTaskParams, SendEventParams
+from agentex.lib.types.fastacp import AgenticACPConfig
+from agentex.lib.types.task_message_updates import (
+ StreamTaskMessageDelta,
+ StreamTaskMessageFull,
+ TextDelta,
+)
+from agentex.lib.utils.logging import make_logger
+from agentex.lib.utils.model_utils import BaseModel
+from agentex.types.text_content import TextContent
+from agentex.types.task_message_content import ToolRequestContent, ToolResponseContent
+
+from agents import Agent, Runner
+from agents.mcp import MCPServerStdio
+from mcp import StdioServerParameters
+from openai.types.responses import (
+ ResponseCompletedEvent,
+ ResponseFunctionToolCall,
+ ResponseOutputItemDoneEvent,
+ ResponseTextDeltaEvent,
+)
+from pydantic import BaseModel
+
+logger = make_logger(__name__)
+
+
+# Create an ACP server
+
+# !!! Warning: Because "Agentic" ACPs are designed to be fully asynchronous, race conditions can occur if parallel events are sent. It is highly recommended to use the "temporal" type in the AgenticACPConfig instead to handle complex use cases. The "base" ACP is only designed to be used for simple use cases and for learning purposes.
+acp = FastACP.create(
+ acp_type="agentic",
+ config=AgenticACPConfig(type="base"),
+)
+
+class StateModel(BaseModel):
+ input_list: List[dict]
+ turn_number: int
+
+
+MCP_SERVERS = [
+ StdioServerParameters(
+ command="npx",
+ args=["-y", "@modelcontextprotocol/server-sequential-thinking"],
+ ),
+ StdioServerParameters(
+ command="uvx",
+ args=["openai-websearch-mcp"],
+ env={
+ "OPENAI_API_KEY": os.environ.get("OPENAI_API_KEY", "")
+ }
+ ),
+]
+
+
+@acp.on_task_create
+async def handle_task_create(params: CreateTaskParams):
+ # Upon task creation, we initialize the task state with a system message.
+ # This will be fetched by the `on_task_event_send` handler when each event is sent.
+ state = StateModel(
+ input_list=[],
+ turn_number=0,
+ )
+ await adk.state.create(task_id=params.task.id, agent_id=params.agent.id, state=state)
+
+@acp.on_task_event_send
+async def handle_event_send(params: SendEventParams):
+ # !!! Warning: Because "Agentic" ACPs are designed to be fully asynchronous, race conditions can occur if parallel events are sent. It is highly recommended to use the "temporal" type in the AgenticACPConfig instead to handle complex use cases. The "base" ACP is only designed to be used for simple use cases and for learning purposes.
+
+ if not params.event.content:
+ return
+
+ if params.event.content.type != "text":
+ raise ValueError(f"Expected text message, got {params.event.content.type}")
+
+ if params.event.content.author != "user":
+ raise ValueError(f"Expected user message, got {params.event.content.author}")
+
+
+ # Retrieve the task state. Each event is handled as a new turn, so we need to get the state for the current turn.
+ task_state = await adk.state.get_by_task_and_agent(task_id=params.task.id, agent_id=params.agent.id)
+ state = StateModel.model_validate(task_state.state)
+ state.turn_number += 1
+
+ # Add the new user message to the message history
+ state.input_list.append({"role": "user", "content": params.event.content.content})
+
+ async with adk.tracing.span(
+ trace_id=params.task.id,
+ name=f"Turn {state.turn_number}",
+ input=state
+ ) as span:
+ # Echo back the user's message so it shows up in the UI. This is not done by default so the agent developer has full control over what is shown to the user.
+ await adk.messages.create(
+ task_id=params.task.id,
+ trace_id=params.task.id,
+ content=params.event.content,
+ parent_span_id=span.id if span else None,
+ )
+
+ if not os.environ.get("OPENAI_API_KEY"):
+ await adk.messages.create(
+ task_id=params.task.id,
+ trace_id=params.task.id,
+ content=TextContent(
+ author="agent",
+                    content="Hey, sorry I'm unable to respond to your message because you're running this example without an OpenAI API key. Please set the OPENAI_API_KEY environment variable to run this example. Do this either by adding a .env file to the project/ directory or by setting the environment variable in your terminal.",
+ ),
+ parent_span_id=span.id if span else None,
+ )
+
+ #########################################################
+ # (👋) Call an LLM to respond to the user's message using custom streaming
+ #########################################################
+
+ # This demonstrates advanced streaming patterns using adk.streaming.
+ # We'll show two different streaming approaches:
+ # 1. Simple streaming with context managers for complete messages (tool calls)
+ # 2. Delta-based streaming for incremental text responses
+ run_result = await run_openai_agent_with_custom_streaming(
+ task_id=params.task.id,
+ trace_id=params.task.id,
+ input_list=state.input_list,
+ mcp_server_params=MCP_SERVERS,
+ agent_name="Tool-Enabled Assistant",
+ agent_instructions="""You are a helpful assistant that can answer questions using various tools.
+ You have access to sequential thinking and web search capabilities through MCP servers.
+ Use these tools when appropriate to provide accurate and well-reasoned responses.""",
+ parent_span_id=span.id if span else None,
+ )
+
+ state.input_list = run_result.to_input_list()
+
+ # Store the messages in the task state for the next turn
+ await adk.state.update(
+ state_id=task_state.id,
+ task_id=params.task.id,
+ agent_id=params.agent.id,
+ state=state,
+ trace_id=params.task.id,
+ parent_span_id=span.id if span else None,
+ )
+
+ # Set the span output to the state for the next turn
+ span.output = state
+
+@acp.on_task_cancel
+async def handle_task_cancel(params: CancelTaskParams):
+ """Default task cancel handler"""
+ logger.info(f"Task canceled: {params.task}")
+
+
+########################################################
+# Helper functions that integrate Agentex primitives with other SDKs like OpenAI Agents
+########################################################
+
+
+@asynccontextmanager
+async def mcp_server_context(mcp_server_params: list[StdioServerParameters]):
+ """Context manager for MCP servers."""
+ servers = []
+ for params in mcp_server_params:
+ server = MCPServerStdio(
+ name=f"Server: {params.command}",
+ params=params.model_dump(),
+ cache_tools_list=True,
+ client_session_timeout_seconds=60,
+ )
+ servers.append(server)
+
+ async with AsyncExitStack() as stack:
+ for server in servers:
+ await stack.enter_async_context(server)
+ yield servers
+
+
+def redact_mcp_server_params(
+ mcp_server_params: list[StdioServerParameters],
+) -> list[StdioServerParameters]:
+ """Redact MCP server params."""
+ return [
+ StdioServerParameters(
+ **{k: v for k, v in server_param.model_dump().items() if k != "env"},
+ env={k: "********" for k in server_param.env} if server_param.env else None,
+ )
+ for server_param in mcp_server_params
+ ]
+
+
+async def run_openai_agent_with_custom_streaming(
+ task_id: str,
+ trace_id: str,
+ input_list: list[Dict],
+ mcp_server_params: list[StdioServerParameters],
+ agent_name: str,
+ agent_instructions: str,
+ parent_span_id: Optional[str] = None,
+):
+ """
+ Run an OpenAI agent with custom streaming using adk.streaming.
+
+ This demonstrates advanced streaming patterns using adk.streaming.
+ We'll show two different streaming approaches:
+ 1. Simple streaming with context managers for complete messages (tool calls)
+ 2. Delta-based streaming for incremental text responses
+ """
+
+ tool_call_map: Dict[str, ResponseFunctionToolCall] = {}
+
+ redacted_mcp_server_params = redact_mcp_server_params(mcp_server_params)
+
+ result = None
+ async with adk.tracing.span(
+ trace_id=trace_id,
+ name="run_agent_with_custom_streaming",
+ input={
+ "input_list": input_list,
+ "mcp_server_params": redacted_mcp_server_params,
+ "agent_name": agent_name,
+ "agent_instructions": agent_instructions,
+ },
+ parent_id=parent_span_id,
+ ) as span:
+ async with mcp_server_context(mcp_server_params) as servers:
+ agent = Agent(
+ name=agent_name,
+ instructions=agent_instructions,
+ mcp_servers=servers,
+ )
+
+ # Run with streaming enabled
+ result = Runner.run_streamed(starting_agent=agent, input=input_list)
+
+ #########################################################
+ # (👋) For complete messages like tool calls we will use a with block to create a streaming context, but for text deltas we will use a streaming context that is created and closed manually. To make sure we close all streaming contexts we will track the item_id and close them all at the end.
+ #########################################################
+
+ item_id_to_streaming_context: Dict[str, StreamingTaskMessageContext] = {}
+ unclosed_item_ids: set[str] = set()
+
+ try:
+ # Process streaming events with TaskMessage creation
+ async for event in result.stream_events():
+
+ if event.type == "run_item_stream_event":
+ if event.item.type == "tool_call_item":
+ tool_call_item = event.item.raw_item
+ tool_call_map[tool_call_item.call_id] = tool_call_item
+
+ logger.info(f"Tool call item: {tool_call_item}")
+
+ tool_request_content = ToolRequestContent(
+ author="agent",
+ tool_call_id=tool_call_item.call_id,
+ name=tool_call_item.name,
+ arguments=json.loads(tool_call_item.arguments),
+ )
+
+ # (👋) Create a streaming context for the tool call
+ # Since a tool call is a complete message, we can use a with block to create a streaming context. This will take care of creating a TaskMessage, sending a START event, and sending a DONE event when the context is closed. Of course you will also want to stream the content of the tool call so clients that are subscribed to streaming updates to the task will see the tool call.
+ async with adk.streaming.streaming_task_message_context(
+ task_id=task_id,
+ initial_content=tool_request_content,
+ ) as streaming_context:
+                                # The message has already been persisted, but we still need to send an update
+ await streaming_context.stream_update(
+ update=StreamTaskMessageFull(
+ parent_task_message=streaming_context.task_message,
+ content=tool_request_content,
+ content_type=tool_request_content.type,
+ ),
+ )
+
+ elif event.item.type == "tool_call_output_item":
+ tool_output_item = event.item.raw_item
+
+ tool_response_content = ToolResponseContent(
+ author="agent",
+ tool_call_id=tool_output_item["call_id"],
+ name=tool_call_map[tool_output_item["call_id"]].name,
+ content=tool_output_item["output"],
+ )
+
+ # (👋) Create a streaming context for the tool call output
+ # Since a tool call output is a complete message, we can use a with block to create a streaming context. This will take care of creating a TaskMessage, sending a START event, and sending a DONE event when the context is closed. Of course you will also want to stream the content of the tool call output so clients that are subscribed to streaming updates to the task will see the tool call output.
+ async with adk.streaming.streaming_task_message_context(
+ task_id=task_id,
+ initial_content=tool_response_content,
+ ) as streaming_context:
+ # The message has already been persisted, but we still need to send an update
+ await streaming_context.stream_update(
+ update=StreamTaskMessageFull(
+ parent_task_message=streaming_context.task_message,
+ content=tool_response_content,
+ content_type=tool_response_content.type,
+ ),
+ )
+
+ elif event.type == "raw_response_event":
+ if isinstance(event.data, ResponseTextDeltaEvent):
+ # Handle text delta
+ item_id = event.data.item_id
+
+ # (👋) Create a streaming context for the text delta
+ # Since a text delta is a partial message, we will create a streaming context manually without a with block because we need to persist the context across the for loop.
+ if item_id not in item_id_to_streaming_context:
+ streaming_context = adk.streaming.streaming_task_message_context(
+ task_id=task_id,
+ initial_content=TextContent(
+ author="agent",
+ content="",
+ ),
+ )
+ # (👋) Open the streaming context manually
+ # This will create a TaskMessage and send a START event for you.
+ item_id_to_streaming_context[item_id] = await streaming_context.open()
+
+ # (👋) Add the item_id to the set of unclosed item_ids
+ # This will allow us to close any lingering streaming context when the agent is done.
+ unclosed_item_ids.add(item_id)
+ else:
+ streaming_context = item_id_to_streaming_context[item_id]
+
+ # (👋) Stream the delta through the streaming service
+ # This will send a DELTA event. The context manager will accumulate the content for you into a final message when you close the context.
+ await streaming_context.stream_update(
+ update=StreamTaskMessageDelta(
+ parent_task_message=streaming_context.task_message,
+ delta=TextDelta(text_delta=event.data.delta),
+ ),
+ )
+
+ elif isinstance(event.data, ResponseOutputItemDoneEvent):
+ # Handle item completion
+ item_id = event.data.item.id
+
+ # (👋) Close the streaming context
+ # This will send a DONE event and update the persisted message.
+ if item_id in item_id_to_streaming_context:
+ streaming_context = item_id_to_streaming_context[item_id]
+ await streaming_context.close()
+ unclosed_item_ids.remove(item_id)
+
+ elif isinstance(event.data, ResponseCompletedEvent):
+ # (👋) Close all remaining streaming contexts
+                        # This will send a DONE event and update the persisted messages for any remaining streaming contexts. Normally this won't be needed if all messages are closed by the time the agent is done.
+                        # Iterate over a copy of the set, since we remove ids from it while looping.
+                        for item_id in list(unclosed_item_ids):
+                            streaming_context = item_id_to_streaming_context[item_id]
+                            await streaming_context.close()
+                            unclosed_item_ids.remove(item_id)
+
+ finally:
+ # (👋) Close all remaining streaming contexts
+                # This will send a DONE event and update the persisted messages for any remaining streaming contexts. Normally this won't be needed, but we do it in case any errors occur.
+                # Iterate over a copy of the set, since we remove ids from it while looping.
+                for item_id in list(unclosed_item_ids):
+                    streaming_context = item_id_to_streaming_context[item_id]
+                    await streaming_context.close()
+                    unclosed_item_ids.remove(item_id)
+ if span:
+ span.output = {
+ "new_items": [
+ item.raw_item.model_dump()
+ if isinstance(item.raw_item, BaseModel)
+ else item.raw_item
+ for item in result.new_items
+ ],
+ "final_output": result.final_output,
+ }
+ return result
diff --git a/examples/tutorials/10_agentic/00_base/040_other_sdks/requirements.txt b/examples/tutorials/10_agentic/00_base/040_other_sdks/requirements.txt
new file mode 100644
index 000000000..d12ad3951
--- /dev/null
+++ b/examples/tutorials/10_agentic/00_base/040_other_sdks/requirements.txt
@@ -0,0 +1,4 @@
+agentex-py>=0.0.4,<0.1.0
+
+# Scale GenAI Platform Python SDK
+scale-gp
\ No newline at end of file
diff --git a/examples/tutorials/10_agentic/00_base/050_hello_oldowan/.dockerignore b/examples/tutorials/10_agentic/00_base/050_hello_oldowan/.dockerignore
new file mode 100644
index 000000000..c3620f1bc
--- /dev/null
+++ b/examples/tutorials/10_agentic/00_base/050_hello_oldowan/.dockerignore
@@ -0,0 +1,46 @@
+# Python
+__pycache__/
+*.py[cod]
+*$py.class
+*.so
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# Environments
+.env**
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# IDE
+.idea/
+.vscode/
+*.swp
+*.swo
+
+# Git
+.git
+.gitignore
+
+# Misc
+.DS_Store
+
+# CodeArtifact configuration
+.codeartifact-pip-conf
\ No newline at end of file
diff --git a/examples/tutorials/10_agentic/00_base/050_hello_oldowan/Dockerfile b/examples/tutorials/10_agentic/00_base/050_hello_oldowan/Dockerfile
new file mode 100644
index 000000000..aca315ae6
--- /dev/null
+++ b/examples/tutorials/10_agentic/00_base/050_hello_oldowan/Dockerfile
@@ -0,0 +1,45 @@
+# syntax=docker/dockerfile:1.3
+FROM python:3.12-slim
+COPY --from=ghcr.io/astral-sh/uv:0.6.4 /uv /uvx /bin/
+
+# Install system dependencies
+RUN apt-get update && apt-get install -y \
+ htop \
+ vim \
+ curl \
+ tar \
+ python3-dev \
+ postgresql-client \
+ build-essential \
+ libpq-dev \
+ gcc \
+ cmake \
+ netcat-openbsd \
+ && apt-get clean \
+ && rm -rf /var/lib/apt/lists/*
+
+RUN uv pip install --system --upgrade pip setuptools wheel
+
+ENV UV_HTTP_TIMEOUT=1000
+
+# Copy just the requirements file to optimize caching
+COPY 050_hello_oldowan/requirements.txt /app/requirements.txt
+
+WORKDIR /app/
+
+# Install the required Python packages
+RUN --mount=type=secret,id=codeartifact-pip-conf,target=/etc/pip.conf \
+ export UV_INDEX_URL=$(grep -E "^index-url" /etc/pip.conf | cut -d'=' -f2- | xargs) && \
+ export UV_EXTRA_INDEX_URL=$(grep -E "^extra-index-url" /etc/pip.conf | cut -d'=' -f2- | xargs || echo "") && \
+ uv pip install --system -r requirements.txt
+
+# Copy the project code
+COPY 050_hello_oldowan/project /app/project
+
+WORKDIR /app/project
+
+# Set environment variables
+ENV PYTHONPATH=/app
+
+# Run the agent using uvicorn
+CMD ["uvicorn", "acp:acp", "--host", "0.0.0.0", "--port", "8000"]
\ No newline at end of file
diff --git a/examples/tutorials/10_agentic/00_base/050_hello_oldowan/README.md b/examples/tutorials/10_agentic/00_base/050_hello_oldowan/README.md
new file mode 100644
index 000000000..d8c8c12ce
--- /dev/null
+++ b/examples/tutorials/10_agentic/00_base/050_hello_oldowan/README.md
@@ -0,0 +1,17 @@
+# Hello Oldowan Agent
+
+This is a simple example agent that demonstrates the basics of the Agent 2 Client Protocol (ACP) and the AgentEx framework with an integration to oldowan.
+
+## For Development
+Navigate to `tutorials/10_agentic/00_base/050_hello_oldowan`
+
+```bash
+# Generate CodeArtifact configuration for building (run from repo root)
+./setup-build-codeartifact.sh
+
+# Set up local development environment
+uv venv --python 3.12
+source .venv/bin/activate
+
+uv pip install -r requirements.txt --prerelease=allow
+```
diff --git a/examples/tutorials/10_agentic/00_base/050_hello_oldowan/manifest.yaml b/examples/tutorials/10_agentic/00_base/050_hello_oldowan/manifest.yaml
new file mode 100644
index 000000000..addbb6689
--- /dev/null
+++ b/examples/tutorials/10_agentic/00_base/050_hello_oldowan/manifest.yaml
@@ -0,0 +1,116 @@
+# Agent Manifest Configuration
+# ---------------------------
+# This file defines how your agent should be built and deployed.
+
+# Build Configuration
+# ------------------
+# The build config defines what gets packaged into your agent's Docker image.
+# This same configuration is used whether building locally or remotely.
+#
+# When building:
+# 1. All files from include_paths are collected into a build context
+# 2. The context is filtered by dockerignore rules
+# 3. The Dockerfile uses this context to build your agent's image
+# 4. The image is pushed to a registry and used to run your agent
+build:
+ context:
+ # Root directory for the build context
+ root: ../ # Keep this as the default root
+
+ # Paths to include in the Docker build context
+ # Must include:
+ # - agentex-py: The core framework (needed for agent execution)
+ # - Your agent's directory (your custom agent code)
+ # These paths are collected and sent to the Docker daemon for building
+ include_paths:
+ - 050_hello_oldowan
+
+ # Path to your agent's Dockerfile
+ # This defines how your agent's image is built from the context
+ # Relative to the root directory
+ dockerfile: 050_hello_oldowan/Dockerfile
+
+ # Path to your agent's .dockerignore
+ # Filters unnecessary files from the build context
+ # Helps keep build context small and builds fast
+ dockerignore: 050_hello_oldowan/.dockerignore
+
+# Local Development Configuration
+# -----------------------------
+# Only used when running the agent locally
+local_development:
+ agent:
+ port: 8000 # Port where your local ACP server is running
+ host_address: host.docker.internal # Host address for Docker networking (host.docker.internal for Docker, localhost for direct)
+
+ # File paths for local development (relative to this manifest.yaml)
+ paths:
+ # Path to ACP server file
+ # Examples:
+ # project/acp.py (standard)
+ # src/server.py (custom structure)
+ # ../shared/acp.py (shared across projects)
+ # /absolute/path/acp.py (absolute path)
+ acp: project/acp.py
+
+# Agent Configuration
+# -----------------
+agent:
+ acp_type: agentic
+
+ # Unique name for your agent
+ # Used for task routing and monitoring
+ name: ab050-hello-oldowan
+
+ # Description of what your agent does
+ # Helps with documentation and discovery
+  description: An AgentEx agent that uses Oldowan to show the flexibility that agents are just code
+
+ # Temporal workflow configuration
+ # Set enabled: true to use Temporal workflows for long-running tasks
+ temporal:
+ enabled: false
+
+ # Optional: Credentials mapping
+ # Maps Kubernetes secrets to environment variables
+ # Common credentials include:
+ # credentials:
+ # - env_var_name: OPENAI_API_KEY
+ # secret_name: openai-api-key
+ # secret_key: api-key
+
+ # Optional: Set Environment variables for running your agent locally as well
+ # as for deployment later on
+ # env:
+ # - name: OPENAI_BASE_URL
+ # value: "https://api.openai.com/v1"
+ # - name: ACCOUNT_ID
+ # value: "your_account_id_here"
+
+# Deployment Configuration
+# -----------------------
+# Configuration for deploying your agent to Kubernetes clusters
+deployment:
+ # Container image configuration
+ image:
+ repository: "" # Update with your container registry
+ tag: "latest" # Default tag, should be versioned in production
+
+ # Global deployment settings that apply to all clusters
+ # These can be overridden in cluster-specific files (deploy/*.yaml)
+ global:
+ agent:
+ name: "ab050-hello-oldowan"
+ description: "An AgentEx agent that uses Oldowan to show the flexibilty that agents are just code"
+
+ # Default replica count
+ replicaCount: 1
+
+ # Default resource requirements
+ resources:
+ requests:
+ cpu: "500m"
+ memory: "1Gi"
+ limits:
+ cpu: "1000m"
+ memory: "2Gi"
diff --git a/examples/tutorials/10_agentic/00_base/050_hello_oldowan/project/__init__.py b/examples/tutorials/10_agentic/00_base/050_hello_oldowan/project/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/examples/tutorials/10_agentic/00_base/050_hello_oldowan/project/acp.py b/examples/tutorials/10_agentic/00_base/050_hello_oldowan/project/acp.py
new file mode 100644
index 000000000..ad9acc06c
--- /dev/null
+++ b/examples/tutorials/10_agentic/00_base/050_hello_oldowan/project/acp.py
@@ -0,0 +1,435 @@
+import os
+import json
+from typing import Callable, List, Union, Dict
+from functools import partial
+import logging
+
+logger = logging.getLogger(__name__)
+
+from agentex.lib import adk
+from agentex.lib.sdk.fastacp.fastacp import FastACP
+from agentex.lib.types.acp import SendEventParams, CreateTaskParams, CancelTaskParams
+from agentex.lib.types.fastacp import AgenticACPConfig
+from agentex.lib.types.task_message_updates import StreamTaskMessageDelta, StreamTaskMessageFull, TaskMessageUpdate
+from agentex.lib.types.task_message_updates import DeltaType, TextDelta, ToolResponseDelta
+from agentex.lib.core.services.adk.streaming import StreamingTaskMessageContext
+from agentex.lib.utils.model_utils import BaseModel
+from agentex.types.span import Span
+from agentex.lib.types.llm_messages import Message, UserMessage
+from agentex.types.text_content import TextContent
+from agentex.types.task_message_content import TaskMessageContent, ToolRequestContent, ToolResponseContent
+from agentex.types import TaskMessage  # used in the type hints below
+
+from oldowan.tools.internal import google_search
+from oldowan.completions import ToolMessage, ChatCompletionMessage, simple_agent_acompletion, ChoiceDelta
+
+assert os.environ.get("SGP_API_KEY") is not None, "SGP_API_KEY is not set"
+assert os.environ.get("SGP_ACCOUNT_ID") is not None, "SGP_ACCOUNT_ID is not set"
+
+def think(thinking_str: str):
+ """
+ Use the tool to think about something. It will not obtain new information or change the database, but just append the thought to the log. Use it when complex reasoning or some cache memory is needed.
+ Args:
+ thinking_str: A thought to think about.
+ """
+ return
+
+TOOL_DICT = {
+ "google_search": google_search,
+ "think": think,
+}
+
+# Create an ACP server
+acp = FastACP.create(
+ acp_type="agentic",
+ config=AgenticACPConfig(type="base"),
+)
+
+TOOL_RESPONSE_ID_SUFFIX = "_response"
+
+class SimpleAgentCompletionConfig(BaseModel):
+ model: str
+ tools: List[str]
+ max_tokens: int
+ stream: bool
+
+class StateModel(BaseModel):
+ turn_number: int # The number of turns the agent has taken
+ messages: List[Message] # The messages the agent has seen
+ simple_agent_completion_config: SimpleAgentCompletionConfig # The function to call to get an agent response
+
+def convert_choice_delta_to_stream_task_message_deltas(choice_delta: ChoiceDelta, parent_task_message: TaskMessage) -> List[StreamTaskMessageDelta]:
+ """
+ This function converts a ChoiceDelta to a list of StreamTaskMessageDelta objects.
+ Args:
+ choice_delta: The ChoiceDelta to convert.
+ parent_task_message: The parent task message.
+ Returns:
+ A list of StreamTaskMessageDelta objects.
+ """
+ # These are tool requests
+ deltas = []
+ if choice_delta.tool_calls is not None and choice_delta.tool_calls[0].function.name is not None:
+
+ for tool_call in choice_delta.tool_calls:
+ # print(tool_call)
+ # don't stream tool calls yet.
+ # deltas.append(StreamTaskMessageDelta(
+ # index=idx,
+ # content_type=TaskMessageContentType.TOOL_REQUEST,
+ # delta='', # tool_call.function.arguments
+ # ))
+ pass
+ # These are tool responses
+ elif choice_delta.role == "tool":
+ deltas.append(StreamTaskMessageDelta(
+ parent_task_message=parent_task_message,
+ delta=ToolResponseDelta(
+ type=DeltaType.TOOL_RESPONSE,
+ tool_call_id=choice_delta.tool_call_id,
+ name=choice_delta.name,
+ content_delta=choice_delta.content,
+ ),
+ ))
+
+ # These are assistant messages
+ elif choice_delta.content is not None:
+ deltas.append(StreamTaskMessageDelta(
+ parent_task_message=parent_task_message,
+ delta=TextDelta(
+ type=DeltaType.TEXT,
+ text_delta=choice_delta.content,
+ ),
+ ))
+
+ return deltas
+
+def convert_choice_delta_to_message_content(choice_delta: ChoiceDelta) -> TaskMessageContent:
+ """
+ This function converts a ChoiceDelta to a TaskMessageContent object.
+ Args:
+ choice_delta: The ChoiceDelta to convert.
+ Returns:
+ A TaskMessageContent object.
+ """
+ # This converts a ChoiceDelta to a TaskMessage which will instantiate "the box" to send to client
+ if choice_delta.tool_calls is not None:
+ # Since we are streaming, we can assume we only need to create a message for the first tool call
+ return ToolRequestContent(
+ author="agent",
+ name=choice_delta.tool_calls[0].function.name,
+ tool_call_id=choice_delta.tool_calls[0].id,
+ arguments={}, # have to start this empty since we are streaming
+ )
+ elif choice_delta.role == "tool":
+ return ToolResponseContent(
+ author="agent",
+ name=choice_delta.name,
+ tool_call_id=choice_delta.tool_call_id,
+ content='', # starting empty because we add to it
+ )
+ elif choice_delta.role == "assistant":
+ return TextContent(
+ author="agent",
+ content='', # starting empty because we add to it
+ )
+ raise ValueError(f"Unknown role: {choice_delta.role}. Failed to convert to TaskMessage")
+
+async def convert_oldowan_message_to_stream_task_message_full(
+ id_to_streaming_context: Dict[str, StreamingTaskMessageContext],
+ oldowan_message: Union[ChatCompletionMessage, ToolMessage],
+ ) -> None:
+ """
+ This function converts an Oldowan message into StreamTaskMessageFull updates and streams each one through its matching streaming context.
+ Args:
+ id_to_streaming_context: Maps message, tool call, and tool response ids to their StreamingTaskMessageContext objects.
+ oldowan_message: The Oldowan message to convert and stream.
+ """
+
+ if isinstance(oldowan_message, ChatCompletionMessage):
+ # First create all tool calls
+ if oldowan_message.tool_calls is not None:
+ for tool_call in oldowan_message.tool_calls:
+ task_message_full = StreamTaskMessageFull(
+ parent_task_message=id_to_streaming_context[tool_call.id].task_message,
+ content=ToolRequestContent(
+ author="agent",
+ name=tool_call.function.name,
+ tool_call_id=tool_call.id,
+ arguments=json.loads(tool_call.function.arguments),
+ ),
+ )
+ await id_to_streaming_context[tool_call.id].stream_update(
+ update=task_message_full,
+ )
+
+
+ # Create the assistant messages
+ if oldowan_message.content is not None:
+ task_message_full = StreamTaskMessageFull(
+ parent_task_message=id_to_streaming_context[oldowan_message.id].task_message,
+ content=TextContent(
+ author="agent",
+ content=oldowan_message.content,
+ ),
+ )
+ await id_to_streaming_context[oldowan_message.id].stream_update(
+ update=task_message_full,
+ )
+
+ # Finally create the tool responses
+ elif isinstance(oldowan_message, ToolMessage):
+ task_message_full = StreamTaskMessageFull(
+ parent_task_message=id_to_streaming_context[oldowan_message.tool_call_id + TOOL_RESPONSE_ID_SUFFIX].task_message,
+ content=ToolResponseContent(
+ author="agent",
+ name=oldowan_message.name,
+ content=oldowan_message.content,
+ tool_call_id=oldowan_message.tool_call_id,
+ ),
+ )
+ await id_to_streaming_context[oldowan_message.tool_call_id + TOOL_RESPONSE_ID_SUFFIX].stream_update(
+ update=task_message_full,
+ )
+
+def get_oldowan_message_ids(oldowan_message: Union[ChatCompletionMessage, ToolMessage]) -> List[str]:
+ """
+ This function gets the ids of the oldowan message.
+ Args:
+ oldowan_message: The Oldowan message to get the ids of.
+ Returns:
+ A list of ids.
+ """
+ message_ids = []
+ if isinstance(oldowan_message, ChatCompletionMessage):
+ # check that there is content
+ if oldowan_message.content is not None:
+ message_ids.append(oldowan_message.id)
+
+ # check if there are tool calls
+ if oldowan_message.tool_calls is not None:
+ for tool_call in oldowan_message.tool_calls:
+ message_ids.append(tool_call.id)
+
+ elif isinstance(oldowan_message, ToolMessage):
+ message_ids.append(oldowan_message.tool_call_id + TOOL_RESPONSE_ID_SUFFIX)
+
+ return message_ids
+
+# This will eventually become adk.providers.oldowan.stream_agent_async_auto_send
+async def stream_oldowan_agent_async_auto_send(messages: List[Message], task_id: str, span: Span, simple_agent_acompletion_fn: Callable) -> List[Message]:
+ """
+ Stream an Oldowan agent response to the client.
+ Args:
+ messages: The messages to send to the agent.
+ task_id: The task id.
+ span: The span to use for tracing.
+ simple_agent_acompletion_fn: The callable used to get the streaming agent completion.
+ Returns:
+ The list of messages generated by the agent during this turn.
+ """
+ response_stream = await simple_agent_acompletion_fn(messages=messages)
+
+ # This is used to create the current TaskMessage object
+ cur_task_message_id = None
+
+ # This maps id either from message object, tool_call, or tool_response to the TaskMessage object
+ id_to_streaming_context = {}
+
+ # These are messages that have already been sent in "full"
+ persisted_messages = []
+ events = []
+
+ # These are ChoiceDelta objects
+ async for event in response_stream:
+ print(event)
+ if event.role is not None:
+ # If a tool call is made, check whether it is a new tool_call_id
+ if event.tool_calls is not None and event.tool_calls[0].id is not None and event.tool_calls[0].id not in id_to_streaming_context:
+ print(f"Role changed: {event.role}")
+ print(f"Tool call id changed: {event.tool_calls[0].id}")
+ cur_task_message_id = event.tool_calls[0].id
+ streaming_context = adk.streaming.streaming_task_message_context(
+ task_id=task_id,
+ initial_content=convert_choice_delta_to_message_content(event),
+ )
+ id_to_streaming_context[event.tool_calls[0].id] = await streaming_context.open()
+ print(f"Created streaming context for tool call: {id_to_streaming_context[event.tool_calls[0].id].task_message}")
+
+
+ # If this is a tool response, check whether we have already opened a streaming context for this tool_call_id
+ elif event.role == "tool" and (event.tool_call_id + TOOL_RESPONSE_ID_SUFFIX not in id_to_streaming_context):
+ print(f"Role changed: {event.role}")
+ print(f"Tool Response id: {event.tool_call_id + TOOL_RESPONSE_ID_SUFFIX}")
+ cur_task_message_id = event.tool_call_id + TOOL_RESPONSE_ID_SUFFIX
+ streaming_context = adk.streaming.streaming_task_message_context(
+ task_id=task_id,
+ initial_content=convert_choice_delta_to_message_content(event),
+ )
+ id_to_streaming_context[event.tool_call_id + TOOL_RESPONSE_ID_SUFFIX] = await streaming_context.open()
+ print(f"Created streaming context for tool response: {id_to_streaming_context[event.tool_call_id + TOOL_RESPONSE_ID_SUFFIX].task_message}")
+
+
+ elif event.role == "assistant" and event.content is not None and event.id not in id_to_streaming_context: # this is an assistant message
+ print(f"Role is: {event.role}")
+ assert hasattr(event, "id"), "Event does not have an id, please upgrade to latest oldowan"
+ print(f"Event id: {event.id}")
+ cur_task_message_id = event.id
+ streaming_context = adk.streaming.streaming_task_message_context(
+ task_id=task_id,
+ initial_content=convert_choice_delta_to_message_content(event),
+ )
+ id_to_streaming_context[event.id] = await streaming_context.open()
+ print(f"Created streaming context for assistant message: {id_to_streaming_context[event.id].task_message}")
+
+
+
+ # Now we can create the items to stream
+ # NOTE: key assumption is that ChoiceDeltaToolCall can only apply to one tool call at a time.
+ for task_message_delta in convert_choice_delta_to_stream_task_message_deltas(event, parent_task_message=id_to_streaming_context[cur_task_message_id].task_message):
+ streaming_context = id_to_streaming_context[cur_task_message_id]
+ await streaming_context.stream_update(
+ update=task_message_delta,
+ )
+
+ events.append(event)
+
+ # Ordering caveat: an Oldowan message can be completed before its task message has been created, or vice versa,
+ # because tool response messages are appended to the message list immediately but are streamed one after another.
+ # For each oldowan message, if we haven't persisted it yet, then do so
+ for idx, oldowan_message in enumerate(response_stream.messages):
+ if oldowan_message not in persisted_messages and all([id in id_to_streaming_context for id in get_oldowan_message_ids(oldowan_message)]):
+ async with adk.tracing.span(
+ trace_id=task_id,
+ parent_id=span.id,
+ name=f"Message {idx}",
+ input=messages + response_stream.messages[:idx], # input messages to this message
+ ) as message_span:
+ message_span.output = oldowan_message
+
+ # Send the full messages now that they are done
+ await convert_oldowan_message_to_stream_task_message_full(
+ id_to_streaming_context=id_to_streaming_context,
+ oldowan_message=oldowan_message
+ )
+
+ print(f"Persisted message: {oldowan_message}")
+ persisted_messages.append(oldowan_message)
+
+ # Stream the last object
+ async with adk.tracing.span(
+ trace_id=task_id,
+ parent_id=span.id,
+ name=f"Message {len(response_stream.messages)}",
+ input=messages + response_stream.messages[:-1],
+ ) as message_span:
+ message_span.output = response_stream.messages[-1]
+
+ # Persist the last message to the DB
+ await convert_oldowan_message_to_stream_task_message_full(
+ id_to_streaming_context=id_to_streaming_context,
+ oldowan_message=response_stream.messages[-1]
+ )
+ print(f"Persisted message: {response_stream.messages[-1]}")
+ persisted_messages.append(response_stream.messages[-1])
+
+ # Close all the streaming contexts
+ for streaming_context in id_to_streaming_context.values():
+ if not streaming_context._is_closed:
+ print(f"Closing streaming context for message ID: {streaming_context.task_message.id}. Is closed: {streaming_context._is_closed}")
+ await streaming_context.close()
+
+ # Return the full list of messages generated during this turn
+ return response_stream.messages
+
+
+@acp.on_task_create
+async def handle_task_create(params: CreateTaskParams):
+ # Upon task creation, we initialize the task state with a system message.
+ # This will be fetched by the `on_task_event_send` handler when each event is sent.
+ state = StateModel(
+ simple_agent_completion_config=SimpleAgentCompletionConfig(
+ model="openai/gpt-4o",
+ tools=["google_search", "think"],
+ max_tokens=8192,
+ stream=True,
+ ),
+ messages=[],
+ turn_number=0,
+ )
+ assert all([tool in TOOL_DICT for tool in state.simple_agent_completion_config.tools]), f"Invalid tool: {state.simple_agent_completion_config.tools}"
+ await adk.state.create(task_id=params.task.id, agent_id=params.agent.id, state=state)
+
+# Note: The return of this handler is required to be persisted by the Agentex Server
+@acp.on_task_event_send
+async def handle_message_send(params: SendEventParams):
+ #########################################################
+ # 1-3. These steps are all the same as the hello acp tutorial.
+ #########################################################
+
+ if not params.event.content:
+ return
+
+ if params.event.content.type != "text":
+ raise ValueError(f"Expected text message, got {params.event.content.type}")
+
+ if params.event.content.author != "user":
+ raise ValueError(f"Expected user message, got {params.event.content.author}")
+
+ print(f"Received event: {params.event.content}")
+ await adk.messages.create(
+ task_id=params.task.id,
+ trace_id=params.task.id,
+ content=params.event.content,
+ )
+
+ # Retrieve the state that was initialized in the task-create handler.
+ task_state = await adk.state.get_by_task_and_agent(task_id=params.task.id, agent_id=params.agent.id)
+ state = StateModel.model_validate(task_state.state)
+
+ messages = state.messages
+
+ #########################################################
+ # 4. Call an LLM to respond to the user's message and stream the response to the client.
+ #########################################################
+ print(f"Calling LLM with model {state.simple_agent_completion_config.model_dump_json()} and messages {messages}")
+
+ # Add the user's message to the conversation history
+ state.messages.append(UserMessage(content=params.event.content.content))
+
+ # The Agentex server automatically commits input and output messages to the database, so you don't need to do this yourself.
+ # Simply process the input content and return the output content.
+ async with adk.tracing.span(
+ trace_id=params.task.id,
+ name=f"Turn {state.turn_number}",
+ input=state,
+ ) as span:
+ simple_agent_completion_fn = partial(
+ simple_agent_acompletion,
+ model=state.simple_agent_completion_config.model,
+ tools=[TOOL_DICT[tool] for tool in state.simple_agent_completion_config.tools],
+ max_tokens=state.simple_agent_completion_config.max_tokens,
+ stream=state.simple_agent_completion_config.stream,
+ )
+
+ # Stream the response and collect the generated messages
+ messages = await stream_oldowan_agent_async_auto_send(messages=messages, task_id=params.task.id, span=span, simple_agent_acompletion_fn=simple_agent_completion_fn)
+
+ # The generated messages are accessible from the span output
+ state.messages.extend(messages)
+
+ state.turn_number += 1
+
+ # Update the state with the new messages
+ await adk.state.update(
+ task_id=params.task.id,
+ agent_id=params.agent.id,
+ state_id=task_state.id,
+ state=state,
+ trace_id=params.task.id,
+ )
+
+@acp.on_task_cancel
+async def handle_task_cancel(params: CancelTaskParams):
+ """Default task cancel handler"""
+ logger.info(f"Task canceled: {params.task}")
\ No newline at end of file
diff --git a/examples/tutorials/10_agentic/00_base/050_hello_oldowan/requirements.txt b/examples/tutorials/10_agentic/00_base/050_hello_oldowan/requirements.txt
new file mode 100644
index 000000000..7898056cd
--- /dev/null
+++ b/examples/tutorials/10_agentic/00_base/050_hello_oldowan/requirements.txt
@@ -0,0 +1,6 @@
+# AgentEx Python SDK
+agentex-py>=0.0.4,<0.1.0
+
+# Scale GenAI Platform Python SDK
+scale-gp
+scale-oldowan==0.3.17
\ No newline at end of file
diff --git a/examples/tutorials/10_agentic/00_base/060_hello_egp_services/.dockerignore b/examples/tutorials/10_agentic/00_base/060_hello_egp_services/.dockerignore
new file mode 100644
index 000000000..c3620f1bc
--- /dev/null
+++ b/examples/tutorials/10_agentic/00_base/060_hello_egp_services/.dockerignore
@@ -0,0 +1,46 @@
+# Python
+__pycache__/
+*.py[cod]
+*$py.class
+*.so
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# Environments
+.env**
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# IDE
+.idea/
+.vscode/
+*.swp
+*.swo
+
+# Git
+.git
+.gitignore
+
+# Misc
+.DS_Store
+
+# CodeArtifact configuration
+.codeartifact-pip-conf
\ No newline at end of file
diff --git a/examples/tutorials/10_agentic/00_base/060_hello_egp_services/Dockerfile b/examples/tutorials/10_agentic/00_base/060_hello_egp_services/Dockerfile
new file mode 100644
index 000000000..112b41a4e
--- /dev/null
+++ b/examples/tutorials/10_agentic/00_base/060_hello_egp_services/Dockerfile
@@ -0,0 +1,45 @@
+# syntax=docker/dockerfile:1.3
+FROM python:3.12-slim
+COPY --from=ghcr.io/astral-sh/uv:0.6.4 /uv /uvx /bin/
+
+# Install system dependencies
+RUN apt-get update && apt-get install -y \
+ htop \
+ vim \
+ curl \
+ tar \
+ python3-dev \
+ postgresql-client \
+ build-essential \
+ libpq-dev \
+ gcc \
+ cmake \
+ netcat-openbsd \
+ && apt-get clean \
+ && rm -rf /var/lib/apt/lists/*
+
+RUN uv pip install --system --upgrade pip setuptools wheel
+
+ENV UV_HTTP_TIMEOUT=1000
+
+# Copy just the requirements file to optimize caching
+COPY 060_hello_egp_services/requirements.txt /app/requirements.txt
+
+WORKDIR /app/
+
+# Install the required Python packages
+RUN --mount=type=secret,id=codeartifact-pip-conf,target=/etc/pip.conf \
+ export UV_INDEX_URL=$(grep -E "^index-url" /etc/pip.conf | cut -d'=' -f2- | xargs) && \
+ export UV_EXTRA_INDEX_URL=$(grep -E "^extra-index-url" /etc/pip.conf | cut -d'=' -f2- | xargs || echo "") && \
+ uv pip install --system -r requirements.txt
+
+# Copy the project code
+COPY 060_hello_egp_services/project /app/project
+
+WORKDIR /app/project
+
+# Set environment variables
+ENV PYTHONPATH=/app
+
+# Run the agent using uvicorn
+CMD ["uvicorn", "acp:acp", "--host", "0.0.0.0", "--port", "8000"]
\ No newline at end of file
diff --git a/examples/tutorials/10_agentic/00_base/060_hello_egp_services/README.md b/examples/tutorials/10_agentic/00_base/060_hello_egp_services/README.md
new file mode 100644
index 000000000..b194e9ab8
--- /dev/null
+++ b/examples/tutorials/10_agentic/00_base/060_hello_egp_services/README.md
@@ -0,0 +1,17 @@
+# Hello EGP Services Agent
+
+This is a simple example agent that demonstrates the basics of the Agent 2 Client Protocol (ACP) and the AgentEx framework with an integration to EGP Services.
+
+## For Development
+Navigate to `tutorials/10_agentic/00_base/060_hello_egp_services`
+
+```bash
+# Generate CodeArtifact configuration for building (run from repo root)
+./setup-build-codeartifact.sh
+
+# Set up local development environment
+uv venv --python 3.12
+source .venv/bin/activate
+
+uv pip install -r requirements.txt --prerelease=allow
+```
diff --git a/examples/tutorials/10_agentic/00_base/060_hello_egp_services/manifest.yaml b/examples/tutorials/10_agentic/00_base/060_hello_egp_services/manifest.yaml
new file mode 100644
index 000000000..5269bc918
--- /dev/null
+++ b/examples/tutorials/10_agentic/00_base/060_hello_egp_services/manifest.yaml
@@ -0,0 +1,116 @@
+# Agent Manifest Configuration
+# ---------------------------
+# This file defines how your agent should be built and deployed.
+
+# Build Configuration
+# ------------------
+# The build config defines what gets packaged into your agent's Docker image.
+# This same configuration is used whether building locally or remotely.
+#
+# When building:
+# 1. All files from include_paths are collected into a build context
+# 2. The context is filtered by dockerignore rules
+# 3. The Dockerfile uses this context to build your agent's image
+# 4. The image is pushed to a registry and used to run your agent
+build:
+ context:
+ # Root directory for the build context
+ root: ../ # Keep this as the default root
+
+ # Paths to include in the Docker build context
+ # Must include:
+ # - agentex-py: The core framework (needed for agent execution)
+ # - Your agent's directory (your custom agent code)
+ # These paths are collected and sent to the Docker daemon for building
+ include_paths:
+ - 060_hello_egp_services
+
+ # Path to your agent's Dockerfile
+ # This defines how your agent's image is built from the context
+ # Relative to the root directory
+ dockerfile: 060_hello_egp_services/Dockerfile
+
+ # Path to your agent's .dockerignore
+ # Filters unnecessary files from the build context
+ # Helps keep build context small and builds fast
+ dockerignore: 060_hello_egp_services/.dockerignore
+
+# Local Development Configuration
+# -----------------------------
+# Only used when running the agent locally
+local_development:
+ agent:
+ port: 8000 # Port where your local ACP server is running
+ host_address: host.docker.internal # Host address for Docker networking (host.docker.internal for Docker, localhost for direct)
+
+ # File paths for local development (relative to this manifest.yaml)
+ paths:
+ # Path to ACP server file
+ # Examples:
+ # project/acp.py (standard)
+ # src/server.py (custom structure)
+ # ../shared/acp.py (shared across projects)
+ # /absolute/path/acp.py (absolute path)
+ acp: project/acp.py
+
+# Agent Configuration
+# -----------------
+agent:
+ acp_type: agentic
+
+ # Unique name for your agent
+ # Used for task routing and monitoring
+ name: ab060-hello-egp-services
+
+ # Description of what your agent does
+ # Helps with documentation and discovery
+ description: An AgentEx agent that uses EGP Services to show the flexibility that comes from agents being just code
+
+ # Temporal workflow configuration
+ # Set enabled: true to use Temporal workflows for long-running tasks
+ temporal:
+ enabled: false
+
+ # Optional: Credentials mapping
+ # Maps Kubernetes secrets to environment variables
+ # Common credentials include:
+ # credentials:
+ # - env_var_name: OPENAI_API_KEY
+ # secret_name: openai-api-key
+ # secret_key: api-key
+
+ # Optional: Set environment variables for running your agent locally as well
+ # as for deployment later on
+ # env:
+ # - name: OPENAI_BASE_URL
+ # value: "https://api.openai.com/v1"
+ # - name: ACCOUNT_ID
+ # value: "your_account_id_here"
+
+# Deployment Configuration
+# -----------------------
+# Configuration for deploying your agent to Kubernetes clusters
+deployment:
+ # Container image configuration
+ image:
+ repository: "" # Update with your container registry
+ tag: "latest" # Default tag, should be versioned in production
+
+ # Global deployment settings that apply to all clusters
+ # These can be overridden in cluster-specific files (deploy/*.yaml)
+ global:
+ agent:
+ name: "ab060-hello-egp-services"
+ description: "An AgentEx agent that uses EGP Services to show the flexibilty that agents are just code"
+
+ # Default replica count
+ replicaCount: 1
+
+ # Default resource requirements
+ resources:
+ requests:
+ cpu: "500m"
+ memory: "1Gi"
+ limits:
+ cpu: "1000m"
+ memory: "2Gi"
diff --git a/examples/tutorials/10_agentic/00_base/060_hello_egp_services/project/__init__.py b/examples/tutorials/10_agentic/00_base/060_hello_egp_services/project/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/examples/tutorials/10_agentic/00_base/060_hello_egp_services/project/acp.py b/examples/tutorials/10_agentic/00_base/060_hello_egp_services/project/acp.py
new file mode 100644
index 000000000..93e4830ea
--- /dev/null
+++ b/examples/tutorials/10_agentic/00_base/060_hello_egp_services/project/acp.py
@@ -0,0 +1,264 @@
+import os
+import json
+import logging
+from datetime import datetime
+from jinja2 import Template
+from typing import List, Union
+
+from agentex.lib import adk
+from agentex.lib.sdk.fastacp.fastacp import FastACP
+from agentex.lib.types.acp import SendEventParams, CreateTaskParams, CancelTaskParams
+from agentex.lib.types.fastacp import AgenticACPConfig
+from agentex.lib.utils.model_utils import BaseModel
+from agentex.lib.types.llm_messages import Message, UserMessage, SystemMessage
+from agentex.types.text_content import TextContent
+from agentex.types.task_message_content import TaskMessageContent, ToolRequestContent, ToolResponseContent
+
+from egp_services.nodes import ToolGenerationNode, RetrieverNode, ChatGenerationNode
+from egp_services.nodes.generation.tool_generation import ToolConfig
+from oldowan.completions import ToolMessage, ChatCompletionMessage
+
+logger = logging.getLogger(__name__)
+
+assert os.environ.get("SGP_API_KEY") is not None, "SGP_API_KEY is not set"
+assert os.environ.get("SGP_ACCOUNT_ID") is not None, "SGP_ACCOUNT_ID is not set"
+
+# Create an ACP server
+acp = FastACP.create(
+ acp_type="agentic",
+ config=AgenticACPConfig(type="base"),
+)
+
+
+
+TOOL_GENERATION_NODE = ToolGenerationNode(
+ model="openai/gpt-4o",
+ tools=[
+ ToolConfig(name="nodes.ChatGenerationNode", init_kwargs={'llm_model' : 'anthropic/claude-3-7-sonnet-20250219'}),
+ ToolConfig(name="internal.google_search"),
+ ],
+ client_kwargs={"api_key": os.environ.get("SGP_API_KEY"), "account_id": os.environ.get("SGP_ACCOUNT_ID")}
+)
+
+CHAT_GENERATION_NODE = ChatGenerationNode(
+ model="openai/gpt-4o",
+ client_kwargs={"api_key": os.environ.get("SGP_API_KEY"), "account_id": os.environ.get("SGP_ACCOUNT_ID")}
+)
+
+RETRIEVER_NODE = RetrieverNode(
+ knowledge_base_id="bb9095d0-a93a-4353-a9f5-44c54d0060ac",
+ client_kwargs={"api_key": os.environ.get("SGP_API_KEY"), "account_id": os.environ.get("SGP_ACCOUNT_ID")}
+)
+
+CREATE_RETRIEVAL_QUERY_USER_PROMPT = """
+You are a helpful assistant that creates a retrieval query for a knowledge base based on the current state of the conversation.
+
+Here is the current state of the conversation:
+
+{% for message in messages %}
+{{ message.role }}: {{ message.content }}
+{% endfor %}
+
+Now create a retrieval query for the knowledge base.
+"""
+
+TOOL_ENABLED_ASSISTANT_SYSTEM_PROMPT = """
+You are a helpful assistant that uses tools to answer questions.
+
+Here is some context for the conversation:
+
+{% for chunk in chunks %}
+
+Chunk ID: {{ chunk.chunk_id }}
+
+{{ chunk.text }}
+
+{% endfor %}
+
+Good luck!
+"""
+
+TOOL_RESPONSE_ID_SUFFIX = "_response"
+
+class StateModel(BaseModel):
+ turn_number: int # The number of turns the agent has taken
+ messages: List[Message] # The messages the agent has seen
+
+# Converts an EGP services (Oldowan) message into a list of AgentEx task message content objects
+def convert_message_to_task_message(message: Union[ChatCompletionMessage, ToolMessage]) -> List[TaskMessageContent]:
+ task_messages = []
+ if isinstance(message, ChatCompletionMessage):
+ # Always return the ChatCompletionMessage first
+ if message.content is not None:
+ task_messages.append(TextContent(
+ author="agent",
+ content=message.content,
+ ))
+
+ # Then add on the tool calls
+ if message.tool_calls is not None:
+ for tool_call in message.tool_calls:
+ task_messages.append(ToolRequestContent(
+ author="agent",
+ name=tool_call.function.name,
+ arguments=json.loads(tool_call.function.arguments),
+ ))
+
+ # Finally add the tool response
+ elif isinstance(message, ToolMessage):
+ task_messages.append(ToolResponseContent(
+ author="agent",
+ content=message.content,
+ name=message.name,
+ ))
+ return task_messages
+
+
+async def handle_turn(task_id: str, state: StateModel, content: str):
+ """Shared function for handling a turn in the task"""
+ # Echo back the user's initial message
+ await adk.messages.create(
+ task_id=task_id,
+ content=TextContent(
+ author="user",
+ content=content,
+ ),
+ trace_id=task_id,
+ )
+
+ # Add the user's message to the state
+ state.messages.append(UserMessage(content=content))
+
+ # Create a span for the entire turn
+ async with adk.tracing.span(
+ trace_id=task_id,
+ name=f"Turn {state.turn_number}",
+ input=state,
+ ) as span:
+ # 1. Summarize the current state
+ retrieval_query_messages = [
+ UserMessage(content=Template(CREATE_RETRIEVAL_QUERY_USER_PROMPT).render(messages=state.messages)),
+ ]
+ async with adk.tracing.span(
+ trace_id=task_id,
+ name=f"Create Retrieval Query",
+ parent_id=span.id,
+ input={"retrieval_query_messages": retrieval_query_messages},
+ ) as retrieval_query_span:
+ retrieval_query = CHAT_GENERATION_NODE(
+ messages=retrieval_query_messages,
+ )
+ retrieval_query_span.end_time = datetime.now()
+ retrieval_query_span.output = {"retrieval_query": retrieval_query}
+
+ print(f"Retrieval query about to be sent: {retrieval_query} - class: {type(retrieval_query)} - class name: {type(retrieval_query).__name__}")
+
+ # 2. Do a retrieval function
+ async with adk.tracing.span(
+ trace_id=task_id,
+ name=f"Retrieve Chunks",
+ parent_id=span.id,
+ input={"retrieval_query": retrieval_query},
+ ) as retrieve_chunks_span:
+ chunks = RETRIEVER_NODE(query=retrieval_query.output, num_to_return=2)
+ retrieve_chunks_span.end_time = datetime.now()
+ retrieve_chunks_span.output = {"chunks": chunks}
+
+
+ # 3. Do a tool enabled generation
+ tool_enabled_llm_messages = [
+ SystemMessage(content=Template(TOOL_ENABLED_ASSISTANT_SYSTEM_PROMPT).render(chunks=chunks)),
+ *state.messages,
+ ]
+ # Trace the full node
+ async with adk.tracing.span(
+ trace_id=task_id,
+ name=f"Generate Response",
+ parent_id=span.id,
+ input={"tool_enabled_llm_messages": tool_enabled_llm_messages},
+ ) as generate_response_span:
+ messages = await TOOL_GENERATION_NODE.async_call(
+ messages=tool_enabled_llm_messages,
+ )
+
+ # For each message, trace it and send it to the client
+ for idx, message in enumerate(messages):
+ async with adk.tracing.span(
+ trace_id=task_id,
+ name=f"Message {idx}",
+ parent_id=generate_response_span.id,
+ input={"messages": tool_enabled_llm_messages + messages[:idx]},
+ ) as message_span:
+ task_messages = convert_message_to_task_message(message)
+ for task_message in task_messages:
+ await adk.messages.create(
+ task_id=task_id,
+ content=task_message,
+ trace_id=task_id,
+ parent_span_id=message_span.id,
+ )
+
+ message_span.output = {"message": message}
+
+ generate_response_span.end_time = datetime.now()
+ generate_response_span.output = {"messages": messages}
+
+ # Update the task state with the new messages
+ state.messages.extend(messages)
+ state.turn_number += 1
+
+ span.end_time = datetime.now()
+
+ return state
+
+
+@acp.on_task_create
+async def handle_task_create(params: CreateTaskParams):
+ # Upon task creation, we initialize the task state with a system message.
+ # This will be fetched by the `on_task_event_send` handler when each event is sent.
+ state = StateModel(
+ messages=[],
+ turn_number=0,
+ )
+ await adk.state.create(task_id=params.task.id, agent_id=params.agent.id, state=state)
+
+
+# Note: The return of this handler is required to be persisted by the Agentex Server
+@acp.on_task_event_send
+async def handle_message_send(params: SendEventParams):
+ #########################################################
+ # 1-3. These steps are all the same as the hello acp tutorial.
+ #########################################################
+
+ if not params.event.content:
+ return
+
+ if params.event.content.type != "text":
+ raise ValueError(f"Expected text message, got {params.event.content.type}")
+
+ if params.event.content.author != "user":
+ raise ValueError(f"Expected user message, got {params.event.content.author}")
+
+ # Retrieve the state that was initialized in the task-create handler.
+ task_state = await adk.state.get_by_task_and_agent(task_id=params.task.id, agent_id=params.agent.id)
+ state = StateModel.model_validate(task_state.state)
+
+ #########################################################
+ # 4. Call an LLM to respond to the user's message and send the response messages to the client.
+ #########################################################
+
+ state = await handle_turn(task_id=params.task.id, state=state, content=params.event.content.content)
+
+ # Update the state with the new messages
+ await adk.state.update(
+ task_id=params.task.id,
+ agent_id=params.agent.id,
+ state_id=task_state.id,
+ state=state,
+ trace_id=params.task.id,
+ )
+
+@acp.on_task_cancel
+async def handle_task_cancel(params: CancelTaskParams):
+ """Default task cancel handler"""
+ logger.info(f"Task canceled: {params.task}")
\ No newline at end of file
diff --git a/examples/tutorials/10_agentic/00_base/060_hello_egp_services/requirements.txt b/examples/tutorials/10_agentic/00_base/060_hello_egp_services/requirements.txt
new file mode 100644
index 000000000..0cd249f11
--- /dev/null
+++ b/examples/tutorials/10_agentic/00_base/060_hello_egp_services/requirements.txt
@@ -0,0 +1,7 @@
+# AgentEx Python SDK
+agentex-py>=0.0.4,<0.1.0
+
+# Scale GenAI Platform Python SDK
+scale-gp
+egp-services
+openai-agents!=0.0.17
\ No newline at end of file
diff --git a/examples/tutorials/10_agentic/00_base/080_batch_events/.dockerignore b/examples/tutorials/10_agentic/00_base/080_batch_events/.dockerignore
new file mode 100644
index 000000000..c4f7a8b4b
--- /dev/null
+++ b/examples/tutorials/10_agentic/00_base/080_batch_events/.dockerignore
@@ -0,0 +1,43 @@
+# Python
+__pycache__/
+*.py[cod]
+*$py.class
+*.so
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# Environments
+.env**
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# IDE
+.idea/
+.vscode/
+*.swp
+*.swo
+
+# Git
+.git
+.gitignore
+
+# Misc
+.DS_Store
\ No newline at end of file
diff --git a/examples/tutorials/10_agentic/00_base/080_batch_events/Dockerfile b/examples/tutorials/10_agentic/00_base/080_batch_events/Dockerfile
new file mode 100644
index 000000000..1c5138b5e
--- /dev/null
+++ b/examples/tutorials/10_agentic/00_base/080_batch_events/Dockerfile
@@ -0,0 +1,45 @@
+# syntax=docker/dockerfile:1.3
+FROM python:3.12-slim
+COPY --from=ghcr.io/astral-sh/uv:0.6.4 /uv /uvx /bin/
+
+# Install system dependencies
+RUN apt-get update && apt-get install -y \
+ htop \
+ vim \
+ curl \
+ tar \
+ python3-dev \
+ postgresql-client \
+ build-essential \
+ libpq-dev \
+ gcc \
+ cmake \
+ netcat-openbsd \
+ && apt-get clean \
+ && rm -rf /var/lib/apt/lists/*
+
+RUN uv pip install --system --upgrade pip setuptools wheel
+
+ENV UV_HTTP_TIMEOUT=1000
+
+# Copy just the requirements file to optimize caching
+COPY 10_agentic/00_base/080_batch_events/requirements.txt /app/10_agentic/00_base/080_batch_events/requirements.txt
+
+WORKDIR /app/10_agentic/00_base/080_batch_events
+
+# Install the required Python packages
+RUN --mount=type=secret,id=codeartifact-pip-conf,target=/etc/pip.conf \
+ export UV_INDEX_URL=$(grep -E "^index-url" /etc/pip.conf | cut -d'=' -f2- | xargs) && \
+ export UV_EXTRA_INDEX_URL=$(grep -E "^extra-index-url" /etc/pip.conf | cut -d'=' -f2- | xargs || echo "") && \
+ uv pip install --system -r requirements.txt
+
+# Copy the project code
+COPY 10_agentic/00_base/080_batch_events/project /app/10_agentic/00_base/080_batch_events/project
+
+WORKDIR /app/10_agentic/00_base/080_batch_events/project
+
+# Set environment variables
+ENV PYTHONPATH=/app
+
+# Run the agent using uvicorn
+CMD ["uvicorn", "acp:acp", "--host", "0.0.0.0", "--port", "8000"]
\ No newline at end of file
diff --git a/examples/tutorials/10_agentic/00_base/080_batch_events/README.md b/examples/tutorials/10_agentic/00_base/080_batch_events/README.md
new file mode 100644
index 000000000..a6058fb20
--- /dev/null
+++ b/examples/tutorials/10_agentic/00_base/080_batch_events/README.md
@@ -0,0 +1,174 @@
+# ab080-batch-events - AgentEx Batch Events Tutorial
+
+This is a tutorial demonstrating **batch event processing** and the **limitations of the base agentic ACP protocol**.
+
+## 🎯 Tutorial Purpose
+
+This tutorial demonstrates three key concepts:
+
+1. **Events and Cursor Usage**: How to use `adk.events.list_events()` with `last_processed_event_id` to track processing progress
+2. **Cursor Committing**: How to "commit" the cursor by updating `last_processed_event_id` in the AgentTaskTracker (see the sketch after this list)
+3. **Base ACP Limitations**: Real-world limitations when building distributed agents with the basic agentic ACP protocol
+
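+The sketch below illustrates concepts 1 and 2. Only `adk.events.list_events()` and the `last_processed_event_id` cursor are named by this tutorial; the exact parameters, the tracker helper functions, and the event `id` field below are illustrative placeholders rather than the real AgentEx API.
+
+```python
+from agentex.lib import adk
+
+
+async def get_tracker(task_id: str, agent_id: str):
+    """Placeholder: fetch the AgentTaskTracker holding last_processed_event_id."""
+    ...
+
+
+async def commit_cursor(tracker, last_event_id: str) -> None:
+    """Placeholder: persist last_processed_event_id back to the tracker."""
+    ...
+
+
+async def process_pending_events(task_id: str, agent_id: str) -> None:
+    tracker = await get_tracker(task_id, agent_id)
+
+    # 1. List every event that arrived after the committed cursor.
+    events = await adk.events.list_events(
+        task_id=task_id,
+        last_processed_event_id=tracker.last_processed_event_id,
+    )
+
+    # 2. Handle the whole batch; events queued while the agent was busy arrive together.
+    for event in events:
+        ...  # your per-event handling goes here
+
+    # 3. "Commit" the cursor so the next poll resumes after the last handled event.
+    if events:
+        await commit_cursor(tracker, events[-1].id)  # assumes each event exposes an id
+```
+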
+## ⚠️ Important Limitations
+
+### **Primary Limitation (Race Conditions)**
+The code includes this critical limitation:
+```python
+# LIMITATION - because this is not atomic, it is possible that two different
+# processes will read the value of READY and then both will try to set it to
+# PROCESSING. The only way to prevent this is locking, which is not supported
+# by the agentex server.
+```
+
+**Problem**: Multiple pods can simultaneously check status=READY and both proceed to process events, leading to duplicate work.
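+
+A minimal sketch of the race, using an illustrative `tracker_store` rather than the real AgentEx API (which is exactly what is missing: there is no atomic compare-and-set):
+
+```python
+async def try_claim_batch(tracker_store, task_id: str) -> bool:
+    """Non-atomic check-then-set; two pods can both return True for the same batch."""
+    tracker = await tracker_store.get(task_id)  # read: both pods may observe READY
+    if tracker.status != "READY":
+        return False
+    # Another pod can read READY right here, before the write below becomes visible,
+    # so both pods believe they own the batch and process the same events twice.
+    await tracker_store.update(task_id, status="PROCESSING")  # write: too late to exclude the other pod
+    return True
+```
+
+No ordering of the read and the write closes this window without a server-side lock or compare-and-set.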
+
+### **Additional Distributed System Limitations**
+
+1. **Server Crash Recovery**: If the agent server dies while processing events, there's no clean way to restart processing from where it left off. The status remains "PROCESSING" indefinitely.
+
+2. **Cursor Commit Failures**: If the server fails to commit the cursor (`last_processed_event_id`) after writing a message, retrying will lead to duplicate messages being written for the same events.
+
+3. **No Transactional Guarantees**: There's no way to atomically update both the message store and the cursor position, leading to potential inconsistencies.
+
+4. **Base ACP Protocol Constraints**: These issues cannot be solved with the simple agentic base ACP protocol alone - they require more sophisticated coordination mechanisms.
+
+## 🔧 Solutions
+
+The limitations above highlight why more advanced patterns are needed for production systems:
+
+**Options for Production**:
+1. **Database Locking**: Implement your own database locking mechanism and provide the agent with database credentials
+2. **Temporal Workflows**: Use Temporal to ensure only one workflow execution processes events at a time (eliminates the need for manual locking)
+3. **Message Queues**: Use external queue systems with built-in exactly-once delivery guarantees
+
+## 🎯 Batching Demonstration
+
+Despite the limitations, this tutorial effectively demonstrates **event batching behavior**:
+
+- Events arriving during the 2-second processing delay get queued
+- When processing completes, all queued events are processed together in the next batch
+- This shows how slow agents can efficiently handle bursts of events
+
+## What You'll Learn
+
+- **Tasks**: A task is a grouping mechanism for related messages. Think of it as a conversation thread or a session.
+- **Messages**: Messages are communication objects within a task. They can contain text, data, or instructions.
+- **ACP Events**: The agent responds to four main events:
+ - `task_received`: When a new task is created
+ - `task_message_received`: When a message is sent within a task
+ - `task_approved`: When a task is approved
+ - `task_canceled`: When a task is canceled
+
+## Running the Agent
+
+1. Run the agent locally:
+```bash
+agentex agents run --manifest manifest.yaml
+```
+
+The agent will start on port 8000 and print messages whenever it receives any of the ACP events.
+
+## What's Inside
+
+This template:
+- Sets up a basic ACP server
+- Handles each of the required ACP events with simple print statements
+- Provides a foundation for building more complex agents
+
+## Next Steps
+
+For more advanced agent development, check out the AgentEx tutorials:
+
+- **Tutorials 00-08**: Learn about building synchronous agents with ACP
+- **Tutorials 09-10**: Learn how to use Temporal to power asynchronous agents
+ - Tutorial 09: Basic Temporal workflow setup
+ - Tutorial 10: Advanced Temporal patterns and best practices
+
+These tutorials will help you understand:
+- How to handle long-running tasks
+- Implementing state machines
+- Managing complex workflows
+- Best practices for async agent development
+
+## The Manifest File
+
+The `manifest.yaml` file is your agent's configuration file. It defines:
+- How your agent should be built and packaged
+- What files are included in your agent's Docker image
+- Your agent's name and description
+- Local development settings (like the port your agent runs on)
+
+This file is essential for both local development and deployment of your agent.
+
+## Project Structure
+
+```
+080_batch_events/
+├── project/ # Your agent's code
+│ ├── __init__.py
+│ └── acp.py # ACP server and event handlers
+├── Dockerfile # Container definition
+├── manifest.yaml # Deployment config
+└── requirements.txt # Dependencies
+```
+
+## Development
+
+1. **Customize Event Handlers**
+ - Modify the handlers in `acp.py` to implement your agent's logic (a minimal handler sketch follows at the end of this section)
+ - Add your own tools and capabilities
+ - Implement custom state management
+
+2. **Add Dependencies**
+ - Add required packages to `requirements.txt`
+ - Update the manifest with any needed credentials
+
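+As a starting point, here is a minimal sketch of a customized event handler, mirroring the handler pattern used in the `acp.py` files of the other tutorials; the echo reply is just a placeholder for your own logic:
+
+```python
+from agentex.lib import adk
+from agentex.lib.sdk.fastacp.fastacp import FastACP
+from agentex.lib.types.acp import SendEventParams
+from agentex.lib.types.fastacp import AgenticACPConfig
+from agentex.types.text_content import TextContent
+
+# Create the ACP server, same pattern as project/acp.py
+acp = FastACP.create(acp_type="agentic", config=AgenticACPConfig(type="base"))
+
+
+@acp.on_task_event_send
+async def handle_message_send(params: SendEventParams):
+    # Replace this echo with your own logic (LLM calls, tools, state updates, ...).
+    if not params.event.content or params.event.content.type != "text":
+        return
+    await adk.messages.create(
+        task_id=params.task.id,
+        trace_id=params.task.id,
+        content=TextContent(
+            author="agent",
+            content=f"You said: {params.event.content.content}",
+        ),
+    )
+```
+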
+## Local Development
+
+1. **Install AgentEx**
+```bash
+cd agentex-py
+uv venv
+source .venv/bin/activate
+uv sync
+```
+
+2. **Start the Agentex Server**
+```bash
+# Navigate to the backend directory
+cd agentex
+
+# Start all services using Docker Compose
+make dev
+
+# Optional: In a separate terminal, use lazydocker for a better UI (everything should say "healthy")
+lzd
+```
+
+3. **Run your agent**
+```bash
+# From this directory
+export ENVIRONMENT=development && agentex agents run --manifest manifest.yaml
+```
+
+4. **Interact with your agent**
+
+Option 0: CLI (deprecated - to be replaced once a new CLI is implemented - please use the web UI for now!)
+```bash
+# Submit a task via CLI
+agentex tasks submit --agent ab080-batch-events --task "Your task here"
+```
+
+Option 1: Web UI
+```bash
+# Start the local web interface
+cd agentex-web
+make dev
+
+# Then open http://localhost:3000 in your browser to chat with your agent
+```
+
+## Development Tips
+
+1. **Local Testing**
+- Set environment variables in project/.env for any required credentials
\ No newline at end of file
diff --git a/examples/tutorials/10_agentic/00_base/080_batch_events/manifest.yaml b/examples/tutorials/10_agentic/00_base/080_batch_events/manifest.yaml
new file mode 100644
index 000000000..857576875
--- /dev/null
+++ b/examples/tutorials/10_agentic/00_base/080_batch_events/manifest.yaml
@@ -0,0 +1,118 @@
+# Agent Manifest Configuration
+# ---------------------------
+# This file defines how your agent should be built and deployed.
+
+# Build Configuration
+# ------------------
+# The build config defines what gets packaged into your agent's Docker image.
+# This same configuration is used whether building locally or remotely.
+#
+# When building:
+# 1. All files from include_paths are collected into a build context
+# 2. The context is filtered by dockerignore rules
+# 3. The Dockerfile uses this context to build your agent's image
+# 4. The image is pushed to a registry and used to run your agent
+build:
+ context:
+ # Root directory for the build context
+ root: ../../../ # Keep this as the default root
+
+ # Paths to include in the Docker build context
+ # Must include:
+ # - agentex-py: The core framework (needed for agent execution)
+ # - Your agent's directory (your custom agent code)
+ # These paths are collected and sent to the Docker daemon for building
+ include_paths:
+ - agentex-py
+ - 10_agentic/00_base/080_batch_events
+
+ # Path to your agent's Dockerfile
+ # This defines how your agent's image is built from the context
+ # Relative to the root directory
+ dockerfile: 10_agentic/00_base/080_batch_events/Dockerfile
+
+ # Path to your agent's .dockerignore
+ # Filters unnecessary files from the build context
+ # Helps keep build context small and builds fast
+ dockerignore: 10_agentic/00_base/080_batch_events/.dockerignore
+
+
+# Local Development Configuration
+# -----------------------------
+# Only used when running the agent locally
+local_development:
+ agent:
+ port: 8000 # Port where your local ACP server is running
+ host_address: host.docker.internal # Host address for Docker networking (host.docker.internal for Docker, localhost for direct)
+
+ # File paths for local development (relative to this manifest.yaml)
+ paths:
+ # Path to ACP server file
+ # Examples:
+ # project/acp.py (standard)
+ # src/server.py (custom structure)
+ # ../shared/acp.py (shared across projects)
+ # /absolute/path/acp.py (absolute path)
+ acp: project/acp.py
+
+
+# Agent Configuration
+# -----------------
+agent:
+ acp_type: agentic
+
+ # Unique name for your agent
+ # Used for task routing and monitoring
+ name: ab080-batch-events
+
+ # Description of what your agent does
+ # Helps with documentation and discovery
+ description: An AgentEx agent
+
+ # Temporal workflow configuration
+ # Set enabled: true to use Temporal workflows for long-running tasks
+ temporal:
+ enabled: false
+
+ # Optional: Credentials mapping
+ # Maps Kubernetes secrets to environment variables
+ # Common credentials include:
+ # credentials:
+ # - env_var_name: OPENAI_API_KEY
+ # secret_name: openai-api-key
+ # secret_key: api-key
+
+ # Optional: Set environment variables for running your agent locally as well
+ # as for deployment later on
+ # env:
+ # OPENAI_API_KEY: "your_api_key_here"