Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
75 changes: 66 additions & 9 deletions src/conductor/providers/copilot.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,11 +17,11 @@
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any

from conductor.exceptions import ProviderError
from conductor.exceptions import ProviderError, ValidationError
from conductor.providers.base import AgentOutput, AgentProvider, EventCallback

if TYPE_CHECKING:
from conductor.config.schema import AgentDef
from conductor.config.schema import AgentDef, OutputField

logger = logging.getLogger(__name__)

Expand Down Expand Up @@ -187,6 +187,7 @@ def __init__(
self._idle_recovery_config = idle_recovery_config or IdleRecoveryConfig()
self._temperature = temperature
self._default_max_agent_iterations = max_agent_iterations
self._max_schema_depth = 10 # Max nesting depth for recursive schema building
self._session_ids: dict[str, str] = {}
self._resume_session_ids: dict[str, str] = {}
self._interrupted_session: Any = None
Expand Down Expand Up @@ -505,13 +506,7 @@ async def _execute_sdk_call(
# Build schema description for output schema (used in prompt and recovery)
schema_for_prompt: dict[str, Any] | None = None
if agent.output:
schema_for_prompt = {
name: {
"type": field.type,
"description": field.description or f"The {name} field",
}
for name, field in agent.output.items()
}
schema_for_prompt = self._build_prompt_schema(agent.output)
schema_desc = json.dumps(schema_for_prompt, indent=2)
full_prompt += (
f"\n\n**IMPORTANT: You MUST respond with a JSON object matching this schema:**\n"
Expand Down Expand Up @@ -1082,6 +1077,68 @@ def _build_parse_recovery_prompt(
f"than the raw JSON object."
)

def _build_prompt_schema(
self, schema: dict[str, OutputField], depth: int = 0
) -> dict[str, Any]:
"""Build a prompt-facing schema description from OutputField definitions."""
if depth > self._max_schema_depth:
raise ValidationError(
f"Schema nesting depth exceeds maximum of {self._max_schema_depth} levels",
suggestion="Simplify your output schema to reduce nesting depth",
)
return {
field_name: self._build_prompt_field_schema(field_name, field_def, depth=depth)
for field_name, field_def in schema.items()
}

def _build_prompt_field_schema(
self,
field_name: str,
field_def: OutputField,
depth: int = 0,
) -> dict[str, Any]:
"""Build a prompt-facing schema description for a named field."""
schema: dict[str, Any] = {
"type": field_def.type,
"description": field_def.description or f"The {field_name} field",
}

if field_def.type == "object" and field_def.properties:
schema["properties"] = self._build_prompt_schema(
field_def.properties, depth=depth + 1
)
schema["required"] = list(field_def.properties.keys())

if field_def.type == "array" and field_def.items:
schema["items"] = self._build_prompt_item_schema(field_def.items, depth=depth + 1)

return schema

def _build_prompt_item_schema(self, field_def: OutputField, depth: int = 0) -> dict[str, Any]:
"""Build a prompt-facing schema description for an array item."""
if depth > self._max_schema_depth:
raise ValidationError(
f"Schema nesting depth exceeds maximum of {self._max_schema_depth} levels",
suggestion="Simplify your output schema to reduce nesting depth",
)
schema: dict[str, Any] = {
"type": field_def.type,
}

if field_def.description:
schema["description"] = field_def.description

if field_def.type == "object" and field_def.properties:
schema["properties"] = self._build_prompt_schema(
field_def.properties, depth=depth + 1
)
schema["required"] = list(field_def.properties.keys())

if field_def.type == "array" and field_def.items:
schema["items"] = self._build_prompt_item_schema(field_def.items, depth=depth + 1)

return schema

def _log_event_verbose(self, event_type: str, event: Any, full_mode: bool) -> None:
"""Log SDK events in verbose mode for progress visibility.

Expand Down
117 changes: 116 additions & 1 deletion tests/test_providers/test_copilot.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@

from conductor.config.schema import AgentDef
from conductor.exceptions import ProviderError
from conductor.providers.copilot import CopilotProvider, RetryConfig
from conductor.providers.copilot import CopilotProvider, RetryConfig, SDKResponse


def stub_handler(agent: AgentDef, prompt: str, context: dict[str, Any]) -> dict[str, Any]:
Expand Down Expand Up @@ -383,6 +383,121 @@ def mock_handler(agent, prompt, context):
assert len(provider.get_retry_history()) == 0


class TestPromptSchemaGeneration:
    """Tests for prompt-facing schema generation."""

    def test_build_prompt_schema_recurses_through_nested_fields(self) -> None:
        """Nested object properties and array items are preserved in prompt schema."""
        provider = CopilotProvider(mock_handler=stub_handler)
        # Schema exercising every recursion path: an object whose properties
        # include an array of strings, an array of objects (which themselves
        # contain an array of strings), plus a flat top-level string field.
        agent = AgentDef(
            name="planner",
            model="gpt-4",
            prompt="Plan the work",
            output={
                "plan": {
                    "type": "object",
                    "description": "Structured research plan",
                    "properties": {
                        "questions": {
                            "type": "array",
                            "items": {"type": "string"},
                        },
                        "areas": {
                            "type": "array",
                            "items": {
                                "type": "object",
                                "properties": {
                                    "name": {"type": "string"},
                                    "focus": {
                                        "type": "array",
                                        "items": {"type": "string"},
                                    },
                                },
                            },
                        },
                        "sources": {
                            "type": "array",
                            "items": {"type": "string"},
                        },
                    },
                },
                "summary": {
                    "type": "string",
                },
            },
        )

        schema = provider._build_prompt_schema(agent.output or {})

        # Nested types survive at every level of the generated schema.
        assert schema["plan"]["type"] == "object"
        assert schema["plan"]["properties"]["questions"]["type"] == "array"
        assert schema["plan"]["properties"]["questions"]["items"]["type"] == "string"
        assert schema["plan"]["properties"]["areas"]["items"]["properties"]["name"]["type"] == "string"
        assert (
            schema["plan"]["properties"]["areas"]["items"]["properties"]["focus"]["items"]["type"]
            == "string"
        )
        # All declared object properties are marked required, in declaration order.
        assert schema["plan"]["required"] == ["questions", "areas", "sources"]
        # Fields without an explicit description get the generated fallback.
        assert schema["summary"]["description"] == "The summary field"

    @pytest.mark.asyncio
    async def test_execute_appends_nested_schema_to_prompt(self, monkeypatch: pytest.MonkeyPatch) -> None:
        """The actual prompt sent to Copilot includes nested schema details."""
        provider = CopilotProvider(retry_config=RetryConfig(max_attempts=1))
        agent = AgentDef(
            name="planner",
            model="gpt-4",
            prompt="Plan the work",
            output={
                "plan": {
                    "type": "object",
                    "properties": {
                        "questions": {"type": "array"},
                        "areas": {"type": "array"},
                        "sources": {"type": "array"},
                    },
                },
                "summary": {"type": "string"},
            },
        )

        # Minimal stand-ins for the SDK session/client so execute() never
        # touches a real Copilot backend.
        class _FakeSession:
            session_id = "session-123"

            async def disconnect(self) -> None:
                return None

        class _FakeClient:
            async def create_session(self, **kwargs: Any) -> _FakeSession:
                return _FakeSession()

        # Holds the prompt that execute() sends through _send_and_wait.
        captured_prompt: dict[str, str] = {}

        async def _noop() -> None:
            return None

        async def _fake_send_and_wait(*args: Any, **kwargs: Any) -> SDKResponse:
            # args[0] is the session; args[1] is the fully rendered prompt.
            # Return content matching the schema so parsing succeeds.
            captured_prompt["value"] = args[1]
            return SDKResponse(
                content='{"plan":{"questions":[],"areas":[],"sources":[]},"summary":"done"}'
            )

        # Bypass real client startup and intercept the outgoing prompt.
        provider._client = _FakeClient()
        monkeypatch.setattr(provider, "_ensure_client_started", _noop)
        monkeypatch.setattr(provider, "_send_and_wait", _fake_send_and_wait)

        await provider.execute(agent=agent, context={}, rendered_prompt="Plan the work")

        # The nested schema description must appear verbatim in the prompt.
        prompt = captured_prompt["value"]
        assert '"plan"' in prompt
        assert '"properties"' in prompt
        assert '"questions"' in prompt
        assert '"areas"' in prompt
        assert '"sources"' in prompt
        assert '"required"' in prompt
        assert "Return ONLY the JSON object, no other text." in prompt


class TestRetryConfig:
"""Tests for RetryConfig dataclass."""

Expand Down