From fd4beb8b3761a2d2df65fc9ad9cf8c7c92af6ef0 Mon Sep 17 00:00:00 2001 From: Shaqayeq Date: Thu, 12 Mar 2026 00:39:58 -0700 Subject: [PATCH 01/14] Add Python SDK public API and examples Co-authored-by: Codex --- sdk/python/docs/api-reference.md | 180 ++++ sdk/python/docs/faq.md | 40 +- sdk/python/docs/getting-started.md | 37 +- .../01_quickstart_constructor/async.py | 30 + .../01_quickstart_constructor/sync.py | 20 + sdk/python/examples/02_turn_run/async.py | 37 + sdk/python/examples/02_turn_run/sync.py | 28 + .../examples/03_turn_stream_events/async.py | 44 + .../examples/03_turn_stream_events/sync.py | 36 + .../examples/04_models_and_metadata/async.py | 28 + .../examples/04_models_and_metadata/sync.py | 20 + .../examples/05_existing_thread/async.py | 32 + .../examples/05_existing_thread/sync.py | 23 + .../06_thread_lifecycle_and_controls/async.py | 70 ++ .../06_thread_lifecycle_and_controls/sync.py | 63 ++ .../examples/07_image_and_text/async.py | 35 + sdk/python/examples/07_image_and_text/sync.py | 26 + .../examples/08_local_image_and_text/async.py | 38 + .../examples/08_local_image_and_text/sync.py | 29 + sdk/python/examples/09_async_parity/sync.py | 23 + .../10_error_handling_and_retry/async.py | 91 ++ .../10_error_handling_and_retry/sync.py | 40 + sdk/python/examples/11_cli_mini_app/async.py | 96 +++ sdk/python/examples/11_cli_mini_app/sync.py | 89 ++ .../12_turn_params_kitchen_sink/async.py | 75 ++ .../12_turn_params_kitchen_sink/sync.py | 67 ++ .../13_model_select_and_turn_params/async.py | 117 +++ .../13_model_select_and_turn_params/sync.py | 108 +++ sdk/python/examples/README.md | 78 ++ sdk/python/examples/_bootstrap.py | 50 ++ sdk/python/examples/assets/sample_scene.png | Bin 0 -> 3724 bytes sdk/python/notebooks/sdk_walkthrough.ipynb | 535 ++++++++++++ sdk/python/src/codex_app_server/__init__.py | 109 ++- .../src/codex_app_server/async_client.py | 208 +++++ .../codex_app_server/generated/v2_types.py | 20 +- sdk/python/src/codex_app_server/public_api.py | 
795 ++++++++++++++++++ .../src/codex_app_server/public_types.py | 41 + .../tests/test_async_client_behavior.py | 64 ++ .../tests/test_public_api_runtime_behavior.py | 286 +++++++ .../tests/test_public_api_signatures.py | 211 +++++ .../tests/test_real_app_server_integration.py | 237 ++++++ 41 files changed, 4124 insertions(+), 32 deletions(-) create mode 100644 sdk/python/docs/api-reference.md create mode 100644 sdk/python/examples/01_quickstart_constructor/async.py create mode 100644 sdk/python/examples/01_quickstart_constructor/sync.py create mode 100644 sdk/python/examples/02_turn_run/async.py create mode 100644 sdk/python/examples/02_turn_run/sync.py create mode 100644 sdk/python/examples/03_turn_stream_events/async.py create mode 100644 sdk/python/examples/03_turn_stream_events/sync.py create mode 100644 sdk/python/examples/04_models_and_metadata/async.py create mode 100644 sdk/python/examples/04_models_and_metadata/sync.py create mode 100644 sdk/python/examples/05_existing_thread/async.py create mode 100644 sdk/python/examples/05_existing_thread/sync.py create mode 100644 sdk/python/examples/06_thread_lifecycle_and_controls/async.py create mode 100644 sdk/python/examples/06_thread_lifecycle_and_controls/sync.py create mode 100644 sdk/python/examples/07_image_and_text/async.py create mode 100644 sdk/python/examples/07_image_and_text/sync.py create mode 100644 sdk/python/examples/08_local_image_and_text/async.py create mode 100644 sdk/python/examples/08_local_image_and_text/sync.py create mode 100644 sdk/python/examples/09_async_parity/sync.py create mode 100644 sdk/python/examples/10_error_handling_and_retry/async.py create mode 100644 sdk/python/examples/10_error_handling_and_retry/sync.py create mode 100644 sdk/python/examples/11_cli_mini_app/async.py create mode 100644 sdk/python/examples/11_cli_mini_app/sync.py create mode 100644 sdk/python/examples/12_turn_params_kitchen_sink/async.py create mode 100644 
sdk/python/examples/12_turn_params_kitchen_sink/sync.py create mode 100644 sdk/python/examples/13_model_select_and_turn_params/async.py create mode 100644 sdk/python/examples/13_model_select_and_turn_params/sync.py create mode 100644 sdk/python/examples/README.md create mode 100644 sdk/python/examples/_bootstrap.py create mode 100644 sdk/python/examples/assets/sample_scene.png create mode 100644 sdk/python/notebooks/sdk_walkthrough.ipynb create mode 100644 sdk/python/src/codex_app_server/async_client.py create mode 100644 sdk/python/src/codex_app_server/public_api.py create mode 100644 sdk/python/src/codex_app_server/public_types.py create mode 100644 sdk/python/tests/test_async_client_behavior.py create mode 100644 sdk/python/tests/test_public_api_runtime_behavior.py create mode 100644 sdk/python/tests/test_public_api_signatures.py create mode 100644 sdk/python/tests/test_real_app_server_integration.py diff --git a/sdk/python/docs/api-reference.md b/sdk/python/docs/api-reference.md new file mode 100644 index 00000000000..597b069707f --- /dev/null +++ b/sdk/python/docs/api-reference.md @@ -0,0 +1,180 @@ +# Codex App Server SDK — API Reference + +Public surface of `codex_app_server` for app-server v2. + +This SDK surface is experimental. The current implementation intentionally allows only one active `Turn.stream()` or `Turn.run()` consumer per client instance at a time. 
+ +## Package Entry + +```python +from codex_app_server import ( + Codex, + AsyncCodex, + Thread, + AsyncThread, + Turn, + AsyncTurn, + TurnResult, + InitializeResult, + Input, + InputItem, + TextInput, + ImageInput, + LocalImageInput, + SkillInput, + MentionInput, + ThreadItem, + TurnStatus, +) +``` + +- Version: `codex_app_server.__version__` +- Requires Python >= 3.10 + +## Codex (sync) + +```python +Codex(config: AppServerConfig | None = None) +``` + +Properties/methods: + +- `metadata -> InitializeResult` +- `close() -> None` +- `thread_start(*, approval_policy=None, base_instructions=None, config=None, cwd=None, developer_instructions=None, ephemeral=None, model=None, model_provider=None, personality=None, sandbox=None) -> Thread` +- `thread_list(*, archived=None, cursor=None, cwd=None, limit=None, model_providers=None, sort_key=None, source_kinds=None) -> ThreadListResponse` +- `thread_resume(thread_id: str, *, approval_policy=None, base_instructions=None, config=None, cwd=None, developer_instructions=None, model=None, model_provider=None, personality=None, sandbox=None) -> Thread` +- `thread_fork(thread_id: str, *, approval_policy=None, base_instructions=None, config=None, cwd=None, developer_instructions=None, model=None, model_provider=None, sandbox=None) -> Thread` +- `thread_archive(thread_id: str) -> ThreadArchiveResponse` +- `thread_unarchive(thread_id: str) -> Thread` +- `models(*, include_hidden: bool = False) -> ModelListResponse` + +Context manager: + +```python +with Codex() as codex: + ... 
+``` + +## AsyncCodex (async parity) + +```python +AsyncCodex(config: AppServerConfig | None = None) +``` + +Properties/methods: + +- `metadata -> InitializeResult` +- `close() -> Awaitable[None]` +- `thread_start(*, approval_policy=None, base_instructions=None, config=None, cwd=None, developer_instructions=None, ephemeral=None, model=None, model_provider=None, personality=None, sandbox=None) -> Awaitable[AsyncThread]` +- `thread_list(*, archived=None, cursor=None, cwd=None, limit=None, model_providers=None, sort_key=None, source_kinds=None) -> Awaitable[ThreadListResponse]` +- `thread_resume(thread_id: str, *, approval_policy=None, base_instructions=None, config=None, cwd=None, developer_instructions=None, model=None, model_provider=None, personality=None, sandbox=None) -> Awaitable[AsyncThread]` +- `thread_fork(thread_id: str, *, approval_policy=None, base_instructions=None, config=None, cwd=None, developer_instructions=None, model=None, model_provider=None, sandbox=None) -> Awaitable[AsyncThread]` +- `thread_archive(thread_id: str) -> Awaitable[ThreadArchiveResponse]` +- `thread_unarchive(thread_id: str) -> Awaitable[AsyncThread]` +- `models(*, include_hidden: bool = False) -> Awaitable[ModelListResponse]` + +Async context manager: + +```python +async with AsyncCodex() as codex: + ... +``` + +## Thread / AsyncThread + +`Thread` and `AsyncThread` share the same shape and intent. 
+ +### Thread + +- `turn(input: Input, *, approval_policy=None, cwd=None, effort=None, model=None, output_schema=None, personality=None, sandbox_policy=None, summary=None) -> Turn` +- `read(*, include_turns: bool = False) -> ThreadReadResponse` +- `set_name(name: str) -> ThreadSetNameResponse` +- `compact() -> ThreadCompactStartResponse` + +### AsyncThread + +- `turn(input: Input, *, approval_policy=None, cwd=None, effort=None, model=None, output_schema=None, personality=None, sandbox_policy=None, summary=None) -> Awaitable[AsyncTurn]` +- `read(*, include_turns: bool = False) -> Awaitable[ThreadReadResponse]` +- `set_name(name: str) -> Awaitable[ThreadSetNameResponse]` +- `compact() -> Awaitable[ThreadCompactStartResponse]` + +## Turn / AsyncTurn + +### Turn + +- `steer(input: Input) -> TurnSteerResponse` +- `interrupt() -> TurnInterruptResponse` +- `stream() -> Iterator[Notification]` +- `run() -> TurnResult` + +Behavior notes: + +- `stream()` and `run()` are exclusive per client instance in the current experimental build +- starting a second turn consumer on the same `Codex` instance raises `RuntimeError` + +### AsyncTurn + +- `steer(input: Input) -> Awaitable[TurnSteerResponse]` +- `interrupt() -> Awaitable[TurnInterruptResponse]` +- `stream() -> AsyncIterator[Notification]` +- `run() -> Awaitable[TurnResult]` + +Behavior notes: + +- `stream()` and `run()` are exclusive per client instance in the current experimental build +- starting a second turn consumer on the same `AsyncCodex` instance raises `RuntimeError` + +## TurnResult + +```python +@dataclass +class TurnResult: + thread_id: str + turn_id: str + status: TurnStatus + error: TurnError | None + text: str + items: list[ThreadItem] + usage: ThreadTokenUsageUpdatedNotification | None +``` + +## Inputs + +```python +@dataclass class TextInput: text: str +@dataclass class ImageInput: url: str +@dataclass class LocalImageInput: path: str +@dataclass class SkillInput: name: str; path: str +@dataclass class 
MentionInput: name: str; path: str + +InputItem = TextInput | ImageInput | LocalImageInput | SkillInput | MentionInput +Input = list[InputItem] | InputItem +``` + +## Retry + errors + +```python +from codex_app_server import ( + retry_on_overload, + JsonRpcError, + MethodNotFoundError, + InvalidParamsError, + ServerBusyError, + is_retryable_error, +) +``` + +- `retry_on_overload(...)` retries transient overload errors with exponential backoff + jitter. +- `is_retryable_error(exc)` checks if an exception is transient/overload-like. + +## Example + +```python +from codex_app_server import Codex, TextInput + +with Codex() as codex: + thread = codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) + result = thread.turn(TextInput("Say hello in one sentence.")).run() + print(result.text) +``` diff --git a/sdk/python/docs/faq.md b/sdk/python/docs/faq.md index ebfd2ddad28..7a402b59d4d 100644 --- a/sdk/python/docs/faq.md +++ b/sdk/python/docs/faq.md @@ -8,24 +8,42 @@ ## `run()` vs `stream()` -- `Turn.run()` is the easiest path. It consumes events until completion and returns `TurnResult`. -- `Turn.stream()` yields raw notifications (`Notification`) so you can react event-by-event. +- `Turn.run()` / `AsyncTurn.run()` is the easiest path. It consumes events until completion and returns `TurnResult`. +- `Turn.stream()` / `AsyncTurn.stream()` yields raw notifications (`Notification`) so you can react event-by-event. Choose `run()` for most apps. Choose `stream()` for progress UIs, custom timeout logic, or custom parsing. ## Sync vs async clients -- `Codex` is the minimal sync SDK and best default. -- `AsyncAppServerClient` wraps the sync transport with `asyncio.to_thread(...)` for async-friendly call sites. +- `Codex` is the sync public API. +- `AsyncCodex` is an async replica of the same public API shape. If your app is not already async, stay with `Codex`. 
-## `thread(...)` vs `thread_resume(...)` +## Public kwargs are snake_case -- `codex.thread(thread_id)` only binds a local helper to an existing thread ID. -- `codex.thread_resume(thread_id, ...)` performs a `thread/resume` RPC and can apply overrides (model, instructions, sandbox, etc.). +Public API keyword names are snake_case. The SDK still maps them to wire camelCase under the hood. -Use `thread(...)` for simple continuation. Use `thread_resume(...)` when you need explicit resume semantics or override fields. +If you are migrating older code, update these names: + +- `approvalPolicy` -> `approval_policy` +- `baseInstructions` -> `base_instructions` +- `developerInstructions` -> `developer_instructions` +- `modelProvider` -> `model_provider` +- `modelProviders` -> `model_providers` +- `sortKey` -> `sort_key` +- `sourceKinds` -> `source_kinds` +- `outputSchema` -> `output_schema` +- `sandboxPolicy` -> `sandbox_policy` + +## Why only `thread_start(...)` and `thread_resume(...)`? + +The public API keeps only explicit lifecycle calls: + +- `thread_start(...)` to create new threads +- `thread_resume(thread_id, ...)` to continue existing threads + +This avoids duplicate ways to do the same operation and keeps behavior explicit. ## Why does constructor fail? @@ -61,7 +79,7 @@ python scripts/update_sdk_artifacts.py \ A turn is complete only when `turn/completed` arrives for that turn ID. - `run()` waits for this automatically. -- With `stream()`, make sure you keep consuming notifications until completion. +- With `stream()`, keep consuming notifications until completion. ## How do I retry safely? @@ -72,6 +90,6 @@ Do not blindly retry all errors. For `InvalidParamsError` or `MethodNotFoundErro ## Common pitfalls - Starting a new thread for every prompt when you wanted continuity. -- Forgetting to `close()` (or not using `with Codex() as codex:`). +- Forgetting to `close()` (or not using context managers). - Ignoring `TurnResult.status` and `TurnResult.error`. 
-- Mixing SDK input classes with raw dicts incorrectly in minimal API paths. +- Mixing SDK input classes with raw dicts incorrectly. diff --git a/sdk/python/docs/getting-started.md b/sdk/python/docs/getting-started.md index 9108902b38b..258e2636259 100644 --- a/sdk/python/docs/getting-started.md +++ b/sdk/python/docs/getting-started.md @@ -1,6 +1,8 @@ # Getting Started -This is the fastest path from install to a multi-turn thread using the minimal SDK surface. +This is the fastest path from install to a multi-turn thread using the public SDK surface. + +The SDK is experimental. Treat the API, bundled runtime strategy, and packaging details as unstable until the first public release. ## 1) Install @@ -15,9 +17,9 @@ Requirements: - Python `>=3.10` - installed `codex-cli-bin` runtime package, or an explicit `codex_bin` override -- Local Codex auth/session configured +- local Codex auth/session configured -## 2) Run your first turn +## 2) Run your first turn (sync) ```python from codex_app_server import Codex, TextInput @@ -25,7 +27,7 @@ from codex_app_server import Codex, TextInput with Codex() as codex: print("Server:", codex.metadata.server_name, codex.metadata.server_version) - thread = codex.thread_start(model="gpt-5") + thread = codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) result = thread.turn(TextInput("Say hello in one sentence.")).run() print("Thread:", result.thread_id) @@ -39,6 +41,7 @@ What happened: - `Codex()` started and initialized `codex app-server`. - `thread_start(...)` created a thread. - `turn(...).run()` consumed events until `turn/completed` and returned a `TurnResult`. 
+- one client can have only one active `Turn.stream()` / `Turn.run()` consumer at a time in the current experimental build ## 3) Continue the same thread (multi-turn) @@ -46,7 +49,7 @@ What happened: from codex_app_server import Codex, TextInput with Codex() as codex: - thread = codex.thread_start(model="gpt-5") + thread = codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) first = thread.turn(TextInput("Summarize Rust ownership in 2 bullets.")).run() second = thread.turn(TextInput("Now explain it to a Python developer.")).run() @@ -55,7 +58,25 @@ with Codex() as codex: print("second:", second.text) ``` -## 4) Resume an existing thread +## 4) Async parity + +```python +import asyncio +from codex_app_server import AsyncCodex, TextInput + + +async def main() -> None: + async with AsyncCodex() as codex: + thread = await codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) + turn = await thread.turn(TextInput("Continue where we left off.")) + result = await turn.run() + print(result.text) + + +asyncio.run(main()) +``` + +## 5) Resume an existing thread ```python from codex_app_server import Codex, TextInput @@ -63,12 +84,12 @@ from codex_app_server import Codex, TextInput THREAD_ID = "thr_123" # replace with a real id with Codex() as codex: - thread = codex.thread(THREAD_ID) + thread = codex.thread_resume(THREAD_ID) result = thread.turn(TextInput("Continue where we left off.")).run() print(result.text) ``` -## 5) Next stops +## 6) Next stops - API surface and signatures: `docs/api-reference.md` - Common decisions/pitfalls: `docs/faq.md` diff --git a/sdk/python/examples/01_quickstart_constructor/async.py b/sdk/python/examples/01_quickstart_constructor/async.py new file mode 100644 index 00000000000..548640bc9fe --- /dev/null +++ b/sdk/python/examples/01_quickstart_constructor/async.py @@ -0,0 +1,30 @@ +import sys +from pathlib import Path + +_EXAMPLES_ROOT = Path(__file__).resolve().parents[1] +if str(_EXAMPLES_ROOT) not 
in sys.path: + sys.path.insert(0, str(_EXAMPLES_ROOT)) + +from _bootstrap import ensure_local_sdk_src, runtime_config + +ensure_local_sdk_src() + +import asyncio + +from codex_app_server import AsyncCodex, TextInput + + +async def main() -> None: + async with AsyncCodex(config=runtime_config()) as codex: + print("Server:", codex.metadata.server_name, codex.metadata.server_version) + + thread = await codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) + turn = await thread.turn(TextInput("Say hello in one sentence.")) + result = await turn.run() + + print("Status:", result.status) + print("Text:", result.text) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/python/examples/01_quickstart_constructor/sync.py b/sdk/python/examples/01_quickstart_constructor/sync.py new file mode 100644 index 00000000000..3ad7981299b --- /dev/null +++ b/sdk/python/examples/01_quickstart_constructor/sync.py @@ -0,0 +1,20 @@ +import sys +from pathlib import Path + +_EXAMPLES_ROOT = Path(__file__).resolve().parents[1] +if str(_EXAMPLES_ROOT) not in sys.path: + sys.path.insert(0, str(_EXAMPLES_ROOT)) + +from _bootstrap import ensure_local_sdk_src, runtime_config + +ensure_local_sdk_src() + +from codex_app_server import Codex, TextInput + +with Codex(config=runtime_config()) as codex: + print("Server:", codex.metadata.server_name, codex.metadata.server_version) + + thread = codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) + result = thread.turn(TextInput("Say hello in one sentence.")).run() + print("Status:", result.status) + print("Text:", result.text) diff --git a/sdk/python/examples/02_turn_run/async.py b/sdk/python/examples/02_turn_run/async.py new file mode 100644 index 00000000000..81a572ea0b9 --- /dev/null +++ b/sdk/python/examples/02_turn_run/async.py @@ -0,0 +1,37 @@ +import sys +from pathlib import Path + +_EXAMPLES_ROOT = Path(__file__).resolve().parents[1] +if str(_EXAMPLES_ROOT) not in sys.path: + 
sys.path.insert(0, str(_EXAMPLES_ROOT)) + +from _bootstrap import ensure_local_sdk_src, runtime_config + +ensure_local_sdk_src() + +import asyncio + +from codex_app_server import AsyncCodex, TextInput + + +async def main() -> None: + async with AsyncCodex(config=runtime_config()) as codex: + thread = await codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) + turn = await thread.turn(TextInput("Give 3 bullets about SIMD.")) + result = await turn.run() + + print("thread_id:", result.thread_id) + print("turn_id:", result.turn_id) + print("status:", result.status) + if result.error is not None: + print("error:", result.error) + print("text:", result.text) + print("items.count:", len(result.items)) + if result.usage is None: + raise RuntimeError("missing usage for completed turn") + print("usage.thread_id:", result.usage.thread_id) + print("usage.turn_id:", result.usage.turn_id) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/python/examples/02_turn_run/sync.py b/sdk/python/examples/02_turn_run/sync.py new file mode 100644 index 00000000000..8125c487c06 --- /dev/null +++ b/sdk/python/examples/02_turn_run/sync.py @@ -0,0 +1,28 @@ +import sys +from pathlib import Path + +_EXAMPLES_ROOT = Path(__file__).resolve().parents[1] +if str(_EXAMPLES_ROOT) not in sys.path: + sys.path.insert(0, str(_EXAMPLES_ROOT)) + +from _bootstrap import ensure_local_sdk_src, runtime_config + +ensure_local_sdk_src() + +from codex_app_server import Codex, TextInput + +with Codex(config=runtime_config()) as codex: + thread = codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) + result = thread.turn(TextInput("Give 3 bullets about SIMD.")).run() + + print("thread_id:", result.thread_id) + print("turn_id:", result.turn_id) + print("status:", result.status) + if result.error is not None: + print("error:", result.error) + print("text:", result.text) + print("items.count:", len(result.items)) + if result.usage is None: + raise 
RuntimeError("missing usage for completed turn") + print("usage.thread_id:", result.usage.thread_id) + print("usage.turn_id:", result.usage.turn_id) diff --git a/sdk/python/examples/03_turn_stream_events/async.py b/sdk/python/examples/03_turn_stream_events/async.py new file mode 100644 index 00000000000..1198c8f7527 --- /dev/null +++ b/sdk/python/examples/03_turn_stream_events/async.py @@ -0,0 +1,44 @@ +import sys +from pathlib import Path + +_EXAMPLES_ROOT = Path(__file__).resolve().parents[1] +if str(_EXAMPLES_ROOT) not in sys.path: + sys.path.insert(0, str(_EXAMPLES_ROOT)) + +from _bootstrap import ensure_local_sdk_src, runtime_config + +ensure_local_sdk_src() + +import asyncio + +from codex_app_server import AsyncCodex, TextInput + + +async def main() -> None: + async with AsyncCodex(config=runtime_config()) as codex: + thread = await codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) + turn = await thread.turn(TextInput("Count from 1 to 200 with commas, then one summary sentence.")) + + # Best effort controls: models can finish quickly, so races are expected. 
+ try: + _ = await turn.steer(TextInput("Keep it brief and stop after 20 numbers.")) + print("steer: sent") + except Exception as exc: + print("steer: skipped", type(exc).__name__) + + try: + _ = await turn.interrupt() + print("interrupt: sent") + except Exception as exc: + print("interrupt: skipped", type(exc).__name__) + + event_count = 0 + async for event in turn.stream(): + event_count += 1 + print(event.method, event.payload) + + print("events.count:", event_count) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/python/examples/03_turn_stream_events/sync.py b/sdk/python/examples/03_turn_stream_events/sync.py new file mode 100644 index 00000000000..03c02c44c47 --- /dev/null +++ b/sdk/python/examples/03_turn_stream_events/sync.py @@ -0,0 +1,36 @@ +import sys +from pathlib import Path + +_EXAMPLES_ROOT = Path(__file__).resolve().parents[1] +if str(_EXAMPLES_ROOT) not in sys.path: + sys.path.insert(0, str(_EXAMPLES_ROOT)) + +from _bootstrap import ensure_local_sdk_src, runtime_config + +ensure_local_sdk_src() + +from codex_app_server import Codex, TextInput + +with Codex(config=runtime_config()) as codex: + thread = codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) + turn = thread.turn(TextInput("Count from 1 to 200 with commas, then one summary sentence.")) + + # Best effort controls: models can finish quickly, so races are expected. 
+ try: + _ = turn.steer(TextInput("Keep it brief and stop after 20 numbers.")) + print("steer: sent") + except Exception as exc: + print("steer: skipped", type(exc).__name__) + + try: + _ = turn.interrupt() + print("interrupt: sent") + except Exception as exc: + print("interrupt: skipped", type(exc).__name__) + + event_count = 0 + for event in turn.stream(): + event_count += 1 + print(event.method, event.payload) + + print("events.count:", event_count) diff --git a/sdk/python/examples/04_models_and_metadata/async.py b/sdk/python/examples/04_models_and_metadata/async.py new file mode 100644 index 00000000000..9686eec95dd --- /dev/null +++ b/sdk/python/examples/04_models_and_metadata/async.py @@ -0,0 +1,28 @@ +import sys +from pathlib import Path + +_EXAMPLES_ROOT = Path(__file__).resolve().parents[1] +if str(_EXAMPLES_ROOT) not in sys.path: + sys.path.insert(0, str(_EXAMPLES_ROOT)) + +from _bootstrap import ensure_local_sdk_src, runtime_config + +ensure_local_sdk_src() + +import asyncio + +from codex_app_server import AsyncCodex + + +async def main() -> None: + async with AsyncCodex(config=runtime_config()) as codex: + print("metadata:", codex.metadata) + + models = await codex.models(include_hidden=True) + print("models.count:", len(models.data)) + if models.data: + print("first model id:", models.data[0].id) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/python/examples/04_models_and_metadata/sync.py b/sdk/python/examples/04_models_and_metadata/sync.py new file mode 100644 index 00000000000..800e133db64 --- /dev/null +++ b/sdk/python/examples/04_models_and_metadata/sync.py @@ -0,0 +1,20 @@ +import sys +from pathlib import Path + +_EXAMPLES_ROOT = Path(__file__).resolve().parents[1] +if str(_EXAMPLES_ROOT) not in sys.path: + sys.path.insert(0, str(_EXAMPLES_ROOT)) + +from _bootstrap import ensure_local_sdk_src, runtime_config + +ensure_local_sdk_src() + +from codex_app_server import Codex + +with Codex(config=runtime_config()) as codex: + 
print("metadata:", codex.metadata) + + models = codex.models() + print("models.count:", len(models.data)) + if models.data: + print("first model id:", models.data[0].id) diff --git a/sdk/python/examples/05_existing_thread/async.py b/sdk/python/examples/05_existing_thread/async.py new file mode 100644 index 00000000000..d1da384dd21 --- /dev/null +++ b/sdk/python/examples/05_existing_thread/async.py @@ -0,0 +1,32 @@ +import sys +from pathlib import Path + +_EXAMPLES_ROOT = Path(__file__).resolve().parents[1] +if str(_EXAMPLES_ROOT) not in sys.path: + sys.path.insert(0, str(_EXAMPLES_ROOT)) + +from _bootstrap import ensure_local_sdk_src, runtime_config + +ensure_local_sdk_src() + +import asyncio + +from codex_app_server import AsyncCodex, TextInput + + +async def main() -> None: + async with AsyncCodex(config=runtime_config()) as codex: + original = await codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) + + first_turn = await original.turn(TextInput("Tell me one fact about Saturn.")) + first = await first_turn.run() + print("Created thread:", first.thread_id) + + resumed = await codex.thread_resume(first.thread_id) + second_turn = await resumed.turn(TextInput("Continue with one more fact.")) + second = await second_turn.run() + print(second.text) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/python/examples/05_existing_thread/sync.py b/sdk/python/examples/05_existing_thread/sync.py new file mode 100644 index 00000000000..b6526355c71 --- /dev/null +++ b/sdk/python/examples/05_existing_thread/sync.py @@ -0,0 +1,23 @@ +import sys +from pathlib import Path + +_EXAMPLES_ROOT = Path(__file__).resolve().parents[1] +if str(_EXAMPLES_ROOT) not in sys.path: + sys.path.insert(0, str(_EXAMPLES_ROOT)) + +from _bootstrap import ensure_local_sdk_src, runtime_config + +ensure_local_sdk_src() + +from codex_app_server import Codex, TextInput + +with Codex(config=runtime_config()) as codex: + # Create an initial thread and turn so we 
have a real thread to resume. + original = codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) + first = original.turn(TextInput("Tell me one fact about Saturn.")).run() + print("Created thread:", first.thread_id) + + # Resume the existing thread by ID. + resumed = codex.thread_resume(first.thread_id) + second = resumed.turn(TextInput("Continue with one more fact.")).run() + print(second.text) diff --git a/sdk/python/examples/06_thread_lifecycle_and_controls/async.py b/sdk/python/examples/06_thread_lifecycle_and_controls/async.py new file mode 100644 index 00000000000..ed263e71577 --- /dev/null +++ b/sdk/python/examples/06_thread_lifecycle_and_controls/async.py @@ -0,0 +1,70 @@ +import sys +from pathlib import Path + +_EXAMPLES_ROOT = Path(__file__).resolve().parents[1] +if str(_EXAMPLES_ROOT) not in sys.path: + sys.path.insert(0, str(_EXAMPLES_ROOT)) + +from _bootstrap import ensure_local_sdk_src, runtime_config + +ensure_local_sdk_src() + +import asyncio + +from codex_app_server import AsyncCodex, TextInput + + +async def main() -> None: + async with AsyncCodex(config=runtime_config()) as codex: + thread = await codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) + first = await (await thread.turn(TextInput("One sentence about structured planning."))).run() + second = await (await thread.turn(TextInput("Now restate it for a junior engineer."))).run() + + reopened = await codex.thread_resume(thread.id) + listing_active = await codex.thread_list(limit=20, archived=False) + reading = await reopened.read(include_turns=True) + + _ = await reopened.set_name("sdk-lifecycle-demo") + _ = await codex.thread_archive(reopened.id) + listing_archived = await codex.thread_list(limit=20, archived=True) + unarchived = await codex.thread_unarchive(reopened.id) + + resumed_info = "n/a" + try: + resumed = await codex.thread_resume( + unarchived.id, + model="gpt-5", + config={"model_reasoning_effort": "high"}, + ) + resumed_result = 
await (await resumed.turn(TextInput("Continue in one short sentence."))).run() + resumed_info = f"{resumed_result.turn_id} {resumed_result.status}" + except Exception as exc: + resumed_info = f"skipped({type(exc).__name__})" + + forked_info = "n/a" + try: + forked = await codex.thread_fork(unarchived.id, model="gpt-5") + forked_result = await (await forked.turn(TextInput("Take a different angle in one short sentence."))).run() + forked_info = f"{forked_result.turn_id} {forked_result.status}" + except Exception as exc: + forked_info = f"skipped({type(exc).__name__})" + + compact_info = "sent" + try: + _ = await unarchived.compact() + except Exception as exc: + compact_info = f"skipped({type(exc).__name__})" + + print("Lifecycle OK:", thread.id) + print("first:", first.turn_id, first.status) + print("second:", second.turn_id, second.status) + print("read.turns:", len(reading.thread.turns or [])) + print("list.active:", len(listing_active.data)) + print("list.archived:", len(listing_archived.data)) + print("resumed:", resumed_info) + print("forked:", forked_info) + print("compact:", compact_info) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/python/examples/06_thread_lifecycle_and_controls/sync.py b/sdk/python/examples/06_thread_lifecycle_and_controls/sync.py new file mode 100644 index 00000000000..fb1316e78ca --- /dev/null +++ b/sdk/python/examples/06_thread_lifecycle_and_controls/sync.py @@ -0,0 +1,63 @@ +import sys +from pathlib import Path + +_EXAMPLES_ROOT = Path(__file__).resolve().parents[1] +if str(_EXAMPLES_ROOT) not in sys.path: + sys.path.insert(0, str(_EXAMPLES_ROOT)) + +from _bootstrap import ensure_local_sdk_src, runtime_config + +ensure_local_sdk_src() + +from codex_app_server import Codex, TextInput + + +with Codex(config=runtime_config()) as codex: + thread = codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) + first = thread.turn(TextInput("One sentence about structured planning.")).run() + second = 
thread.turn(TextInput("Now restate it for a junior engineer.")).run() + + reopened = codex.thread_resume(thread.id) + listing_active = codex.thread_list(limit=20, archived=False) + reading = reopened.read(include_turns=True) + + _ = reopened.set_name("sdk-lifecycle-demo") + _ = codex.thread_archive(reopened.id) + listing_archived = codex.thread_list(limit=20, archived=True) + unarchived = codex.thread_unarchive(reopened.id) + + resumed_info = "n/a" + try: + resumed = codex.thread_resume( + unarchived.id, + model="gpt-5", + config={"model_reasoning_effort": "high"}, + ) + resumed_result = resumed.turn(TextInput("Continue in one short sentence.")).run() + resumed_info = f"{resumed_result.turn_id} {resumed_result.status}" + except Exception as exc: + resumed_info = f"skipped({type(exc).__name__})" + + forked_info = "n/a" + try: + forked = codex.thread_fork(unarchived.id, model="gpt-5") + forked_result = forked.turn(TextInput("Take a different angle in one short sentence.")).run() + forked_info = f"{forked_result.turn_id} {forked_result.status}" + except Exception as exc: + forked_info = f"skipped({type(exc).__name__})" + + compact_info = "sent" + try: + _ = unarchived.compact() + except Exception as exc: + compact_info = f"skipped({type(exc).__name__})" + + print("Lifecycle OK:", thread.id) + print("first:", first.turn_id, first.status) + print("second:", second.turn_id, second.status) + print("read.turns:", len(reading.thread.turns or [])) + print("list.active:", len(listing_active.data)) + print("list.archived:", len(listing_archived.data)) + print("resumed:", resumed_info) + print("forked:", forked_info) + print("compact:", compact_info) diff --git a/sdk/python/examples/07_image_and_text/async.py b/sdk/python/examples/07_image_and_text/async.py new file mode 100644 index 00000000000..67f667c87e9 --- /dev/null +++ b/sdk/python/examples/07_image_and_text/async.py @@ -0,0 +1,35 @@ +import sys +from pathlib import Path + +_EXAMPLES_ROOT = 
Path(__file__).resolve().parents[1] +if str(_EXAMPLES_ROOT) not in sys.path: + sys.path.insert(0, str(_EXAMPLES_ROOT)) + +from _bootstrap import ensure_local_sdk_src, runtime_config + +ensure_local_sdk_src() + +import asyncio + +from codex_app_server import AsyncCodex, ImageInput, TextInput + +REMOTE_IMAGE_URL = "https://raw.githubusercontent.com/github/explore/main/topics/python/python.png" + + +async def main() -> None: + async with AsyncCodex(config=runtime_config()) as codex: + thread = await codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) + turn = await thread.turn( + [ + TextInput("What is in this image? Give 3 bullets."), + ImageInput(REMOTE_IMAGE_URL), + ] + ) + result = await turn.run() + + print("Status:", result.status) + print(result.text) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/python/examples/07_image_and_text/sync.py b/sdk/python/examples/07_image_and_text/sync.py new file mode 100644 index 00000000000..e4ec44d3c4a --- /dev/null +++ b/sdk/python/examples/07_image_and_text/sync.py @@ -0,0 +1,26 @@ +import sys +from pathlib import Path + +_EXAMPLES_ROOT = Path(__file__).resolve().parents[1] +if str(_EXAMPLES_ROOT) not in sys.path: + sys.path.insert(0, str(_EXAMPLES_ROOT)) + +from _bootstrap import ensure_local_sdk_src, runtime_config + +ensure_local_sdk_src() + +from codex_app_server import Codex, ImageInput, TextInput + +REMOTE_IMAGE_URL = "https://raw.githubusercontent.com/github/explore/main/topics/python/python.png" + +with Codex(config=runtime_config()) as codex: + thread = codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) + result = thread.turn( + [ + TextInput("What is in this image? 
Give 3 bullets."), + ImageInput(REMOTE_IMAGE_URL), + ] + ).run() + + print("Status:", result.status) + print(result.text) diff --git a/sdk/python/examples/08_local_image_and_text/async.py b/sdk/python/examples/08_local_image_and_text/async.py new file mode 100644 index 00000000000..f20e33c0886 --- /dev/null +++ b/sdk/python/examples/08_local_image_and_text/async.py @@ -0,0 +1,38 @@ +import sys +from pathlib import Path + +_EXAMPLES_ROOT = Path(__file__).resolve().parents[1] +if str(_EXAMPLES_ROOT) not in sys.path: + sys.path.insert(0, str(_EXAMPLES_ROOT)) + +from _bootstrap import ensure_local_sdk_src, runtime_config + +ensure_local_sdk_src() + +import asyncio + +from codex_app_server import AsyncCodex, LocalImageInput, TextInput + +IMAGE_PATH = Path(__file__).resolve().parents[1] / "assets" / "sample_scene.png" +if not IMAGE_PATH.exists(): + raise FileNotFoundError(f"Missing bundled image: {IMAGE_PATH}") + + +async def main() -> None: + async with AsyncCodex(config=runtime_config()) as codex: + thread = await codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) + + turn = await thread.turn( + [ + TextInput("Read this local image and summarize what you see in 2 bullets."), + LocalImageInput(str(IMAGE_PATH.resolve())), + ] + ) + result = await turn.run() + + print("Status:", result.status) + print(result.text) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/python/examples/08_local_image_and_text/sync.py b/sdk/python/examples/08_local_image_and_text/sync.py new file mode 100644 index 00000000000..41b489e0a88 --- /dev/null +++ b/sdk/python/examples/08_local_image_and_text/sync.py @@ -0,0 +1,29 @@ +import sys +from pathlib import Path + +_EXAMPLES_ROOT = Path(__file__).resolve().parents[1] +if str(_EXAMPLES_ROOT) not in sys.path: + sys.path.insert(0, str(_EXAMPLES_ROOT)) + +from _bootstrap import ensure_local_sdk_src, runtime_config + +ensure_local_sdk_src() + +from codex_app_server import Codex, LocalImageInput, 
TextInput + +IMAGE_PATH = Path(__file__).resolve().parents[1] / "assets" / "sample_scene.png" +if not IMAGE_PATH.exists(): + raise FileNotFoundError(f"Missing bundled image: {IMAGE_PATH}") + +with Codex(config=runtime_config()) as codex: + thread = codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) + + result = thread.turn( + [ + TextInput("Read this local image and summarize what you see in 2 bullets."), + LocalImageInput(str(IMAGE_PATH.resolve())), + ] + ).run() + + print("Status:", result.status) + print(result.text) diff --git a/sdk/python/examples/09_async_parity/sync.py b/sdk/python/examples/09_async_parity/sync.py new file mode 100644 index 00000000000..0ebaf6b8d49 --- /dev/null +++ b/sdk/python/examples/09_async_parity/sync.py @@ -0,0 +1,23 @@ +import sys +from pathlib import Path + +_EXAMPLES_ROOT = Path(__file__).resolve().parents[1] +if str(_EXAMPLES_ROOT) not in sys.path: + sys.path.insert(0, str(_EXAMPLES_ROOT)) + +from _bootstrap import ensure_local_sdk_src, runtime_config + +ensure_local_sdk_src() + +from codex_app_server import Codex, TextInput + +with Codex(config=runtime_config()) as codex: + print("Server:", codex.metadata.server_name, codex.metadata.server_version) + + thread = codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) + turn = thread.turn(TextInput("Say hello in one sentence.")) + result = turn.run() + + print("Thread:", result.thread_id) + print("Turn:", result.turn_id) + print("Text:", result.text.strip()) diff --git a/sdk/python/examples/10_error_handling_and_retry/async.py b/sdk/python/examples/10_error_handling_and_retry/async.py new file mode 100644 index 00000000000..c8555b69361 --- /dev/null +++ b/sdk/python/examples/10_error_handling_and_retry/async.py @@ -0,0 +1,91 @@ +import sys +from pathlib import Path + +_EXAMPLES_ROOT = Path(__file__).resolve().parents[1] +if str(_EXAMPLES_ROOT) not in sys.path: + sys.path.insert(0, str(_EXAMPLES_ROOT)) + +from _bootstrap import 
ensure_local_sdk_src, runtime_config + +ensure_local_sdk_src() + +import asyncio +import random +from collections.abc import Awaitable, Callable +from typing import TypeVar + +from codex_app_server import ( + AsyncCodex, + JsonRpcError, + ServerBusyError, + TextInput, + TurnStatus, + is_retryable_error, +) + +ResultT = TypeVar("ResultT") + + +async def retry_on_overload_async( + op: Callable[[], Awaitable[ResultT]], + *, + max_attempts: int = 3, + initial_delay_s: float = 0.25, + max_delay_s: float = 2.0, + jitter_ratio: float = 0.2, +) -> ResultT: + if max_attempts < 1: + raise ValueError("max_attempts must be >= 1") + + delay = initial_delay_s + attempt = 0 + while True: + attempt += 1 + try: + return await op() + except Exception as exc: # noqa: BLE001 + if attempt >= max_attempts or not is_retryable_error(exc): + raise + jitter = delay * jitter_ratio + sleep_for = min(max_delay_s, delay) + random.uniform(-jitter, jitter) + if sleep_for > 0: + await asyncio.sleep(sleep_for) + delay = min(max_delay_s, delay * 2) + + +async def main() -> None: + async with AsyncCodex(config=runtime_config()) as codex: + thread = await codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) + + try: + result = await retry_on_overload_async( + _run_turn(thread, "Summarize retry best practices in 3 bullets."), + max_attempts=3, + initial_delay_s=0.25, + max_delay_s=2.0, + ) + except ServerBusyError as exc: + print("Server overloaded after retries:", exc.message) + print("Text:") + return + except JsonRpcError as exc: + print(f"JSON-RPC error {exc.code}: {exc.message}") + print("Text:") + return + + if result.status == TurnStatus.failed: + print("Turn failed:", result.error) + + print("Text:", result.text) + + +def _run_turn(thread, prompt: str): + async def _inner(): + turn = await thread.turn(TextInput(prompt)) + return await turn.run() + + return _inner + + +if __name__ == "__main__": + asyncio.run(main()) diff --git 
a/sdk/python/examples/10_error_handling_and_retry/sync.py b/sdk/python/examples/10_error_handling_and_retry/sync.py new file mode 100644 index 00000000000..02371566390 --- /dev/null +++ b/sdk/python/examples/10_error_handling_and_retry/sync.py @@ -0,0 +1,40 @@ +import sys +from pathlib import Path + +_EXAMPLES_ROOT = Path(__file__).resolve().parents[1] +if str(_EXAMPLES_ROOT) not in sys.path: + sys.path.insert(0, str(_EXAMPLES_ROOT)) + +from _bootstrap import ensure_local_sdk_src, runtime_config + +ensure_local_sdk_src() + +from codex_app_server import ( + Codex, + JsonRpcError, + ServerBusyError, + TextInput, + TurnStatus, + retry_on_overload, +) + +with Codex(config=runtime_config()) as codex: + thread = codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) + + try: + result = retry_on_overload( + lambda: thread.turn(TextInput("Summarize retry best practices in 3 bullets.")).run(), + max_attempts=3, + initial_delay_s=0.25, + max_delay_s=2.0, + ) + except ServerBusyError as exc: + print("Server overloaded after retries:", exc.message) + print("Text:") + except JsonRpcError as exc: + print(f"JSON-RPC error {exc.code}: {exc.message}") + print("Text:") + else: + if result.status == TurnStatus.failed: + print("Turn failed:", result.error) + print("Text:", result.text) diff --git a/sdk/python/examples/11_cli_mini_app/async.py b/sdk/python/examples/11_cli_mini_app/async.py new file mode 100644 index 00000000000..18e6ea46e0c --- /dev/null +++ b/sdk/python/examples/11_cli_mini_app/async.py @@ -0,0 +1,96 @@ +import sys +from pathlib import Path + +_EXAMPLES_ROOT = Path(__file__).resolve().parents[1] +if str(_EXAMPLES_ROOT) not in sys.path: + sys.path.insert(0, str(_EXAMPLES_ROOT)) + +from _bootstrap import ensure_local_sdk_src, runtime_config + +ensure_local_sdk_src() + +import asyncio + +from codex_app_server import ( + AsyncCodex, + TextInput, + ThreadTokenUsageUpdatedNotification, + TurnCompletedNotificationPayload, +) + + +def 
_status_value(status: object | None) -> str: + return str(getattr(status, "value", status)) + + +def _format_usage(usage: object | None) -> str: + if usage is None: + return "usage> (none)" + + last = getattr(usage, "last", None) + total = getattr(usage, "total", None) + if last is None or total is None: + return f"usage> {usage}" + + return ( + "usage>\n" + f" last: input={last.inputTokens} output={last.outputTokens} reasoning={last.reasoningOutputTokens} total={last.totalTokens} cached={last.cachedInputTokens}\n" + f" total: input={total.inputTokens} output={total.outputTokens} reasoning={total.reasoningOutputTokens} total={total.totalTokens} cached={total.cachedInputTokens}" + ) + + +async def main() -> None: + print("Codex async mini CLI. Type /exit to quit.") + + async with AsyncCodex(config=runtime_config()) as codex: + thread = await codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) + print("Thread:", thread.id) + + while True: + try: + user_input = (await asyncio.to_thread(input, "you> ")).strip() + except EOFError: + break + + if not user_input: + continue + if user_input in {"/exit", "/quit"}: + break + + turn = await thread.turn(TextInput(user_input)) + usage = None + status = None + error = None + printed_delta = False + + print("assistant> ", end="", flush=True) + async for event in turn.stream(): + payload = event.payload + if event.method == "item/agentMessage/delta": + delta = getattr(payload, "delta", "") + if delta: + print(delta, end="", flush=True) + printed_delta = True + continue + if isinstance(payload, ThreadTokenUsageUpdatedNotification): + usage = payload.token_usage + continue + if isinstance(payload, TurnCompletedNotificationPayload): + status = payload.turn.status + error = payload.turn.error + + if printed_delta: + print() + else: + print("[no text]") + + status_text = _status_value(status) + print(f"assistant.status> {status_text}") + if status_text == "failed": + print("assistant.error>", error) + + 
print(_format_usage(usage)) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/python/examples/11_cli_mini_app/sync.py b/sdk/python/examples/11_cli_mini_app/sync.py new file mode 100644 index 00000000000..3e02534bb56 --- /dev/null +++ b/sdk/python/examples/11_cli_mini_app/sync.py @@ -0,0 +1,89 @@ +import sys +from pathlib import Path + +_EXAMPLES_ROOT = Path(__file__).resolve().parents[1] +if str(_EXAMPLES_ROOT) not in sys.path: + sys.path.insert(0, str(_EXAMPLES_ROOT)) + +from _bootstrap import ensure_local_sdk_src, runtime_config + +ensure_local_sdk_src() + +from codex_app_server import ( + Codex, + TextInput, + ThreadTokenUsageUpdatedNotification, + TurnCompletedNotificationPayload, +) + +print("Codex mini CLI. Type /exit to quit.") + + +def _status_value(status: object | None) -> str: + return str(getattr(status, "value", status)) + + +def _format_usage(usage: object | None) -> str: + if usage is None: + return "usage> (none)" + + last = getattr(usage, "last", None) + total = getattr(usage, "total", None) + if last is None or total is None: + return f"usage> {usage}" + + return ( + "usage>\n" + f" last: input={last.inputTokens} output={last.outputTokens} reasoning={last.reasoningOutputTokens} total={last.totalTokens} cached={last.cachedInputTokens}\n" + f" total: input={total.inputTokens} output={total.outputTokens} reasoning={total.reasoningOutputTokens} total={total.totalTokens} cached={total.cachedInputTokens}" + ) + + +with Codex(config=runtime_config()) as codex: + thread = codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) + print("Thread:", thread.id) + + while True: + try: + user_input = input("you> ").strip() + except EOFError: + break + + if not user_input: + continue + if user_input in {"/exit", "/quit"}: + break + + turn = thread.turn(TextInput(user_input)) + usage = None + status = None + error = None + printed_delta = False + + print("assistant> ", end="", flush=True) + for event in turn.stream(): + 
payload = event.payload + if event.method == "item/agentMessage/delta": + delta = getattr(payload, "delta", "") + if delta: + print(delta, end="", flush=True) + printed_delta = True + continue + if isinstance(payload, ThreadTokenUsageUpdatedNotification): + usage = payload.token_usage + continue + if isinstance(payload, TurnCompletedNotificationPayload): + status = payload.turn.status + error = payload.turn.error + + if printed_delta: + print() + else: + print("[no text]") + + status_text = _status_value(status) + print(f"assistant.status> {status_text}") + if status_text == "failed": + print("assistant.error>", error) + + print(_format_usage(usage)) diff --git a/sdk/python/examples/12_turn_params_kitchen_sink/async.py b/sdk/python/examples/12_turn_params_kitchen_sink/async.py new file mode 100644 index 00000000000..bc3d5e57855 --- /dev/null +++ b/sdk/python/examples/12_turn_params_kitchen_sink/async.py @@ -0,0 +1,75 @@ +import sys +from pathlib import Path + +_EXAMPLES_ROOT = Path(__file__).resolve().parents[1] +if str(_EXAMPLES_ROOT) not in sys.path: + sys.path.insert(0, str(_EXAMPLES_ROOT)) + +from _bootstrap import ensure_local_sdk_src, runtime_config + +ensure_local_sdk_src() + +import asyncio + +from codex_app_server import ( + AskForApproval, + AsyncCodex, + Personality, + ReasoningEffort, + ReasoningSummary, + SandboxPolicy, + TextInput, +) + +OUTPUT_SCHEMA = { + "type": "object", + "properties": { + "summary": {"type": "string"}, + "actions": { + "type": "array", + "items": {"type": "string"}, + }, + }, + "required": ["summary", "actions"], + "additionalProperties": False, +} + +SANDBOX_POLICY = SandboxPolicy.model_validate( + { + "type": "readOnly", + "access": {"type": "fullAccess"}, + } +) +SUMMARY = ReasoningSummary.model_validate("concise") + +PROMPT = ( + "Analyze a safe rollout plan for enabling a feature flag in production. " + "Return JSON matching the requested schema." 
+) +APPROVAL_POLICY = AskForApproval.model_validate("never") + + +async def main() -> None: + async with AsyncCodex(config=runtime_config()) as codex: + thread = await codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) + + turn = await thread.turn( + TextInput(PROMPT), + approval_policy=APPROVAL_POLICY, + cwd=str(Path.cwd()), + effort=ReasoningEffort.medium, + model="gpt-5", + output_schema=OUTPUT_SCHEMA, + personality=Personality.pragmatic, + sandbox_policy=SANDBOX_POLICY, + summary=SUMMARY, + ) + result = await turn.run() + + print("Status:", result.status) + print("Text:", result.text) + print("Usage:", result.usage) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/python/examples/12_turn_params_kitchen_sink/sync.py b/sdk/python/examples/12_turn_params_kitchen_sink/sync.py new file mode 100644 index 00000000000..3f3574fd2ca --- /dev/null +++ b/sdk/python/examples/12_turn_params_kitchen_sink/sync.py @@ -0,0 +1,67 @@ +import sys +from pathlib import Path + +_EXAMPLES_ROOT = Path(__file__).resolve().parents[1] +if str(_EXAMPLES_ROOT) not in sys.path: + sys.path.insert(0, str(_EXAMPLES_ROOT)) + +from _bootstrap import ensure_local_sdk_src, runtime_config + +ensure_local_sdk_src() + +from codex_app_server import ( + AskForApproval, + Codex, + Personality, + ReasoningEffort, + ReasoningSummary, + SandboxPolicy, + TextInput, +) + +OUTPUT_SCHEMA = { + "type": "object", + "properties": { + "summary": {"type": "string"}, + "actions": { + "type": "array", + "items": {"type": "string"}, + }, + }, + "required": ["summary", "actions"], + "additionalProperties": False, +} + +SANDBOX_POLICY = SandboxPolicy.model_validate( + { + "type": "readOnly", + "access": {"type": "fullAccess"}, + } +) +SUMMARY = ReasoningSummary.model_validate("concise") + +PROMPT = ( + "Analyze a safe rollout plan for enabling a feature flag in production. " + "Return JSON matching the requested schema." 
+) +APPROVAL_POLICY = AskForApproval.model_validate("never") + +with Codex(config=runtime_config()) as codex: + thread = codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) + + turn = thread.turn( + TextInput(PROMPT), + approval_policy=APPROVAL_POLICY, + cwd=str(Path.cwd()), + effort=ReasoningEffort.medium, + model="gpt-5", + output_schema=OUTPUT_SCHEMA, + personality=Personality.pragmatic, + sandbox_policy=SANDBOX_POLICY, + summary=SUMMARY, + ) + result = turn.run() + + print("Status:", result.status) + print("Text:", result.text) + print("Usage:", result.usage) diff --git a/sdk/python/examples/13_model_select_and_turn_params/async.py b/sdk/python/examples/13_model_select_and_turn_params/async.py new file mode 100644 index 00000000000..521193b8e1f --- /dev/null +++ b/sdk/python/examples/13_model_select_and_turn_params/async.py @@ -0,0 +1,117 @@ +import sys +from pathlib import Path + +_EXAMPLES_ROOT = Path(__file__).resolve().parents[1] +if str(_EXAMPLES_ROOT) not in sys.path: + sys.path.insert(0, str(_EXAMPLES_ROOT)) + +from _bootstrap import ensure_local_sdk_src, runtime_config + +ensure_local_sdk_src() + +import asyncio + +from codex_app_server import ( + AskForApproval, + AsyncCodex, + Personality, + ReasoningEffort, + ReasoningSummary, + SandboxPolicy, + TextInput, +) + +REASONING_RANK = { + "none": 0, + "minimal": 1, + "low": 2, + "medium": 3, + "high": 4, + "xhigh": 5, +} + + +def _pick_highest_model(models): + visible = [m for m in models if not m.hidden] or models + known_names = {m.id for m in visible} | {m.model for m in visible} + top_candidates = [m for m in visible if not (m.upgrade and m.upgrade in known_names)] + pool = top_candidates or visible + return max(pool, key=lambda m: (m.model, m.id)) + + +def _pick_highest_turn_effort(model) -> ReasoningEffort: + if not model.supported_reasoning_efforts: + return ReasoningEffort.medium + + best = max( + model.supported_reasoning_efforts, + key=lambda option: 
REASONING_RANK.get(option.reasoning_effort.value, -1), + ) + return ReasoningEffort(best.reasoning_effort.value) + + +OUTPUT_SCHEMA = { + "type": "object", + "properties": { + "summary": {"type": "string"}, + "actions": { + "type": "array", + "items": {"type": "string"}, + }, + }, + "required": ["summary", "actions"], + "additionalProperties": False, +} + +SANDBOX_POLICY = SandboxPolicy.model_validate( + { + "type": "readOnly", + "access": {"type": "fullAccess"}, + } +) +APPROVAL_POLICY = AskForApproval.model_validate("never") + + +async def main() -> None: + async with AsyncCodex(config=runtime_config()) as codex: + models = await codex.models(include_hidden=True) + selected_model = _pick_highest_model(models.data) + selected_effort = _pick_highest_turn_effort(selected_model) + + print("selected.model:", selected_model.model) + print("selected.effort:", selected_effort.value) + + thread = await codex.thread_start( + model=selected_model.model, + config={"model_reasoning_effort": selected_effort.value}, + ) + + first_turn = await thread.turn( + TextInput("Give one short sentence about reliable production releases."), + model=selected_model.model, + effort=selected_effort, + ) + first = await first_turn.run() + + print("agent.message:", first.text) + print("usage:", first.usage) + + second_turn = await thread.turn( + TextInput("Return JSON for a safe feature-flag rollout plan."), + approval_policy=APPROVAL_POLICY, + cwd=str(Path.cwd()), + effort=selected_effort, + model=selected_model.model, + output_schema=OUTPUT_SCHEMA, + personality=Personality.pragmatic, + sandbox_policy=SANDBOX_POLICY, + summary=ReasoningSummary.model_validate("concise"), + ) + second = await second_turn.run() + + print("agent.message.params:", second.text) + print("usage.params:", second.usage) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/python/examples/13_model_select_and_turn_params/sync.py b/sdk/python/examples/13_model_select_and_turn_params/sync.py new file mode 
100644 index 00000000000..4fb680e7d18 --- /dev/null +++ b/sdk/python/examples/13_model_select_and_turn_params/sync.py @@ -0,0 +1,108 @@ +import sys +from pathlib import Path + +_EXAMPLES_ROOT = Path(__file__).resolve().parents[1] +if str(_EXAMPLES_ROOT) not in sys.path: + sys.path.insert(0, str(_EXAMPLES_ROOT)) + +from _bootstrap import ensure_local_sdk_src, runtime_config + +ensure_local_sdk_src() + +from codex_app_server import ( + AskForApproval, + Codex, + Personality, + ReasoningEffort, + ReasoningSummary, + SandboxPolicy, + TextInput, +) + +REASONING_RANK = { + "none": 0, + "minimal": 1, + "low": 2, + "medium": 3, + "high": 4, + "xhigh": 5, +} + + +def _pick_highest_model(models): + visible = [m for m in models if not m.hidden] or models + known_names = {m.id for m in visible} | {m.model for m in visible} + top_candidates = [m for m in visible if not (m.upgrade and m.upgrade in known_names)] + pool = top_candidates or visible + return max(pool, key=lambda m: (m.model, m.id)) + + +def _pick_highest_turn_effort(model) -> ReasoningEffort: + if not model.supported_reasoning_efforts: + return ReasoningEffort.medium + + best = max( + model.supported_reasoning_efforts, + key=lambda option: REASONING_RANK.get(option.reasoning_effort.value, -1), + ) + return ReasoningEffort(best.reasoning_effort.value) + + +OUTPUT_SCHEMA = { + "type": "object", + "properties": { + "summary": {"type": "string"}, + "actions": { + "type": "array", + "items": {"type": "string"}, + }, + }, + "required": ["summary", "actions"], + "additionalProperties": False, +} + +SANDBOX_POLICY = SandboxPolicy.model_validate( + { + "type": "readOnly", + "access": {"type": "fullAccess"}, + } +) +APPROVAL_POLICY = AskForApproval.model_validate("never") + + +with Codex(config=runtime_config()) as codex: + models = codex.models(include_hidden=True) + selected_model = _pick_highest_model(models.data) + selected_effort = _pick_highest_turn_effort(selected_model) + + print("selected.model:", 
selected_model.model) + print("selected.effort:", selected_effort.value) + + thread = codex.thread_start( + model=selected_model.model, + config={"model_reasoning_effort": selected_effort.value}, + ) + + first = thread.turn( + TextInput("Give one short sentence about reliable production releases."), + model=selected_model.model, + effort=selected_effort, + ).run() + + print("agent.message:", first.text) + print("usage:", first.usage) + + second = thread.turn( + TextInput("Return JSON for a safe feature-flag rollout plan."), + approval_policy=APPROVAL_POLICY, + cwd=str(Path.cwd()), + effort=selected_effort, + model=selected_model.model, + output_schema=OUTPUT_SCHEMA, + personality=Personality.pragmatic, + sandbox_policy=SANDBOX_POLICY, + summary=ReasoningSummary.model_validate("concise"), + ).run() + + print("agent.message.params:", second.text) + print("usage.params:", second.usage) diff --git a/sdk/python/examples/README.md b/sdk/python/examples/README.md new file mode 100644 index 00000000000..ed687a35ada --- /dev/null +++ b/sdk/python/examples/README.md @@ -0,0 +1,78 @@ +# Python SDK Examples + +Each example folder contains runnable versions: + +- `sync.py` (public sync surface: `Codex`) +- `async.py` (public async surface: `AsyncCodex`) + +All examples intentionally use only public SDK exports from `codex_app_server`. + +## Prerequisites + +- Python `>=3.10` +- Install SDK dependencies for the same Python interpreter you will use to run examples + +Recommended setup (from `sdk/python`): + +```bash +python -m venv .venv +source .venv/bin/activate +python -m pip install -U pip +python -m pip install -e . +``` + +When running examples from this repo checkout, the SDK source uses the local +tree and does not bundle a runtime binary. 
The helper in `examples/_bootstrap.py` +therefore resolves `codex` from: + +- `CODEX_PYTHON_SDK_CODEX_BIN`, if set +- otherwise `codex` on `PATH` + +## Run examples + +From `sdk/python`: + +```bash +python examples//sync.py +python examples//async.py +``` + +The examples bootstrap local imports from `sdk/python/src` automatically, so no +extra package build step is required beyond installing dependencies for your +active interpreter and making a `codex` binary available. + +## Recommended first run + +```bash +python examples/01_quickstart_constructor/sync.py +python examples/01_quickstart_constructor/async.py +``` + +## Index + +- `01_quickstart_constructor/` + - first run / sanity check +- `02_turn_run/` + - inspect full turn output fields +- `03_turn_stream_events/` + - stream and print raw notifications +- `04_models_and_metadata/` + - read server metadata and model list +- `05_existing_thread/` + - resume a real existing thread (created in-script) +- `06_thread_lifecycle_and_controls/` + - thread lifecycle + control calls +- `07_image_and_text/` + - remote image URL + text multimodal turn +- `08_local_image_and_text/` + - local image + text multimodal turn using bundled sample image +- `09_async_parity/` + - parity-style sync flow (see async parity in other examples) +- `10_error_handling_and_retry/` + - overload retry pattern + typed error handling structure +- `11_cli_mini_app/` + - interactive chat loop +- `12_turn_params_kitchen_sink/` + - one turn using most optional `turn(...)` params (sync + async) +- `13_model_select_and_turn_params/` + - list models, pick highest model + highest supported reasoning effort, run turns, print message and usage diff --git a/sdk/python/examples/_bootstrap.py b/sdk/python/examples/_bootstrap.py new file mode 100644 index 00000000000..44b89b4f789 --- /dev/null +++ b/sdk/python/examples/_bootstrap.py @@ -0,0 +1,50 @@ +from __future__ import annotations + +import importlib.util +import os +import shutil +import sys +from 
pathlib import Path + + +def _ensure_runtime_dependencies(sdk_python_dir: Path) -> None: + if importlib.util.find_spec("pydantic") is not None: + return + + python = sys.executable + raise RuntimeError( + "Missing required dependency: pydantic.\n" + f"Interpreter: {python}\n" + "Install dependencies with the same interpreter used to run this example:\n" + f" {python} -m pip install -e {sdk_python_dir}\n" + "If you installed with `pip` from another Python, reinstall using the command above." + ) + + +def ensure_local_sdk_src() -> Path: + """Add sdk/python/src to sys.path so examples run without installing the package.""" + sdk_python_dir = Path(__file__).resolve().parents[1] + src_dir = sdk_python_dir / "src" + package_dir = src_dir / "codex_app_server" + if not package_dir.exists(): + raise RuntimeError(f"Could not locate local SDK package at {package_dir}") + + _ensure_runtime_dependencies(sdk_python_dir) + + src_str = str(src_dir) + if src_str not in sys.path: + sys.path.insert(0, src_str) + return src_dir + + +def runtime_config(): + """Return an example-friendly AppServerConfig for local repo usage.""" + from codex_app_server import AppServerConfig + + codex_bin = os.environ.get("CODEX_PYTHON_SDK_CODEX_BIN") or shutil.which("codex") + if codex_bin is None: + raise RuntimeError( + "Examples require a Codex CLI binary when run from this repo checkout.\n" + "Set CODEX_PYTHON_SDK_CODEX_BIN=/absolute/path/to/codex, or ensure `codex` is on PATH." 
+ ) + return AppServerConfig(codex_bin=codex_bin) diff --git a/sdk/python/examples/assets/sample_scene.png b/sdk/python/examples/assets/sample_scene.png new file mode 100644 index 0000000000000000000000000000000000000000..e4efdd429dd397a00a594112e0d6af0ec0e8e2f6 GIT binary patch literal 3724 zcmZ`+3piBi8~@IkF$SXsEi>y$Xv0>RozZp?#w~26A=_oirGL6?D>k+?kyE0UN@Yc* z7$xo6rPP+KOvkF#vM!M*mDy)=D>1v}QW*dDoI$tUXU_ASIq&zqzxVgvzH{DhCEgyo z+8=5ogmk?;-Fy+k352kNY#b!#4=vb?(1fL4Zq9zu_n$Q_Pj6_LT>j^9;$~g*oU^8; zNuQm6@Obj$c@N#rG4L%ulJiD(4i-MgR(0uacaG&>5ZUP8{hF8Z-M#H2)p1T69gGS> z3i^6t+AkdFOs@$=*+q}FkeI;WB3v+4LnI%aT|_@-r=k(*5*C_n5RACNsN*B@`aO{b z^80kMf`k0pj0MQ8?WJP@IF{I>8*+x?O}MM-fAuih3$xp|gKbNDBzW zfD7f~F|BgII8yMyX&mr~>YNi`X$cTpfTHvjPTe&q!cX<0-9(0Ctd5ICmgV(TtB8qf zkTjz1_V8`q_J*aVB)XD+!y1oi*vTQ4ejVNC=MVl3bUV{RFjny4`Wa=GTt%;@5HYwv z$UWv%@HPxT=M&!=3QC-AoinalKwx*4$NMv*%RuU7r`I{TbTBc_D(g+$n!tqIdY3DH`Au81^l6ypL zUy89LLAr*)XkLE!WnJ$^c?QPLPeog!SB@=Q`7wMCwE;6!7-I77n;fy-gOEXXHH%sz5hxZ;MUo__G%0n3DJd3vwx9nw3n|H5vG+Ue`7h?W%ov ziWRpJV{wBq0PW39VdWVIRwK9UVi9A0M`WIHkoti$_yFf*%RdAw_##W}(ZFU__S~ON zG!V8nAel)n#)m#*Wn0T8H!60TID|>3F=~%g9QfqUDIEcUbRv$UB^V{pRSHtNjRizF zDaS4ku&%vKIr-`JW7ztIq2(IU*&aq?{&hBtt%DhG4>*)Y{i7RQna?LK z`EA)J16$jyX~jeGVAVUvUZ1G6))XNcNLq$k9EhuLW}9s=I72DJaK%(asul(RqB1TTRMXwaE7w- z$h08Qhd;`34w-G-6+vq^+mX#J$ME3Ii`}FrI#<+V)o9U0W$!nqR-S$qz7DZWS4$tZ zmgin=sU~+zBW}8+kY^>m-yHax$+MVMa%w(XChTJ3oU$c_9E0g)Cx@AMOieRCgZ;lc z`wG`jTobybk5f%aLFS7JLE6GDuO-%G&l=MoQ$2eo3cGY)>RC=%RXE#&$xA#M$Zzwz z-=+5@GrHWK&kmhXZ(mX7Vx>loM~tev8D-oVqi(3xJJr==|D7e=^D+Pch<7vWM@&@YO}^y|wL zbw%Rk9&bT7DR;2e=4MC!s;RYh8fjW0YU`a=->*{8Q-2)eCXC5KFc+iRkJ6vlpB27h@WVIIer(C^S#e?e&5S=hjTq9hGs~#Z`ip8yJ!jO zlIe(;m%&k4J|Q%N{p8oLg@zWp(A1J!5G7Ar;UZg1W#uZaC=FQKbY`!mgUTmSa%Y#z zzGMlL*Xf?adm=e0)Q)~S#w80ZZYnn_p=qpl0er>jndZMq6-$^?;G{<$8 zs=^@iknes#H!Wie>U4zrARW8RmQE+HI#mnW&y0MvU)Ap;+R@&Wm7-EUe`uU?thZgH zlAL^HJ7xIg`M89sBmXQ>Eqxs1sNxYa=y!LE$vTiTxbysOB7-|+O7t) zM5t<>Ndjw%_kN&=i%T<`q@|~DDO^W~h&m4Kw3Ll}mqT34h6?E67g>d(-R5R^Ku0fK 
zD)2hNSnz`z6*r9)gv^h~8lLBBrEg9q04#f&!}dA!Q!f5hkQ)S^LWNXD?)fhu$Y;;o z%-2i~_r;{rPjn!F{t7~a$~@d4tJnIEb7Y=-k&)ElFY`&j0yt0!gj?rFov6Z$*R5LBG#2>InBVmaWz|zrZQdbrDLRAdhpaORG2%%W;04ox*+?kYD zK#cF5wn{#@8R}4@SVHWl+UAW|oJmJc+vd`m)5$*fk2;GiZR5u^-HA?fQno07&@3xv z8!9Yq=%s}kP4c~NP`bbC@^?2Ql$yE7y1g1MrSMdMM*H$vS*p=xWUovL&Z}7(0lByTsl-i-tr-STAJWKYhyE!p{^FEJ)coBy6)qxBYu$Mj5pgHJ+#Ap0FYqu!M;quUc+n>wwt zB@n%{6ME}_&U=<1f~^PAUbAU+nDL%>YBoG|<0blHjw9XA$7B9(C5Agqp_5EE@Ws`G z+BARec(=&Bh@LMnirbqdQ)^CE_Ixqneaa^)-=hPR@13ZmJ)x6cWPjv!mA#$x66Qc( z=o?{qzR79gf0kfaI|lPZ?qzEYdd`6Q_fN;^DF6Z$JN@oMH6$^GlbN#^WNrdD%|jldK2^a2A6ygb%D zh+vIPJz`r-uHsPkl&=!h_MlK7y3kG#hM42OreF@UPNP!rkbywV%t+X&fAt(F~%a bool:\n", + " return (path / 'pyproject.toml').exists() and (path / 'src' / 'codex_app_server').exists()\n", + "\n", + "\n", + "def _iter_home_fallback_candidates(home: Path):\n", + " # bounded depth scan under home to support launching notebooks from unrelated cwd values\n", + " patterns = ('sdk/python', '*/sdk/python', '*/*/sdk/python', '*/*/*/sdk/python')\n", + " for pattern in patterns:\n", + " yield from home.glob(pattern)\n", + "\n", + "\n", + "def _find_sdk_python_dir(start: Path) -> Path | None:\n", + " checked = set()\n", + "\n", + " def _consider(candidate: Path) -> Path | None:\n", + " resolved = candidate.resolve()\n", + " if resolved in checked:\n", + " return None\n", + " checked.add(resolved)\n", + " if _is_sdk_python_dir(resolved):\n", + " return resolved\n", + " return None\n", + "\n", + " for candidate in [start, *start.parents]:\n", + " found = _consider(candidate)\n", + " if found is not None:\n", + " return found\n", + "\n", + " for candidate in [start / 'sdk' / 'python', *(parent / 'sdk' / 'python' for parent in start.parents)]:\n", + " found = _consider(candidate)\n", + " if found is not None:\n", + " return found\n", + "\n", + " env_dir = os.environ.get('CODEX_PYTHON_SDK_DIR')\n", + " if env_dir:\n", + " found = _consider(Path(env_dir).expanduser())\n", + " if found is not None:\n", + " return 
found\n", + "\n", + " for entry in sys.path:\n", + " if not entry:\n", + " continue\n", + " entry_path = Path(entry).expanduser()\n", + " for candidate in (entry_path, entry_path / 'sdk' / 'python'):\n", + " found = _consider(candidate)\n", + " if found is not None:\n", + " return found\n", + "\n", + " home = Path.home()\n", + " for candidate in _iter_home_fallback_candidates(home):\n", + " found = _consider(candidate)\n", + " if found is not None:\n", + " return found\n", + "\n", + " return None\n", + "\n", + "\n", + "repo_python_dir = _find_sdk_python_dir(Path.cwd())\n", + "if repo_python_dir is None:\n", + " raise RuntimeError('Could not locate sdk/python. Set CODEX_PYTHON_SDK_DIR to your sdk/python path.')\n", + "\n", + "src_dir = repo_python_dir / 'src'\n", + "if str(src_dir) not in sys.path:\n", + " sys.path.insert(0, str(src_dir))\n", + "\n", + "# Force fresh imports after SDK upgrades in the same notebook kernel.\n", + "for module_name in list(sys.modules):\n", + " if module_name == 'codex_app_server' or module_name.startswith('codex_app_server.'):\n", + " sys.modules.pop(module_name, None)\n", + "\n", + "print('Kernel:', sys.executable)\n", + "print('SDK source:', src_dir)\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Cell 2: imports (public only)\n", + "from codex_app_server import (\n", + " AsyncCodex,\n", + " Codex,\n", + " ImageInput,\n", + " LocalImageInput,\n", + " TextInput,\n", + " retry_on_overload,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Cell 3: simple sync conversation\n", + "with Codex() as codex:\n", + " thread = codex.thread_start(model='gpt-5', config={'model_reasoning_effort': 'high'})\n", + " turn = thread.turn(TextInput('Explain gradient descent in 3 bullets.'))\n", + " result = turn.run()\n", + "\n", + " print('server:', codex.metadata)\n", + " print('status:', 
result.status)\n", + " print(result.text)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Cell 4: multi-turn continuity in same thread\n", + "with Codex() as codex:\n", + " thread = codex.thread_start(model='gpt-5', config={'model_reasoning_effort': 'high'})\n", + "\n", + " first = thread.turn(TextInput('Give a short summary of transformers.')).run()\n", + " second = thread.turn(TextInput('Now explain that to a high-school student.')).run()\n", + "\n", + " print('first status:', first.status)\n", + " print('second status:', second.status)\n", + " print('second text:', second.text)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Cell 5: full thread lifecycle and branching (sync)\n", + "with Codex() as codex:\n", + " thread = codex.thread_start(model='gpt-5', config={'model_reasoning_effort': 'high'})\n", + " first = thread.turn(TextInput('One sentence about structured planning.')).run()\n", + " second = thread.turn(TextInput('Now restate it for a junior engineer.')).run()\n", + "\n", + " reopened = codex.thread_resume(thread.id)\n", + " listing_active = codex.thread_list(limit=20, archived=False)\n", + " reading = reopened.read(include_turns=True)\n", + "\n", + " _ = reopened.set_name('sdk-lifecycle-demo')\n", + " _ = codex.thread_archive(reopened.id)\n", + " listing_archived = codex.thread_list(limit=20, archived=True)\n", + " unarchived = codex.thread_unarchive(reopened.id)\n", + "\n", + " resumed_info = 'n/a'\n", + " try:\n", + " resumed = codex.thread_resume(\n", + " unarchived.id,\n", + " model='gpt-5',\n", + " config={'model_reasoning_effort': 'high'},\n", + " )\n", + " resumed_result = resumed.turn(TextInput('Continue in one short sentence.')).run()\n", + " resumed_info = f'{resumed_result.turn_id} {resumed_result.status}'\n", + " except Exception as e:\n", + " resumed_info = f'skipped({type(e).__name__})'\n", + "\n", + " 
forked_info = 'n/a'\n", + " try:\n", + " forked = codex.thread_fork(unarchived.id, model='gpt-5')\n", + " forked_result = forked.turn(TextInput('Take a different angle in one short sentence.')).run()\n", + " forked_info = f'{forked_result.turn_id} {forked_result.status}'\n", + " except Exception as e:\n", + " forked_info = f'skipped({type(e).__name__})'\n", + "\n", + " compact_info = 'sent'\n", + " try:\n", + " _ = unarchived.compact()\n", + " except Exception as e:\n", + " compact_info = f'skipped({type(e).__name__})'\n", + "\n", + " print('Lifecycle OK:', thread.id)\n", + " print('first:', first.turn_id, first.status)\n", + " print('second:', second.turn_id, second.status)\n", + " print('read.turns:', len(reading.thread.turns or []))\n", + " print('list.active:', len(listing_active.data))\n", + " print('list.archived:', len(listing_archived.data))\n", + " print('resumed:', resumed_info)\n", + " print('forked:', forked_info)\n", + " print('compact:', compact_info)\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Cell 5b: one turn with most optional turn params\n", + "from pathlib import Path\n", + "from codex_app_server import (\n", + " AskForApproval,\n", + " Personality,\n", + " ReasoningEffort,\n", + " ReasoningSummary,\n", + " SandboxPolicy,\n", + ")\n", + "\n", + "output_schema = {\n", + " 'type': 'object',\n", + " 'properties': {\n", + " 'summary': {'type': 'string'},\n", + " 'actions': {'type': 'array', 'items': {'type': 'string'}},\n", + " },\n", + " 'required': ['summary', 'actions'],\n", + " 'additionalProperties': False,\n", + "}\n", + "\n", + "sandbox_policy = SandboxPolicy.model_validate({'type': 'readOnly', 'access': {'type': 'fullAccess'}})\n", + "summary = ReasoningSummary.model_validate('concise')\n", + "\n", + "with Codex() as codex:\n", + " thread = codex.thread_start(model='gpt-5', config={'model_reasoning_effort': 'high'})\n", + " turn = thread.turn(\n", + " 
TextInput('Propose a safe production feature-flag rollout. Return JSON matching the schema.'),\n", + " approval_policy=AskForApproval.never,\n", + " cwd=str(Path.cwd()),\n", + " effort=ReasoningEffort.medium,\n", + " model='gpt-5',\n", + " output_schema=output_schema,\n", + " personality=Personality.pragmatic,\n", + " sandbox_policy=sandbox_policy,\n", + " summary=summary,\n", + " )\n", + " result = turn.run()\n", + "\n", + " print('status:', result.status)\n", + " print(result.text)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Cell 5c: choose highest model + highest supported reasoning, then run turns\n", + "from pathlib import Path\n", + "from codex_app_server import (\n", + " AskForApproval,\n", + " Personality,\n", + " ReasoningEffort,\n", + " ReasoningSummary,\n", + " SandboxPolicy,\n", + ")\n", + "\n", + "reasoning_rank = {\n", + " 'none': 0,\n", + " 'minimal': 1,\n", + " 'low': 2,\n", + " 'medium': 3,\n", + " 'high': 4,\n", + " 'xhigh': 5,\n", + "}\n", + "\n", + "\n", + "def pick_highest_model(models):\n", + " visible = [m for m in models if not m.hidden] or models\n", + " known_names = {m.id for m in visible} | {m.model for m in visible}\n", + " top_candidates = [m for m in visible if not (m.upgrade and m.upgrade in known_names)]\n", + " pool = top_candidates or visible\n", + " return max(pool, key=lambda m: (m.model, m.id))\n", + "\n", + "\n", + "def pick_highest_turn_effort(model) -> ReasoningEffort:\n", + " if not model.supported_reasoning_efforts:\n", + " return ReasoningEffort.medium\n", + " best = max(model.supported_reasoning_efforts, key=lambda opt: reasoning_rank.get(opt.reasoning_effort.value, -1))\n", + " return ReasoningEffort(best.reasoning_effort.value)\n", + "\n", + "\n", + "output_schema = {\n", + " 'type': 'object',\n", + " 'properties': {\n", + " 'summary': {'type': 'string'},\n", + " 'actions': {'type': 'array', 'items': {'type': 'string'}},\n", + " },\n", + " 
'required': ['summary', 'actions'],\n", + " 'additionalProperties': False,\n", + "}\n", + "sandbox_policy = SandboxPolicy.model_validate({'type': 'readOnly', 'access': {'type': 'fullAccess'}})\n", + "\n", + "with Codex() as codex:\n", + " models = codex.models(include_hidden=True)\n", + " selected_model = pick_highest_model(models.data)\n", + " selected_effort = pick_highest_turn_effort(selected_model)\n", + "\n", + " print('selected.model:', selected_model.model)\n", + " print('selected.effort:', selected_effort.value)\n", + "\n", + " thread = codex.thread_start(model=selected_model.model, config={'model_reasoning_effort': selected_effort.value})\n", + "\n", + " first = thread.turn(\n", + " TextInput('Give one short sentence about reliable production releases.'),\n", + " model=selected_model.model,\n", + " effort=selected_effort,\n", + " ).run()\n", + " print('agent.message:', first.text)\n", + " print('usage:', first.usage)\n", + "\n", + " second = thread.turn(\n", + " TextInput('Return JSON for a safe feature-flag rollout plan.'),\n", + " approval_policy=AskForApproval.never,\n", + " cwd=str(Path.cwd()),\n", + " effort=selected_effort,\n", + " model=selected_model.model,\n", + " output_schema=output_schema,\n", + " personality=Personality.pragmatic,\n", + " sandbox_policy=sandbox_policy,\n", + " summary=ReasoningSummary.model_validate('concise'),\n", + " ).run()\n", + " print('agent.message.params:', second.text)\n", + " print('usage.params:', second.usage)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Cell 6: multimodal with remote image\n", + "remote_image_url = 'https://raw.githubusercontent.com/github/explore/main/topics/python/python.png'\n", + "\n", + "with Codex() as codex:\n", + " thread = codex.thread_start(model='gpt-5', config={'model_reasoning_effort': 'high'})\n", + " result = thread.turn([\n", + " TextInput('What do you see in this image? 
3 bullets.'),\n", + " ImageInput(remote_image_url),\n", + " ]).run()\n", + "\n", + " print('status:', result.status)\n", + " print(result.text)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Cell 7: multimodal with local image (bundled asset)\n", + "local_image_path = repo_python_dir / 'examples' / 'assets' / 'sample_scene.png'\n", + "if not local_image_path.exists():\n", + " raise FileNotFoundError(f'Missing bundled image: {local_image_path}')\n", + "\n", + "with Codex() as codex:\n", + " thread = codex.thread_start(model='gpt-5', config={'model_reasoning_effort': 'high'})\n", + " result = thread.turn([\n", + " TextInput('Describe this local image in 2 bullets.'),\n", + " LocalImageInput(str(local_image_path.resolve())),\n", + " ]).run()\n", + "\n", + " print('status:', result.status)\n", + " print(result.text)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Cell 8: retry-on-overload pattern\n", + "with Codex() as codex:\n", + " thread = codex.thread_start(model='gpt-5', config={'model_reasoning_effort': 'high'})\n", + "\n", + " result = retry_on_overload(\n", + " lambda: thread.turn(TextInput('List 5 failure modes in distributed systems.')).run(),\n", + " max_attempts=3,\n", + " initial_delay_s=0.25,\n", + " max_delay_s=2.0,\n", + " )\n", + "\n", + " print('status:', result.status)\n", + " print(result.text)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Cell 9: full thread lifecycle and branching (async)\n", + "import asyncio\n", + "\n", + "\n", + "async def async_lifecycle_demo():\n", + " async with AsyncCodex() as codex:\n", + " thread = await codex.thread_start(model='gpt-5', config={'model_reasoning_effort': 'high'})\n", + " first = await (await thread.turn(TextInput('One sentence about structured planning.'))).run()\n", + " second = await (await 
thread.turn(TextInput('Now restate it for a junior engineer.'))).run()\n", + "\n", + " reopened = await codex.thread_resume(thread.id)\n", + " listing_active = await codex.thread_list(limit=20, archived=False)\n", + " reading = await reopened.read(include_turns=True)\n", + "\n", + " _ = await reopened.set_name('sdk-lifecycle-demo')\n", + " _ = await codex.thread_archive(reopened.id)\n", + " listing_archived = await codex.thread_list(limit=20, archived=True)\n", + " unarchived = await codex.thread_unarchive(reopened.id)\n", + "\n", + " resumed_info = 'n/a'\n", + " try:\n", + " resumed = await codex.thread_resume(\n", + " unarchived.id,\n", + " model='gpt-5',\n", + " config={'model_reasoning_effort': 'high'},\n", + " )\n", + " resumed_result = await (await resumed.turn(TextInput('Continue in one short sentence.'))).run()\n", + " resumed_info = f'{resumed_result.turn_id} {resumed_result.status}'\n", + " except Exception as e:\n", + " resumed_info = f'skipped({type(e).__name__})'\n", + "\n", + " forked_info = 'n/a'\n", + " try:\n", + " forked = await codex.thread_fork(unarchived.id, model='gpt-5')\n", + " forked_result = await (await forked.turn(TextInput('Take a different angle in one short sentence.'))).run()\n", + " forked_info = f'{forked_result.turn_id} {forked_result.status}'\n", + " except Exception as e:\n", + " forked_info = f'skipped({type(e).__name__})'\n", + "\n", + " compact_info = 'sent'\n", + " try:\n", + " _ = await unarchived.compact()\n", + " except Exception as e:\n", + " compact_info = f'skipped({type(e).__name__})'\n", + "\n", + " print('Lifecycle OK:', thread.id)\n", + " print('first:', first.turn_id, first.status)\n", + " print('second:', second.turn_id, second.status)\n", + " print('read.turns:', len(reading.thread.turns or []))\n", + " print('list.active:', len(listing_active.data))\n", + " print('list.archived:', len(listing_archived.data))\n", + " print('resumed:', resumed_info)\n", + " print('forked:', forked_info)\n", + " print('compact:', 
compact_info)\n", + "\n", + "\n", + "await async_lifecycle_demo()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Cell 10: async stream + steer + interrupt (best effort)\n", + "import asyncio\n", + "\n", + "\n", + "async def async_stream_demo():\n", + " async with AsyncCodex() as codex:\n", + " thread = await codex.thread_start(model='gpt-5', config={'model_reasoning_effort': 'high'})\n", + " turn = await thread.turn(TextInput('Count from 1 to 200 with commas, then one summary sentence.'))\n", + "\n", + " try:\n", + " _ = await turn.steer(TextInput('Keep it brief and stop after 20 numbers.'))\n", + " print('steer: sent')\n", + " except Exception as e:\n", + " print('steer: skipped', type(e).__name__)\n", + "\n", + " try:\n", + " _ = await turn.interrupt()\n", + " print('interrupt: sent')\n", + " except Exception as e:\n", + " print('interrupt: skipped', type(e).__name__)\n", + "\n", + " event_count = 0\n", + " async for event in turn.stream():\n", + " event_count += 1\n", + " print(event.method, event.payload)\n", + "\n", + " print('events.count:', event_count)\n", + "\n", + "\n", + "await async_stream_demo()\n", + "\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "name": "python", + "version": "3.10+" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/sdk/python/src/codex_app_server/__init__.py b/sdk/python/src/codex_app_server/__init__.py index aff63176b9f..4e2af18fc4e 100644 --- a/sdk/python/src/codex_app_server/__init__.py +++ b/sdk/python/src/codex_app_server/__init__.py @@ -1,10 +1,115 @@ +from .async_client import AsyncAppServerClient from .client import AppServerClient, AppServerConfig -from .errors import AppServerError, JsonRpcError, TransportClosedError +from .errors import ( + AppServerError, + AppServerRpcError, + InternalRpcError, + InvalidParamsError, + 
InvalidRequestError, + JsonRpcError, + MethodNotFoundError, + ParseError, + RetryLimitExceededError, + ServerBusyError, + TransportClosedError, + is_retryable_error, +) +from .generated.v2_types import ( + ThreadItem, + ThreadTokenUsageUpdatedNotification, + TurnCompletedNotificationPayload, +) +from .public_api import ( + AsyncCodex, + AsyncThread, + AsyncTurn, + Codex, + ImageInput, + InitializeResult, + Input, + InputItem, + LocalImageInput, + MentionInput, + SkillInput, + TextInput, + Thread, + Turn, + TurnResult, +) +from .public_types import ( + AskForApproval, + Personality, + PlanType, + ReasoningEffort, + ReasoningSummary, + SandboxMode, + SandboxPolicy, + ServiceTier, + ThreadForkParams, + ThreadListParams, + ThreadResumeParams, + ThreadSortKey, + ThreadSourceKind, + ThreadStartParams, + TurnStartParams, + TurnStatus, + TurnSteerParams, +) +from .retry import retry_on_overload + +__version__ = "0.2.0" __all__ = [ + "__version__", "AppServerClient", + "AsyncAppServerClient", "AppServerConfig", + "Codex", + "AsyncCodex", + "Thread", + "AsyncThread", + "Turn", + "AsyncTurn", + "TurnResult", + "InitializeResult", + "Input", + "InputItem", + "TextInput", + "ImageInput", + "LocalImageInput", + "SkillInput", + "MentionInput", + "ThreadItem", + "ThreadTokenUsageUpdatedNotification", + "TurnCompletedNotificationPayload", + "AskForApproval", + "Personality", + "PlanType", + "ReasoningEffort", + "ReasoningSummary", + "SandboxMode", + "SandboxPolicy", + "ServiceTier", + "ThreadStartParams", + "ThreadResumeParams", + "ThreadListParams", + "ThreadSortKey", + "ThreadSourceKind", + "ThreadForkParams", + "TurnStatus", + "TurnStartParams", + "TurnSteerParams", + "retry_on_overload", "AppServerError", - "JsonRpcError", "TransportClosedError", + "JsonRpcError", + "AppServerRpcError", + "ParseError", + "InvalidRequestError", + "MethodNotFoundError", + "InvalidParamsError", + "InternalRpcError", + "ServerBusyError", + "RetryLimitExceededError", + "is_retryable_error", ] diff 
--git a/sdk/python/src/codex_app_server/async_client.py b/sdk/python/src/codex_app_server/async_client.py new file mode 100644 index 00000000000..6ca0c42a78f --- /dev/null +++ b/sdk/python/src/codex_app_server/async_client.py @@ -0,0 +1,208 @@ +from __future__ import annotations + +import asyncio +from collections.abc import Iterator +from typing import AsyncIterator, Callable, Iterable, ParamSpec, TypeVar + +from pydantic import BaseModel + +from .client import AppServerClient, AppServerConfig +from .generated.v2_all import ( + AgentMessageDeltaNotification, + ModelListResponse, + ThreadArchiveResponse, + ThreadCompactStartResponse, + ThreadForkParams as V2ThreadForkParams, + ThreadForkResponse, + ThreadListParams as V2ThreadListParams, + ThreadListResponse, + ThreadReadResponse, + ThreadResumeParams as V2ThreadResumeParams, + ThreadResumeResponse, + ThreadSetNameResponse, + ThreadStartParams as V2ThreadStartParams, + ThreadStartResponse, + ThreadUnarchiveResponse, + TurnCompletedNotification, + TurnInterruptResponse, + TurnStartParams as V2TurnStartParams, + TurnStartResponse, + TurnSteerResponse, +) +from .models import InitializeResponse, JsonObject, Notification + +ModelT = TypeVar("ModelT", bound=BaseModel) +ParamsT = ParamSpec("ParamsT") +ReturnT = TypeVar("ReturnT") + + +class AsyncAppServerClient: + """Async wrapper around AppServerClient using thread offloading.""" + + def __init__(self, config: AppServerConfig | None = None) -> None: + self._sync = AppServerClient(config=config) + # Single stdio transport cannot be read safely from multiple threads. 
+ self._transport_lock = asyncio.Lock() + + async def __aenter__(self) -> "AsyncAppServerClient": + await self.start() + return self + + async def __aexit__(self, _exc_type, _exc, _tb) -> None: + await self.close() + + async def _call_sync( + self, + fn: Callable[ParamsT, ReturnT], + /, + *args: ParamsT.args, + **kwargs: ParamsT.kwargs, + ) -> ReturnT: + async with self._transport_lock: + return await asyncio.to_thread(fn, *args, **kwargs) + + @staticmethod + def _next_from_iterator( + iterator: Iterator[AgentMessageDeltaNotification], + ) -> tuple[bool, AgentMessageDeltaNotification | None]: + try: + return True, next(iterator) + except StopIteration: + return False, None + + async def start(self) -> None: + await self._call_sync(self._sync.start) + + async def close(self) -> None: + await self._call_sync(self._sync.close) + + async def initialize(self) -> InitializeResponse: + return await self._call_sync(self._sync.initialize) + + def acquire_turn_consumer(self, turn_id: str) -> None: + self._sync.acquire_turn_consumer(turn_id) + + def release_turn_consumer(self, turn_id: str) -> None: + self._sync.release_turn_consumer(turn_id) + + async def request( + self, + method: str, + params: JsonObject | None, + *, + response_model: type[ModelT], + ) -> ModelT: + return await self._call_sync( + self._sync.request, + method, + params, + response_model=response_model, + ) + + async def thread_start(self, params: V2ThreadStartParams | JsonObject | None = None) -> ThreadStartResponse: + return await self._call_sync(self._sync.thread_start, params) + + async def thread_resume( + self, + thread_id: str, + params: V2ThreadResumeParams | JsonObject | None = None, + ) -> ThreadResumeResponse: + return await self._call_sync(self._sync.thread_resume, thread_id, params) + + async def thread_list(self, params: V2ThreadListParams | JsonObject | None = None) -> ThreadListResponse: + return await self._call_sync(self._sync.thread_list, params) + + async def thread_read(self, thread_id: 
str, include_turns: bool = False) -> ThreadReadResponse: + return await self._call_sync(self._sync.thread_read, thread_id, include_turns) + + async def thread_fork( + self, + thread_id: str, + params: V2ThreadForkParams | JsonObject | None = None, + ) -> ThreadForkResponse: + return await self._call_sync(self._sync.thread_fork, thread_id, params) + + async def thread_archive(self, thread_id: str) -> ThreadArchiveResponse: + return await self._call_sync(self._sync.thread_archive, thread_id) + + async def thread_unarchive(self, thread_id: str) -> ThreadUnarchiveResponse: + return await self._call_sync(self._sync.thread_unarchive, thread_id) + + async def thread_set_name(self, thread_id: str, name: str) -> ThreadSetNameResponse: + return await self._call_sync(self._sync.thread_set_name, thread_id, name) + + async def thread_compact(self, thread_id: str) -> ThreadCompactStartResponse: + return await self._call_sync(self._sync.thread_compact, thread_id) + + async def turn_start( + self, + thread_id: str, + input_items: list[JsonObject] | JsonObject | str, + params: V2TurnStartParams | JsonObject | None = None, + ) -> TurnStartResponse: + return await self._call_sync(self._sync.turn_start, thread_id, input_items, params) + + async def turn_interrupt(self, thread_id: str, turn_id: str) -> TurnInterruptResponse: + return await self._call_sync(self._sync.turn_interrupt, thread_id, turn_id) + + async def turn_steer( + self, + thread_id: str, + expected_turn_id: str, + input_items: list[JsonObject] | JsonObject | str, + ) -> TurnSteerResponse: + return await self._call_sync( + self._sync.turn_steer, + thread_id, + expected_turn_id, + input_items, + ) + + async def model_list(self, include_hidden: bool = False) -> ModelListResponse: + return await self._call_sync(self._sync.model_list, include_hidden) + + async def request_with_retry_on_overload( + self, + method: str, + params: JsonObject | None, + *, + response_model: type[ModelT], + max_attempts: int = 3, + initial_delay_s: 
float = 0.25, + max_delay_s: float = 2.0, + ) -> ModelT: + return await self._call_sync( + self._sync.request_with_retry_on_overload, + method, + params, + response_model=response_model, + max_attempts=max_attempts, + initial_delay_s=initial_delay_s, + max_delay_s=max_delay_s, + ) + + async def next_notification(self) -> Notification: + return await self._call_sync(self._sync.next_notification) + + async def wait_for_turn_completed(self, turn_id: str) -> TurnCompletedNotification: + return await self._call_sync(self._sync.wait_for_turn_completed, turn_id) + + async def stream_until_methods(self, methods: Iterable[str] | str) -> list[Notification]: + return await self._call_sync(self._sync.stream_until_methods, methods) + + async def stream_text( + self, + thread_id: str, + text: str, + params: V2TurnStartParams | JsonObject | None = None, + ) -> AsyncIterator[AgentMessageDeltaNotification]: + async with self._transport_lock: + iterator = self._sync.stream_text(thread_id, text, params) + while True: + has_value, chunk = await asyncio.to_thread( + self._next_from_iterator, + iterator, + ) + if not has_value: + break + yield chunk diff --git a/sdk/python/src/codex_app_server/generated/v2_types.py b/sdk/python/src/codex_app_server/generated/v2_types.py index 932ab438dba..ef28f982e5a 100644 --- a/sdk/python/src/codex_app_server/generated/v2_types.py +++ b/sdk/python/src/codex_app_server/generated/v2_types.py @@ -1,25 +1,23 @@ -"""Stable aliases over full v2 autogenerated models (datamodel-code-generator).""" +"""Stable aliases over the canonical generated v2 models.""" -from .v2_all.ModelListResponse import ModelListResponse -from .v2_all.ThreadCompactStartResponse import ThreadCompactStartResponse -from .v2_all.ThreadListResponse import ThreadListResponse -from .v2_all.ThreadReadResponse import ThreadReadResponse -from .v2_all.ThreadTokenUsageUpdatedNotification import ( +from .v2_all import ( + ModelListResponse, + ThreadCompactStartResponse, + ThreadItem, + 
ThreadListResponse, + ThreadReadResponse, ThreadTokenUsageUpdatedNotification, -) -from .v2_all.TurnCompletedNotification import ThreadItem153 as ThreadItem -from .v2_all.TurnCompletedNotification import ( TurnCompletedNotification as TurnCompletedNotificationPayload, + TurnSteerResponse, ) -from .v2_all.TurnSteerResponse import TurnSteerResponse __all__ = [ "ModelListResponse", "ThreadCompactStartResponse", + "ThreadItem", "ThreadListResponse", "ThreadReadResponse", "ThreadTokenUsageUpdatedNotification", "TurnCompletedNotificationPayload", "TurnSteerResponse", - "ThreadItem", ] diff --git a/sdk/python/src/codex_app_server/public_api.py b/sdk/python/src/codex_app_server/public_api.py new file mode 100644 index 00000000000..842dcfe539d --- /dev/null +++ b/sdk/python/src/codex_app_server/public_api.py @@ -0,0 +1,795 @@ +from __future__ import annotations + +import asyncio +from dataclasses import dataclass +from typing import AsyncIterator, Iterator + +from .async_client import AsyncAppServerClient +from .client import AppServerClient, AppServerConfig +from .generated.v2_all import ( + AgentMessageDeltaNotification, + RawResponseItemCompletedNotification, + ThreadArchiveResponse, + ThreadSetNameResponse, + TurnError, + TurnInterruptResponse, +) +from .generated.v2_types import ( + ModelListResponse, + ThreadCompactStartResponse, + ThreadItem, + ThreadListResponse, + ThreadReadResponse, + ThreadTokenUsageUpdatedNotification, + TurnCompletedNotificationPayload, + TurnSteerResponse, +) +from .models import InitializeResponse, JsonObject, Notification +from .public_types import ( + AskForApproval, + Personality, + ReasoningEffort, + ReasoningSummary, + SandboxMode, + SandboxPolicy, + ServiceTier, + ThreadForkParams, + ThreadListParams, + ThreadResumeParams, + ThreadSortKey, + ThreadSourceKind, + ThreadStartParams, + TurnStartParams, + TurnStatus, +) + + +@dataclass(slots=True) +class TurnResult: + thread_id: str + turn_id: str + status: TurnStatus + error: TurnError | 
None + text: str + items: list[ThreadItem] + usage: ThreadTokenUsageUpdatedNotification | None = None + + +@dataclass(slots=True) +class TextInput: + text: str + + +@dataclass(slots=True) +class ImageInput: + url: str + + +@dataclass(slots=True) +class LocalImageInput: + path: str + + +@dataclass(slots=True) +class SkillInput: + name: str + path: str + + +@dataclass(slots=True) +class MentionInput: + name: str + path: str + + +InputItem = TextInput | ImageInput | LocalImageInput | SkillInput | MentionInput +Input = list[InputItem] | InputItem + + +@dataclass(slots=True) +class InitializeResult: + server_name: str + server_version: str + user_agent: str + + +def _to_wire_item(item: InputItem) -> JsonObject: + if isinstance(item, TextInput): + return {"type": "text", "text": item.text} + if isinstance(item, ImageInput): + return {"type": "image", "url": item.url} + if isinstance(item, LocalImageInput): + return {"type": "localImage", "path": item.path} + if isinstance(item, SkillInput): + return {"type": "skill", "name": item.name, "path": item.path} + if isinstance(item, MentionInput): + return {"type": "mention", "name": item.name, "path": item.path} + raise TypeError(f"unsupported input item: {type(item)!r}") + + +def _to_wire_input(input: Input) -> list[JsonObject]: + if isinstance(input, list): + return [_to_wire_item(i) for i in input] + return [_to_wire_item(input)] + + +def _split_user_agent(user_agent: str) -> tuple[str | None, str | None]: + raw = user_agent.strip() + if not raw: + return None, None + if "/" in raw: + name, version = raw.split("/", 1) + return (name or None), (version or None) + parts = raw.split(maxsplit=1) + if len(parts) == 2: + return parts[0], parts[1] + return raw, None + + +def _enum_value(value: object) -> object: + return getattr(value, "value", value) + + +def _assistant_output_text_chunks( + notification: RawResponseItemCompletedNotification, +) -> list[str]: + item = notification.item.root + if _enum_value(getattr(item, "type", 
None)) != "message": + return [] + if getattr(item, "role", None) != "assistant": + return [] + + chunks: list[str] = [] + for content in getattr(item, "content", []) or []: + content_item = getattr(content, "root", content) + if _enum_value(getattr(content_item, "type", None)) != "output_text": + continue + text = getattr(content_item, "text", None) + if isinstance(text, str) and text: + chunks.append(text) + return chunks + + +def _build_turn_result( + completed: TurnCompletedNotificationPayload | None, + usage: ThreadTokenUsageUpdatedNotification | None, + delta_chunks: list[str], + raw_text_chunks: list[str], +) -> TurnResult: + if completed is None: + raise RuntimeError("turn completed event not received") + if completed.turn.status == TurnStatus.completed and usage is None: + raise RuntimeError( + "thread/tokenUsage/updated notification not received for completed turn" + ) + + text = "".join(delta_chunks) if delta_chunks else "".join(raw_text_chunks) + return TurnResult( + thread_id=completed.thread_id, + turn_id=completed.turn.id, + status=completed.turn.status, + error=completed.turn.error, + text=text, + items=list(completed.turn.items or []), + usage=usage, + ) + + +class Codex: + """Minimal typed SDK surface for app-server v2.""" + + def __init__(self, config: AppServerConfig | None = None) -> None: + self._client = AppServerClient(config=config) + try: + self._client.start() + self._init = self._parse_initialize(self._client.initialize()) + except Exception: + self._client.close() + raise + + def __enter__(self) -> "Codex": + return self + + def __exit__(self, _exc_type, _exc, _tb) -> None: + self.close() + + @staticmethod + def _parse_initialize(payload: InitializeResponse) -> InitializeResult: + user_agent = (payload.userAgent or "").strip() + server = payload.serverInfo + + server_name: str | None = None + server_version: str | None = None + + if server is not None: + server_name = (server.name or "").strip() or None + server_version = 
(server.version or "").strip() or None + + if (server_name is None or server_version is None) and user_agent: + parsed_name, parsed_version = _split_user_agent(user_agent) + if server_name is None: + server_name = parsed_name + if server_version is None: + server_version = parsed_version + + normalized_server_name = (server_name or "").strip() + normalized_server_version = (server_version or "").strip() + if not user_agent or not normalized_server_name or not normalized_server_version: + raise RuntimeError( + "initialize response missing required metadata " + f"(user_agent={user_agent!r}, server_name={normalized_server_name!r}, server_version={normalized_server_version!r})" + ) + + return InitializeResult( + server_name=normalized_server_name, + server_version=normalized_server_version, + user_agent=user_agent, + ) + + @property + def metadata(self) -> InitializeResult: + return self._init + + def close(self) -> None: + self._client.close() + + # BEGIN GENERATED: Codex.flat_methods + def thread_start( + self, + *, + approval_policy: AskForApproval | None = None, + base_instructions: str | None = None, + config: JsonObject | None = None, + cwd: str | None = None, + developer_instructions: str | None = None, + ephemeral: bool | None = None, + model: str | None = None, + model_provider: str | None = None, + personality: Personality | None = None, + sandbox: SandboxMode | None = None, + service_name: str | None = None, + service_tier: ServiceTier | None = None, + ) -> Thread: + params = ThreadStartParams( + approval_policy=approval_policy, + base_instructions=base_instructions, + config=config, + cwd=cwd, + developer_instructions=developer_instructions, + ephemeral=ephemeral, + model=model, + model_provider=model_provider, + personality=personality, + sandbox=sandbox, + service_name=service_name, + service_tier=service_tier, + ) + started = self._client.thread_start(params) + return Thread(self._client, started.thread.id) + + def thread_list( + self, + *, + archived: 
bool | None = None, + cursor: str | None = None, + cwd: str | None = None, + limit: int | None = None, + model_providers: list[str] | None = None, + search_term: str | None = None, + sort_key: ThreadSortKey | None = None, + source_kinds: list[ThreadSourceKind] | None = None, + ) -> ThreadListResponse: + params = ThreadListParams( + archived=archived, + cursor=cursor, + cwd=cwd, + limit=limit, + model_providers=model_providers, + search_term=search_term, + sort_key=sort_key, + source_kinds=source_kinds, + ) + return self._client.thread_list(params) + + def thread_resume( + self, + thread_id: str, + *, + approval_policy: AskForApproval | None = None, + base_instructions: str | None = None, + config: JsonObject | None = None, + cwd: str | None = None, + developer_instructions: str | None = None, + model: str | None = None, + model_provider: str | None = None, + personality: Personality | None = None, + sandbox: SandboxMode | None = None, + service_tier: ServiceTier | None = None, + ) -> Thread: + params = ThreadResumeParams( + thread_id=thread_id, + approval_policy=approval_policy, + base_instructions=base_instructions, + config=config, + cwd=cwd, + developer_instructions=developer_instructions, + model=model, + model_provider=model_provider, + personality=personality, + sandbox=sandbox, + service_tier=service_tier, + ) + resumed = self._client.thread_resume(thread_id, params) + return Thread(self._client, resumed.thread.id) + + def thread_fork( + self, + thread_id: str, + *, + approval_policy: AskForApproval | None = None, + base_instructions: str | None = None, + config: JsonObject | None = None, + cwd: str | None = None, + developer_instructions: str | None = None, + ephemeral: bool | None = None, + model: str | None = None, + model_provider: str | None = None, + sandbox: SandboxMode | None = None, + service_tier: ServiceTier | None = None, + ) -> Thread: + params = ThreadForkParams( + thread_id=thread_id, + approval_policy=approval_policy, + 
base_instructions=base_instructions, + config=config, + cwd=cwd, + developer_instructions=developer_instructions, + ephemeral=ephemeral, + model=model, + model_provider=model_provider, + sandbox=sandbox, + service_tier=service_tier, + ) + forked = self._client.thread_fork(thread_id, params) + return Thread(self._client, forked.thread.id) + + def thread_archive(self, thread_id: str) -> ThreadArchiveResponse: + return self._client.thread_archive(thread_id) + + def thread_unarchive(self, thread_id: str) -> Thread: + unarchived = self._client.thread_unarchive(thread_id) + return Thread(self._client, unarchived.thread.id) + # END GENERATED: Codex.flat_methods + + def models(self, *, include_hidden: bool = False) -> ModelListResponse: + return self._client.model_list(include_hidden=include_hidden) + + +class AsyncCodex: + """Async mirror of :class:`Codex` with matching method shapes.""" + + def __init__(self, config: AppServerConfig | None = None) -> None: + self._client = AsyncAppServerClient(config=config) + self._init: InitializeResult | None = None + self._initialized = False + self._init_lock = asyncio.Lock() + + async def __aenter__(self) -> "AsyncCodex": + await self._ensure_initialized() + return self + + async def __aexit__(self, _exc_type, _exc, _tb) -> None: + await self.close() + + async def _ensure_initialized(self) -> None: + if self._initialized: + return + async with self._init_lock: + if self._initialized: + return + try: + await self._client.start() + payload = await self._client.initialize() + self._init = Codex._parse_initialize(payload) + self._initialized = True + except Exception: + await self._client.close() + self._init = None + self._initialized = False + raise + + @property + def metadata(self) -> InitializeResult: + if self._init is None: + raise RuntimeError( + "AsyncCodex is not initialized yet. Use `async with AsyncCodex()` or call an async API first." 
+ ) + return self._init + + async def close(self) -> None: + await self._client.close() + self._init = None + self._initialized = False + + # BEGIN GENERATED: AsyncCodex.flat_methods + async def thread_start( + self, + *, + approval_policy: AskForApproval | None = None, + base_instructions: str | None = None, + config: JsonObject | None = None, + cwd: str | None = None, + developer_instructions: str | None = None, + ephemeral: bool | None = None, + model: str | None = None, + model_provider: str | None = None, + personality: Personality | None = None, + sandbox: SandboxMode | None = None, + service_name: str | None = None, + service_tier: ServiceTier | None = None, + ) -> AsyncThread: + await self._ensure_initialized() + params = ThreadStartParams( + approval_policy=approval_policy, + base_instructions=base_instructions, + config=config, + cwd=cwd, + developer_instructions=developer_instructions, + ephemeral=ephemeral, + model=model, + model_provider=model_provider, + personality=personality, + sandbox=sandbox, + service_name=service_name, + service_tier=service_tier, + ) + started = await self._client.thread_start(params) + return AsyncThread(self, started.thread.id) + + async def thread_list( + self, + *, + archived: bool | None = None, + cursor: str | None = None, + cwd: str | None = None, + limit: int | None = None, + model_providers: list[str] | None = None, + search_term: str | None = None, + sort_key: ThreadSortKey | None = None, + source_kinds: list[ThreadSourceKind] | None = None, + ) -> ThreadListResponse: + await self._ensure_initialized() + params = ThreadListParams( + archived=archived, + cursor=cursor, + cwd=cwd, + limit=limit, + model_providers=model_providers, + search_term=search_term, + sort_key=sort_key, + source_kinds=source_kinds, + ) + return await self._client.thread_list(params) + + async def thread_resume( + self, + thread_id: str, + *, + approval_policy: AskForApproval | None = None, + base_instructions: str | None = None, + config: 
JsonObject | None = None, + cwd: str | None = None, + developer_instructions: str | None = None, + model: str | None = None, + model_provider: str | None = None, + personality: Personality | None = None, + sandbox: SandboxMode | None = None, + service_tier: ServiceTier | None = None, + ) -> AsyncThread: + await self._ensure_initialized() + params = ThreadResumeParams( + thread_id=thread_id, + approval_policy=approval_policy, + base_instructions=base_instructions, + config=config, + cwd=cwd, + developer_instructions=developer_instructions, + model=model, + model_provider=model_provider, + personality=personality, + sandbox=sandbox, + service_tier=service_tier, + ) + resumed = await self._client.thread_resume(thread_id, params) + return AsyncThread(self, resumed.thread.id) + + async def thread_fork( + self, + thread_id: str, + *, + approval_policy: AskForApproval | None = None, + base_instructions: str | None = None, + config: JsonObject | None = None, + cwd: str | None = None, + developer_instructions: str | None = None, + ephemeral: bool | None = None, + model: str | None = None, + model_provider: str | None = None, + sandbox: SandboxMode | None = None, + service_tier: ServiceTier | None = None, + ) -> AsyncThread: + await self._ensure_initialized() + params = ThreadForkParams( + thread_id=thread_id, + approval_policy=approval_policy, + base_instructions=base_instructions, + config=config, + cwd=cwd, + developer_instructions=developer_instructions, + ephemeral=ephemeral, + model=model, + model_provider=model_provider, + sandbox=sandbox, + service_tier=service_tier, + ) + forked = await self._client.thread_fork(thread_id, params) + return AsyncThread(self, forked.thread.id) + + async def thread_archive(self, thread_id: str) -> ThreadArchiveResponse: + await self._ensure_initialized() + return await self._client.thread_archive(thread_id) + + async def thread_unarchive(self, thread_id: str) -> AsyncThread: + await self._ensure_initialized() + unarchived = await 
self._client.thread_unarchive(thread_id) + return AsyncThread(self, unarchived.thread.id) + # END GENERATED: AsyncCodex.flat_methods + + async def models(self, *, include_hidden: bool = False) -> ModelListResponse: + await self._ensure_initialized() + return await self._client.model_list(include_hidden=include_hidden) + + +@dataclass(slots=True) +class Thread: + _client: AppServerClient + id: str + + # BEGIN GENERATED: Thread.flat_methods + def turn( + self, + input: Input, + *, + approval_policy: AskForApproval | None = None, + cwd: str | None = None, + effort: ReasoningEffort | None = None, + model: str | None = None, + output_schema: JsonObject | None = None, + personality: Personality | None = None, + sandbox_policy: SandboxPolicy | None = None, + service_tier: ServiceTier | None = None, + summary: ReasoningSummary | None = None, + ) -> Turn: + wire_input = _to_wire_input(input) + params = TurnStartParams( + thread_id=self.id, + input=wire_input, + approval_policy=approval_policy, + cwd=cwd, + effort=effort, + model=model, + output_schema=output_schema, + personality=personality, + sandbox_policy=sandbox_policy, + service_tier=service_tier, + summary=summary, + ) + turn = self._client.turn_start(self.id, wire_input, params=params) + return Turn(self._client, self.id, turn.turn.id) + # END GENERATED: Thread.flat_methods + + def read(self, *, include_turns: bool = False) -> ThreadReadResponse: + return self._client.thread_read(self.id, include_turns=include_turns) + + def set_name(self, name: str) -> ThreadSetNameResponse: + return self._client.thread_set_name(self.id, name) + + def compact(self) -> ThreadCompactStartResponse: + return self._client.thread_compact(self.id) + + +@dataclass(slots=True) +class AsyncThread: + _codex: AsyncCodex + id: str + + # BEGIN GENERATED: AsyncThread.flat_methods + async def turn( + self, + input: Input, + *, + approval_policy: AskForApproval | None = None, + cwd: str | None = None, + effort: ReasoningEffort | None = None, + 
model: str | None = None, + output_schema: JsonObject | None = None, + personality: Personality | None = None, + sandbox_policy: SandboxPolicy | None = None, + service_tier: ServiceTier | None = None, + summary: ReasoningSummary | None = None, + ) -> AsyncTurn: + await self._codex._ensure_initialized() + wire_input = _to_wire_input(input) + params = TurnStartParams( + thread_id=self.id, + input=wire_input, + approval_policy=approval_policy, + cwd=cwd, + effort=effort, + model=model, + output_schema=output_schema, + personality=personality, + sandbox_policy=sandbox_policy, + service_tier=service_tier, + summary=summary, + ) + turn = await self._codex._client.turn_start( + self.id, + wire_input, + params=params, + ) + return AsyncTurn(self._codex, self.id, turn.turn.id) + # END GENERATED: AsyncThread.flat_methods + + async def read(self, *, include_turns: bool = False) -> ThreadReadResponse: + await self._codex._ensure_initialized() + return await self._codex._client.thread_read(self.id, include_turns=include_turns) + + async def set_name(self, name: str) -> ThreadSetNameResponse: + await self._codex._ensure_initialized() + return await self._codex._client.thread_set_name(self.id, name) + + async def compact(self) -> ThreadCompactStartResponse: + await self._codex._ensure_initialized() + return await self._codex._client.thread_compact(self.id) + + +@dataclass(slots=True) +class Turn: + _client: AppServerClient + thread_id: str + id: str + + def steer(self, input: Input) -> TurnSteerResponse: + return self._client.turn_steer(self.thread_id, self.id, _to_wire_input(input)) + + def interrupt(self) -> TurnInterruptResponse: + return self._client.turn_interrupt(self.thread_id, self.id) + + def stream(self) -> Iterator[Notification]: + # TODO: replace this client-wide experimental guard with per-turn event demux. 
+ self._client.acquire_turn_consumer(self.id) + try: + while True: + event = self._client.next_notification() + yield event + if ( + event.method == "turn/completed" + and isinstance(event.payload, TurnCompletedNotificationPayload) + and event.payload.turn.id == self.id + ): + break + finally: + self._client.release_turn_consumer(self.id) + + def run(self) -> TurnResult: + completed: TurnCompletedNotificationPayload | None = None + usage: ThreadTokenUsageUpdatedNotification | None = None + delta_chunks: list[str] = [] + raw_text_chunks: list[str] = [] + + stream = self.stream() + try: + for event in stream: + payload = event.payload + if ( + isinstance(payload, AgentMessageDeltaNotification) + and payload.turn_id == self.id + ): + delta_chunks.append(payload.delta) + continue + if ( + isinstance(payload, RawResponseItemCompletedNotification) + and payload.turn_id == self.id + ): + raw_text_chunks.extend(_assistant_output_text_chunks(payload)) + continue + if ( + isinstance(payload, ThreadTokenUsageUpdatedNotification) + and payload.turn_id == self.id + ): + usage = payload + continue + if ( + isinstance(payload, TurnCompletedNotificationPayload) + and payload.turn.id == self.id + ): + completed = payload + finally: + stream.close() + + return _build_turn_result(completed, usage, delta_chunks, raw_text_chunks) + + +@dataclass(slots=True) +class AsyncTurn: + _codex: AsyncCodex + thread_id: str + id: str + + async def steer(self, input: Input) -> TurnSteerResponse: + await self._codex._ensure_initialized() + return await self._codex._client.turn_steer( + self.thread_id, + self.id, + _to_wire_input(input), + ) + + async def interrupt(self) -> TurnInterruptResponse: + await self._codex._ensure_initialized() + return await self._codex._client.turn_interrupt(self.thread_id, self.id) + + async def stream(self) -> AsyncIterator[Notification]: + await self._codex._ensure_initialized() + # TODO: replace this client-wide experimental guard with per-turn event demux. 
+ self._codex._client.acquire_turn_consumer(self.id) + try: + while True: + event = await self._codex._client.next_notification() + yield event + if ( + event.method == "turn/completed" + and isinstance(event.payload, TurnCompletedNotificationPayload) + and event.payload.turn.id == self.id + ): + break + finally: + self._codex._client.release_turn_consumer(self.id) + + async def run(self) -> TurnResult: + completed: TurnCompletedNotificationPayload | None = None + usage: ThreadTokenUsageUpdatedNotification | None = None + delta_chunks: list[str] = [] + raw_text_chunks: list[str] = [] + + stream = self.stream() + try: + async for event in stream: + payload = event.payload + if ( + isinstance(payload, AgentMessageDeltaNotification) + and payload.turn_id == self.id + ): + delta_chunks.append(payload.delta) + continue + if ( + isinstance(payload, RawResponseItemCompletedNotification) + and payload.turn_id == self.id + ): + raw_text_chunks.extend(_assistant_output_text_chunks(payload)) + continue + if ( + isinstance(payload, ThreadTokenUsageUpdatedNotification) + and payload.turn_id == self.id + ): + usage = payload + continue + if ( + isinstance(payload, TurnCompletedNotificationPayload) + and payload.turn.id == self.id + ): + completed = payload + finally: + await stream.aclose() + + return _build_turn_result(completed, usage, delta_chunks, raw_text_chunks) diff --git a/sdk/python/src/codex_app_server/public_types.py b/sdk/python/src/codex_app_server/public_types.py new file mode 100644 index 00000000000..bc7e67d3ce2 --- /dev/null +++ b/sdk/python/src/codex_app_server/public_types.py @@ -0,0 +1,41 @@ +"""Shallow public aliases over the generated v2 wire models.""" + +from .generated.v2_all import ( + AskForApproval, + Personality, + PlanType, + ReasoningEffort, + ReasoningSummary, + SandboxMode, + SandboxPolicy, + ServiceTier, + ThreadForkParams, + ThreadListParams, + ThreadResumeParams, + ThreadSortKey, + ThreadSourceKind, + ThreadStartParams, + TurnStartParams, + 
TurnStatus, + TurnSteerParams, +) + +__all__ = [ + "AskForApproval", + "Personality", + "PlanType", + "ReasoningEffort", + "ReasoningSummary", + "SandboxMode", + "SandboxPolicy", + "ServiceTier", + "ThreadForkParams", + "ThreadListParams", + "ThreadResumeParams", + "ThreadSortKey", + "ThreadSourceKind", + "ThreadStartParams", + "TurnStartParams", + "TurnStatus", + "TurnSteerParams", +] diff --git a/sdk/python/tests/test_async_client_behavior.py b/sdk/python/tests/test_async_client_behavior.py new file mode 100644 index 00000000000..580ff2a93bf --- /dev/null +++ b/sdk/python/tests/test_async_client_behavior.py @@ -0,0 +1,64 @@ +from __future__ import annotations + +import asyncio +import time + +from codex_app_server.async_client import AsyncAppServerClient + + +def test_async_client_serializes_transport_calls() -> None: + async def scenario() -> int: + client = AsyncAppServerClient() + active = 0 + max_active = 0 + + def fake_model_list(include_hidden: bool = False) -> bool: + nonlocal active, max_active + active += 1 + max_active = max(max_active, active) + time.sleep(0.05) + active -= 1 + return include_hidden + + client._sync.model_list = fake_model_list # type: ignore[method-assign] + await asyncio.gather(client.model_list(), client.model_list()) + return max_active + + assert asyncio.run(scenario()) == 1 + + +def test_async_stream_text_is_incremental_and_blocks_parallel_calls() -> None: + async def scenario() -> tuple[str, list[str], bool]: + client = AsyncAppServerClient() + + def fake_stream_text(thread_id: str, text: str, params=None): # type: ignore[no-untyped-def] + yield "first" + time.sleep(0.03) + yield "second" + yield "third" + + def fake_model_list(include_hidden: bool = False) -> str: + return "done" + + client._sync.stream_text = fake_stream_text # type: ignore[method-assign] + client._sync.model_list = fake_model_list # type: ignore[method-assign] + + stream = client.stream_text("thread-1", "hello") + first = await anext(stream) + + 
blocked_before_stream_done = False + competing_call = asyncio.create_task(client.model_list()) + await asyncio.sleep(0.01) + blocked_before_stream_done = not competing_call.done() + + remaining: list[str] = [] + async for item in stream: + remaining.append(item) + + await competing_call + return first, remaining, blocked_before_stream_done + + first, remaining, blocked = asyncio.run(scenario()) + assert first == "first" + assert remaining == ["second", "third"] + assert blocked diff --git a/sdk/python/tests/test_public_api_runtime_behavior.py b/sdk/python/tests/test_public_api_runtime_behavior.py new file mode 100644 index 00000000000..414460905d8 --- /dev/null +++ b/sdk/python/tests/test_public_api_runtime_behavior.py @@ -0,0 +1,286 @@ +from __future__ import annotations + +import asyncio +from collections import deque +from pathlib import Path + +import pytest + +import codex_app_server.public_api as public_api_module +from codex_app_server.client import AppServerClient +from codex_app_server.generated.v2_all import ( + AgentMessageDeltaNotification, + RawResponseItemCompletedNotification, + ThreadTokenUsageUpdatedNotification, +) +from codex_app_server.models import InitializeResponse, Notification +from codex_app_server.public_api import AsyncCodex, AsyncTurn, Codex, Turn +from codex_app_server.public_types import TurnStatus + +ROOT = Path(__file__).resolve().parents[1] + + +def _delta_notification( + *, + thread_id: str = "thread-1", + turn_id: str = "turn-1", + text: str = "delta-text", +) -> Notification: + return Notification( + method="item/agentMessage/delta", + payload=AgentMessageDeltaNotification.model_validate( + { + "delta": text, + "itemId": "item-1", + "threadId": thread_id, + "turnId": turn_id, + } + ), + ) + + +def _raw_response_notification( + *, + thread_id: str = "thread-1", + turn_id: str = "turn-1", + text: str = "raw-text", +) -> Notification: + return Notification( + method="rawResponseItem/completed", + 
payload=RawResponseItemCompletedNotification.model_validate( + { + "item": { + "type": "message", + "role": "assistant", + "content": [{"type": "output_text", "text": text}], + }, + "threadId": thread_id, + "turnId": turn_id, + } + ), + ) + + +def _usage_notification( + *, + thread_id: str = "thread-1", + turn_id: str = "turn-1", +) -> Notification: + return Notification( + method="thread/tokenUsage/updated", + payload=ThreadTokenUsageUpdatedNotification.model_validate( + { + "threadId": thread_id, + "turnId": turn_id, + "tokenUsage": { + "last": { + "cachedInputTokens": 0, + "inputTokens": 1, + "outputTokens": 2, + "reasoningOutputTokens": 0, + "totalTokens": 3, + }, + "total": { + "cachedInputTokens": 0, + "inputTokens": 1, + "outputTokens": 2, + "reasoningOutputTokens": 0, + "totalTokens": 3, + }, + }, + } + ), + ) + + +def _completed_notification( + *, + thread_id: str = "thread-1", + turn_id: str = "turn-1", + status: str = "completed", +) -> Notification: + return Notification( + method="turn/completed", + payload=public_api_module.TurnCompletedNotificationPayload.model_validate( + { + "threadId": thread_id, + "turn": { + "id": turn_id, + "items": [], + "status": status, + }, + } + ), + ) + + +def test_codex_init_failure_closes_client(monkeypatch: pytest.MonkeyPatch) -> None: + closed: list[bool] = [] + + class FakeClient: + def __init__(self, config=None) -> None: # noqa: ANN001,ARG002 + self._closed = False + + def start(self) -> None: + return None + + def initialize(self) -> InitializeResponse: + return InitializeResponse.model_validate({}) + + def close(self) -> None: + self._closed = True + closed.append(True) + + monkeypatch.setattr(public_api_module, "AppServerClient", FakeClient) + + with pytest.raises(RuntimeError, match="missing required metadata"): + Codex() + + assert closed == [True] + + +def test_async_codex_init_failure_closes_client() -> None: + async def scenario() -> None: + codex = AsyncCodex() + close_calls = 0 + + async def fake_start() 
-> None: + return None + + async def fake_initialize() -> InitializeResponse: + return InitializeResponse.model_validate({}) + + async def fake_close() -> None: + nonlocal close_calls + close_calls += 1 + + codex._client.start = fake_start # type: ignore[method-assign] + codex._client.initialize = fake_initialize # type: ignore[method-assign] + codex._client.close = fake_close # type: ignore[method-assign] + + with pytest.raises(RuntimeError, match="missing required metadata"): + await codex.models() + + assert close_calls == 1 + assert codex._initialized is False + assert codex._init is None + + asyncio.run(scenario()) + + +def test_async_codex_initializes_only_once_under_concurrency() -> None: + async def scenario() -> None: + codex = AsyncCodex() + start_calls = 0 + initialize_calls = 0 + ready = asyncio.Event() + + async def fake_start() -> None: + nonlocal start_calls + start_calls += 1 + + async def fake_initialize() -> InitializeResponse: + nonlocal initialize_calls + initialize_calls += 1 + ready.set() + await asyncio.sleep(0.02) + return InitializeResponse.model_validate( + { + "userAgent": "codex-cli/1.2.3", + "serverInfo": {"name": "codex-cli", "version": "1.2.3"}, + } + ) + + async def fake_model_list(include_hidden: bool = False): # noqa: ANN202,ARG001 + await ready.wait() + return object() + + codex._client.start = fake_start # type: ignore[method-assign] + codex._client.initialize = fake_initialize # type: ignore[method-assign] + codex._client.model_list = fake_model_list # type: ignore[method-assign] + + await asyncio.gather(codex.models(), codex.models()) + + assert start_calls == 1 + assert initialize_calls == 1 + + asyncio.run(scenario()) + + +def test_turn_stream_rejects_second_active_consumer() -> None: + client = AppServerClient() + notifications: deque[Notification] = deque( + [ + _delta_notification(turn_id="turn-1"), + _completed_notification(turn_id="turn-1"), + ] + ) + client.next_notification = notifications.popleft # type: 
ignore[method-assign] + + first_stream = Turn(client, "thread-1", "turn-1").stream() + assert next(first_stream).method == "item/agentMessage/delta" + + second_stream = Turn(client, "thread-1", "turn-2").stream() + with pytest.raises(RuntimeError, match="Concurrent turn consumers are not yet supported"): + next(second_stream) + + first_stream.close() + + +def test_async_turn_stream_rejects_second_active_consumer() -> None: + async def scenario() -> None: + codex = AsyncCodex() + + async def fake_ensure_initialized() -> None: + return None + + notifications: deque[Notification] = deque( + [ + _delta_notification(turn_id="turn-1"), + _completed_notification(turn_id="turn-1"), + ] + ) + + async def fake_next_notification() -> Notification: + return notifications.popleft() + + codex._ensure_initialized = fake_ensure_initialized # type: ignore[method-assign] + codex._client.next_notification = fake_next_notification # type: ignore[method-assign] + + first_stream = AsyncTurn(codex, "thread-1", "turn-1").stream() + assert (await anext(first_stream)).method == "item/agentMessage/delta" + + second_stream = AsyncTurn(codex, "thread-1", "turn-2").stream() + with pytest.raises(RuntimeError, match="Concurrent turn consumers are not yet supported"): + await anext(second_stream) + + await first_stream.aclose() + + asyncio.run(scenario()) + + +def test_turn_run_falls_back_to_completed_raw_response_text() -> None: + client = AppServerClient() + notifications: deque[Notification] = deque( + [ + _raw_response_notification(text="hello from raw response"), + _usage_notification(), + _completed_notification(), + ] + ) + client.next_notification = notifications.popleft # type: ignore[method-assign] + + result = Turn(client, "thread-1", "turn-1").run() + + assert result.status == TurnStatus.completed + assert result.text == "hello from raw response" + + +def test_retry_examples_compare_status_with_enum() -> None: + for path in ( + ROOT / "examples" / "10_error_handling_and_retry" / 
"sync.py", + ROOT / "examples" / "10_error_handling_and_retry" / "async.py", + ): + source = path.read_text() + assert '== "failed"' not in source + assert "TurnStatus.failed" in source diff --git a/sdk/python/tests/test_public_api_signatures.py b/sdk/python/tests/test_public_api_signatures.py new file mode 100644 index 00000000000..dc61b7ba8df --- /dev/null +++ b/sdk/python/tests/test_public_api_signatures.py @@ -0,0 +1,211 @@ +from __future__ import annotations + +import importlib.resources as resources +import inspect +from typing import Any + +from codex_app_server import AppServerConfig +from codex_app_server.models import InitializeResponse +from codex_app_server.public_api import AsyncCodex, AsyncThread, Codex, Thread + + +def _keyword_only_names(fn: object) -> list[str]: + signature = inspect.signature(fn) + return [ + param.name + for param in signature.parameters.values() + if param.kind == inspect.Parameter.KEYWORD_ONLY + ] + + +def _assert_no_any_annotations(fn: object) -> None: + signature = inspect.signature(fn) + for param in signature.parameters.values(): + if param.annotation is Any: + raise AssertionError(f"{fn} has public parameter typed as Any: {param.name}") + if signature.return_annotation is Any: + raise AssertionError(f"{fn} has public return annotation typed as Any") + + +def test_root_exports_app_server_config() -> None: + assert AppServerConfig.__name__ == "AppServerConfig" + + +def test_package_includes_py_typed_marker() -> None: + marker = resources.files("codex_app_server").joinpath("py.typed") + assert marker.is_file() + + +def test_generated_public_signatures_are_snake_case_and_typed() -> None: + expected = { + Codex.thread_start: [ + "approval_policy", + "base_instructions", + "config", + "cwd", + "developer_instructions", + "ephemeral", + "model", + "model_provider", + "personality", + "sandbox", + "service_name", + "service_tier", + ], + Codex.thread_list: [ + "archived", + "cursor", + "cwd", + "limit", + "model_providers", + 
"search_term", + "sort_key", + "source_kinds", + ], + Codex.thread_resume: [ + "approval_policy", + "base_instructions", + "config", + "cwd", + "developer_instructions", + "model", + "model_provider", + "personality", + "sandbox", + "service_tier", + ], + Codex.thread_fork: [ + "approval_policy", + "base_instructions", + "config", + "cwd", + "developer_instructions", + "ephemeral", + "model", + "model_provider", + "sandbox", + "service_tier", + ], + Thread.turn: [ + "approval_policy", + "cwd", + "effort", + "model", + "output_schema", + "personality", + "sandbox_policy", + "service_tier", + "summary", + ], + AsyncCodex.thread_start: [ + "approval_policy", + "base_instructions", + "config", + "cwd", + "developer_instructions", + "ephemeral", + "model", + "model_provider", + "personality", + "sandbox", + "service_name", + "service_tier", + ], + AsyncCodex.thread_list: [ + "archived", + "cursor", + "cwd", + "limit", + "model_providers", + "search_term", + "sort_key", + "source_kinds", + ], + AsyncCodex.thread_resume: [ + "approval_policy", + "base_instructions", + "config", + "cwd", + "developer_instructions", + "model", + "model_provider", + "personality", + "sandbox", + "service_tier", + ], + AsyncCodex.thread_fork: [ + "approval_policy", + "base_instructions", + "config", + "cwd", + "developer_instructions", + "ephemeral", + "model", + "model_provider", + "sandbox", + "service_tier", + ], + AsyncThread.turn: [ + "approval_policy", + "cwd", + "effort", + "model", + "output_schema", + "personality", + "sandbox_policy", + "service_tier", + "summary", + ], + } + + for fn, expected_kwargs in expected.items(): + actual = _keyword_only_names(fn) + assert actual == expected_kwargs, f"unexpected kwargs for {fn}: {actual}" + assert all(name == name.lower() for name in actual), f"non snake_case kwargs in {fn}: {actual}" + _assert_no_any_annotations(fn) + + +def test_lifecycle_methods_are_codex_scoped() -> None: + assert hasattr(Codex, "thread_resume") + assert hasattr(Codex, 
"thread_fork") + assert hasattr(Codex, "thread_archive") + assert hasattr(Codex, "thread_unarchive") + assert hasattr(AsyncCodex, "thread_resume") + assert hasattr(AsyncCodex, "thread_fork") + assert hasattr(AsyncCodex, "thread_archive") + assert hasattr(AsyncCodex, "thread_unarchive") + assert not hasattr(Codex, "thread") + assert not hasattr(AsyncCodex, "thread") + + assert not hasattr(Thread, "resume") + assert not hasattr(Thread, "fork") + assert not hasattr(Thread, "archive") + assert not hasattr(Thread, "unarchive") + assert not hasattr(AsyncThread, "resume") + assert not hasattr(AsyncThread, "fork") + assert not hasattr(AsyncThread, "archive") + assert not hasattr(AsyncThread, "unarchive") + + for fn in ( + Codex.thread_archive, + Codex.thread_unarchive, + AsyncCodex.thread_archive, + AsyncCodex.thread_unarchive, + ): + _assert_no_any_annotations(fn) + + +def test_initialize_metadata_parses_user_agent_shape() -> None: + parsed = Codex._parse_initialize(InitializeResponse.model_validate({"userAgent": "codex-cli/1.2.3"})) + assert parsed.user_agent == "codex-cli/1.2.3" + assert parsed.server_name == "codex-cli" + assert parsed.server_version == "1.2.3" + + +def test_initialize_metadata_requires_non_empty_information() -> None: + try: + Codex._parse_initialize(InitializeResponse.model_validate({})) + except RuntimeError as exc: + assert "missing required metadata" in str(exc) + else: + raise AssertionError("expected RuntimeError when initialize metadata is missing") diff --git a/sdk/python/tests/test_real_app_server_integration.py b/sdk/python/tests/test_real_app_server_integration.py new file mode 100644 index 00000000000..63ecc8cb458 --- /dev/null +++ b/sdk/python/tests/test_real_app_server_integration.py @@ -0,0 +1,237 @@ +from __future__ import annotations + +import asyncio +import json +import os +import shutil +import subprocess +import sys +import tempfile +from pathlib import Path + +import pytest + +from codex_app_server import AppServerConfig, 
AsyncCodex, Codex, TextInput + +ROOT = Path(__file__).resolve().parents[1] +EXAMPLES_DIR = ROOT / "examples" +NOTEBOOK_PATH = ROOT / "notebooks" / "sdk_walkthrough.ipynb" + +RUN_REAL_CODEX_TESTS = os.environ.get("RUN_REAL_CODEX_TESTS") == "1" +pytestmark = pytest.mark.skipif( + not RUN_REAL_CODEX_TESTS, + reason="set RUN_REAL_CODEX_TESTS=1 to run real Codex integration coverage", +) + +# 11_cli_mini_app is interactive; we still run it by feeding '/exit'. +EXAMPLE_CASES: list[tuple[str, str]] = [ + ("01_quickstart_constructor", "sync.py"), + ("01_quickstart_constructor", "async.py"), + ("02_turn_run", "sync.py"), + ("02_turn_run", "async.py"), + ("03_turn_stream_events", "sync.py"), + ("03_turn_stream_events", "async.py"), + ("04_models_and_metadata", "sync.py"), + ("04_models_and_metadata", "async.py"), + ("05_existing_thread", "sync.py"), + ("05_existing_thread", "async.py"), + ("06_thread_lifecycle_and_controls", "sync.py"), + ("06_thread_lifecycle_and_controls", "async.py"), + ("07_image_and_text", "sync.py"), + ("07_image_and_text", "async.py"), + ("08_local_image_and_text", "sync.py"), + ("08_local_image_and_text", "async.py"), + ("09_async_parity", "sync.py"), + # 09_async_parity async path is represented by 01 async + dedicated async-based cases above. 
+ ("10_error_handling_and_retry", "sync.py"), + ("10_error_handling_and_retry", "async.py"), + ("11_cli_mini_app", "sync.py"), + ("11_cli_mini_app", "async.py"), + ("12_turn_params_kitchen_sink", "sync.py"), + ("12_turn_params_kitchen_sink", "async.py"), + ("13_model_select_and_turn_params", "sync.py"), + ("13_model_select_and_turn_params", "async.py"), +] + + +def _run_example( + folder: str, script: str, *, timeout_s: int = 150 +) -> subprocess.CompletedProcess[str]: + path = EXAMPLES_DIR / folder / script + assert path.exists(), f"Missing example script: {path}" + + env = os.environ.copy() + env.setdefault( + "CODEX_PYTHON_SDK_CODEX_BIN", + _real_test_config().codex_bin or "", + ) + + # Feed '/exit' only to interactive mini-cli examples. + stdin = "/exit\n" if folder == "11_cli_mini_app" else None + + return subprocess.run( + [sys.executable, str(path)], + cwd=str(ROOT), + env=env, + input=stdin, + text=True, + capture_output=True, + timeout=timeout_s, + check=False, + ) + + +def _notebook_cell_source(cell_index: int) -> str: + notebook = json.loads(NOTEBOOK_PATH.read_text()) + return "".join(notebook["cells"][cell_index]["source"]) + + +def _real_test_config() -> AppServerConfig: + codex_bin = os.environ.get("CODEX_PYTHON_SDK_CODEX_BIN") or shutil.which("codex") + if codex_bin is None: + raise RuntimeError( + "Real SDK integration tests require a Codex CLI binary.\n" + "Set RUN_REAL_CODEX_TESTS=1 and CODEX_PYTHON_SDK_CODEX_BIN=/absolute/path/to/codex, " + "or ensure `codex` is on PATH." 
+ ) + return AppServerConfig(codex_bin=codex_bin) + + +def test_real_initialize_and_model_list(): + with Codex(config=_real_test_config()) as codex: + metadata = codex.metadata + assert isinstance(metadata.user_agent, str) and metadata.user_agent.strip() + assert isinstance(metadata.server_name, str) and metadata.server_name.strip() + assert isinstance(metadata.server_version, str) and metadata.server_version.strip() + + models = codex.models(include_hidden=True) + assert isinstance(models.data, list) + + +def test_real_thread_and_turn_start_smoke(): + with Codex(config=_real_test_config()) as codex: + thread = codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) + result = thread.turn(TextInput("hello")).run() + + assert isinstance(result.thread_id, str) and result.thread_id.strip() + assert isinstance(result.turn_id, str) and result.turn_id.strip() + assert isinstance(result.items, list) + assert result.usage is not None + assert result.usage.thread_id == result.thread_id + assert result.usage.turn_id == result.turn_id + + +def test_real_async_thread_turn_usage_and_ids_smoke() -> None: + async def _run() -> None: + async with AsyncCodex(config=_real_test_config()) as codex: + thread = await codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) + result = await (await thread.turn(TextInput("say ok"))).run() + + assert isinstance(result.thread_id, str) and result.thread_id.strip() + assert isinstance(result.turn_id, str) and result.turn_id.strip() + assert isinstance(result.items, list) + assert result.usage is not None + assert result.usage.thread_id == result.thread_id + assert result.usage.turn_id == result.turn_id + + asyncio.run(_run()) + + +def test_notebook_bootstrap_resolves_sdk_from_unrelated_cwd() -> None: + cell_1_source = _notebook_cell_source(1) + env = os.environ.copy() + env["CODEX_PYTHON_SDK_DIR"] = str(ROOT) + + with tempfile.TemporaryDirectory() as temp_cwd: + result = subprocess.run( + 
[sys.executable, "-c", cell_1_source], + cwd=temp_cwd, + env=env, + text=True, + capture_output=True, + timeout=60, + check=False, + ) + + assert result.returncode == 0, ( + f"Notebook bootstrap failed from unrelated cwd.\n" + f"STDOUT:\n{result.stdout}\n" + f"STDERR:\n{result.stderr}" + ) + assert "SDK source:" in result.stdout + assert "codex_app_server" in result.stdout or "sdk/python/src" in result.stdout + + +def test_real_streaming_smoke_turn_completed(): + with Codex(config=_real_test_config()) as codex: + thread = codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) + turn = thread.turn(TextInput("Reply with one short sentence.")) + + saw_delta = False + saw_completed = False + for evt in turn.stream(): + if evt.method == "item/agentMessage/delta": + saw_delta = True + if evt.method == "turn/completed": + saw_completed = True + + assert saw_completed + # Some environments can produce zero deltas for very short output; + # this assert keeps the smoke test informative but non-flaky. + assert isinstance(saw_delta, bool) + + +def test_real_turn_interrupt_smoke(): + with Codex(config=_real_test_config()) as codex: + thread = codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) + turn = thread.turn(TextInput("Count from 1 to 200 with commas.")) + + # Best effort: interrupting quickly may race with completion on fast models. + _ = turn.interrupt() + + # Confirm the session is still usable after interrupt race. 
+ follow_up = thread.turn(TextInput("Say 'ok' only.")).run() + assert follow_up.status.value in {"completed", "failed"} + +@pytest.mark.parametrize(("folder", "script"), EXAMPLE_CASES) +def test_real_examples_run_and_assert(folder: str, script: str): + result = _run_example(folder, script) + + assert result.returncode == 0, ( + f"Example failed: {folder}/{script}\n" + f"STDOUT:\n{result.stdout}\n" + f"STDERR:\n{result.stderr}" + ) + + out = result.stdout + + # Minimal content assertions so we validate behavior, not just exit code. + if folder == "01_quickstart_constructor": + assert "Status:" in out and "Text:" in out + assert "Server: None None" not in out + elif folder == "02_turn_run": + assert "thread_id:" in out and "turn_id:" in out and "status:" in out + assert "usage: None" not in out + elif folder == "03_turn_stream_events": + assert "turn/completed" in out + elif folder == "04_models_and_metadata": + assert "models.count:" in out + assert "server_name=None" not in out + assert "server_version=None" not in out + elif folder == "05_existing_thread": + assert "Created thread:" in out + elif folder == "06_thread_lifecycle_and_controls": + assert "Lifecycle OK:" in out + elif folder in {"07_image_and_text", "08_local_image_and_text"}: + assert "completed" in out.lower() or "Status:" in out + elif folder == "09_async_parity": + assert "Thread:" in out and "Turn:" in out + elif folder == "10_error_handling_and_retry": + assert "Text:" in out + elif folder == "11_cli_mini_app": + assert "Thread:" in out + elif folder == "12_turn_params_kitchen_sink": + assert "Status:" in out and "Usage:" in out + elif folder == "13_model_select_and_turn_params": + assert "selected.model:" in out and "agent.message.params:" in out and "usage.params:" in out + assert "usage.params: None" not in out From 3cf13069687f0f14e4f5b87c6519124c641441a4 Mon Sep 17 00:00:00 2001 From: Shaqayeq Date: Thu, 12 Mar 2026 09:51:07 -0700 Subject: [PATCH 02/14] python-sdk: use pinned runtime package 
in real coverage (2026-03-12) 2026-03-12 Switch the repo-source Python SDK real coverage over to a pinned runtime-package flow backed by GitHub release artifacts instead of PATH or explicit binary overrides. - add sdk/python/_runtime_setup.py to download the release codex archive for a requested CODEX_PYTHON_RUNTIME_VERSION, stage a temporary codex-cli-bin package, and install it into a target Python environment with cleanup - refactor real integration tests to run repo-source SDK code against an isolated site-packages target that contains the staged codex-cli-bin runtime - update examples and notebook bootstrap to install and use the runtime package, and stop consulting CODEX_PYTHON_SDK_CODEX_BIN or PATH - switch the failing turn-run and model-selection examples to runtime-compatible model selection for the pinned release binary - keep the main SDK runtime resolution model unchanged: explicit codex_bin or installed codex-cli-bin Validation: - python3 -m pytest sdk/python/tests - RUN_REAL_CODEX_TESTS=1 CODEX_PYTHON_RUNTIME_VERSION=0.115.0-alpha.11 python3 -m pytest sdk/python/tests/test_real_app_server_integration.py Co-authored-by: Codex --- sdk/python/_runtime_setup.py | 380 ++++++++++++++++++ sdk/python/examples/02_turn_run/async.py | 2 +- sdk/python/examples/02_turn_run/sync.py | 2 +- .../13_model_select_and_turn_params/async.py | 4 + .../13_model_select_and_turn_params/sync.py | 4 + sdk/python/examples/README.md | 15 +- sdk/python/examples/_bootstrap.py | 21 +- sdk/python/notebooks/sdk_walkthrough.ipynb | 20 +- .../tests/test_real_app_server_integration.py | 380 +++++++++++++----- 9 files changed, 708 insertions(+), 120 deletions(-) create mode 100644 sdk/python/_runtime_setup.py diff --git a/sdk/python/_runtime_setup.py b/sdk/python/_runtime_setup.py new file mode 100644 index 00000000000..b02bfb50cca --- /dev/null +++ b/sdk/python/_runtime_setup.py @@ -0,0 +1,380 @@ +from __future__ import annotations + +import importlib +import importlib.util +import json 
+import os +import platform +import shutil +import subprocess +import sys +import tarfile +import tempfile +import urllib.error +import urllib.request +import zipfile +from pathlib import Path + +PACKAGE_NAME = "codex-cli-bin" +RUNTIME_VERSION_ENV_VAR = "CODEX_PYTHON_RUNTIME_VERSION" +REPO_SLUG = "openai/codex" + + +class RuntimeSetupError(RuntimeError): + pass + + +def configured_runtime_version() -> str | None: + value = os.environ.get(RUNTIME_VERSION_ENV_VAR) + if value is None: + return None + normalized = value.strip() + if not normalized: + raise RuntimeSetupError( + f"{RUNTIME_VERSION_ENV_VAR} is set but empty. " + "Set it to a release version like 0.115.0-alpha.11." + ) + return normalized + + +def required_runtime_version() -> str: + version = configured_runtime_version() + if version is not None: + return version + raise RuntimeSetupError( + f"Set {RUNTIME_VERSION_ENV_VAR}= so repo-local examples and real " + "integration coverage can install the pinned codex-cli-bin runtime package." + ) + + +def ensure_runtime_package_installed( + python_executable: str | Path, + sdk_python_dir: Path, + runtime_version: str | None = None, + install_target: Path | None = None, +) -> str: + requested_version = runtime_version or configured_runtime_version() + installed_version = None + if install_target is None: + installed_version = _installed_runtime_version(python_executable) + normalized_requested = ( + _normalized_package_version(requested_version) if requested_version is not None else None + ) + if requested_version is None: + if install_target is not None: + raise RuntimeSetupError( + f"{RUNTIME_VERSION_ENV_VAR} is required when installing {PACKAGE_NAME} " + "into an isolated target directory." 
+ ) + if installed_version is None: + raise RuntimeSetupError( + f"Unable to locate {PACKAGE_NAME} in {python_executable}.\n" + f"Install {PACKAGE_NAME} first, or set {RUNTIME_VERSION_ENV_VAR}= " + "to download a matching release codex artifact and install a local runtime wheel." + ) + return installed_version + + if installed_version is not None and _normalized_package_version(installed_version) == normalized_requested: + return requested_version + + with tempfile.TemporaryDirectory(prefix="codex-python-runtime-") as temp_root_str: + temp_root = Path(temp_root_str) + archive_path = _download_release_archive(requested_version, temp_root) + runtime_binary = _extract_runtime_binary(archive_path, temp_root) + staged_runtime_dir = _stage_runtime_package( + sdk_python_dir, + requested_version, + runtime_binary, + temp_root / "runtime-stage", + ) + _install_runtime_package(python_executable, staged_runtime_dir, install_target) + + if install_target is not None: + return requested_version + + if Path(python_executable).resolve() == Path(sys.executable).resolve(): + importlib.invalidate_caches() + + installed_version = _installed_runtime_version(python_executable) + if installed_version is None or _normalized_package_version(installed_version) != normalized_requested: + raise RuntimeSetupError( + f"Expected {PACKAGE_NAME} {requested_version} in {python_executable}, " + f"but found {installed_version!r} after installation." 
+ ) + return requested_version + + +def platform_asset_name() -> str: + system = platform.system().lower() + machine = platform.machine().lower() + + if system == "darwin": + if machine in {"arm64", "aarch64"}: + return "codex-aarch64-apple-darwin.tar.gz" + if machine in {"x86_64", "amd64"}: + return "codex-x86_64-apple-darwin.tar.gz" + elif system == "linux": + if machine in {"aarch64", "arm64"}: + return "codex-aarch64-unknown-linux-musl.tar.gz" + if machine in {"x86_64", "amd64"}: + return "codex-x86_64-unknown-linux-musl.tar.gz" + elif system == "windows": + if machine in {"aarch64", "arm64"}: + return "codex-aarch64-pc-windows-msvc.exe.zip" + if machine in {"x86_64", "amd64"}: + return "codex-x86_64-pc-windows-msvc.exe.zip" + + raise RuntimeSetupError( + f"Unsupported runtime artifact platform: system={platform.system()!r}, " + f"machine={platform.machine()!r}" + ) + + +def runtime_binary_name() -> str: + return "codex.exe" if platform.system().lower() == "windows" else "codex" + + +def _installed_runtime_version(python_executable: str | Path) -> str | None: + snippet = ( + "import importlib.metadata, json, sys\n" + "try:\n" + " from codex_cli_bin import bundled_codex_path\n" + " bundled_codex_path()\n" + " print(json.dumps({'version': importlib.metadata.version('codex-cli-bin')}))\n" + "except Exception:\n" + " sys.exit(1)\n" + ) + result = subprocess.run( + [str(python_executable), "-c", snippet], + text=True, + capture_output=True, + check=False, + ) + if result.returncode != 0: + return None + return json.loads(result.stdout)["version"] + + +def _release_metadata(version: str) -> dict[str, object]: + url = f"https://api.github.com/repos/{REPO_SLUG}/releases/tags/rust-v{version}" + request = urllib.request.Request( + url, + headers=_github_api_headers("application/vnd.github+json"), + ) + try: + with urllib.request.urlopen(request) as response: + return json.load(response) + except urllib.error.HTTPError as exc: + raise RuntimeSetupError( + f"Failed to 
resolve release metadata for rust-v{version} from {REPO_SLUG}: " + f"{exc.code} {exc.reason}" + ) from exc + + +def _download_release_archive(version: str, temp_root: Path) -> Path: + asset_name = platform_asset_name() + metadata = _release_metadata(version) + assets = metadata.get("assets") + if not isinstance(assets, list): + raise RuntimeSetupError(f"Release rust-v{version} returned malformed assets metadata.") + asset = next( + ( + item + for item in assets + if isinstance(item, dict) and item.get("name") == asset_name + ), + None, + ) + if asset is None: + raise RuntimeSetupError( + f"Release rust-v{version} does not contain asset {asset_name} for this platform." + ) + + archive_path = temp_root / asset_name + api_url = asset.get("url") + browser_download_url = asset.get("browser_download_url") + if not isinstance(api_url, str): + api_url = None + if not isinstance(browser_download_url, str): + browser_download_url = None + + if api_url is not None: + token = _github_token() + if token is not None: + request = urllib.request.Request( + api_url, + headers=_github_api_headers("application/octet-stream"), + ) + try: + with urllib.request.urlopen(request) as response, archive_path.open("wb") as fh: + shutil.copyfileobj(response, fh) + return archive_path + except urllib.error.HTTPError: + pass + + if browser_download_url is not None: + request = urllib.request.Request( + browser_download_url, + headers={"User-Agent": "codex-python-runtime-setup"}, + ) + try: + with urllib.request.urlopen(request) as response, archive_path.open("wb") as fh: + shutil.copyfileobj(response, fh) + return archive_path + except urllib.error.HTTPError: + pass + + if shutil.which("gh") is None: + raise RuntimeSetupError( + f"Unable to download {asset_name} for rust-v{version}. " + "Provide GH_TOKEN/GITHUB_TOKEN or install/authenticate GitHub CLI." 
+ ) + + try: + subprocess.run( + [ + "gh", + "release", + "download", + f"rust-v{version}", + "--repo", + REPO_SLUG, + "--pattern", + asset_name, + "--dir", + str(temp_root), + ], + check=True, + text=True, + capture_output=True, + ) + except subprocess.CalledProcessError as exc: + raise RuntimeSetupError( + f"gh release download failed for rust-v{version} asset {asset_name}.\n" + f"STDOUT:\n{exc.stdout}\nSTDERR:\n{exc.stderr}" + ) from exc + return archive_path + + +def _extract_runtime_binary(archive_path: Path, temp_root: Path) -> Path: + extract_dir = temp_root / "extracted" + extract_dir.mkdir(parents=True, exist_ok=True) + if archive_path.name.endswith(".tar.gz"): + with tarfile.open(archive_path, "r:gz") as tar: + try: + tar.extractall(extract_dir, filter="data") + except TypeError: + tar.extractall(extract_dir) + elif archive_path.suffix == ".zip": + with zipfile.ZipFile(archive_path) as zip_file: + zip_file.extractall(extract_dir) + else: + raise RuntimeSetupError(f"Unsupported release archive format: {archive_path.name}") + + binary_name = runtime_binary_name() + archive_stem = archive_path.name.removesuffix(".tar.gz").removesuffix(".zip") + candidates = [ + path + for path in extract_dir.rglob("*") + if path.is_file() + and ( + path.name == binary_name + or path.name == archive_stem + or path.name.startswith("codex-") + ) + ] + if not candidates: + raise RuntimeSetupError( + f"Failed to find {binary_name} in extracted runtime archive {archive_path.name}." 
+ ) + return candidates[0] + + +def _stage_runtime_package( + sdk_python_dir: Path, + runtime_version: str, + runtime_binary: Path, + staging_dir: Path, +) -> Path: + script_module = _load_update_script_module(sdk_python_dir) + return script_module.stage_python_runtime_package( # type: ignore[no-any-return] + staging_dir, + runtime_version, + runtime_binary.resolve(), + ) + + +def _install_runtime_package( + python_executable: str | Path, + staged_runtime_dir: Path, + install_target: Path | None, +) -> None: + args = [ + str(python_executable), + "-m", + "pip", + "install", + "--force-reinstall", + "--no-deps", + ] + if install_target is not None: + install_target.mkdir(parents=True, exist_ok=True) + args.extend(["--target", str(install_target)]) + args.append(str(staged_runtime_dir)) + try: + subprocess.run( + args, + check=True, + text=True, + capture_output=True, + ) + except subprocess.CalledProcessError as exc: + raise RuntimeSetupError( + f"Failed to install {PACKAGE_NAME} into {python_executable} from {staged_runtime_dir}.\n" + f"STDOUT:\n{exc.stdout}\nSTDERR:\n{exc.stderr}" + ) from exc + + +def _load_update_script_module(sdk_python_dir: Path): + script_path = sdk_python_dir / "scripts" / "update_sdk_artifacts.py" + spec = importlib.util.spec_from_file_location("update_sdk_artifacts", script_path) + if spec is None or spec.loader is None: + raise RuntimeSetupError(f"Failed to load {script_path}") + module = importlib.util.module_from_spec(spec) + sys.modules[spec.name] = module + spec.loader.exec_module(module) + return module + + +def _github_api_headers(accept: str) -> dict[str, str]: + headers = { + "Accept": accept, + "User-Agent": "codex-python-runtime-setup", + } + token = _github_token() + if token is not None: + headers["Authorization"] = f"Bearer {token}" + return headers + + +def _github_token() -> str | None: + for env_name in ("GH_TOKEN", "GITHUB_TOKEN"): + token = os.environ.get(env_name) + if token: + return token + return None + + +def 
_normalized_package_version(version: str) -> str: + return version.strip().replace("-alpha.", "a").replace("-beta.", "b") + + +__all__ = [ + "PACKAGE_NAME", + "RUNTIME_VERSION_ENV_VAR", + "RuntimeSetupError", + "configured_runtime_version", + "ensure_runtime_package_installed", + "platform_asset_name", + "required_runtime_version", +] diff --git a/sdk/python/examples/02_turn_run/async.py b/sdk/python/examples/02_turn_run/async.py index 81a572ea0b9..5d25d367c84 100644 --- a/sdk/python/examples/02_turn_run/async.py +++ b/sdk/python/examples/02_turn_run/async.py @@ -16,7 +16,7 @@ async def main() -> None: async with AsyncCodex(config=runtime_config()) as codex: - thread = await codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) + thread = await codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"}) turn = await thread.turn(TextInput("Give 3 bullets about SIMD.")) result = await turn.run() diff --git a/sdk/python/examples/02_turn_run/sync.py b/sdk/python/examples/02_turn_run/sync.py index 8125c487c06..89b1411571e 100644 --- a/sdk/python/examples/02_turn_run/sync.py +++ b/sdk/python/examples/02_turn_run/sync.py @@ -12,7 +12,7 @@ from codex_app_server import Codex, TextInput with Codex(config=runtime_config()) as codex: - thread = codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) + thread = codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"}) result = thread.turn(TextInput("Give 3 bullets about SIMD.")).run() print("thread_id:", result.thread_id) diff --git a/sdk/python/examples/13_model_select_and_turn_params/async.py b/sdk/python/examples/13_model_select_and_turn_params/async.py index 521193b8e1f..dcab14cceda 100644 --- a/sdk/python/examples/13_model_select_and_turn_params/async.py +++ b/sdk/python/examples/13_model_select_and_turn_params/async.py @@ -29,10 +29,14 @@ "high": 4, "xhigh": 5, } +PREFERRED_MODEL = "gpt-5.4" def _pick_highest_model(models): visible = [m 
for m in models if not m.hidden] or models + preferred = next((m for m in visible if m.model == PREFERRED_MODEL or m.id == PREFERRED_MODEL), None) + if preferred is not None: + return preferred known_names = {m.id for m in visible} | {m.model for m in visible} top_candidates = [m for m in visible if not (m.upgrade and m.upgrade in known_names)] pool = top_candidates or visible diff --git a/sdk/python/examples/13_model_select_and_turn_params/sync.py b/sdk/python/examples/13_model_select_and_turn_params/sync.py index 4fb680e7d18..c2fc53e36fd 100644 --- a/sdk/python/examples/13_model_select_and_turn_params/sync.py +++ b/sdk/python/examples/13_model_select_and_turn_params/sync.py @@ -27,10 +27,14 @@ "high": 4, "xhigh": 5, } +PREFERRED_MODEL = "gpt-5.4" def _pick_highest_model(models): visible = [m for m in models if not m.hidden] or models + preferred = next((m for m in visible if m.model == PREFERRED_MODEL or m.id == PREFERRED_MODEL), None) + if preferred is not None: + return preferred known_names = {m.id for m in visible} | {m.model for m in visible} top_candidates = [m for m in visible if not (m.upgrade and m.upgrade in known_names)] pool = top_candidates or visible diff --git a/sdk/python/examples/README.md b/sdk/python/examples/README.md index ed687a35ada..37e3ecdc649 100644 --- a/sdk/python/examples/README.md +++ b/sdk/python/examples/README.md @@ -23,10 +23,13 @@ python -m pip install -e . When running examples from this repo checkout, the SDK source uses the local tree and does not bundle a runtime binary. The helper in `examples/_bootstrap.py` -therefore resolves `codex` from: +uses the installed `codex-cli-bin` runtime package. 
-- `CODEX_PYTHON_SDK_CODEX_BIN`, if set -- otherwise `codex` on `PATH` +If `codex-cli-bin` is not already installed, set `CODEX_PYTHON_RUNTIME_VERSION` +to a release version like `0.115.0-alpha.11`; the bootstrap will download the +matching GitHub release artifact, stage a temporary local `codex-cli-bin` +package, install it into your active interpreter, and clean up the temporary +files afterward. ## Run examples @@ -38,12 +41,14 @@ python examples//async.py ``` The examples bootstrap local imports from `sdk/python/src` automatically, so no -extra package build step is required beyond installing dependencies for your -active interpreter and making a `codex` binary available. +SDK wheel install is required. You only need the Python dependencies for your +active interpreter and an installed `codex-cli-bin` runtime package (either +already present or provisioned through `CODEX_PYTHON_RUNTIME_VERSION`). ## Recommended first run ```bash +export CODEX_PYTHON_RUNTIME_VERSION=0.115.0-alpha.11 python examples/01_quickstart_constructor/sync.py python examples/01_quickstart_constructor/async.py ``` diff --git a/sdk/python/examples/_bootstrap.py b/sdk/python/examples/_bootstrap.py index 44b89b4f789..2ce0ff0617a 100644 --- a/sdk/python/examples/_bootstrap.py +++ b/sdk/python/examples/_bootstrap.py @@ -2,10 +2,16 @@ import importlib.util import os -import shutil import sys from pathlib import Path +_SDK_PYTHON_DIR = Path(__file__).resolve().parents[1] +_SDK_PYTHON_STR = str(_SDK_PYTHON_DIR) +if _SDK_PYTHON_STR not in sys.path: + sys.path.insert(0, _SDK_PYTHON_STR) + +from _runtime_setup import ensure_runtime_package_installed + def _ensure_runtime_dependencies(sdk_python_dir: Path) -> None: if importlib.util.find_spec("pydantic") is not None: @@ -23,7 +29,7 @@ def _ensure_runtime_dependencies(sdk_python_dir: Path) -> None: def ensure_local_sdk_src() -> Path: """Add sdk/python/src to sys.path so examples run without installing the package.""" - sdk_python_dir = 
Path(__file__).resolve().parents[1] + sdk_python_dir = _SDK_PYTHON_DIR src_dir = sdk_python_dir / "src" package_dir = src_dir / "codex_app_server" if not package_dir.exists(): @@ -38,13 +44,8 @@ def ensure_local_sdk_src() -> Path: def runtime_config(): - """Return an example-friendly AppServerConfig for local repo usage.""" + """Return an example-friendly AppServerConfig for repo-source SDK usage.""" from codex_app_server import AppServerConfig - codex_bin = os.environ.get("CODEX_PYTHON_SDK_CODEX_BIN") or shutil.which("codex") - if codex_bin is None: - raise RuntimeError( - "Examples require a Codex CLI binary when run from this repo checkout.\n" - "Set CODEX_PYTHON_SDK_CODEX_BIN=/absolute/path/to/codex, or ensure `codex` is on PATH." - ) - return AppServerConfig(codex_bin=codex_bin) + ensure_runtime_package_installed(sys.executable, _SDK_PYTHON_DIR) + return AppServerConfig() diff --git a/sdk/python/notebooks/sdk_walkthrough.ipynb b/sdk/python/notebooks/sdk_walkthrough.ipynb index 64012a855fd..4b2b4e95c11 100644 --- a/sdk/python/notebooks/sdk_walkthrough.ipynb +++ b/sdk/python/notebooks/sdk_walkthrough.ipynb @@ -15,7 +15,7 @@ "metadata": {}, "outputs": [], "source": [ - "# Cell 1: bootstrap local SDK imports (no installation required)\n", + "# Cell 1: bootstrap local SDK imports + pinned runtime package\n", "import os\n", "import sys\n", "from pathlib import Path\n", @@ -92,9 +92,22 @@ "if repo_python_dir is None:\n", " raise RuntimeError('Could not locate sdk/python. 
Set CODEX_PYTHON_SDK_DIR to your sdk/python path.')\n", "\n", + "repo_python_str = str(repo_python_dir)\n", + "if repo_python_str not in sys.path:\n", + " sys.path.insert(0, repo_python_str)\n", + "\n", + "from _runtime_setup import configured_runtime_version, ensure_runtime_package_installed\n", + "\n", + "runtime_version = ensure_runtime_package_installed(\n", + " sys.executable,\n", + " repo_python_dir,\n", + " configured_runtime_version(),\n", + ")\n", + "\n", "src_dir = repo_python_dir / 'src'\n", - "if str(src_dir) not in sys.path:\n", - " sys.path.insert(0, str(src_dir))\n", + "src_str = str(src_dir)\n", + "if src_str not in sys.path:\n", + " sys.path.insert(0, src_str)\n", "\n", "# Force fresh imports after SDK upgrades in the same notebook kernel.\n", "for module_name in list(sys.modules):\n", @@ -103,6 +116,7 @@ "\n", "print('Kernel:', sys.executable)\n", "print('SDK source:', src_dir)\n", + "print('Runtime package:', runtime_version)\n", "\n" ] }, diff --git a/sdk/python/tests/test_real_app_server_integration.py b/sdk/python/tests/test_real_app_server_integration.py index 63ecc8cb458..1ae4c2f5f47 100644 --- a/sdk/python/tests/test_real_app_server_integration.py +++ b/sdk/python/tests/test_real_app_server_integration.py @@ -1,22 +1,26 @@ from __future__ import annotations -import asyncio import json import os -import shutil import subprocess import sys import tempfile +import textwrap +from dataclasses import dataclass from pathlib import Path import pytest -from codex_app_server import AppServerConfig, AsyncCodex, Codex, TextInput - ROOT = Path(__file__).resolve().parents[1] EXAMPLES_DIR = ROOT / "examples" NOTEBOOK_PATH = ROOT / "notebooks" / "sdk_walkthrough.ipynb" +root_str = str(ROOT) +if root_str not in sys.path: + sys.path.insert(0, root_str) + +from _runtime_setup import ensure_runtime_package_installed, required_runtime_version + RUN_REAL_CODEX_TESTS = os.environ.get("RUN_REAL_CODEX_TESTS") == "1" pytestmark = pytest.mark.skipif( not 
RUN_REAL_CODEX_TESTS, @@ -54,24 +58,59 @@ ] -def _run_example( - folder: str, script: str, *, timeout_s: int = 150 -) -> subprocess.CompletedProcess[str]: - path = EXAMPLES_DIR / folder / script - assert path.exists(), f"Missing example script: {path}" +@dataclass(frozen=True) +class PreparedRuntimeEnv: + python: str + env: dict[str, str] + runtime_version: str + + +@pytest.fixture(scope="session") +def runtime_env(tmp_path_factory: pytest.TempPathFactory) -> PreparedRuntimeEnv: + runtime_version = required_runtime_version() + temp_root = tmp_path_factory.mktemp("python-runtime-env") + isolated_site = temp_root / "site-packages" + python = sys.executable + + _run_command( + [ + python, + "-m", + "pip", + "install", + "--target", + str(isolated_site), + "pydantic>=2.12", + ], + cwd=ROOT, + env=os.environ.copy(), + timeout_s=240, + ) + ensure_runtime_package_installed( + python, + ROOT, + runtime_version, + install_target=isolated_site, + ) env = os.environ.copy() - env.setdefault( - "CODEX_PYTHON_SDK_CODEX_BIN", - _real_test_config().codex_bin or "", - ) + env["PYTHONPATH"] = os.pathsep.join([str(isolated_site), str(ROOT / "src")]) + env["CODEX_PYTHON_RUNTIME_VERSION"] = runtime_version + env["CODEX_PYTHON_SDK_DIR"] = str(ROOT) + return PreparedRuntimeEnv(python=python, env=env, runtime_version=runtime_version) - # Feed '/exit' only to interactive mini-cli examples. 
- stdin = "/exit\n" if folder == "11_cli_mini_app" else None +def _run_command( + args: list[str], + *, + cwd: Path, + env: dict[str, str], + timeout_s: int, + stdin: str | None = None, +) -> subprocess.CompletedProcess[str]: return subprocess.run( - [sys.executable, str(path)], - cwd=str(ROOT), + args, + cwd=str(cwd), env=env, input=stdin, text=True, @@ -81,76 +120,172 @@ def _run_example( ) -def _notebook_cell_source(cell_index: int) -> str: - notebook = json.loads(NOTEBOOK_PATH.read_text()) - return "".join(notebook["cells"][cell_index]["source"]) +def _run_python( + runtime_env: PreparedRuntimeEnv, + source: str, + *, + cwd: Path | None = None, + timeout_s: int = 180, +) -> subprocess.CompletedProcess[str]: + return _run_command( + [str(runtime_env.python), "-c", source], + cwd=cwd or ROOT, + env=runtime_env.env, + timeout_s=timeout_s, + ) -def _real_test_config() -> AppServerConfig: - codex_bin = os.environ.get("CODEX_PYTHON_SDK_CODEX_BIN") or shutil.which("codex") - if codex_bin is None: - raise RuntimeError( - "Real SDK integration tests require a Codex CLI binary.\n" - "Set RUN_REAL_CODEX_TESTS=1 and CODEX_PYTHON_SDK_CODEX_BIN=/absolute/path/to/codex, " - "or ensure `codex` is on PATH." 
- ) - return AppServerConfig(codex_bin=codex_bin) +def _run_json_python( + runtime_env: PreparedRuntimeEnv, + source: str, + *, + cwd: Path | None = None, + timeout_s: int = 180, +) -> dict[str, object]: + result = _run_python(runtime_env, source, cwd=cwd, timeout_s=timeout_s) + assert result.returncode == 0, ( + f"Python snippet failed.\nSTDOUT:\n{result.stdout}\nSTDERR:\n{result.stderr}" + ) + return json.loads(result.stdout) -def test_real_initialize_and_model_list(): - with Codex(config=_real_test_config()) as codex: - metadata = codex.metadata - assert isinstance(metadata.user_agent, str) and metadata.user_agent.strip() - assert isinstance(metadata.server_name, str) and metadata.server_name.strip() - assert isinstance(metadata.server_version, str) and metadata.server_version.strip() +def _run_example( + runtime_env: PreparedRuntimeEnv, + folder: str, + script: str, + *, + timeout_s: int = 180, +) -> subprocess.CompletedProcess[str]: + path = EXAMPLES_DIR / folder / script + assert path.exists(), f"Missing example script: {path}" - models = codex.models(include_hidden=True) - assert isinstance(models.data, list) + stdin = "/exit\n" if folder == "11_cli_mini_app" else None + return _run_command( + [str(runtime_env.python), str(path)], + cwd=ROOT, + env=runtime_env.env, + timeout_s=timeout_s, + stdin=stdin, + ) -def test_real_thread_and_turn_start_smoke(): - with Codex(config=_real_test_config()) as codex: - thread = codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) - result = thread.turn(TextInput("hello")).run() +def _notebook_cell_source(cell_index: int) -> str: + notebook = json.loads(NOTEBOOK_PATH.read_text()) + return "".join(notebook["cells"][cell_index]["source"]) - assert isinstance(result.thread_id, str) and result.thread_id.strip() - assert isinstance(result.turn_id, str) and result.turn_id.strip() - assert isinstance(result.items, list) - assert result.usage is not None - assert result.usage.thread_id == result.thread_id - 
assert result.usage.turn_id == result.turn_id +def test_real_initialize_and_model_list(runtime_env: PreparedRuntimeEnv) -> None: + data = _run_json_python( + runtime_env, + textwrap.dedent( + """ + import json + from codex_app_server import Codex + + with Codex() as codex: + models = codex.models(include_hidden=True) + print(json.dumps({ + "user_agent": codex.metadata.user_agent, + "server_name": codex.metadata.server_name, + "server_version": codex.metadata.server_version, + "model_count": len(models.data), + })) + """ + ), + ) -def test_real_async_thread_turn_usage_and_ids_smoke() -> None: - async def _run() -> None: - async with AsyncCodex(config=_real_test_config()) as codex: - thread = await codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) - result = await (await thread.turn(TextInput("say ok"))).run() + assert isinstance(data["user_agent"], str) and data["user_agent"].strip() + assert isinstance(data["server_name"], str) and data["server_name"].strip() + assert isinstance(data["server_version"], str) and data["server_version"].strip() + assert isinstance(data["model_count"], int) + + +def test_real_thread_and_turn_start_smoke(runtime_env: PreparedRuntimeEnv) -> None: + data = _run_json_python( + runtime_env, + textwrap.dedent( + """ + import json + from codex_app_server import Codex, TextInput + + with Codex() as codex: + thread = codex.thread_start( + model="gpt-5.4", + config={"model_reasoning_effort": "high"}, + ) + result = thread.turn(TextInput("hello")).run() + print(json.dumps({ + "thread_id": result.thread_id, + "turn_id": result.turn_id, + "items_count": len(result.items), + "has_usage": result.usage is not None, + "usage_thread_id": None if result.usage is None else result.usage.thread_id, + "usage_turn_id": None if result.usage is None else result.usage.turn_id, + })) + """ + ), + ) - assert isinstance(result.thread_id, str) and result.thread_id.strip() - assert isinstance(result.turn_id, str) and result.turn_id.strip() 
- assert isinstance(result.items, list) - assert result.usage is not None - assert result.usage.thread_id == result.thread_id - assert result.usage.turn_id == result.turn_id + assert isinstance(data["thread_id"], str) and data["thread_id"].strip() + assert isinstance(data["turn_id"], str) and data["turn_id"].strip() + assert isinstance(data["items_count"], int) + assert data["has_usage"] is True + assert data["usage_thread_id"] == data["thread_id"] + assert data["usage_turn_id"] == data["turn_id"] + + +def test_real_async_thread_turn_usage_and_ids_smoke( + runtime_env: PreparedRuntimeEnv, +) -> None: + data = _run_json_python( + runtime_env, + textwrap.dedent( + """ + import asyncio + import json + from codex_app_server import AsyncCodex, TextInput + + async def main(): + async with AsyncCodex() as codex: + thread = await codex.thread_start( + model="gpt-5.4", + config={"model_reasoning_effort": "high"}, + ) + result = await (await thread.turn(TextInput("say ok"))).run() + print(json.dumps({ + "thread_id": result.thread_id, + "turn_id": result.turn_id, + "items_count": len(result.items), + "has_usage": result.usage is not None, + "usage_thread_id": None if result.usage is None else result.usage.thread_id, + "usage_turn_id": None if result.usage is None else result.usage.turn_id, + })) + + asyncio.run(main()) + """ + ), + ) - asyncio.run(_run()) + assert isinstance(data["thread_id"], str) and data["thread_id"].strip() + assert isinstance(data["turn_id"], str) and data["turn_id"].strip() + assert isinstance(data["items_count"], int) + assert data["has_usage"] is True + assert data["usage_thread_id"] == data["thread_id"] + assert data["usage_turn_id"] == data["turn_id"] -def test_notebook_bootstrap_resolves_sdk_from_unrelated_cwd() -> None: +def test_notebook_bootstrap_resolves_sdk_and_runtime_from_unrelated_cwd( + runtime_env: PreparedRuntimeEnv, +) -> None: cell_1_source = _notebook_cell_source(1) - env = os.environ.copy() - env["CODEX_PYTHON_SDK_DIR"] = str(ROOT) + 
env = runtime_env.env.copy() with tempfile.TemporaryDirectory() as temp_cwd: - result = subprocess.run( - [sys.executable, "-c", cell_1_source], - cwd=temp_cwd, + result = _run_command( + [str(runtime_env.python), "-c", cell_1_source], + cwd=Path(temp_cwd), env=env, - text=True, - capture_output=True, - timeout=60, - check=False, + timeout_s=180, ) assert result.returncode == 0, ( @@ -159,43 +294,89 @@ def test_notebook_bootstrap_resolves_sdk_from_unrelated_cwd() -> None: f"STDERR:\n{result.stderr}" ) assert "SDK source:" in result.stdout - assert "codex_app_server" in result.stdout or "sdk/python/src" in result.stdout - - -def test_real_streaming_smoke_turn_completed(): - with Codex(config=_real_test_config()) as codex: - thread = codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) - turn = thread.turn(TextInput("Reply with one short sentence.")) - - saw_delta = False - saw_completed = False - for evt in turn.stream(): - if evt.method == "item/agentMessage/delta": - saw_delta = True - if evt.method == "turn/completed": - saw_completed = True + assert f"Runtime package: {runtime_env.runtime_version}" in result.stdout - assert saw_completed - # Some environments can produce zero deltas for very short output; - # this assert keeps the smoke test informative but non-flaky. 
- assert isinstance(saw_delta, bool) +def test_notebook_sync_cell_smoke(runtime_env: PreparedRuntimeEnv) -> None: + source = "\n\n".join( + [ + _notebook_cell_source(1), + _notebook_cell_source(2), + _notebook_cell_source(3), + ] + ) + result = _run_python(runtime_env, source, timeout_s=240) + assert result.returncode == 0, ( + f"Notebook sync smoke failed.\nSTDOUT:\n{result.stdout}\nSTDERR:\n{result.stderr}" + ) + assert "status:" in result.stdout + assert "server:" in result.stdout + + +def test_real_streaming_smoke_turn_completed(runtime_env: PreparedRuntimeEnv) -> None: + data = _run_json_python( + runtime_env, + textwrap.dedent( + """ + import json + from codex_app_server import Codex, TextInput + + with Codex() as codex: + thread = codex.thread_start( + model="gpt-5.4", + config={"model_reasoning_effort": "high"}, + ) + turn = thread.turn(TextInput("Reply with one short sentence.")) + saw_delta = False + saw_completed = False + for event in turn.stream(): + if event.method == "item/agentMessage/delta": + saw_delta = True + if event.method == "turn/completed": + saw_completed = True + print(json.dumps({ + "saw_delta": saw_delta, + "saw_completed": saw_completed, + })) + """ + ), + ) -def test_real_turn_interrupt_smoke(): - with Codex(config=_real_test_config()) as codex: - thread = codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) - turn = thread.turn(TextInput("Count from 1 to 200 with commas.")) + assert data["saw_completed"] is True + assert isinstance(data["saw_delta"], bool) + + +def test_real_turn_interrupt_smoke(runtime_env: PreparedRuntimeEnv) -> None: + data = _run_json_python( + runtime_env, + textwrap.dedent( + """ + import json + from codex_app_server import Codex, TextInput + + with Codex() as codex: + thread = codex.thread_start( + model="gpt-5.4", + config={"model_reasoning_effort": "high"}, + ) + turn = thread.turn(TextInput("Count from 1 to 200 with commas.")) + turn.interrupt() + follow_up = 
thread.turn(TextInput("Say 'ok' only.")).run() + print(json.dumps({"status": follow_up.status.value})) + """ + ), + ) - # Best effort: interrupting quickly may race with completion on fast models. - _ = turn.interrupt() + assert data["status"] in {"completed", "failed"} - # Confirm the session is still usable after interrupt race. - follow_up = thread.turn(TextInput("Say 'ok' only.")).run() - assert follow_up.status.value in {"completed", "failed"} @pytest.mark.parametrize(("folder", "script"), EXAMPLE_CASES) -def test_real_examples_run_and_assert(folder: str, script: str): - result = _run_example(folder, script) +def test_real_examples_run_and_assert( + runtime_env: PreparedRuntimeEnv, + folder: str, + script: str, +) -> None: + result = _run_example(runtime_env, folder, script) assert result.returncode == 0, ( f"Example failed: {folder}/{script}\n" @@ -205,7 +386,6 @@ def test_real_examples_run_and_assert(folder: str, script: str): out = result.stdout - # Minimal content assertions so we validate behavior, not just exit code. if folder == "01_quickstart_constructor": assert "Status:" in out and "Text:" in out assert "Server: None None" not in out From 09a265122dac861d919b1c6ddeedf65d3edbe8bf Mon Sep 17 00:00:00 2001 From: Shaqayeq Date: Thu, 12 Mar 2026 23:20:04 -0700 Subject: [PATCH 03/14] python-sdk: pin repo runtime bootstrap version (2026-03-12) Remove CODEX_PYTHON_RUNTIME_VERSION from the repo bootstrap path and always provision the checked-in pinned codex-cli-bin runtime version for examples, notebook, and real integration coverage. This keeps repo-source Python execution aligned with one binary contract, rewires the real integration harness and notebook bootstrap to use the pinned runtime directly, and updates the docs to describe automatic pinned-runtime provisioning instead of env-driven overrides. 
Validation: - RUN_REAL_CODEX_TESTS=1 python3 -m pytest sdk/python/tests -rs - RUN_REAL_CODEX_TESTS=1 python3 -m pytest sdk/python/tests/test_real_app_server_integration.py -rs Co-authored-by: Codex --- sdk/python/README.md | 8 +-- sdk/python/_runtime_setup.py | 50 +++---------------- sdk/python/examples/README.md | 14 +++--- sdk/python/notebooks/sdk_walkthrough.ipynb | 3 +- .../tests/test_real_app_server_integration.py | 6 +-- 5 files changed, 22 insertions(+), 59 deletions(-) diff --git a/sdk/python/README.md b/sdk/python/README.md index ef3abdf630c..62795399efa 100644 --- a/sdk/python/README.md +++ b/sdk/python/README.md @@ -12,8 +12,9 @@ python -m pip install -e . ``` Published SDK builds pin an exact `codex-cli-bin` runtime dependency. For local -repo development, pass `AppServerConfig(codex_bin=...)` to point at a local -build explicitly. +repo development, either pass `AppServerConfig(codex_bin=...)` to point at a +local build explicitly, or use the repo examples/notebook bootstrap which +installs the pinned runtime package automatically. ## Quickstart @@ -54,7 +55,8 @@ wheel. For local repo development, the checked-in `sdk/python-runtime` package is only a template for staged release artifacts. Editable installs should use an -explicit `codex_bin` override instead. +explicit `codex_bin` override for manual SDK usage; the repo examples and +notebook bootstrap the pinned runtime package automatically. 
## Maintainer workflow diff --git a/sdk/python/_runtime_setup.py b/sdk/python/_runtime_setup.py index b02bfb50cca..576e5c7da71 100644 --- a/sdk/python/_runtime_setup.py +++ b/sdk/python/_runtime_setup.py @@ -16,7 +16,7 @@ from pathlib import Path PACKAGE_NAME = "codex-cli-bin" -RUNTIME_VERSION_ENV_VAR = "CODEX_PYTHON_RUNTIME_VERSION" +PINNED_RUNTIME_VERSION = "0.115.0-alpha.11" REPO_SLUG = "openai/codex" @@ -24,55 +24,20 @@ class RuntimeSetupError(RuntimeError): pass -def configured_runtime_version() -> str | None: - value = os.environ.get(RUNTIME_VERSION_ENV_VAR) - if value is None: - return None - normalized = value.strip() - if not normalized: - raise RuntimeSetupError( - f"{RUNTIME_VERSION_ENV_VAR} is set but empty. " - "Set it to a release version like 0.115.0-alpha.11." - ) - return normalized - - -def required_runtime_version() -> str: - version = configured_runtime_version() - if version is not None: - return version - raise RuntimeSetupError( - f"Set {RUNTIME_VERSION_ENV_VAR}= so repo-local examples and real " - "integration coverage can install the pinned codex-cli-bin runtime package." - ) +def pinned_runtime_version() -> str: + return PINNED_RUNTIME_VERSION def ensure_runtime_package_installed( python_executable: str | Path, sdk_python_dir: Path, - runtime_version: str | None = None, install_target: Path | None = None, ) -> str: - requested_version = runtime_version or configured_runtime_version() + requested_version = pinned_runtime_version() installed_version = None if install_target is None: installed_version = _installed_runtime_version(python_executable) - normalized_requested = ( - _normalized_package_version(requested_version) if requested_version is not None else None - ) - if requested_version is None: - if install_target is not None: - raise RuntimeSetupError( - f"{RUNTIME_VERSION_ENV_VAR} is required when installing {PACKAGE_NAME} " - "into an isolated target directory." 
- ) - if installed_version is None: - raise RuntimeSetupError( - f"Unable to locate {PACKAGE_NAME} in {python_executable}.\n" - f"Install {PACKAGE_NAME} first, or set {RUNTIME_VERSION_ENV_VAR}= " - "to download a matching release codex artifact and install a local runtime wheel." - ) - return installed_version + normalized_requested = _normalized_package_version(requested_version) if installed_version is not None and _normalized_package_version(installed_version) == normalized_requested: return requested_version @@ -371,10 +336,9 @@ def _normalized_package_version(version: str) -> str: __all__ = [ "PACKAGE_NAME", - "RUNTIME_VERSION_ENV_VAR", + "PINNED_RUNTIME_VERSION", "RuntimeSetupError", - "configured_runtime_version", "ensure_runtime_package_installed", + "pinned_runtime_version", "platform_asset_name", - "required_runtime_version", ] diff --git a/sdk/python/examples/README.md b/sdk/python/examples/README.md index 37e3ecdc649..8cd18d2c4d5 100644 --- a/sdk/python/examples/README.md +++ b/sdk/python/examples/README.md @@ -25,11 +25,12 @@ When running examples from this repo checkout, the SDK source uses the local tree and does not bundle a runtime binary. The helper in `examples/_bootstrap.py` uses the installed `codex-cli-bin` runtime package. -If `codex-cli-bin` is not already installed, set `CODEX_PYTHON_RUNTIME_VERSION` -to a release version like `0.115.0-alpha.11`; the bootstrap will download the -matching GitHub release artifact, stage a temporary local `codex-cli-bin` -package, install it into your active interpreter, and clean up the temporary -files afterward. +If the pinned `codex-cli-bin` runtime is not already installed, the bootstrap +will download the matching GitHub release artifact, stage a temporary local +`codex-cli-bin` package, install it into your active interpreter, and clean up +the temporary files afterward. 
+ +Current pinned runtime version: `0.115.0-alpha.11` ## Run examples @@ -43,12 +44,11 @@ python examples//async.py The examples bootstrap local imports from `sdk/python/src` automatically, so no SDK wheel install is required. You only need the Python dependencies for your active interpreter and an installed `codex-cli-bin` runtime package (either -already present or provisioned through `CODEX_PYTHON_RUNTIME_VERSION`). +already present or automatically provisioned by the bootstrap). ## Recommended first run ```bash -export CODEX_PYTHON_RUNTIME_VERSION=0.115.0-alpha.11 python examples/01_quickstart_constructor/sync.py python examples/01_quickstart_constructor/async.py ``` diff --git a/sdk/python/notebooks/sdk_walkthrough.ipynb b/sdk/python/notebooks/sdk_walkthrough.ipynb index 4b2b4e95c11..4b4c42a5630 100644 --- a/sdk/python/notebooks/sdk_walkthrough.ipynb +++ b/sdk/python/notebooks/sdk_walkthrough.ipynb @@ -96,12 +96,11 @@ "if repo_python_str not in sys.path:\n", " sys.path.insert(0, repo_python_str)\n", "\n", - "from _runtime_setup import configured_runtime_version, ensure_runtime_package_installed\n", + "from _runtime_setup import ensure_runtime_package_installed\n", "\n", "runtime_version = ensure_runtime_package_installed(\n", " sys.executable,\n", " repo_python_dir,\n", - " configured_runtime_version(),\n", ")\n", "\n", "src_dir = repo_python_dir / 'src'\n", diff --git a/sdk/python/tests/test_real_app_server_integration.py b/sdk/python/tests/test_real_app_server_integration.py index 1ae4c2f5f47..578e3e1bd7e 100644 --- a/sdk/python/tests/test_real_app_server_integration.py +++ b/sdk/python/tests/test_real_app_server_integration.py @@ -19,7 +19,7 @@ if root_str not in sys.path: sys.path.insert(0, root_str) -from _runtime_setup import ensure_runtime_package_installed, required_runtime_version +from _runtime_setup import ensure_runtime_package_installed, pinned_runtime_version RUN_REAL_CODEX_TESTS = os.environ.get("RUN_REAL_CODEX_TESTS") == "1" pytestmark = 
pytest.mark.skipif( @@ -67,7 +67,7 @@ class PreparedRuntimeEnv: @pytest.fixture(scope="session") def runtime_env(tmp_path_factory: pytest.TempPathFactory) -> PreparedRuntimeEnv: - runtime_version = required_runtime_version() + runtime_version = pinned_runtime_version() temp_root = tmp_path_factory.mktemp("python-runtime-env") isolated_site = temp_root / "site-packages" python = sys.executable @@ -89,13 +89,11 @@ def runtime_env(tmp_path_factory: pytest.TempPathFactory) -> PreparedRuntimeEnv: ensure_runtime_package_installed( python, ROOT, - runtime_version, install_target=isolated_site, ) env = os.environ.copy() env["PYTHONPATH"] = os.pathsep.join([str(isolated_site), str(ROOT / "src")]) - env["CODEX_PYTHON_RUNTIME_VERSION"] = runtime_version env["CODEX_PYTHON_SDK_DIR"] = str(ROOT) return PreparedRuntimeEnv(python=python, env=env, runtime_version=runtime_version) From 0ba4cfa4f4bd9488ffb2f210471573accaee9817 Mon Sep 17 00:00:00 2001 From: Shaqayeq Date: Fri, 13 Mar 2026 13:50:56 -0700 Subject: [PATCH 04/14] python-sdk: switch repo examples to gpt-5.4 (2026-03-13) Update the repo-run examples that still hardcoded gpt-5 so they use the same gpt-5.4 path already succeeding elsewhere in this runtime. This fixes the quickstart, lifecycle, multimodal, parity, CLI, retry, and kitchen-sink examples that were failing with the runtime-side tool_search compatibility error under gpt-5. 
Validation: - ran all 25 sdk/python example scripts locally - no examples reported failed turns after the model update - remaining empty-item outputs are limited to 02_turn_run and 03_turn_stream_events for follow-up investigation Co-authored-by: Codex --- sdk/python/examples/01_quickstart_constructor/async.py | 2 +- sdk/python/examples/01_quickstart_constructor/sync.py | 2 +- sdk/python/examples/03_turn_stream_events/async.py | 2 +- sdk/python/examples/03_turn_stream_events/sync.py | 2 +- sdk/python/examples/05_existing_thread/async.py | 2 +- sdk/python/examples/05_existing_thread/sync.py | 2 +- .../examples/06_thread_lifecycle_and_controls/async.py | 6 +++--- .../examples/06_thread_lifecycle_and_controls/sync.py | 6 +++--- sdk/python/examples/07_image_and_text/async.py | 2 +- sdk/python/examples/07_image_and_text/sync.py | 2 +- sdk/python/examples/08_local_image_and_text/async.py | 2 +- sdk/python/examples/08_local_image_and_text/sync.py | 2 +- sdk/python/examples/09_async_parity/sync.py | 2 +- sdk/python/examples/10_error_handling_and_retry/async.py | 2 +- sdk/python/examples/10_error_handling_and_retry/sync.py | 2 +- sdk/python/examples/11_cli_mini_app/async.py | 2 +- sdk/python/examples/11_cli_mini_app/sync.py | 2 +- sdk/python/examples/12_turn_params_kitchen_sink/async.py | 4 ++-- sdk/python/examples/12_turn_params_kitchen_sink/sync.py | 4 ++-- 19 files changed, 25 insertions(+), 25 deletions(-) diff --git a/sdk/python/examples/01_quickstart_constructor/async.py b/sdk/python/examples/01_quickstart_constructor/async.py index 548640bc9fe..057ba160892 100644 --- a/sdk/python/examples/01_quickstart_constructor/async.py +++ b/sdk/python/examples/01_quickstart_constructor/async.py @@ -18,7 +18,7 @@ async def main() -> None: async with AsyncCodex(config=runtime_config()) as codex: print("Server:", codex.metadata.server_name, codex.metadata.server_version) - thread = await codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) + thread = await 
codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"}) turn = await thread.turn(TextInput("Say hello in one sentence.")) result = await turn.run() diff --git a/sdk/python/examples/01_quickstart_constructor/sync.py b/sdk/python/examples/01_quickstart_constructor/sync.py index 3ad7981299b..9c13c85927c 100644 --- a/sdk/python/examples/01_quickstart_constructor/sync.py +++ b/sdk/python/examples/01_quickstart_constructor/sync.py @@ -14,7 +14,7 @@ with Codex(config=runtime_config()) as codex: print("Server:", codex.metadata.server_name, codex.metadata.server_version) - thread = codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) + thread = codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"}) result = thread.turn(TextInput("Say hello in one sentence.")).run() print("Status:", result.status) print("Text:", result.text) diff --git a/sdk/python/examples/03_turn_stream_events/async.py b/sdk/python/examples/03_turn_stream_events/async.py index 1198c8f7527..ea8165bccde 100644 --- a/sdk/python/examples/03_turn_stream_events/async.py +++ b/sdk/python/examples/03_turn_stream_events/async.py @@ -16,7 +16,7 @@ async def main() -> None: async with AsyncCodex(config=runtime_config()) as codex: - thread = await codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) + thread = await codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"}) turn = await thread.turn(TextInput("Count from 1 to 200 with commas, then one summary sentence.")) # Best effort controls: models can finish quickly, so races are expected. 
diff --git a/sdk/python/examples/03_turn_stream_events/sync.py b/sdk/python/examples/03_turn_stream_events/sync.py index 03c02c44c47..e9fb908c52b 100644 --- a/sdk/python/examples/03_turn_stream_events/sync.py +++ b/sdk/python/examples/03_turn_stream_events/sync.py @@ -12,7 +12,7 @@ from codex_app_server import Codex, TextInput with Codex(config=runtime_config()) as codex: - thread = codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) + thread = codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"}) turn = thread.turn(TextInput("Count from 1 to 200 with commas, then one summary sentence.")) # Best effort controls: models can finish quickly, so races are expected. diff --git a/sdk/python/examples/05_existing_thread/async.py b/sdk/python/examples/05_existing_thread/async.py index d1da384dd21..990231eee37 100644 --- a/sdk/python/examples/05_existing_thread/async.py +++ b/sdk/python/examples/05_existing_thread/async.py @@ -16,7 +16,7 @@ async def main() -> None: async with AsyncCodex(config=runtime_config()) as codex: - original = await codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) + original = await codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"}) first_turn = await original.turn(TextInput("Tell me one fact about Saturn.")) first = await first_turn.run() diff --git a/sdk/python/examples/05_existing_thread/sync.py b/sdk/python/examples/05_existing_thread/sync.py index b6526355c71..9abe16ddc61 100644 --- a/sdk/python/examples/05_existing_thread/sync.py +++ b/sdk/python/examples/05_existing_thread/sync.py @@ -13,7 +13,7 @@ with Codex(config=runtime_config()) as codex: # Create an initial thread and turn so we have a real thread to resume. 
- original = codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) + original = codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"}) first = original.turn(TextInput("Tell me one fact about Saturn.")).run() print("Created thread:", first.thread_id) diff --git a/sdk/python/examples/06_thread_lifecycle_and_controls/async.py b/sdk/python/examples/06_thread_lifecycle_and_controls/async.py index ed263e71577..f34e3d2873f 100644 --- a/sdk/python/examples/06_thread_lifecycle_and_controls/async.py +++ b/sdk/python/examples/06_thread_lifecycle_and_controls/async.py @@ -16,7 +16,7 @@ async def main() -> None: async with AsyncCodex(config=runtime_config()) as codex: - thread = await codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) + thread = await codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"}) first = await (await thread.turn(TextInput("One sentence about structured planning."))).run() second = await (await thread.turn(TextInput("Now restate it for a junior engineer."))).run() @@ -33,7 +33,7 @@ async def main() -> None: try: resumed = await codex.thread_resume( unarchived.id, - model="gpt-5", + model="gpt-5.4", config={"model_reasoning_effort": "high"}, ) resumed_result = await (await resumed.turn(TextInput("Continue in one short sentence."))).run() @@ -43,7 +43,7 @@ async def main() -> None: forked_info = "n/a" try: - forked = await codex.thread_fork(unarchived.id, model="gpt-5") + forked = await codex.thread_fork(unarchived.id, model="gpt-5.4") forked_result = await (await forked.turn(TextInput("Take a different angle in one short sentence."))).run() forked_info = f"{forked_result.turn_id} {forked_result.status}" except Exception as exc: diff --git a/sdk/python/examples/06_thread_lifecycle_and_controls/sync.py b/sdk/python/examples/06_thread_lifecycle_and_controls/sync.py index fb1316e78ca..1cd6ad5a334 100644 --- 
a/sdk/python/examples/06_thread_lifecycle_and_controls/sync.py +++ b/sdk/python/examples/06_thread_lifecycle_and_controls/sync.py @@ -13,7 +13,7 @@ with Codex(config=runtime_config()) as codex: - thread = codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) + thread = codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"}) first = thread.turn(TextInput("One sentence about structured planning.")).run() second = thread.turn(TextInput("Now restate it for a junior engineer.")).run() @@ -30,7 +30,7 @@ try: resumed = codex.thread_resume( unarchived.id, - model="gpt-5", + model="gpt-5.4", config={"model_reasoning_effort": "high"}, ) resumed_result = resumed.turn(TextInput("Continue in one short sentence.")).run() @@ -40,7 +40,7 @@ forked_info = "n/a" try: - forked = codex.thread_fork(unarchived.id, model="gpt-5") + forked = codex.thread_fork(unarchived.id, model="gpt-5.4") forked_result = forked.turn(TextInput("Take a different angle in one short sentence.")).run() forked_info = f"{forked_result.turn_id} {forked_result.status}" except Exception as exc: diff --git a/sdk/python/examples/07_image_and_text/async.py b/sdk/python/examples/07_image_and_text/async.py index 67f667c87e9..bb6604202af 100644 --- a/sdk/python/examples/07_image_and_text/async.py +++ b/sdk/python/examples/07_image_and_text/async.py @@ -18,7 +18,7 @@ async def main() -> None: async with AsyncCodex(config=runtime_config()) as codex: - thread = await codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) + thread = await codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"}) turn = await thread.turn( [ TextInput("What is in this image? 
Give 3 bullets."), diff --git a/sdk/python/examples/07_image_and_text/sync.py b/sdk/python/examples/07_image_and_text/sync.py index e4ec44d3c4a..6482ca100d2 100644 --- a/sdk/python/examples/07_image_and_text/sync.py +++ b/sdk/python/examples/07_image_and_text/sync.py @@ -14,7 +14,7 @@ REMOTE_IMAGE_URL = "https://raw.githubusercontent.com/github/explore/main/topics/python/python.png" with Codex(config=runtime_config()) as codex: - thread = codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) + thread = codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"}) result = thread.turn( [ TextInput("What is in this image? Give 3 bullets."), diff --git a/sdk/python/examples/08_local_image_and_text/async.py b/sdk/python/examples/08_local_image_and_text/async.py index f20e33c0886..82f8388671a 100644 --- a/sdk/python/examples/08_local_image_and_text/async.py +++ b/sdk/python/examples/08_local_image_and_text/async.py @@ -20,7 +20,7 @@ async def main() -> None: async with AsyncCodex(config=runtime_config()) as codex: - thread = await codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) + thread = await codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"}) turn = await thread.turn( [ diff --git a/sdk/python/examples/08_local_image_and_text/sync.py b/sdk/python/examples/08_local_image_and_text/sync.py index 41b489e0a88..ee5dc32de0a 100644 --- a/sdk/python/examples/08_local_image_and_text/sync.py +++ b/sdk/python/examples/08_local_image_and_text/sync.py @@ -16,7 +16,7 @@ raise FileNotFoundError(f"Missing bundled image: {IMAGE_PATH}") with Codex(config=runtime_config()) as codex: - thread = codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) + thread = codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"}) result = thread.turn( [ diff --git a/sdk/python/examples/09_async_parity/sync.py b/sdk/python/examples/09_async_parity/sync.py index 
0ebaf6b8d49..b7b298c786a 100644 --- a/sdk/python/examples/09_async_parity/sync.py +++ b/sdk/python/examples/09_async_parity/sync.py @@ -14,7 +14,7 @@ with Codex(config=runtime_config()) as codex: print("Server:", codex.metadata.server_name, codex.metadata.server_version) - thread = codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) + thread = codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"}) turn = thread.turn(TextInput("Say hello in one sentence.")) result = turn.run() diff --git a/sdk/python/examples/10_error_handling_and_retry/async.py b/sdk/python/examples/10_error_handling_and_retry/async.py index c8555b69361..cdc8bcd8c3b 100644 --- a/sdk/python/examples/10_error_handling_and_retry/async.py +++ b/sdk/python/examples/10_error_handling_and_retry/async.py @@ -55,7 +55,7 @@ async def retry_on_overload_async( async def main() -> None: async with AsyncCodex(config=runtime_config()) as codex: - thread = await codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) + thread = await codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"}) try: result = await retry_on_overload_async( diff --git a/sdk/python/examples/10_error_handling_and_retry/sync.py b/sdk/python/examples/10_error_handling_and_retry/sync.py index 02371566390..cc2e565b764 100644 --- a/sdk/python/examples/10_error_handling_and_retry/sync.py +++ b/sdk/python/examples/10_error_handling_and_retry/sync.py @@ -19,7 +19,7 @@ ) with Codex(config=runtime_config()) as codex: - thread = codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) + thread = codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"}) try: result = retry_on_overload( diff --git a/sdk/python/examples/11_cli_mini_app/async.py b/sdk/python/examples/11_cli_mini_app/async.py index 18e6ea46e0c..eafc3d19323 100644 --- a/sdk/python/examples/11_cli_mini_app/async.py +++ 
b/sdk/python/examples/11_cli_mini_app/async.py @@ -43,7 +43,7 @@ async def main() -> None: print("Codex async mini CLI. Type /exit to quit.") async with AsyncCodex(config=runtime_config()) as codex: - thread = await codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) + thread = await codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"}) print("Thread:", thread.id) while True: diff --git a/sdk/python/examples/11_cli_mini_app/sync.py b/sdk/python/examples/11_cli_mini_app/sync.py index 3e02534bb56..07f69520726 100644 --- a/sdk/python/examples/11_cli_mini_app/sync.py +++ b/sdk/python/examples/11_cli_mini_app/sync.py @@ -40,7 +40,7 @@ def _format_usage(usage: object | None) -> str: with Codex(config=runtime_config()) as codex: - thread = codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) + thread = codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"}) print("Thread:", thread.id) while True: diff --git a/sdk/python/examples/12_turn_params_kitchen_sink/async.py b/sdk/python/examples/12_turn_params_kitchen_sink/async.py index bc3d5e57855..1e3c6cdb0aa 100644 --- a/sdk/python/examples/12_turn_params_kitchen_sink/async.py +++ b/sdk/python/examples/12_turn_params_kitchen_sink/async.py @@ -51,14 +51,14 @@ async def main() -> None: async with AsyncCodex(config=runtime_config()) as codex: - thread = await codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) + thread = await codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"}) turn = await thread.turn( TextInput(PROMPT), approval_policy=APPROVAL_POLICY, cwd=str(Path.cwd()), effort=ReasoningEffort.medium, - model="gpt-5", + model="gpt-5.4", output_schema=OUTPUT_SCHEMA, personality=Personality.pragmatic, sandbox_policy=SANDBOX_POLICY, diff --git a/sdk/python/examples/12_turn_params_kitchen_sink/sync.py b/sdk/python/examples/12_turn_params_kitchen_sink/sync.py index 
3f3574fd2ca..a68f36d002b 100644 --- a/sdk/python/examples/12_turn_params_kitchen_sink/sync.py +++ b/sdk/python/examples/12_turn_params_kitchen_sink/sync.py @@ -47,14 +47,14 @@ APPROVAL_POLICY = AskForApproval.model_validate("never") with Codex(config=runtime_config()) as codex: - thread = codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) + thread = codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"}) turn = thread.turn( TextInput(PROMPT), approval_policy=APPROVAL_POLICY, cwd=str(Path.cwd()), effort=ReasoningEffort.medium, - model="gpt-5", + model="gpt-5.4", output_schema=OUTPUT_SCHEMA, personality=Personality.pragmatic, sandbox_policy=SANDBOX_POLICY, From 3bb1a1325f01a2b627c37073bdb9cb3763fd43e6 Mon Sep 17 00:00:00 2001 From: Shaqayeq Date: Fri, 13 Mar 2026 14:00:49 -0700 Subject: [PATCH 05/14] python-sdk: show persisted turn items in example 02 (2026-03-13) Update the sync and async turn-run examples to read the thread after a completed turn and print the persisted item count instead of the empty immediate TurnResult.items list. This makes the example output match the current app-server behavior, where the completed turn payload can have empty items even though the persisted thread turn later contains the generated items. 
Validation: - python3 sdk/python/examples/02_turn_run/sync.py - python3 sdk/python/examples/02_turn_run/async.py Co-authored-by: Codex --- sdk/python/examples/02_turn_run/async.py | 10 +++++++++- sdk/python/examples/02_turn_run/sync.py | 10 +++++++++- 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/sdk/python/examples/02_turn_run/async.py b/sdk/python/examples/02_turn_run/async.py index 5d25d367c84..e6fe5f7cbea 100644 --- a/sdk/python/examples/02_turn_run/async.py +++ b/sdk/python/examples/02_turn_run/async.py @@ -19,6 +19,11 @@ async def main() -> None: thread = await codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"}) turn = await thread.turn(TextInput("Give 3 bullets about SIMD.")) result = await turn.run() + persisted = await thread.read(include_turns=True) + persisted_turn = next( + (turn for turn in persisted.thread.turns or [] if turn.id == result.turn_id), + None, + ) print("thread_id:", result.thread_id) print("turn_id:", result.turn_id) @@ -26,7 +31,10 @@ async def main() -> None: if result.error is not None: print("error:", result.error) print("text:", result.text) - print("items.count:", len(result.items)) + print( + "persisted.items.count:", + 0 if persisted_turn is None else len(persisted_turn.items or []), + ) if result.usage is None: raise RuntimeError("missing usage for completed turn") print("usage.thread_id:", result.usage.thread_id) diff --git a/sdk/python/examples/02_turn_run/sync.py b/sdk/python/examples/02_turn_run/sync.py index 89b1411571e..5d53224edff 100644 --- a/sdk/python/examples/02_turn_run/sync.py +++ b/sdk/python/examples/02_turn_run/sync.py @@ -14,6 +14,11 @@ with Codex(config=runtime_config()) as codex: thread = codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"}) result = thread.turn(TextInput("Give 3 bullets about SIMD.")).run() + persisted = thread.read(include_turns=True) + persisted_turn = next( + (turn for turn in persisted.thread.turns or [] if turn.id == 
result.turn_id), + None, + ) print("thread_id:", result.thread_id) print("turn_id:", result.turn_id) @@ -21,7 +26,10 @@ if result.error is not None: print("error:", result.error) print("text:", result.text) - print("items.count:", len(result.items)) + print( + "persisted.items.count:", + 0 if persisted_turn is None else len(persisted_turn.items or []), + ) if result.usage is None: raise RuntimeError("missing usage for completed turn") print("usage.thread_id:", result.usage.thread_id) From d864b8c836bb6512eee6b0818da5800e02c528d0 Mon Sep 17 00:00:00 2001 From: Shaqayeq Date: Mon, 16 Mar 2026 14:04:55 -0700 Subject: [PATCH 06/14] python-sdk: expose canonical app-server types (2026-03-16) Remove the SDK alias/result layers so the wrapper surface returns canonical generated app-server models directly. - delete public type alias modules and regenerate v2_all.py against current schema - return InitializeResponse from metadata and generated Turn from run() - update docs, examples, notebook, and tests to use canonical generated models and repo-only text extraction helpers Validation: - PYTHONPATH=sdk/python/src python3 -m pytest sdk/python/tests - GH_TOKEN="<REDACTED>" RUN_REAL_CODEX_TESTS=1 PYTHONPATH=sdk/python/src python3 -m pytest sdk/python/tests -rs Co-authored-by: Codex --- sdk/python/README.md | 5 +- sdk/python/docs/api-reference.md | 50 +- sdk/python/docs/faq.md | 4 +- sdk/python/docs/getting-started.md | 43 +- .../01_quickstart_constructor/async.py | 14 +- .../01_quickstart_constructor/sync.py | 14 +- sdk/python/examples/02_turn_run/async.py | 22 +- sdk/python/examples/02_turn_run/sync.py | 22 +- .../examples/05_existing_thread/async.py | 12 +- .../examples/05_existing_thread/sync.py | 10 +- .../06_thread_lifecycle_and_controls/async.py | 8 +- .../06_thread_lifecycle_and_controls/sync.py | 8 +- .../examples/07_image_and_text/async.py | 11 +- sdk/python/examples/07_image_and_text/sync.py | 11 +-
.../examples/08_local_image_and_text/async.py | 11 +- .../examples/08_local_image_and_text/sync.py | 11 +- sdk/python/examples/09_async_parity/sync.py | 18 +- .../10_error_handling_and_retry/async.py | 11 +- .../10_error_handling_and_retry/sync.py | 11 +- sdk/python/examples/11_cli_mini_app/async.py | 4 +- sdk/python/examples/11_cli_mini_app/sync.py | 4 +- .../12_turn_params_kitchen_sink/async.py | 13 +- .../12_turn_params_kitchen_sink/sync.py | 13 +- .../13_model_select_and_turn_params/async.py | 14 +- .../13_model_select_and_turn_params/sync.py | 14 +- sdk/python/examples/_bootstrap.py | 49 + sdk/python/notebooks/sdk_walkthrough.ipynb | 89 +- sdk/python/src/codex_app_server/__init__.py | 48 +- .../src/codex_app_server/generated/v2_all.py | 6250 +++++------------ .../codex_app_server/generated/v2_types.py | 23 - sdk/python/src/codex_app_server/public_api.py | 191 +- .../src/codex_app_server/public_types.py | 41 - .../test_artifact_workflow_and_binaries.py | 2 +- .../tests/test_public_api_runtime_behavior.py | 68 +- .../tests/test_public_api_signatures.py | 10 +- .../tests/test_real_app_server_integration.py | 64 +- 36 files changed, 2354 insertions(+), 4839 deletions(-) delete mode 100644 sdk/python/src/codex_app_server/generated/v2_types.py delete mode 100644 sdk/python/src/codex_app_server/public_types.py diff --git a/sdk/python/README.md b/sdk/python/README.md index 62795399efa..993e4bcecf9 100644 --- a/sdk/python/README.md +++ b/sdk/python/README.md @@ -23,8 +23,9 @@ from codex_app_server import Codex, TextInput with Codex() as codex: thread = codex.thread_start(model="gpt-5") - result = thread.turn(TextInput("Say hello in one sentence.")).run() - print(result.text) + completed_turn = thread.turn(TextInput("Say hello in one sentence.")).run() + print(completed_turn.status) + print(completed_turn.id) ``` ## Docs map diff --git a/sdk/python/docs/api-reference.md b/sdk/python/docs/api-reference.md index 597b069707f..02cd0631bde 100644 --- 
a/sdk/python/docs/api-reference.md +++ b/sdk/python/docs/api-reference.md @@ -14,8 +14,7 @@ from codex_app_server import ( AsyncThread, Turn, AsyncTurn, - TurnResult, - InitializeResult, + InitializeResponse, Input, InputItem, TextInput, @@ -23,13 +22,14 @@ from codex_app_server import ( LocalImageInput, SkillInput, MentionInput, - ThreadItem, TurnStatus, ) +from codex_app_server.generated.v2_all import ThreadItem ``` - Version: `codex_app_server.__version__` - Requires Python >= 3.10 +- Canonical generated app-server models live in `codex_app_server.generated.v2_all` ## Codex (sync) @@ -39,7 +39,7 @@ Codex(config: AppServerConfig | None = None) Properties/methods: -- `metadata -> InitializeResult` +- `metadata -> InitializeResponse` - `close() -> None` - `thread_start(*, approval_policy=None, base_instructions=None, config=None, cwd=None, developer_instructions=None, ephemeral=None, model=None, model_provider=None, personality=None, sandbox=None) -> Thread` - `thread_list(*, archived=None, cursor=None, cwd=None, limit=None, model_providers=None, sort_key=None, source_kinds=None) -> ThreadListResponse` @@ -64,12 +64,12 @@ AsyncCodex(config: AppServerConfig | None = None) Properties/methods: -- `metadata -> InitializeResult` +- `metadata -> InitializeResponse` - `close() -> Awaitable[None]` - `thread_start(*, approval_policy=None, base_instructions=None, config=None, cwd=None, developer_instructions=None, ephemeral=None, model=None, model_provider=None, personality=None, sandbox=None) -> Awaitable[AsyncThread]` - `thread_list(*, archived=None, cursor=None, cwd=None, limit=None, model_providers=None, sort_key=None, source_kinds=None) -> Awaitable[ThreadListResponse]` - `thread_resume(thread_id: str, *, approval_policy=None, base_instructions=None, config=None, cwd=None, developer_instructions=None, model=None, model_provider=None, personality=None, sandbox=None) -> Awaitable[AsyncThread]` -- `thread_fork(thread_id: str, *, approval_policy=None, 
base_instructions=None, config=None, cwd=None, developer_instructions=None, model=None, model_provider=None, sandbox=None) -> Awaitable[AsyncThread]` +- `thread_fork(thread_id: str, *, approval_policy=None, base_instructions=None, config=None, cwd=None, developer_instructions=None, ephemeral=None, model=None, model_provider=None, sandbox=None) -> Awaitable[AsyncThread]` - `thread_archive(thread_id: str) -> Awaitable[ThreadArchiveResponse]` - `thread_unarchive(thread_id: str) -> Awaitable[AsyncThread]` - `models(*, include_hidden: bool = False) -> Awaitable[ModelListResponse]` @@ -106,7 +106,7 @@ async with AsyncCodex() as codex: - `steer(input: Input) -> TurnSteerResponse` - `interrupt() -> TurnInterruptResponse` - `stream() -> Iterator[Notification]` -- `run() -> TurnResult` +- `run() -> codex_app_server.generated.v2_all.Turn` Behavior notes: @@ -118,27 +118,13 @@ Behavior notes: - `steer(input: Input) -> Awaitable[TurnSteerResponse]` - `interrupt() -> Awaitable[TurnInterruptResponse]` - `stream() -> AsyncIterator[Notification]` -- `run() -> Awaitable[TurnResult]` +- `run() -> Awaitable[codex_app_server.generated.v2_all.Turn]` Behavior notes: - `stream()` and `run()` are exclusive per client instance in the current experimental build - starting a second turn consumer on the same `AsyncCodex` instance raises `RuntimeError` -## TurnResult - -```python -@dataclass -class TurnResult: - thread_id: str - turn_id: str - status: TurnStatus - error: TurnError | None - text: str - items: list[ThreadItem] - usage: ThreadTokenUsageUpdatedNotification | None -``` - ## Inputs ```python @@ -152,6 +138,20 @@ InputItem = TextInput | ImageInput | LocalImageInput | SkillInput | MentionInput Input = list[InputItem] | InputItem ``` +## Generated Models + +The SDK wrappers return and accept canonical generated app-server models wherever possible: + +```python +from codex_app_server.generated.v2_all import ( + AskForApproval, + ThreadReadResponse, + Turn, + TurnStartParams, + 
TurnStatus, +) +``` + ## Retry + errors ```python @@ -174,7 +174,7 @@ from codex_app_server import ( from codex_app_server import Codex, TextInput with Codex() as codex: - thread = codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) - result = thread.turn(TextInput("Say hello in one sentence.")).run() - print(result.text) + thread = codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"}) + completed_turn = thread.turn(TextInput("Say hello in one sentence.")).run() + print(completed_turn.id, completed_turn.status) ``` diff --git a/sdk/python/docs/faq.md b/sdk/python/docs/faq.md index 7a402b59d4d..aa35e402cdb 100644 --- a/sdk/python/docs/faq.md +++ b/sdk/python/docs/faq.md @@ -8,7 +8,7 @@ ## `run()` vs `stream()` -- `Turn.run()` / `AsyncTurn.run()` is the easiest path. It consumes events until completion and returns `TurnResult`. +- `Turn.run()` / `AsyncTurn.run()` is the easiest path. It consumes events until completion and returns the canonical generated app-server `Turn` model. - `Turn.stream()` / `AsyncTurn.stream()` yields raw notifications (`Notification`) so you can react event-by-event. Choose `run()` for most apps. Choose `stream()` for progress UIs, custom timeout logic, or custom parsing. @@ -91,5 +91,5 @@ Do not blindly retry all errors. For `InvalidParamsError` or `MethodNotFoundErro - Starting a new thread for every prompt when you wanted continuity. - Forgetting to `close()` (or not using context managers). -- Ignoring `TurnResult.status` and `TurnResult.error`. +- Assuming `run()` returns extra SDK-only fields instead of the generated `Turn` model. - Mixing SDK input classes with raw dicts incorrectly. 
diff --git a/sdk/python/docs/getting-started.md b/sdk/python/docs/getting-started.md index 258e2636259..85003a1e987 100644 --- a/sdk/python/docs/getting-started.md +++ b/sdk/python/docs/getting-started.md @@ -25,22 +25,23 @@ Requirements: from codex_app_server import Codex, TextInput with Codex() as codex: - print("Server:", codex.metadata.server_name, codex.metadata.server_version) + server = codex.metadata.serverInfo + print("Server:", None if server is None else server.name, None if server is None else server.version) - thread = codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) - result = thread.turn(TextInput("Say hello in one sentence.")).run() + thread = codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"}) + completed_turn = thread.turn(TextInput("Say hello in one sentence.")).run() - print("Thread:", result.thread_id) - print("Turn:", result.turn_id) - print("Status:", result.status) - print("Text:", result.text) + print("Thread:", thread.id) + print("Turn:", completed_turn.id) + print("Status:", completed_turn.status) + print("Items:", len(completed_turn.items or [])) ``` What happened: - `Codex()` started and initialized `codex app-server`. - `thread_start(...)` created a thread. -- `turn(...).run()` consumed events until `turn/completed` and returned a `TurnResult`. +- `turn(...).run()` consumed events until `turn/completed` and returned the canonical generated app-server `Turn` model. 
- one client can have only one active `Turn.stream()` / `Turn.run()` consumer at a time in the current experimental build ## 3) Continue the same thread (multi-turn) @@ -49,13 +50,13 @@ What happened: from codex_app_server import Codex, TextInput with Codex() as codex: - thread = codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) + thread = codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"}) first = thread.turn(TextInput("Summarize Rust ownership in 2 bullets.")).run() second = thread.turn(TextInput("Now explain it to a Python developer.")).run() - print("first:", first.text) - print("second:", second.text) + print("first:", first.id, first.status) + print("second:", second.id, second.status) ``` ## 4) Async parity @@ -67,10 +68,10 @@ from codex_app_server import AsyncCodex, TextInput async def main() -> None: async with AsyncCodex() as codex: - thread = await codex.thread_start(model="gpt-5", config={"model_reasoning_effort": "high"}) + thread = await codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"}) turn = await thread.turn(TextInput("Continue where we left off.")) - result = await turn.run() - print(result.text) + completed_turn = await turn.run() + print(completed_turn.id, completed_turn.status) asyncio.run(main()) @@ -85,11 +86,19 @@ THREAD_ID = "thr_123" # replace with a real id with Codex() as codex: thread = codex.thread_resume(THREAD_ID) - result = thread.turn(TextInput("Continue where we left off.")).run() - print(result.text) + completed_turn = thread.turn(TextInput("Continue where we left off.")).run() + print(completed_turn.id, completed_turn.status) ``` -## 6) Next stops +## 6) Generated models + +The convenience wrappers live at the package root, but the canonical app-server models live under: + +```python +from codex_app_server.generated.v2_all import Turn, TurnStatus, ThreadReadResponse +``` + +## 7) Next stops - API surface and signatures: `docs/api-reference.md` - 
Common decisions/pitfalls: `docs/faq.md` diff --git a/sdk/python/examples/01_quickstart_constructor/async.py b/sdk/python/examples/01_quickstart_constructor/async.py index 057ba160892..cf525fa6389 100644 --- a/sdk/python/examples/01_quickstart_constructor/async.py +++ b/sdk/python/examples/01_quickstart_constructor/async.py @@ -5,7 +5,13 @@ if str(_EXAMPLES_ROOT) not in sys.path: sys.path.insert(0, str(_EXAMPLES_ROOT)) -from _bootstrap import ensure_local_sdk_src, runtime_config +from _bootstrap import ( + assistant_text_from_turn, + ensure_local_sdk_src, + find_turn_by_id, + runtime_config, + server_label, +) ensure_local_sdk_src() @@ -16,14 +22,16 @@ async def main() -> None: async with AsyncCodex(config=runtime_config()) as codex: - print("Server:", codex.metadata.server_name, codex.metadata.server_version) + print("Server:", server_label(codex.metadata)) thread = await codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"}) turn = await thread.turn(TextInput("Say hello in one sentence.")) result = await turn.run() + persisted = await thread.read(include_turns=True) + persisted_turn = find_turn_by_id(persisted.thread.turns, result.id) print("Status:", result.status) - print("Text:", result.text) + print("Text:", assistant_text_from_turn(persisted_turn)) if __name__ == "__main__": diff --git a/sdk/python/examples/01_quickstart_constructor/sync.py b/sdk/python/examples/01_quickstart_constructor/sync.py index 9c13c85927c..6abf29af385 100644 --- a/sdk/python/examples/01_quickstart_constructor/sync.py +++ b/sdk/python/examples/01_quickstart_constructor/sync.py @@ -5,16 +5,24 @@ if str(_EXAMPLES_ROOT) not in sys.path: sys.path.insert(0, str(_EXAMPLES_ROOT)) -from _bootstrap import ensure_local_sdk_src, runtime_config +from _bootstrap import ( + assistant_text_from_turn, + ensure_local_sdk_src, + find_turn_by_id, + runtime_config, + server_label, +) ensure_local_sdk_src() from codex_app_server import Codex, TextInput with 
Codex(config=runtime_config()) as codex: - print("Server:", codex.metadata.server_name, codex.metadata.server_version) + print("Server:", server_label(codex.metadata)) thread = codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"}) result = thread.turn(TextInput("Say hello in one sentence.")).run() + persisted = thread.read(include_turns=True) + persisted_turn = find_turn_by_id(persisted.thread.turns, result.id) print("Status:", result.status) - print("Text:", result.text) + print("Text:", assistant_text_from_turn(persisted_turn)) diff --git a/sdk/python/examples/02_turn_run/async.py b/sdk/python/examples/02_turn_run/async.py index e6fe5f7cbea..de681a828ef 100644 --- a/sdk/python/examples/02_turn_run/async.py +++ b/sdk/python/examples/02_turn_run/async.py @@ -5,7 +5,12 @@ if str(_EXAMPLES_ROOT) not in sys.path: sys.path.insert(0, str(_EXAMPLES_ROOT)) -from _bootstrap import ensure_local_sdk_src, runtime_config +from _bootstrap import ( + assistant_text_from_turn, + ensure_local_sdk_src, + find_turn_by_id, + runtime_config, +) ensure_local_sdk_src() @@ -20,25 +25,18 @@ async def main() -> None: turn = await thread.turn(TextInput("Give 3 bullets about SIMD.")) result = await turn.run() persisted = await thread.read(include_turns=True) - persisted_turn = next( - (turn for turn in persisted.thread.turns or [] if turn.id == result.turn_id), - None, - ) + persisted_turn = find_turn_by_id(persisted.thread.turns, result.id) - print("thread_id:", result.thread_id) - print("turn_id:", result.turn_id) + print("thread_id:", thread.id) + print("turn_id:", result.id) print("status:", result.status) if result.error is not None: print("error:", result.error) - print("text:", result.text) + print("text:", assistant_text_from_turn(persisted_turn)) print( "persisted.items.count:", 0 if persisted_turn is None else len(persisted_turn.items or []), ) - if result.usage is None: - raise RuntimeError("missing usage for completed turn") - print("usage.thread_id:", 
result.usage.thread_id) - print("usage.turn_id:", result.usage.turn_id) if __name__ == "__main__": diff --git a/sdk/python/examples/02_turn_run/sync.py b/sdk/python/examples/02_turn_run/sync.py index 5d53224edff..823ffb7fd24 100644 --- a/sdk/python/examples/02_turn_run/sync.py +++ b/sdk/python/examples/02_turn_run/sync.py @@ -5,7 +5,12 @@ if str(_EXAMPLES_ROOT) not in sys.path: sys.path.insert(0, str(_EXAMPLES_ROOT)) -from _bootstrap import ensure_local_sdk_src, runtime_config +from _bootstrap import ( + assistant_text_from_turn, + ensure_local_sdk_src, + find_turn_by_id, + runtime_config, +) ensure_local_sdk_src() @@ -15,22 +20,15 @@ thread = codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"}) result = thread.turn(TextInput("Give 3 bullets about SIMD.")).run() persisted = thread.read(include_turns=True) - persisted_turn = next( - (turn for turn in persisted.thread.turns or [] if turn.id == result.turn_id), - None, - ) + persisted_turn = find_turn_by_id(persisted.thread.turns, result.id) - print("thread_id:", result.thread_id) - print("turn_id:", result.turn_id) + print("thread_id:", thread.id) + print("turn_id:", result.id) print("status:", result.status) if result.error is not None: print("error:", result.error) - print("text:", result.text) + print("text:", assistant_text_from_turn(persisted_turn)) print( "persisted.items.count:", 0 if persisted_turn is None else len(persisted_turn.items or []), ) - if result.usage is None: - raise RuntimeError("missing usage for completed turn") - print("usage.thread_id:", result.usage.thread_id) - print("usage.turn_id:", result.usage.turn_id) diff --git a/sdk/python/examples/05_existing_thread/async.py b/sdk/python/examples/05_existing_thread/async.py index 990231eee37..8ce2a1af92a 100644 --- a/sdk/python/examples/05_existing_thread/async.py +++ b/sdk/python/examples/05_existing_thread/async.py @@ -5,7 +5,7 @@ if str(_EXAMPLES_ROOT) not in sys.path: sys.path.insert(0, str(_EXAMPLES_ROOT)) -from 
_bootstrap import ensure_local_sdk_src, runtime_config +from _bootstrap import assistant_text_from_turn, ensure_local_sdk_src, find_turn_by_id, runtime_config ensure_local_sdk_src() @@ -19,13 +19,15 @@ async def main() -> None: original = await codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"}) first_turn = await original.turn(TextInput("Tell me one fact about Saturn.")) - first = await first_turn.run() - print("Created thread:", first.thread_id) + _ = await first_turn.run() + print("Created thread:", original.id) - resumed = await codex.thread_resume(first.thread_id) + resumed = await codex.thread_resume(original.id) second_turn = await resumed.turn(TextInput("Continue with one more fact.")) second = await second_turn.run() - print(second.text) + persisted = await resumed.read(include_turns=True) + persisted_turn = find_turn_by_id(persisted.thread.turns, second.id) + print(assistant_text_from_turn(persisted_turn)) if __name__ == "__main__": diff --git a/sdk/python/examples/05_existing_thread/sync.py b/sdk/python/examples/05_existing_thread/sync.py index 9abe16ddc61..f5a0c4ec451 100644 --- a/sdk/python/examples/05_existing_thread/sync.py +++ b/sdk/python/examples/05_existing_thread/sync.py @@ -5,7 +5,7 @@ if str(_EXAMPLES_ROOT) not in sys.path: sys.path.insert(0, str(_EXAMPLES_ROOT)) -from _bootstrap import ensure_local_sdk_src, runtime_config +from _bootstrap import assistant_text_from_turn, ensure_local_sdk_src, find_turn_by_id, runtime_config ensure_local_sdk_src() @@ -15,9 +15,11 @@ # Create an initial thread and turn so we have a real thread to resume. original = codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"}) first = original.turn(TextInput("Tell me one fact about Saturn.")).run() - print("Created thread:", first.thread_id) + print("Created thread:", original.id) # Resume the existing thread by ID. 
- resumed = codex.thread_resume(first.thread_id) + resumed = codex.thread_resume(original.id) second = resumed.turn(TextInput("Continue with one more fact.")).run() - print(second.text) + persisted = resumed.read(include_turns=True) + persisted_turn = find_turn_by_id(persisted.thread.turns, second.id) + print(assistant_text_from_turn(persisted_turn)) diff --git a/sdk/python/examples/06_thread_lifecycle_and_controls/async.py b/sdk/python/examples/06_thread_lifecycle_and_controls/async.py index f34e3d2873f..1600b7b8eb6 100644 --- a/sdk/python/examples/06_thread_lifecycle_and_controls/async.py +++ b/sdk/python/examples/06_thread_lifecycle_and_controls/async.py @@ -37,7 +37,7 @@ async def main() -> None: config={"model_reasoning_effort": "high"}, ) resumed_result = await (await resumed.turn(TextInput("Continue in one short sentence."))).run() - resumed_info = f"{resumed_result.turn_id} {resumed_result.status}" + resumed_info = f"{resumed_result.id} {resumed_result.status}" except Exception as exc: resumed_info = f"skipped({type(exc).__name__})" @@ -45,7 +45,7 @@ async def main() -> None: try: forked = await codex.thread_fork(unarchived.id, model="gpt-5.4") forked_result = await (await forked.turn(TextInput("Take a different angle in one short sentence."))).run() - forked_info = f"{forked_result.turn_id} {forked_result.status}" + forked_info = f"{forked_result.id} {forked_result.status}" except Exception as exc: forked_info = f"skipped({type(exc).__name__})" @@ -56,8 +56,8 @@ async def main() -> None: compact_info = f"skipped({type(exc).__name__})" print("Lifecycle OK:", thread.id) - print("first:", first.turn_id, first.status) - print("second:", second.turn_id, second.status) + print("first:", first.id, first.status) + print("second:", second.id, second.status) print("read.turns:", len(reading.thread.turns or [])) print("list.active:", len(listing_active.data)) print("list.archived:", len(listing_archived.data)) diff --git 
a/sdk/python/examples/06_thread_lifecycle_and_controls/sync.py b/sdk/python/examples/06_thread_lifecycle_and_controls/sync.py index 1cd6ad5a334..f485ce3ca92 100644 --- a/sdk/python/examples/06_thread_lifecycle_and_controls/sync.py +++ b/sdk/python/examples/06_thread_lifecycle_and_controls/sync.py @@ -34,7 +34,7 @@ config={"model_reasoning_effort": "high"}, ) resumed_result = resumed.turn(TextInput("Continue in one short sentence.")).run() - resumed_info = f"{resumed_result.turn_id} {resumed_result.status}" + resumed_info = f"{resumed_result.id} {resumed_result.status}" except Exception as exc: resumed_info = f"skipped({type(exc).__name__})" @@ -42,7 +42,7 @@ try: forked = codex.thread_fork(unarchived.id, model="gpt-5.4") forked_result = forked.turn(TextInput("Take a different angle in one short sentence.")).run() - forked_info = f"{forked_result.turn_id} {forked_result.status}" + forked_info = f"{forked_result.id} {forked_result.status}" except Exception as exc: forked_info = f"skipped({type(exc).__name__})" @@ -53,8 +53,8 @@ compact_info = f"skipped({type(exc).__name__})" print("Lifecycle OK:", thread.id) - print("first:", first.turn_id, first.status) - print("second:", second.turn_id, second.status) + print("first:", first.id, first.status) + print("second:", second.id, second.status) print("read.turns:", len(reading.thread.turns or [])) print("list.active:", len(listing_active.data)) print("list.archived:", len(listing_archived.data)) diff --git a/sdk/python/examples/07_image_and_text/async.py b/sdk/python/examples/07_image_and_text/async.py index bb6604202af..6087222d2ff 100644 --- a/sdk/python/examples/07_image_and_text/async.py +++ b/sdk/python/examples/07_image_and_text/async.py @@ -5,7 +5,12 @@ if str(_EXAMPLES_ROOT) not in sys.path: sys.path.insert(0, str(_EXAMPLES_ROOT)) -from _bootstrap import ensure_local_sdk_src, runtime_config +from _bootstrap import ( + assistant_text_from_turn, + ensure_local_sdk_src, + find_turn_by_id, + runtime_config, +) 
ensure_local_sdk_src() @@ -26,9 +31,11 @@ async def main() -> None: ] ) result = await turn.run() + persisted = await thread.read(include_turns=True) + persisted_turn = find_turn_by_id(persisted.thread.turns, result.id) print("Status:", result.status) - print(result.text) + print(assistant_text_from_turn(persisted_turn)) if __name__ == "__main__": diff --git a/sdk/python/examples/07_image_and_text/sync.py b/sdk/python/examples/07_image_and_text/sync.py index 6482ca100d2..a857fab8378 100644 --- a/sdk/python/examples/07_image_and_text/sync.py +++ b/sdk/python/examples/07_image_and_text/sync.py @@ -5,7 +5,12 @@ if str(_EXAMPLES_ROOT) not in sys.path: sys.path.insert(0, str(_EXAMPLES_ROOT)) -from _bootstrap import ensure_local_sdk_src, runtime_config +from _bootstrap import ( + assistant_text_from_turn, + ensure_local_sdk_src, + find_turn_by_id, + runtime_config, +) ensure_local_sdk_src() @@ -21,6 +26,8 @@ ImageInput(REMOTE_IMAGE_URL), ] ).run() + persisted = thread.read(include_turns=True) + persisted_turn = find_turn_by_id(persisted.thread.turns, result.id) print("Status:", result.status) - print(result.text) + print(assistant_text_from_turn(persisted_turn)) diff --git a/sdk/python/examples/08_local_image_and_text/async.py b/sdk/python/examples/08_local_image_and_text/async.py index 82f8388671a..aae7a53a7b8 100644 --- a/sdk/python/examples/08_local_image_and_text/async.py +++ b/sdk/python/examples/08_local_image_and_text/async.py @@ -5,7 +5,12 @@ if str(_EXAMPLES_ROOT) not in sys.path: sys.path.insert(0, str(_EXAMPLES_ROOT)) -from _bootstrap import ensure_local_sdk_src, runtime_config +from _bootstrap import ( + assistant_text_from_turn, + ensure_local_sdk_src, + find_turn_by_id, + runtime_config, +) ensure_local_sdk_src() @@ -29,9 +34,11 @@ async def main() -> None: ] ) result = await turn.run() + persisted = await thread.read(include_turns=True) + persisted_turn = find_turn_by_id(persisted.thread.turns, result.id) print("Status:", result.status) - 
print(result.text) + print(assistant_text_from_turn(persisted_turn)) if __name__ == "__main__": diff --git a/sdk/python/examples/08_local_image_and_text/sync.py b/sdk/python/examples/08_local_image_and_text/sync.py index ee5dc32de0a..f09d1805842 100644 --- a/sdk/python/examples/08_local_image_and_text/sync.py +++ b/sdk/python/examples/08_local_image_and_text/sync.py @@ -5,7 +5,12 @@ if str(_EXAMPLES_ROOT) not in sys.path: sys.path.insert(0, str(_EXAMPLES_ROOT)) -from _bootstrap import ensure_local_sdk_src, runtime_config +from _bootstrap import ( + assistant_text_from_turn, + ensure_local_sdk_src, + find_turn_by_id, + runtime_config, +) ensure_local_sdk_src() @@ -24,6 +29,8 @@ LocalImageInput(str(IMAGE_PATH.resolve())), ] ).run() + persisted = thread.read(include_turns=True) + persisted_turn = find_turn_by_id(persisted.thread.turns, result.id) print("Status:", result.status) - print(result.text) + print(assistant_text_from_turn(persisted_turn)) diff --git a/sdk/python/examples/09_async_parity/sync.py b/sdk/python/examples/09_async_parity/sync.py index b7b298c786a..2577072965b 100644 --- a/sdk/python/examples/09_async_parity/sync.py +++ b/sdk/python/examples/09_async_parity/sync.py @@ -5,19 +5,27 @@ if str(_EXAMPLES_ROOT) not in sys.path: sys.path.insert(0, str(_EXAMPLES_ROOT)) -from _bootstrap import ensure_local_sdk_src, runtime_config +from _bootstrap import ( + assistant_text_from_turn, + ensure_local_sdk_src, + find_turn_by_id, + runtime_config, + server_label, +) ensure_local_sdk_src() from codex_app_server import Codex, TextInput with Codex(config=runtime_config()) as codex: - print("Server:", codex.metadata.server_name, codex.metadata.server_version) + print("Server:", server_label(codex.metadata)) thread = codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"}) turn = thread.turn(TextInput("Say hello in one sentence.")) result = turn.run() + persisted = thread.read(include_turns=True) + persisted_turn = 
find_turn_by_id(persisted.thread.turns, result.id) - print("Thread:", result.thread_id) - print("Turn:", result.turn_id) - print("Text:", result.text.strip()) + print("Thread:", thread.id) + print("Turn:", result.id) + print("Text:", assistant_text_from_turn(persisted_turn).strip()) diff --git a/sdk/python/examples/10_error_handling_and_retry/async.py b/sdk/python/examples/10_error_handling_and_retry/async.py index cdc8bcd8c3b..c23ee00847a 100644 --- a/sdk/python/examples/10_error_handling_and_retry/async.py +++ b/sdk/python/examples/10_error_handling_and_retry/async.py @@ -5,7 +5,12 @@ if str(_EXAMPLES_ROOT) not in sys.path: sys.path.insert(0, str(_EXAMPLES_ROOT)) -from _bootstrap import ensure_local_sdk_src, runtime_config +from _bootstrap import ( + assistant_text_from_turn, + ensure_local_sdk_src, + find_turn_by_id, + runtime_config, +) ensure_local_sdk_src() @@ -73,10 +78,12 @@ async def main() -> None: print("Text:") return + persisted = await thread.read(include_turns=True) + persisted_turn = find_turn_by_id(persisted.thread.turns, result.id) if result.status == TurnStatus.failed: print("Turn failed:", result.error) - print("Text:", result.text) + print("Text:", assistant_text_from_turn(persisted_turn)) def _run_turn(thread, prompt: str): diff --git a/sdk/python/examples/10_error_handling_and_retry/sync.py b/sdk/python/examples/10_error_handling_and_retry/sync.py index cc2e565b764..585f24a9d2b 100644 --- a/sdk/python/examples/10_error_handling_and_retry/sync.py +++ b/sdk/python/examples/10_error_handling_and_retry/sync.py @@ -5,7 +5,12 @@ if str(_EXAMPLES_ROOT) not in sys.path: sys.path.insert(0, str(_EXAMPLES_ROOT)) -from _bootstrap import ensure_local_sdk_src, runtime_config +from _bootstrap import ( + assistant_text_from_turn, + ensure_local_sdk_src, + find_turn_by_id, + runtime_config, +) ensure_local_sdk_src() @@ -35,6 +40,8 @@ print(f"JSON-RPC error {exc.code}: {exc.message}") print("Text:") else: + persisted = thread.read(include_turns=True) + 
persisted_turn = find_turn_by_id(persisted.thread.turns, result.id) if result.status == TurnStatus.failed: print("Turn failed:", result.error) - print("Text:", result.text) + print("Text:", assistant_text_from_turn(persisted_turn)) diff --git a/sdk/python/examples/11_cli_mini_app/async.py b/sdk/python/examples/11_cli_mini_app/async.py index eafc3d19323..489dc51203f 100644 --- a/sdk/python/examples/11_cli_mini_app/async.py +++ b/sdk/python/examples/11_cli_mini_app/async.py @@ -15,7 +15,7 @@ AsyncCodex, TextInput, ThreadTokenUsageUpdatedNotification, - TurnCompletedNotificationPayload, + TurnCompletedNotification, ) @@ -75,7 +75,7 @@ async def main() -> None: if isinstance(payload, ThreadTokenUsageUpdatedNotification): usage = payload.token_usage continue - if isinstance(payload, TurnCompletedNotificationPayload): + if isinstance(payload, TurnCompletedNotification): status = payload.turn.status error = payload.turn.error diff --git a/sdk/python/examples/11_cli_mini_app/sync.py b/sdk/python/examples/11_cli_mini_app/sync.py index 07f69520726..9aecca414f0 100644 --- a/sdk/python/examples/11_cli_mini_app/sync.py +++ b/sdk/python/examples/11_cli_mini_app/sync.py @@ -13,7 +13,7 @@ Codex, TextInput, ThreadTokenUsageUpdatedNotification, - TurnCompletedNotificationPayload, + TurnCompletedNotification, ) print("Codex mini CLI. 
Type /exit to quit.") @@ -72,7 +72,7 @@ def _format_usage(usage: object | None) -> str: if isinstance(payload, ThreadTokenUsageUpdatedNotification): usage = payload.token_usage continue - if isinstance(payload, TurnCompletedNotificationPayload): + if isinstance(payload, TurnCompletedNotification): status = payload.turn.status error = payload.turn.error diff --git a/sdk/python/examples/12_turn_params_kitchen_sink/async.py b/sdk/python/examples/12_turn_params_kitchen_sink/async.py index 1e3c6cdb0aa..12b36fdbf7a 100644 --- a/sdk/python/examples/12_turn_params_kitchen_sink/async.py +++ b/sdk/python/examples/12_turn_params_kitchen_sink/async.py @@ -5,7 +5,12 @@ if str(_EXAMPLES_ROOT) not in sys.path: sys.path.insert(0, str(_EXAMPLES_ROOT)) -from _bootstrap import ensure_local_sdk_src, runtime_config +from _bootstrap import ( + assistant_text_from_turn, + ensure_local_sdk_src, + find_turn_by_id, + runtime_config, +) ensure_local_sdk_src() @@ -65,10 +70,12 @@ async def main() -> None: summary=SUMMARY, ) result = await turn.run() + persisted = await thread.read(include_turns=True) + persisted_turn = find_turn_by_id(persisted.thread.turns, result.id) print("Status:", result.status) - print("Text:", result.text) - print("Usage:", result.usage) + print("Text:", assistant_text_from_turn(persisted_turn)) + print("Items:", 0 if persisted_turn is None else len(persisted_turn.items or [])) if __name__ == "__main__": diff --git a/sdk/python/examples/12_turn_params_kitchen_sink/sync.py b/sdk/python/examples/12_turn_params_kitchen_sink/sync.py index a68f36d002b..f685917d2b4 100644 --- a/sdk/python/examples/12_turn_params_kitchen_sink/sync.py +++ b/sdk/python/examples/12_turn_params_kitchen_sink/sync.py @@ -5,7 +5,12 @@ if str(_EXAMPLES_ROOT) not in sys.path: sys.path.insert(0, str(_EXAMPLES_ROOT)) -from _bootstrap import ensure_local_sdk_src, runtime_config +from _bootstrap import ( + assistant_text_from_turn, + ensure_local_sdk_src, + find_turn_by_id, + runtime_config, +) 
ensure_local_sdk_src() @@ -61,7 +66,9 @@ summary=SUMMARY, ) result = turn.run() + persisted = thread.read(include_turns=True) + persisted_turn = find_turn_by_id(persisted.thread.turns, result.id) print("Status:", result.status) - print("Text:", result.text) - print("Usage:", result.usage) + print("Text:", assistant_text_from_turn(persisted_turn)) + print("Items:", 0 if persisted_turn is None else len(persisted_turn.items or [])) diff --git a/sdk/python/examples/13_model_select_and_turn_params/async.py b/sdk/python/examples/13_model_select_and_turn_params/async.py index dcab14cceda..cbbcff462bc 100644 --- a/sdk/python/examples/13_model_select_and_turn_params/async.py +++ b/sdk/python/examples/13_model_select_and_turn_params/async.py @@ -5,7 +5,7 @@ if str(_EXAMPLES_ROOT) not in sys.path: sys.path.insert(0, str(_EXAMPLES_ROOT)) -from _bootstrap import ensure_local_sdk_src, runtime_config +from _bootstrap import assistant_text_from_turn, ensure_local_sdk_src, find_turn_by_id, runtime_config ensure_local_sdk_src() @@ -96,9 +96,11 @@ async def main() -> None: effort=selected_effort, ) first = await first_turn.run() + persisted = await thread.read(include_turns=True) + first_persisted_turn = find_turn_by_id(persisted.thread.turns, first.id) - print("agent.message:", first.text) - print("usage:", first.usage) + print("agent.message:", assistant_text_from_turn(first_persisted_turn)) + print("items:", 0 if first_persisted_turn is None else len(first_persisted_turn.items or [])) second_turn = await thread.turn( TextInput("Return JSON for a safe feature-flag rollout plan."), @@ -112,9 +114,11 @@ async def main() -> None: summary=ReasoningSummary.model_validate("concise"), ) second = await second_turn.run() + persisted = await thread.read(include_turns=True) + second_persisted_turn = find_turn_by_id(persisted.thread.turns, second.id) - print("agent.message.params:", second.text) - print("usage.params:", second.usage) + print("agent.message.params:", 
assistant_text_from_turn(second_persisted_turn)) + print("items.params:", 0 if second_persisted_turn is None else len(second_persisted_turn.items or [])) if __name__ == "__main__": diff --git a/sdk/python/examples/13_model_select_and_turn_params/sync.py b/sdk/python/examples/13_model_select_and_turn_params/sync.py index c2fc53e36fd..e02d99cf750 100644 --- a/sdk/python/examples/13_model_select_and_turn_params/sync.py +++ b/sdk/python/examples/13_model_select_and_turn_params/sync.py @@ -5,7 +5,7 @@ if str(_EXAMPLES_ROOT) not in sys.path: sys.path.insert(0, str(_EXAMPLES_ROOT)) -from _bootstrap import ensure_local_sdk_src, runtime_config +from _bootstrap import assistant_text_from_turn, ensure_local_sdk_src, find_turn_by_id, runtime_config ensure_local_sdk_src() @@ -92,9 +92,11 @@ def _pick_highest_turn_effort(model) -> ReasoningEffort: model=selected_model.model, effort=selected_effort, ).run() + persisted = thread.read(include_turns=True) + first_turn = find_turn_by_id(persisted.thread.turns, first.id) - print("agent.message:", first.text) - print("usage:", first.usage) + print("agent.message:", assistant_text_from_turn(first_turn)) + print("items:", 0 if first_turn is None else len(first_turn.items or [])) second = thread.turn( TextInput("Return JSON for a safe feature-flag rollout plan."), @@ -107,6 +109,8 @@ def _pick_highest_turn_effort(model) -> ReasoningEffort: sandbox_policy=SANDBOX_POLICY, summary=ReasoningSummary.model_validate("concise"), ).run() + persisted = thread.read(include_turns=True) + second_turn = find_turn_by_id(persisted.thread.turns, second.id) - print("agent.message.params:", second.text) - print("usage.params:", second.usage) + print("agent.message.params:", assistant_text_from_turn(second_turn)) + print("items.params:", 0 if second_turn is None else len(second_turn.items or [])) diff --git a/sdk/python/examples/_bootstrap.py b/sdk/python/examples/_bootstrap.py index 2ce0ff0617a..d15a1835cc7 100644 --- a/sdk/python/examples/_bootstrap.py +++ 
b/sdk/python/examples/_bootstrap.py @@ -4,6 +4,7 @@ import os import sys from pathlib import Path +from typing import Iterable _SDK_PYTHON_DIR = Path(__file__).resolve().parents[1] _SDK_PYTHON_STR = str(_SDK_PYTHON_DIR) @@ -49,3 +50,51 @@ def runtime_config(): ensure_runtime_package_installed(sys.executable, _SDK_PYTHON_DIR) return AppServerConfig() + + +def server_label(metadata: object) -> str: + server = getattr(metadata, "serverInfo", None) + server_name = ((getattr(server, "name", None) or "") if server is not None else "").strip() + server_version = ((getattr(server, "version", None) or "") if server is not None else "").strip() + if server_name and server_version: + return f"{server_name} {server_version}" + + user_agent = ((getattr(metadata, "userAgent", None) or "") if metadata is not None else "").strip() + return user_agent or "unknown" + + +def find_turn_by_id(turns: Iterable[object] | None, turn_id: str) -> object | None: + for turn in turns or []: + if getattr(turn, "id", None) == turn_id: + return turn + return None + + +def assistant_text_from_turn(turn: object | None) -> str: + if turn is None: + return "" + + chunks: list[str] = [] + for item in getattr(turn, "items", []) or []: + raw_item = item.model_dump(mode="json") if hasattr(item, "model_dump") else item + if not isinstance(raw_item, dict): + continue + + item_type = raw_item.get("type") + if item_type == "agentMessage": + text = raw_item.get("text") + if isinstance(text, str) and text: + chunks.append(text) + continue + + if item_type != "message" or raw_item.get("role") != "assistant": + continue + + for content in raw_item.get("content") or []: + if not isinstance(content, dict) or content.get("type") != "output_text": + continue + text = content.get("text") + if isinstance(text, str) and text: + chunks.append(text) + + return "".join(chunks) diff --git a/sdk/python/notebooks/sdk_walkthrough.ipynb b/sdk/python/notebooks/sdk_walkthrough.ipynb index 4b4c42a5630..c4c59372e4e 100644 --- 
a/sdk/python/notebooks/sdk_walkthrough.ipynb +++ b/sdk/python/notebooks/sdk_walkthrough.ipynb @@ -104,9 +104,13 @@ ")\n", "\n", "src_dir = repo_python_dir / 'src'\n", + "examples_dir = repo_python_dir / 'examples'\n", "src_str = str(src_dir)\n", + "examples_str = str(examples_dir)\n", "if src_str not in sys.path:\n", " sys.path.insert(0, src_str)\n", + "if examples_str not in sys.path:\n", + " sys.path.insert(0, examples_str)\n", "\n", "# Force fresh imports after SDK upgrades in the same notebook kernel.\n", "for module_name in list(sys.modules):\n", @@ -115,8 +119,7 @@ "\n", "print('Kernel:', sys.executable)\n", "print('SDK source:', src_dir)\n", - "print('Runtime package:', runtime_version)\n", - "\n" + "print('Runtime package:', runtime_version)\n" ] }, { @@ -126,6 +129,7 @@ "outputs": [], "source": [ "# Cell 2: imports (public only)\n", + "from _bootstrap import assistant_text_from_turn, find_turn_by_id, server_label\n", "from codex_app_server import (\n", " AsyncCodex,\n", " Codex,\n", @@ -133,7 +137,7 @@ " LocalImageInput,\n", " TextInput,\n", " retry_on_overload,\n", - ")" + ")\n" ] }, { @@ -144,13 +148,15 @@ "source": [ "# Cell 3: simple sync conversation\n", "with Codex() as codex:\n", - " thread = codex.thread_start(model='gpt-5', config={'model_reasoning_effort': 'high'})\n", + " thread = codex.thread_start(model='gpt-5.4', config={'model_reasoning_effort': 'high'})\n", " turn = thread.turn(TextInput('Explain gradient descent in 3 bullets.'))\n", " result = turn.run()\n", + " persisted = thread.read(include_turns=True)\n", + " persisted_turn = find_turn_by_id(persisted.thread.turns, result.id)\n", "\n", - " print('server:', codex.metadata)\n", + " print('server:', server_label(codex.metadata))\n", " print('status:', result.status)\n", - " print(result.text)" + " print(assistant_text_from_turn(persisted_turn))\n" ] }, { @@ -161,14 +167,16 @@ "source": [ "# Cell 4: multi-turn continuity in same thread\n", "with Codex() as codex:\n", - " thread = 
codex.thread_start(model='gpt-5', config={'model_reasoning_effort': 'high'})\n", + " thread = codex.thread_start(model='gpt-5.4', config={'model_reasoning_effort': 'high'})\n", "\n", " first = thread.turn(TextInput('Give a short summary of transformers.')).run()\n", " second = thread.turn(TextInput('Now explain that to a high-school student.')).run()\n", + " persisted = thread.read(include_turns=True)\n", + " second_turn = find_turn_by_id(persisted.thread.turns, second.id)\n", "\n", " print('first status:', first.status)\n", " print('second status:', second.status)\n", - " print('second text:', second.text)" + " print('second text:', assistant_text_from_turn(second_turn))\n" ] }, { @@ -179,7 +187,7 @@ "source": [ "# Cell 5: full thread lifecycle and branching (sync)\n", "with Codex() as codex:\n", - " thread = codex.thread_start(model='gpt-5', config={'model_reasoning_effort': 'high'})\n", + " thread = codex.thread_start(model='gpt-5.4', config={'model_reasoning_effort': 'high'})\n", " first = thread.turn(TextInput('One sentence about structured planning.')).run()\n", " second = thread.turn(TextInput('Now restate it for a junior engineer.')).run()\n", "\n", @@ -196,19 +204,19 @@ " try:\n", " resumed = codex.thread_resume(\n", " unarchived.id,\n", - " model='gpt-5',\n", + " model='gpt-5.4',\n", " config={'model_reasoning_effort': 'high'},\n", " )\n", " resumed_result = resumed.turn(TextInput('Continue in one short sentence.')).run()\n", - " resumed_info = f'{resumed_result.turn_id} {resumed_result.status}'\n", + " resumed_info = f'{resumed_result.id} {resumed_result.status}'\n", " except Exception as e:\n", " resumed_info = f'skipped({type(e).__name__})'\n", "\n", " forked_info = 'n/a'\n", " try:\n", - " forked = codex.thread_fork(unarchived.id, model='gpt-5')\n", + " forked = codex.thread_fork(unarchived.id, model='gpt-5.4')\n", " forked_result = forked.turn(TextInput('Take a different angle in one short sentence.')).run()\n", - " forked_info = 
f'{forked_result.turn_id} {forked_result.status}'\n", + " forked_info = f'{forked_result.id} {forked_result.status}'\n", " except Exception as e:\n", " forked_info = f'skipped({type(e).__name__})'\n", "\n", @@ -219,15 +227,14 @@ " compact_info = f'skipped({type(e).__name__})'\n", "\n", " print('Lifecycle OK:', thread.id)\n", - " print('first:', first.turn_id, first.status)\n", - " print('second:', second.turn_id, second.status)\n", + " print('first:', first.id, first.status)\n", + " print('second:', second.id, second.status)\n", " print('read.turns:', len(reading.thread.turns or []))\n", " print('list.active:', len(listing_active.data))\n", " print('list.archived:', len(listing_archived.data))\n", " print('resumed:', resumed_info)\n", " print('forked:', forked_info)\n", - " print('compact:', compact_info)\n", - "\n" + " print('compact:', compact_info)\n" ] }, { @@ -260,22 +267,24 @@ "summary = ReasoningSummary.model_validate('concise')\n", "\n", "with Codex() as codex:\n", - " thread = codex.thread_start(model='gpt-5', config={'model_reasoning_effort': 'high'})\n", + " thread = codex.thread_start(model='gpt-5.4', config={'model_reasoning_effort': 'high'})\n", " turn = thread.turn(\n", " TextInput('Propose a safe production feature-flag rollout. 
Return JSON matching the schema.'),\n", " approval_policy=AskForApproval.never,\n", " cwd=str(Path.cwd()),\n", " effort=ReasoningEffort.medium,\n", - " model='gpt-5',\n", + " model='gpt-5.4',\n", " output_schema=output_schema,\n", " personality=Personality.pragmatic,\n", " sandbox_policy=sandbox_policy,\n", " summary=summary,\n", " )\n", " result = turn.run()\n", + " persisted = thread.read(include_turns=True)\n", + " persisted_turn = find_turn_by_id(persisted.thread.turns, result.id)\n", "\n", " print('status:', result.status)\n", - " print(result.text)\n" + " print(assistant_text_from_turn(persisted_turn))\n" ] }, { @@ -373,14 +382,16 @@ "remote_image_url = 'https://raw.githubusercontent.com/github/explore/main/topics/python/python.png'\n", "\n", "with Codex() as codex:\n", - " thread = codex.thread_start(model='gpt-5', config={'model_reasoning_effort': 'high'})\n", + " thread = codex.thread_start(model='gpt-5.4', config={'model_reasoning_effort': 'high'})\n", " result = thread.turn([\n", " TextInput('What do you see in this image? 
3 bullets.'),\n", " ImageInput(remote_image_url),\n", " ]).run()\n", + " persisted = thread.read(include_turns=True)\n", + " persisted_turn = find_turn_by_id(persisted.thread.turns, result.id)\n", "\n", " print('status:', result.status)\n", - " print(result.text)\n" + " print(assistant_text_from_turn(persisted_turn))\n" ] }, { @@ -395,14 +406,16 @@ " raise FileNotFoundError(f'Missing bundled image: {local_image_path}')\n", "\n", "with Codex() as codex:\n", - " thread = codex.thread_start(model='gpt-5', config={'model_reasoning_effort': 'high'})\n", + " thread = codex.thread_start(model='gpt-5.4', config={'model_reasoning_effort': 'high'})\n", " result = thread.turn([\n", " TextInput('Describe this local image in 2 bullets.'),\n", " LocalImageInput(str(local_image_path.resolve())),\n", " ]).run()\n", + " persisted = thread.read(include_turns=True)\n", + " persisted_turn = find_turn_by_id(persisted.thread.turns, result.id)\n", "\n", " print('status:', result.status)\n", - " print(result.text)\n" + " print(assistant_text_from_turn(persisted_turn))\n" ] }, { @@ -413,7 +426,7 @@ "source": [ "# Cell 8: retry-on-overload pattern\n", "with Codex() as codex:\n", - " thread = codex.thread_start(model='gpt-5', config={'model_reasoning_effort': 'high'})\n", + " thread = codex.thread_start(model='gpt-5.4', config={'model_reasoning_effort': 'high'})\n", "\n", " result = retry_on_overload(\n", " lambda: thread.turn(TextInput('List 5 failure modes in distributed systems.')).run(),\n", @@ -421,9 +434,11 @@ " initial_delay_s=0.25,\n", " max_delay_s=2.0,\n", " )\n", + " persisted = thread.read(include_turns=True)\n", + " persisted_turn = find_turn_by_id(persisted.thread.turns, result.id)\n", "\n", " print('status:', result.status)\n", - " print(result.text)" + " print(assistant_text_from_turn(persisted_turn))\n" ] }, { @@ -438,7 +453,7 @@ "\n", "async def async_lifecycle_demo():\n", " async with AsyncCodex() as codex:\n", - " thread = await codex.thread_start(model='gpt-5', 
config={'model_reasoning_effort': 'high'})\n", + " thread = await codex.thread_start(model='gpt-5.4', config={'model_reasoning_effort': 'high'})\n", " first = await (await thread.turn(TextInput('One sentence about structured planning.'))).run()\n", " second = await (await thread.turn(TextInput('Now restate it for a junior engineer.'))).run()\n", "\n", @@ -455,19 +470,19 @@ " try:\n", " resumed = await codex.thread_resume(\n", " unarchived.id,\n", - " model='gpt-5',\n", + " model='gpt-5.4',\n", " config={'model_reasoning_effort': 'high'},\n", " )\n", " resumed_result = await (await resumed.turn(TextInput('Continue in one short sentence.'))).run()\n", - " resumed_info = f'{resumed_result.turn_id} {resumed_result.status}'\n", + " resumed_info = f'{resumed_result.id} {resumed_result.status}'\n", " except Exception as e:\n", " resumed_info = f'skipped({type(e).__name__})'\n", "\n", " forked_info = 'n/a'\n", " try:\n", - " forked = await codex.thread_fork(unarchived.id, model='gpt-5')\n", + " forked = await codex.thread_fork(unarchived.id, model='gpt-5.4')\n", " forked_result = await (await forked.turn(TextInput('Take a different angle in one short sentence.'))).run()\n", - " forked_info = f'{forked_result.turn_id} {forked_result.status}'\n", + " forked_info = f'{forked_result.id} {forked_result.status}'\n", " except Exception as e:\n", " forked_info = f'skipped({type(e).__name__})'\n", "\n", @@ -478,8 +493,8 @@ " compact_info = f'skipped({type(e).__name__})'\n", "\n", " print('Lifecycle OK:', thread.id)\n", - " print('first:', first.turn_id, first.status)\n", - " print('second:', second.turn_id, second.status)\n", + " print('first:', first.id, first.status)\n", + " print('second:', second.id, second.status)\n", " print('read.turns:', len(reading.thread.turns or []))\n", " print('list.active:', len(listing_active.data))\n", " print('list.archived:', len(listing_archived.data))\n", @@ -488,8 +503,7 @@ " print('compact:', compact_info)\n", "\n", "\n", - "await 
async_lifecycle_demo()\n", - "\n" + "await async_lifecycle_demo()\n" ] }, { @@ -504,7 +518,7 @@ "\n", "async def async_stream_demo():\n", " async with AsyncCodex() as codex:\n", - " thread = await codex.thread_start(model='gpt-5', config={'model_reasoning_effort': 'high'})\n", + " thread = await codex.thread_start(model='gpt-5.4', config={'model_reasoning_effort': 'high'})\n", " turn = await thread.turn(TextInput('Count from 1 to 200 with commas, then one summary sentence.'))\n", "\n", " try:\n", @@ -527,8 +541,7 @@ " print('events.count:', event_count)\n", "\n", "\n", - "await async_stream_demo()\n", - "\n" + "await async_stream_demo()\n" ] } ], diff --git a/sdk/python/src/codex_app_server/__init__.py b/sdk/python/src/codex_app_server/__init__.py index 4e2af18fc4e..8d7650fa01f 100644 --- a/sdk/python/src/codex_app_server/__init__.py +++ b/sdk/python/src/codex_app_server/__init__.py @@ -14,29 +14,7 @@ TransportClosedError, is_retryable_error, ) -from .generated.v2_types import ( - ThreadItem, - ThreadTokenUsageUpdatedNotification, - TurnCompletedNotificationPayload, -) -from .public_api import ( - AsyncCodex, - AsyncThread, - AsyncTurn, - Codex, - ImageInput, - InitializeResult, - Input, - InputItem, - LocalImageInput, - MentionInput, - SkillInput, - TextInput, - Thread, - Turn, - TurnResult, -) -from .public_types import ( +from .generated.v2_all import ( AskForApproval, Personality, PlanType, @@ -45,16 +23,35 @@ SandboxMode, SandboxPolicy, ServiceTier, + ThreadItem, ThreadForkParams, ThreadListParams, ThreadResumeParams, ThreadSortKey, ThreadSourceKind, ThreadStartParams, + ThreadTokenUsageUpdatedNotification, + TurnCompletedNotification, TurnStartParams, TurnStatus, TurnSteerParams, ) +from .models import InitializeResponse +from .public_api import ( + AsyncCodex, + AsyncThread, + AsyncTurn, + Codex, + ImageInput, + Input, + InputItem, + LocalImageInput, + MentionInput, + SkillInput, + TextInput, + Thread, + Turn, +) from .retry import retry_on_overload 
__version__ = "0.2.0" @@ -70,8 +67,7 @@ "AsyncThread", "Turn", "AsyncTurn", - "TurnResult", - "InitializeResult", + "InitializeResponse", "Input", "InputItem", "TextInput", @@ -81,7 +77,7 @@ "MentionInput", "ThreadItem", "ThreadTokenUsageUpdatedNotification", - "TurnCompletedNotificationPayload", + "TurnCompletedNotification", "AskForApproval", "Personality", "PlanType", diff --git a/sdk/python/src/codex_app_server/generated/v2_all.py b/sdk/python/src/codex_app_server/generated/v2_all.py index f746fc771fe..2607124668e 100644 --- a/sdk/python/src/codex_app_server/generated/v2_all.py +++ b/sdk/python/src/codex_app_server/generated/v2_all.py @@ -42,21 +42,6 @@ class AccountLoginCompletedNotification(BaseModel): success: bool -class TextAgentMessageContent(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - text: str - type: Annotated[Literal["Text"], Field(title="TextAgentMessageContentType")] - - -class AgentMessageContent(RootModel[TextAgentMessageContent]): - model_config = ConfigDict( - populate_by_name=True, - ) - root: TextAgentMessageContent - - class AgentMessageDeltaNotification(BaseModel): model_config = ConfigDict( populate_by_name=True, @@ -67,46 +52,6 @@ class AgentMessageDeltaNotification(BaseModel): turn_id: Annotated[str, Field(alias="turnId")] -class CompletedAgentStatus(BaseModel): - model_config = ConfigDict( - extra="forbid", - populate_by_name=True, - ) - completed: str | None = None - - -class ErroredAgentStatus(BaseModel): - model_config = ConfigDict( - extra="forbid", - populate_by_name=True, - ) - errored: str - - -class AgentStatus( - RootModel[ - Literal["pending_init"] - | Literal["running"] - | CompletedAgentStatus - | ErroredAgentStatus - | Literal["shutdown"] - | Literal["not_found"] - ] -): - model_config = ConfigDict( - populate_by_name=True, - ) - root: Annotated[ - Literal["pending_init"] - | Literal["running"] - | CompletedAgentStatus - | ErroredAgentStatus - | Literal["shutdown"] - | Literal["not_found"], - 
Field(description="Agent lifecycle status, derived from emitted events."), - ] - - class AnalyticsConfig(BaseModel): model_config = ConfigDict( extra="allow", @@ -221,7 +166,7 @@ class AskForApprovalValue(Enum): never = "never" -class Reject(BaseModel): +class Granular(BaseModel): model_config = ConfigDict( populate_by_name=True, ) @@ -232,19 +177,19 @@ class Reject(BaseModel): skill_approval: bool | None = False -class RejectAskForApproval(BaseModel): +class GranularAskForApproval(BaseModel): model_config = ConfigDict( extra="forbid", populate_by_name=True, ) - reject: Reject + granular: Granular -class AskForApproval(RootModel[AskForApprovalValue | RejectAskForApproval]): +class AskForApproval(RootModel[AskForApprovalValue | GranularAskForApproval]): model_config = ConfigDict( populate_by_name=True, ) - root: AskForApprovalValue | RejectAskForApproval + root: AskForApprovalValue | GranularAskForApproval class AuthMode(Enum): @@ -261,16 +206,6 @@ class ByteRange(BaseModel): start: Annotated[int, Field(ge=0)] -class CallToolResult(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - field_meta: Annotated[Any | None, Field(alias="_meta")] = None - content: list - is_error: Annotated[bool | None, Field(alias="isError")] = None - structured_content: Annotated[Any | None, Field(alias="structuredContent")] = None - - class CancelLoginAccountParams(BaseModel): model_config = ConfigDict( populate_by_name=True, @@ -746,17 +681,6 @@ class CreditsSnapshot(BaseModel): unlimited: bool -class CustomPrompt(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - argument_hint: str | None = None - content: str - description: str | None = None - name: str - path: str - - class DeprecationNoticeNotification(BaseModel): model_config = ConfigDict( populate_by_name=True, @@ -770,14 +694,6 @@ class DeprecationNoticeNotification(BaseModel): summary: Annotated[str, Field(description="Concise summary of what is deprecated.")] -class Duration(BaseModel): - 
model_config = ConfigDict( - populate_by_name=True, - ) - nanos: Annotated[int, Field(ge=0)] - secs: Annotated[int, Field(ge=0)] - - class InputTextDynamicToolCallOutputContentItem(BaseModel): model_config = ConfigDict( populate_by_name=True, @@ -830,5059 +746,3361 @@ class DynamicToolSpec(BaseModel): name: str -class FormElicitationRequest(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - field_meta: Annotated[Any | None, Field(alias="_meta")] = None - message: str - mode: Annotated[Literal["form"], Field(title="FormElicitationRequestMode")] - requested_schema: Any - - -class UrlElicitationRequest(BaseModel): +class ExperimentalFeatureListParams(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - field_meta: Annotated[Any | None, Field(alias="_meta")] = None - elicitation_id: str - message: str - mode: Annotated[Literal["url"], Field(title="UrlElicitationRequestMode")] - url: str + cursor: Annotated[ + str | None, + Field(description="Opaque pagination cursor returned by a previous call."), + ] = None + limit: Annotated[ + int | None, + Field( + description="Optional page size; defaults to a reasonable server-side value.", + ge=0, + ), + ] = None -class ElicitationRequest(RootModel[FormElicitationRequest | UrlElicitationRequest]): - model_config = ConfigDict( - populate_by_name=True, - ) - root: FormElicitationRequest | UrlElicitationRequest +class ExperimentalFeatureStage(Enum): + beta = "beta" + under_development = "underDevelopment" + stable = "stable" + deprecated = "deprecated" + removed = "removed" -class ErrorEventMsg(BaseModel): +class ExternalAgentConfigDetectParams(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - codex_error_info: CodexErrorInfo | None = None - message: str - type: Annotated[Literal["error"], Field(title="ErrorEventMsgType")] + cwds: Annotated[ + list[str] | None, + Field( + description="Zero or more working directories to include for repo-scoped detection." 
+ ), + ] = None + include_home: Annotated[ + bool | None, + Field( + alias="includeHome", + description="If true, include detection under the user's home (~/.claude, ~/.codex, etc.).", + ), + ] = None -class WarningEventMsg(BaseModel): +class ExternalAgentConfigImportResponse(BaseModel): + pass model_config = ConfigDict( populate_by_name=True, ) - message: str - type: Annotated[Literal["warning"], Field(title="WarningEventMsgType")] -class RealtimeConversationStartedEventMsg(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - session_id: str | None = None - type: Annotated[ - Literal["realtime_conversation_started"], - Field(title="RealtimeConversationStartedEventMsgType"), - ] +class ExternalAgentConfigMigrationItemType(Enum): + agents_md = "AGENTS_MD" + config = "CONFIG" + skills = "SKILLS" + mcp_server_config = "MCP_SERVER_CONFIG" -class RealtimeConversationClosedEventMsg(BaseModel): +class FeedbackUploadParams(BaseModel): model_config = ConfigDict( populate_by_name=True, ) + classification: str + extra_log_files: Annotated[list[str] | None, Field(alias="extraLogFiles")] = None + include_logs: Annotated[bool, Field(alias="includeLogs")] reason: str | None = None - type: Annotated[ - Literal["realtime_conversation_closed"], - Field(title="RealtimeConversationClosedEventMsgType"), - ] - - -class ContextCompactedEventMsg(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - type: Annotated[ - Literal["context_compacted"], Field(title="ContextCompactedEventMsgType") - ] - - -class ThreadRolledBackEventMsg(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - num_turns: Annotated[ - int, - Field(description="Number of user turns that were removed from context.", ge=0), - ] - type: Annotated[ - Literal["thread_rolled_back"], Field(title="ThreadRolledBackEventMsgType") - ] + thread_id: Annotated[str | None, Field(alias="threadId")] = None -class TaskCompleteEventMsg(BaseModel): +class 
FeedbackUploadResponse(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - last_agent_message: str | None = None - turn_id: str - type: Annotated[Literal["task_complete"], Field(title="TaskCompleteEventMsgType")] + thread_id: Annotated[str, Field(alias="threadId")] -class AgentMessageDeltaEventMsg(BaseModel): +class FileChangeOutputDeltaNotification(BaseModel): model_config = ConfigDict( populate_by_name=True, ) delta: str - type: Annotated[ - Literal["agent_message_delta"], Field(title="AgentMessageDeltaEventMsgType") - ] - - -class AgentReasoningEventMsg(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - text: str - type: Annotated[ - Literal["agent_reasoning"], Field(title="AgentReasoningEventMsgType") - ] + item_id: Annotated[str, Field(alias="itemId")] + thread_id: Annotated[str, Field(alias="threadId")] + turn_id: Annotated[str, Field(alias="turnId")] -class AgentReasoningDeltaEventMsg(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - delta: str - type: Annotated[ - Literal["agent_reasoning_delta"], Field(title="AgentReasoningDeltaEventMsgType") - ] +class ForcedLoginMethod(Enum): + chatgpt = "chatgpt" + api = "api" -class AgentReasoningRawContentEventMsg(BaseModel): +class InputTextFunctionCallOutputContentItem(BaseModel): model_config = ConfigDict( populate_by_name=True, ) text: str type: Annotated[ - Literal["agent_reasoning_raw_content"], - Field(title="AgentReasoningRawContentEventMsgType"), + Literal["input_text"], Field(title="InputTextFunctionCallOutputContentItemType") ] -class AgentReasoningRawContentDeltaEventMsg(BaseModel): +class FuzzyFileSearchParams(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - delta: str - type: Annotated[ - Literal["agent_reasoning_raw_content_delta"], - Field(title="AgentReasoningRawContentDeltaEventMsgType"), - ] + cancellation_token: Annotated[str | None, Field(alias="cancellationToken")] = None + query: str + roots: list[str] -class 
AgentReasoningSectionBreakEventMsg(BaseModel): +class Indice(RootModel[int]): model_config = ConfigDict( populate_by_name=True, ) - item_id: str | None = "" - summary_index: int | None = 0 - type: Annotated[ - Literal["agent_reasoning_section_break"], - Field(title="AgentReasoningSectionBreakEventMsgType"), - ] + root: Annotated[int, Field(ge=0)] -class WebSearchBeginEventMsg(BaseModel): +class FuzzyFileSearchResult(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - call_id: str - type: Annotated[ - Literal["web_search_begin"], Field(title="WebSearchBeginEventMsgType") - ] + file_name: str + indices: list[Indice] | None = None + path: str + root: str + score: Annotated[int, Field(ge=0)] -class ImageGenerationBeginEventMsg(BaseModel): +class FuzzyFileSearchSessionCompletedNotification(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - call_id: str - type: Annotated[ - Literal["image_generation_begin"], - Field(title="ImageGenerationBeginEventMsgType"), - ] + session_id: Annotated[str, Field(alias="sessionId")] -class ImageGenerationEndEventMsg(BaseModel): +class FuzzyFileSearchSessionUpdatedNotification(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - call_id: str - result: str - revised_prompt: str | None = None - saved_path: str | None = None - status: str - type: Annotated[ - Literal["image_generation_end"], Field(title="ImageGenerationEndEventMsgType") - ] + files: list[FuzzyFileSearchResult] + query: str + session_id: Annotated[str, Field(alias="sessionId")] -class TerminalInteractionEventMsg(BaseModel): +class GetAccountParams(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - call_id: Annotated[ - str, + refresh_token: Annotated[ + bool | None, Field( - description="Identifier for the ExecCommandBegin that produced this chunk." + alias="refreshToken", + description="When `true`, requests a proactive token refresh before returning.\n\nIn managed auth mode this triggers the normal refresh-token flow. 
In external auth mode this flag is ignored. Clients should refresh tokens themselves and call `account/login/start` with `chatgptAuthTokens`.", ), - ] - process_id: Annotated[ - str, Field(description="Process id associated with the running command.") - ] - stdin: Annotated[str, Field(description="Stdin sent to the running session.")] - type: Annotated[ - Literal["terminal_interaction"], Field(title="TerminalInteractionEventMsgType") - ] + ] = False -class ViewImageToolCallEventMsg(BaseModel): +class GhostCommit(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - call_id: Annotated[ - str, Field(description="Identifier for the originating tool call.") - ] - path: Annotated[ - str, Field(description="Local filesystem path provided to the tool.") - ] - type: Annotated[ - Literal["view_image_tool_call"], Field(title="ViewImageToolCallEventMsgType") - ] + id: str + parent: str | None = None + preexisting_untracked_dirs: list[str] + preexisting_untracked_files: list[str] -class DynamicToolCallRequestEventMsg(BaseModel): +class GitInfo(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - arguments: Any - call_id: Annotated[str, Field(alias="callId")] - tool: str - turn_id: Annotated[str, Field(alias="turnId")] - type: Annotated[ - Literal["dynamic_tool_call_request"], - Field(title="DynamicToolCallRequestEventMsgType"), - ] + branch: str | None = None + origin_url: Annotated[str | None, Field(alias="originUrl")] = None + sha: str | None = None -class DynamicToolCallResponseEventMsg(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - arguments: Annotated[Any, Field(description="Dynamic tool call arguments.")] - call_id: Annotated[ - str, - Field(description="Identifier for the corresponding DynamicToolCallRequest."), - ] - content_items: Annotated[ - list[DynamicToolCallOutputContentItem], - Field(description="Dynamic tool response content items."), - ] - duration: Annotated[ - Duration, Field(description="The duration of the 
dynamic tool call.") - ] - error: Annotated[ - str | None, - Field( - description="Optional error text when the tool call failed before producing a response." - ), - ] = None - success: Annotated[bool, Field(description="Whether the tool call succeeded.")] - tool: Annotated[str, Field(description="Dynamic tool name.")] - turn_id: Annotated[ - str, Field(description="Turn ID that this dynamic tool call belongs to.") - ] - type: Annotated[ - Literal["dynamic_tool_call_response"], - Field(title="DynamicToolCallResponseEventMsgType"), - ] +class HazelnutScope(Enum): + example = "example" + workspace_shared = "workspace-shared" + all_shared = "all-shared" + personal = "personal" -class DeprecationNoticeEventMsg(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - details: Annotated[ - str | None, - Field( - description="Optional extra guidance, such as migration steps or rationale." - ), - ] = None - summary: Annotated[str, Field(description="Concise summary of what is deprecated.")] - type: Annotated[ - Literal["deprecation_notice"], Field(title="DeprecationNoticeEventMsgType") - ] +class HookEventName(Enum): + session_start = "sessionStart" + stop = "stop" -class BackgroundEventEventMsg(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - message: str - type: Annotated[ - Literal["background_event"], Field(title="BackgroundEventEventMsgType") - ] +class HookExecutionMode(Enum): + sync = "sync" + async_ = "async" + + +class HookHandlerType(Enum): + command = "command" + prompt = "prompt" + agent = "agent" + + +class HookOutputEntryKind(Enum): + warning = "warning" + stop = "stop" + feedback = "feedback" + context = "context" + error = "error" + + +class HookRunStatus(Enum): + running = "running" + completed = "completed" + failed = "failed" + blocked = "blocked" + stopped = "stopped" + + +class HookScope(Enum): + thread = "thread" + turn = "turn" + + +class ImageDetail(Enum): + auto = "auto" + low = "low" + high = "high" + original 
= "original" -class UndoStartedEventMsg(BaseModel): +class InitializeCapabilities(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - message: str | None = None - type: Annotated[Literal["undo_started"], Field(title="UndoStartedEventMsgType")] + experimental_api: Annotated[ + bool | None, + Field( + alias="experimentalApi", + description="Opt into receiving experimental API methods and fields.", + ), + ] = False + opt_out_notification_methods: Annotated[ + list[str] | None, + Field( + alias="optOutNotificationMethods", + description="Exact notification method names that should be suppressed for this connection (for example `thread/started`).", + ), + ] = None -class UndoCompletedEventMsg(BaseModel): +class InitializeParams(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - message: str | None = None - success: bool - type: Annotated[Literal["undo_completed"], Field(title="UndoCompletedEventMsgType")] + capabilities: InitializeCapabilities | None = None + client_info: Annotated[ClientInfo, Field(alias="clientInfo")] + + +class InputModality(Enum): + text = "text" + image = "image" -class StreamErrorEventMsg(BaseModel): +class ListMcpServerStatusParams(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - additional_details: Annotated[ + cursor: Annotated[ str | None, + Field(description="Opaque pagination cursor returned by a previous call."), + ] = None + limit: Annotated[ + int | None, Field( - description="Optional details about the underlying stream failure (often the same human-readable message that is surfaced as the terminal error if retries are exhausted)." 
+ description="Optional page size; defaults to a server-defined value.", ge=0 ), ] = None - codex_error_info: CodexErrorInfo | None = None - message: str - type: Annotated[Literal["stream_error"], Field(title="StreamErrorEventMsgType")] -class TurnDiffEventMsg(BaseModel): +class ExecLocalShellAction(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - type: Annotated[Literal["turn_diff"], Field(title="TurnDiffEventMsgType")] - unified_diff: str + command: list[str] + env: dict[str, Any] | None = None + timeout_ms: Annotated[int | None, Field(ge=0)] = None + type: Annotated[Literal["exec"], Field(title="ExecLocalShellActionType")] + user: str | None = None + working_directory: str | None = None -class ListCustomPromptsResponseEventMsg(BaseModel): +class LocalShellAction(RootModel[ExecLocalShellAction]): model_config = ConfigDict( populate_by_name=True, ) - custom_prompts: list[CustomPrompt] - type: Annotated[ - Literal["list_custom_prompts_response"], - Field(title="ListCustomPromptsResponseEventMsgType"), - ] + root: ExecLocalShellAction + + +class LocalShellStatus(Enum): + completed = "completed" + in_progress = "in_progress" + incomplete = "incomplete" -class RemoteSkillDownloadedEventMsg(BaseModel): +class ApiKeyLoginAccountParams(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - id: str - name: str - path: str - type: Annotated[ - Literal["remote_skill_downloaded"], - Field(title="RemoteSkillDownloadedEventMsgType"), - ] + api_key: Annotated[str, Field(alias="apiKey")] + type: Annotated[Literal["apiKey"], Field(title="ApiKeyv2::LoginAccountParamsType")] -class SkillsUpdateAvailableEventMsg(BaseModel): +class ChatgptLoginAccountParams(BaseModel): model_config = ConfigDict( populate_by_name=True, ) type: Annotated[ - Literal["skills_update_available"], - Field(title="SkillsUpdateAvailableEventMsgType"), + Literal["chatgpt"], Field(title="Chatgptv2::LoginAccountParamsType") ] -class ShutdownCompleteEventMsg(BaseModel): +class 
ChatgptAuthTokensLoginAccountParams(BaseModel): model_config = ConfigDict( populate_by_name=True, ) + access_token: Annotated[ + str, + Field( + alias="accessToken", + description="Access token (JWT) supplied by the client. This token is used for backend API requests and email extraction.", + ), + ] + chatgpt_account_id: Annotated[ + str, + Field( + alias="chatgptAccountId", + description="Workspace/account identifier supplied by the client.", + ), + ] + chatgpt_plan_type: Annotated[ + str | None, + Field( + alias="chatgptPlanType", + description="Optional plan type supplied by the client.\n\nWhen `null`, Codex attempts to derive the plan type from access-token claims. If unavailable, the plan defaults to `unknown`.", + ), + ] = None type: Annotated[ - Literal["shutdown_complete"], Field(title="ShutdownCompleteEventMsgType") + Literal["chatgptAuthTokens"], + Field(title="ChatgptAuthTokensv2::LoginAccountParamsType"), ] -class AgentMessageContentDeltaEventMsg(BaseModel): +class LoginAccountParams( + RootModel[ + ApiKeyLoginAccountParams + | ChatgptLoginAccountParams + | ChatgptAuthTokensLoginAccountParams + ] +): model_config = ConfigDict( populate_by_name=True, ) - delta: str - item_id: str - thread_id: str - turn_id: str - type: Annotated[ - Literal["agent_message_content_delta"], - Field(title="AgentMessageContentDeltaEventMsgType"), + root: Annotated[ + ApiKeyLoginAccountParams + | ChatgptLoginAccountParams + | ChatgptAuthTokensLoginAccountParams, + Field(title="LoginAccountParams"), ] -class PlanDeltaEventMsg(BaseModel): +class ApiKeyLoginAccountResponse(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - delta: str - item_id: str - thread_id: str - turn_id: str - type: Annotated[Literal["plan_delta"], Field(title="PlanDeltaEventMsgType")] + type: Annotated[ + Literal["apiKey"], Field(title="ApiKeyv2::LoginAccountResponseType") + ] -class ReasoningContentDeltaEventMsg(BaseModel): +class ChatgptLoginAccountResponse(BaseModel): model_config = 
ConfigDict( populate_by_name=True, ) - delta: str - item_id: str - summary_index: int | None = 0 - thread_id: str - turn_id: str + auth_url: Annotated[ + str, + Field( + alias="authUrl", + description="URL the client should open in a browser to initiate the OAuth flow.", + ), + ] + login_id: Annotated[str, Field(alias="loginId")] type: Annotated[ - Literal["reasoning_content_delta"], - Field(title="ReasoningContentDeltaEventMsgType"), + Literal["chatgpt"], Field(title="Chatgptv2::LoginAccountResponseType") ] -class ReasoningRawContentDeltaEventMsg(BaseModel): +class ChatgptAuthTokensLoginAccountResponse(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - content_index: int | None = 0 - delta: str - item_id: str - thread_id: str - turn_id: str type: Annotated[ - Literal["reasoning_raw_content_delta"], - Field(title="ReasoningRawContentDeltaEventMsgType"), + Literal["chatgptAuthTokens"], + Field(title="ChatgptAuthTokensv2::LoginAccountResponseType"), ] -class ExecApprovalRequestSkillMetadata(BaseModel): +class LoginAccountResponse( + RootModel[ + ApiKeyLoginAccountResponse + | ChatgptLoginAccountResponse + | ChatgptAuthTokensLoginAccountResponse + ] +): model_config = ConfigDict( populate_by_name=True, ) - path_to_skills_md: str - - -class ExecCommandSource(Enum): - agent = "agent" - user_shell = "user_shell" - unified_exec_startup = "unified_exec_startup" - unified_exec_interaction = "unified_exec_interaction" + root: Annotated[ + ApiKeyLoginAccountResponse + | ChatgptLoginAccountResponse + | ChatgptAuthTokensLoginAccountResponse, + Field(title="LoginAccountResponse"), + ] -class ExecCommandStatus(Enum): - completed = "completed" - failed = "failed" - declined = "declined" +class LogoutAccountResponse(BaseModel): + pass + model_config = ConfigDict( + populate_by_name=True, + ) -class ExecOutputStream(Enum): - stdout = "stdout" - stderr = "stderr" +class McpAuthStatus(Enum): + unsupported = "unsupported" + not_logged_in = "notLoggedIn" + bearer_token = 
"bearerToken" + o_auth = "oAuth" -class ExperimentalFeatureListParams(BaseModel): +class McpServerOauthLoginCompletedNotification(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - cursor: Annotated[ - str | None, - Field(description="Opaque pagination cursor returned by a previous call."), - ] = None - limit: Annotated[ - int | None, - Field( - description="Optional page size; defaults to a reasonable server-side value.", - ge=0, - ), - ] = None + error: str | None = None + name: str + success: bool -class ExperimentalFeatureStage(Enum): - beta = "beta" - under_development = "underDevelopment" - stable = "stable" - deprecated = "deprecated" - removed = "removed" +class McpServerOauthLoginParams(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + name: str + scopes: list[str] | None = None + timeout_secs: Annotated[int | None, Field(alias="timeoutSecs")] = None -class ExternalAgentConfigDetectParams(BaseModel): +class McpServerOauthLoginResponse(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - cwds: Annotated[ - list[str] | None, - Field( - description="Zero or more working directories to include for repo-scoped detection." 
- ), - ] = None - include_home: Annotated[ - bool | None, - Field( - alias="includeHome", - description="If true, include detection under the user's home (~/.claude, ~/.codex, etc.).", - ), - ] = None + authorization_url: Annotated[str, Field(alias="authorizationUrl")] -class ExternalAgentConfigImportResponse(BaseModel): +class McpServerRefreshResponse(BaseModel): pass model_config = ConfigDict( populate_by_name=True, ) -class ExternalAgentConfigMigrationItemType(Enum): - agents_md = "AGENTS_MD" - config = "CONFIG" - skills = "SKILLS" - mcp_server_config = "MCP_SERVER_CONFIG" - - -class FeedbackUploadParams(BaseModel): +class McpToolCallError(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - classification: str - extra_log_files: Annotated[list[str] | None, Field(alias="extraLogFiles")] = None - include_logs: Annotated[bool, Field(alias="includeLogs")] - reason: str | None = None - thread_id: Annotated[str | None, Field(alias="threadId")] = None + message: str -class FeedbackUploadResponse(BaseModel): +class McpToolCallProgressNotification(BaseModel): model_config = ConfigDict( populate_by_name=True, ) + item_id: Annotated[str, Field(alias="itemId")] + message: str thread_id: Annotated[str, Field(alias="threadId")] + turn_id: Annotated[str, Field(alias="turnId")] -class AddFileChange(BaseModel): +class McpToolCallResult(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - content: str - type: Annotated[Literal["add"], Field(title="AddFileChangeType")] + content: list + structured_content: Annotated[Any | None, Field(alias="structuredContent")] = None + + +class McpToolCallStatus(Enum): + in_progress = "inProgress" + completed = "completed" + failed = "failed" + + +class MergeStrategy(Enum): + replace = "replace" + upsert = "upsert" + +class MessagePhase(Enum): + commentary = "commentary" + final_answer = "final_answer" + + +class ModeKind(Enum): + plan = "plan" + default = "default" -class DeleteFileChange(BaseModel): + +class 
ModelAvailabilityNux(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - content: str - type: Annotated[Literal["delete"], Field(title="DeleteFileChangeType")] + message: str -class UpdateFileChange(BaseModel): +class ModelListParams(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - move_path: str | None = None - type: Annotated[Literal["update"], Field(title="UpdateFileChangeType")] - unified_diff: str + cursor: Annotated[ + str | None, + Field(description="Opaque pagination cursor returned by a previous call."), + ] = None + include_hidden: Annotated[ + bool | None, + Field( + alias="includeHidden", + description="When true, include models that are hidden from the default picker list.", + ), + ] = None + limit: Annotated[ + int | None, + Field( + description="Optional page size; defaults to a reasonable server-side value.", + ge=0, + ), + ] = None -class FileChange(RootModel[AddFileChange | DeleteFileChange | UpdateFileChange]): +class ModelRerouteReason(RootModel[Literal["highRiskCyberActivity"]]): model_config = ConfigDict( populate_by_name=True, ) - root: AddFileChange | DeleteFileChange | UpdateFileChange + root: Literal["highRiskCyberActivity"] -class FileChangeOutputDeltaNotification(BaseModel): +class ModelReroutedNotification(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - delta: str - item_id: Annotated[str, Field(alias="itemId")] + from_model: Annotated[str, Field(alias="fromModel")] + reason: ModelRerouteReason thread_id: Annotated[str, Field(alias="threadId")] + to_model: Annotated[str, Field(alias="toModel")] turn_id: Annotated[str, Field(alias="turnId")] -class FileSystemPermissions(BaseModel): +class ModelUpgradeInfo(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - read: list[AbsolutePathBuf] | None = None - write: list[AbsolutePathBuf] | None = None + migration_markdown: Annotated[str | None, Field(alias="migrationMarkdown")] = None + model: str + model_link: Annotated[str | None, 
Field(alias="modelLink")] = None + upgrade_copy: Annotated[str | None, Field(alias="upgradeCopy")] = None -class ForcedLoginMethod(Enum): - chatgpt = "chatgpt" - api = "api" +class NetworkAccess(Enum): + restricted = "restricted" + enabled = "enabled" -class InputTextFunctionCallOutputContentItem(BaseModel): +class NetworkRequirements(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - text: str - type: Annotated[ - Literal["input_text"], Field(title="InputTextFunctionCallOutputContentItemType") - ] + allow_local_binding: Annotated[bool | None, Field(alias="allowLocalBinding")] = None + allow_unix_sockets: Annotated[list[str] | None, Field(alias="allowUnixSockets")] = ( + None + ) + allow_upstream_proxy: Annotated[bool | None, Field(alias="allowUpstreamProxy")] = ( + None + ) + allowed_domains: Annotated[list[str] | None, Field(alias="allowedDomains")] = None + dangerously_allow_all_unix_sockets: Annotated[ + bool | None, Field(alias="dangerouslyAllowAllUnixSockets") + ] = None + dangerously_allow_non_loopback_proxy: Annotated[ + bool | None, Field(alias="dangerouslyAllowNonLoopbackProxy") + ] = None + denied_domains: Annotated[list[str] | None, Field(alias="deniedDomains")] = None + enabled: bool | None = None + http_port: Annotated[int | None, Field(alias="httpPort", ge=0)] = None + socks_port: Annotated[int | None, Field(alias="socksPort", ge=0)] = None -class FuzzyFileSearchParams(BaseModel): +class PatchApplyStatus(Enum): + in_progress = "inProgress" + completed = "completed" + failed = "failed" + declined = "declined" + + +class AddPatchChangeKind(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - cancellation_token: Annotated[str | None, Field(alias="cancellationToken")] = None - query: str - roots: list[str] + type: Annotated[Literal["add"], Field(title="AddPatchChangeKindType")] -class Indice(RootModel[int]): +class DeletePatchChangeKind(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - root: Annotated[int, 
Field(ge=0)] + type: Annotated[Literal["delete"], Field(title="DeletePatchChangeKindType")] -class FuzzyFileSearchResult(BaseModel): +class UpdatePatchChangeKind(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - file_name: str - indices: list[Indice] | None = None - path: str - root: str - score: Annotated[int, Field(ge=0)] + move_path: str | None = None + type: Annotated[Literal["update"], Field(title="UpdatePatchChangeKindType")] -class FuzzyFileSearchSessionCompletedNotification(BaseModel): +class PatchChangeKind( + RootModel[AddPatchChangeKind | DeletePatchChangeKind | UpdatePatchChangeKind] +): model_config = ConfigDict( populate_by_name=True, ) - session_id: Annotated[str, Field(alias="sessionId")] + root: AddPatchChangeKind | DeletePatchChangeKind | UpdatePatchChangeKind -class FuzzyFileSearchSessionUpdatedNotification(BaseModel): +class Personality(Enum): + none = "none" + friendly = "friendly" + pragmatic = "pragmatic" + + +class PlanDeltaNotification(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - files: list[FuzzyFileSearchResult] - query: str - session_id: Annotated[str, Field(alias="sessionId")] + delta: str + item_id: Annotated[str, Field(alias="itemId")] + thread_id: Annotated[str, Field(alias="threadId")] + turn_id: Annotated[str, Field(alias="turnId")] -class GetAccountParams(BaseModel): +class PlanType(Enum): + free = "free" + go = "go" + plus = "plus" + pro = "pro" + team = "team" + business = "business" + enterprise = "enterprise" + edu = "edu" + unknown = "unknown" + + +class PluginAuthPolicy(Enum): + on_install = "ON_INSTALL" + on_use = "ON_USE" + + +class PluginInstallParams(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - refresh_token: Annotated[ - bool | None, - Field( - alias="refreshToken", - description="When `true`, requests a proactive token refresh before returning.\n\nIn managed auth mode this triggers the normal refresh-token flow. In external auth mode this flag is ignored. 
Clients should refresh tokens themselves and call `account/login/start` with `chatgptAuthTokens`.", - ), - ] = False + marketplace_path: Annotated[AbsolutePathBuf, Field(alias="marketplacePath")] + plugin_name: Annotated[str, Field(alias="pluginName")] -class GhostCommit(BaseModel): +class PluginInstallPolicy(Enum): + not_available = "NOT_AVAILABLE" + available = "AVAILABLE" + installed_by_default = "INSTALLED_BY_DEFAULT" + + +class PluginInstallResponse(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - id: str - parent: str | None = None - preexisting_untracked_dirs: list[str] - preexisting_untracked_files: list[str] + apps_needing_auth: Annotated[list[AppSummary], Field(alias="appsNeedingAuth")] + auth_policy: Annotated[PluginAuthPolicy, Field(alias="authPolicy")] -class GitInfo(BaseModel): +class PluginInterface(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - branch: str | None = None - origin_url: Annotated[str | None, Field(alias="originUrl")] = None - sha: str | None = None - - -class HazelnutScope(Enum): - example = "example" - workspace_shared = "workspace-shared" - all_shared = "all-shared" - personal = "personal" + brand_color: Annotated[str | None, Field(alias="brandColor")] = None + capabilities: list[str] + category: str | None = None + composer_icon: Annotated[AbsolutePathBuf | None, Field(alias="composerIcon")] = None + default_prompt: Annotated[str | None, Field(alias="defaultPrompt")] = None + developer_name: Annotated[str | None, Field(alias="developerName")] = None + display_name: Annotated[str | None, Field(alias="displayName")] = None + logo: AbsolutePathBuf | None = None + long_description: Annotated[str | None, Field(alias="longDescription")] = None + privacy_policy_url: Annotated[str | None, Field(alias="privacyPolicyUrl")] = None + screenshots: list[AbsolutePathBuf] + short_description: Annotated[str | None, Field(alias="shortDescription")] = None + terms_of_service_url: Annotated[str | None, 
Field(alias="termsOfServiceUrl")] = None + website_url: Annotated[str | None, Field(alias="websiteUrl")] = None -class HistoryEntry(BaseModel): +class PluginListParams(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - conversation_id: str - text: str - ts: Annotated[int, Field(ge=0)] + cwds: Annotated[ + list[AbsolutePathBuf] | None, + Field( + description="Optional working directories used to discover repo marketplaces. When omitted, only home-scoped marketplaces and the official curated marketplace are considered." + ), + ] = None + force_remote_sync: Annotated[ + bool | None, + Field( + alias="forceRemoteSync", + description="When true, reconcile the official curated marketplace against the remote plugin state before listing marketplaces.", + ), + ] = None -class HookEventName(Enum): - session_start = "sessionStart" - stop = "stop" +class PluginReadParams(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + marketplace_path: Annotated[AbsolutePathBuf, Field(alias="marketplacePath")] + plugin_name: Annotated[str, Field(alias="pluginName")] -class HookExecutionMode(Enum): - sync = "sync" - async_ = "async" +class LocalPluginSource(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + path: AbsolutePathBuf + type: Annotated[Literal["local"], Field(title="LocalPluginSourceType")] -class HookHandlerType(Enum): - command = "command" - prompt = "prompt" - agent = "agent" - - -class HookOutputEntryKind(Enum): - warning = "warning" - stop = "stop" - feedback = "feedback" - context = "context" - error = "error" - - -class HookRunStatus(Enum): - running = "running" - completed = "completed" - failed = "failed" - blocked = "blocked" - stopped = "stopped" - - -class HookScope(Enum): - thread = "thread" - turn = "turn" +class PluginSource(RootModel[LocalPluginSource]): + model_config = ConfigDict( + populate_by_name=True, + ) + root: LocalPluginSource -class ImageDetail(Enum): - auto = "auto" - low = "low" - high = "high" - 
original = "original" +class PluginSummary(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + auth_policy: Annotated[PluginAuthPolicy, Field(alias="authPolicy")] + enabled: bool + id: str + install_policy: Annotated[PluginInstallPolicy, Field(alias="installPolicy")] + installed: bool + interface: PluginInterface | None = None + name: str + source: PluginSource -class InitializeCapabilities(BaseModel): +class PluginUninstallParams(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - experimental_api: Annotated[ - bool | None, - Field( - alias="experimentalApi", - description="Opt into receiving experimental API methods and fields.", - ), - ] = False - opt_out_notification_methods: Annotated[ - list[str] | None, - Field( - alias="optOutNotificationMethods", - description="Exact notification method names that should be suppressed for this connection (for example `thread/started`).", - ), - ] = None + plugin_id: Annotated[str, Field(alias="pluginId")] -class InitializeParams(BaseModel): +class PluginUninstallResponse(BaseModel): + pass model_config = ConfigDict( populate_by_name=True, ) - capabilities: InitializeCapabilities | None = None - client_info: Annotated[ClientInfo, Field(alias="clientInfo")] -class InputModality(Enum): - text = "text" - image = "image" +class ProductSurface(Enum): + chatgpt = "chatgpt" + codex = "codex" + api = "api" + atlas = "atlas" -class ListMcpServerStatusParams(BaseModel): +class RateLimitWindow(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - cursor: Annotated[ - str | None, - Field(description="Opaque pagination cursor returned by a previous call."), - ] = None - limit: Annotated[ - int | None, - Field( - description="Optional page size; defaults to a server-defined value.", ge=0 - ), - ] = None + resets_at: Annotated[int | None, Field(alias="resetsAt")] = None + used_percent: Annotated[int, Field(alias="usedPercent")] + window_duration_mins: Annotated[int | None, 
Field(alias="windowDurationMins")] = ( + None + ) -class ExecLocalShellAction(BaseModel): +class RestrictedReadOnlyAccess(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - command: list[str] - env: dict[str, Any] | None = None - timeout_ms: Annotated[int | None, Field(ge=0)] = None - type: Annotated[Literal["exec"], Field(title="ExecLocalShellActionType")] - user: str | None = None - working_directory: str | None = None + include_platform_defaults: Annotated[ + bool | None, Field(alias="includePlatformDefaults") + ] = True + readable_roots: Annotated[ + list[AbsolutePathBuf] | None, Field(alias="readableRoots") + ] = [] + type: Annotated[Literal["restricted"], Field(title="RestrictedReadOnlyAccessType")] -class LocalShellAction(RootModel[ExecLocalShellAction]): +class FullAccessReadOnlyAccess(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - root: ExecLocalShellAction - - -class LocalShellStatus(Enum): - completed = "completed" - in_progress = "in_progress" - incomplete = "incomplete" + type: Annotated[Literal["fullAccess"], Field(title="FullAccessReadOnlyAccessType")] -class ApiKeyLoginAccountParams(BaseModel): +class ReadOnlyAccess(RootModel[RestrictedReadOnlyAccess | FullAccessReadOnlyAccess]): model_config = ConfigDict( populate_by_name=True, ) - api_key: Annotated[str, Field(alias="apiKey")] - type: Annotated[Literal["apiKey"], Field(title="ApiKeyv2::LoginAccountParamsType")] + root: RestrictedReadOnlyAccess | FullAccessReadOnlyAccess -class ChatgptLoginAccountParams(BaseModel): +class ReasoningEffort(Enum): + none = "none" + minimal = "minimal" + low = "low" + medium = "medium" + high = "high" + xhigh = "xhigh" + + +class ReasoningEffortOption(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - type: Annotated[ - Literal["chatgpt"], Field(title="Chatgptv2::LoginAccountParamsType") - ] + description: str + reasoning_effort: Annotated[ReasoningEffort, Field(alias="reasoningEffort")] -class 
ChatgptAuthTokensLoginAccountParams(BaseModel): +class ReasoningTextReasoningItemContent(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - access_token: Annotated[ - str, - Field( - alias="accessToken", - description="Access token (JWT) supplied by the client. This token is used for backend API requests and email extraction.", - ), - ] - chatgpt_account_id: Annotated[ - str, - Field( - alias="chatgptAccountId", - description="Workspace/account identifier supplied by the client.", - ), - ] - chatgpt_plan_type: Annotated[ - str | None, - Field( - alias="chatgptPlanType", - description="Optional plan type supplied by the client.\n\nWhen `null`, Codex attempts to derive the plan type from access-token claims. If unavailable, the plan defaults to `unknown`.", - ), - ] = None + text: str type: Annotated[ - Literal["chatgptAuthTokens"], - Field(title="ChatgptAuthTokensv2::LoginAccountParamsType"), + Literal["reasoning_text"], Field(title="ReasoningTextReasoningItemContentType") ] -class LoginAccountParams( - RootModel[ - ApiKeyLoginAccountParams - | ChatgptLoginAccountParams - | ChatgptAuthTokensLoginAccountParams - ] -): +class TextReasoningItemContent(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - root: Annotated[ - ApiKeyLoginAccountParams - | ChatgptLoginAccountParams - | ChatgptAuthTokensLoginAccountParams, - Field(title="LoginAccountParams"), - ] + text: str + type: Annotated[Literal["text"], Field(title="TextReasoningItemContentType")] -class ApiKeyLoginAccountResponse(BaseModel): +class ReasoningItemContent( + RootModel[ReasoningTextReasoningItemContent | TextReasoningItemContent] +): model_config = ConfigDict( populate_by_name=True, ) - type: Annotated[ - Literal["apiKey"], Field(title="ApiKeyv2::LoginAccountResponseType") - ] + root: ReasoningTextReasoningItemContent | TextReasoningItemContent -class ChatgptLoginAccountResponse(BaseModel): +class SummaryTextReasoningItemReasoningSummary(BaseModel): model_config = ConfigDict( 
populate_by_name=True, ) - auth_url: Annotated[ - str, - Field( - alias="authUrl", - description="URL the client should open in a browser to initiate the OAuth flow.", - ), - ] - login_id: Annotated[str, Field(alias="loginId")] + text: str type: Annotated[ - Literal["chatgpt"], Field(title="Chatgptv2::LoginAccountResponseType") + Literal["summary_text"], + Field(title="SummaryTextReasoningItemReasoningSummaryType"), ] -class ChatgptAuthTokensLoginAccountResponse(BaseModel): +class ReasoningItemReasoningSummary( + RootModel[SummaryTextReasoningItemReasoningSummary] +): model_config = ConfigDict( populate_by_name=True, ) - type: Annotated[ - Literal["chatgptAuthTokens"], - Field(title="ChatgptAuthTokensv2::LoginAccountResponseType"), - ] + root: SummaryTextReasoningItemReasoningSummary -class LoginAccountResponse( - RootModel[ - ApiKeyLoginAccountResponse - | ChatgptLoginAccountResponse - | ChatgptAuthTokensLoginAccountResponse - ] -): +class ReasoningSummaryValue(Enum): + auto = "auto" + concise = "concise" + detailed = "detailed" + + +class ReasoningSummary(RootModel[ReasoningSummaryValue | Literal["none"]]): model_config = ConfigDict( populate_by_name=True, ) root: Annotated[ - ApiKeyLoginAccountResponse - | ChatgptLoginAccountResponse - | ChatgptAuthTokensLoginAccountResponse, - Field(title="LoginAccountResponse"), + ReasoningSummaryValue | Literal["none"], + Field( + description="A summary of the reasoning performed by the model. This can be useful for debugging and understanding the model's reasoning process. 
See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#reasoning-summaries" + ), ] -class LogoutAccountResponse(BaseModel): - pass +class ReasoningSummaryPartAddedNotification(BaseModel): model_config = ConfigDict( populate_by_name=True, ) + item_id: Annotated[str, Field(alias="itemId")] + summary_index: Annotated[int, Field(alias="summaryIndex")] + thread_id: Annotated[str, Field(alias="threadId")] + turn_id: Annotated[str, Field(alias="turnId")] -class MacOsAutomationPermissionValue(Enum): - none = "none" - all = "all" - - -class BundleIdsMacOsAutomationPermission(BaseModel): +class ReasoningSummaryTextDeltaNotification(BaseModel): model_config = ConfigDict( - extra="forbid", populate_by_name=True, ) - bundle_ids: list[str] + delta: str + item_id: Annotated[str, Field(alias="itemId")] + summary_index: Annotated[int, Field(alias="summaryIndex")] + thread_id: Annotated[str, Field(alias="threadId")] + turn_id: Annotated[str, Field(alias="turnId")] -class MacOsAutomationPermission( - RootModel[MacOsAutomationPermissionValue | BundleIdsMacOsAutomationPermission] -): +class ReasoningTextDeltaNotification(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - root: MacOsAutomationPermissionValue | BundleIdsMacOsAutomationPermission - - -class MacOsContactsPermission(Enum): - none = "none" - read_only = "read_only" - read_write = "read_write" - - -class MacOsPreferencesPermission(Enum): - none = "none" - read_only = "read_only" - read_write = "read_write" + content_index: Annotated[int, Field(alias="contentIndex")] + delta: str + item_id: Annotated[str, Field(alias="itemId")] + thread_id: Annotated[str, Field(alias="threadId")] + turn_id: Annotated[str, Field(alias="turnId")] -class MacOsSeatbeltProfileExtensions(BaseModel): +class RemoteSkillSummary(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - macos_accessibility: bool | None = False - macos_automation: Annotated[MacOsAutomationPermission | None, Field()] = "none" - 
macos_calendar: bool | None = False - macos_contacts: MacOsContactsPermission | None = "none" - macos_launch_services: bool | None = False - macos_preferences: MacOsPreferencesPermission | None = "read_only" - macos_reminders: bool | None = False - - -class McpAuthStatus(Enum): - unsupported = "unsupported" - not_logged_in = "notLoggedIn" - bearer_token = "bearerToken" - o_auth = "oAuth" + description: str + id: str + name: str -class McpInvocation(BaseModel): +class RequestId(RootModel[str | int]): model_config = ConfigDict( populate_by_name=True, ) - arguments: Annotated[ - Any | None, Field(description="Arguments to the tool call.") - ] = None - server: Annotated[ - str, Field(description="Name of the MCP server as defined in the config.") - ] - tool: Annotated[ - str, Field(description="Name of the tool as given by the MCP server.") - ] + root: str | int -class McpServerOauthLoginCompletedNotification(BaseModel): +class ResidencyRequirement(RootModel[Literal["us"]]): model_config = ConfigDict( populate_by_name=True, ) - error: str | None = None - name: str - success: bool + root: Literal["us"] -class McpServerOauthLoginParams(BaseModel): +class Resource(BaseModel): model_config = ConfigDict( populate_by_name=True, ) + field_meta: Annotated[Any | None, Field(alias="_meta")] = None + annotations: Any | None = None + description: str | None = None + icons: list | None = None + mime_type: Annotated[str | None, Field(alias="mimeType")] = None name: str - scopes: list[str] | None = None - timeout_secs: Annotated[int | None, Field(alias="timeoutSecs")] = None + size: int | None = None + title: str | None = None + uri: str -class McpServerOauthLoginResponse(BaseModel): +class ResourceTemplate(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - authorization_url: Annotated[str, Field(alias="authorizationUrl")] + annotations: Any | None = None + description: str | None = None + mime_type: Annotated[str | None, Field(alias="mimeType")] = None + name: str + 
title: str | None = None + uri_template: Annotated[str, Field(alias="uriTemplate")] -class McpServerRefreshResponse(BaseModel): - pass +class MessageResponseItem(BaseModel): model_config = ConfigDict( populate_by_name=True, ) + content: list[ContentItem] + end_turn: bool | None = None + id: str | None = None + phase: MessagePhase | None = None + role: str + type: Annotated[Literal["message"], Field(title="MessageResponseItemType")] -class McpStartupFailure(BaseModel): +class ReasoningResponseItem(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - error: str - server: str + content: list[ReasoningItemContent] | None = None + encrypted_content: str | None = None + id: str + summary: list[ReasoningItemReasoningSummary] + type: Annotated[Literal["reasoning"], Field(title="ReasoningResponseItemType")] -class StartingMcpStartupStatus(BaseModel): +class LocalShellCallResponseItem(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - state: Annotated[Literal["starting"], Field(title="StartingMcpStartupStatusState")] + action: LocalShellAction + call_id: Annotated[ + str | None, Field(description="Set when using the Responses API.") + ] = None + id: Annotated[ + str | None, + Field( + description="Legacy id field retained for compatibility with older payloads." 
+ ), + ] = None + status: LocalShellStatus + type: Annotated[ + Literal["local_shell_call"], Field(title="LocalShellCallResponseItemType") + ] -class ReadyMcpStartupStatus(BaseModel): +class FunctionCallResponseItem(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - state: Annotated[Literal["ready"], Field(title="ReadyMcpStartupStatusState")] + arguments: str + call_id: str + id: str | None = None + name: str + namespace: str | None = None + type: Annotated[ + Literal["function_call"], Field(title="FunctionCallResponseItemType") + ] -class FailedMcpStartupStatus(BaseModel): +class ToolSearchCallResponseItem(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - error: str - state: Annotated[Literal["failed"], Field(title="FailedMcpStartupStatusState")] + arguments: Any + call_id: str | None = None + execution: str + id: str | None = None + status: str | None = None + type: Annotated[ + Literal["tool_search_call"], Field(title="ToolSearchCallResponseItemType") + ] -class CancelledMcpStartupStatus(BaseModel): +class CustomToolCallResponseItem(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - state: Annotated[ - Literal["cancelled"], Field(title="CancelledMcpStartupStatusState") + call_id: str + id: str | None = None + input: str + name: str + status: str | None = None + type: Annotated[ + Literal["custom_tool_call"], Field(title="CustomToolCallResponseItemType") ] -class McpStartupStatus( - RootModel[ - StartingMcpStartupStatus - | ReadyMcpStartupStatus - | FailedMcpStartupStatus - | CancelledMcpStartupStatus - ] -): +class ToolSearchOutputResponseItem(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - root: ( - StartingMcpStartupStatus - | ReadyMcpStartupStatus - | FailedMcpStartupStatus - | CancelledMcpStartupStatus - ) + call_id: str | None = None + execution: str + status: str + tools: list + type: Annotated[ + Literal["tool_search_output"], Field(title="ToolSearchOutputResponseItemType") + ] -class 
McpToolCallError(BaseModel): +class ImageGenerationCallResponseItem(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - message: str + id: str + result: str + revised_prompt: str | None = None + status: str + type: Annotated[ + Literal["image_generation_call"], + Field(title="ImageGenerationCallResponseItemType"), + ] -class McpToolCallProgressNotification(BaseModel): +class GhostSnapshotResponseItem(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - item_id: Annotated[str, Field(alias="itemId")] - message: str - thread_id: Annotated[str, Field(alias="threadId")] - turn_id: Annotated[str, Field(alias="turnId")] + ghost_commit: GhostCommit + type: Annotated[ + Literal["ghost_snapshot"], Field(title="GhostSnapshotResponseItemType") + ] -class McpToolCallResult(BaseModel): +class CompactionResponseItem(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - content: list - structured_content: Annotated[Any | None, Field(alias="structuredContent")] = None - - -class McpToolCallStatus(Enum): - in_progress = "inProgress" - completed = "completed" - failed = "failed" - - -class MergeStrategy(Enum): - replace = "replace" - upsert = "upsert" - - -class MessagePhase(Enum): - commentary = "commentary" - final_answer = "final_answer" - - -class ModeKind(Enum): - plan = "plan" - default = "default" + encrypted_content: str + type: Annotated[Literal["compaction"], Field(title="CompactionResponseItemType")] -class ModelAvailabilityNux(BaseModel): +class OtherResponseItem(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - message: str + type: Annotated[Literal["other"], Field(title="OtherResponseItemType")] -class ModelListParams(BaseModel): +class SearchResponsesApiWebSearchAction(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - cursor: Annotated[ - str | None, - Field(description="Opaque pagination cursor returned by a previous call."), - ] = None - include_hidden: Annotated[ - bool | None, - Field( - 
alias="includeHidden", - description="When true, include models that are hidden from the default picker list.", - ), - ] = None - limit: Annotated[ - int | None, - Field( - description="Optional page size; defaults to a reasonable server-side value.", - ge=0, - ), - ] = None + queries: list[str] | None = None + query: str | None = None + type: Annotated[ + Literal["search"], Field(title="SearchResponsesApiWebSearchActionType") + ] -class ModelRerouteReason(RootModel[Literal["highRiskCyberActivity"]]): +class OpenPageResponsesApiWebSearchAction(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - root: Literal["highRiskCyberActivity"] + type: Annotated[ + Literal["open_page"], Field(title="OpenPageResponsesApiWebSearchActionType") + ] + url: str | None = None -class ModelReroutedNotification(BaseModel): +class FindInPageResponsesApiWebSearchAction(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - from_model: Annotated[str, Field(alias="fromModel")] - reason: ModelRerouteReason - thread_id: Annotated[str, Field(alias="threadId")] - to_model: Annotated[str, Field(alias="toModel")] - turn_id: Annotated[str, Field(alias="turnId")] + pattern: str | None = None + type: Annotated[ + Literal["find_in_page"], + Field(title="FindInPageResponsesApiWebSearchActionType"), + ] + url: str | None = None -class ModelUpgradeInfo(BaseModel): +class OtherResponsesApiWebSearchAction(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - migration_markdown: Annotated[str | None, Field(alias="migrationMarkdown")] = None - model: str - model_link: Annotated[str | None, Field(alias="modelLink")] = None - upgrade_copy: Annotated[str | None, Field(alias="upgradeCopy")] = None - - -class NetworkAccess(Enum): - restricted = "restricted" - enabled = "enabled" - - -class NetworkApprovalProtocol(Enum): - http = "http" - https = "https" - socks5_tcp = "socks5Tcp" - socks5_udp = "socks5Udp" + type: Annotated[ + Literal["other"], 
Field(title="OtherResponsesApiWebSearchActionType") + ] -class NetworkPermissions(BaseModel): +class ResponsesApiWebSearchAction( + RootModel[ + SearchResponsesApiWebSearchAction + | OpenPageResponsesApiWebSearchAction + | FindInPageResponsesApiWebSearchAction + | OtherResponsesApiWebSearchAction + ] +): model_config = ConfigDict( populate_by_name=True, ) - enabled: bool | None = None + root: ( + SearchResponsesApiWebSearchAction + | OpenPageResponsesApiWebSearchAction + | FindInPageResponsesApiWebSearchAction + | OtherResponsesApiWebSearchAction + ) -class NetworkPolicyRuleAction(Enum): - allow = "allow" - deny = "deny" +class ReviewDelivery(Enum): + inline = "inline" + detached = "detached" -class NetworkRequirements(BaseModel): +class UncommittedChangesReviewTarget(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - allow_local_binding: Annotated[bool | None, Field(alias="allowLocalBinding")] = None - allow_unix_sockets: Annotated[list[str] | None, Field(alias="allowUnixSockets")] = ( - None - ) - allow_upstream_proxy: Annotated[bool | None, Field(alias="allowUpstreamProxy")] = ( - None - ) - allowed_domains: Annotated[list[str] | None, Field(alias="allowedDomains")] = None - dangerously_allow_all_unix_sockets: Annotated[ - bool | None, Field(alias="dangerouslyAllowAllUnixSockets") - ] = None - dangerously_allow_non_loopback_proxy: Annotated[ - bool | None, Field(alias="dangerouslyAllowNonLoopbackProxy") - ] = None - denied_domains: Annotated[list[str] | None, Field(alias="deniedDomains")] = None - enabled: bool | None = None - http_port: Annotated[int | None, Field(alias="httpPort", ge=0)] = None - socks_port: Annotated[int | None, Field(alias="socksPort", ge=0)] = None - - -class ReadParsedCommand(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - cmd: str - name: str - path: Annotated[ - str, - Field( - description="(Best effort) Path to the file being read by the command. 
When possible, this is an absolute path, though when relative, it should be resolved against the `cwd`` that will be used to run the command to derive the absolute path." - ), + type: Annotated[ + Literal["uncommittedChanges"], Field(title="UncommittedChangesReviewTargetType") ] - type: Annotated[Literal["read"], Field(title="ReadParsedCommandType")] -class ListFilesParsedCommand(BaseModel): +class BaseBranchReviewTarget(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - cmd: str - path: str | None = None - type: Annotated[Literal["list_files"], Field(title="ListFilesParsedCommandType")] + branch: str + type: Annotated[Literal["baseBranch"], Field(title="BaseBranchReviewTargetType")] -class SearchParsedCommand(BaseModel): +class CommitReviewTarget(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - cmd: str - path: str | None = None - query: str | None = None - type: Annotated[Literal["search"], Field(title="SearchParsedCommandType")] + sha: str + title: Annotated[ + str | None, + Field( + description="Optional human-readable label (e.g., commit subject) for UIs." 
+ ), + ] = None + type: Annotated[Literal["commit"], Field(title="CommitReviewTargetType")] -class UnknownParsedCommand(BaseModel): +class CustomReviewTarget(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - cmd: str - type: Annotated[Literal["unknown"], Field(title="UnknownParsedCommandType")] + instructions: str + type: Annotated[Literal["custom"], Field(title="CustomReviewTargetType")] -class ParsedCommand( +class ReviewTarget( RootModel[ - ReadParsedCommand - | ListFilesParsedCommand - | SearchParsedCommand - | UnknownParsedCommand + UncommittedChangesReviewTarget + | BaseBranchReviewTarget + | CommitReviewTarget + | CustomReviewTarget ] ): model_config = ConfigDict( populate_by_name=True, ) root: ( - ReadParsedCommand - | ListFilesParsedCommand - | SearchParsedCommand - | UnknownParsedCommand + UncommittedChangesReviewTarget + | BaseBranchReviewTarget + | CommitReviewTarget + | CustomReviewTarget ) -class PatchApplyStatus(Enum): - in_progress = "inProgress" - completed = "completed" - failed = "failed" - declined = "declined" +class SandboxMode(Enum): + read_only = "read-only" + workspace_write = "workspace-write" + danger_full_access = "danger-full-access" -class AddPatchChangeKind(BaseModel): +class DangerFullAccessSandboxPolicy(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - type: Annotated[Literal["add"], Field(title="AddPatchChangeKindType")] + type: Annotated[ + Literal["dangerFullAccess"], Field(title="DangerFullAccessSandboxPolicyType") + ] -class DeletePatchChangeKind(BaseModel): +class ReadOnlySandboxPolicy(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - type: Annotated[Literal["delete"], Field(title="DeletePatchChangeKindType")] + access: Annotated[ReadOnlyAccess | None, Field()] = {"type": "fullAccess"} + network_access: Annotated[bool | None, Field(alias="networkAccess")] = False + type: Annotated[Literal["readOnly"], Field(title="ReadOnlySandboxPolicyType")] -class 
UpdatePatchChangeKind(BaseModel): +class ExternalSandboxSandboxPolicy(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - move_path: str | None = None - type: Annotated[Literal["update"], Field(title="UpdatePatchChangeKindType")] + network_access: Annotated[NetworkAccess | None, Field(alias="networkAccess")] = ( + "restricted" + ) + type: Annotated[ + Literal["externalSandbox"], Field(title="ExternalSandboxSandboxPolicyType") + ] -class PatchChangeKind( - RootModel[AddPatchChangeKind | DeletePatchChangeKind | UpdatePatchChangeKind] -): +class WorkspaceWriteSandboxPolicy(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - root: AddPatchChangeKind | DeletePatchChangeKind | UpdatePatchChangeKind + exclude_slash_tmp: Annotated[bool | None, Field(alias="excludeSlashTmp")] = False + exclude_tmpdir_env_var: Annotated[ + bool | None, Field(alias="excludeTmpdirEnvVar") + ] = False + network_access: Annotated[bool | None, Field(alias="networkAccess")] = False + read_only_access: Annotated[ + ReadOnlyAccess | None, Field(alias="readOnlyAccess") + ] = {"type": "fullAccess"} + type: Annotated[ + Literal["workspaceWrite"], Field(title="WorkspaceWriteSandboxPolicyType") + ] + writable_roots: Annotated[ + list[AbsolutePathBuf] | None, Field(alias="writableRoots") + ] = [] -class PermissionProfile(BaseModel): +class SandboxPolicy( + RootModel[ + DangerFullAccessSandboxPolicy + | ReadOnlySandboxPolicy + | ExternalSandboxSandboxPolicy + | WorkspaceWriteSandboxPolicy + ] +): model_config = ConfigDict( populate_by_name=True, ) - file_system: FileSystemPermissions | None = None - macos: MacOsSeatbeltProfileExtensions | None = None - network: NetworkPermissions | None = None - - -class Personality(Enum): - none = "none" - friendly = "friendly" - pragmatic = "pragmatic" + root: ( + DangerFullAccessSandboxPolicy + | ReadOnlySandboxPolicy + | ExternalSandboxSandboxPolicy + | WorkspaceWriteSandboxPolicy + ) -class PlanDeltaNotification(BaseModel): +class 
SandboxWorkspaceWrite(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - delta: str - item_id: Annotated[str, Field(alias="itemId")] - thread_id: Annotated[str, Field(alias="threadId")] - turn_id: Annotated[str, Field(alias="turnId")] - - -class PlanType(Enum): - free = "free" - go = "go" - plus = "plus" - pro = "pro" - team = "team" - business = "business" - enterprise = "enterprise" - edu = "edu" - unknown = "unknown" - - -class PluginAuthPolicy(Enum): - on_install = "ON_INSTALL" - on_use = "ON_USE" + exclude_slash_tmp: bool | None = False + exclude_tmpdir_env_var: bool | None = False + network_access: bool | None = False + writable_roots: list[str] | None = [] -class PluginInstallParams(BaseModel): +class ItemAgentMessageDeltaServerNotification(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - marketplace_path: Annotated[AbsolutePathBuf, Field(alias="marketplacePath")] - plugin_name: Annotated[str, Field(alias="pluginName")] - - -class PluginInstallPolicy(Enum): - not_available = "NOT_AVAILABLE" - available = "AVAILABLE" - installed_by_default = "INSTALLED_BY_DEFAULT" + method: Annotated[ + Literal["item/agentMessage/delta"], + Field(title="Item/agentMessage/deltaNotificationMethod"), + ] + params: AgentMessageDeltaNotification -class PluginInstallResponse(BaseModel): +class ItemPlanDeltaServerNotification(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - apps_needing_auth: Annotated[list[AppSummary], Field(alias="appsNeedingAuth")] - auth_policy: Annotated[PluginAuthPolicy, Field(alias="authPolicy")] + method: Annotated[ + Literal["item/plan/delta"], Field(title="Item/plan/deltaNotificationMethod") + ] + params: PlanDeltaNotification -class PluginInterface(BaseModel): +class ItemCommandExecutionOutputDeltaServerNotification(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - brand_color: Annotated[str | None, Field(alias="brandColor")] = None - capabilities: list[str] - category: str | None = None - 
composer_icon: Annotated[AbsolutePathBuf | None, Field(alias="composerIcon")] = None - default_prompt: Annotated[str | None, Field(alias="defaultPrompt")] = None - developer_name: Annotated[str | None, Field(alias="developerName")] = None - display_name: Annotated[str | None, Field(alias="displayName")] = None - logo: AbsolutePathBuf | None = None - long_description: Annotated[str | None, Field(alias="longDescription")] = None - privacy_policy_url: Annotated[str | None, Field(alias="privacyPolicyUrl")] = None - screenshots: list[AbsolutePathBuf] - short_description: Annotated[str | None, Field(alias="shortDescription")] = None - terms_of_service_url: Annotated[str | None, Field(alias="termsOfServiceUrl")] = None - website_url: Annotated[str | None, Field(alias="websiteUrl")] = None + method: Annotated[ + Literal["item/commandExecution/outputDelta"], + Field(title="Item/commandExecution/outputDeltaNotificationMethod"), + ] + params: CommandExecutionOutputDeltaNotification -class PluginListParams(BaseModel): +class ItemFileChangeOutputDeltaServerNotification(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - cwds: Annotated[ - list[AbsolutePathBuf] | None, - Field( - description="Optional working directories used to discover repo marketplaces. When omitted, only home-scoped marketplaces and the official curated marketplace are considered." 
- ), - ] = None - force_remote_sync: Annotated[ - bool | None, - Field( - alias="forceRemoteSync", - description="When true, reconcile the official curated marketplace against the remote plugin state before listing marketplaces.", - ), - ] = None + method: Annotated[ + Literal["item/fileChange/outputDelta"], + Field(title="Item/fileChange/outputDeltaNotificationMethod"), + ] + params: FileChangeOutputDeltaNotification -class LocalPluginSource(BaseModel): +class ItemMcpToolCallProgressServerNotification(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - path: AbsolutePathBuf - type: Annotated[Literal["local"], Field(title="LocalPluginSourceType")] + method: Annotated[ + Literal["item/mcpToolCall/progress"], + Field(title="Item/mcpToolCall/progressNotificationMethod"), + ] + params: McpToolCallProgressNotification -class PluginSource(RootModel[LocalPluginSource]): +class McpServerOauthLoginCompletedServerNotification(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - root: LocalPluginSource + method: Annotated[ + Literal["mcpServer/oauthLogin/completed"], + Field(title="McpServer/oauthLogin/completedNotificationMethod"), + ] + params: McpServerOauthLoginCompletedNotification -class PluginSummary(BaseModel): +class ItemReasoningSummaryTextDeltaServerNotification(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - auth_policy: Annotated[PluginAuthPolicy, Field(alias="authPolicy")] - enabled: bool - id: str - install_policy: Annotated[PluginInstallPolicy, Field(alias="installPolicy")] - installed: bool - interface: PluginInterface | None = None - name: str - source: PluginSource + method: Annotated[ + Literal["item/reasoning/summaryTextDelta"], + Field(title="Item/reasoning/summaryTextDeltaNotificationMethod"), + ] + params: ReasoningSummaryTextDeltaNotification -class PluginUninstallParams(BaseModel): +class ItemReasoningSummaryPartAddedServerNotification(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - 
plugin_id: Annotated[str, Field(alias="pluginId")] - - -class PluginUninstallResponse(BaseModel): - pass - model_config = ConfigDict( - populate_by_name=True, - ) - - -class ProductSurface(Enum): - chatgpt = "chatgpt" - codex = "codex" - api = "api" - atlas = "atlas" - - -class RateLimitWindow(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - resets_at: Annotated[int | None, Field(alias="resetsAt")] = None - used_percent: Annotated[int, Field(alias="usedPercent")] - window_duration_mins: Annotated[int | None, Field(alias="windowDurationMins")] = ( - None - ) + method: Annotated[ + Literal["item/reasoning/summaryPartAdded"], + Field(title="Item/reasoning/summaryPartAddedNotificationMethod"), + ] + params: ReasoningSummaryPartAddedNotification -class RestrictedReadOnlyAccess(BaseModel): +class ItemReasoningTextDeltaServerNotification(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - include_platform_defaults: Annotated[ - bool | None, Field(alias="includePlatformDefaults") - ] = True - readable_roots: Annotated[ - list[AbsolutePathBuf] | None, Field(alias="readableRoots") - ] = [] - type: Annotated[Literal["restricted"], Field(title="RestrictedReadOnlyAccessType")] + method: Annotated[ + Literal["item/reasoning/textDelta"], + Field(title="Item/reasoning/textDeltaNotificationMethod"), + ] + params: ReasoningTextDeltaNotification -class FullAccessReadOnlyAccess(BaseModel): +class ThreadCompactedServerNotification(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - type: Annotated[Literal["fullAccess"], Field(title="FullAccessReadOnlyAccessType")] + method: Annotated[ + Literal["thread/compacted"], Field(title="Thread/compactedNotificationMethod") + ] + params: ContextCompactedNotification -class ReadOnlyAccess(RootModel[RestrictedReadOnlyAccess | FullAccessReadOnlyAccess]): +class ModelReroutedServerNotification(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - root: RestrictedReadOnlyAccess | 
FullAccessReadOnlyAccess + method: Annotated[ + Literal["model/rerouted"], Field(title="Model/reroutedNotificationMethod") + ] + params: ModelReroutedNotification -class RealtimeAudioFrame(BaseModel): +class DeprecationNoticeServerNotification(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - data: str - num_channels: Annotated[int, Field(ge=0)] - sample_rate: Annotated[int, Field(ge=0)] - samples_per_channel: Annotated[int | None, Field(ge=0)] = None + method: Annotated[ + Literal["deprecationNotice"], Field(title="DeprecationNoticeNotificationMethod") + ] + params: DeprecationNoticeNotification -class SessionUpdated(BaseModel): +class FuzzyFileSearchSessionUpdatedServerNotification(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - instructions: str | None = None - session_id: str + method: Annotated[ + Literal["fuzzyFileSearch/sessionUpdated"], + Field(title="FuzzyFileSearch/sessionUpdatedNotificationMethod"), + ] + params: FuzzyFileSearchSessionUpdatedNotification -class SessionUpdatedRealtimeEvent(BaseModel): +class FuzzyFileSearchSessionCompletedServerNotification(BaseModel): model_config = ConfigDict( - extra="forbid", populate_by_name=True, ) - session_updated: Annotated[SessionUpdated, Field(alias="SessionUpdated")] + method: Annotated[ + Literal["fuzzyFileSearch/sessionCompleted"], + Field(title="FuzzyFileSearch/sessionCompletedNotificationMethod"), + ] + params: FuzzyFileSearchSessionCompletedNotification -class AudioOutRealtimeEvent(BaseModel): +class AccountLoginCompletedServerNotification(BaseModel): model_config = ConfigDict( - extra="forbid", populate_by_name=True, ) - audio_out: Annotated[RealtimeAudioFrame, Field(alias="AudioOut")] + method: Annotated[ + Literal["account/login/completed"], + Field(title="Account/login/completedNotificationMethod"), + ] + params: AccountLoginCompletedNotification -class ConversationItemAddedRealtimeEvent(BaseModel): +class ServerRequestResolvedNotification(BaseModel): model_config = 
ConfigDict( - extra="forbid", populate_by_name=True, ) - conversation_item_added: Annotated[Any, Field(alias="ConversationItemAdded")] + request_id: Annotated[RequestId, Field(alias="requestId")] + thread_id: Annotated[str, Field(alias="threadId")] -class ConversationItemDone(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - item_id: str +class ServiceTier(Enum): + fast = "fast" + flex = "flex" -class ConversationItemDoneRealtimeEvent(BaseModel): - model_config = ConfigDict( - extra="forbid", - populate_by_name=True, - ) - conversation_item_done: Annotated[ - ConversationItemDone, Field(alias="ConversationItemDone") - ] +class SessionSourceValue(Enum): + cli = "cli" + vscode = "vscode" + exec = "exec" + app_server = "appServer" + unknown = "unknown" -class ErrorRealtimeEvent(BaseModel): +class Settings(BaseModel): model_config = ConfigDict( - extra="forbid", populate_by_name=True, ) - error: Annotated[str, Field(alias="Error")] + developer_instructions: str | None = None + model: str + reasoning_effort: ReasoningEffort | None = None -class RealtimeTranscriptDelta(BaseModel): +class SkillErrorInfo(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - delta: str + message: str + path: str -class RealtimeTranscriptEntry(BaseModel): +class SkillInterface(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - role: str - text: str + brand_color: Annotated[str | None, Field(alias="brandColor")] = None + default_prompt: Annotated[str | None, Field(alias="defaultPrompt")] = None + display_name: Annotated[str | None, Field(alias="displayName")] = None + icon_large: Annotated[str | None, Field(alias="iconLarge")] = None + icon_small: Annotated[str | None, Field(alias="iconSmall")] = None + short_description: Annotated[str | None, Field(alias="shortDescription")] = None -class ReasoningEffort(Enum): - none = "none" - minimal = "minimal" - low = "low" - medium = "medium" - high = "high" - xhigh = "xhigh" +class SkillScope(Enum): + 
user = "user" + repo = "repo" + system = "system" + admin = "admin" -class ReasoningEffortOption(BaseModel): +class SkillSummary(BaseModel): model_config = ConfigDict( populate_by_name=True, ) description: str - reasoning_effort: Annotated[ReasoningEffort, Field(alias="reasoningEffort")] + interface: SkillInterface | None = None + name: str + path: str + short_description: Annotated[str | None, Field(alias="shortDescription")] = None -class ReasoningTextReasoningItemContent(BaseModel): +class SkillToolDependency(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - text: str - type: Annotated[ - Literal["reasoning_text"], Field(title="ReasoningTextReasoningItemContentType") - ] + command: str | None = None + description: str | None = None + transport: str | None = None + type: str + url: str | None = None + value: str -class TextReasoningItemContent(BaseModel): +class SkillsChangedNotification(BaseModel): + pass model_config = ConfigDict( populate_by_name=True, ) - text: str - type: Annotated[Literal["text"], Field(title="TextReasoningItemContentType")] -class ReasoningItemContent( - RootModel[ReasoningTextReasoningItemContent | TextReasoningItemContent] -): +class SkillsConfigWriteParams(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - root: ReasoningTextReasoningItemContent | TextReasoningItemContent + enabled: bool + path: str -class SummaryTextReasoningItemReasoningSummary(BaseModel): +class SkillsConfigWriteResponse(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - text: str - type: Annotated[ - Literal["summary_text"], - Field(title="SummaryTextReasoningItemReasoningSummaryType"), - ] + effective_enabled: Annotated[bool, Field(alias="effectiveEnabled")] -class ReasoningItemReasoningSummary( - RootModel[SummaryTextReasoningItemReasoningSummary] -): +class SkillsListExtraRootsForCwd(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - root: SummaryTextReasoningItemReasoningSummary - - -class 
ReasoningSummaryValue(Enum): - auto = "auto" - concise = "concise" - detailed = "detailed" + cwd: str + extra_user_roots: Annotated[list[str], Field(alias="extraUserRoots")] -class ReasoningSummary(RootModel[ReasoningSummaryValue | Literal["none"]]): +class SkillsListParams(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - root: Annotated[ - ReasoningSummaryValue | Literal["none"], + cwds: Annotated[ + list[str] | None, Field( - description="A summary of the reasoning performed by the model. This can be useful for debugging and understanding the model's reasoning process. See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#reasoning-summaries" + description="When empty, defaults to the current session working directory." ), - ] + ] = None + force_reload: Annotated[ + bool | None, + Field( + alias="forceReload", + description="When true, bypass the skills cache and re-scan skills from disk.", + ), + ] = None + per_cwd_extra_user_roots: Annotated[ + list[SkillsListExtraRootsForCwd] | None, + Field( + alias="perCwdExtraUserRoots", + description="Optional per-cwd extra roots to scan as user-scoped skills.", + ), + ] = None -class ReasoningSummaryPartAddedNotification(BaseModel): +class SkillsRemoteReadParams(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - item_id: Annotated[str, Field(alias="itemId")] - summary_index: Annotated[int, Field(alias="summaryIndex")] - thread_id: Annotated[str, Field(alias="threadId")] - turn_id: Annotated[str, Field(alias="turnId")] + enabled: bool | None = False + hazelnut_scope: Annotated[HazelnutScope | None, Field(alias="hazelnutScope")] = ( + "example" + ) + product_surface: Annotated[ProductSurface | None, Field(alias="productSurface")] = ( + "codex" + ) -class ReasoningSummaryTextDeltaNotification(BaseModel): +class SkillsRemoteReadResponse(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - delta: str - item_id: Annotated[str, Field(alias="itemId")] - 
summary_index: Annotated[int, Field(alias="summaryIndex")] - thread_id: Annotated[str, Field(alias="threadId")] - turn_id: Annotated[str, Field(alias="turnId")] + data: list[RemoteSkillSummary] -class ReasoningTextDeltaNotification(BaseModel): +class SkillsRemoteWriteParams(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - content_index: Annotated[int, Field(alias="contentIndex")] - delta: str - item_id: Annotated[str, Field(alias="itemId")] - thread_id: Annotated[str, Field(alias="threadId")] - turn_id: Annotated[str, Field(alias="turnId")] + hazelnut_id: Annotated[str, Field(alias="hazelnutId")] -class RemoteSkillSummary(BaseModel): +class SkillsRemoteWriteResponse(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - description: str id: str - name: str + path: str -class RequestId(RootModel[str | int]): - model_config = ConfigDict( - populate_by_name=True, - ) - root: str | int +class SubAgentSourceValue(Enum): + review = "review" + compact = "compact" + memory_consolidation = "memory_consolidation" -class RequestUserInputQuestionOption(BaseModel): +class OtherSubAgentSource(BaseModel): model_config = ConfigDict( + extra="forbid", populate_by_name=True, ) - description: str - label: str + other: str -class ResidencyRequirement(RootModel[Literal["us"]]): +class TerminalInteractionNotification(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - root: Literal["us"] + item_id: Annotated[str, Field(alias="itemId")] + process_id: Annotated[str, Field(alias="processId")] + stdin: str + thread_id: Annotated[str, Field(alias="threadId")] + turn_id: Annotated[str, Field(alias="turnId")] -class Resource(BaseModel): +class TextElement(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - field_meta: Annotated[Any | None, Field(alias="_meta")] = None - annotations: Any | None = None - description: str | None = None - icons: list | None = None - mime_type: Annotated[str | None, Field(alias="mimeType")] = None - name: str 
- size: int | None = None - title: str | None = None - uri: str + byte_range: Annotated[ + ByteRange, + Field( + alias="byteRange", + description="Byte range in the parent `text` buffer that this element occupies.", + ), + ] + placeholder: Annotated[ + str | None, + Field( + description="Optional human-readable placeholder for the element, displayed in the UI." + ), + ] = None -class ResourceTemplate(BaseModel): +class TextPosition(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - annotations: Any | None = None - description: str | None = None - mime_type: Annotated[str | None, Field(alias="mimeType")] = None - name: str - title: str | None = None - uri_template: Annotated[str, Field(alias="uriTemplate")] + column: Annotated[ + int, + Field(description="1-based column number (in Unicode scalar values).", ge=0), + ] + line: Annotated[int, Field(description="1-based line number.", ge=0)] -class MessageResponseItem(BaseModel): +class TextRange(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - content: list[ContentItem] - end_turn: bool | None = None - id: str | None = None - phase: MessagePhase | None = None - role: str - type: Annotated[Literal["message"], Field(title="MessageResponseItemType")] + end: TextPosition + start: TextPosition -class ReasoningResponseItem(BaseModel): +class ThreadActiveFlag(Enum): + waiting_on_approval = "waitingOnApproval" + waiting_on_user_input = "waitingOnUserInput" + + +class ThreadArchiveParams(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - content: list[ReasoningItemContent] | None = None - encrypted_content: str | None = None - id: str - summary: list[ReasoningItemReasoningSummary] - type: Annotated[Literal["reasoning"], Field(title="ReasoningResponseItemType")] + thread_id: Annotated[str, Field(alias="threadId")] -class LocalShellCallResponseItem(BaseModel): +class ThreadArchiveResponse(BaseModel): + pass model_config = ConfigDict( populate_by_name=True, ) - action: 
LocalShellAction - call_id: Annotated[ - str | None, Field(description="Set when using the Responses API.") - ] = None - id: Annotated[ - str | None, - Field( - description="Legacy id field retained for compatibility with older payloads." - ), - ] = None - status: LocalShellStatus - type: Annotated[ - Literal["local_shell_call"], Field(title="LocalShellCallResponseItemType") - ] -class FunctionCallResponseItem(BaseModel): +class ThreadArchivedNotification(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - arguments: str - call_id: str - id: str | None = None - name: str - namespace: str | None = None - type: Annotated[ - Literal["function_call"], Field(title="FunctionCallResponseItemType") - ] + thread_id: Annotated[str, Field(alias="threadId")] -class ToolSearchCallResponseItem(BaseModel): +class ThreadClosedNotification(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - arguments: Any - call_id: str | None = None - execution: str - id: str | None = None - status: str | None = None - type: Annotated[ - Literal["tool_search_call"], Field(title="ToolSearchCallResponseItemType") - ] + thread_id: Annotated[str, Field(alias="threadId")] -class CustomToolCallResponseItem(BaseModel): +class ThreadCompactStartParams(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - call_id: str - id: str | None = None - input: str - name: str - status: str | None = None - type: Annotated[ - Literal["custom_tool_call"], Field(title="CustomToolCallResponseItemType") - ] + thread_id: Annotated[str, Field(alias="threadId")] -class ToolSearchOutputResponseItem(BaseModel): +class ThreadCompactStartResponse(BaseModel): + pass model_config = ConfigDict( populate_by_name=True, ) - call_id: str | None = None - execution: str - status: str - tools: list - type: Annotated[ - Literal["tool_search_output"], Field(title="ToolSearchOutputResponseItemType") - ] -class ImageGenerationCallResponseItem(BaseModel): +class ThreadForkParams(BaseModel): model_config 
= ConfigDict( populate_by_name=True, ) - id: str - result: str - revised_prompt: str | None = None - status: str - type: Annotated[ - Literal["image_generation_call"], - Field(title="ImageGenerationCallResponseItemType"), - ] + approval_policy: Annotated[AskForApproval | None, Field(alias="approvalPolicy")] = ( + None + ) + base_instructions: Annotated[str | None, Field(alias="baseInstructions")] = None + config: dict[str, Any] | None = None + cwd: str | None = None + developer_instructions: Annotated[ + str | None, Field(alias="developerInstructions") + ] = None + ephemeral: bool | None = None + model: Annotated[ + str | None, + Field(description="Configuration overrides for the forked thread, if any."), + ] = None + model_provider: Annotated[str | None, Field(alias="modelProvider")] = None + sandbox: SandboxMode | None = None + service_tier: Annotated[ServiceTier | None, Field(alias="serviceTier")] = None + thread_id: Annotated[str, Field(alias="threadId")] -class GhostSnapshotResponseItem(BaseModel): +class ThreadId(RootModel[str]): model_config = ConfigDict( populate_by_name=True, ) - ghost_commit: GhostCommit - type: Annotated[ - Literal["ghost_snapshot"], Field(title="GhostSnapshotResponseItemType") - ] + root: str -class CompactionResponseItem(BaseModel): +class AgentMessageThreadItem(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - encrypted_content: str - type: Annotated[Literal["compaction"], Field(title="CompactionResponseItemType")] + id: str + phase: MessagePhase | None = None + text: str + type: Annotated[Literal["agentMessage"], Field(title="AgentMessageThreadItemType")] -class OtherResponseItem(BaseModel): +class PlanThreadItem(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - type: Annotated[Literal["other"], Field(title="OtherResponseItemType")] + id: str + text: str + type: Annotated[Literal["plan"], Field(title="PlanThreadItemType")] -class SearchResponsesApiWebSearchAction(BaseModel): +class 
ReasoningThreadItem(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - queries: list[str] | None = None - query: str | None = None - type: Annotated[ - Literal["search"], Field(title="SearchResponsesApiWebSearchActionType") - ] + content: list[str] | None = [] + id: str + summary: list[str] | None = [] + type: Annotated[Literal["reasoning"], Field(title="ReasoningThreadItemType")] -class OpenPageResponsesApiWebSearchAction(BaseModel): +class CommandExecutionThreadItem(BaseModel): model_config = ConfigDict( populate_by_name=True, ) + aggregated_output: Annotated[ + str | None, + Field( + alias="aggregatedOutput", + description="The command's output, aggregated from stdout and stderr.", + ), + ] = None + command: Annotated[str, Field(description="The command to be executed.")] + command_actions: Annotated[ + list[CommandAction], + Field( + alias="commandActions", + description="A best-effort parsing of the command to understand the action(s) it will perform. This returns a list of CommandAction objects because a single shell command may be composed of many commands piped together.", + ), + ] + cwd: Annotated[str, Field(description="The command's working directory.")] + duration_ms: Annotated[ + int | None, + Field( + alias="durationMs", + description="The duration of the command execution in milliseconds.", + ), + ] = None + exit_code: Annotated[ + int | None, Field(alias="exitCode", description="The command's exit code.") + ] = None + id: str + process_id: Annotated[ + str | None, + Field( + alias="processId", + description="Identifier for the underlying PTY process (when available).", + ), + ] = None + status: CommandExecutionStatus type: Annotated[ - Literal["open_page"], Field(title="OpenPageResponsesApiWebSearchActionType") + Literal["commandExecution"], Field(title="CommandExecutionThreadItemType") ] - url: str | None = None -class FindInPageResponsesApiWebSearchAction(BaseModel): +class McpToolCallThreadItem(BaseModel): model_config = 
ConfigDict( populate_by_name=True, ) - pattern: str | None = None - type: Annotated[ - Literal["find_in_page"], - Field(title="FindInPageResponsesApiWebSearchActionType"), - ] - url: str | None = None + arguments: Any + duration_ms: Annotated[ + int | None, + Field( + alias="durationMs", + description="The duration of the MCP tool call in milliseconds.", + ), + ] = None + error: McpToolCallError | None = None + id: str + result: McpToolCallResult | None = None + server: str + status: McpToolCallStatus + tool: str + type: Annotated[Literal["mcpToolCall"], Field(title="McpToolCallThreadItemType")] -class OtherResponsesApiWebSearchAction(BaseModel): +class DynamicToolCallThreadItem(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - type: Annotated[ - Literal["other"], Field(title="OtherResponsesApiWebSearchActionType") - ] - - -class ResponsesApiWebSearchAction( - RootModel[ - SearchResponsesApiWebSearchAction - | OpenPageResponsesApiWebSearchAction - | FindInPageResponsesApiWebSearchAction - | OtherResponsesApiWebSearchAction + arguments: Any + content_items: Annotated[ + list[DynamicToolCallOutputContentItem] | None, Field(alias="contentItems") + ] = None + duration_ms: Annotated[ + int | None, + Field( + alias="durationMs", + description="The duration of the dynamic tool call in milliseconds.", + ), + ] = None + id: str + status: DynamicToolCallStatus + success: bool | None = None + tool: str + type: Annotated[ + Literal["dynamicToolCall"], Field(title="DynamicToolCallThreadItemType") ] -): - model_config = ConfigDict( - populate_by_name=True, - ) - root: ( - SearchResponsesApiWebSearchAction - | OpenPageResponsesApiWebSearchAction - | FindInPageResponsesApiWebSearchAction - | OtherResponsesApiWebSearchAction - ) - - -class OkResultOfCallToolResultOrString(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - ok: Annotated[CallToolResult, Field(alias="Ok")] - - -class ErrResultOfCallToolResultOrString(BaseModel): - model_config = 
ConfigDict( - populate_by_name=True, - ) - err: Annotated[str, Field(alias="Err")] -class ResultOfCallToolResultOrString( - RootModel[OkResultOfCallToolResultOrString | ErrResultOfCallToolResultOrString] -): +class ImageViewThreadItem(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - root: OkResultOfCallToolResultOrString | ErrResultOfCallToolResultOrString + id: str + path: str + type: Annotated[Literal["imageView"], Field(title="ImageViewThreadItemType")] -class ApprovedExecpolicyAmendment(BaseModel): +class ImageGenerationThreadItem(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - proposed_execpolicy_amendment: list[str] + id: str + result: str + revised_prompt: Annotated[str | None, Field(alias="revisedPrompt")] = None + status: str + type: Annotated[ + Literal["imageGeneration"], Field(title="ImageGenerationThreadItemType") + ] -class ApprovedExecpolicyAmendmentReviewDecision(BaseModel): +class EnteredReviewModeThreadItem(BaseModel): model_config = ConfigDict( - extra="forbid", populate_by_name=True, ) - approved_execpolicy_amendment: ApprovedExecpolicyAmendment - - -class ReviewDelivery(Enum): - inline = "inline" - detached = "detached" + id: str + review: str + type: Annotated[ + Literal["enteredReviewMode"], Field(title="EnteredReviewModeThreadItemType") + ] -class ReviewLineRange(BaseModel): +class ExitedReviewModeThreadItem(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - end: Annotated[int, Field(ge=0)] - start: Annotated[int, Field(ge=0)] + id: str + review: str + type: Annotated[ + Literal["exitedReviewMode"], Field(title="ExitedReviewModeThreadItemType") + ] -class UncommittedChangesReviewTarget(BaseModel): +class ContextCompactionThreadItem(BaseModel): model_config = ConfigDict( populate_by_name=True, ) + id: str type: Annotated[ - Literal["uncommittedChanges"], Field(title="UncommittedChangesReviewTargetType") + Literal["contextCompaction"], Field(title="ContextCompactionThreadItemType") ] -class 
BaseBranchReviewTarget(BaseModel): +class ThreadLoadedListParams(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - branch: str - type: Annotated[Literal["baseBranch"], Field(title="BaseBranchReviewTargetType")] + cursor: Annotated[ + str | None, + Field(description="Opaque pagination cursor returned by a previous call."), + ] = None + limit: Annotated[ + int | None, Field(description="Optional page size; defaults to no limit.", ge=0) + ] = None -class CommitReviewTarget(BaseModel): +class ThreadLoadedListResponse(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - sha: str - title: Annotated[ + data: Annotated[ + list[str], + Field(description="Thread ids for sessions currently loaded in memory."), + ] + next_cursor: Annotated[ str | None, Field( - description="Optional human-readable label (e.g., commit subject) for UIs." + alias="nextCursor", + description="Opaque cursor to pass to the next call to continue after the last item. if None, there are no more items to return.", ), ] = None - type: Annotated[Literal["commit"], Field(title="CommitReviewTargetType")] -class CustomReviewTarget(BaseModel): +class ThreadMetadataGitInfoUpdateParams(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - instructions: str - type: Annotated[Literal["custom"], Field(title="CustomReviewTargetType")] + branch: Annotated[ + str | None, + Field( + description="Omit to leave the stored branch unchanged, set to `null` to clear it, or provide a non-empty string to replace it." + ), + ] = None + origin_url: Annotated[ + str | None, + Field( + alias="originUrl", + description="Omit to leave the stored origin URL unchanged, set to `null` to clear it, or provide a non-empty string to replace it.", + ), + ] = None + sha: Annotated[ + str | None, + Field( + description="Omit to leave the stored commit unchanged, set to `null` to clear it, or provide a non-empty string to replace it." 
+ ), + ] = None -class ReviewTarget( - RootModel[ - UncommittedChangesReviewTarget - | BaseBranchReviewTarget - | CommitReviewTarget - | CustomReviewTarget - ] -): +class ThreadMetadataUpdateParams(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - root: ( - UncommittedChangesReviewTarget - | BaseBranchReviewTarget - | CommitReviewTarget - | CustomReviewTarget - ) - - -class SandboxMode(Enum): - read_only = "read-only" - workspace_write = "workspace-write" - danger_full_access = "danger-full-access" + git_info: Annotated[ + ThreadMetadataGitInfoUpdateParams | None, + Field( + alias="gitInfo", + description="Patch the stored Git metadata for this thread. Omit a field to leave it unchanged, set it to `null` to clear it, or provide a string to replace the stored value.", + ), + ] = None + thread_id: Annotated[str, Field(alias="threadId")] -class DangerFullAccessSandboxPolicy(BaseModel): +class ThreadNameUpdatedNotification(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - type: Annotated[ - Literal["dangerFullAccess"], Field(title="DangerFullAccessSandboxPolicyType") - ] + thread_id: Annotated[str, Field(alias="threadId")] + thread_name: Annotated[str | None, Field(alias="threadName")] = None -class ReadOnlySandboxPolicy(BaseModel): +class ThreadReadParams(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - access: Annotated[ReadOnlyAccess | None, Field()] = {"type": "fullAccess"} - network_access: Annotated[bool | None, Field(alias="networkAccess")] = False - type: Annotated[Literal["readOnly"], Field(title="ReadOnlySandboxPolicyType")] + include_turns: Annotated[ + bool | None, + Field( + alias="includeTurns", + description="When true, include turns and their items from rollout history.", + ), + ] = False + thread_id: Annotated[str, Field(alias="threadId")] -class ExternalSandboxSandboxPolicy(BaseModel): +class ThreadRealtimeAudioChunk(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - network_access: 
Annotated[NetworkAccess | None, Field(alias="networkAccess")] = ( - "restricted" - ) - type: Annotated[ - Literal["externalSandbox"], Field(title="ExternalSandboxSandboxPolicyType") - ] + data: str + num_channels: Annotated[int, Field(alias="numChannels", ge=0)] + sample_rate: Annotated[int, Field(alias="sampleRate", ge=0)] + samples_per_channel: Annotated[ + int | None, Field(alias="samplesPerChannel", ge=0) + ] = None -class WorkspaceWriteSandboxPolicy(BaseModel): +class ThreadRealtimeClosedNotification(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - exclude_slash_tmp: Annotated[bool | None, Field(alias="excludeSlashTmp")] = False - exclude_tmpdir_env_var: Annotated[ - bool | None, Field(alias="excludeTmpdirEnvVar") - ] = False - network_access: Annotated[bool | None, Field(alias="networkAccess")] = False - read_only_access: Annotated[ - ReadOnlyAccess | None, Field(alias="readOnlyAccess") - ] = {"type": "fullAccess"} - type: Annotated[ - Literal["workspaceWrite"], Field(title="WorkspaceWriteSandboxPolicyType") - ] - writable_roots: Annotated[ - list[AbsolutePathBuf] | None, Field(alias="writableRoots") - ] = [] + reason: str | None = None + thread_id: Annotated[str, Field(alias="threadId")] -class SandboxPolicy( - RootModel[ - DangerFullAccessSandboxPolicy - | ReadOnlySandboxPolicy - | ExternalSandboxSandboxPolicy - | WorkspaceWriteSandboxPolicy - ] -): +class ThreadRealtimeErrorNotification(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - root: ( - DangerFullAccessSandboxPolicy - | ReadOnlySandboxPolicy - | ExternalSandboxSandboxPolicy - | WorkspaceWriteSandboxPolicy - ) + message: str + thread_id: Annotated[str, Field(alias="threadId")] -class SandboxWorkspaceWrite(BaseModel): +class ThreadRealtimeItemAddedNotification(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - exclude_slash_tmp: bool | None = False - exclude_tmpdir_env_var: bool | None = False - network_access: bool | None = False - writable_roots: 
list[str] | None = [] + item: Any + thread_id: Annotated[str, Field(alias="threadId")] -class ItemAgentMessageDeltaServerNotification(BaseModel): +class ThreadRealtimeOutputAudioDeltaNotification(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - method: Annotated[ - Literal["item/agentMessage/delta"], - Field(title="Item/agentMessage/deltaNotificationMethod"), - ] - params: AgentMessageDeltaNotification + audio: ThreadRealtimeAudioChunk + thread_id: Annotated[str, Field(alias="threadId")] -class ItemPlanDeltaServerNotification(BaseModel): +class ThreadRealtimeStartedNotification(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - method: Annotated[ - Literal["item/plan/delta"], Field(title="Item/plan/deltaNotificationMethod") - ] - params: PlanDeltaNotification + session_id: Annotated[str | None, Field(alias="sessionId")] = None + thread_id: Annotated[str, Field(alias="threadId")] -class ItemCommandExecutionOutputDeltaServerNotification(BaseModel): +class ThreadResumeParams(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - method: Annotated[ - Literal["item/commandExecution/outputDelta"], - Field(title="Item/commandExecution/outputDeltaNotificationMethod"), - ] - params: CommandExecutionOutputDeltaNotification + approval_policy: Annotated[AskForApproval | None, Field(alias="approvalPolicy")] = ( + None + ) + base_instructions: Annotated[str | None, Field(alias="baseInstructions")] = None + config: dict[str, Any] | None = None + cwd: str | None = None + developer_instructions: Annotated[ + str | None, Field(alias="developerInstructions") + ] = None + model: Annotated[ + str | None, + Field(description="Configuration overrides for the resumed thread, if any."), + ] = None + model_provider: Annotated[str | None, Field(alias="modelProvider")] = None + personality: Personality | None = None + sandbox: SandboxMode | None = None + service_tier: Annotated[ServiceTier | None, Field(alias="serviceTier")] = None + thread_id: 
Annotated[str, Field(alias="threadId")] -class ItemFileChangeOutputDeltaServerNotification(BaseModel): +class ThreadRollbackParams(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - method: Annotated[ - Literal["item/fileChange/outputDelta"], - Field(title="Item/fileChange/outputDeltaNotificationMethod"), + num_turns: Annotated[ + int, + Field( + alias="numTurns", + description="The number of turns to drop from the end of the thread. Must be >= 1.\n\nThis only modifies the thread's history and does not revert local file changes that have been made by the agent. Clients are responsible for reverting these changes.", + ge=0, + ), ] - params: FileChangeOutputDeltaNotification + thread_id: Annotated[str, Field(alias="threadId")] -class ItemMcpToolCallProgressServerNotification(BaseModel): +class ThreadSetNameParams(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - method: Annotated[ - Literal["item/mcpToolCall/progress"], - Field(title="Item/mcpToolCall/progressNotificationMethod"), - ] - params: McpToolCallProgressNotification + name: str + thread_id: Annotated[str, Field(alias="threadId")] -class McpServerOauthLoginCompletedServerNotification(BaseModel): +class ThreadSetNameResponse(BaseModel): + pass model_config = ConfigDict( populate_by_name=True, ) - method: Annotated[ - Literal["mcpServer/oauthLogin/completed"], - Field(title="McpServer/oauthLogin/completedNotificationMethod"), - ] - params: McpServerOauthLoginCompletedNotification -class ItemReasoningSummaryTextDeltaServerNotification(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - method: Annotated[ - Literal["item/reasoning/summaryTextDelta"], - Field(title="Item/reasoning/summaryTextDeltaNotificationMethod"), - ] - params: ReasoningSummaryTextDeltaNotification +class ThreadSortKey(Enum): + created_at = "created_at" + updated_at = "updated_at" -class ItemReasoningSummaryPartAddedServerNotification(BaseModel): - model_config = ConfigDict( - 
populate_by_name=True, - ) - method: Annotated[ - Literal["item/reasoning/summaryPartAdded"], - Field(title="Item/reasoning/summaryPartAddedNotificationMethod"), - ] - params: ReasoningSummaryPartAddedNotification +class ThreadSourceKind(Enum): + cli = "cli" + vscode = "vscode" + exec = "exec" + app_server = "appServer" + sub_agent = "subAgent" + sub_agent_review = "subAgentReview" + sub_agent_compact = "subAgentCompact" + sub_agent_thread_spawn = "subAgentThreadSpawn" + sub_agent_other = "subAgentOther" + unknown = "unknown" -class ItemReasoningTextDeltaServerNotification(BaseModel): +class ThreadStartParams(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - method: Annotated[ - Literal["item/reasoning/textDelta"], - Field(title="Item/reasoning/textDeltaNotificationMethod"), - ] - params: ReasoningTextDeltaNotification + approval_policy: Annotated[AskForApproval | None, Field(alias="approvalPolicy")] = ( + None + ) + base_instructions: Annotated[str | None, Field(alias="baseInstructions")] = None + config: dict[str, Any] | None = None + cwd: str | None = None + developer_instructions: Annotated[ + str | None, Field(alias="developerInstructions") + ] = None + ephemeral: bool | None = None + model: str | None = None + model_provider: Annotated[str | None, Field(alias="modelProvider")] = None + personality: Personality | None = None + sandbox: SandboxMode | None = None + service_name: Annotated[str | None, Field(alias="serviceName")] = None + service_tier: Annotated[ServiceTier | None, Field(alias="serviceTier")] = None -class ThreadCompactedServerNotification(BaseModel): +class NotLoadedThreadStatus(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - method: Annotated[ - Literal["thread/compacted"], Field(title="Thread/compactedNotificationMethod") - ] - params: ContextCompactedNotification + type: Annotated[Literal["notLoaded"], Field(title="NotLoadedThreadStatusType")] -class ModelReroutedServerNotification(BaseModel): +class 
IdleThreadStatus(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - method: Annotated[ - Literal["model/rerouted"], Field(title="Model/reroutedNotificationMethod") - ] - params: ModelReroutedNotification + type: Annotated[Literal["idle"], Field(title="IdleThreadStatusType")] -class DeprecationNoticeServerNotification(BaseModel): +class SystemErrorThreadStatus(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - method: Annotated[ - Literal["deprecationNotice"], Field(title="DeprecationNoticeNotificationMethod") - ] - params: DeprecationNoticeNotification + type: Annotated[Literal["systemError"], Field(title="SystemErrorThreadStatusType")] -class FuzzyFileSearchSessionUpdatedServerNotification(BaseModel): +class ActiveThreadStatus(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - method: Annotated[ - Literal["fuzzyFileSearch/sessionUpdated"], - Field(title="FuzzyFileSearch/sessionUpdatedNotificationMethod"), - ] - params: FuzzyFileSearchSessionUpdatedNotification + active_flags: Annotated[list[ThreadActiveFlag], Field(alias="activeFlags")] + type: Annotated[Literal["active"], Field(title="ActiveThreadStatusType")] -class FuzzyFileSearchSessionCompletedServerNotification(BaseModel): +class ThreadStatus( + RootModel[ + NotLoadedThreadStatus + | IdleThreadStatus + | SystemErrorThreadStatus + | ActiveThreadStatus + ] +): model_config = ConfigDict( populate_by_name=True, ) - method: Annotated[ - Literal["fuzzyFileSearch/sessionCompleted"], - Field(title="FuzzyFileSearch/sessionCompletedNotificationMethod"), - ] - params: FuzzyFileSearchSessionCompletedNotification + root: ( + NotLoadedThreadStatus + | IdleThreadStatus + | SystemErrorThreadStatus + | ActiveThreadStatus + ) -class AccountLoginCompletedServerNotification(BaseModel): +class ThreadStatusChangedNotification(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - method: Annotated[ - Literal["account/login/completed"], - 
Field(title="Account/login/completedNotificationMethod"), - ] - params: AccountLoginCompletedNotification + status: ThreadStatus + thread_id: Annotated[str, Field(alias="threadId")] -class ServerRequestResolvedNotification(BaseModel): +class ThreadUnarchiveParams(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - request_id: Annotated[RequestId, Field(alias="requestId")] thread_id: Annotated[str, Field(alias="threadId")] -class ServiceTier(Enum): - fast = "fast" - flex = "flex" - - -class SessionNetworkProxyRuntime(BaseModel): +class ThreadUnarchivedNotification(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - http_addr: str - socks_addr: str - - -class SessionSourceValue(Enum): - cli = "cli" - vscode = "vscode" - exec = "exec" - app_server = "appServer" - unknown = "unknown" + thread_id: Annotated[str, Field(alias="threadId")] -class Settings(BaseModel): +class ThreadUnsubscribeParams(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - developer_instructions: str | None = None - model: str - reasoning_effort: ReasoningEffort | None = None + thread_id: Annotated[str, Field(alias="threadId")] -class SkillErrorInfo(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - message: str - path: str +class ThreadUnsubscribeStatus(Enum): + not_loaded = "notLoaded" + not_subscribed = "notSubscribed" + unsubscribed = "unsubscribed" -class SkillInterface(BaseModel): +class TokenUsageBreakdown(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - brand_color: Annotated[str | None, Field(alias="brandColor")] = None - default_prompt: Annotated[str | None, Field(alias="defaultPrompt")] = None - display_name: Annotated[str | None, Field(alias="displayName")] = None - icon_large: Annotated[str | None, Field(alias="iconLarge")] = None - icon_small: Annotated[str | None, Field(alias="iconSmall")] = None - short_description: Annotated[str | None, Field(alias="shortDescription")] = None - - -class 
SkillScope(Enum): - user = "user" - repo = "repo" - system = "system" - admin = "admin" + cached_input_tokens: Annotated[int, Field(alias="cachedInputTokens")] + input_tokens: Annotated[int, Field(alias="inputTokens")] + output_tokens: Annotated[int, Field(alias="outputTokens")] + reasoning_output_tokens: Annotated[int, Field(alias="reasoningOutputTokens")] + total_tokens: Annotated[int, Field(alias="totalTokens")] -class SkillToolDependency(BaseModel): +class Tool(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - command: str | None = None + field_meta: Annotated[Any | None, Field(alias="_meta")] = None + annotations: Any | None = None description: str | None = None - transport: str | None = None - type: str - url: str | None = None - value: str + icons: list | None = None + input_schema: Annotated[Any, Field(alias="inputSchema")] + name: str + output_schema: Annotated[Any | None, Field(alias="outputSchema")] = None + title: str | None = None -class SkillsChangedNotification(BaseModel): - pass +class TurnDiffUpdatedNotification(BaseModel): model_config = ConfigDict( populate_by_name=True, ) + diff: str + thread_id: Annotated[str, Field(alias="threadId")] + turn_id: Annotated[str, Field(alias="turnId")] -class SkillsConfigWriteParams(BaseModel): +class TurnError(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - enabled: bool - path: str + additional_details: Annotated[str | None, Field(alias="additionalDetails")] = None + codex_error_info: Annotated[ + CodexErrorInfo | None, Field(alias="codexErrorInfo") + ] = None + message: str -class SkillsConfigWriteResponse(BaseModel): +class TurnInterruptParams(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - effective_enabled: Annotated[bool, Field(alias="effectiveEnabled")] + thread_id: Annotated[str, Field(alias="threadId")] + turn_id: Annotated[str, Field(alias="turnId")] -class SkillsListExtraRootsForCwd(BaseModel): +class TurnInterruptResponse(BaseModel): + pass 
model_config = ConfigDict( populate_by_name=True, ) - cwd: str - extra_user_roots: Annotated[list[str], Field(alias="extraUserRoots")] -class SkillsListParams(BaseModel): +class TurnPlanStepStatus(Enum): + pending = "pending" + in_progress = "inProgress" + completed = "completed" + + +class TurnStatus(Enum): + completed = "completed" + interrupted = "interrupted" + failed = "failed" + in_progress = "inProgress" + + +class TurnSteerResponse(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - cwds: Annotated[ - list[str] | None, - Field( - description="When empty, defaults to the current session working directory." - ), - ] = None - force_reload: Annotated[ - bool | None, - Field( - alias="forceReload", - description="When true, bypass the skills cache and re-scan skills from disk.", - ), - ] = None - per_cwd_extra_user_roots: Annotated[ - list[SkillsListExtraRootsForCwd] | None, + turn_id: Annotated[str, Field(alias="turnId")] + + +class TextUserInput(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + text: str + text_elements: Annotated[ + list[TextElement] | None, Field( - alias="perCwdExtraUserRoots", - description="Optional per-cwd extra roots to scan as user-scoped skills.", + description="UI-defined spans within `text` used to render or persist special elements." 
), - ] = None + ] = [] + type: Annotated[Literal["text"], Field(title="TextUserInputType")] -class SkillsRemoteReadParams(BaseModel): +class ImageUserInput(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - enabled: bool | None = False - hazelnut_scope: Annotated[HazelnutScope | None, Field(alias="hazelnutScope")] = ( - "example" - ) - product_surface: Annotated[ProductSurface | None, Field(alias="productSurface")] = ( - "codex" - ) + type: Annotated[Literal["image"], Field(title="ImageUserInputType")] + url: str -class SkillsRemoteReadResponse(BaseModel): +class LocalImageUserInput(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - data: list[RemoteSkillSummary] + path: str + type: Annotated[Literal["localImage"], Field(title="LocalImageUserInputType")] -class SkillsRemoteWriteParams(BaseModel): +class SkillUserInput(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - hazelnut_id: Annotated[str, Field(alias="hazelnutId")] + name: str + path: str + type: Annotated[Literal["skill"], Field(title="SkillUserInputType")] -class SkillsRemoteWriteResponse(BaseModel): +class MentionUserInput(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - id: str + name: str path: str + type: Annotated[Literal["mention"], Field(title="MentionUserInputType")] -class StepStatus(Enum): - pending = "pending" - in_progress = "in_progress" - completed = "completed" +class UserInput( + RootModel[ + TextUserInput + | ImageUserInput + | LocalImageUserInput + | SkillUserInput + | MentionUserInput + ] +): + model_config = ConfigDict( + populate_by_name=True, + ) + root: ( + TextUserInput + | ImageUserInput + | LocalImageUserInput + | SkillUserInput + | MentionUserInput + ) -class SubAgentSourceValue(Enum): - review = "review" - compact = "compact" - memory_consolidation = "memory_consolidation" +class Verbosity(Enum): + low = "low" + medium = "medium" + high = "high" -class OtherSubAgentSource(BaseModel): +class 
SearchWebSearchAction(BaseModel): model_config = ConfigDict( - extra="forbid", populate_by_name=True, ) - other: str + queries: list[str] | None = None + query: str | None = None + type: Annotated[Literal["search"], Field(title="SearchWebSearchActionType")] -class TerminalInteractionNotification(BaseModel): +class OpenPageWebSearchAction(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - item_id: Annotated[str, Field(alias="itemId")] - process_id: Annotated[str, Field(alias="processId")] - stdin: str - thread_id: Annotated[str, Field(alias="threadId")] - turn_id: Annotated[str, Field(alias="turnId")] + type: Annotated[Literal["openPage"], Field(title="OpenPageWebSearchActionType")] + url: str | None = None -class TextElement(BaseModel): +class FindInPageWebSearchAction(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - byte_range: Annotated[ - ByteRange, - Field( - alias="byteRange", - description="Byte range in the parent `text` buffer that this element occupies.", - ), - ] - placeholder: Annotated[ - str | None, - Field( - description="Optional human-readable placeholder for the element, displayed in the UI." 
- ), - ] = None + pattern: str | None = None + type: Annotated[Literal["findInPage"], Field(title="FindInPageWebSearchActionType")] + url: str | None = None -class TextPosition(BaseModel): +class OtherWebSearchAction(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - column: Annotated[ - int, - Field(description="1-based column number (in Unicode scalar values).", ge=0), - ] - line: Annotated[int, Field(description="1-based line number.", ge=0)] + type: Annotated[Literal["other"], Field(title="OtherWebSearchActionType")] -class TextRange(BaseModel): +class WebSearchAction( + RootModel[ + SearchWebSearchAction + | OpenPageWebSearchAction + | FindInPageWebSearchAction + | OtherWebSearchAction + ] +): model_config = ConfigDict( populate_by_name=True, ) - end: TextPosition - start: TextPosition + root: ( + SearchWebSearchAction + | OpenPageWebSearchAction + | FindInPageWebSearchAction + | OtherWebSearchAction + ) -class ThreadActiveFlag(Enum): - waiting_on_approval = "waitingOnApproval" - waiting_on_user_input = "waitingOnUserInput" +class WebSearchContextSize(Enum): + low = "low" + medium = "medium" + high = "high" -class ThreadArchiveParams(BaseModel): +class WebSearchLocation(BaseModel): model_config = ConfigDict( + extra="forbid", populate_by_name=True, ) - thread_id: Annotated[str, Field(alias="threadId")] + city: str | None = None + country: str | None = None + region: str | None = None + timezone: str | None = None -class ThreadArchiveResponse(BaseModel): - pass +class WebSearchMode(Enum): + disabled = "disabled" + cached = "cached" + live = "live" + + +class WebSearchToolConfig(BaseModel): model_config = ConfigDict( + extra="forbid", populate_by_name=True, ) + allowed_domains: list[str] | None = None + context_size: WebSearchContextSize | None = None + location: WebSearchLocation | None = None -class ThreadArchivedNotification(BaseModel): +class WindowsSandboxSetupMode(Enum): + elevated = "elevated" + unelevated = "unelevated" + + +class 
WindowsSandboxSetupStartParams(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - thread_id: Annotated[str, Field(alias="threadId")] + cwd: AbsolutePathBuf | None = None + mode: WindowsSandboxSetupMode -class ThreadClosedNotification(BaseModel): +class WindowsSandboxSetupStartResponse(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - thread_id: Annotated[str, Field(alias="threadId")] + started: bool -class ThreadCompactStartParams(BaseModel): +class WindowsWorldWritableWarningNotification(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - thread_id: Annotated[str, Field(alias="threadId")] + extra_count: Annotated[int, Field(alias="extraCount", ge=0)] + failed_scan: Annotated[bool, Field(alias="failedScan")] + sample_paths: Annotated[list[str], Field(alias="samplePaths")] -class ThreadCompactStartResponse(BaseModel): - pass +class WriteStatus(Enum): + ok = "ok" + ok_overridden = "okOverridden" + + +class ChatgptAccount(BaseModel): model_config = ConfigDict( populate_by_name=True, ) + email: str + plan_type: Annotated[PlanType, Field(alias="planType")] + type: Annotated[Literal["chatgpt"], Field(title="ChatgptAccountType")] -class ThreadForkParams(BaseModel): +class Account(RootModel[ApiKeyAccount | ChatgptAccount]): model_config = ConfigDict( populate_by_name=True, ) - approval_policy: Annotated[AskForApproval | None, Field(alias="approvalPolicy")] = ( - None - ) - base_instructions: Annotated[str | None, Field(alias="baseInstructions")] = None - config: dict[str, Any] | None = None - cwd: str | None = None - developer_instructions: Annotated[ - str | None, Field(alias="developerInstructions") - ] = None - ephemeral: bool | None = None - model: Annotated[ - str | None, - Field(description="Configuration overrides for the forked thread, if any."), - ] = None - model_provider: Annotated[str | None, Field(alias="modelProvider")] = None - sandbox: SandboxMode | None = None - service_tier: Annotated[ServiceTier | None, 
Field(alias="serviceTier")] = None - thread_id: Annotated[str, Field(alias="threadId")] + root: ApiKeyAccount | ChatgptAccount -class ThreadId(RootModel[str]): +class AccountUpdatedNotification(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - root: str + auth_mode: Annotated[AuthMode | None, Field(alias="authMode")] = None + plan_type: Annotated[PlanType | None, Field(alias="planType")] = None -class AgentMessageThreadItem(BaseModel): +class AppConfig(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - id: str - phase: MessagePhase | None = None - text: str - type: Annotated[Literal["agentMessage"], Field(title="AgentMessageThreadItemType")] - + default_tools_approval_mode: AppToolApproval | None = None + default_tools_enabled: bool | None = None + destructive_enabled: bool | None = None + enabled: bool | None = True + open_world_enabled: bool | None = None + tools: AppToolsConfig | None = None -class PlanThreadItem(BaseModel): + +class AppMetadata(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - id: str - text: str - type: Annotated[Literal["plan"], Field(title="PlanThreadItemType")] + categories: list[str] | None = None + developer: str | None = None + first_party_requires_install: Annotated[ + bool | None, Field(alias="firstPartyRequiresInstall") + ] = None + first_party_type: Annotated[str | None, Field(alias="firstPartyType")] = None + review: AppReview | None = None + screenshots: list[AppScreenshot] | None = None + seo_description: Annotated[str | None, Field(alias="seoDescription")] = None + show_in_composer_when_unlinked: Annotated[ + bool | None, Field(alias="showInComposerWhenUnlinked") + ] = None + sub_categories: Annotated[list[str] | None, Field(alias="subCategories")] = None + version: str | None = None + version_id: Annotated[str | None, Field(alias="versionId")] = None + version_notes: Annotated[str | None, Field(alias="versionNotes")] = None -class ReasoningThreadItem(BaseModel): +class 
AppsConfig(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - content: list[str] | None = [] - id: str - summary: list[str] | None = [] - type: Annotated[Literal["reasoning"], Field(title="ReasoningThreadItemType")] + field_default: Annotated[AppsDefaultConfig | None, Field(alias="_default")] = None -class CommandExecutionThreadItem(BaseModel): +class CancelLoginAccountResponse(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - aggregated_output: Annotated[ - str | None, - Field( - alias="aggregatedOutput", - description="The command's output, aggregated from stdout and stderr.", - ), - ] = None - command: Annotated[str, Field(description="The command to be executed.")] - command_actions: Annotated[ - list[CommandAction], - Field( - alias="commandActions", - description="A best-effort parsing of the command to understand the action(s) it will perform. This returns a list of CommandAction objects because a single shell command may be composed of many commands piped together.", - ), - ] - cwd: Annotated[str, Field(description="The command's working directory.")] - duration_ms: Annotated[ - int | None, - Field( - alias="durationMs", - description="The duration of the command execution in milliseconds.", - ), - ] = None - exit_code: Annotated[ - int | None, Field(alias="exitCode", description="The command's exit code.") - ] = None - id: str - process_id: Annotated[ - str | None, - Field( - alias="processId", - description="Identifier for the underlying PTY process (when available).", - ), - ] = None - status: CommandExecutionStatus - type: Annotated[ - Literal["commandExecution"], Field(title="CommandExecutionThreadItemType") - ] + status: CancelLoginAccountStatus -class McpToolCallThreadItem(BaseModel): +class InitializeRequest(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - arguments: Any - duration_ms: Annotated[ - int | None, - Field( - alias="durationMs", - description="The duration of the MCP tool call in 
milliseconds.", - ), - ] = None - error: McpToolCallError | None = None - id: str - result: McpToolCallResult | None = None - server: str - status: McpToolCallStatus - tool: str - type: Annotated[Literal["mcpToolCall"], Field(title="McpToolCallThreadItemType")] + id: RequestId + method: Annotated[Literal["initialize"], Field(title="InitializeRequestMethod")] + params: InitializeParams -class DynamicToolCallThreadItem(BaseModel): +class ThreadStartRequest(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - arguments: Any - content_items: Annotated[ - list[DynamicToolCallOutputContentItem] | None, Field(alias="contentItems") - ] = None - duration_ms: Annotated[ - int | None, - Field( - alias="durationMs", - description="The duration of the dynamic tool call in milliseconds.", - ), - ] = None - id: str - status: DynamicToolCallStatus - success: bool | None = None - tool: str - type: Annotated[ - Literal["dynamicToolCall"], Field(title="DynamicToolCallThreadItemType") - ] + id: RequestId + method: Annotated[Literal["thread/start"], Field(title="Thread/startRequestMethod")] + params: ThreadStartParams -class ImageViewThreadItem(BaseModel): +class ThreadResumeRequest(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - id: str - path: str - type: Annotated[Literal["imageView"], Field(title="ImageViewThreadItemType")] + id: RequestId + method: Annotated[ + Literal["thread/resume"], Field(title="Thread/resumeRequestMethod") + ] + params: ThreadResumeParams -class ImageGenerationThreadItem(BaseModel): +class ThreadForkRequest(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - id: str - result: str - revised_prompt: Annotated[str | None, Field(alias="revisedPrompt")] = None - status: str - type: Annotated[ - Literal["imageGeneration"], Field(title="ImageGenerationThreadItemType") - ] + id: RequestId + method: Annotated[Literal["thread/fork"], Field(title="Thread/forkRequestMethod")] + params: ThreadForkParams -class 
EnteredReviewModeThreadItem(BaseModel): +class ThreadArchiveRequest(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - id: str - review: str - type: Annotated[ - Literal["enteredReviewMode"], Field(title="EnteredReviewModeThreadItemType") + id: RequestId + method: Annotated[ + Literal["thread/archive"], Field(title="Thread/archiveRequestMethod") ] + params: ThreadArchiveParams -class ExitedReviewModeThreadItem(BaseModel): +class ThreadUnsubscribeRequest(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - id: str - review: str - type: Annotated[ - Literal["exitedReviewMode"], Field(title="ExitedReviewModeThreadItemType") + id: RequestId + method: Annotated[ + Literal["thread/unsubscribe"], Field(title="Thread/unsubscribeRequestMethod") ] + params: ThreadUnsubscribeParams -class ContextCompactionThreadItem(BaseModel): +class ThreadNameSetRequest(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - id: str - type: Annotated[ - Literal["contextCompaction"], Field(title="ContextCompactionThreadItemType") + id: RequestId + method: Annotated[ + Literal["thread/name/set"], Field(title="Thread/name/setRequestMethod") ] + params: ThreadSetNameParams -class ThreadLoadedListParams(BaseModel): +class ThreadMetadataUpdateRequest(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - cursor: Annotated[ - str | None, - Field(description="Opaque pagination cursor returned by a previous call."), - ] = None - limit: Annotated[ - int | None, Field(description="Optional page size; defaults to no limit.", ge=0) - ] = None + id: RequestId + method: Annotated[ + Literal["thread/metadata/update"], + Field(title="Thread/metadata/updateRequestMethod"), + ] + params: ThreadMetadataUpdateParams -class ThreadLoadedListResponse(BaseModel): +class ThreadUnarchiveRequest(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - data: Annotated[ - list[str], - Field(description="Thread ids for sessions currently loaded in memory."), + id: 
RequestId + method: Annotated[ + Literal["thread/unarchive"], Field(title="Thread/unarchiveRequestMethod") ] - next_cursor: Annotated[ - str | None, - Field( - alias="nextCursor", - description="Opaque cursor to pass to the next call to continue after the last item. if None, there are no more items to return.", - ), - ] = None + params: ThreadUnarchiveParams -class ThreadMetadataGitInfoUpdateParams(BaseModel): +class ThreadCompactStartRequest(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - branch: Annotated[ - str | None, - Field( - description="Omit to leave the stored branch unchanged, set to `null` to clear it, or provide a non-empty string to replace it." - ), - ] = None - origin_url: Annotated[ - str | None, - Field( - alias="originUrl", - description="Omit to leave the stored origin URL unchanged, set to `null` to clear it, or provide a non-empty string to replace it.", - ), - ] = None - sha: Annotated[ - str | None, - Field( - description="Omit to leave the stored commit unchanged, set to `null` to clear it, or provide a non-empty string to replace it." - ), - ] = None + id: RequestId + method: Annotated[ + Literal["thread/compact/start"], + Field(title="Thread/compact/startRequestMethod"), + ] + params: ThreadCompactStartParams -class ThreadMetadataUpdateParams(BaseModel): +class ThreadRollbackRequest(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - git_info: Annotated[ - ThreadMetadataGitInfoUpdateParams | None, - Field( - alias="gitInfo", - description="Patch the stored Git metadata for this thread. 
Omit a field to leave it unchanged, set it to `null` to clear it, or provide a string to replace the stored value.", - ), - ] = None - thread_id: Annotated[str, Field(alias="threadId")] + id: RequestId + method: Annotated[ + Literal["thread/rollback"], Field(title="Thread/rollbackRequestMethod") + ] + params: ThreadRollbackParams -class ThreadNameUpdatedNotification(BaseModel): +class ThreadLoadedListRequest(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - thread_id: Annotated[str, Field(alias="threadId")] - thread_name: Annotated[str | None, Field(alias="threadName")] = None + id: RequestId + method: Annotated[ + Literal["thread/loaded/list"], Field(title="Thread/loaded/listRequestMethod") + ] + params: ThreadLoadedListParams -class ThreadReadParams(BaseModel): +class ThreadReadRequest(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - include_turns: Annotated[ - bool | None, - Field( - alias="includeTurns", - description="When true, include turns and their items from rollout history.", - ), - ] = False - thread_id: Annotated[str, Field(alias="threadId")] + id: RequestId + method: Annotated[Literal["thread/read"], Field(title="Thread/readRequestMethod")] + params: ThreadReadParams -class ThreadRealtimeAudioChunk(BaseModel): +class SkillsListRequest(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - data: str - num_channels: Annotated[int, Field(alias="numChannels", ge=0)] - sample_rate: Annotated[int, Field(alias="sampleRate", ge=0)] - samples_per_channel: Annotated[ - int | None, Field(alias="samplesPerChannel", ge=0) - ] = None + id: RequestId + method: Annotated[Literal["skills/list"], Field(title="Skills/listRequestMethod")] + params: SkillsListParams -class ThreadRealtimeClosedNotification(BaseModel): +class PluginListRequest(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - reason: str | None = None - thread_id: Annotated[str, Field(alias="threadId")] + id: RequestId + method: 
Annotated[Literal["plugin/list"], Field(title="Plugin/listRequestMethod")] + params: PluginListParams -class ThreadRealtimeErrorNotification(BaseModel): +class PluginReadRequest(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - message: str - thread_id: Annotated[str, Field(alias="threadId")] + id: RequestId + method: Annotated[Literal["plugin/read"], Field(title="Plugin/readRequestMethod")] + params: PluginReadParams -class ThreadRealtimeItemAddedNotification(BaseModel): +class SkillsRemoteListRequest(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - item: Any - thread_id: Annotated[str, Field(alias="threadId")] + id: RequestId + method: Annotated[ + Literal["skills/remote/list"], Field(title="Skills/remote/listRequestMethod") + ] + params: SkillsRemoteReadParams -class ThreadRealtimeOutputAudioDeltaNotification(BaseModel): +class SkillsRemoteExportRequest(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - audio: ThreadRealtimeAudioChunk - thread_id: Annotated[str, Field(alias="threadId")] + id: RequestId + method: Annotated[ + Literal["skills/remote/export"], + Field(title="Skills/remote/exportRequestMethod"), + ] + params: SkillsRemoteWriteParams -class ThreadRealtimeStartedNotification(BaseModel): +class AppListRequest(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - session_id: Annotated[str | None, Field(alias="sessionId")] = None - thread_id: Annotated[str, Field(alias="threadId")] + id: RequestId + method: Annotated[Literal["app/list"], Field(title="App/listRequestMethod")] + params: AppsListParams -class ThreadResumeParams(BaseModel): +class SkillsConfigWriteRequest(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - approval_policy: Annotated[AskForApproval | None, Field(alias="approvalPolicy")] = ( - None - ) - base_instructions: Annotated[str | None, Field(alias="baseInstructions")] = None - config: dict[str, Any] | None = None - cwd: str | None = None - 
developer_instructions: Annotated[ - str | None, Field(alias="developerInstructions") - ] = None - model: Annotated[ - str | None, - Field(description="Configuration overrides for the resumed thread, if any."), - ] = None - model_provider: Annotated[str | None, Field(alias="modelProvider")] = None - personality: Personality | None = None - sandbox: SandboxMode | None = None - service_tier: Annotated[ServiceTier | None, Field(alias="serviceTier")] = None - thread_id: Annotated[str, Field(alias="threadId")] + id: RequestId + method: Annotated[ + Literal["skills/config/write"], Field(title="Skills/config/writeRequestMethod") + ] + params: SkillsConfigWriteParams -class ThreadRollbackParams(BaseModel): +class PluginInstallRequest(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - num_turns: Annotated[ - int, - Field( - alias="numTurns", - description="The number of turns to drop from the end of the thread. Must be >= 1.\n\nThis only modifies the thread's history and does not revert local file changes that have been made by the agent. 
Clients are responsible for reverting these changes.", - ge=0, - ), + id: RequestId + method: Annotated[ + Literal["plugin/install"], Field(title="Plugin/installRequestMethod") ] - thread_id: Annotated[str, Field(alias="threadId")] + params: PluginInstallParams -class ThreadSetNameParams(BaseModel): +class PluginUninstallRequest(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - name: str - thread_id: Annotated[str, Field(alias="threadId")] + id: RequestId + method: Annotated[ + Literal["plugin/uninstall"], Field(title="Plugin/uninstallRequestMethod") + ] + params: PluginUninstallParams -class ThreadSetNameResponse(BaseModel): - pass +class TurnInterruptRequest(BaseModel): model_config = ConfigDict( populate_by_name=True, ) + id: RequestId + method: Annotated[ + Literal["turn/interrupt"], Field(title="Turn/interruptRequestMethod") + ] + params: TurnInterruptParams -class ThreadSortKey(Enum): - created_at = "created_at" - updated_at = "updated_at" - - -class ThreadSourceKind(Enum): - cli = "cli" - vscode = "vscode" - exec = "exec" - app_server = "appServer" - sub_agent = "subAgent" - sub_agent_review = "subAgentReview" - sub_agent_compact = "subAgentCompact" - sub_agent_thread_spawn = "subAgentThreadSpawn" - sub_agent_other = "subAgentOther" - unknown = "unknown" - - -class ThreadStartParams(BaseModel): +class ModelListRequest(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - approval_policy: Annotated[AskForApproval | None, Field(alias="approvalPolicy")] = ( - None - ) - base_instructions: Annotated[str | None, Field(alias="baseInstructions")] = None - config: dict[str, Any] | None = None - cwd: str | None = None - developer_instructions: Annotated[ - str | None, Field(alias="developerInstructions") - ] = None - ephemeral: bool | None = None - model: str | None = None - model_provider: Annotated[str | None, Field(alias="modelProvider")] = None - personality: Personality | None = None - sandbox: SandboxMode | None = None - 
service_name: Annotated[str | None, Field(alias="serviceName")] = None - service_tier: Annotated[ServiceTier | None, Field(alias="serviceTier")] = None + id: RequestId + method: Annotated[Literal["model/list"], Field(title="Model/listRequestMethod")] + params: ModelListParams -class NotLoadedThreadStatus(BaseModel): +class ExperimentalFeatureListRequest(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - type: Annotated[Literal["notLoaded"], Field(title="NotLoadedThreadStatusType")] + id: RequestId + method: Annotated[ + Literal["experimentalFeature/list"], + Field(title="ExperimentalFeature/listRequestMethod"), + ] + params: ExperimentalFeatureListParams -class IdleThreadStatus(BaseModel): +class McpServerOauthLoginRequest(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - type: Annotated[Literal["idle"], Field(title="IdleThreadStatusType")] + id: RequestId + method: Annotated[ + Literal["mcpServer/oauth/login"], + Field(title="McpServer/oauth/loginRequestMethod"), + ] + params: McpServerOauthLoginParams -class SystemErrorThreadStatus(BaseModel): +class ConfigMcpServerReloadRequest(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - type: Annotated[Literal["systemError"], Field(title="SystemErrorThreadStatusType")] + id: RequestId + method: Annotated[ + Literal["config/mcpServer/reload"], + Field(title="Config/mcpServer/reloadRequestMethod"), + ] + params: None = None -class ActiveThreadStatus(BaseModel): +class McpServerStatusListRequest(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - active_flags: Annotated[list[ThreadActiveFlag], Field(alias="activeFlags")] - type: Annotated[Literal["active"], Field(title="ActiveThreadStatusType")] - - -class ThreadStatus( - RootModel[ - NotLoadedThreadStatus - | IdleThreadStatus - | SystemErrorThreadStatus - | ActiveThreadStatus + id: RequestId + method: Annotated[ + Literal["mcpServerStatus/list"], + Field(title="McpServerStatus/listRequestMethod"), ] -): - 
model_config = ConfigDict( - populate_by_name=True, - ) - root: ( - NotLoadedThreadStatus - | IdleThreadStatus - | SystemErrorThreadStatus - | ActiveThreadStatus - ) + params: ListMcpServerStatusParams -class ThreadStatusChangedNotification(BaseModel): +class WindowsSandboxSetupStartRequest(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - status: ThreadStatus - thread_id: Annotated[str, Field(alias="threadId")] + id: RequestId + method: Annotated[ + Literal["windowsSandbox/setupStart"], + Field(title="WindowsSandbox/setupStartRequestMethod"), + ] + params: WindowsSandboxSetupStartParams -class ThreadUnarchiveParams(BaseModel): +class AccountLoginStartRequest(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - thread_id: Annotated[str, Field(alias="threadId")] + id: RequestId + method: Annotated[ + Literal["account/login/start"], Field(title="Account/login/startRequestMethod") + ] + params: LoginAccountParams -class ThreadUnarchivedNotification(BaseModel): +class AccountLoginCancelRequest(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - thread_id: Annotated[str, Field(alias="threadId")] + id: RequestId + method: Annotated[ + Literal["account/login/cancel"], + Field(title="Account/login/cancelRequestMethod"), + ] + params: CancelLoginAccountParams -class ThreadUnsubscribeParams(BaseModel): +class AccountLogoutRequest(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - thread_id: Annotated[str, Field(alias="threadId")] - - -class ThreadUnsubscribeStatus(Enum): - not_loaded = "notLoaded" - not_subscribed = "notSubscribed" - unsubscribed = "unsubscribed" + id: RequestId + method: Annotated[ + Literal["account/logout"], Field(title="Account/logoutRequestMethod") + ] + params: None = None -class TokenUsage(BaseModel): +class AccountRateLimitsReadRequest(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - cached_input_tokens: int - input_tokens: int - output_tokens: int - reasoning_output_tokens: 
int - total_tokens: int + id: RequestId + method: Annotated[ + Literal["account/rateLimits/read"], + Field(title="Account/rateLimits/readRequestMethod"), + ] + params: None = None -class TokenUsageBreakdown(BaseModel): +class FeedbackUploadRequest(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - cached_input_tokens: Annotated[int, Field(alias="cachedInputTokens")] - input_tokens: Annotated[int, Field(alias="inputTokens")] - output_tokens: Annotated[int, Field(alias="outputTokens")] - reasoning_output_tokens: Annotated[int, Field(alias="reasoningOutputTokens")] - total_tokens: Annotated[int, Field(alias="totalTokens")] - - -class TokenUsageInfo(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - last_token_usage: TokenUsage - model_context_window: int | None = None - total_token_usage: TokenUsage - - -class Tool(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - field_meta: Annotated[Any | None, Field(alias="_meta")] = None - annotations: Any | None = None - description: str | None = None - icons: list | None = None - input_schema: Annotated[Any, Field(alias="inputSchema")] - name: str - output_schema: Annotated[Any | None, Field(alias="outputSchema")] = None - title: str | None = None - - -class TurnAbortReason(Enum): - interrupted = "interrupted" - replaced = "replaced" - review_ended = "review_ended" - - -class TurnDiffUpdatedNotification(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - diff: str - thread_id: Annotated[str, Field(alias="threadId")] - turn_id: Annotated[str, Field(alias="turnId")] - - -class TurnError(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - additional_details: Annotated[str | None, Field(alias="additionalDetails")] = None - codex_error_info: Annotated[ - CodexErrorInfo | None, Field(alias="codexErrorInfo") - ] = None - message: str - - -class TurnInterruptParams(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - 
thread_id: Annotated[str, Field(alias="threadId")] - turn_id: Annotated[str, Field(alias="turnId")] - - -class TurnInterruptResponse(BaseModel): - pass - model_config = ConfigDict( - populate_by_name=True, - ) - - -class AgentMessageTurnItem(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - content: list[AgentMessageContent] - id: str - phase: Annotated[ - MessagePhase | None, - Field( - description="Optional phase metadata carried through from `ResponseItem::Message`.\n\nThis is currently used by TUI rendering to distinguish mid-turn commentary from a final answer and avoid status-indicator jitter." - ), - ] = None - type: Annotated[Literal["AgentMessage"], Field(title="AgentMessageTurnItemType")] - - -class PlanTurnItem(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - id: str - text: str - type: Annotated[Literal["Plan"], Field(title="PlanTurnItemType")] - - -class ReasoningTurnItem(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - id: str - raw_content: list[str] | None = [] - summary_text: list[str] - type: Annotated[Literal["Reasoning"], Field(title="ReasoningTurnItemType")] - - -class WebSearchTurnItem(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - action: ResponsesApiWebSearchAction - id: str - query: str - type: Annotated[Literal["WebSearch"], Field(title="WebSearchTurnItemType")] - - -class ImageGenerationTurnItem(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - id: str - result: str - revised_prompt: str | None = None - saved_path: str | None = None - status: str - type: Annotated[ - Literal["ImageGeneration"], Field(title="ImageGenerationTurnItemType") - ] - - -class ContextCompactionTurnItem(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - id: str - type: Annotated[ - Literal["ContextCompaction"], Field(title="ContextCompactionTurnItemType") - ] - - -class TurnPlanStepStatus(Enum): - pending = "pending" - in_progress 
= "inProgress" - completed = "completed" - - -class TurnStatus(Enum): - completed = "completed" - interrupted = "interrupted" - failed = "failed" - in_progress = "inProgress" - - -class TurnSteerResponse(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - turn_id: Annotated[str, Field(alias="turnId")] - - -class TextUserInput(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - text: str - text_elements: Annotated[ - list[TextElement] | None, - Field( - description="UI-defined spans within `text` used to render or persist special elements." - ), - ] = [] - type: Annotated[Literal["text"], Field(title="TextUserInputType")] - - -class ImageUserInput(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - type: Annotated[Literal["image"], Field(title="ImageUserInputType")] - url: str - - -class LocalImageUserInput(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - path: str - type: Annotated[Literal["localImage"], Field(title="LocalImageUserInputType")] - - -class SkillUserInput(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - name: str - path: str - type: Annotated[Literal["skill"], Field(title="SkillUserInputType")] - - -class MentionUserInput(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - name: str - path: str - type: Annotated[Literal["mention"], Field(title="MentionUserInputType")] - - -class UserInput( - RootModel[ - TextUserInput - | ImageUserInput - | LocalImageUserInput - | SkillUserInput - | MentionUserInput - ] -): - model_config = ConfigDict( - populate_by_name=True, - ) - root: ( - TextUserInput - | ImageUserInput - | LocalImageUserInput - | SkillUserInput - | MentionUserInput - ) - - -class Verbosity(Enum): - low = "low" - medium = "medium" - high = "high" - - -class SearchWebSearchAction(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - queries: list[str] | None = None - query: str | None = None - type: 
Annotated[Literal["search"], Field(title="SearchWebSearchActionType")] - - -class OpenPageWebSearchAction(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - type: Annotated[Literal["openPage"], Field(title="OpenPageWebSearchActionType")] - url: str | None = None - - -class FindInPageWebSearchAction(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - pattern: str | None = None - type: Annotated[Literal["findInPage"], Field(title="FindInPageWebSearchActionType")] - url: str | None = None - - -class OtherWebSearchAction(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - type: Annotated[Literal["other"], Field(title="OtherWebSearchActionType")] - - -class WebSearchAction( - RootModel[ - SearchWebSearchAction - | OpenPageWebSearchAction - | FindInPageWebSearchAction - | OtherWebSearchAction - ] -): - model_config = ConfigDict( - populate_by_name=True, - ) - root: ( - SearchWebSearchAction - | OpenPageWebSearchAction - | FindInPageWebSearchAction - | OtherWebSearchAction - ) - - -class WebSearchContextSize(Enum): - low = "low" - medium = "medium" - high = "high" - - -class WebSearchLocation(BaseModel): - model_config = ConfigDict( - extra="forbid", - populate_by_name=True, - ) - city: str | None = None - country: str | None = None - region: str | None = None - timezone: str | None = None - - -class WebSearchMode(Enum): - disabled = "disabled" - cached = "cached" - live = "live" - - -class WebSearchToolConfig(BaseModel): - model_config = ConfigDict( - extra="forbid", - populate_by_name=True, - ) - allowed_domains: list[str] | None = None - context_size: WebSearchContextSize | None = None - location: WebSearchLocation | None = None - - -class WindowsSandboxSetupMode(Enum): - elevated = "elevated" - unelevated = "unelevated" - - -class WindowsSandboxSetupStartParams(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - cwd: AbsolutePathBuf | None = None - mode: WindowsSandboxSetupMode - - -class 
WindowsSandboxSetupStartResponse(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - started: bool - - -class WindowsWorldWritableWarningNotification(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - extra_count: Annotated[int, Field(alias="extraCount", ge=0)] - failed_scan: Annotated[bool, Field(alias="failedScan")] - sample_paths: Annotated[list[str], Field(alias="samplePaths")] - - -class WriteStatus(Enum): - ok = "ok" - ok_overridden = "okOverridden" - - -class ChatgptAccount(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - email: str - plan_type: Annotated[PlanType, Field(alias="planType")] - type: Annotated[Literal["chatgpt"], Field(title="ChatgptAccountType")] - - -class Account(RootModel[ApiKeyAccount | ChatgptAccount]): - model_config = ConfigDict( - populate_by_name=True, - ) - root: ApiKeyAccount | ChatgptAccount - - -class AccountUpdatedNotification(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - auth_mode: Annotated[AuthMode | None, Field(alias="authMode")] = None - plan_type: Annotated[PlanType | None, Field(alias="planType")] = None - - -class AppConfig(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - default_tools_approval_mode: AppToolApproval | None = None - default_tools_enabled: bool | None = None - destructive_enabled: bool | None = None - enabled: bool | None = True - open_world_enabled: bool | None = None - tools: AppToolsConfig | None = None - - -class AppMetadata(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - categories: list[str] | None = None - developer: str | None = None - first_party_requires_install: Annotated[ - bool | None, Field(alias="firstPartyRequiresInstall") - ] = None - first_party_type: Annotated[str | None, Field(alias="firstPartyType")] = None - review: AppReview | None = None - screenshots: list[AppScreenshot] | None = None - seo_description: Annotated[str | None, 
Field(alias="seoDescription")] = None - show_in_composer_when_unlinked: Annotated[ - bool | None, Field(alias="showInComposerWhenUnlinked") - ] = None - sub_categories: Annotated[list[str] | None, Field(alias="subCategories")] = None - version: str | None = None - version_id: Annotated[str | None, Field(alias="versionId")] = None - version_notes: Annotated[str | None, Field(alias="versionNotes")] = None - - -class AppsConfig(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - field_default: Annotated[AppsDefaultConfig | None, Field(alias="_default")] = None - - -class CancelLoginAccountResponse(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - status: CancelLoginAccountStatus - - -class InitializeRequest(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - id: RequestId - method: Annotated[Literal["initialize"], Field(title="InitializeRequestMethod")] - params: InitializeParams - - -class ThreadStartRequest(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - id: RequestId - method: Annotated[Literal["thread/start"], Field(title="Thread/startRequestMethod")] - params: ThreadStartParams - - -class ThreadResumeRequest(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - id: RequestId - method: Annotated[ - Literal["thread/resume"], Field(title="Thread/resumeRequestMethod") - ] - params: ThreadResumeParams - - -class ThreadForkRequest(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - id: RequestId - method: Annotated[Literal["thread/fork"], Field(title="Thread/forkRequestMethod")] - params: ThreadForkParams - - -class ThreadArchiveRequest(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - id: RequestId - method: Annotated[ - Literal["thread/archive"], Field(title="Thread/archiveRequestMethod") - ] - params: ThreadArchiveParams - - -class ThreadUnsubscribeRequest(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - 
) - id: RequestId - method: Annotated[ - Literal["thread/unsubscribe"], Field(title="Thread/unsubscribeRequestMethod") - ] - params: ThreadUnsubscribeParams - - -class ThreadNameSetRequest(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - id: RequestId - method: Annotated[ - Literal["thread/name/set"], Field(title="Thread/name/setRequestMethod") - ] - params: ThreadSetNameParams - - -class ThreadMetadataUpdateRequest(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - id: RequestId - method: Annotated[ - Literal["thread/metadata/update"], - Field(title="Thread/metadata/updateRequestMethod"), - ] - params: ThreadMetadataUpdateParams - - -class ThreadUnarchiveRequest(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - id: RequestId - method: Annotated[ - Literal["thread/unarchive"], Field(title="Thread/unarchiveRequestMethod") - ] - params: ThreadUnarchiveParams - - -class ThreadCompactStartRequest(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - id: RequestId - method: Annotated[ - Literal["thread/compact/start"], - Field(title="Thread/compact/startRequestMethod"), - ] - params: ThreadCompactStartParams - - -class ThreadRollbackRequest(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - id: RequestId - method: Annotated[ - Literal["thread/rollback"], Field(title="Thread/rollbackRequestMethod") - ] - params: ThreadRollbackParams - - -class ThreadLoadedListRequest(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - id: RequestId - method: Annotated[ - Literal["thread/loaded/list"], Field(title="Thread/loaded/listRequestMethod") - ] - params: ThreadLoadedListParams - - -class ThreadReadRequest(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - id: RequestId - method: Annotated[Literal["thread/read"], Field(title="Thread/readRequestMethod")] - params: ThreadReadParams - - -class SkillsListRequest(BaseModel): - model_config = 
ConfigDict( - populate_by_name=True, - ) - id: RequestId - method: Annotated[Literal["skills/list"], Field(title="Skills/listRequestMethod")] - params: SkillsListParams - - -class PluginListRequest(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - id: RequestId - method: Annotated[Literal["plugin/list"], Field(title="Plugin/listRequestMethod")] - params: PluginListParams - - -class SkillsRemoteListRequest(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - id: RequestId - method: Annotated[ - Literal["skills/remote/list"], Field(title="Skills/remote/listRequestMethod") - ] - params: SkillsRemoteReadParams - - -class SkillsRemoteExportRequest(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - id: RequestId - method: Annotated[ - Literal["skills/remote/export"], - Field(title="Skills/remote/exportRequestMethod"), - ] - params: SkillsRemoteWriteParams - - -class AppListRequest(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - id: RequestId - method: Annotated[Literal["app/list"], Field(title="App/listRequestMethod")] - params: AppsListParams - - -class SkillsConfigWriteRequest(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - id: RequestId - method: Annotated[ - Literal["skills/config/write"], Field(title="Skills/config/writeRequestMethod") - ] - params: SkillsConfigWriteParams - - -class PluginInstallRequest(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - id: RequestId - method: Annotated[ - Literal["plugin/install"], Field(title="Plugin/installRequestMethod") - ] - params: PluginInstallParams - - -class PluginUninstallRequest(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - id: RequestId - method: Annotated[ - Literal["plugin/uninstall"], Field(title="Plugin/uninstallRequestMethod") - ] - params: PluginUninstallParams - - -class TurnInterruptRequest(BaseModel): - model_config = ConfigDict( - populate_by_name=True, 
- ) - id: RequestId - method: Annotated[ - Literal["turn/interrupt"], Field(title="Turn/interruptRequestMethod") - ] - params: TurnInterruptParams - - -class ModelListRequest(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - id: RequestId - method: Annotated[Literal["model/list"], Field(title="Model/listRequestMethod")] - params: ModelListParams - - -class ExperimentalFeatureListRequest(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - id: RequestId - method: Annotated[ - Literal["experimentalFeature/list"], - Field(title="ExperimentalFeature/listRequestMethod"), - ] - params: ExperimentalFeatureListParams - - -class McpServerOauthLoginRequest(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - id: RequestId - method: Annotated[ - Literal["mcpServer/oauth/login"], - Field(title="McpServer/oauth/loginRequestMethod"), - ] - params: McpServerOauthLoginParams - - -class ConfigMcpServerReloadRequest(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - id: RequestId - method: Annotated[ - Literal["config/mcpServer/reload"], - Field(title="Config/mcpServer/reloadRequestMethod"), - ] - params: None = None - - -class McpServerStatusListRequest(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - id: RequestId - method: Annotated[ - Literal["mcpServerStatus/list"], - Field(title="McpServerStatus/listRequestMethod"), - ] - params: ListMcpServerStatusParams - - -class WindowsSandboxSetupStartRequest(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - id: RequestId - method: Annotated[ - Literal["windowsSandbox/setupStart"], - Field(title="WindowsSandbox/setupStartRequestMethod"), - ] - params: WindowsSandboxSetupStartParams - - -class AccountLoginStartRequest(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - id: RequestId - method: Annotated[ - Literal["account/login/start"], Field(title="Account/login/startRequestMethod") - ] - 
params: LoginAccountParams - - -class AccountLoginCancelRequest(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - id: RequestId - method: Annotated[ - Literal["account/login/cancel"], - Field(title="Account/login/cancelRequestMethod"), - ] - params: CancelLoginAccountParams - - -class AccountLogoutRequest(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - id: RequestId - method: Annotated[ - Literal["account/logout"], Field(title="Account/logoutRequestMethod") - ] - params: None = None - - -class AccountRateLimitsReadRequest(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - id: RequestId - method: Annotated[ - Literal["account/rateLimits/read"], - Field(title="Account/rateLimits/readRequestMethod"), - ] - params: None = None - - -class FeedbackUploadRequest(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - id: RequestId - method: Annotated[ - Literal["feedback/upload"], Field(title="Feedback/uploadRequestMethod") - ] - params: FeedbackUploadParams - - -class CommandExecWriteRequest(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - id: RequestId - method: Annotated[ - Literal["command/exec/write"], Field(title="Command/exec/writeRequestMethod") - ] - params: CommandExecWriteParams - - -class CommandExecTerminateRequest(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - id: RequestId - method: Annotated[ - Literal["command/exec/terminate"], - Field(title="Command/exec/terminateRequestMethod"), - ] - params: CommandExecTerminateParams - - -class ConfigReadRequest(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - id: RequestId - method: Annotated[Literal["config/read"], Field(title="Config/readRequestMethod")] - params: ConfigReadParams - - -class ExternalAgentConfigDetectRequest(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - id: RequestId - method: Annotated[ - 
Literal["externalAgentConfig/detect"], - Field(title="ExternalAgentConfig/detectRequestMethod"), - ] - params: ExternalAgentConfigDetectParams - - -class ConfigRequirementsReadRequest(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - id: RequestId - method: Annotated[ - Literal["configRequirements/read"], - Field(title="ConfigRequirements/readRequestMethod"), - ] - params: None = None - - -class AccountReadRequest(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - id: RequestId - method: Annotated[Literal["account/read"], Field(title="Account/readRequestMethod")] - params: GetAccountParams - - -class FuzzyFileSearchRequest(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - id: RequestId - method: Annotated[ - Literal["fuzzyFileSearch"], Field(title="FuzzyFileSearchRequestMethod") - ] - params: FuzzyFileSearchParams - - -class CollabAgentRef(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - agent_nickname: Annotated[ - str | None, - Field( - description="Optional nickname assigned to an AgentControl-spawned sub-agent." - ), - ] = None - agent_role: Annotated[ - str | None, - Field( - description="Optional role (agent_role) assigned to an AgentControl-spawned sub-agent." - ), - ] = None - thread_id: Annotated[ - ThreadId, Field(description="Thread ID of the receiver/new agent.") - ] - - -class CollabAgentState(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - message: str | None = None - status: CollabAgentStatus - - -class CollabAgentStatusEntry(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - agent_nickname: Annotated[ - str | None, - Field( - description="Optional nickname assigned to an AgentControl-spawned sub-agent." - ), - ] = None - agent_role: Annotated[ - str | None, - Field( - description="Optional role (agent_role) assigned to an AgentControl-spawned sub-agent." 
- ), - ] = None - status: Annotated[AgentStatus, Field(description="Last known status of the agent.")] - thread_id: Annotated[ - ThreadId, Field(description="Thread ID of the receiver/new agent.") - ] - - -class CollaborationMode(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - mode: ModeKind - settings: Settings - - -class CollaborationModeMask(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - mode: ModeKind | None = None - model: str | None = None - name: str - reasoning_effort: ReasoningEffort | None = None - - -class CommandExecOutputDeltaNotification(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - cap_reached: Annotated[ - bool, - Field( - alias="capReached", - description="`true` on the final streamed chunk for a stream when `outputBytesCap` truncated later output on that stream.", - ), - ] - delta_base64: Annotated[ - str, Field(alias="deltaBase64", description="Base64-encoded output bytes.") - ] - process_id: Annotated[ - str, - Field( - alias="processId", - description="Client-supplied, connection-scoped `processId` from the original `command/exec` request.", - ), - ] - stream: Annotated[ - CommandExecOutputStream, Field(description="Output stream for this chunk.") + id: RequestId + method: Annotated[ + Literal["feedback/upload"], Field(title="Feedback/uploadRequestMethod") ] + params: FeedbackUploadParams -class CommandExecParams(BaseModel): +class CommandExecWriteRequest(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - command: Annotated[ - list[str], Field(description="Command argv vector. Empty arrays are rejected.") + id: RequestId + method: Annotated[ + Literal["command/exec/write"], Field(title="Command/exec/writeRequestMethod") ] - cwd: Annotated[ - str | None, - Field(description="Optional working directory. 
Defaults to the server cwd."), - ] = None - disable_output_cap: Annotated[ - bool | None, - Field( - alias="disableOutputCap", - description="Disable stdout/stderr capture truncation for this request.\n\nCannot be combined with `outputBytesCap`.", - ), - ] = None - disable_timeout: Annotated[ - bool | None, - Field( - alias="disableTimeout", - description="Disable the timeout entirely for this request.\n\nCannot be combined with `timeoutMs`.", - ), - ] = None - env: Annotated[ - dict[str, Any] | None, - Field( - description="Optional environment overrides merged into the server-computed environment.\n\nMatching names override inherited values. Set a key to `null` to unset an inherited variable." - ), - ] = None - output_bytes_cap: Annotated[ - int | None, - Field( - alias="outputBytesCap", - description="Optional per-stream stdout/stderr capture cap in bytes.\n\nWhen omitted, the server default applies. Cannot be combined with `disableOutputCap`.", - ge=0, - ), - ] = None - process_id: Annotated[ - str | None, - Field( - alias="processId", - description="Optional client-supplied, connection-scoped process id.\n\nRequired for `tty`, `streamStdin`, `streamStdoutStderr`, and follow-up `command/exec/write`, `command/exec/resize`, and `command/exec/terminate` calls. When omitted, buffered execution gets an internal id that is not exposed to the client.", - ), - ] = None - sandbox_policy: Annotated[ - SandboxPolicy | None, - Field( - alias="sandboxPolicy", - description="Optional sandbox policy for this command.\n\nUses the same shape as thread/turn execution sandbox configuration and defaults to the user's configured policy when omitted.", - ), - ] = None - size: Annotated[ - CommandExecTerminalSize | None, - Field( - description="Optional initial PTY size in character cells. Only valid when `tty` is true." 
- ), - ] = None - stream_stdin: Annotated[ - bool | None, - Field( - alias="streamStdin", - description="Allow follow-up `command/exec/write` requests to write stdin bytes.\n\nRequires a client-supplied `processId`.", - ), - ] = None - stream_stdout_stderr: Annotated[ - bool | None, - Field( - alias="streamStdoutStderr", - description="Stream stdout/stderr via `command/exec/outputDelta` notifications.\n\nStreamed bytes are not duplicated into the final response and require a client-supplied `processId`.", - ), - ] = None - timeout_ms: Annotated[ - int | None, - Field( - alias="timeoutMs", - description="Optional timeout in milliseconds.\n\nWhen omitted, the server default applies. Cannot be combined with `disableTimeout`.", - ), - ] = None - tty: Annotated[ - bool | None, - Field( - description="Enable PTY mode.\n\nThis implies `streamStdin` and `streamStdoutStderr`." - ), - ] = None + params: CommandExecWriteParams -class CommandExecResizeParams(BaseModel): +class CommandExecTerminateRequest(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - process_id: Annotated[ - str, - Field( - alias="processId", - description="Client-supplied, connection-scoped `processId` from the original `command/exec` request.", - ), - ] - size: Annotated[ - CommandExecTerminalSize, Field(description="New PTY size in character cells.") + id: RequestId + method: Annotated[ + Literal["command/exec/terminate"], + Field(title="Command/exec/terminateRequestMethod"), ] + params: CommandExecTerminateParams -class ConfigEdit(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - key_path: Annotated[str, Field(alias="keyPath")] - merge_strategy: Annotated[MergeStrategy, Field(alias="mergeStrategy")] - value: Any - - -class ConfigLayer(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - config: Any - disabled_reason: Annotated[str | None, Field(alias="disabledReason")] = None - name: ConfigLayerSource - version: str - - -class 
ConfigLayerMetadata(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - name: ConfigLayerSource - version: str - - -class ConfigRequirements(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - allowed_approval_policies: Annotated[ - list[AskForApproval] | None, Field(alias="allowedApprovalPolicies") - ] = None - allowed_sandbox_modes: Annotated[ - list[SandboxMode] | None, Field(alias="allowedSandboxModes") - ] = None - allowed_web_search_modes: Annotated[ - list[WebSearchMode] | None, Field(alias="allowedWebSearchModes") - ] = None - enforce_residency: Annotated[ - ResidencyRequirement | None, Field(alias="enforceResidency") - ] = None - feature_requirements: Annotated[ - dict[str, Any] | None, Field(alias="featureRequirements") - ] = None - - -class ConfigRequirementsReadResponse(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - requirements: Annotated[ - ConfigRequirements | None, - Field( - description="Null if no requirements are configured (e.g. no requirements.toml/MDM entries)." - ), - ] = None - - -class ConfigValueWriteParams(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - expected_version: Annotated[str | None, Field(alias="expectedVersion")] = None - file_path: Annotated[ - str | None, - Field( - alias="filePath", - description="Path to the config file to write; defaults to the user's `config.toml` when omitted.", - ), - ] = None - key_path: Annotated[str, Field(alias="keyPath")] - merge_strategy: Annotated[MergeStrategy, Field(alias="mergeStrategy")] - value: Any - - -class ConfigWarningNotification(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - details: Annotated[ - str | None, Field(description="Optional extra guidance or error details.") - ] = None - path: Annotated[ - str | None, - Field( - description="Optional path to the config file that triggered the warning." 
- ), - ] = None - range: Annotated[ - TextRange | None, - Field( - description="Optional range for the error location inside the config file." - ), - ] = None - summary: Annotated[str, Field(description="Concise summary of the warning.")] - - -class ErrorNotification(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - error: TurnError - thread_id: Annotated[str, Field(alias="threadId")] - turn_id: Annotated[str, Field(alias="turnId")] - will_retry: Annotated[bool, Field(alias="willRetry")] - - -class ModelRerouteEventMsg(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - from_model: str - reason: ModelRerouteReason - to_model: str - type: Annotated[Literal["model_reroute"], Field(title="ModelRerouteEventMsgType")] - - -class TaskStartedEventMsg(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - collaboration_mode_kind: ModeKind | None = "default" - model_context_window: int | None = None - turn_id: str - type: Annotated[Literal["task_started"], Field(title="TaskStartedEventMsgType")] - - -class AgentMessageEventMsg(BaseModel): +class ConfigReadRequest(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - message: str - phase: MessagePhase | None = None - type: Annotated[Literal["agent_message"], Field(title="AgentMessageEventMsgType")] + id: RequestId + method: Annotated[Literal["config/read"], Field(title="Config/readRequestMethod")] + params: ConfigReadParams -class UserMessageEventMsg(BaseModel): +class ExternalAgentConfigDetectRequest(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - images: Annotated[ - list[str] | None, - Field( - description="Image URLs sourced from `UserInput::Image`. These are safe to replay in legacy UI history events and correspond to images sent to the model." - ), - ] = None - local_images: Annotated[ - list[str] | None, - Field( - description="Local file paths sourced from `UserInput::LocalImage`. 
These are kept so the UI can reattach images when editing history, and should not be sent to the model or treated as API-ready URLs." - ), - ] = [] - message: str - text_elements: Annotated[ - list[TextElement] | None, - Field( - description="UI-defined spans within `message` used to render or persist special elements." - ), - ] = [] - type: Annotated[Literal["user_message"], Field(title="UserMessageEventMsgType")] + id: RequestId + method: Annotated[ + Literal["externalAgentConfig/detect"], + Field(title="ExternalAgentConfig/detectRequestMethod"), + ] + params: ExternalAgentConfigDetectParams -class ThreadNameUpdatedEventMsg(BaseModel): +class ConfigRequirementsReadRequest(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - thread_id: ThreadId - thread_name: str | None = None - type: Annotated[ - Literal["thread_name_updated"], Field(title="ThreadNameUpdatedEventMsgType") + id: RequestId + method: Annotated[ + Literal["configRequirements/read"], + Field(title="ConfigRequirements/readRequestMethod"), ] + params: None = None -class McpStartupUpdateEventMsg(BaseModel): +class AccountReadRequest(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - server: Annotated[str, Field(description="Server name being started.")] - status: Annotated[McpStartupStatus, Field(description="Current startup status.")] - type: Annotated[ - Literal["mcp_startup_update"], Field(title="McpStartupUpdateEventMsgType") - ] + id: RequestId + method: Annotated[Literal["account/read"], Field(title="Account/readRequestMethod")] + params: GetAccountParams -class McpStartupCompleteEventMsg(BaseModel): +class FuzzyFileSearchRequest(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - cancelled: list[str] - failed: list[McpStartupFailure] - ready: list[str] - type: Annotated[ - Literal["mcp_startup_complete"], Field(title="McpStartupCompleteEventMsgType") + id: RequestId + method: Annotated[ + Literal["fuzzyFileSearch"], 
Field(title="FuzzyFileSearchRequestMethod") ] + params: FuzzyFileSearchParams -class McpToolCallBeginEventMsg(BaseModel): +class CollabAgentState(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - call_id: Annotated[ - str, - Field( - description="Identifier so this can be paired with the McpToolCallEnd event." - ), - ] - invocation: McpInvocation - type: Annotated[ - Literal["mcp_tool_call_begin"], Field(title="McpToolCallBeginEventMsgType") - ] + message: str | None = None + status: CollabAgentStatus -class McpToolCallEndEventMsg(BaseModel): +class CollaborationMode(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - call_id: Annotated[ - str, - Field( - description="Identifier for the corresponding McpToolCallBegin that finished." - ), - ] - duration: Duration - invocation: McpInvocation - result: Annotated[ - ResultOfCallToolResultOrString, - Field(description="Result of the tool call. Note this could be an error."), - ] - type: Annotated[ - Literal["mcp_tool_call_end"], Field(title="McpToolCallEndEventMsgType") - ] + mode: ModeKind + settings: Settings -class WebSearchEndEventMsg(BaseModel): +class CollaborationModeMask(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - action: ResponsesApiWebSearchAction - call_id: str - query: str - type: Annotated[Literal["web_search_end"], Field(title="WebSearchEndEventMsgType")] + mode: ModeKind | None = None + model: str | None = None + name: str + reasoning_effort: ReasoningEffort | None = None -class ExecCommandBeginEventMsg(BaseModel): +class CommandExecOutputDeltaNotification(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - call_id: Annotated[ - str, + cap_reached: Annotated[ + bool, Field( - description="Identifier so this can be paired with the ExecCommandEnd event." 
+ alias="capReached", + description="`true` on the final streamed chunk for a stream when `outputBytesCap` truncated later output on that stream.", ), ] - command: Annotated[list[str], Field(description="The command to be executed.")] - cwd: Annotated[ - str, - Field( - description="The command's working directory if not the default cwd for the agent." - ), + delta_base64: Annotated[ + str, Field(alias="deltaBase64", description="Base64-encoded output bytes.") ] - interaction_input: Annotated[ - str | None, - Field( - description="Raw input sent to a unified exec session (if this is an interaction event)." - ), - ] = None - parsed_cmd: list[ParsedCommand] process_id: Annotated[ - str | None, - Field( - description="Identifier for the underlying PTY process (when available)." - ), - ] = None - source: Annotated[ - ExecCommandSource | None, - Field( - description="Where the command originated. Defaults to Agent for backward compatibility." - ), - ] = "agent" - turn_id: Annotated[str, Field(description="Turn ID that this command belongs to.")] - type: Annotated[ - Literal["exec_command_begin"], Field(title="ExecCommandBeginEventMsgType") - ] - - -class ExecCommandOutputDeltaEventMsg(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - call_id: Annotated[ str, Field( - description="Identifier for the ExecCommandBegin that produced this chunk." 
+ alias="processId", + description="Client-supplied, connection-scoped `processId` from the original `command/exec` request.", ), ] - chunk: Annotated[ - str, Field(description="Raw bytes from the stream (may not be valid UTF-8).") - ] stream: Annotated[ - ExecOutputStream, Field(description="Which stream produced this chunk.") - ] - type: Annotated[ - Literal["exec_command_output_delta"], - Field(title="ExecCommandOutputDeltaEventMsgType"), + CommandExecOutputStream, Field(description="Output stream for this chunk.") ] -class ExecCommandEndEventMsg(BaseModel): +class CommandExecParams(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - aggregated_output: Annotated[ - str | None, Field(description="Captured aggregated output") - ] = "" - call_id: Annotated[ - str, Field(description="Identifier for the ExecCommandBegin that finished.") + command: Annotated[ + list[str], Field(description="Command argv vector. Empty arrays are rejected.") ] - command: Annotated[list[str], Field(description="The command that was executed.")] cwd: Annotated[ - str, - Field( - description="The command's working directory if not the default cwd for the agent." - ), - ] - duration: Annotated[ - Duration, Field(description="The duration of the command execution.") - ] - exit_code: Annotated[int, Field(description="The command's exit code.")] - formatted_output: Annotated[ - str, - Field(description="Formatted output from the command, as seen by the model."), - ] - interaction_input: Annotated[ str | None, - Field( - description="Raw input sent to a unified exec session (if this is an interaction event)." - ), + Field(description="Optional working directory. Defaults to the server cwd."), ] = None - parsed_cmd: list[ParsedCommand] - process_id: Annotated[ - str | None, + disable_output_cap: Annotated[ + bool | None, Field( - description="Identifier for the underlying PTY process (when available)." 
+ alias="disableOutputCap", + description="Disable stdout/stderr capture truncation for this request.\n\nCannot be combined with `outputBytesCap`.", ), ] = None - source: Annotated[ - ExecCommandSource | None, - Field( - description="Where the command originated. Defaults to Agent for backward compatibility." - ), - ] = "agent" - status: Annotated[ - ExecCommandStatus, - Field(description="Completion status for this command execution."), - ] - stderr: Annotated[str, Field(description="Captured stderr")] - stdout: Annotated[str, Field(description="Captured stdout")] - turn_id: Annotated[str, Field(description="Turn ID that this command belongs to.")] - type: Annotated[ - Literal["exec_command_end"], Field(title="ExecCommandEndEventMsgType") - ] - - -class RequestPermissionsEventMsg(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - call_id: Annotated[ - str, - Field( - description="Responses API call id for the associated tool call, if available." - ), - ] - permissions: PermissionProfile - reason: str | None = None - turn_id: Annotated[ - str | None, + disable_timeout: Annotated[ + bool | None, Field( - description="Turn ID that this request belongs to. Uses `#[serde(default)]` for backwards compatibility." 
+ alias="disableTimeout", + description="Disable the timeout entirely for this request.\n\nCannot be combined with `timeoutMs`.", ), - ] = "" - type: Annotated[ - Literal["request_permissions"], Field(title="RequestPermissionsEventMsgType") - ] - - -class ElicitationRequestEventMsg(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - id: RequestId - request: ElicitationRequest - server_name: str - turn_id: Annotated[ - str | None, - Field(description="Turn ID that this elicitation belongs to, when known."), ] = None - type: Annotated[ - Literal["elicitation_request"], Field(title="ElicitationRequestEventMsgType") - ] - - -class ApplyPatchApprovalRequestEventMsg(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - call_id: Annotated[ - str, - Field( - description="Responses API call id for the associated patch apply call, if available." - ), - ] - changes: dict[str, FileChange] - grant_root: Annotated[ - str | None, + env: Annotated[ + dict[str, Any] | None, Field( - description="When set, the agent is asking the user to allow writes under this root for the remainder of the session." + description="Optional environment overrides merged into the server-computed environment.\n\nMatching names override inherited values. Set a key to `null` to unset an inherited variable." ), ] = None - reason: Annotated[ - str | None, + output_bytes_cap: Annotated[ + int | None, Field( - description="Optional explanatory reason (e.g. request for extra write access)." + alias="outputBytesCap", + description="Optional per-stream stdout/stderr capture cap in bytes.\n\nWhen omitted, the server default applies. Cannot be combined with `disableOutputCap`.", + ge=0, ), ] = None - turn_id: Annotated[ + process_id: Annotated[ str | None, Field( - description="Turn ID that this patch belongs to. Uses `#[serde(default)]` for backwards compatibility with older senders." 
+ alias="processId", + description="Optional client-supplied, connection-scoped process id.\n\nRequired for `tty`, `streamStdin`, `streamStdoutStderr`, and follow-up `command/exec/write`, `command/exec/resize`, and `command/exec/terminate` calls. When omitted, buffered execution gets an internal id that is not exposed to the client.", ), - ] = "" - type: Annotated[ - Literal["apply_patch_approval_request"], - Field(title="ApplyPatchApprovalRequestEventMsgType"), - ] - - -class PatchApplyBeginEventMsg(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - auto_approved: Annotated[ - bool, + ] = None + sandbox_policy: Annotated[ + SandboxPolicy | None, Field( - description="If true, there was no ApplyPatchApprovalRequest for this patch." + alias="sandboxPolicy", + description="Optional sandbox policy for this command.\n\nUses the same shape as thread/turn execution sandbox configuration and defaults to the user's configured policy when omitted.", ), - ] - call_id: Annotated[ - str, + ] = None + size: Annotated[ + CommandExecTerminalSize | None, Field( - description="Identifier so this can be paired with the PatchApplyEnd event." + description="Optional initial PTY size in character cells. Only valid when `tty` is true." ), - ] - changes: Annotated[ - dict[str, FileChange], Field(description="The changes to be applied.") - ] - turn_id: Annotated[ - str | None, + ] = None + stream_stdin: Annotated[ + bool | None, Field( - description="Turn ID that this patch belongs to. Uses `#[serde(default)]` for backwards compatibility." 
+ alias="streamStdin", + description="Allow follow-up `command/exec/write` requests to write stdin bytes.\n\nRequires a client-supplied `processId`.", ), - ] = "" - type: Annotated[ - Literal["patch_apply_begin"], Field(title="PatchApplyBeginEventMsgType") - ] - - -class PatchApplyEndEventMsg(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - call_id: Annotated[ - str, Field(description="Identifier for the PatchApplyBegin that finished.") - ] - changes: Annotated[ - dict[str, FileChange] | None, + ] = None + stream_stdout_stderr: Annotated[ + bool | None, Field( - description="The changes that were applied (mirrors PatchApplyBeginEvent::changes)." + alias="streamStdoutStderr", + description="Stream stdout/stderr via `command/exec/outputDelta` notifications.\n\nStreamed bytes are not duplicated into the final response and require a client-supplied `processId`.", ), - ] = {} - status: Annotated[ - PatchApplyStatus, - Field(description="Completion status for this patch application."), - ] - stderr: Annotated[ - str, Field(description="Captured stderr (parser errors, IO failures, etc.).") - ] - stdout: Annotated[ - str, Field(description="Captured stdout (summary printed by apply_patch).") - ] - success: Annotated[ - bool, Field(description="Whether the patch was applied successfully.") - ] - turn_id: Annotated[ - str | None, + ] = None + timeout_ms: Annotated[ + int | None, Field( - description="Turn ID that this patch belongs to. Uses `#[serde(default)]` for backwards compatibility." + alias="timeoutMs", + description="Optional timeout in milliseconds.\n\nWhen omitted, the server default applies. 
Cannot be combined with `disableTimeout`.", ), - ] = "" - type: Annotated[ - Literal["patch_apply_end"], Field(title="PatchApplyEndEventMsgType") - ] - - -class GetHistoryEntryResponseEventMsg(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - entry: Annotated[ - HistoryEntry | None, + ] = None + tty: Annotated[ + bool | None, Field( - description="The entry at the requested offset, if available and parseable." + description="Enable PTY mode.\n\nThis implies `streamStdin` and `streamStdoutStderr`." ), ] = None - log_id: Annotated[int, Field(ge=0)] - offset: Annotated[int, Field(ge=0)] - type: Annotated[ - Literal["get_history_entry_response"], - Field(title="GetHistoryEntryResponseEventMsgType"), - ] -class McpListToolsResponseEventMsg(BaseModel): +class CommandExecResizeParams(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - auth_statuses: Annotated[ - dict[str, McpAuthStatus], - Field(description="Authentication status for each configured MCP server."), - ] - resource_templates: Annotated[ - dict[str, list[ResourceTemplate]], - Field(description="Known resource templates grouped by server name."), - ] - resources: Annotated[ - dict[str, list[Resource]], - Field(description="Known resources grouped by server name."), - ] - tools: Annotated[ - dict[str, Tool], - Field(description="Fully qualified tool name -> tool definition."), - ] - type: Annotated[ - Literal["mcp_list_tools_response"], - Field(title="McpListToolsResponseEventMsgType"), + process_id: Annotated[ + str, + Field( + alias="processId", + description="Client-supplied, connection-scoped `processId` from the original `command/exec` request.", + ), ] - - -class ListRemoteSkillsResponseEventMsg(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - skills: list[RemoteSkillSummary] - type: Annotated[ - Literal["list_remote_skills_response"], - Field(title="ListRemoteSkillsResponseEventMsgType"), + size: Annotated[ + CommandExecTerminalSize, 
Field(description="New PTY size in character cells.") ] -class TurnAbortedEventMsg(BaseModel): +class ConfigEdit(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - reason: TurnAbortReason - turn_id: str | None = None - type: Annotated[Literal["turn_aborted"], Field(title="TurnAbortedEventMsgType")] + key_path: Annotated[str, Field(alias="keyPath")] + merge_strategy: Annotated[MergeStrategy, Field(alias="mergeStrategy")] + value: Any -class EnteredReviewModeEventMsg(BaseModel): +class ConfigLayer(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - target: ReviewTarget - type: Annotated[ - Literal["entered_review_mode"], Field(title="EnteredReviewModeEventMsgType") - ] - user_facing_hint: str | None = None + config: Any + disabled_reason: Annotated[str | None, Field(alias="disabledReason")] = None + name: ConfigLayerSource + version: str -class CollabAgentSpawnBeginEventMsg(BaseModel): +class ConfigLayerMetadata(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - call_id: Annotated[str, Field(description="Identifier for the collab tool call.")] - model: str - prompt: Annotated[ - str, - Field( - description="Initial prompt sent to the agent. Can be empty to prevent CoT leaking at the beginning." 
- ), - ] - reasoning_effort: ReasoningEffort - sender_thread_id: Annotated[ThreadId, Field(description="Thread ID of the sender.")] - type: Annotated[ - Literal["collab_agent_spawn_begin"], - Field(title="CollabAgentSpawnBeginEventMsgType"), - ] + name: ConfigLayerSource + version: str -class CollabAgentSpawnEndEventMsg(BaseModel): +class ConfigRequirements(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - call_id: Annotated[str, Field(description="Identifier for the collab tool call.")] - model: Annotated[str, Field(description="Model requested for the spawned agent.")] - new_agent_nickname: Annotated[ - str | None, Field(description="Optional nickname assigned to the new agent.") + allowed_approval_policies: Annotated[ + list[AskForApproval] | None, Field(alias="allowedApprovalPolicies") ] = None - new_agent_role: Annotated[ - str | None, Field(description="Optional role assigned to the new agent.") + allowed_sandbox_modes: Annotated[ + list[SandboxMode] | None, Field(alias="allowedSandboxModes") ] = None - new_thread_id: Annotated[ - ThreadId | None, - Field(description="Thread ID of the newly spawned agent, if it was created."), + allowed_web_search_modes: Annotated[ + list[WebSearchMode] | None, Field(alias="allowedWebSearchModes") + ] = None + enforce_residency: Annotated[ + ResidencyRequirement | None, Field(alias="enforceResidency") + ] = None + feature_requirements: Annotated[ + dict[str, Any] | None, Field(alias="featureRequirements") ] = None - prompt: Annotated[ - str, - Field( - description="Initial prompt sent to the agent. Can be empty to prevent CoT leaking at the beginning." - ), - ] - reasoning_effort: Annotated[ - ReasoningEffort, - Field(description="Reasoning effort requested for the spawned agent."), - ] - sender_thread_id: Annotated[ThreadId, Field(description="Thread ID of the sender.")] - status: Annotated[ - AgentStatus, - Field( - description="Last known status of the new agent reported to the sender agent." 
- ), - ] - type: Annotated[ - Literal["collab_agent_spawn_end"], - Field(title="CollabAgentSpawnEndEventMsgType"), - ] -class CollabAgentInteractionBeginEventMsg(BaseModel): +class ConfigRequirementsReadResponse(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - call_id: Annotated[str, Field(description="Identifier for the collab tool call.")] - prompt: Annotated[ - str, + requirements: Annotated[ + ConfigRequirements | None, Field( - description="Prompt sent from the sender to the receiver. Can be empty to prevent CoT leaking at the beginning." + description="Null if no requirements are configured (e.g. no requirements.toml/MDM entries)." ), - ] - receiver_thread_id: Annotated[ - ThreadId, Field(description="Thread ID of the receiver.") - ] - sender_thread_id: Annotated[ThreadId, Field(description="Thread ID of the sender.")] - type: Annotated[ - Literal["collab_agent_interaction_begin"], - Field(title="CollabAgentInteractionBeginEventMsgType"), - ] + ] = None -class CollabAgentInteractionEndEventMsg(BaseModel): +class ConfigValueWriteParams(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - call_id: Annotated[str, Field(description="Identifier for the collab tool call.")] - prompt: Annotated[ - str, - Field( - description="Prompt sent from the sender to the receiver. Can be empty to prevent CoT leaking at the beginning." 
- ), - ] - receiver_agent_nickname: Annotated[ + expected_version: Annotated[str | None, Field(alias="expectedVersion")] = None + file_path: Annotated[ str | None, - Field(description="Optional nickname assigned to the receiver agent."), - ] = None - receiver_agent_role: Annotated[ - str | None, Field(description="Optional role assigned to the receiver agent.") - ] = None - receiver_thread_id: Annotated[ - ThreadId, Field(description="Thread ID of the receiver.") - ] - sender_thread_id: Annotated[ThreadId, Field(description="Thread ID of the sender.")] - status: Annotated[ - AgentStatus, Field( - description="Last known status of the receiver agent reported to the sender agent." + alias="filePath", + description="Path to the config file to write; defaults to the user's `config.toml` when omitted.", ), - ] - type: Annotated[ - Literal["collab_agent_interaction_end"], - Field(title="CollabAgentInteractionEndEventMsgType"), - ] - - -class CollabWaitingBeginEventMsg(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - call_id: Annotated[str, Field(description="ID of the waiting call.")] - receiver_agents: Annotated[ - list[CollabAgentRef] | None, - Field(description="Optional nicknames/roles for receivers."), ] = None - receiver_thread_ids: Annotated[ - list[ThreadId], Field(description="Thread ID of the receivers.") - ] - sender_thread_id: Annotated[ThreadId, Field(description="Thread ID of the sender.")] - type: Annotated[ - Literal["collab_waiting_begin"], Field(title="CollabWaitingBeginEventMsgType") - ] + key_path: Annotated[str, Field(alias="keyPath")] + merge_strategy: Annotated[MergeStrategy, Field(alias="mergeStrategy")] + value: Any -class CollabWaitingEndEventMsg(BaseModel): +class ConfigWarningNotification(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - agent_statuses: Annotated[ - list[CollabAgentStatusEntry] | None, - Field(description="Optional receiver metadata paired with final statuses."), + details: Annotated[ + 
str | None, Field(description="Optional extra guidance or error details.") ] = None - call_id: Annotated[str, Field(description="ID of the waiting call.")] - sender_thread_id: Annotated[ThreadId, Field(description="Thread ID of the sender.")] - statuses: Annotated[ - dict[str, AgentStatus], + path: Annotated[ + str | None, Field( - description="Last known status of the receiver agents reported to the sender agent." + description="Optional path to the config file that triggered the warning." ), - ] - type: Annotated[ - Literal["collab_waiting_end"], Field(title="CollabWaitingEndEventMsgType") - ] - - -class CollabCloseBeginEventMsg(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - call_id: Annotated[str, Field(description="Identifier for the collab tool call.")] - receiver_thread_id: Annotated[ - ThreadId, Field(description="Thread ID of the receiver.") - ] - sender_thread_id: Annotated[ThreadId, Field(description="Thread ID of the sender.")] - type: Annotated[ - Literal["collab_close_begin"], Field(title="CollabCloseBeginEventMsgType") - ] - - -class CollabCloseEndEventMsg(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - call_id: Annotated[str, Field(description="Identifier for the collab tool call.")] - receiver_agent_nickname: Annotated[ - str | None, - Field(description="Optional nickname assigned to the receiver agent."), - ] = None - receiver_agent_role: Annotated[ - str | None, Field(description="Optional role assigned to the receiver agent.") ] = None - receiver_thread_id: Annotated[ - ThreadId, Field(description="Thread ID of the receiver.") - ] - sender_thread_id: Annotated[ThreadId, Field(description="Thread ID of the sender.")] - status: Annotated[ - AgentStatus, + range: Annotated[ + TextRange | None, Field( - description="Last known status of the receiver agent reported to the sender agent before the close." + description="Optional range for the error location inside the config file." 
), - ] - type: Annotated[ - Literal["collab_close_end"], Field(title="CollabCloseEndEventMsgType") - ] - - -class CollabResumeBeginEventMsg(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - call_id: Annotated[str, Field(description="Identifier for the collab tool call.")] - receiver_agent_nickname: Annotated[ - str | None, - Field(description="Optional nickname assigned to the receiver agent."), ] = None - receiver_agent_role: Annotated[ - str | None, Field(description="Optional role assigned to the receiver agent.") - ] = None - receiver_thread_id: Annotated[ - ThreadId, Field(description="Thread ID of the receiver.") - ] - sender_thread_id: Annotated[ThreadId, Field(description="Thread ID of the sender.")] - type: Annotated[ - Literal["collab_resume_begin"], Field(title="CollabResumeBeginEventMsgType") - ] + summary: Annotated[str, Field(description="Concise summary of the warning.")] -class CollabResumeEndEventMsg(BaseModel): +class ErrorNotification(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - call_id: Annotated[str, Field(description="Identifier for the collab tool call.")] - receiver_agent_nickname: Annotated[ - str | None, - Field(description="Optional nickname assigned to the receiver agent."), - ] = None - receiver_agent_role: Annotated[ - str | None, Field(description="Optional role assigned to the receiver agent.") - ] = None - receiver_thread_id: Annotated[ - ThreadId, Field(description="Thread ID of the receiver.") - ] - sender_thread_id: Annotated[ThreadId, Field(description="Thread ID of the sender.")] - status: Annotated[ - AgentStatus, - Field( - description="Last known status of the receiver agent reported to the sender agent after resume." 
- ), - ] - type: Annotated[ - Literal["collab_resume_end"], Field(title="CollabResumeEndEventMsgType") - ] + error: TurnError + thread_id: Annotated[str, Field(alias="threadId")] + turn_id: Annotated[str, Field(alias="turnId")] + will_retry: Annotated[bool, Field(alias="willRetry")] class ExperimentalFeature(BaseModel): @@ -6082,195 +4300,80 @@ class Model(BaseModel): upgrade_info: Annotated[ModelUpgradeInfo | None, Field(alias="upgradeInfo")] = None -class ModelListResponse(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - data: list[Model] - next_cursor: Annotated[ - str | None, - Field( - alias="nextCursor", - description="Opaque cursor to pass to the next call to continue after the last item. If None, there are no more items to return.", - ), - ] = None - - -class NetworkApprovalContext(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - host: str - protocol: NetworkApprovalProtocol - - -class NetworkPolicyAmendment(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - action: NetworkPolicyRuleAction - host: str - - -class OverriddenMetadata(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - effective_value: Annotated[Any, Field(alias="effectiveValue")] - message: str - overriding_layer: Annotated[ConfigLayerMetadata, Field(alias="overridingLayer")] - - -class PlanItemArg(BaseModel): - model_config = ConfigDict( - extra="forbid", - populate_by_name=True, - ) - status: StepStatus - step: str - - -class PluginMarketplaceEntry(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - name: str - path: AbsolutePathBuf - plugins: list[PluginSummary] - - -class RateLimitSnapshot(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - credits: CreditsSnapshot | None = None - limit_id: Annotated[str | None, Field(alias="limitId")] = None - limit_name: Annotated[str | None, Field(alias="limitName")] = None - plan_type: Annotated[PlanType | None, 
Field(alias="planType")] = None - primary: RateLimitWindow | None = None - secondary: RateLimitWindow | None = None - - -class InputTranscriptDeltaRealtimeEvent(BaseModel): - model_config = ConfigDict( - extra="forbid", - populate_by_name=True, - ) - input_transcript_delta: Annotated[ - RealtimeTranscriptDelta, Field(alias="InputTranscriptDelta") - ] - - -class OutputTranscriptDeltaRealtimeEvent(BaseModel): - model_config = ConfigDict( - extra="forbid", - populate_by_name=True, - ) - output_transcript_delta: Annotated[ - RealtimeTranscriptDelta, Field(alias="OutputTranscriptDelta") - ] - - -class RealtimeHandoffRequested(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - active_transcript: list[RealtimeTranscriptEntry] - handoff_id: str - input_transcript: str - item_id: str - - -class RequestUserInputQuestion(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - header: str - id: str - is_other: Annotated[bool | None, Field(alias="isOther")] = False - is_secret: Annotated[bool | None, Field(alias="isSecret")] = False - options: list[RequestUserInputQuestionOption] | None = None - question: str - - -class WebSearchCallResponseItem(BaseModel): +class ModelListResponse(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - action: ResponsesApiWebSearchAction | None = None - id: str | None = None - status: str | None = None - type: Annotated[ - Literal["web_search_call"], Field(title="WebSearchCallResponseItemType") - ] + data: list[Model] + next_cursor: Annotated[ + str | None, + Field( + alias="nextCursor", + description="Opaque cursor to pass to the next call to continue after the last item. 
If None, there are no more items to return.", + ), + ] = None -class ReviewCodeLocation(BaseModel): +class OverriddenMetadata(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - absolute_file_path: str - line_range: ReviewLineRange + effective_value: Annotated[Any, Field(alias="effectiveValue")] + message: str + overriding_layer: Annotated[ConfigLayerMetadata, Field(alias="overridingLayer")] -class NetworkPolicyAmendment1(BaseModel): +class PluginDetail(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - network_policy_amendment: NetworkPolicyAmendment + apps: list[AppSummary] + description: str | None = None + marketplace_name: Annotated[str, Field(alias="marketplaceName")] + marketplace_path: Annotated[AbsolutePathBuf, Field(alias="marketplacePath")] + mcp_servers: Annotated[list[str], Field(alias="mcpServers")] + skills: list[SkillSummary] + summary: PluginSummary -class NetworkPolicyAmendmentReviewDecision(BaseModel): +class PluginMarketplaceEntry(BaseModel): model_config = ConfigDict( - extra="forbid", populate_by_name=True, ) - network_policy_amendment: NetworkPolicyAmendment1 + name: str + path: AbsolutePathBuf + plugins: list[PluginSummary] -class ReviewDecision( - RootModel[ - Literal["approved"] - | ApprovedExecpolicyAmendmentReviewDecision - | Literal["approved_for_session"] - | NetworkPolicyAmendmentReviewDecision - | Literal["denied"] - | Literal["abort"] - ] -): +class PluginReadResponse(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - root: Annotated[ - Literal["approved"] - | ApprovedExecpolicyAmendmentReviewDecision - | Literal["approved_for_session"] - | NetworkPolicyAmendmentReviewDecision - | Literal["denied"] - | Literal["abort"], - Field(description="User's decision in response to an ExecApprovalRequest."), - ] + plugin: PluginDetail -class ReviewFinding(BaseModel): +class RateLimitSnapshot(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - body: str - code_location: 
ReviewCodeLocation - confidence_score: float - priority: int - title: str + credits: CreditsSnapshot | None = None + limit_id: Annotated[str | None, Field(alias="limitId")] = None + limit_name: Annotated[str | None, Field(alias="limitName")] = None + plan_type: Annotated[PlanType | None, Field(alias="planType")] = None + primary: RateLimitWindow | None = None + secondary: RateLimitWindow | None = None -class ReviewOutputEvent(BaseModel): +class WebSearchCallResponseItem(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - findings: list[ReviewFinding] - overall_confidence_score: float - overall_correctness: str - overall_explanation: str + action: ResponsesApiWebSearchAction | None = None + id: str | None = None + status: str | None = None + type: Annotated[ + Literal["web_search_call"], Field(title="WebSearchCallResponseItemType") + ] class ReviewStartParams(BaseModel): @@ -6810,40 +4913,6 @@ class TurnCompletedNotification(BaseModel): turn: Turn -class UserMessageTurnItem(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - content: list[UserInput] - id: str - type: Annotated[Literal["UserMessage"], Field(title="UserMessageTurnItemType")] - - -class TurnItem( - RootModel[ - UserMessageTurnItem - | AgentMessageTurnItem - | PlanTurnItem - | ReasoningTurnItem - | WebSearchTurnItem - | ImageGenerationTurnItem - | ContextCompactionTurnItem - ] -): - model_config = ConfigDict( - populate_by_name=True, - ) - root: ( - UserMessageTurnItem - | AgentMessageTurnItem - | PlanTurnItem - | ReasoningTurnItem - | WebSearchTurnItem - | ImageGenerationTurnItem - | ContextCompactionTurnItem - ) - - class TurnPlanStep(BaseModel): model_config = ConfigDict( populate_by_name=True, @@ -7129,178 +5198,6 @@ class ConfigWriteResponse(BaseModel): version: str -class TokenCountEventMsg(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - info: TokenUsageInfo | None = None - rate_limits: RateLimitSnapshot | None = None - type: 
Annotated[Literal["token_count"], Field(title="TokenCountEventMsgType")] - - -class ExecApprovalRequestEventMsg(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - additional_permissions: Annotated[ - PermissionProfile | None, - Field( - description="Optional additional filesystem permissions requested for this command." - ), - ] = None - approval_id: Annotated[ - str | None, - Field( - description="Identifier for this specific approval callback.\n\nWhen absent, the approval is for the command item itself (`call_id`). This is present for subcommand approvals (via execve intercept)." - ), - ] = None - available_decisions: Annotated[ - list[ReviewDecision] | None, - Field( - description="Ordered list of decisions the client may present for this prompt.\n\nWhen absent, clients should derive the legacy default set from the other fields on this request." - ), - ] = None - call_id: Annotated[ - str, Field(description="Identifier for the associated command execution item.") - ] - command: Annotated[list[str], Field(description="The command to be executed.")] - cwd: Annotated[str, Field(description="The command's working directory.")] - network_approval_context: Annotated[ - NetworkApprovalContext | None, - Field( - description="Optional network context for a blocked request that can be approved." - ), - ] = None - parsed_cmd: list[ParsedCommand] - proposed_execpolicy_amendment: Annotated[ - list[str] | None, - Field( - description="Proposed execpolicy amendment that can be applied to allow future runs." - ), - ] = None - proposed_network_policy_amendments: Annotated[ - list[NetworkPolicyAmendment] | None, - Field( - description="Proposed network policy amendments (for example allow/deny this host in future)." - ), - ] = None - reason: Annotated[ - str | None, - Field( - description="Optional human-readable reason for the approval (e.g. retry without sandbox)." 
- ), - ] = None - skill_metadata: Annotated[ - ExecApprovalRequestSkillMetadata | None, - Field( - description="Optional skill metadata when the approval was triggered by a skill script." - ), - ] = None - turn_id: Annotated[ - str | None, - Field( - description="Turn ID that this command belongs to. Uses `#[serde(default)]` for backwards compatibility." - ), - ] = "" - type: Annotated[ - Literal["exec_approval_request"], Field(title="ExecApprovalRequestEventMsgType") - ] - - -class RequestUserInputEventMsg(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - call_id: Annotated[ - str, - Field( - description="Responses API call id for the associated tool call, if available." - ), - ] - questions: list[RequestUserInputQuestion] - turn_id: Annotated[ - str | None, - Field( - description="Turn ID that this request belongs to. Uses `#[serde(default)]` for backwards compatibility." - ), - ] = "" - type: Annotated[ - Literal["request_user_input"], Field(title="RequestUserInputEventMsgType") - ] - - -class ListSkillsResponseEventMsg(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - skills: list[SkillsListEntry] - type: Annotated[ - Literal["list_skills_response"], Field(title="ListSkillsResponseEventMsgType") - ] - - -class PlanUpdateEventMsg(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - explanation: Annotated[ - str | None, - Field( - description="Arguments for the `update_plan` todo/checklist tool (not plan mode)." 
- ), - ] = None - plan: list[PlanItemArg] - type: Annotated[Literal["plan_update"], Field(title="PlanUpdateEventMsgType")] - - -class ExitedReviewModeEventMsg(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - review_output: ReviewOutputEvent | None = None - type: Annotated[ - Literal["exited_review_mode"], Field(title="ExitedReviewModeEventMsgType") - ] - - -class ItemStartedEventMsg(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - item: TurnItem - thread_id: ThreadId - turn_id: str - type: Annotated[Literal["item_started"], Field(title="ItemStartedEventMsgType")] - - -class ItemCompletedEventMsg(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - item: TurnItem - thread_id: ThreadId - turn_id: str - type: Annotated[Literal["item_completed"], Field(title="ItemCompletedEventMsgType")] - - -class HookStartedEventMsg(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - run: HookRunSummary - turn_id: str | None = None - type: Annotated[Literal["hook_started"], Field(title="HookStartedEventMsgType")] - - -class HookCompletedEventMsg(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - run: HookRunSummary - turn_id: str | None = None - type: Annotated[Literal["hook_completed"], Field(title="HookCompletedEventMsgType")] - - class ExternalAgentConfigDetectResponse(BaseModel): model_config = ConfigDict( populate_by_name=True, @@ -7418,43 +5315,6 @@ class ProfileV2(BaseModel): web_search: WebSearchMode | None = None -class HandoffRequestedRealtimeEvent(BaseModel): - model_config = ConfigDict( - extra="forbid", - populate_by_name=True, - ) - handoff_requested: Annotated[ - RealtimeHandoffRequested, Field(alias="HandoffRequested") - ] - - -class RealtimeEvent( - RootModel[ - SessionUpdatedRealtimeEvent - | InputTranscriptDeltaRealtimeEvent - | OutputTranscriptDeltaRealtimeEvent - | AudioOutRealtimeEvent - | ConversationItemAddedRealtimeEvent - | 
ConversationItemDoneRealtimeEvent - | HandoffRequestedRealtimeEvent - | ErrorRealtimeEvent - ] -): - model_config = ConfigDict( - populate_by_name=True, - ) - root: ( - SessionUpdatedRealtimeEvent - | InputTranscriptDeltaRealtimeEvent - | OutputTranscriptDeltaRealtimeEvent - | AudioOutRealtimeEvent - | ConversationItemAddedRealtimeEvent - | ConversationItemDoneRealtimeEvent - | HandoffRequestedRealtimeEvent - | ErrorRealtimeEvent - ) - - class FunctionCallOutputResponseItem(BaseModel): model_config = ConfigDict( populate_by_name=True, @@ -7883,6 +5743,7 @@ class ClientRequest( | ThreadReadRequest | SkillsListRequest | PluginListRequest + | PluginReadRequest | SkillsRemoteListRequest | SkillsRemoteExportRequest | AppListRequest @@ -7938,6 +5799,7 @@ class ClientRequest( | ThreadReadRequest | SkillsListRequest | PluginListRequest + | PluginReadRequest | SkillsRemoteListRequest | SkillsRemoteExportRequest | AppListRequest @@ -8015,27 +5877,6 @@ class ConfigReadResponse(BaseModel): origins: dict[str, ConfigLayerMetadata] -class RealtimeConversationRealtimeEventMsg(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - payload: RealtimeEvent - type: Annotated[ - Literal["realtime_conversation_realtime"], - Field(title="RealtimeConversationRealtimeEventMsgType"), - ] - - -class RawResponseItemEventMsg(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - item: ResponseItem - type: Annotated[ - Literal["raw_response_item"], Field(title="RawResponseItemEventMsgType") - ] - - class RawResponseItemCompletedNotification(BaseModel): model_config = ConfigDict( populate_by_name=True, @@ -8160,248 +6001,3 @@ class ServerNotification( title="ServerNotification", ), ] - - -class SessionConfiguredEventMsg(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - approval_policy: Annotated[ - AskForApproval, Field(description="When to escalate for approval for execution") - ] - cwd: Annotated[ - str, - Field( - description="Working 
directory that should be treated as the *root* of the session." - ), - ] - forked_from_id: ThreadId | None = None - history_entry_count: Annotated[ - int, Field(description="Current number of entries in the history log.", ge=0) - ] - history_log_id: Annotated[ - int, - Field( - description="Identifier of the history log file (inode on Unix, 0 otherwise).", - ge=0, - ), - ] - initial_messages: Annotated[ - list[EventMsg] | None, - Field( - description="Optional initial messages (as events) for resumed sessions. When present, UIs can use these to seed the history." - ), - ] = None - model: Annotated[ - str, Field(description="Tell the client what model is being queried.") - ] - model_provider_id: str - network_proxy: Annotated[ - SessionNetworkProxyRuntime | None, - Field( - description="Runtime proxy bind addresses, when the managed proxy was started for this session." - ), - ] = None - reasoning_effort: Annotated[ - ReasoningEffort | None, - Field( - description="The effort the model is putting into reasoning about the user's request." - ), - ] = None - rollout_path: Annotated[ - str | None, - Field( - description="Path in which the rollout is stored. 
Can be `None` for ephemeral threads" - ), - ] = None - sandbox_policy: Annotated[ - SandboxPolicy, - Field(description="How to sandbox commands executed in the system"), - ] - service_tier: ServiceTier | None = None - session_id: ThreadId - thread_name: Annotated[ - str | None, - Field(description="Optional user-facing thread name (may be unset)."), - ] = None - type: Annotated[ - Literal["session_configured"], Field(title="SessionConfiguredEventMsgType") - ] - - -class EventMsg( - RootModel[ - ErrorEventMsg - | WarningEventMsg - | RealtimeConversationStartedEventMsg - | RealtimeConversationRealtimeEventMsg - | RealtimeConversationClosedEventMsg - | ModelRerouteEventMsg - | ContextCompactedEventMsg - | ThreadRolledBackEventMsg - | TaskStartedEventMsg - | TaskCompleteEventMsg - | TokenCountEventMsg - | AgentMessageEventMsg - | UserMessageEventMsg - | AgentMessageDeltaEventMsg - | AgentReasoningEventMsg - | AgentReasoningDeltaEventMsg - | AgentReasoningRawContentEventMsg - | AgentReasoningRawContentDeltaEventMsg - | AgentReasoningSectionBreakEventMsg - | SessionConfiguredEventMsg - | ThreadNameUpdatedEventMsg - | McpStartupUpdateEventMsg - | McpStartupCompleteEventMsg - | McpToolCallBeginEventMsg - | McpToolCallEndEventMsg - | WebSearchBeginEventMsg - | WebSearchEndEventMsg - | ImageGenerationBeginEventMsg - | ImageGenerationEndEventMsg - | ExecCommandBeginEventMsg - | ExecCommandOutputDeltaEventMsg - | TerminalInteractionEventMsg - | ExecCommandEndEventMsg - | ViewImageToolCallEventMsg - | ExecApprovalRequestEventMsg - | RequestPermissionsEventMsg - | RequestUserInputEventMsg - | DynamicToolCallRequestEventMsg - | DynamicToolCallResponseEventMsg - | ElicitationRequestEventMsg - | ApplyPatchApprovalRequestEventMsg - | DeprecationNoticeEventMsg - | BackgroundEventEventMsg - | UndoStartedEventMsg - | UndoCompletedEventMsg - | StreamErrorEventMsg - | PatchApplyBeginEventMsg - | PatchApplyEndEventMsg - | TurnDiffEventMsg - | GetHistoryEntryResponseEventMsg - | 
McpListToolsResponseEventMsg - | ListCustomPromptsResponseEventMsg - | ListSkillsResponseEventMsg - | ListRemoteSkillsResponseEventMsg - | RemoteSkillDownloadedEventMsg - | SkillsUpdateAvailableEventMsg - | PlanUpdateEventMsg - | TurnAbortedEventMsg - | ShutdownCompleteEventMsg - | EnteredReviewModeEventMsg - | ExitedReviewModeEventMsg - | RawResponseItemEventMsg - | ItemStartedEventMsg - | ItemCompletedEventMsg - | HookStartedEventMsg - | HookCompletedEventMsg - | AgentMessageContentDeltaEventMsg - | PlanDeltaEventMsg - | ReasoningContentDeltaEventMsg - | ReasoningRawContentDeltaEventMsg - | CollabAgentSpawnBeginEventMsg - | CollabAgentSpawnEndEventMsg - | CollabAgentInteractionBeginEventMsg - | CollabAgentInteractionEndEventMsg - | CollabWaitingBeginEventMsg - | CollabWaitingEndEventMsg - | CollabCloseBeginEventMsg - | CollabCloseEndEventMsg - | CollabResumeBeginEventMsg - | CollabResumeEndEventMsg - ] -): - model_config = ConfigDict( - populate_by_name=True, - ) - root: Annotated[ - ErrorEventMsg - | WarningEventMsg - | RealtimeConversationStartedEventMsg - | RealtimeConversationRealtimeEventMsg - | RealtimeConversationClosedEventMsg - | ModelRerouteEventMsg - | ContextCompactedEventMsg - | ThreadRolledBackEventMsg - | TaskStartedEventMsg - | TaskCompleteEventMsg - | TokenCountEventMsg - | AgentMessageEventMsg - | UserMessageEventMsg - | AgentMessageDeltaEventMsg - | AgentReasoningEventMsg - | AgentReasoningDeltaEventMsg - | AgentReasoningRawContentEventMsg - | AgentReasoningRawContentDeltaEventMsg - | AgentReasoningSectionBreakEventMsg - | SessionConfiguredEventMsg - | ThreadNameUpdatedEventMsg - | McpStartupUpdateEventMsg - | McpStartupCompleteEventMsg - | McpToolCallBeginEventMsg - | McpToolCallEndEventMsg - | WebSearchBeginEventMsg - | WebSearchEndEventMsg - | ImageGenerationBeginEventMsg - | ImageGenerationEndEventMsg - | ExecCommandBeginEventMsg - | ExecCommandOutputDeltaEventMsg - | TerminalInteractionEventMsg - | ExecCommandEndEventMsg - | 
ViewImageToolCallEventMsg - | ExecApprovalRequestEventMsg - | RequestPermissionsEventMsg - | RequestUserInputEventMsg - | DynamicToolCallRequestEventMsg - | DynamicToolCallResponseEventMsg - | ElicitationRequestEventMsg - | ApplyPatchApprovalRequestEventMsg - | DeprecationNoticeEventMsg - | BackgroundEventEventMsg - | UndoStartedEventMsg - | UndoCompletedEventMsg - | StreamErrorEventMsg - | PatchApplyBeginEventMsg - | PatchApplyEndEventMsg - | TurnDiffEventMsg - | GetHistoryEntryResponseEventMsg - | McpListToolsResponseEventMsg - | ListCustomPromptsResponseEventMsg - | ListSkillsResponseEventMsg - | ListRemoteSkillsResponseEventMsg - | RemoteSkillDownloadedEventMsg - | SkillsUpdateAvailableEventMsg - | PlanUpdateEventMsg - | TurnAbortedEventMsg - | ShutdownCompleteEventMsg - | EnteredReviewModeEventMsg - | ExitedReviewModeEventMsg - | RawResponseItemEventMsg - | ItemStartedEventMsg - | ItemCompletedEventMsg - | HookStartedEventMsg - | HookCompletedEventMsg - | AgentMessageContentDeltaEventMsg - | PlanDeltaEventMsg - | ReasoningContentDeltaEventMsg - | ReasoningRawContentDeltaEventMsg - | CollabAgentSpawnBeginEventMsg - | CollabAgentSpawnEndEventMsg - | CollabAgentInteractionBeginEventMsg - | CollabAgentInteractionEndEventMsg - | CollabWaitingBeginEventMsg - | CollabWaitingEndEventMsg - | CollabCloseBeginEventMsg - | CollabCloseEndEventMsg - | CollabResumeBeginEventMsg - | CollabResumeEndEventMsg, - Field( - description="Response event from the agent NOTE: Make sure none of these values have optional types, as it will mess up the extension code-gen.", - title="EventMsg", - ), - ] - - -SessionConfiguredEventMsg.model_rebuild() diff --git a/sdk/python/src/codex_app_server/generated/v2_types.py b/sdk/python/src/codex_app_server/generated/v2_types.py deleted file mode 100644 index ef28f982e5a..00000000000 --- a/sdk/python/src/codex_app_server/generated/v2_types.py +++ /dev/null @@ -1,23 +0,0 @@ -"""Stable aliases over the canonical generated v2 models.""" - -from 
.v2_all import ( - ModelListResponse, - ThreadCompactStartResponse, - ThreadItem, - ThreadListResponse, - ThreadReadResponse, - ThreadTokenUsageUpdatedNotification, - TurnCompletedNotification as TurnCompletedNotificationPayload, - TurnSteerResponse, -) - -__all__ = [ - "ModelListResponse", - "ThreadCompactStartResponse", - "ThreadItem", - "ThreadListResponse", - "ThreadReadResponse", - "ThreadTokenUsageUpdatedNotification", - "TurnCompletedNotificationPayload", - "TurnSteerResponse", -] diff --git a/sdk/python/src/codex_app_server/public_api.py b/sdk/python/src/codex_app_server/public_api.py index 842dcfe539d..10d4b780243 100644 --- a/sdk/python/src/codex_app_server/public_api.py +++ b/sdk/python/src/codex_app_server/public_api.py @@ -7,52 +7,33 @@ from .async_client import AsyncAppServerClient from .client import AppServerClient, AppServerConfig from .generated.v2_all import ( - AgentMessageDeltaNotification, - RawResponseItemCompletedNotification, - ThreadArchiveResponse, - ThreadSetNameResponse, - TurnError, - TurnInterruptResponse, -) -from .generated.v2_types import ( - ModelListResponse, - ThreadCompactStartResponse, - ThreadItem, - ThreadListResponse, - ThreadReadResponse, - ThreadTokenUsageUpdatedNotification, - TurnCompletedNotificationPayload, - TurnSteerResponse, -) -from .models import InitializeResponse, JsonObject, Notification -from .public_types import ( AskForApproval, + ModelListResponse, Personality, ReasoningEffort, ReasoningSummary, SandboxMode, SandboxPolicy, ServiceTier, + ThreadArchiveResponse, + ThreadCompactStartResponse, ThreadForkParams, + ThreadItem, ThreadListParams, + ThreadListResponse, + ThreadReadResponse, ThreadResumeParams, + ThreadSetNameResponse, ThreadSortKey, ThreadSourceKind, ThreadStartParams, + Turn as AppServerTurn, + TurnCompletedNotification, + TurnInterruptResponse, TurnStartParams, - TurnStatus, + TurnSteerResponse, ) - - -@dataclass(slots=True) -class TurnResult: - thread_id: str - turn_id: str - status: TurnStatus 
- error: TurnError | None - text: str - items: list[ThreadItem] - usage: ThreadTokenUsageUpdatedNotification | None = None +from .models import InitializeResponse, JsonObject, Notification @dataclass(slots=True) @@ -86,13 +67,6 @@ class MentionInput: Input = list[InputItem] | InputItem -@dataclass(slots=True) -class InitializeResult: - server_name: str - server_version: str - user_agent: str - - def _to_wire_item(item: InputItem) -> JsonObject: if isinstance(item, TextInput): return {"type": "text", "text": item.text} @@ -126,55 +100,6 @@ def _split_user_agent(user_agent: str) -> tuple[str | None, str | None]: return raw, None -def _enum_value(value: object) -> object: - return getattr(value, "value", value) - - -def _assistant_output_text_chunks( - notification: RawResponseItemCompletedNotification, -) -> list[str]: - item = notification.item.root - if _enum_value(getattr(item, "type", None)) != "message": - return [] - if getattr(item, "role", None) != "assistant": - return [] - - chunks: list[str] = [] - for content in getattr(item, "content", []) or []: - content_item = getattr(content, "root", content) - if _enum_value(getattr(content_item, "type", None)) != "output_text": - continue - text = getattr(content_item, "text", None) - if isinstance(text, str) and text: - chunks.append(text) - return chunks - - -def _build_turn_result( - completed: TurnCompletedNotificationPayload | None, - usage: ThreadTokenUsageUpdatedNotification | None, - delta_chunks: list[str], - raw_text_chunks: list[str], -) -> TurnResult: - if completed is None: - raise RuntimeError("turn completed event not received") - if completed.turn.status == TurnStatus.completed and usage is None: - raise RuntimeError( - "thread/tokenUsage/updated notification not received for completed turn" - ) - - text = "".join(delta_chunks) if delta_chunks else "".join(raw_text_chunks) - return TurnResult( - thread_id=completed.thread_id, - turn_id=completed.turn.id, - status=completed.turn.status, - 
error=completed.turn.error, - text=text, - items=list(completed.turn.items or []), - usage=usage, - ) - - class Codex: """Minimal typed SDK surface for app-server v2.""" @@ -182,7 +107,7 @@ def __init__(self, config: AppServerConfig | None = None) -> None: self._client = AppServerClient(config=config) try: self._client.start() - self._init = self._parse_initialize(self._client.initialize()) + self._init = self._validate_initialize(self._client.initialize()) except Exception: self._client.close() raise @@ -194,7 +119,7 @@ def __exit__(self, _exc_type, _exc, _tb) -> None: self.close() @staticmethod - def _parse_initialize(payload: InitializeResponse) -> InitializeResult: + def _validate_initialize(payload: InitializeResponse) -> InitializeResponse: user_agent = (payload.userAgent or "").strip() server = payload.serverInfo @@ -220,14 +145,10 @@ def _parse_initialize(payload: InitializeResponse) -> InitializeResult: f"(user_agent={user_agent!r}, server_name={normalized_server_name!r}, server_version={normalized_server_version!r})" ) - return InitializeResult( - server_name=normalized_server_name, - server_version=normalized_server_version, - user_agent=user_agent, - ) + return payload @property - def metadata(self) -> InitializeResult: + def metadata(self) -> InitializeResponse: return self._init def close(self) -> None: @@ -370,7 +291,7 @@ class AsyncCodex: def __init__(self, config: AppServerConfig | None = None) -> None: self._client = AsyncAppServerClient(config=config) - self._init: InitializeResult | None = None + self._init: InitializeResponse | None = None self._initialized = False self._init_lock = asyncio.Lock() @@ -390,7 +311,7 @@ async def _ensure_initialized(self) -> None: try: await self._client.start() payload = await self._client.initialize() - self._init = Codex._parse_initialize(payload) + self._init = Codex._validate_initialize(payload) self._initialized = True except Exception: await self._client.close() @@ -399,7 +320,7 @@ async def 
_ensure_initialized(self) -> None: raise @property - def metadata(self) -> InitializeResult: + def metadata(self) -> InitializeResponse: if self._init is None: raise RuntimeError( "AsyncCodex is not initialized yet. Use `async with AsyncCodex()` or call an async API first." @@ -675,50 +596,27 @@ def stream(self) -> Iterator[Notification]: yield event if ( event.method == "turn/completed" - and isinstance(event.payload, TurnCompletedNotificationPayload) + and isinstance(event.payload, TurnCompletedNotification) and event.payload.turn.id == self.id ): break finally: self._client.release_turn_consumer(self.id) - def run(self) -> TurnResult: - completed: TurnCompletedNotificationPayload | None = None - usage: ThreadTokenUsageUpdatedNotification | None = None - delta_chunks: list[str] = [] - raw_text_chunks: list[str] = [] - + def run(self) -> AppServerTurn: + completed: TurnCompletedNotification | None = None stream = self.stream() try: for event in stream: payload = event.payload - if ( - isinstance(payload, AgentMessageDeltaNotification) - and payload.turn_id == self.id - ): - delta_chunks.append(payload.delta) - continue - if ( - isinstance(payload, RawResponseItemCompletedNotification) - and payload.turn_id == self.id - ): - raw_text_chunks.extend(_assistant_output_text_chunks(payload)) - continue - if ( - isinstance(payload, ThreadTokenUsageUpdatedNotification) - and payload.turn_id == self.id - ): - usage = payload - continue - if ( - isinstance(payload, TurnCompletedNotificationPayload) - and payload.turn.id == self.id - ): + if isinstance(payload, TurnCompletedNotification) and payload.turn.id == self.id: completed = payload finally: stream.close() - return _build_turn_result(completed, usage, delta_chunks, raw_text_chunks) + if completed is None: + raise RuntimeError("turn completed event not received") + return completed.turn @dataclass(slots=True) @@ -749,47 +647,24 @@ async def stream(self) -> AsyncIterator[Notification]: yield event if ( event.method == 
"turn/completed" - and isinstance(event.payload, TurnCompletedNotificationPayload) + and isinstance(event.payload, TurnCompletedNotification) and event.payload.turn.id == self.id ): break finally: self._codex._client.release_turn_consumer(self.id) - async def run(self) -> TurnResult: - completed: TurnCompletedNotificationPayload | None = None - usage: ThreadTokenUsageUpdatedNotification | None = None - delta_chunks: list[str] = [] - raw_text_chunks: list[str] = [] - + async def run(self) -> AppServerTurn: + completed: TurnCompletedNotification | None = None stream = self.stream() try: async for event in stream: payload = event.payload - if ( - isinstance(payload, AgentMessageDeltaNotification) - and payload.turn_id == self.id - ): - delta_chunks.append(payload.delta) - continue - if ( - isinstance(payload, RawResponseItemCompletedNotification) - and payload.turn_id == self.id - ): - raw_text_chunks.extend(_assistant_output_text_chunks(payload)) - continue - if ( - isinstance(payload, ThreadTokenUsageUpdatedNotification) - and payload.turn_id == self.id - ): - usage = payload - continue - if ( - isinstance(payload, TurnCompletedNotificationPayload) - and payload.turn.id == self.id - ): + if isinstance(payload, TurnCompletedNotification) and payload.turn.id == self.id: completed = payload finally: await stream.aclose() - return _build_turn_result(completed, usage, delta_chunks, raw_text_chunks) + if completed is None: + raise RuntimeError("turn completed event not received") + return completed.turn diff --git a/sdk/python/src/codex_app_server/public_types.py b/sdk/python/src/codex_app_server/public_types.py deleted file mode 100644 index bc7e67d3ce2..00000000000 --- a/sdk/python/src/codex_app_server/public_types.py +++ /dev/null @@ -1,41 +0,0 @@ -"""Shallow public aliases over the generated v2 wire models.""" - -from .generated.v2_all import ( - AskForApproval, - Personality, - PlanType, - ReasoningEffort, - ReasoningSummary, - SandboxMode, - SandboxPolicy, - 
ServiceTier, - ThreadForkParams, - ThreadListParams, - ThreadResumeParams, - ThreadSortKey, - ThreadSourceKind, - ThreadStartParams, - TurnStartParams, - TurnStatus, - TurnSteerParams, -) - -__all__ = [ - "AskForApproval", - "Personality", - "PlanType", - "ReasoningEffort", - "ReasoningSummary", - "SandboxMode", - "SandboxPolicy", - "ServiceTier", - "ThreadForkParams", - "ThreadListParams", - "ThreadResumeParams", - "ThreadSortKey", - "ThreadSourceKind", - "ThreadStartParams", - "TurnStartParams", - "TurnStatus", - "TurnSteerParams", -] diff --git a/sdk/python/tests/test_artifact_workflow_and_binaries.py b/sdk/python/tests/test_artifact_workflow_and_binaries.py index 90446451d13..938de05e28a 100644 --- a/sdk/python/tests/test_artifact_workflow_and_binaries.py +++ b/sdk/python/tests/test_artifact_workflow_and_binaries.py @@ -117,7 +117,7 @@ def test_python_codegen_schema_annotation_adds_stable_variant_titles() -> None: ] assert ask_for_approval_titles == [ "AskForApprovalValue", - "RejectAskForApproval", + "GranularAskForApproval", ] reasoning_summary_titles = [ diff --git a/sdk/python/tests/test_public_api_runtime_behavior.py b/sdk/python/tests/test_public_api_runtime_behavior.py index 414460905d8..47034687cba 100644 --- a/sdk/python/tests/test_public_api_runtime_behavior.py +++ b/sdk/python/tests/test_public_api_runtime_behavior.py @@ -10,12 +10,11 @@ from codex_app_server.client import AppServerClient from codex_app_server.generated.v2_all import ( AgentMessageDeltaNotification, - RawResponseItemCompletedNotification, - ThreadTokenUsageUpdatedNotification, + TurnCompletedNotification, + TurnStatus, ) from codex_app_server.models import InitializeResponse, Notification from codex_app_server.public_api import AsyncCodex, AsyncTurn, Codex, Turn -from codex_app_server.public_types import TurnStatus ROOT = Path(__file__).resolve().parents[1] @@ -39,60 +38,6 @@ def _delta_notification( ) -def _raw_response_notification( - *, - thread_id: str = "thread-1", - turn_id: 
str = "turn-1", - text: str = "raw-text", -) -> Notification: - return Notification( - method="rawResponseItem/completed", - payload=RawResponseItemCompletedNotification.model_validate( - { - "item": { - "type": "message", - "role": "assistant", - "content": [{"type": "output_text", "text": text}], - }, - "threadId": thread_id, - "turnId": turn_id, - } - ), - ) - - -def _usage_notification( - *, - thread_id: str = "thread-1", - turn_id: str = "turn-1", -) -> Notification: - return Notification( - method="thread/tokenUsage/updated", - payload=ThreadTokenUsageUpdatedNotification.model_validate( - { - "threadId": thread_id, - "turnId": turn_id, - "tokenUsage": { - "last": { - "cachedInputTokens": 0, - "inputTokens": 1, - "outputTokens": 2, - "reasoningOutputTokens": 0, - "totalTokens": 3, - }, - "total": { - "cachedInputTokens": 0, - "inputTokens": 1, - "outputTokens": 2, - "reasoningOutputTokens": 0, - "totalTokens": 3, - }, - }, - } - ), - ) - - def _completed_notification( *, thread_id: str = "thread-1", @@ -101,7 +46,7 @@ def _completed_notification( ) -> Notification: return Notification( method="turn/completed", - payload=public_api_module.TurnCompletedNotificationPayload.model_validate( + payload=TurnCompletedNotification.model_validate( { "threadId": thread_id, "turn": { @@ -259,12 +204,10 @@ async def fake_next_notification() -> Notification: asyncio.run(scenario()) -def test_turn_run_falls_back_to_completed_raw_response_text() -> None: +def test_turn_run_returns_completed_turn_payload() -> None: client = AppServerClient() notifications: deque[Notification] = deque( [ - _raw_response_notification(text="hello from raw response"), - _usage_notification(), _completed_notification(), ] ) @@ -272,8 +215,9 @@ def test_turn_run_falls_back_to_completed_raw_response_text() -> None: result = Turn(client, "thread-1", "turn-1").run() + assert result.id == "turn-1" assert result.status == TurnStatus.completed - assert result.text == "hello from raw response" + assert 
result.items == [] def test_retry_examples_compare_status_with_enum() -> None: diff --git a/sdk/python/tests/test_public_api_signatures.py b/sdk/python/tests/test_public_api_signatures.py index dc61b7ba8df..b7b14fe0ed8 100644 --- a/sdk/python/tests/test_public_api_signatures.py +++ b/sdk/python/tests/test_public_api_signatures.py @@ -196,15 +196,15 @@ def test_lifecycle_methods_are_codex_scoped() -> None: def test_initialize_metadata_parses_user_agent_shape() -> None: - parsed = Codex._parse_initialize(InitializeResponse.model_validate({"userAgent": "codex-cli/1.2.3"})) - assert parsed.user_agent == "codex-cli/1.2.3" - assert parsed.server_name == "codex-cli" - assert parsed.server_version == "1.2.3" + payload = InitializeResponse.model_validate({"userAgent": "codex-cli/1.2.3"}) + parsed = Codex._validate_initialize(payload) + assert parsed is payload + assert parsed.userAgent == "codex-cli/1.2.3" def test_initialize_metadata_requires_non_empty_information() -> None: try: - Codex._parse_initialize(InitializeResponse.model_validate({})) + Codex._validate_initialize(InitializeResponse.model_validate({})) except RuntimeError as exc: assert "missing required metadata" in str(exc) else: diff --git a/sdk/python/tests/test_real_app_server_integration.py b/sdk/python/tests/test_real_app_server_integration.py index 578e3e1bd7e..463190b81ad 100644 --- a/sdk/python/tests/test_real_app_server_integration.py +++ b/sdk/python/tests/test_real_app_server_integration.py @@ -182,10 +182,11 @@ def test_real_initialize_and_model_list(runtime_env: PreparedRuntimeEnv) -> None with Codex() as codex: models = codex.models(include_hidden=True) + server = codex.metadata.serverInfo print(json.dumps({ - "user_agent": codex.metadata.user_agent, - "server_name": codex.metadata.server_name, - "server_version": codex.metadata.server_version, + "user_agent": codex.metadata.userAgent, + "server_name": None if server is None else server.name, + "server_version": None if server is None else 
server.version, "model_count": len(models.data), })) """ @@ -193,8 +194,10 @@ def test_real_initialize_and_model_list(runtime_env: PreparedRuntimeEnv) -> None ) assert isinstance(data["user_agent"], str) and data["user_agent"].strip() - assert isinstance(data["server_name"], str) and data["server_name"].strip() - assert isinstance(data["server_version"], str) and data["server_version"].strip() + if data["server_name"] is not None: + assert isinstance(data["server_name"], str) and data["server_name"].strip() + if data["server_version"] is not None: + assert isinstance(data["server_version"], str) and data["server_version"].strip() assert isinstance(data["model_count"], int) @@ -212,13 +215,17 @@ def test_real_thread_and_turn_start_smoke(runtime_env: PreparedRuntimeEnv) -> No config={"model_reasoning_effort": "high"}, ) result = thread.turn(TextInput("hello")).run() + persisted = thread.read(include_turns=True) + persisted_turn = next( + (turn for turn in persisted.thread.turns or [] if turn.id == result.id), + None, + ) print(json.dumps({ - "thread_id": result.thread_id, - "turn_id": result.turn_id, - "items_count": len(result.items), - "has_usage": result.usage is not None, - "usage_thread_id": None if result.usage is None else result.usage.thread_id, - "usage_turn_id": None if result.usage is None else result.usage.turn_id, + "thread_id": thread.id, + "turn_id": result.id, + "status": result.status.value, + "items_count": len(result.items or []), + "persisted_items_count": 0 if persisted_turn is None else len(persisted_turn.items or []), })) """ ), @@ -226,10 +233,9 @@ def test_real_thread_and_turn_start_smoke(runtime_env: PreparedRuntimeEnv) -> No assert isinstance(data["thread_id"], str) and data["thread_id"].strip() assert isinstance(data["turn_id"], str) and data["turn_id"].strip() + assert data["status"] == "completed" assert isinstance(data["items_count"], int) - assert data["has_usage"] is True - assert data["usage_thread_id"] == data["thread_id"] - assert 
data["usage_turn_id"] == data["turn_id"] + assert isinstance(data["persisted_items_count"], int) def test_real_async_thread_turn_usage_and_ids_smoke( @@ -250,13 +256,17 @@ async def main(): config={"model_reasoning_effort": "high"}, ) result = await (await thread.turn(TextInput("say ok"))).run() + persisted = await thread.read(include_turns=True) + persisted_turn = next( + (turn for turn in persisted.thread.turns or [] if turn.id == result.id), + None, + ) print(json.dumps({ - "thread_id": result.thread_id, - "turn_id": result.turn_id, - "items_count": len(result.items), - "has_usage": result.usage is not None, - "usage_thread_id": None if result.usage is None else result.usage.thread_id, - "usage_turn_id": None if result.usage is None else result.usage.turn_id, + "thread_id": thread.id, + "turn_id": result.id, + "status": result.status.value, + "items_count": len(result.items or []), + "persisted_items_count": 0 if persisted_turn is None else len(persisted_turn.items or []), })) asyncio.run(main()) @@ -266,10 +276,9 @@ async def main(): assert isinstance(data["thread_id"], str) and data["thread_id"].strip() assert isinstance(data["turn_id"], str) and data["turn_id"].strip() + assert data["status"] == "completed" assert isinstance(data["items_count"], int) - assert data["has_usage"] is True - assert data["usage_thread_id"] == data["thread_id"] - assert data["usage_turn_id"] == data["turn_id"] + assert isinstance(data["persisted_items_count"], int) def test_notebook_bootstrap_resolves_sdk_and_runtime_from_unrelated_cwd( @@ -386,10 +395,10 @@ def test_real_examples_run_and_assert( if folder == "01_quickstart_constructor": assert "Status:" in out and "Text:" in out - assert "Server: None None" not in out + assert "Server: unknown" not in out elif folder == "02_turn_run": assert "thread_id:" in out and "turn_id:" in out and "status:" in out - assert "usage: None" not in out + assert "persisted.items.count:" in out elif folder == "03_turn_stream_events": assert 
"turn/completed" in out elif folder == "04_models_and_metadata": @@ -409,7 +418,6 @@ def test_real_examples_run_and_assert( elif folder == "11_cli_mini_app": assert "Thread:" in out elif folder == "12_turn_params_kitchen_sink": - assert "Status:" in out and "Usage:" in out + assert "Status:" in out and "Items:" in out elif folder == "13_model_select_and_turn_params": - assert "selected.model:" in out and "agent.message.params:" in out and "usage.params:" in out - assert "usage.params: None" not in out + assert "selected.model:" in out and "agent.message.params:" in out and "items.params:" in out From 5e7154df5eed245ed809fc4b207440e595e0e332 Mon Sep 17 00:00:00 2001 From: Shaqayeq Date: Mon, 16 Mar 2026 14:54:00 -0700 Subject: [PATCH 07/14] python-sdk: rename turn wrapper handles (2026-03-16) Rename the public SDK wrapper objects from Turn/AsyncTurn to TurnHandle/AsyncTurnHandle so the wrapper control object is clearly distinct from the canonical generated app-server Turn model. Update the top-level exports, public API docs, runtime behavior test coverage, and the Python SDK codegen helper so future generated method signatures preserve the new wrapper names. Co-authored-by: Codex --- sdk/python/docs/api-reference.md | 16 ++++++++-------- sdk/python/docs/faq.md | 4 ++-- sdk/python/docs/getting-started.md | 2 +- sdk/python/scripts/update_sdk_artifacts.py | 8 ++++---- sdk/python/src/codex_app_server/__init__.py | 8 ++++---- sdk/python/src/codex_app_server/public_api.py | 12 ++++++------ .../tests/test_public_api_runtime_behavior.py | 17 +++++++++++------ 7 files changed, 36 insertions(+), 31 deletions(-) diff --git a/sdk/python/docs/api-reference.md b/sdk/python/docs/api-reference.md index 02cd0631bde..42d9c74b0a4 100644 --- a/sdk/python/docs/api-reference.md +++ b/sdk/python/docs/api-reference.md @@ -2,7 +2,7 @@ Public surface of `codex_app_server` for app-server v2. -This SDK surface is experimental. 
The current implementation intentionally allows only one active `Turn.stream()` or `Turn.run()` consumer per client instance at a time. +This SDK surface is experimental. The current implementation intentionally allows only one active `TurnHandle.stream()` or `TurnHandle.run()` consumer per client instance at a time. ## Package Entry @@ -12,8 +12,8 @@ from codex_app_server import ( AsyncCodex, Thread, AsyncThread, - Turn, - AsyncTurn, + TurnHandle, + AsyncTurnHandle, InitializeResponse, Input, InputItem, @@ -87,21 +87,21 @@ async with AsyncCodex() as codex: ### Thread -- `turn(input: Input, *, approval_policy=None, cwd=None, effort=None, model=None, output_schema=None, personality=None, sandbox_policy=None, summary=None) -> Turn` +- `turn(input: Input, *, approval_policy=None, cwd=None, effort=None, model=None, output_schema=None, personality=None, sandbox_policy=None, summary=None) -> TurnHandle` - `read(*, include_turns: bool = False) -> ThreadReadResponse` - `set_name(name: str) -> ThreadSetNameResponse` - `compact() -> ThreadCompactStartResponse` ### AsyncThread -- `turn(input: Input, *, approval_policy=None, cwd=None, effort=None, model=None, output_schema=None, personality=None, sandbox_policy=None, summary=None) -> Awaitable[AsyncTurn]` +- `turn(input: Input, *, approval_policy=None, cwd=None, effort=None, model=None, output_schema=None, personality=None, sandbox_policy=None, summary=None) -> Awaitable[AsyncTurnHandle]` - `read(*, include_turns: bool = False) -> Awaitable[ThreadReadResponse]` - `set_name(name: str) -> Awaitable[ThreadSetNameResponse]` - `compact() -> Awaitable[ThreadCompactStartResponse]` -## Turn / AsyncTurn +## TurnHandle / AsyncTurnHandle -### Turn +### TurnHandle - `steer(input: Input) -> TurnSteerResponse` - `interrupt() -> TurnInterruptResponse` @@ -113,7 +113,7 @@ Behavior notes: - `stream()` and `run()` are exclusive per client instance in the current experimental build - starting a second turn consumer on the same `Codex` instance 
raises `RuntimeError` -### AsyncTurn +### AsyncTurnHandle - `steer(input: Input) -> Awaitable[TurnSteerResponse]` - `interrupt() -> Awaitable[TurnInterruptResponse]` diff --git a/sdk/python/docs/faq.md b/sdk/python/docs/faq.md index aa35e402cdb..db57440af74 100644 --- a/sdk/python/docs/faq.md +++ b/sdk/python/docs/faq.md @@ -8,8 +8,8 @@ ## `run()` vs `stream()` -- `Turn.run()` / `AsyncTurn.run()` is the easiest path. It consumes events until completion and returns the canonical generated app-server `Turn` model. -- `Turn.stream()` / `AsyncTurn.stream()` yields raw notifications (`Notification`) so you can react event-by-event. +- `TurnHandle.run()` / `AsyncTurnHandle.run()` is the easiest path. It consumes events until completion and returns the canonical generated app-server `Turn` model. +- `TurnHandle.stream()` / `AsyncTurnHandle.stream()` yields raw notifications (`Notification`) so you can react event-by-event. Choose `run()` for most apps. Choose `stream()` for progress UIs, custom timeout logic, or custom parsing. diff --git a/sdk/python/docs/getting-started.md b/sdk/python/docs/getting-started.md index 85003a1e987..a39a78504da 100644 --- a/sdk/python/docs/getting-started.md +++ b/sdk/python/docs/getting-started.md @@ -42,7 +42,7 @@ What happened: - `Codex()` started and initialized `codex app-server`. - `thread_start(...)` created a thread. - `turn(...).run()` consumed events until `turn/completed` and returned the canonical generated app-server `Turn` model. 
-- one client can have only one active `Turn.stream()` / `Turn.run()` consumer at a time in the current experimental build +- one client can have only one active `TurnHandle.stream()` / `TurnHandle.run()` consumer at a time in the current experimental build ## 3) Continue the same thread (multi-turn) diff --git a/sdk/python/scripts/update_sdk_artifacts.py b/sdk/python/scripts/update_sdk_artifacts.py index da4cbceb1a9..84bcffa340f 100755 --- a/sdk/python/scripts/update_sdk_artifacts.py +++ b/sdk/python/scripts/update_sdk_artifacts.py @@ -793,7 +793,7 @@ def _render_thread_block( " input: Input,", " *,", *_kw_signature_lines(turn_fields), - " ) -> Turn:", + " ) -> TurnHandle:", " wire_input = _to_wire_input(input)", " params = TurnStartParams(", " thread_id=self.id,", @@ -801,7 +801,7 @@ def _render_thread_block( *_model_arg_lines(turn_fields), " )", " turn = self._client.turn_start(self.id, wire_input, params=params)", - " return Turn(self._client, self.id, turn.turn.id)", + " return TurnHandle(self._client, self.id, turn.turn.id)", ] return "\n".join(lines) @@ -815,7 +815,7 @@ def _render_async_thread_block( " input: Input,", " *,", *_kw_signature_lines(turn_fields), - " ) -> AsyncTurn:", + " ) -> AsyncTurnHandle:", " await self._codex._ensure_initialized()", " wire_input = _to_wire_input(input)", " params = TurnStartParams(", @@ -828,7 +828,7 @@ def _render_async_thread_block( " wire_input,", " params=params,", " )", - " return AsyncTurn(self._codex, self.id, turn.turn.id)", + " return AsyncTurnHandle(self._codex, self.id, turn.turn.id)", ] return "\n".join(lines) diff --git a/sdk/python/src/codex_app_server/__init__.py b/sdk/python/src/codex_app_server/__init__.py index 8d7650fa01f..208e1a3d85b 100644 --- a/sdk/python/src/codex_app_server/__init__.py +++ b/sdk/python/src/codex_app_server/__init__.py @@ -40,7 +40,7 @@ from .public_api import ( AsyncCodex, AsyncThread, - AsyncTurn, + AsyncTurnHandle, Codex, ImageInput, Input, @@ -50,7 +50,7 @@ SkillInput, 
TextInput, Thread, - Turn, + TurnHandle, ) from .retry import retry_on_overload @@ -65,8 +65,8 @@ "AsyncCodex", "Thread", "AsyncThread", - "Turn", - "AsyncTurn", + "TurnHandle", + "AsyncTurnHandle", "InitializeResponse", "Input", "InputItem", diff --git a/sdk/python/src/codex_app_server/public_api.py b/sdk/python/src/codex_app_server/public_api.py index 10d4b780243..ac741a4d600 100644 --- a/sdk/python/src/codex_app_server/public_api.py +++ b/sdk/python/src/codex_app_server/public_api.py @@ -490,7 +490,7 @@ def turn( sandbox_policy: SandboxPolicy | None = None, service_tier: ServiceTier | None = None, summary: ReasoningSummary | None = None, - ) -> Turn: + ) -> TurnHandle: wire_input = _to_wire_input(input) params = TurnStartParams( thread_id=self.id, @@ -506,7 +506,7 @@ def turn( summary=summary, ) turn = self._client.turn_start(self.id, wire_input, params=params) - return Turn(self._client, self.id, turn.turn.id) + return TurnHandle(self._client, self.id, turn.turn.id) # END GENERATED: Thread.flat_methods def read(self, *, include_turns: bool = False) -> ThreadReadResponse: @@ -538,7 +538,7 @@ async def turn( sandbox_policy: SandboxPolicy | None = None, service_tier: ServiceTier | None = None, summary: ReasoningSummary | None = None, - ) -> AsyncTurn: + ) -> AsyncTurnHandle: await self._codex._ensure_initialized() wire_input = _to_wire_input(input) params = TurnStartParams( @@ -559,7 +559,7 @@ async def turn( wire_input, params=params, ) - return AsyncTurn(self._codex, self.id, turn.turn.id) + return AsyncTurnHandle(self._codex, self.id, turn.turn.id) # END GENERATED: AsyncThread.flat_methods async def read(self, *, include_turns: bool = False) -> ThreadReadResponse: @@ -576,7 +576,7 @@ async def compact(self) -> ThreadCompactStartResponse: @dataclass(slots=True) -class Turn: +class TurnHandle: _client: AppServerClient thread_id: str id: str @@ -620,7 +620,7 @@ def run(self) -> AppServerTurn: @dataclass(slots=True) -class AsyncTurn: +class AsyncTurnHandle: _codex: 
AsyncCodex thread_id: str id: str diff --git a/sdk/python/tests/test_public_api_runtime_behavior.py b/sdk/python/tests/test_public_api_runtime_behavior.py index 47034687cba..8c226de460b 100644 --- a/sdk/python/tests/test_public_api_runtime_behavior.py +++ b/sdk/python/tests/test_public_api_runtime_behavior.py @@ -14,7 +14,12 @@ TurnStatus, ) from codex_app_server.models import InitializeResponse, Notification -from codex_app_server.public_api import AsyncCodex, AsyncTurn, Codex, Turn +from codex_app_server.public_api import ( + AsyncCodex, + AsyncTurnHandle, + Codex, + TurnHandle, +) ROOT = Path(__file__).resolve().parents[1] @@ -162,10 +167,10 @@ def test_turn_stream_rejects_second_active_consumer() -> None: ) client.next_notification = notifications.popleft # type: ignore[method-assign] - first_stream = Turn(client, "thread-1", "turn-1").stream() + first_stream = TurnHandle(client, "thread-1", "turn-1").stream() assert next(first_stream).method == "item/agentMessage/delta" - second_stream = Turn(client, "thread-1", "turn-2").stream() + second_stream = TurnHandle(client, "thread-1", "turn-2").stream() with pytest.raises(RuntimeError, match="Concurrent turn consumers are not yet supported"): next(second_stream) @@ -192,10 +197,10 @@ async def fake_next_notification() -> Notification: codex._ensure_initialized = fake_ensure_initialized # type: ignore[method-assign] codex._client.next_notification = fake_next_notification # type: ignore[method-assign] - first_stream = AsyncTurn(codex, "thread-1", "turn-1").stream() + first_stream = AsyncTurnHandle(codex, "thread-1", "turn-1").stream() assert (await anext(first_stream)).method == "item/agentMessage/delta" - second_stream = AsyncTurn(codex, "thread-1", "turn-2").stream() + second_stream = AsyncTurnHandle(codex, "thread-1", "turn-2").stream() with pytest.raises(RuntimeError, match="Concurrent turn consumers are not yet supported"): await anext(second_stream) @@ -213,7 +218,7 @@ def 
test_turn_run_returns_completed_turn_payload() -> None: ) client.next_notification = notifications.popleft # type: ignore[method-assign] - result = Turn(client, "thread-1", "turn-1").run() + result = TurnHandle(client, "thread-1", "turn-1").run() assert result.id == "turn-1" assert result.status == TurnStatus.completed From 0769ed5b5d4d2a686680e4bb201b7c6a88f42bb5 Mon Sep 17 00:00:00 2001 From: Shaqayeq Date: Mon, 16 Mar 2026 15:10:12 -0700 Subject: [PATCH 08/14] python-sdk: clarify initialize metadata and async entrypoint (2026-03-16) Normalize validated initialize metadata back onto InitializeResponse so successful metadata access exposes populated serverInfo fields even when they were derived from userAgent. Also make async lifecycle guidance explicit in the public surface by documenting async with AsyncCodex() as the preferred entrypoint and aligning the AsyncCodex metadata error message with that model. Co-authored-by: Codex --- sdk/python/docs/api-reference.md | 10 +++++++++ sdk/python/docs/faq.md | 3 +++ sdk/python/docs/getting-started.md | 3 +++ sdk/python/src/codex_app_server/public_api.py | 21 ++++++++++++++++--- .../tests/test_public_api_signatures.py | 3 +++ 5 files changed, 37 insertions(+), 3 deletions(-) diff --git a/sdk/python/docs/api-reference.md b/sdk/python/docs/api-reference.md index 42d9c74b0a4..29396b773e4 100644 --- a/sdk/python/docs/api-reference.md +++ b/sdk/python/docs/api-reference.md @@ -62,6 +62,16 @@ with Codex() as codex: AsyncCodex(config: AppServerConfig | None = None) ``` +Preferred usage: + +```python +async with AsyncCodex() as codex: + ... +``` + +`AsyncCodex` initializes lazily. Context entry is the standard path because it +ensures startup and shutdown are paired explicitly. 
+ Properties/methods: - `metadata -> InitializeResponse` diff --git a/sdk/python/docs/faq.md b/sdk/python/docs/faq.md index db57440af74..b2c9cf3b1f4 100644 --- a/sdk/python/docs/faq.md +++ b/sdk/python/docs/faq.md @@ -17,6 +17,9 @@ Choose `run()` for most apps. Choose `stream()` for progress UIs, custom timeout - `Codex` is the sync public API. - `AsyncCodex` is an async replica of the same public API shape. +- Prefer `async with AsyncCodex()` for async code. It is the standard path for + explicit startup/shutdown, and `AsyncCodex` initializes lazily on context + entry or first awaited API use. If your app is not already async, stay with `Codex`. diff --git a/sdk/python/docs/getting-started.md b/sdk/python/docs/getting-started.md index a39a78504da..aaa6298d4ac 100644 --- a/sdk/python/docs/getting-started.md +++ b/sdk/python/docs/getting-started.md @@ -61,6 +61,9 @@ with Codex() as codex: ## 4) Async parity +Use `async with AsyncCodex()` as the normal async entrypoint. `AsyncCodex` +initializes lazily, and context entry makes startup/shutdown explicit. 
+ ```python import asyncio from codex_app_server import AsyncCodex, TextInput diff --git a/sdk/python/src/codex_app_server/public_api.py b/sdk/python/src/codex_app_server/public_api.py index ac741a4d600..50aa3e76984 100644 --- a/sdk/python/src/codex_app_server/public_api.py +++ b/sdk/python/src/codex_app_server/public_api.py @@ -33,7 +33,7 @@ TurnStartParams, TurnSteerResponse, ) -from .models import InitializeResponse, JsonObject, Notification +from .models import InitializeResponse, JsonObject, Notification, ServerInfo @dataclass(slots=True) @@ -145,6 +145,15 @@ def _validate_initialize(payload: InitializeResponse) -> InitializeResponse: f"(user_agent={user_agent!r}, server_name={normalized_server_name!r}, server_version={normalized_server_version!r})" ) + if server is None: + payload.serverInfo = ServerInfo( + name=normalized_server_name, + version=normalized_server_version, + ) + else: + server.name = normalized_server_name + server.version = normalized_server_version + return payload @property @@ -287,7 +296,12 @@ def models(self, *, include_hidden: bool = False) -> ModelListResponse: class AsyncCodex: - """Async mirror of :class:`Codex` with matching method shapes.""" + """Async mirror of :class:`Codex`. + + Prefer ``async with AsyncCodex()`` so initialization and shutdown are + explicit and paired. The async client initializes lazily on context entry + or first awaited API use. + """ def __init__(self, config: AppServerConfig | None = None) -> None: self._client = AsyncAppServerClient(config=config) @@ -323,7 +337,8 @@ async def _ensure_initialized(self) -> None: def metadata(self) -> InitializeResponse: if self._init is None: raise RuntimeError( - "AsyncCodex is not initialized yet. Use `async with AsyncCodex()` or call an async API first." + "AsyncCodex is not initialized yet. Prefer `async with AsyncCodex()`; " + "initialization also happens on first awaited API use." 
) return self._init diff --git a/sdk/python/tests/test_public_api_signatures.py b/sdk/python/tests/test_public_api_signatures.py index b7b14fe0ed8..a73d89c0cb0 100644 --- a/sdk/python/tests/test_public_api_signatures.py +++ b/sdk/python/tests/test_public_api_signatures.py @@ -200,6 +200,9 @@ def test_initialize_metadata_parses_user_agent_shape() -> None: parsed = Codex._validate_initialize(payload) assert parsed is payload assert parsed.userAgent == "codex-cli/1.2.3" + assert parsed.serverInfo is not None + assert parsed.serverInfo.name == "codex-cli" + assert parsed.serverInfo.version == "1.2.3" def test_initialize_metadata_requires_non_empty_information() -> None: From cd84a3fd01dc295d02e2ff4c5c2968e72175db5b Mon Sep 17 00:00:00 2001 From: Shaqayeq Date: Mon, 16 Mar 2026 16:19:29 -0700 Subject: [PATCH 09/14] Delete sdk/python/examples/assets/sample_scene.png --- sdk/python/examples/assets/sample_scene.png | Bin 3724 -> 0 bytes 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 sdk/python/examples/assets/sample_scene.png diff --git a/sdk/python/examples/assets/sample_scene.png b/sdk/python/examples/assets/sample_scene.png deleted file mode 100644 index e4efdd429dd397a00a594112e0d6af0ec0e8e2f6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 3724 zcmZ`+3piBi8~@IkF$SXsEi>y$Xv0>RozZp?#w~26A=_oirGL6?D>k+?kyE0UN@Yc* z7$xo6rPP+KOvkF#vM!M*mDy)=D>1v}QW*dDoI$tUXU_ASIq&zqzxVgvzH{DhCEgyo z+8=5ogmk?;-Fy+k352kNY#b!#4=vb?(1fL4Zq9zu_n$Q_Pj6_LT>j^9;$~g*oU^8; zNuQm6@Obj$c@N#rG4L%ulJiD(4i-MgR(0uacaG&>5ZUP8{hF8Z-M#H2)p1T69gGS> z3i^6t+AkdFOs@$=*+q}FkeI;WB3v+4LnI%aT|_@-r=k(*5*C_n5RACNsN*B@`aO{b z^80kMf`k0pj0MQ8?WJP@IF{I>8*+x?O}MM-fAuih3$xp|gKbNDBzW zfD7f~F|BgII8yMyX&mr~>YNi`X$cTpfTHvjPTe&q!cX<0-9(0Ctd5ICmgV(TtB8qf zkTjz1_V8`q_J*aVB)XD+!y1oi*vTQ4ejVNC=MVl3bUV{RFjny4`Wa=GTt%;@5HYwv z$UWv%@HPxT=M&!=3QC-AoinalKwx*4$NMv*%RuU7r`I{TbTBc_D(g+$n!tqIdY3DH`Au81^l6ypL 
zUy89LLAr*)XkLE!WnJ$^c?QPLPeog!SB@=Q`7wMCwE;6!7-I77n;fy-gOEXXHH%sz5hxZ;MUo__G%0n3DJd3vwx9nw3n|H5vG+Ue`7h?W%ov ziWRpJV{wBq0PW39VdWVIRwK9UVi9A0M`WIHkoti$_yFf*%RdAw_##W}(ZFU__S~ON zG!V8nAel)n#)m#*Wn0T8H!60TID|>3F=~%g9QfqUDIEcUbRv$UB^V{pRSHtNjRizF zDaS4ku&%vKIr-`JW7ztIq2(IU*&aq?{&hBtt%DhG4>*)Y{i7RQna?LK z`EA)J16$jyX~jeGVAVUvUZ1G6))XNcNLq$k9EhuLW}9s=I72DJaK%(asul(RqB1TTRMXwaE7w- z$h08Qhd;`34w-G-6+vq^+mX#J$ME3Ii`}FrI#<+V)o9U0W$!nqR-S$qz7DZWS4$tZ zmgin=sU~+zBW}8+kY^>m-yHax$+MVMa%w(XChTJ3oU$c_9E0g)Cx@AMOieRCgZ;lc z`wG`jTobybk5f%aLFS7JLE6GDuO-%G&l=MoQ$2eo3cGY)>RC=%RXE#&$xA#M$Zzwz z-=+5@GrHWK&kmhXZ(mX7Vx>loM~tev8D-oVqi(3xJJr==|D7e=^D+Pch<7vWM@&@YO}^y|wL zbw%Rk9&bT7DR;2e=4MC!s;RYh8fjW0YU`a=->*{8Q-2)eCXC5KFc+iRkJ6vlpB27h@WVIIer(C^S#e?e&5S=hjTq9hGs~#Z`ip8yJ!jO zlIe(;m%&k4J|Q%N{p8oLg@zWp(A1J!5G7Ar;UZg1W#uZaC=FQKbY`!mgUTmSa%Y#z zzGMlL*Xf?adm=e0)Q)~S#w80ZZYnn_p=qpl0er>jndZMq6-$^?;G{<$8 zs=^@iknes#H!Wie>U4zrARW8RmQE+HI#mnW&y0MvU)Ap;+R@&Wm7-EUe`uU?thZgH zlAL^HJ7xIg`M89sBmXQ>Eqxs1sNxYa=y!LE$vTiTxbysOB7-|+O7t) zM5t<>Ndjw%_kN&=i%T<`q@|~DDO^W~h&m4Kw3Ll}mqT34h6?E67g>d(-R5R^Ku0fK zD)2hNSnz`z6*r9)gv^h~8lLBBrEg9q04#f&!}dA!Q!f5hkQ)S^LWNXD?)fhu$Y;;o z%-2i~_r;{rPjn!F{t7~a$~@d4tJnIEb7Y=-k&)ElFY`&j0yt0!gj?rFov6Z$*R5LBG#2>InBVmaWz|zrZQdbrDLRAdhpaORG2%%W;04ox*+?kYD zK#cF5wn{#@8R}4@SVHWl+UAW|oJmJc+vd`m)5$*fk2;GiZR5u^-HA?fQno07&@3xv z8!9Yq=%s}kP4c~NP`bbC@^?2Ql$yE7y1g1MrSMdMM*H$vS*p=xWUovL&Z}7(0lByTsl-i-tr-STAJWKYhyE!p{^FEJ)coBy6)qxBYu$Mj5pgHJ+#Ap0FYqu!M;quUc+n>wwt zB@n%{6ME}_&U=<1f~^PAUbAU+nDL%>YBoG|<0blHjw9XA$7B9(C5Agqp_5EE@Ws`G z+BARec(=&Bh@LMnirbqdQ)^CE_Ixqneaa^)-=hPR@13ZmJ)x6cWPjv!mA#$x66Qc( z=o?{qzR79gf0kfaI|lPZ?qzEYdd`6Q_fN;^DF6Z$JN@oMH6$^GlbN#^WNrdD%|jldK2^a2A6ygb%D zh+vIPJz`r-uHsPkl&=!h_MlK7y3kG#hM42OreF@UPNP!rkbywV%t+X&fAt(F~%a Date: Mon, 16 Mar 2026 16:29:52 -0700 Subject: [PATCH 10/14] python-sdk: align pinned runtime bootstrap and drop sample image (2026-03-16) - bump the repo-managed runtime bootstrap to rust-v0.116.0-alpha.1 so example and integration paths match the current SDK 
thread/start schema - make release artifact download more robust by retrying metadata lookups without stale auth and preferring deterministic release asset URLs - refresh generated Python artifacts and signature expectations so artifact drift tests pass on the current branch shape - replace the checked-in local image asset with a generated temporary PNG used by the examples and notebook local-image flow - add lightweight tests for pinned runtime doc drift and invalid-auth fallback in runtime metadata resolution Co-authored-by: Codex --- sdk/python/_runtime_setup.py | 73 +++++++++++-------- .../examples/08_local_image_and_text/async.py | 36 +++++---- .../examples/08_local_image_and_text/sync.py | 34 ++++----- sdk/python/examples/README.md | 4 +- sdk/python/examples/_bootstrap.py | 54 +++++++++++++- sdk/python/notebooks/sdk_walkthrough.ipynb | 29 ++++---- .../src/codex_app_server/generated/v2_all.py | 10 ++- sdk/python/src/codex_app_server/public_api.py | 16 ++++ .../test_artifact_workflow_and_binaries.py | 46 ++++++++++++ .../tests/test_public_api_signatures.py | 8 ++ .../tests/test_real_app_server_integration.py | 24 +++++- 11 files changed, 247 insertions(+), 87 deletions(-) diff --git a/sdk/python/_runtime_setup.py b/sdk/python/_runtime_setup.py index 576e5c7da71..5eb3999f4c5 100644 --- a/sdk/python/_runtime_setup.py +++ b/sdk/python/_runtime_setup.py @@ -16,7 +16,7 @@ from pathlib import Path PACKAGE_NAME = "codex-cli-bin" -PINNED_RUNTIME_VERSION = "0.115.0-alpha.11" +PINNED_RUNTIME_VERSION = "0.116.0-alpha.1" REPO_SLUG = "openai/codex" @@ -122,22 +122,53 @@ def _installed_runtime_version(python_executable: str | Path) -> str | None: def _release_metadata(version: str) -> dict[str, object]: url = f"https://api.github.com/repos/{REPO_SLUG}/releases/tags/rust-v{version}" - request = urllib.request.Request( - url, - headers=_github_api_headers("application/vnd.github+json"), - ) - try: - with urllib.request.urlopen(request) as response: - return json.load(response) 
- except urllib.error.HTTPError as exc: - raise RuntimeSetupError( - f"Failed to resolve release metadata for rust-v{version} from {REPO_SLUG}: " - f"{exc.code} {exc.reason}" - ) from exc + token = _github_token() + attempts = [True, False] if token is not None else [False] + last_error: urllib.error.HTTPError | None = None + + for include_auth in attempts: + headers = { + "Accept": "application/vnd.github+json", + "User-Agent": "codex-python-runtime-setup", + } + if include_auth and token is not None: + headers["Authorization"] = f"Bearer {token}" + + request = urllib.request.Request(url, headers=headers) + try: + with urllib.request.urlopen(request) as response: + return json.load(response) + except urllib.error.HTTPError as exc: + last_error = exc + if include_auth and exc.code == 401: + continue + break + + assert last_error is not None + raise RuntimeSetupError( + f"Failed to resolve release metadata for rust-v{version} from {REPO_SLUG}: " + f"{last_error.code} {last_error.reason}" + ) from last_error def _download_release_archive(version: str, temp_root: Path) -> Path: asset_name = platform_asset_name() + archive_path = temp_root / asset_name + + browser_download_url = ( + f"https://github.com/{REPO_SLUG}/releases/download/rust-v{version}/{asset_name}" + ) + request = urllib.request.Request( + browser_download_url, + headers={"User-Agent": "codex-python-runtime-setup"}, + ) + try: + with urllib.request.urlopen(request) as response, archive_path.open("wb") as fh: + shutil.copyfileobj(response, fh) + return archive_path + except urllib.error.HTTPError: + pass + metadata = _release_metadata(version) assets = metadata.get("assets") if not isinstance(assets, list): @@ -155,13 +186,9 @@ def _download_release_archive(version: str, temp_root: Path) -> Path: f"Release rust-v{version} does not contain asset {asset_name} for this platform." 
) - archive_path = temp_root / asset_name api_url = asset.get("url") - browser_download_url = asset.get("browser_download_url") if not isinstance(api_url, str): api_url = None - if not isinstance(browser_download_url, str): - browser_download_url = None if api_url is not None: token = _github_token() @@ -177,18 +204,6 @@ def _download_release_archive(version: str, temp_root: Path) -> Path: except urllib.error.HTTPError: pass - if browser_download_url is not None: - request = urllib.request.Request( - browser_download_url, - headers={"User-Agent": "codex-python-runtime-setup"}, - ) - try: - with urllib.request.urlopen(request) as response, archive_path.open("wb") as fh: - shutil.copyfileobj(response, fh) - return archive_path - except urllib.error.HTTPError: - pass - if shutil.which("gh") is None: raise RuntimeSetupError( f"Unable to download {asset_name} for rust-v{version}. " diff --git a/sdk/python/examples/08_local_image_and_text/async.py b/sdk/python/examples/08_local_image_and_text/async.py index aae7a53a7b8..07f06b312db 100644 --- a/sdk/python/examples/08_local_image_and_text/async.py +++ b/sdk/python/examples/08_local_image_and_text/async.py @@ -10,6 +10,7 @@ ensure_local_sdk_src, find_turn_by_id, runtime_config, + temporary_sample_image_path, ) ensure_local_sdk_src() @@ -18,27 +19,24 @@ from codex_app_server import AsyncCodex, LocalImageInput, TextInput -IMAGE_PATH = Path(__file__).resolve().parents[1] / "assets" / "sample_scene.png" -if not IMAGE_PATH.exists(): - raise FileNotFoundError(f"Missing bundled image: {IMAGE_PATH}") - async def main() -> None: - async with AsyncCodex(config=runtime_config()) as codex: - thread = await codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"}) - - turn = await thread.turn( - [ - TextInput("Read this local image and summarize what you see in 2 bullets."), - LocalImageInput(str(IMAGE_PATH.resolve())), - ] - ) - result = await turn.run() - persisted = await thread.read(include_turns=True) - 
persisted_turn = find_turn_by_id(persisted.thread.turns, result.id) - - print("Status:", result.status) - print(assistant_text_from_turn(persisted_turn)) + with temporary_sample_image_path() as image_path: + async with AsyncCodex(config=runtime_config()) as codex: + thread = await codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"}) + + turn = await thread.turn( + [ + TextInput("Read this generated local image and summarize the colors/layout in 2 bullets."), + LocalImageInput(str(image_path.resolve())), + ] + ) + result = await turn.run() + persisted = await thread.read(include_turns=True) + persisted_turn = find_turn_by_id(persisted.thread.turns, result.id) + + print("Status:", result.status) + print(assistant_text_from_turn(persisted_turn)) if __name__ == "__main__": diff --git a/sdk/python/examples/08_local_image_and_text/sync.py b/sdk/python/examples/08_local_image_and_text/sync.py index f09d1805842..883e05a6bcb 100644 --- a/sdk/python/examples/08_local_image_and_text/sync.py +++ b/sdk/python/examples/08_local_image_and_text/sync.py @@ -10,27 +10,25 @@ ensure_local_sdk_src, find_turn_by_id, runtime_config, + temporary_sample_image_path, ) ensure_local_sdk_src() from codex_app_server import Codex, LocalImageInput, TextInput -IMAGE_PATH = Path(__file__).resolve().parents[1] / "assets" / "sample_scene.png" -if not IMAGE_PATH.exists(): - raise FileNotFoundError(f"Missing bundled image: {IMAGE_PATH}") - -with Codex(config=runtime_config()) as codex: - thread = codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"}) - - result = thread.turn( - [ - TextInput("Read this local image and summarize what you see in 2 bullets."), - LocalImageInput(str(IMAGE_PATH.resolve())), - ] - ).run() - persisted = thread.read(include_turns=True) - persisted_turn = find_turn_by_id(persisted.thread.turns, result.id) - - print("Status:", result.status) - print(assistant_text_from_turn(persisted_turn)) +with temporary_sample_image_path() as 
image_path: + with Codex(config=runtime_config()) as codex: + thread = codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"}) + + result = thread.turn( + [ + TextInput("Read this generated local image and summarize the colors/layout in 2 bullets."), + LocalImageInput(str(image_path.resolve())), + ] + ).run() + persisted = thread.read(include_turns=True) + persisted_turn = find_turn_by_id(persisted.thread.turns, result.id) + + print("Status:", result.status) + print(assistant_text_from_turn(persisted_turn)) diff --git a/sdk/python/examples/README.md b/sdk/python/examples/README.md index 8cd18d2c4d5..9d569b7de80 100644 --- a/sdk/python/examples/README.md +++ b/sdk/python/examples/README.md @@ -30,7 +30,7 @@ will download the matching GitHub release artifact, stage a temporary local `codex-cli-bin` package, install it into your active interpreter, and clean up the temporary files afterward. -Current pinned runtime version: `0.115.0-alpha.11` +Current pinned runtime version: `0.116.0-alpha.1` ## Run examples @@ -70,7 +70,7 @@ python examples/01_quickstart_constructor/async.py - `07_image_and_text/` - remote image URL + text multimodal turn - `08_local_image_and_text/` - - local image + text multimodal turn using bundled sample image + - local image + text multimodal turn using a generated temporary sample image - `09_async_parity/` - parity-style sync flow (see async parity in other examples) - `10_error_handling_and_retry/` diff --git a/sdk/python/examples/_bootstrap.py b/sdk/python/examples/_bootstrap.py index d15a1835cc7..00cd62a0bc3 100644 --- a/sdk/python/examples/_bootstrap.py +++ b/sdk/python/examples/_bootstrap.py @@ -1,10 +1,13 @@ from __future__ import annotations +import contextlib import importlib.util import os import sys +import tempfile +import zlib from pathlib import Path -from typing import Iterable +from typing import Iterable, Iterator _SDK_PYTHON_DIR = Path(__file__).resolve().parents[1] _SDK_PYTHON_STR = 
str(_SDK_PYTHON_DIR) @@ -52,6 +55,55 @@ def runtime_config(): return AppServerConfig() +def _png_chunk(chunk_type: bytes, data: bytes) -> bytes: + import struct + + payload = chunk_type + data + checksum = zlib.crc32(payload) & 0xFFFFFFFF + return struct.pack(">I", len(data)) + payload + struct.pack(">I", checksum) + + +def _generated_sample_png_bytes() -> bytes: + import struct + + width = 96 + height = 96 + top_left = (120, 180, 255) + top_right = (255, 220, 90) + bottom_left = (90, 180, 95) + bottom_right = (180, 85, 85) + + rows = bytearray() + for y in range(height): + rows.append(0) + for x in range(width): + if y < height // 2 and x < width // 2: + color = top_left + elif y < height // 2: + color = top_right + elif x < width // 2: + color = bottom_left + else: + color = bottom_right + rows.extend(color) + + header = struct.pack(">IIBBBBB", width, height, 8, 2, 0, 0, 0) + return ( + b"\x89PNG\r\n\x1a\n" + + _png_chunk(b"IHDR", header) + + _png_chunk(b"IDAT", zlib.compress(bytes(rows))) + + _png_chunk(b"IEND", b"") + ) + + +@contextlib.contextmanager +def temporary_sample_image_path() -> Iterator[Path]: + with tempfile.TemporaryDirectory(prefix="codex-python-example-image-") as temp_root: + image_path = Path(temp_root) / "generated_sample.png" + image_path.write_bytes(_generated_sample_png_bytes()) + yield image_path + + def server_label(metadata: object) -> str: server = getattr(metadata, "serverInfo", None) server_name = ((getattr(server, "name", None) or "") if server is not None else "").strip() diff --git a/sdk/python/notebooks/sdk_walkthrough.ipynb b/sdk/python/notebooks/sdk_walkthrough.ipynb index c4c59372e4e..5043d4f45fc 100644 --- a/sdk/python/notebooks/sdk_walkthrough.ipynb +++ b/sdk/python/notebooks/sdk_walkthrough.ipynb @@ -400,22 +400,19 @@ "metadata": {}, "outputs": [], "source": [ - "# Cell 7: multimodal with local image (bundled asset)\n", - "local_image_path = repo_python_dir / 'examples' / 'assets' / 'sample_scene.png'\n", - "if not 
local_image_path.exists():\n", - " raise FileNotFoundError(f'Missing bundled image: {local_image_path}')\n", - "\n", - "with Codex() as codex:\n", - " thread = codex.thread_start(model='gpt-5.4', config={'model_reasoning_effort': 'high'})\n", - " result = thread.turn([\n", - " TextInput('Describe this local image in 2 bullets.'),\n", - " LocalImageInput(str(local_image_path.resolve())),\n", - " ]).run()\n", - " persisted = thread.read(include_turns=True)\n", - " persisted_turn = find_turn_by_id(persisted.thread.turns, result.id)\n", - "\n", - " print('status:', result.status)\n", - " print(assistant_text_from_turn(persisted_turn))\n" + "# Cell 7: multimodal with local image (generated temporary file)\n", + "with temporary_sample_image_path() as local_image_path:\n", + " with Codex() as codex:\n", + " thread = codex.thread_start(model='gpt-5.4', config={'model_reasoning_effort': 'high'})\n", + " result = thread.turn([\n", + " TextInput('Describe the colors and layout in this generated local image in 2 bullets.'),\n", + " LocalImageInput(str(local_image_path.resolve())),\n", + " ]).run()\n", + " persisted = thread.read(include_turns=True)\n", + " persisted_turn = find_turn_by_id(persisted.thread.turns, result.id)\n", + "\n", + " print('status:', result.status)\n", + " print(assistant_text_from_turn(persisted_turn))\n" ] }, { diff --git a/sdk/python/src/codex_app_server/generated/v2_all.py b/sdk/python/src/codex_app_server/generated/v2_all.py index e953baaac7c..2dd09454ed6 100644 --- a/sdk/python/src/codex_app_server/generated/v2_all.py +++ b/sdk/python/src/codex_app_server/generated/v2_all.py @@ -339,6 +339,7 @@ class CodexErrorInfo( class CollabAgentStatus(Enum): pending_init = "pendingInit" running = "running" + interrupted = "interrupted" completed = "completed" errored = "errored" shutdown = "shutdown" @@ -746,6 +747,7 @@ class DynamicToolSpec(BaseModel): model_config = ConfigDict( populate_by_name=True, ) + defer_loading: Annotated[bool | None, 
Field(alias="deferLoading")] = None description: str input_schema: Annotated[Any, Field(alias="inputSchema")] name: str @@ -1657,7 +1659,13 @@ class PluginInterface(BaseModel): capabilities: list[str] category: str | None = None composer_icon: Annotated[AbsolutePathBuf | None, Field(alias="composerIcon")] = None - default_prompt: Annotated[str | None, Field(alias="defaultPrompt")] = None + default_prompt: Annotated[ + list[str] | None, + Field( + alias="defaultPrompt", + description="Starter prompts for the plugin. Capped at 3 entries with a maximum of 128 characters per entry.", + ), + ] = None developer_name: Annotated[str | None, Field(alias="developerName")] = None display_name: Annotated[str | None, Field(alias="displayName")] = None logo: AbsolutePathBuf | None = None diff --git a/sdk/python/src/codex_app_server/public_api.py b/sdk/python/src/codex_app_server/public_api.py index 50aa3e76984..b465c574d30 100644 --- a/sdk/python/src/codex_app_server/public_api.py +++ b/sdk/python/src/codex_app_server/public_api.py @@ -168,6 +168,7 @@ def thread_start( self, *, approval_policy: AskForApproval | None = None, + approvals_reviewer: ApprovalsReviewer | None = None, base_instructions: str | None = None, config: JsonObject | None = None, cwd: str | None = None, @@ -182,6 +183,7 @@ def thread_start( ) -> Thread: params = ThreadStartParams( approval_policy=approval_policy, + approvals_reviewer=approvals_reviewer, base_instructions=base_instructions, config=config, cwd=cwd, @@ -226,6 +228,7 @@ def thread_resume( thread_id: str, *, approval_policy: AskForApproval | None = None, + approvals_reviewer: ApprovalsReviewer | None = None, base_instructions: str | None = None, config: JsonObject | None = None, cwd: str | None = None, @@ -239,6 +242,7 @@ def thread_resume( params = ThreadResumeParams( thread_id=thread_id, approval_policy=approval_policy, + approvals_reviewer=approvals_reviewer, base_instructions=base_instructions, config=config, cwd=cwd, @@ -257,6 +261,7 @@ def 
thread_fork( thread_id: str, *, approval_policy: AskForApproval | None = None, + approvals_reviewer: ApprovalsReviewer | None = None, base_instructions: str | None = None, config: JsonObject | None = None, cwd: str | None = None, @@ -270,6 +275,7 @@ def thread_fork( params = ThreadForkParams( thread_id=thread_id, approval_policy=approval_policy, + approvals_reviewer=approvals_reviewer, base_instructions=base_instructions, config=config, cwd=cwd, @@ -352,6 +358,7 @@ async def thread_start( self, *, approval_policy: AskForApproval | None = None, + approvals_reviewer: ApprovalsReviewer | None = None, base_instructions: str | None = None, config: JsonObject | None = None, cwd: str | None = None, @@ -367,6 +374,7 @@ async def thread_start( await self._ensure_initialized() params = ThreadStartParams( approval_policy=approval_policy, + approvals_reviewer=approvals_reviewer, base_instructions=base_instructions, config=config, cwd=cwd, @@ -412,6 +420,7 @@ async def thread_resume( thread_id: str, *, approval_policy: AskForApproval | None = None, + approvals_reviewer: ApprovalsReviewer | None = None, base_instructions: str | None = None, config: JsonObject | None = None, cwd: str | None = None, @@ -426,6 +435,7 @@ async def thread_resume( params = ThreadResumeParams( thread_id=thread_id, approval_policy=approval_policy, + approvals_reviewer=approvals_reviewer, base_instructions=base_instructions, config=config, cwd=cwd, @@ -444,6 +454,7 @@ async def thread_fork( thread_id: str, *, approval_policy: AskForApproval | None = None, + approvals_reviewer: ApprovalsReviewer | None = None, base_instructions: str | None = None, config: JsonObject | None = None, cwd: str | None = None, @@ -458,6 +469,7 @@ async def thread_fork( params = ThreadForkParams( thread_id=thread_id, approval_policy=approval_policy, + approvals_reviewer=approvals_reviewer, base_instructions=base_instructions, config=config, cwd=cwd, @@ -497,6 +509,7 @@ def turn( input: Input, *, approval_policy: AskForApproval | 
None = None, + approvals_reviewer: ApprovalsReviewer | None = None, cwd: str | None = None, effort: ReasoningEffort | None = None, model: str | None = None, @@ -511,6 +524,7 @@ def turn( thread_id=self.id, input=wire_input, approval_policy=approval_policy, + approvals_reviewer=approvals_reviewer, cwd=cwd, effort=effort, model=model, @@ -545,6 +559,7 @@ async def turn( input: Input, *, approval_policy: AskForApproval | None = None, + approvals_reviewer: ApprovalsReviewer | None = None, cwd: str | None = None, effort: ReasoningEffort | None = None, model: str | None = None, @@ -560,6 +575,7 @@ async def turn( thread_id=self.id, input=wire_input, approval_policy=approval_policy, + approvals_reviewer=approvals_reviewer, cwd=cwd, effort=effort, model=model, diff --git a/sdk/python/tests/test_artifact_workflow_and_binaries.py b/sdk/python/tests/test_artifact_workflow_and_binaries.py index 938de05e28a..b19dc745a30 100644 --- a/sdk/python/tests/test_artifact_workflow_and_binaries.py +++ b/sdk/python/tests/test_artifact_workflow_and_binaries.py @@ -2,9 +2,11 @@ import ast import importlib.util +import io import json import sys import tomllib +import urllib.error from pathlib import Path import pytest @@ -23,6 +25,17 @@ def _load_update_script_module(): return module +def _load_runtime_setup_module(): + runtime_setup_path = ROOT / "_runtime_setup.py" + spec = importlib.util.spec_from_file_location("_runtime_setup", runtime_setup_path) + if spec is None or spec.loader is None: + raise AssertionError(f"Failed to load runtime setup module: {runtime_setup_path}") + module = importlib.util.module_from_spec(spec) + sys.modules[spec.name] = module + spec.loader.exec_module(module) + return module + + def test_generation_has_single_maintenance_entrypoint_script() -> None: scripts = sorted(p.name for p in (ROOT / "scripts").glob("*.py")) assert scripts == ["update_sdk_artifacts.py"] @@ -146,6 +159,39 @@ def test_runtime_package_template_has_no_checked_in_binaries() -> None: ) == 
["__init__.py"] +def test_examples_readme_matches_pinned_runtime_version() -> None: + runtime_setup = _load_runtime_setup_module() + readme = (ROOT / "examples" / "README.md").read_text() + assert ( + f"Current pinned runtime version: `{runtime_setup.pinned_runtime_version()}`" + in readme + ) + + +def test_release_metadata_retries_without_invalid_auth(monkeypatch: pytest.MonkeyPatch) -> None: + runtime_setup = _load_runtime_setup_module() + authorizations: list[str | None] = [] + + def fake_urlopen(request): + authorization = request.headers.get("Authorization") + authorizations.append(authorization) + if authorization is not None: + raise urllib.error.HTTPError( + request.full_url, + 401, + "Unauthorized", + hdrs=None, + fp=None, + ) + return io.StringIO('{"assets": []}') + + monkeypatch.setenv("GH_TOKEN", "invalid-token") + monkeypatch.setattr(runtime_setup.urllib.request, "urlopen", fake_urlopen) + + assert runtime_setup._release_metadata("1.2.3") == {"assets": []} + assert authorizations == ["Bearer invalid-token", None] + + def test_runtime_package_is_wheel_only_and_builds_platform_specific_wheels() -> None: pyproject = tomllib.loads( (ROOT.parent / "python-runtime" / "pyproject.toml").read_text() diff --git a/sdk/python/tests/test_public_api_signatures.py b/sdk/python/tests/test_public_api_signatures.py index a73d89c0cb0..c8bd23c351e 100644 --- a/sdk/python/tests/test_public_api_signatures.py +++ b/sdk/python/tests/test_public_api_signatures.py @@ -40,6 +40,7 @@ def test_generated_public_signatures_are_snake_case_and_typed() -> None: expected = { Codex.thread_start: [ "approval_policy", + "approvals_reviewer", "base_instructions", "config", "cwd", @@ -64,6 +65,7 @@ def test_generated_public_signatures_are_snake_case_and_typed() -> None: ], Codex.thread_resume: [ "approval_policy", + "approvals_reviewer", "base_instructions", "config", "cwd", @@ -76,6 +78,7 @@ def test_generated_public_signatures_are_snake_case_and_typed() -> None: ], Codex.thread_fork: [ 
"approval_policy", + "approvals_reviewer", "base_instructions", "config", "cwd", @@ -88,6 +91,7 @@ def test_generated_public_signatures_are_snake_case_and_typed() -> None: ], Thread.turn: [ "approval_policy", + "approvals_reviewer", "cwd", "effort", "model", @@ -99,6 +103,7 @@ def test_generated_public_signatures_are_snake_case_and_typed() -> None: ], AsyncCodex.thread_start: [ "approval_policy", + "approvals_reviewer", "base_instructions", "config", "cwd", @@ -123,6 +128,7 @@ def test_generated_public_signatures_are_snake_case_and_typed() -> None: ], AsyncCodex.thread_resume: [ "approval_policy", + "approvals_reviewer", "base_instructions", "config", "cwd", @@ -135,6 +141,7 @@ def test_generated_public_signatures_are_snake_case_and_typed() -> None: ], AsyncCodex.thread_fork: [ "approval_policy", + "approvals_reviewer", "base_instructions", "config", "cwd", @@ -147,6 +154,7 @@ def test_generated_public_signatures_are_snake_case_and_typed() -> None: ], AsyncThread.turn: [ "approval_policy", + "approvals_reviewer", "cwd", "effort", "model", diff --git a/sdk/python/tests/test_real_app_server_integration.py b/sdk/python/tests/test_real_app_server_integration.py index 463190b81ad..9e7ad237cfc 100644 --- a/sdk/python/tests/test_real_app_server_integration.py +++ b/sdk/python/tests/test_real_app_server_integration.py @@ -133,6 +133,24 @@ def _run_python( ) +def _runtime_compatibility_hint( + runtime_env: PreparedRuntimeEnv, + *, + stdout: str, + stderr: str, +) -> str: + combined = f"{stdout}\n{stderr}" + if "ThreadStartResponse" in combined and "approvalsReviewer" in combined: + return ( + "\nCompatibility hint:\n" + f"Pinned runtime {runtime_env.runtime_version} returned a thread/start payload " + "that is older than the current SDK schema and is missing " + "`approvalsReviewer`. 
Bump `sdk/python/_runtime_setup.py` to a matching " + "released runtime version.\n" + ) + return "" + + def _run_json_python( runtime_env: PreparedRuntimeEnv, source: str, @@ -142,7 +160,10 @@ def _run_json_python( ) -> dict[str, object]: result = _run_python(runtime_env, source, cwd=cwd, timeout_s=timeout_s) assert result.returncode == 0, ( - f"Python snippet failed.\nSTDOUT:\n{result.stdout}\nSTDERR:\n{result.stderr}" + "Python snippet failed.\n" + f"STDOUT:\n{result.stdout}\n" + f"STDERR:\n{result.stderr}" + f"{_runtime_compatibility_hint(runtime_env, stdout=result.stdout, stderr=result.stderr)}" ) return json.loads(result.stdout) @@ -389,6 +410,7 @@ def test_real_examples_run_and_assert( f"Example failed: {folder}/{script}\n" f"STDOUT:\n{result.stdout}\n" f"STDERR:\n{result.stderr}" + f"{_runtime_compatibility_hint(runtime_env, stdout=result.stdout, stderr=result.stderr)}" ) out = result.stdout From 199b74248b7fb4612f6dbfe047e5753464df3bbf Mon Sep 17 00:00:00 2001 From: Shaqayeq Date: Mon, 16 Mar 2026 22:51:52 -0700 Subject: [PATCH 11/14] python-sdk: split stream and control examples (2026-03-16) - make 03_turn_stream_events a clean streaming example with curated event output - add 14_turn_controls as a separate steer and interrupt demo with concise summaries - update the notebook and runtime-backed example assertions to match the new example shapes Co-authored-by: Codex --- .../examples/03_turn_stream_events/async.py | 53 +++++++++----- .../examples/03_turn_stream_events/sync.py | 53 +++++++++----- sdk/python/examples/14_turn_controls/async.py | 71 +++++++++++++++++++ sdk/python/examples/14_turn_controls/sync.py | 63 ++++++++++++++++ sdk/python/examples/README.md | 4 +- sdk/python/notebooks/sdk_walkthrough.ipynb | 55 ++++++++++---- .../tests/test_real_app_server_integration.py | 8 ++- 7 files changed, 256 insertions(+), 51 deletions(-) create mode 100644 sdk/python/examples/14_turn_controls/async.py create mode 100644 
sdk/python/examples/14_turn_controls/sync.py diff --git a/sdk/python/examples/03_turn_stream_events/async.py b/sdk/python/examples/03_turn_stream_events/async.py index ea8165bccde..33509ffec35 100644 --- a/sdk/python/examples/03_turn_stream_events/async.py +++ b/sdk/python/examples/03_turn_stream_events/async.py @@ -5,7 +5,12 @@ if str(_EXAMPLES_ROOT) not in sys.path: sys.path.insert(0, str(_EXAMPLES_ROOT)) -from _bootstrap import ensure_local_sdk_src, runtime_config +from _bootstrap import ( + assistant_text_from_turn, + ensure_local_sdk_src, + find_turn_by_id, + runtime_config, +) ensure_local_sdk_src() @@ -17,26 +22,40 @@ async def main() -> None: async with AsyncCodex(config=runtime_config()) as codex: thread = await codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"}) - turn = await thread.turn(TextInput("Count from 1 to 200 with commas, then one summary sentence.")) - - # Best effort controls: models can finish quickly, so races are expected. - try: - _ = await turn.steer(TextInput("Keep it brief and stop after 20 numbers.")) - print("steer: sent") - except Exception as exc: - print("steer: skipped", type(exc).__name__) - - try: - _ = await turn.interrupt() - print("interrupt: sent") - except Exception as exc: - print("interrupt: skipped", type(exc).__name__) + turn = await thread.turn(TextInput("Explain SIMD in 3 short bullets.")) event_count = 0 + saw_started = False + saw_delta = False + completed_status = "unknown" + async for event in turn.stream(): event_count += 1 - print(event.method, event.payload) - + if event.method == "turn/started": + saw_started = True + print("stream.started") + continue + if event.method == "item/agentMessage/delta": + delta = getattr(event.payload, "delta", "") + if delta: + if not saw_delta: + print("assistant> ", end="", flush=True) + print(delta, end="", flush=True) + saw_delta = True + continue + if event.method == "turn/completed": + completed_status = getattr(event.payload.turn.status, "value", 
str(event.payload.turn.status)) + + if saw_delta: + print() + else: + persisted = await thread.read(include_turns=True) + persisted_turn = find_turn_by_id(persisted.thread.turns, turn.id) + final_text = assistant_text_from_turn(persisted_turn).strip() or "[no assistant text]" + print("assistant>", final_text) + + print("stream.started.seen:", saw_started) + print("stream.completed:", completed_status) print("events.count:", event_count) diff --git a/sdk/python/examples/03_turn_stream_events/sync.py b/sdk/python/examples/03_turn_stream_events/sync.py index e9fb908c52b..d458e171fa1 100644 --- a/sdk/python/examples/03_turn_stream_events/sync.py +++ b/sdk/python/examples/03_turn_stream_events/sync.py @@ -5,7 +5,12 @@ if str(_EXAMPLES_ROOT) not in sys.path: sys.path.insert(0, str(_EXAMPLES_ROOT)) -from _bootstrap import ensure_local_sdk_src, runtime_config +from _bootstrap import ( + assistant_text_from_turn, + ensure_local_sdk_src, + find_turn_by_id, + runtime_config, +) ensure_local_sdk_src() @@ -13,24 +18,38 @@ with Codex(config=runtime_config()) as codex: thread = codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"}) - turn = thread.turn(TextInput("Count from 1 to 200 with commas, then one summary sentence.")) - - # Best effort controls: models can finish quickly, so races are expected. 
- try: - _ = turn.steer(TextInput("Keep it brief and stop after 20 numbers.")) - print("steer: sent") - except Exception as exc: - print("steer: skipped", type(exc).__name__) - - try: - _ = turn.interrupt() - print("interrupt: sent") - except Exception as exc: - print("interrupt: skipped", type(exc).__name__) + turn = thread.turn(TextInput("Explain SIMD in 3 short bullets.")) event_count = 0 + saw_started = False + saw_delta = False + completed_status = "unknown" + for event in turn.stream(): event_count += 1 - print(event.method, event.payload) - + if event.method == "turn/started": + saw_started = True + print("stream.started") + continue + if event.method == "item/agentMessage/delta": + delta = getattr(event.payload, "delta", "") + if delta: + if not saw_delta: + print("assistant> ", end="", flush=True) + print(delta, end="", flush=True) + saw_delta = True + continue + if event.method == "turn/completed": + completed_status = getattr(event.payload.turn.status, "value", str(event.payload.turn.status)) + + if saw_delta: + print() + else: + persisted = thread.read(include_turns=True) + persisted_turn = find_turn_by_id(persisted.thread.turns, turn.id) + final_text = assistant_text_from_turn(persisted_turn).strip() or "[no assistant text]" + print("assistant>", final_text) + + print("stream.started.seen:", saw_started) + print("stream.completed:", completed_status) print("events.count:", event_count) diff --git a/sdk/python/examples/14_turn_controls/async.py b/sdk/python/examples/14_turn_controls/async.py new file mode 100644 index 00000000000..e180482e338 --- /dev/null +++ b/sdk/python/examples/14_turn_controls/async.py @@ -0,0 +1,71 @@ +import sys +from pathlib import Path + +_EXAMPLES_ROOT = Path(__file__).resolve().parents[1] +if str(_EXAMPLES_ROOT) not in sys.path: + sys.path.insert(0, str(_EXAMPLES_ROOT)) + +from _bootstrap import ( + assistant_text_from_turn, + ensure_local_sdk_src, + runtime_config, +) + +ensure_local_sdk_src() + +import asyncio + +from 
codex_app_server import AsyncCodex, TextInput + + +async def main() -> None: + async with AsyncCodex(config=runtime_config()) as codex: + thread = await codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"}) + steer_turn = await thread.turn(TextInput("Count from 1 to 40 with commas, then one summary sentence.")) + steer_result = "sent" + try: + _ = await steer_turn.steer(TextInput("Keep it brief and stop after 10 numbers.")) + except Exception as exc: + steer_result = f"skipped {type(exc).__name__}" + + steer_event_count = 0 + steer_completed_status = "unknown" + steer_completed_turn = None + async for event in steer_turn.stream(): + steer_event_count += 1 + if event.method == "turn/completed": + steer_completed_turn = event.payload.turn + steer_completed_status = getattr(event.payload.turn.status, "value", str(event.payload.turn.status)) + + steer_preview = assistant_text_from_turn(steer_completed_turn).strip() or "[no assistant text]" + + interrupt_turn = await thread.turn(TextInput("Count from 1 to 200 with commas, then one summary sentence.")) + interrupt_result = "sent" + try: + _ = await interrupt_turn.interrupt() + except Exception as exc: + interrupt_result = f"skipped {type(exc).__name__}" + + interrupt_event_count = 0 + interrupt_completed_status = "unknown" + interrupt_completed_turn = None + async for event in interrupt_turn.stream(): + interrupt_event_count += 1 + if event.method == "turn/completed": + interrupt_completed_turn = event.payload.turn + interrupt_completed_status = getattr(event.payload.turn.status, "value", str(event.payload.turn.status)) + + interrupt_preview = assistant_text_from_turn(interrupt_completed_turn).strip() or "[no assistant text]" + + print("steer.result:", steer_result) + print("steer.final.status:", steer_completed_status) + print("steer.events.count:", steer_event_count) + print("steer.assistant.preview:", steer_preview) + print("interrupt.result:", interrupt_result) + 
print("interrupt.final.status:", interrupt_completed_status) + print("interrupt.events.count:", interrupt_event_count) + print("interrupt.assistant.preview:", interrupt_preview) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/python/examples/14_turn_controls/sync.py b/sdk/python/examples/14_turn_controls/sync.py new file mode 100644 index 00000000000..9e9de4dc1ee --- /dev/null +++ b/sdk/python/examples/14_turn_controls/sync.py @@ -0,0 +1,63 @@ +import sys +from pathlib import Path + +_EXAMPLES_ROOT = Path(__file__).resolve().parents[1] +if str(_EXAMPLES_ROOT) not in sys.path: + sys.path.insert(0, str(_EXAMPLES_ROOT)) + +from _bootstrap import ( + assistant_text_from_turn, + ensure_local_sdk_src, + runtime_config, +) + +ensure_local_sdk_src() + +from codex_app_server import Codex, TextInput + +with Codex(config=runtime_config()) as codex: + thread = codex.thread_start(model="gpt-5.4", config={"model_reasoning_effort": "high"}) + steer_turn = thread.turn(TextInput("Count from 1 to 40 with commas, then one summary sentence.")) + steer_result = "sent" + try: + _ = steer_turn.steer(TextInput("Keep it brief and stop after 10 numbers.")) + except Exception as exc: + steer_result = f"skipped {type(exc).__name__}" + + steer_event_count = 0 + steer_completed_status = "unknown" + steer_completed_turn = None + for event in steer_turn.stream(): + steer_event_count += 1 + if event.method == "turn/completed": + steer_completed_turn = event.payload.turn + steer_completed_status = getattr(event.payload.turn.status, "value", str(event.payload.turn.status)) + + steer_preview = assistant_text_from_turn(steer_completed_turn).strip() or "[no assistant text]" + + interrupt_turn = thread.turn(TextInput("Count from 1 to 200 with commas, then one summary sentence.")) + interrupt_result = "sent" + try: + _ = interrupt_turn.interrupt() + except Exception as exc: + interrupt_result = f"skipped {type(exc).__name__}" + + interrupt_event_count = 0 + 
interrupt_completed_status = "unknown" + interrupt_completed_turn = None + for event in interrupt_turn.stream(): + interrupt_event_count += 1 + if event.method == "turn/completed": + interrupt_completed_turn = event.payload.turn + interrupt_completed_status = getattr(event.payload.turn.status, "value", str(event.payload.turn.status)) + + interrupt_preview = assistant_text_from_turn(interrupt_completed_turn).strip() or "[no assistant text]" + + print("steer.result:", steer_result) + print("steer.final.status:", steer_completed_status) + print("steer.events.count:", steer_event_count) + print("steer.assistant.preview:", steer_preview) + print("interrupt.result:", interrupt_result) + print("interrupt.final.status:", interrupt_completed_status) + print("interrupt.events.count:", interrupt_event_count) + print("interrupt.assistant.preview:", interrupt_preview) diff --git a/sdk/python/examples/README.md b/sdk/python/examples/README.md index 9d569b7de80..d7c38adc276 100644 --- a/sdk/python/examples/README.md +++ b/sdk/python/examples/README.md @@ -60,7 +60,7 @@ python examples/01_quickstart_constructor/async.py - `02_turn_run/` - inspect full turn output fields - `03_turn_stream_events/` - - stream and print raw notifications + - stream a turn with a small curated event view - `04_models_and_metadata/` - read server metadata and model list - `05_existing_thread/` @@ -81,3 +81,5 @@ python examples/01_quickstart_constructor/async.py - one turn using most optional `turn(...)` params (sync + async) - `13_model_select_and_turn_params/` - list models, pick highest model + highest supported reasoning effort, run turns, print message and usage +- `14_turn_controls/` + - separate best-effort `steer()` and `interrupt()` demos with concise summaries diff --git a/sdk/python/notebooks/sdk_walkthrough.ipynb b/sdk/python/notebooks/sdk_walkthrough.ipynb index 5043d4f45fc..4a7ac5c1c3b 100644 --- a/sdk/python/notebooks/sdk_walkthrough.ipynb +++ b/sdk/python/notebooks/sdk_walkthrough.ipynb 
@@ -509,33 +509,58 @@ "metadata": {}, "outputs": [], "source": [ - "# Cell 10: async stream + steer + interrupt (best effort)\n", + "# Cell 10: async turn controls (best effort steer + interrupt)\n", "import asyncio\n", "\n", "\n", "async def async_stream_demo():\n", " async with AsyncCodex() as codex:\n", " thread = await codex.thread_start(model='gpt-5.4', config={'model_reasoning_effort': 'high'})\n", - " turn = await thread.turn(TextInput('Count from 1 to 200 with commas, then one summary sentence.'))\n", + " steer_turn = await thread.turn(TextInput('Count from 1 to 40 with commas, then one summary sentence.'))\n", "\n", + " steer_result = 'sent'\n", " try:\n", - " _ = await turn.steer(TextInput('Keep it brief and stop after 20 numbers.'))\n", - " print('steer: sent')\n", + " _ = await steer_turn.steer(TextInput('Keep it brief and stop after 10 numbers.'))\n", " except Exception as e:\n", - " print('steer: skipped', type(e).__name__)\n", + " steer_result = f'skipped {type(e).__name__}'\n", "\n", - " try:\n", - " _ = await turn.interrupt()\n", - " print('interrupt: sent')\n", - " except Exception as e:\n", - " print('interrupt: skipped', type(e).__name__)\n", + " steer_event_count = 0\n", + " steer_completed_status = 'unknown'\n", + " steer_completed_turn = None\n", + " async for event in steer_turn.stream():\n", + " steer_event_count += 1\n", + " if event.method == 'turn/completed':\n", + " steer_completed_turn = event.payload.turn\n", + " steer_completed_status = getattr(event.payload.turn.status, 'value', str(event.payload.turn.status))\n", "\n", - " event_count = 0\n", - " async for event in turn.stream():\n", - " event_count += 1\n", - " print(event.method, event.payload)\n", + " steer_preview = assistant_text_from_turn(steer_completed_turn).strip() or '[no assistant text]'\n", "\n", - " print('events.count:', event_count)\n", + " interrupt_turn = await thread.turn(TextInput('Count from 1 to 200 with commas, then one summary sentence.'))\n", + " 
interrupt_result = 'sent'\n", + " try:\n", + " _ = await interrupt_turn.interrupt()\n", + " except Exception as e:\n", + " interrupt_result = f'skipped {type(e).__name__}'\n", + "\n", + " interrupt_event_count = 0\n", + " interrupt_completed_status = 'unknown'\n", + " interrupt_completed_turn = None\n", + " async for event in interrupt_turn.stream():\n", + " interrupt_event_count += 1\n", + " if event.method == 'turn/completed':\n", + " interrupt_completed_turn = event.payload.turn\n", + " interrupt_completed_status = getattr(event.payload.turn.status, 'value', str(event.payload.turn.status))\n", + "\n", + " interrupt_preview = assistant_text_from_turn(interrupt_completed_turn).strip() or '[no assistant text]'\n", + "\n", + " print('steer.result:', steer_result)\n", + " print('steer.final.status:', steer_completed_status)\n", + " print('steer.events.count:', steer_event_count)\n", + " print('steer.assistant.preview:', steer_preview)\n", + " print('interrupt.result:', interrupt_result)\n", + " print('interrupt.final.status:', interrupt_completed_status)\n", + " print('interrupt.events.count:', interrupt_event_count)\n", + " print('interrupt.assistant.preview:', interrupt_preview)\n", "\n", "\n", "await async_stream_demo()\n" diff --git a/sdk/python/tests/test_real_app_server_integration.py b/sdk/python/tests/test_real_app_server_integration.py index 9e7ad237cfc..0594cbd3496 100644 --- a/sdk/python/tests/test_real_app_server_integration.py +++ b/sdk/python/tests/test_real_app_server_integration.py @@ -55,6 +55,8 @@ ("12_turn_params_kitchen_sink", "async.py"), ("13_model_select_and_turn_params", "sync.py"), ("13_model_select_and_turn_params", "async.py"), + ("14_turn_controls", "sync.py"), + ("14_turn_controls", "async.py"), ] @@ -422,7 +424,8 @@ def test_real_examples_run_and_assert( assert "thread_id:" in out and "turn_id:" in out and "status:" in out assert "persisted.items.count:" in out elif folder == "03_turn_stream_events": - assert "turn/completed" in out + 
assert "stream.completed:" in out + assert "assistant>" in out elif folder == "04_models_and_metadata": assert "models.count:" in out assert "server_name=None" not in out @@ -443,3 +446,6 @@ def test_real_examples_run_and_assert( assert "Status:" in out and "Items:" in out elif folder == "13_model_select_and_turn_params": assert "selected.model:" in out and "agent.message.params:" in out and "items.params:" in out + elif folder == "14_turn_controls": + assert "steer.result:" in out and "steer.final.status:" in out + assert "interrupt.result:" in out and "interrupt.final.status:" in out From c45ff3a05542758bf30f6c3bff851f685ed0747c Mon Sep 17 00:00:00 2001 From: Shaqayeq Date: Tue, 17 Mar 2026 14:13:03 -0700 Subject: [PATCH 12/14] python-sdk: rename public_api module to api (2026-03-17) Co-authored-by: Codex --- sdk/python/scripts/update_sdk_artifacts.py | 2 +- sdk/python/src/codex_app_server/__init__.py | 2 +- sdk/python/src/codex_app_server/{public_api.py => api.py} | 0 sdk/python/tests/test_contract_generation.py | 2 +- sdk/python/tests/test_public_api_runtime_behavior.py | 4 ++-- sdk/python/tests/test_public_api_signatures.py | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) rename sdk/python/src/codex_app_server/{public_api.py => api.py} (100%) diff --git a/sdk/python/scripts/update_sdk_artifacts.py b/sdk/python/scripts/update_sdk_artifacts.py index 84bcffa340f..6685fd09990 100755 --- a/sdk/python/scripts/update_sdk_artifacts.py +++ b/sdk/python/scripts/update_sdk_artifacts.py @@ -835,7 +835,7 @@ def _render_async_thread_block( def generate_public_api_flat_methods() -> None: src_dir = sdk_root() / "src" - public_api_path = src_dir / "codex_app_server" / "public_api.py" + public_api_path = src_dir / "codex_app_server" / "api.py" if not public_api_path.exists(): # PR2 can run codegen before the ergonomic public API layer is added. 
return diff --git a/sdk/python/src/codex_app_server/__init__.py b/sdk/python/src/codex_app_server/__init__.py index 208e1a3d85b..91f334df8cf 100644 --- a/sdk/python/src/codex_app_server/__init__.py +++ b/sdk/python/src/codex_app_server/__init__.py @@ -37,7 +37,7 @@ TurnSteerParams, ) from .models import InitializeResponse -from .public_api import ( +from .api import ( AsyncCodex, AsyncThread, AsyncTurnHandle, diff --git a/sdk/python/src/codex_app_server/public_api.py b/sdk/python/src/codex_app_server/api.py similarity index 100% rename from sdk/python/src/codex_app_server/public_api.py rename to sdk/python/src/codex_app_server/api.py diff --git a/sdk/python/tests/test_contract_generation.py b/sdk/python/tests/test_contract_generation.py index ae926e4817b..bb5ec18bbc2 100644 --- a/sdk/python/tests/test_contract_generation.py +++ b/sdk/python/tests/test_contract_generation.py @@ -9,7 +9,7 @@ GENERATED_TARGETS = [ Path("src/codex_app_server/generated/notification_registry.py"), Path("src/codex_app_server/generated/v2_all.py"), - Path("src/codex_app_server/public_api.py"), + Path("src/codex_app_server/api.py"), ] diff --git a/sdk/python/tests/test_public_api_runtime_behavior.py b/sdk/python/tests/test_public_api_runtime_behavior.py index 8c226de460b..dfddd31968c 100644 --- a/sdk/python/tests/test_public_api_runtime_behavior.py +++ b/sdk/python/tests/test_public_api_runtime_behavior.py @@ -6,7 +6,7 @@ import pytest -import codex_app_server.public_api as public_api_module +import codex_app_server.api as public_api_module from codex_app_server.client import AppServerClient from codex_app_server.generated.v2_all import ( AgentMessageDeltaNotification, @@ -14,7 +14,7 @@ TurnStatus, ) from codex_app_server.models import InitializeResponse, Notification -from codex_app_server.public_api import ( +from codex_app_server.api import ( AsyncCodex, AsyncTurnHandle, Codex, diff --git a/sdk/python/tests/test_public_api_signatures.py b/sdk/python/tests/test_public_api_signatures.py 
index c8bd23c351e..4ac051c03bd 100644 --- a/sdk/python/tests/test_public_api_signatures.py +++ b/sdk/python/tests/test_public_api_signatures.py @@ -6,7 +6,7 @@ from codex_app_server import AppServerConfig from codex_app_server.models import InitializeResponse -from codex_app_server.public_api import AsyncCodex, AsyncThread, Codex, Thread +from codex_app_server.api import AsyncCodex, AsyncThread, Codex, Thread def _keyword_only_names(fn: object) -> list[str]: From 3fba6cf3970474f4eb388a2e9a9f3942d2fb28a6 Mon Sep 17 00:00:00 2001 From: Shaqayeq Date: Tue, 17 Mar 2026 15:40:55 -0700 Subject: [PATCH 13/14] python-sdk: polish example output contracts (2026-03-17) Co-authored-by: Codex --- .../examples/04_models_and_metadata/async.py | 10 +++--- .../examples/04_models_and_metadata/sync.py | 8 ++--- sdk/python/examples/11_cli_mini_app/async.py | 4 +-- sdk/python/examples/11_cli_mini_app/sync.py | 4 +-- .../12_turn_params_kitchen_sink/async.py | 32 +++++++++++-------- .../12_turn_params_kitchen_sink/sync.py | 30 +++++++++-------- sdk/python/examples/README.md | 4 +-- .../tests/test_real_app_server_integration.py | 21 +++++++++--- 8 files changed, 65 insertions(+), 48 deletions(-) diff --git a/sdk/python/examples/04_models_and_metadata/async.py b/sdk/python/examples/04_models_and_metadata/async.py index 9686eec95dd..e434b432185 100644 --- a/sdk/python/examples/04_models_and_metadata/async.py +++ b/sdk/python/examples/04_models_and_metadata/async.py @@ -5,7 +5,7 @@ if str(_EXAMPLES_ROOT) not in sys.path: sys.path.insert(0, str(_EXAMPLES_ROOT)) -from _bootstrap import ensure_local_sdk_src, runtime_config +from _bootstrap import ensure_local_sdk_src, runtime_config, server_label ensure_local_sdk_src() @@ -16,12 +16,10 @@ async def main() -> None: async with AsyncCodex(config=runtime_config()) as codex: - print("metadata:", codex.metadata) - - models = await codex.models(include_hidden=True) + print("server:", server_label(codex.metadata)) + models = await codex.models() 
print("models.count:", len(models.data)) - if models.data: - print("first model id:", models.data[0].id) + print("models:", ", ".join(model.id for model in models.data[:5]) or "[none]") if __name__ == "__main__": diff --git a/sdk/python/examples/04_models_and_metadata/sync.py b/sdk/python/examples/04_models_and_metadata/sync.py index 800e133db64..66c33548ce6 100644 --- a/sdk/python/examples/04_models_and_metadata/sync.py +++ b/sdk/python/examples/04_models_and_metadata/sync.py @@ -5,16 +5,14 @@ if str(_EXAMPLES_ROOT) not in sys.path: sys.path.insert(0, str(_EXAMPLES_ROOT)) -from _bootstrap import ensure_local_sdk_src, runtime_config +from _bootstrap import ensure_local_sdk_src, runtime_config, server_label ensure_local_sdk_src() from codex_app_server import Codex with Codex(config=runtime_config()) as codex: - print("metadata:", codex.metadata) - + print("server:", server_label(codex.metadata)) models = codex.models() print("models.count:", len(models.data)) - if models.data: - print("first model id:", models.data[0].id) + print("models:", ", ".join(model.id for model in models.data[:5]) or "[none]") diff --git a/sdk/python/examples/11_cli_mini_app/async.py b/sdk/python/examples/11_cli_mini_app/async.py index 489dc51203f..4216cf78204 100644 --- a/sdk/python/examples/11_cli_mini_app/async.py +++ b/sdk/python/examples/11_cli_mini_app/async.py @@ -34,8 +34,8 @@ def _format_usage(usage: object | None) -> str: return ( "usage>\n" - f" last: input={last.inputTokens} output={last.outputTokens} reasoning={last.reasoningOutputTokens} total={last.totalTokens} cached={last.cachedInputTokens}\n" - f" total: input={total.inputTokens} output={total.outputTokens} reasoning={total.reasoningOutputTokens} total={total.totalTokens} cached={total.cachedInputTokens}" + f" last: input={last.input_tokens} output={last.output_tokens} reasoning={last.reasoning_output_tokens} total={last.total_tokens} cached={last.cached_input_tokens}\n" + f" total: input={total.input_tokens} 
output={total.output_tokens} reasoning={total.reasoning_output_tokens} total={total.total_tokens} cached={total.cached_input_tokens}" ) diff --git a/sdk/python/examples/11_cli_mini_app/sync.py b/sdk/python/examples/11_cli_mini_app/sync.py index 9aecca414f0..e961cfbcc3f 100644 --- a/sdk/python/examples/11_cli_mini_app/sync.py +++ b/sdk/python/examples/11_cli_mini_app/sync.py @@ -34,8 +34,8 @@ def _format_usage(usage: object | None) -> str: return ( "usage>\n" - f" last: input={last.inputTokens} output={last.outputTokens} reasoning={last.reasoningOutputTokens} total={last.totalTokens} cached={last.cachedInputTokens}\n" - f" total: input={total.inputTokens} output={total.outputTokens} reasoning={total.reasoningOutputTokens} total={total.totalTokens} cached={total.cachedInputTokens}" + f" last: input={last.input_tokens} output={last.output_tokens} reasoning={last.reasoning_output_tokens} total={last.total_tokens} cached={last.cached_input_tokens}\n" + f" total: input={total.input_tokens} output={total.output_tokens} reasoning={total.reasoning_output_tokens} total={total.total_tokens} cached={total.cached_input_tokens}" ) diff --git a/sdk/python/examples/12_turn_params_kitchen_sink/async.py b/sdk/python/examples/12_turn_params_kitchen_sink/async.py index 12b36fdbf7a..88a24535c22 100644 --- a/sdk/python/examples/12_turn_params_kitchen_sink/async.py +++ b/sdk/python/examples/12_turn_params_kitchen_sink/async.py @@ -1,3 +1,4 @@ +import json import sys from pathlib import Path @@ -20,9 +21,7 @@ AskForApproval, AsyncCodex, Personality, - ReasoningEffort, ReasoningSummary, - SandboxPolicy, TextInput, ) @@ -39,12 +38,6 @@ "additionalProperties": False, } -SANDBOX_POLICY = SandboxPolicy.model_validate( - { - "type": "readOnly", - "access": {"type": "fullAccess"}, - } -) SUMMARY = ReasoningSummary.model_validate("concise") PROMPT = ( @@ -61,20 +54,33 @@ async def main() -> None: turn = await thread.turn( TextInput(PROMPT), approval_policy=APPROVAL_POLICY, - cwd=str(Path.cwd()), 
- effort=ReasoningEffort.medium, - model="gpt-5.4", output_schema=OUTPUT_SCHEMA, personality=Personality.pragmatic, - sandbox_policy=SANDBOX_POLICY, summary=SUMMARY, ) result = await turn.run() persisted = await thread.read(include_turns=True) persisted_turn = find_turn_by_id(persisted.thread.turns, result.id) + structured_text = assistant_text_from_turn(persisted_turn).strip() + try: + structured = json.loads(structured_text) + except json.JSONDecodeError as exc: + raise RuntimeError(f"Expected JSON matching OUTPUT_SCHEMA, got: {structured_text!r}") from exc + + summary = structured.get("summary") + actions = structured.get("actions") + if not isinstance(summary, str) or not isinstance(actions, list) or not all( + isinstance(action, str) for action in actions + ): + raise RuntimeError( + f"Expected structured output with string summary/actions, got: {structured!r}" + ) print("Status:", result.status) - print("Text:", assistant_text_from_turn(persisted_turn)) + print("summary:", summary) + print("actions:") + for action in actions: + print("-", action) print("Items:", 0 if persisted_turn is None else len(persisted_turn.items or [])) diff --git a/sdk/python/examples/12_turn_params_kitchen_sink/sync.py b/sdk/python/examples/12_turn_params_kitchen_sink/sync.py index f685917d2b4..e4095c8ec96 100644 --- a/sdk/python/examples/12_turn_params_kitchen_sink/sync.py +++ b/sdk/python/examples/12_turn_params_kitchen_sink/sync.py @@ -1,3 +1,4 @@ +import json import sys from pathlib import Path @@ -18,9 +19,7 @@ AskForApproval, Codex, Personality, - ReasoningEffort, ReasoningSummary, - SandboxPolicy, TextInput, ) @@ -37,12 +36,6 @@ "additionalProperties": False, } -SANDBOX_POLICY = SandboxPolicy.model_validate( - { - "type": "readOnly", - "access": {"type": "fullAccess"}, - } -) SUMMARY = ReasoningSummary.model_validate("concise") PROMPT = ( @@ -57,18 +50,29 @@ turn = thread.turn( TextInput(PROMPT), approval_policy=APPROVAL_POLICY, - cwd=str(Path.cwd()), - 
effort=ReasoningEffort.medium, - model="gpt-5.4", output_schema=OUTPUT_SCHEMA, personality=Personality.pragmatic, - sandbox_policy=SANDBOX_POLICY, summary=SUMMARY, ) result = turn.run() persisted = thread.read(include_turns=True) persisted_turn = find_turn_by_id(persisted.thread.turns, result.id) + structured_text = assistant_text_from_turn(persisted_turn).strip() + try: + structured = json.loads(structured_text) + except json.JSONDecodeError as exc: + raise RuntimeError(f"Expected JSON matching OUTPUT_SCHEMA, got: {structured_text!r}") from exc + + summary = structured.get("summary") + actions = structured.get("actions") + if not isinstance(summary, str) or not isinstance(actions, list) or not all( + isinstance(action, str) for action in actions + ): + raise RuntimeError(f"Expected structured output with string summary/actions, got: {structured!r}") print("Status:", result.status) - print("Text:", assistant_text_from_turn(persisted_turn)) + print("summary:", summary) + print("actions:") + for action in actions: + print("-", action) print("Items:", 0 if persisted_turn is None else len(persisted_turn.items or [])) diff --git a/sdk/python/examples/README.md b/sdk/python/examples/README.md index d7c38adc276..5edf2badbdc 100644 --- a/sdk/python/examples/README.md +++ b/sdk/python/examples/README.md @@ -62,7 +62,7 @@ python examples/01_quickstart_constructor/async.py - `03_turn_stream_events/` - stream a turn with a small curated event view - `04_models_and_metadata/` - - read server metadata and model list + - discover visible models for the connected runtime - `05_existing_thread/` - resume a real existing thread (created in-script) - `06_thread_lifecycle_and_controls/` @@ -78,7 +78,7 @@ python examples/01_quickstart_constructor/async.py - `11_cli_mini_app/` - interactive chat loop - `12_turn_params_kitchen_sink/` - - one turn using most optional `turn(...)` params (sync + async) + - structured output with a curated advanced `turn(...)` configuration - 
`13_model_select_and_turn_params/` - list models, pick highest model + highest supported reasoning effort, run turns, print message and usage - `14_turn_controls/` diff --git a/sdk/python/tests/test_real_app_server_integration.py b/sdk/python/tests/test_real_app_server_integration.py index 0594cbd3496..b08928c5d69 100644 --- a/sdk/python/tests/test_real_app_server_integration.py +++ b/sdk/python/tests/test_real_app_server_integration.py @@ -27,7 +27,7 @@ reason="set RUN_REAL_CODEX_TESTS=1 to run real Codex integration coverage", ) -# 11_cli_mini_app is interactive; we still run it by feeding '/exit'. +# 11_cli_mini_app is interactive; we still run it by feeding one prompt, then '/exit'. EXAMPLE_CASES: list[tuple[str, str]] = [ ("01_quickstart_constructor", "sync.py"), ("01_quickstart_constructor", "async.py"), @@ -180,7 +180,11 @@ def _run_example( path = EXAMPLES_DIR / folder / script assert path.exists(), f"Missing example script: {path}" - stdin = "/exit\n" if folder == "11_cli_mini_app" else None + stdin = ( + "Give 3 short bullets on SIMD.\nNow rewrite that as 1 short sentence.\n/exit\n" + if folder == "11_cli_mini_app" + else None + ) return _run_command( [str(runtime_env.python), str(path)], cwd=ROOT, @@ -427,9 +431,10 @@ def test_real_examples_run_and_assert( assert "stream.completed:" in out assert "assistant>" in out elif folder == "04_models_and_metadata": + assert "server:" in out assert "models.count:" in out - assert "server_name=None" not in out - assert "server_version=None" not in out + assert "models:" in out + assert "metadata:" not in out elif folder == "05_existing_thread": assert "Created thread:" in out elif folder == "06_thread_lifecycle_and_controls": @@ -442,8 +447,14 @@ def test_real_examples_run_and_assert( assert "Text:" in out elif folder == "11_cli_mini_app": assert "Thread:" in out + assert out.count("assistant>") >= 2 + assert out.count("assistant.status>") >= 2 + assert out.count("usage>") >= 2 elif folder == 
"12_turn_params_kitchen_sink": - assert "Status:" in out and "Items:" in out + assert "Status:" in out + assert "summary:" in out + assert "actions:" in out + assert "Items:" in out elif folder == "13_model_select_and_turn_params": assert "selected.model:" in out and "agent.message.params:" in out and "items.params:" in out elif folder == "14_turn_controls": From da3cad32b253922bf797beb594d691ea7167f639 Mon Sep 17 00:00:00 2001 From: Shaqayeq Date: Tue, 17 Mar 2026 15:48:53 -0700 Subject: [PATCH 14/14] python-sdk: fix advanced notebook walkthrough (2026-03-17) Co-authored-by: Codex --- sdk/python/notebooks/sdk_walkthrough.ipynb | 16 ++++++++++------ .../tests/test_real_app_server_integration.py | 17 +++++++++++++++++ 2 files changed, 27 insertions(+), 6 deletions(-) diff --git a/sdk/python/notebooks/sdk_walkthrough.ipynb b/sdk/python/notebooks/sdk_walkthrough.ipynb index 4a7ac5c1c3b..951cb24e488 100644 --- a/sdk/python/notebooks/sdk_walkthrough.ipynb +++ b/sdk/python/notebooks/sdk_walkthrough.ipynb @@ -270,7 +270,7 @@ " thread = codex.thread_start(model='gpt-5.4', config={'model_reasoning_effort': 'high'})\n", " turn = thread.turn(\n", " TextInput('Propose a safe production feature-flag rollout. 
Return JSON matching the schema.'),\n", - " approval_policy=AskForApproval.never,\n", + " approval_policy=AskForApproval.model_validate('never'),\n", " cwd=str(Path.cwd()),\n", " effort=ReasoningEffort.medium,\n", " model='gpt-5.4',\n", @@ -354,12 +354,14 @@ " model=selected_model.model,\n", " effort=selected_effort,\n", " ).run()\n", - " print('agent.message:', first.text)\n", - " print('usage:', first.usage)\n", + " persisted = thread.read(include_turns=True)\n", + " first_turn = find_turn_by_id(persisted.thread.turns, first.id)\n", + " print('agent.message:', assistant_text_from_turn(first_turn))\n", + " print('items:', 0 if first_turn is None else len(first_turn.items or []))\n", "\n", " second = thread.turn(\n", " TextInput('Return JSON for a safe feature-flag rollout plan.'),\n", - " approval_policy=AskForApproval.never,\n", + " approval_policy=AskForApproval.model_validate('never'),\n", " cwd=str(Path.cwd()),\n", " effort=selected_effort,\n", " model=selected_model.model,\n", @@ -368,8 +370,10 @@ " sandbox_policy=sandbox_policy,\n", " summary=ReasoningSummary.model_validate('concise'),\n", " ).run()\n", - " print('agent.message.params:', second.text)\n", - " print('usage.params:', second.usage)\n" + " persisted = thread.read(include_turns=True)\n", + " second_turn = find_turn_by_id(persisted.thread.turns, second.id)\n", + " print('agent.message.params:', assistant_text_from_turn(second_turn))\n", + " print('items.params:', 0 if second_turn is None else len(second_turn.items or []))\n" ] }, { diff --git a/sdk/python/tests/test_real_app_server_integration.py b/sdk/python/tests/test_real_app_server_integration.py index b08928c5d69..3790e37dc0e 100644 --- a/sdk/python/tests/test_real_app_server_integration.py +++ b/sdk/python/tests/test_real_app_server_integration.py @@ -347,6 +347,23 @@ def test_notebook_sync_cell_smoke(runtime_env: PreparedRuntimeEnv) -> None: assert "server:" in result.stdout +def test_notebook_advanced_cell_smoke(runtime_env: 
PreparedRuntimeEnv) -> None: + source = "\n\n".join( + [ + _notebook_cell_source(1), + _notebook_cell_source(2), + _notebook_cell_source(7), + ] + ) + result = _run_python(runtime_env, source, timeout_s=360) + assert result.returncode == 0, ( + f"Notebook advanced smoke failed.\nSTDOUT:\n{result.stdout}\nSTDERR:\n{result.stderr}" + ) + assert "selected.model:" in result.stdout + assert "agent.message.params:" in result.stdout + assert "items.params:" in result.stdout + + def test_real_streaming_smoke_turn_completed(runtime_env: PreparedRuntimeEnv) -> None: data = _run_json_python( runtime_env,