diff --git a/.claude/settings.local.json b/.claude/settings.local.json
new file mode 100644
index 00000000..e0dbc74d
--- /dev/null
+++ b/.claude/settings.local.json
@@ -0,0 +1,15 @@
+{
+ "permissions": {
+ "allow": [
+ "Bash(gh api *)",
+ "Bash(python3 -c \"import json,sys; [print\\(d['name'], d['type']\\) for d in json.load\\(sys.stdin\\)]\")",
+ "Bash(gh pr *)",
+ "Bash(git fetch *)",
+ "Bash(git checkout *)",
+ "Bash(git merge *)",
+ "Bash(git rm *)",
+ "Bash(GIT_EDITOR=true git merge --continue)",
+ "Bash(git push *)"
+ ]
+ }
+}
diff --git a/.github/workflows/publish-pypi.yml b/.github/workflows/publish-pypi.yml
index f489b133..3aa82510 100644
--- a/.github/workflows/publish-pypi.yml
+++ b/.github/workflows/publish-pypi.yml
@@ -1,6 +1,6 @@
# This workflow is triggered when a GitHub release is created.
# It can also be run manually to re-publish to PyPI in case it failed for some reason.
-# You can run this workflow by navigating to https://www.github.com/llamastack/llama-stack-client-python/actions/workflows/publish-pypi.yml
+# You can run this workflow by navigating to https://www.github.com/ogx-ai/ogx-client-python/actions/workflows/publish-pypi.yml
name: Publish PyPI
on:
workflow_dispatch:
@@ -25,4 +25,4 @@ jobs:
run: |
bash ./bin/publish-pypi
env:
- PYPI_TOKEN: ${{ secrets.LLAMA_STACK_CLIENT_PYPI_TOKEN || secrets.PYPI_TOKEN }}
+ PYPI_TOKEN: ${{ secrets.OGX_CLIENT_PYPI_TOKEN || secrets.PYPI_TOKEN }}
diff --git a/.github/workflows/release-doctor.yml b/.github/workflows/release-doctor.yml
index 1e9e0aac..723b9921 100644
--- a/.github/workflows/release-doctor.yml
+++ b/.github/workflows/release-doctor.yml
@@ -9,7 +9,7 @@ jobs:
release_doctor:
name: release doctor
runs-on: ubuntu-latest
- if: github.repository == 'llamastack/llama-stack-client-python' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch' || startsWith(github.head_ref, 'release-please') || github.head_ref == 'next')
+ if: github.repository == 'ogx-ai/ogx-client-python' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch' || startsWith(github.head_ref, 'release-please') || github.head_ref == 'next')
steps:
- uses: actions/checkout@v6
@@ -18,4 +18,4 @@ jobs:
run: |
bash ./bin/check-release-environment
env:
- PYPI_TOKEN: ${{ secrets.LLAMA_STACK_CLIENT_PYPI_TOKEN || secrets.PYPI_TOKEN }}
+ PYPI_TOKEN: ${{ secrets.OGX_CLIENT_PYPI_TOKEN || secrets.PYPI_TOKEN }}
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 74d6e1d4..2f6a273d 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "0.7.0-alpha.3"
+ ".": "0.7.2-alpha.4"
}
diff --git a/.stats.yml b/.stats.yml
index 508e23e9..b18105e1 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 92
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-0f79b0793508266adfa12255cd643ffd9cc8744f58e9442380f682ec2b34770f.yml
-openapi_spec_hash: 0c08db6da9aaa343f7650eae332bd23b
-config_hash: d8a05907bd87286473cdf868da7d2ede
+configured_endpoints: 72
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack/llama-stack-client-7d0d5abb61225de22f231841e03c595cbb64c696480698e35dcb572ab13219f9.yml
+openapi_spec_hash: ce0a298a4e761307d455f97f9bd88553
+config_hash: e9033b935d1171e83638620c36d47ad5
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 1a3669c2..83b2f48d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,13 +1,12 @@
# Changelog
-## 0.7.0-alpha.3 (2026-04-07)
+## 0.7.2-alpha.4 (2026-04-30)
-Full Changelog: [v0.7.0-alpha.2...v0.7.0-alpha.3](https://github.com/llamastack/llama-stack-client-python/compare/v0.7.0-alpha.2...v0.7.0-alpha.3)
+Full Changelog: [v0.7.0-alpha.2...v0.7.2-alpha.4](https://github.com/ogx-ai/ogx-client-python/compare/v0.7.0-alpha.2...v0.7.2-alpha.4)
-### Bug Fixes
+### Chores
-* export path_template from _utils ([#330](https://github.com/llamastack/llama-stack-client-python/issues/330)) ([02b3604](https://github.com/llamastack/llama-stack-client-python/commit/02b36047634beb2fc5810d6697762ad2db247250))
-* remove leftover post_training and toolgroups CLI commands ([#333](https://github.com/llamastack/llama-stack-client-python/issues/333)) ([aab810c](https://github.com/llamastack/llama-stack-client-python/commit/aab810ca664b2968770780cec909c5501c508998))
+* sync repo ([149adb9](https://github.com/ogx-ai/ogx-client-python/commit/149adb9e67ef76ed398eeef24067f60930f19745))
## 0.7.0-alpha.2 (2026-04-01)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 36604c01..cb4cf2ff 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -36,7 +36,7 @@ $ pip install -r requirements-dev.lock
Most of the SDK is generated code. Modifications to code will be persisted between generations, but may
result in merge conflicts between manual patches and changes from the generator. The generator will never
-modify the contents of the `src/llama_stack_client/lib/` and `examples/` directories.
+modify the contents of the `src/ogx_client/lib/` and `examples/` directories.
## Adding and running examples
@@ -62,7 +62,7 @@ If you’d like to use the repository from source, you can either install from g
To install via git:
```sh
-$ pip install git+ssh://git@github.com/llamastack/llama-stack-client-python.git
+$ pip install git+ssh://git@github.com/ogx-ai/ogx-client-python.git
```
Alternatively, you can build from source and install the wheel file:
@@ -119,7 +119,7 @@ the changes aren't made through the automated pipeline, you may want to make rel
### Publish with a GitHub workflow
-You can release to package managers by using [the `Publish PyPI` GitHub action](https://www.github.com/llamastack/llama-stack-client-python/actions/workflows/publish-pypi.yml). This requires a setup organization or repository secret to be set up.
+You can release to package managers by using [the `Publish PyPI` GitHub action](https://www.github.com/ogx-ai/ogx-client-python/actions/workflows/publish-pypi.yml). This requires an organization or repository secret to be set up.
### Publish manually
diff --git a/LICENSE b/LICENSE
index c781a0e2..b3ccfb7e 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,22 +1,7 @@
-MIT License
+Copyright 2026 ogx-client
-Copyright (c) Meta Platforms, Inc. and affiliates
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/README.md b/README.md
index a2757391..08eabf15 100644
--- a/README.md
+++ b/README.md
@@ -1,97 +1,57 @@
-# Llama Stack Client Python API library
+# Ogx Client Python API library
+
+[![PyPI version](https://img.shields.io/pypi/v/ogx_client.svg)](https://pypi.org/project/ogx_client/)
-[](https://pypi.org/project/llama_stack_client/) [](https://pypi.org/project/llama-stack-client/)
-[](https://discord.gg/llama-stack)
-
-The Llama Stack Client Python library provides convenient access to the Llama Stack Client REST API from any Python 3.12+
+The Ogx Client Python library provides convenient access to the Ogx Client REST API from any Python 3.9+
application. The library includes type definitions for all request params and response fields,
and offers both synchronous and asynchronous clients powered by [httpx](https://github.com/encode/httpx).
-It is generated with [Stainless](https://www.stainlessapi.com/).
+It is generated with [Stainless](https://www.stainless.com/).
## Documentation
-For starting up a Llama Stack server, please checkout our [Quickstart guide to start a Llama Stack server](https://llama-stack.readthedocs.io/en/latest/getting_started/index.html)
-
-The REST API documentation can be found on [llama-stack API Reference](https://llama-stack.readthedocs.io/en/latest/references/api_reference/index.html). The full API of this library can be found in [api.md](api.md).
-
-You can find more example apps with client SDKs to talk with the Llama Stack server in our [llama-stack-apps](https://github.com/meta-llama/llama-stack-apps/tree/main) repo.
+The REST API documentation can be found on [ogx.readthedocs.io](https://ogx.readthedocs.io/en/latest/). The full API of this library can be found in [api.md](api.md).
## Installation
```sh
# install from PyPI
-pip install '--pre llama_stack_client'
+pip install --pre ogx_client
```
## Usage
-The full API of this library can be found in [api.md](api.md). You may find basic client examples in our [llama-stack-apps](https://github.com/meta-llama/llama-stack-apps/tree/main) repo.
+The full API of this library can be found in [api.md](api.md).
```python
-from llama_stack_client import LlamaStackClient
+from ogx_client import OgxClient
-client = LlamaStackClient()
+client = OgxClient()
-models = client.models.list()
+list_models_response = client.models.list()
+print(list_models_response.data)
```
-While you can provide an `api_key` keyword argument, we recommend using [python-dotenv](https://pypi.org/project/python-dotenv/) to add `LLAMA_STACK_CLIENT_API_KEY="My API Key"` to your `.env` file so that your API Key is not stored in source control.
-
-After installing the `llama-stack-client` package, you can also use the [`llama-stack-client` CLI](https://github.com/meta-llama/llama-stack/tree/main/llama-stack-client) to interact with the Llama Stack server.
-```bash
-llama-stack-client inference chat-completion --message "hello, what model are you"
-```
-
-```python
-OpenAIChatCompletion(
- id="AmivnS0iMv-mmEE4_A0DK1T",
- choices=[
- OpenAIChatCompletionChoice(
- finish_reason="stop",
- index=0,
- message=OpenAIChatCompletionChoiceMessageOpenAIAssistantMessageParam(
- role="assistant",
- content="Hello! I am an AI designed by Meta AI, and my model is a type of recurrent neural network (RNN) called a transformer. My specific architecture is based on the BERT (Bidirectional Encoder Representations from Transformers) model, which is a pre-trained language model that has been fine-tuned for a variety of natural language processing tasks.\n\nHere are some key details about my model:\n\n* **Model type:** Transformer-based language model\n* **Architecture:** BERT (Bidirectional Encoder Representations from Transformers)\n* **Training data:** A massive corpus of text data, including but not limited to:\n\t+ Web pages\n\t+ Books\n\t+ Articles\n\t+ Forums\n\t+ Social media platforms\n* **Parameters:** My model has approximately 1.5 billion parameters, which allows me to understand and generate human-like language.\n* **Capabilities:** I can perform a wide range of tasks, including but not limited to:\n\t+ Answering questions\n\t+ Generating text\n\t+ Translating languages\n\t+ Summarizing content\n\t+ Offering suggestions and ideas\n\nI'm constantly learning and improving, so please bear with me if I make any mistakes or don't quite understand what you're asking. How can I assist you today?",
- name=None,
- tool_calls=None,
- function_call=None,
- ),
- logprobs=OpenAIChatCompletionChoiceLogprobs(content=None, refusal=None),
- )
- ],
- created=1749825661,
- model="Llama-3.3-70B-Instruct",
- object="chat.completion",
- system_fingerprint=None,
- usage={
- "completion_tokens": 258,
- "prompt_tokens": 16,
- "total_tokens": 274,
- "completion_tokens_details": None,
- "prompt_tokens_details": None,
- },
- service_tier=None,
-)
-```
+While you can provide an `api_key` keyword argument,
+we recommend using [python-dotenv](https://pypi.org/project/python-dotenv/)
+to add `OGX_CLIENT_API_KEY="My API Key"` to your `.env` file
+so that your API Key is not stored in source control.
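+
+A minimal sketch of both approaches (the `api_key` keyword argument is named above; the `load_dotenv` call comes from python-dotenv and is an assumption about how you would wire up the `.env` file):
+
+```python
+from dotenv import load_dotenv  # provided by python-dotenv
+
+from ogx_client import OgxClient
+
+# Option 1 (recommended): read OGX_CLIENT_API_KEY from a .env file.
+load_dotenv()
+client = OgxClient()
+
+# Option 2: pass the key explicitly. Avoid committing real keys.
+client = OgxClient(api_key="My API Key")
+```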
## Async usage
-Simply import `AsyncLlamaStackClient` instead of `LlamaStackClient` and use `await` with each API call:
+Simply import `AsyncOgxClient` instead of `OgxClient` and use `await` with each API call:
```python
import asyncio
-from llama_stack_client import AsyncLlamaStackClient
+from ogx_client import AsyncOgxClient
-client = AsyncLlamaStackClient(
- # defaults to "production".
- environment="sandbox",
-)
+client = AsyncOgxClient()
async def main() -> None:
- models = await client.models.list()
+ list_models_response = await client.models.list()
+ print(list_models_response.data)
asyncio.run(main())
@@ -107,22 +67,23 @@ You can enable this by installing `aiohttp`:
```sh
# install from PyPI
-pip install '--pre llama_stack_client[aiohttp]'
+pip install --pre 'ogx_client[aiohttp]'
```
Then you can enable it by instantiating the client with `http_client=DefaultAioHttpClient()`:
```python
import asyncio
-from llama_stack_client import DefaultAioHttpClient
-from llama_stack_client import AsyncLlamaStackClient
+from ogx_client import DefaultAioHttpClient
+from ogx_client import AsyncOgxClient
async def main() -> None:
- async with AsyncLlamaStackClient(
+ async with AsyncOgxClient(
http_client=DefaultAioHttpClient(),
) as client:
- models = await client.models.list()
+ list_models_response = await client.models.list()
+ print(list_models_response.data)
asyncio.run(main())
@@ -133,9 +94,9 @@ asyncio.run(main())
We provide support for streaming responses using Server Side Events (SSE).
```python
-from llama_stack_client import LlamaStackClient
+from ogx_client import OgxClient
-client = LlamaStackClient()
+client = OgxClient()
stream = client.chat.completions.create(
messages=[
@@ -154,9 +115,9 @@ for completion in stream:
The async client uses the exact same interface.
```python
-from llama_stack_client import AsyncLlamaStackClient
+from ogx_client import AsyncOgxClient
-client = AsyncLlamaStackClient()
+client = AsyncOgxClient()
stream = await client.chat.completions.create(
messages=[
@@ -183,14 +144,14 @@ Typed requests and responses provide autocomplete and documentation within your
## Pagination
-List methods in the Llama Stack Client API are paginated.
+List methods in the Ogx Client API are paginated.
This library provides auto-paginating iterators with each list response, so you do not have to request successive pages manually:
```python
-from llama_stack_client import LlamaStackClient
+from ogx_client import OgxClient
-client = LlamaStackClient()
+client = OgxClient()
all_responses = []
# Automatically fetches more pages as needed.
@@ -204,9 +165,9 @@ Or, asynchronously:
```python
import asyncio
-from llama_stack_client import AsyncLlamaStackClient
+from ogx_client import AsyncOgxClient
-client = AsyncLlamaStackClient()
+client = AsyncOgxClient()
async def main() -> None:
@@ -249,9 +210,9 @@ for response in first_page.data:
Nested parameters are dictionaries, typed using `TypedDict`, for example:
```python
-from llama_stack_client import LlamaStackClient
+from ogx_client import OgxClient
-client = LlamaStackClient()
+client = OgxClient()
response_object = client.responses.create(
input="string",
@@ -267,9 +228,9 @@ Request parameters that correspond to file uploads can be passed as `bytes`, or
```python
from pathlib import Path
-from llama_stack_client import LlamaStackClient
+from ogx_client import OgxClient
-client = LlamaStackClient()
+client = OgxClient()
client.files.create(
file=Path("/path/to/file"),
@@ -281,18 +242,18 @@ The async client uses the exact same interface. If you pass a [`PathLike`](https
## Handling errors
-When the library is unable to connect to the API (for example, due to network connection problems or a timeout), a subclass of `llama_stack_client.APIConnectionError` is raised.
+When the library is unable to connect to the API (for example, due to network connection problems or a timeout), a subclass of `ogx_client.APIConnectionError` is raised.
When the API returns a non-success status code (that is, 4xx or 5xx
-response), a subclass of `llama_stack_client.APIStatusError` is raised, containing `status_code` and `response` properties.
+response), a subclass of `ogx_client.APIStatusError` is raised, containing `status_code` and `response` properties.
-All errors inherit from `llama_stack_client.APIError`.
+All errors inherit from `ogx_client.APIError`.
```python
-import llama_stack_client
-from llama_stack_client import LlamaStackClient
+import ogx_client
+from ogx_client import OgxClient
-client = LlamaStackClient()
+client = OgxClient()
try:
client.chat.completions.create(
@@ -304,18 +265,18 @@ try:
],
model="model",
)
-except llama_stack_client.APIConnectionError as e:
+except ogx_client.APIConnectionError as e:
print("The server could not be reached")
print(e.__cause__) # an underlying Exception, likely raised within httpx.
-except llama_stack_client.RateLimitError as e:
+except ogx_client.RateLimitError as e:
print("A 429 status code was received; we should back off a bit.")
-except llama_stack_client.APIStatusError as e:
+except ogx_client.APIStatusError as e:
print("Another non-200-range status code was received")
print(e.status_code)
print(e.response)
```
-Error codes are as followed:
+Error codes are as follows:
| Status Code | Error Type |
| ----------- | -------------------------- |
@@ -337,10 +298,10 @@ Connection errors (for example, due to a network connectivity problem), 408 Requ
You can use the `max_retries` option to configure or disable retry settings:
```python
-from llama_stack_client import LlamaStackClient
+from ogx_client import OgxClient
# Configure the default for all requests:
-client = LlamaStackClient(
+client = OgxClient(
# default is 2
max_retries=0,
)
@@ -360,19 +321,19 @@ client.with_options(max_retries=5).chat.completions.create(
### Timeouts
By default requests time out after 1 minute. You can configure this with a `timeout` option,
-which accepts a float or an [`httpx.Timeout`](https://www.python-httpx.org/advanced/#fine-tuning-the-configuration) object:
+which accepts a float or an [`httpx.Timeout`](https://www.python-httpx.org/advanced/timeouts/#fine-tuning-the-configuration) object:
```python
-from llama_stack_client import LlamaStackClient
+from ogx_client import OgxClient
# Configure the default for all requests:
-client = LlamaStackClient(
+client = OgxClient(
# 20 seconds (default is 1 minute)
timeout=20.0,
)
# More granular control:
-client = LlamaStackClient(
+client = OgxClient(
timeout=httpx.Timeout(60.0, read=5.0, write=10.0, connect=2.0),
)
@@ -398,12 +359,14 @@ Note that requests that time out are [retried twice by default](#retries).
We use the standard library [`logging`](https://docs.python.org/3/library/logging.html) module.
-You can enable logging by setting the environment variable `LLAMA_STACK_LOG` to `debug`.
+You can enable logging by setting the environment variable `OGX_CLIENT_LOG` to `info`.
```shell
-$ export LLAMA_STACK_LOG=debug
+$ export OGX_CLIENT_LOG=info
```
+Or set it to `debug` for more verbose logging:
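+
+```shell
+$ export OGX_CLIENT_LOG=debug
+```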
+
### How to tell whether `None` means `null` or missing
In an API response, a field may be explicitly `null`, or missing entirely; in either case, its value is `None` in this library. You can differentiate the two cases with `.model_fields_set`:
@@ -421,9 +384,9 @@ if response.my_field is None:
The "raw" Response object can be accessed by prefixing `.with_raw_response.` to any HTTP method call, e.g.,
```py
-from llama_stack_client import LlamaStackClient
+from ogx_client import OgxClient
-client = LlamaStackClient()
+client = OgxClient()
response = client.chat.completions.with_raw_response.create(
messages=[{
"content": "string",
@@ -437,9 +400,9 @@ completion = response.parse() # get the object that `chat.completions.create()`
print(completion.id)
```
-These methods return an [`APIResponse`](https://github.com/meta-llama/llama-stack-python/tree/main/src/llama_stack_client/_response.py) object.
+These methods return an [`APIResponse`](https://github.com/ogx-ai/ogx-client-python/tree/main/src/ogx_client/_response.py) object.
-The async client returns an [`AsyncAPIResponse`](https://github.com/meta-llama/llama-stack-python/tree/main/src/llama_stack_client/_response.py) with the same structure, the only difference being `await`able methods for reading the response content.
+The async client returns an [`AsyncAPIResponse`](https://github.com/ogx-ai/ogx-client-python/tree/main/src/ogx_client/_response.py) with the same structure, the only difference being `await`able methods for reading the response content.
#### `.with_streaming_response`
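A minimal sketch of the pattern, assuming the same `create` arguments as the raw-response example above:

```python
with client.chat.completions.with_streaming_response.create(
    messages=[{
        "content": "string",
        "role": "user",
    }],
    model="model",
) as response:
    print(response.headers.get("X-My-Header"))

    # the body is only read as it is iterated
    for line in response.iter_lines():
        print(line)
```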
@@ -474,8 +437,7 @@ If you need to access undocumented endpoints, params, or response properties, th
#### Undocumented endpoints
To make requests to undocumented endpoints, you can make requests using `client.get`, `client.post`, and other
-http verbs. Options on the client will be respected (such as retries) will be respected when making this
-request.
+http verbs. Options on the client will be respected (such as retries) when making this request.
```py
import httpx
@@ -504,18 +466,19 @@ can also get all the extra fields on the Pydantic model as a dict with
You can directly override the [httpx client](https://www.python-httpx.org/api/#client) to customize it for your use case, including:
-- Support for proxies
-- Custom transports
+- Support for [proxies](https://www.python-httpx.org/advanced/proxies/)
+- Custom [transports](https://www.python-httpx.org/advanced/transports/)
- Additional [advanced](https://www.python-httpx.org/advanced/clients/) functionality
```python
-from llama_stack_client import LlamaStackClient, DefaultHttpxClient
+import httpx
+from ogx_client import OgxClient, DefaultHttpxClient
-client = LlamaStackClient(
- # Or use the `LLAMA_STACK_CLIENT_BASE_URL` env var
+client = OgxClient(
+ # Or use the `OGX_CLIENT_BASE_URL` env var
base_url="http://my.test.server.example.com:8083",
http_client=DefaultHttpxClient(
- proxies="http://my.test.proxy.example.com",
+ proxy="http://my.test.proxy.example.com",
transport=httpx.HTTPTransport(local_address="0.0.0.0"),
),
)
@@ -531,17 +494,27 @@ client.with_options(http_client=DefaultHttpxClient(...))
By default the library closes underlying HTTP connections whenever the client is [garbage collected](https://docs.python.org/3/reference/datamodel.html#object.__del__). You can manually close the client using the `.close()` method if desired, or with a context manager that closes when exiting.
+```py
+from ogx_client import OgxClient
+
+with OgxClient() as client:
+ # make requests here
+ ...
+
+# HTTP client is now closed
+```
+
## Versioning
This package generally follows [SemVer](https://semver.org/spec/v2.0.0.html) conventions, though certain backwards-incompatible changes may be released as minor versions:
1. Changes that only affect static types, without breaking runtime behavior.
-2. Changes to library internals which are technically public but not intended or documented for external use. _(Please open a GitHub issue to let us know if you are relying on such internals)_.
+2. Changes to library internals which are technically public but not intended or documented for external use. _(Please open a GitHub issue to let us know if you are relying on such internals.)_
3. Changes that we do not expect to impact the vast majority of users in practice.
We take backwards-compatibility seriously and work hard to ensure you can rely on a smooth upgrade experience.
-We are keen for your feedback; please open an [issue](https://www.github.com/meta-llama/llama-stack-python/issues) with questions, bugs, or suggestions.
+We are keen for your feedback; please open an [issue](https://www.github.com/ogx-ai/ogx-client-python/issues) with questions, bugs, or suggestions.
### Determining the installed version
@@ -550,14 +523,14 @@ If you've upgraded to the latest version but aren't seeing any new features you
You can determine the version that is being used at runtime with:
```py
-import llama_stack_client
-print(llama_stack_client.__version__)
+import ogx_client
+print(ogx_client.__version__)
```
## Requirements
-Python 3.12 or higher.
+Python 3.9 or higher.
-## License
+## Contributing
-This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
+See [the contributing documentation](./CONTRIBUTING.md).
diff --git a/SECURITY.md b/SECURITY.md
index 1b5f3a4d..33b7de0e 100644
--- a/SECURITY.md
+++ b/SECURITY.md
@@ -16,11 +16,11 @@ before making any information public.
## Reporting Non-SDK Related Security Issues
If you encounter security issues that are not directly related to SDKs but pertain to the services
-or products provided by Llama Stack Client, please follow the respective company's security reporting guidelines.
+or products provided by Ogx Client, please follow the respective company's security reporting guidelines.
-### Llama Stack Client Terms and Policies
+### Ogx Client Terms and Policies
-Please contact llamastack@meta.com for any questions or concerns regarding the security of our services.
+Please contact contributors@ogx.dev for any questions or concerns regarding the security of our services.
---
diff --git a/api.md b/api.md
index 5e0426aa..277b36f6 100644
--- a/api.md
+++ b/api.md
@@ -1,7 +1,7 @@
# Shared Types
```python
-from llama_stack_client.types import (
+from ogx_client.types import (
HealthInfo,
InterleavedContent,
InterleavedContentItem,
@@ -12,7 +12,6 @@ from llama_stack_client.types import (
RouteInfo,
SafetyViolation,
SamplingParams,
- ScoringResult,
SystemMessage,
VersionInfo,
)
@@ -23,9 +22,13 @@ from llama_stack_client.types import (
Types:
```python
-from llama_stack_client.types import (
+from ogx_client.types import (
+ CompactedResponse,
+ ResponseInput,
+ ResponseMessage,
ResponseObject,
ResponseObjectStream,
+ ResponseOutput,
ResponseListResponse,
ResponseDeleteResponse,
)
@@ -33,67 +36,68 @@ from llama_stack_client.types import (
Methods:
-- client.responses.create(\*\*params) -> ResponseObject
-- client.responses.retrieve(response_id) -> ResponseObject
-- client.responses.list(\*\*params) -> SyncOpenAICursorPage[ResponseListResponse]
-- client.responses.delete(response_id) -> ResponseDeleteResponse
+- client.responses.create(\*\*params) -> ResponseObject
+- client.responses.retrieve(response_id) -> ResponseObject
+- client.responses.list(\*\*params) -> SyncOpenAICursorPage[ResponseListResponse]
+- client.responses.delete(response_id) -> ResponseDeleteResponse
+- client.responses.compact(\*\*params) -> CompactedResponse
## InputItems
Types:
```python
-from llama_stack_client.types.responses import InputItemListResponse
+from ogx_client.types.responses import InputItemListResponse
```
Methods:
-- client.responses.input_items.list(response_id, \*\*params) -> InputItemListResponse
+- client.responses.input_items.list(response_id, \*\*params) -> InputItemListResponse
# Prompts
Types:
```python
-from llama_stack_client.types import ListPromptsResponse, Prompt, PromptListResponse
+from ogx_client.types import ListPromptsResponse, Prompt, PromptListResponse
```
Methods:
-- client.prompts.create(\*\*params) -> Prompt
-- client.prompts.retrieve(prompt_id, \*\*params) -> Prompt
-- client.prompts.update(prompt_id, \*\*params) -> Prompt
-- client.prompts.list() -> PromptListResponse
-- client.prompts.delete(prompt_id) -> None
-- client.prompts.set_default_version(prompt_id, \*\*params) -> Prompt
+- client.prompts.create(\*\*params) -> Prompt
+- client.prompts.retrieve(prompt_id, \*\*params) -> Prompt
+- client.prompts.update(prompt_id, \*\*params) -> Prompt
+- client.prompts.list() -> PromptListResponse
+- client.prompts.delete(prompt_id) -> None
+- client.prompts.set_default_version(prompt_id, \*\*params) -> Prompt
## Versions
Methods:
-- client.prompts.versions.list(prompt_id) -> PromptListResponse
+- client.prompts.versions.list(prompt_id) -> PromptListResponse
# Conversations
Types:
```python
-from llama_stack_client.types import ConversationObject, ConversationDeleteResponse
+from ogx_client.types import ConversationObject, ConversationDeleteResponse
```
Methods:
-- client.conversations.create(\*\*params) -> ConversationObject
-- client.conversations.retrieve(conversation_id) -> ConversationObject
-- client.conversations.update(conversation_id, \*\*params) -> ConversationObject
-- client.conversations.delete(conversation_id) -> ConversationDeleteResponse
+- client.conversations.create(\*\*params) -> ConversationObject
+- client.conversations.retrieve(conversation_id) -> ConversationObject
+- client.conversations.update(conversation_id, \*\*params) -> ConversationObject
+- client.conversations.delete(conversation_id) -> ConversationDeleteResponse
## Items
Types:
```python
-from llama_stack_client.types.conversations import (
+from ogx_client.types.conversations import (
ItemCreateResponse,
ItemListResponse,
ItemDeleteResponse,
@@ -103,36 +107,36 @@ from llama_stack_client.types.conversations import (
Methods:
-- client.conversations.items.create(conversation_id, \*\*params) -> ItemCreateResponse
-- client.conversations.items.list(conversation_id, \*\*params) -> SyncOpenAICursorPage[ItemListResponse]
-- client.conversations.items.delete(item_id, \*, conversation_id) -> ItemDeleteResponse
-- client.conversations.items.get(item_id, \*, conversation_id) -> ItemGetResponse
+- client.conversations.items.create(conversation_id, \*\*params) -> ItemCreateResponse
+- client.conversations.items.list(conversation_id, \*\*params) -> SyncOpenAICursorPage[ItemListResponse]
+- client.conversations.items.delete(item_id, \*, conversation_id) -> ItemDeleteResponse
+- client.conversations.items.get(item_id, \*, conversation_id) -> ItemGetResponse
# Inspect
Methods:
-- client.inspect.health() -> HealthInfo
-- client.inspect.version() -> VersionInfo
+- client.inspect.health() -> HealthInfo
+- client.inspect.version() -> VersionInfo
# Embeddings
Types:
```python
-from llama_stack_client.types import CreateEmbeddingsResponse
+from ogx_client.types import CreateEmbeddingsResponse
```
Methods:
-- client.embeddings.create(\*\*params) -> CreateEmbeddingsResponse
+- client.embeddings.create(\*\*params) -> CreateEmbeddingsResponse
# Chat
Types:
```python
-from llama_stack_client.types import ChatCompletionChunk
+from ogx_client.types import ChatCompletionChunk
```
## Completions
@@ -140,7 +144,7 @@ from llama_stack_client.types import ChatCompletionChunk
Types:
```python
-from llama_stack_client.types.chat import (
+from ogx_client.types.chat import (
CompletionCreateResponse,
CompletionRetrieveResponse,
CompletionListResponse,
@@ -149,41 +153,41 @@ from llama_stack_client.types.chat import (
Methods:
-- client.chat.completions.create(\*\*params) -> CompletionCreateResponse
-- client.chat.completions.retrieve(completion_id) -> CompletionRetrieveResponse
-- client.chat.completions.list(\*\*params) -> CompletionListResponse
+- client.chat.completions.create(\*\*params) -> CompletionCreateResponse
+- client.chat.completions.retrieve(completion_id) -> CompletionRetrieveResponse
+- client.chat.completions.list(\*\*params) -> CompletionListResponse
# Completions
Types:
```python
-from llama_stack_client.types import CompletionCreateResponse
+from ogx_client.types import CompletionCreateResponse
```
Methods:
-- client.completions.create(\*\*params) -> CompletionCreateResponse
+- client.completions.create(\*\*params) -> CompletionCreateResponse
# VectorIo
Types:
```python
-from llama_stack_client.types import QueryChunksResponse
+from ogx_client.types import QueryChunksResponse
```
Methods:
-- client.vector_io.insert(\*\*params) -> None
-- client.vector_io.query(\*\*params) -> QueryChunksResponse
+- client.vector_io.insert(\*\*params) -> None
+- client.vector_io.query(\*\*params) -> QueryChunksResponse
# VectorStores
Types:
```python
-from llama_stack_client.types import (
+from ogx_client.types import (
ListVectorStoresResponse,
VectorStore,
VectorStoreDeleteResponse,
@@ -193,40 +197,36 @@ from llama_stack_client.types import (
Methods:
-- client.vector_stores.create(\*\*params) -> VectorStore
-- client.vector_stores.retrieve(vector_store_id) -> VectorStore
-- client.vector_stores.update(vector_store_id, \*\*params) -> VectorStore
-- client.vector_stores.list(\*\*params) -> SyncOpenAICursorPage[VectorStore]
-- client.vector_stores.delete(vector_store_id) -> VectorStoreDeleteResponse
-- client.vector_stores.search(vector_store_id, \*\*params) -> VectorStoreSearchResponse
+- client.vector_stores.create(\*\*params) -> VectorStore
+- client.vector_stores.retrieve(vector_store_id) -> VectorStore
+- client.vector_stores.update(vector_store_id, \*\*params) -> VectorStore
+- client.vector_stores.list(\*\*params) -> SyncOpenAICursorPage[VectorStore]
+- client.vector_stores.delete(vector_store_id) -> VectorStoreDeleteResponse
+- client.vector_stores.search(vector_store_id, \*\*params) -> VectorStoreSearchResponse
## Files
Types:
```python
-from llama_stack_client.types.vector_stores import (
- VectorStoreFile,
- FileDeleteResponse,
- FileContentResponse,
-)
+from ogx_client.types.vector_stores import VectorStoreFile, FileDeleteResponse, FileContentResponse
```
Methods:
-- client.vector_stores.files.create(vector_store_id, \*\*params) -> VectorStoreFile
-- client.vector_stores.files.retrieve(file_id, \*, vector_store_id) -> VectorStoreFile
-- client.vector_stores.files.update(file_id, \*, vector_store_id, \*\*params) -> VectorStoreFile
-- client.vector_stores.files.list(vector_store_id, \*\*params) -> SyncOpenAICursorPage[VectorStoreFile]
-- client.vector_stores.files.delete(file_id, \*, vector_store_id) -> FileDeleteResponse
-- client.vector_stores.files.content(file_id, \*, vector_store_id, \*\*params) -> FileContentResponse
+- client.vector_stores.files.create(vector_store_id, \*\*params) -> VectorStoreFile
+- client.vector_stores.files.retrieve(file_id, \*, vector_store_id) -> VectorStoreFile
+- client.vector_stores.files.update(file_id, \*, vector_store_id, \*\*params) -> VectorStoreFile
+- client.vector_stores.files.list(vector_store_id, \*\*params) -> SyncOpenAICursorPage[VectorStoreFile]
+- client.vector_stores.files.delete(file_id, \*, vector_store_id) -> FileDeleteResponse
+- client.vector_stores.files.content(file_id, \*, vector_store_id, \*\*params) -> FileContentResponse
## FileBatches
Types:
```python
-from llama_stack_client.types.vector_stores import (
+from ogx_client.types.vector_stores import (
ListVectorStoreFilesInBatchResponse,
VectorStoreFileBatches,
)
@@ -234,159 +234,116 @@ from llama_stack_client.types.vector_stores import (
Methods:
-- client.vector_stores.file_batches.create(vector_store_id, \*\*params) -> VectorStoreFileBatches
-- client.vector_stores.file_batches.retrieve(batch_id, \*, vector_store_id) -> VectorStoreFileBatches
-- client.vector_stores.file_batches.cancel(batch_id, \*, vector_store_id) -> VectorStoreFileBatches
-- client.vector_stores.file_batches.list_files(batch_id, \*, vector_store_id, \*\*params) -> SyncOpenAICursorPage[VectorStoreFile]
+- client.vector_stores.file_batches.create(vector_store_id, \*\*params) -> VectorStoreFileBatches
+- client.vector_stores.file_batches.retrieve(batch_id, \*, vector_store_id) -> VectorStoreFileBatches
+- client.vector_stores.file_batches.cancel(batch_id, \*, vector_store_id) -> VectorStoreFileBatches
+- client.vector_stores.file_batches.list_files(batch_id, \*, vector_store_id, \*\*params) -> SyncOpenAICursorPage[VectorStoreFile]
# Models
Types:
```python
-from llama_stack_client.types import (
- ListModelsResponse,
- Model,
- ModelRetrieveResponse,
- ModelListResponse,
-)
+from ogx_client.types import ListModelsResponse, Model, ModelRetrieveResponse
```
Methods:
-- client.models.retrieve(model_id) -> ModelRetrieveResponse
-- client.models.list() -> ModelListResponse
+- client.models.retrieve(model_id) -> ModelRetrieveResponse
+- client.models.list() -> ListModelsResponse
## OpenAI
Methods:
-- client.models.openai.list() -> ModelListResponse
+- client.models.openai.list() -> ListModelsResponse
# Providers
Types:
```python
-from llama_stack_client.types import ProviderListResponse
+from ogx_client.types import ProviderListResponse
```
Methods:
-- client.providers.retrieve(provider_id) -> ProviderInfo
-- client.providers.list() -> ProviderListResponse
+- client.providers.retrieve(provider_id) -> ProviderInfo
+- client.providers.list() -> ProviderListResponse
# Routes
Types:
```python
-from llama_stack_client.types import RouteListResponse
+from ogx_client.types import RouteListResponse
```
Methods:
-- client.routes.list(\*\*params) -> RouteListResponse
+- client.routes.list(\*\*params) -> RouteListResponse
# Moderations
Types:
```python
-from llama_stack_client.types import CreateResponse
+from ogx_client.types import CreateResponse
```
Methods:
-- client.moderations.create(\*\*params) -> CreateResponse
+- client.moderations.create(\*\*params) -> CreateResponse
# Safety
Types:
```python
-from llama_stack_client.types import RunShieldResponse
+from ogx_client.types import RunShieldResponse
```
Methods:
-- client.safety.run_shield(\*\*params) -> RunShieldResponse
+- client.safety.run_shield(\*\*params) -> RunShieldResponse
# Shields
Types:
```python
-from llama_stack_client.types import ListShieldsResponse, Shield, ShieldListResponse
-```
-
-Methods:
-
-- client.shields.retrieve(identifier) -> Shield
-- client.shields.list() -> ShieldListResponse
-- client.shields.delete(identifier) -> None
-- client.shields.register(\*\*params) -> Shield
-
-# Scoring
-
-Types:
-
-```python
-from llama_stack_client.types import ScoringScoreResponse, ScoringScoreBatchResponse
-```
-
-Methods:
-
-- client.scoring.score(\*\*params) -> ScoringScoreResponse
-- client.scoring.score_batch(\*\*params) -> ScoringScoreBatchResponse
-
-# ScoringFunctions
-
-Types:
-
-```python
-from llama_stack_client.types import (
- ListScoringFunctionsResponse,
- ScoringFn,
- ScoringFnParams,
- ScoringFunctionListResponse,
-)
+from ogx_client.types import ListShieldsResponse, Shield, ShieldListResponse
```
Methods:
-- client.scoring_functions.retrieve(scoring_fn_id) -> ScoringFn
-- client.scoring_functions.list() -> ScoringFunctionListResponse
-- client.scoring_functions.register(\*\*params) -> None
-- client.scoring_functions.unregister(scoring_fn_id) -> None
+- client.shields.retrieve(identifier) -> Shield
+- client.shields.list() -> ShieldListResponse
+- client.shields.delete(identifier) -> None
+- client.shields.register(\*\*params) -> Shield
# Files
Types:
```python
-from llama_stack_client.types import (
- DeleteFileResponse,
- File,
- ListFilesResponse,
- FileContentResponse,
-)
+from ogx_client.types import DeleteFileResponse, File, ListFilesResponse, FileContentResponse
```
Methods:
-- client.files.create(\*\*params) -> File
-- client.files.retrieve(file_id) -> File
-- client.files.list(\*\*params) -> SyncOpenAICursorPage[File]
-- client.files.delete(file_id) -> DeleteFileResponse
-- client.files.content(file_id) -> str
+- client.files.create(\*\*params) -> File
+- client.files.retrieve(file_id) -> File
+- client.files.list(\*\*params) -> SyncOpenAICursorPage[File]
+- client.files.delete(file_id) -> DeleteFileResponse
+- client.files.content(file_id) -> str
# Batches
Types:
```python
-from llama_stack_client.types import (
+from ogx_client.types import (
BatchCreateResponse,
BatchRetrieveResponse,
BatchListResponse,
@@ -396,94 +353,31 @@ from llama_stack_client.types import (
Methods:
-- client.batches.create(\*\*params) -> BatchCreateResponse
-- client.batches.retrieve(batch_id) -> BatchRetrieveResponse
-- client.batches.list(\*\*params) -> SyncOpenAICursorPage[BatchListResponse]
-- client.batches.cancel(batch_id) -> BatchCancelResponse
+- client.batches.create(\*\*params) -> BatchCreateResponse
+- client.batches.retrieve(batch_id) -> BatchRetrieveResponse
+- client.batches.list(\*\*params) -> SyncOpenAICursorPage[BatchListResponse]
+- client.batches.cancel(batch_id) -> BatchCancelResponse
# Alpha
-## Benchmarks
-
-Types:
-
-```python
-from llama_stack_client.types.alpha import Benchmark, ListBenchmarksResponse, BenchmarkListResponse
-```
-
-Methods:
-
-- client.alpha.benchmarks.retrieve(benchmark_id) -> Benchmark
-- client.alpha.benchmarks.list() -> BenchmarkListResponse
-- client.alpha.benchmarks.register(\*\*params) -> None
-- client.alpha.benchmarks.unregister(benchmark_id) -> None
-
-## Eval
-
-Types:
-
-```python
-from llama_stack_client.types.alpha import BenchmarkConfig, EvaluateResponse, Job
-```
-
-Methods:
-
-- client.alpha.eval.evaluate_rows(benchmark_id, \*\*params) -> EvaluateResponse
-- client.alpha.eval.evaluate_rows_alpha(benchmark_id, \*\*params) -> EvaluateResponse
-- client.alpha.eval.run_eval(benchmark_id, \*\*params) -> Job
-- client.alpha.eval.run_eval_alpha(benchmark_id, \*\*params) -> Job
-
-### Jobs
-
-Methods:
-
-- client.alpha.eval.jobs.retrieve(job_id, \*, benchmark_id) -> EvaluateResponse
-- client.alpha.eval.jobs.cancel(job_id, \*, benchmark_id) -> None
-- client.alpha.eval.jobs.status(job_id, \*, benchmark_id) -> Job
-
## Admin
Methods:
-- client.alpha.admin.health() -> HealthInfo
-- client.alpha.admin.inspect_provider(provider_id) -> ProviderInfo
-- client.alpha.admin.list_providers() -> ProviderListResponse
-- client.alpha.admin.list_routes(\*\*params) -> RouteListResponse
-- client.alpha.admin.version() -> VersionInfo
+- client.alpha.admin.health() -> HealthInfo
+- client.alpha.admin.inspect_provider(provider_id) -> ProviderInfo
+- client.alpha.admin.list_providers() -> ProviderListResponse
+- client.alpha.admin.list_routes(\*\*params) -> RouteListResponse
+- client.alpha.admin.version() -> VersionInfo
## Inference
Types:
```python
-from llama_stack_client.types.alpha import InferenceRerankResponse
-```
-
-Methods:
-
-- client.alpha.inference.rerank(\*\*params) -> InferenceRerankResponse
-
-# Beta
-
-## Datasets
-
-Types:
-
-```python
-from llama_stack_client.types.beta import (
- ListDatasetsResponse,
- DatasetRetrieveResponse,
- DatasetListResponse,
- DatasetIterrowsResponse,
- DatasetRegisterResponse,
-)
+from ogx_client.types.alpha import InferenceRerankResponse
```
Methods:
-- client.beta.datasets.retrieve(dataset_id) -> DatasetRetrieveResponse
-- client.beta.datasets.list() -> DatasetListResponse
-- client.beta.datasets.appendrows(dataset_id, \*\*params) -> None
-- client.beta.datasets.iterrows(dataset_id, \*\*params) -> DatasetIterrowsResponse
-- client.beta.datasets.register(\*\*params) -> DatasetRegisterResponse
-- client.beta.datasets.unregister(dataset_id) -> None
+- client.alpha.inference.rerank(\*\*params) -> InferenceRerankResponse
diff --git a/pyproject.toml b/pyproject.toml
index 7c8dbfa8..18744327 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,34 +1,31 @@
[project]
-name = "llama_stack_client"
-version = "0.7.0-alpha.3"
-description = "The official Python library for the llama-stack-client API"
+name = "ogx_client"
+version = "0.7.2-alpha.4"
+description = "The official Python library for the ogx-client API"
dynamic = ["readme"]
license = "MIT"
-authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+authors = [{ name = "Ogx Client", email = "contributors@ogx.dev" }]
dependencies = [
"httpx>=0.23.0, <1",
"pydantic>=1.9.0, <3",
- "typing-extensions>=4.7, <5", "typing-extensions>=4.14, <5",
+ "typing-extensions>=4.14, <5",
"anyio>=3.5.0, <5",
"distro>=1.7.0, <2",
"sniffio",
- "tqdm",
- "rich",
- "click",
- "pyaml",
- "prompt_toolkit",
- "pandas",
- "termcolor",
- "fire",
- "requests",
]
-requires-python = ">= 3.12"
+requires-python = ">= 3.9"
classifiers = [
"Typing :: Typed",
"Intended Audience :: Developers",
+ "Programming Language :: Python :: 3.9",
+ "Programming Language :: Python :: 3.10",
+ "Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
+ "Programming Language :: Python :: 3.13",
"Programming Language :: Python :: 3.14",
"Operating System :: OS Independent",
"Operating System :: POSIX",
@@ -40,8 +37,8 @@ classifiers = [
]
[project.urls]
-Homepage = "https://github.com/llamastack/llama-stack-client-python"
-Repository = "https://github.com/llamastack/llama-stack-client-python"
+Homepage = "https://github.com/ogx-ai/ogx-client-python"
+Repository = "https://github.com/ogx-ai/ogx-client-python"
[project.optional-dependencies]
aiohttp = ["aiohttp", "httpx_aiohttp>=0.1.9"]
@@ -62,7 +59,7 @@ dev = [
"pyright==1.1.399",
"mypy==1.17",
"respx",
- "pytest>=7.1.1",
+ "pytest",
"pytest-asyncio",
"ruff",
"time-machine",
@@ -70,8 +67,6 @@ dev = [
"importlib-metadata>=6.7.0",
"rich>=13.7.1",
"pytest-xdist>=3.6.1",
- "pre-commit",
- "black",
]
pydantic-v1 = [
"pydantic>=1.9.0,<2",
@@ -81,7 +76,6 @@ pydantic-v2 = [
"pydantic~=2.12 ; python_full_version >= '3.14'",
]
-
[build-system]
requires = ["hatchling==1.26.3", "hatch-fancy-pypi-readme"]
build-backend = "hatchling.build"
@@ -92,7 +86,7 @@ include = [
]
[tool.hatch.build.targets.wheel]
-packages = ["src/llama_stack_client"]
+packages = ["src/ogx_client"]
[tool.hatch.build.targets.sdist]
# Basically everything except hidden files/directories (such as .github, .devcontainers, .python-version, etc)
@@ -118,7 +112,7 @@ path = "README.md"
[[tool.hatch.metadata.hooks.fancy-pypi-readme.substitutions]]
# replace relative links with absolute links
pattern = '\[(.+?)\]\(((?!https?://)\S+?)\)'
-replacement = '[\1](https://github.com/llamastack/llama-stack-client-python/tree/main/\g<2>)'
+replacement = '[\1](https://github.com/ogx-ai/ogx-client-python/tree/main/\g<2>)'
[tool.pytest.ini_options]
testpaths = ["tests"]
@@ -130,6 +124,26 @@ filterwarnings = [
"error"
]
+[tool.pyright]
+# this enables practically every flag given by pyright.
+# there are a couple of flags that are still disabled by
+# default in strict mode as they are experimental and niche.
+typeCheckingMode = "strict"
+pythonVersion = "3.9"
+
+exclude = [
+ "_dev",
+ ".venv",
+ ".nox",
+ ".git",
+]
+
+reportImplicitOverride = true
+reportOverlappingOverload = false
+
+reportImportCycles = false
+reportPrivateUsage = false
+
[tool.mypy]
pretty = true
show_error_codes = true
@@ -140,7 +154,7 @@ show_error_codes = true
#
# We also exclude our `tests` as mypy doesn't always infer
# types correctly and Pyright will still catch any type errors.
-exclude = ['src/llama_stack_client/_files.py', '_dev/.*.py', 'tests/.*']
+exclude = ['src/ogx_client/_files.py', '_dev/.*.py', 'tests/.*']
strict_equality = true
implicit_reexport = true
@@ -176,7 +190,6 @@ cache_fine_grained = true
disable_error_code = "func-returns-value,overload-cannot-match"
# https://github.com/python/mypy/issues/12162
-
[[tool.mypy.overrides]]
module = "black.files.*"
ignore_errors = true
@@ -186,6 +199,7 @@ ignore_missing_imports = true
[tool.ruff]
line-length = 120
output-format = "grouped"
+target-version = "py38"
[tool.ruff.format]
docstring-code-format = true
@@ -208,7 +222,7 @@ select = [
"T201",
"T203",
# misuse of typing.TYPE_CHECKING
- "TCH004",
+ "TC004",
# import rules
"TID251",
]
@@ -232,14 +246,10 @@ length-sort = true
length-sort-straight = true
combine-as-imports = true
extra-standard-library = ["typing_extensions"]
-known-first-party = ["llama_stack_client", "tests"]
+known-first-party = ["ogx_client", "tests"]
[tool.ruff.lint.per-file-ignores]
"bin/**.py" = ["T201", "T203"]
"scripts/**.py" = ["T201", "T203"]
"tests/**.py" = ["T201", "T203"]
-"examples/**.py" = ["T201", "T203", "TCH004", "I", "B"]
-"src/llama_stack_client/lib/**.py" = ["T201", "T203", "TCH004", "I", "B"]
-
-[project.scripts]
-llama-stack-client = "llama_stack_client.lib.cli.llama_stack_client:main"
+"examples/**.py" = ["T201", "T203"]
diff --git a/release-please-config.json b/release-please-config.json
index 04870019..027d063c 100644
--- a/release-please-config.json
+++ b/release-please-config.json
@@ -61,6 +61,6 @@
],
"release-type": "python",
"extra-files": [
- "src/llama_stack_client/_version.py"
+ "src/ogx_client/_version.py"
]
}
diff --git a/requirements-dev.lock b/requirements-dev.lock
index bd51b81f..9cb14807 100644
--- a/requirements-dev.lock
+++ b/requirements-dev.lock
@@ -6,96 +6,60 @@ annotated-types==0.7.0
anyio==4.12.1
# via
# httpx
- # llama-stack-client
-black==26.3.1
+ # ogx-client
+backports-asyncio-runner==1.2.0 ; python_full_version < '3.11'
+ # via pytest-asyncio
certifi==2026.1.4
# via
# httpcore
# httpx
- # requests
-cfgv==3.5.0
- # via pre-commit
-charset-normalizer==3.4.6
- # via requests
-click==8.3.1
- # via
- # black
- # llama-stack-client
colorama==0.4.6 ; sys_platform == 'win32'
- # via
- # click
- # pytest
- # tqdm
+ # via pytest
dirty-equals==0.11
-distlib==0.4.0
- # via virtualenv
distro==1.9.0
- # via llama-stack-client
+ # via ogx-client
+exceptiongroup==1.3.1 ; python_full_version < '3.11'
+ # via
+ # anyio
+ # pytest
execnet==2.1.2
# via pytest-xdist
-filelock==3.25.2
- # via
- # python-discovery
- # virtualenv
-fire==0.7.1
- # via llama-stack-client
h11==0.16.0
# via httpcore
httpcore==1.0.9
# via httpx
httpx==0.28.1
# via
- # llama-stack-client
+ # ogx-client
# respx
-identify==2.6.18
- # via pre-commit
idna==3.11
# via
# anyio
# httpx
- # requests
importlib-metadata==8.7.1
-iniconfig==2.3.0
+iniconfig==2.1.0 ; python_full_version < '3.10'
+ # via pytest
+iniconfig==2.3.0 ; python_full_version >= '3.10'
# via pytest
-markdown-it-py==4.0.0
+markdown-it-py==3.0.0 ; python_full_version < '3.10'
+ # via rich
+markdown-it-py==4.0.0 ; python_full_version >= '3.10'
# via rich
mdurl==0.1.2
# via markdown-it-py
mypy==1.17.0
mypy-extensions==1.1.0
- # via
- # black
- # mypy
+ # via mypy
nodeenv==1.10.0
- # via
- # pre-commit
- # pyright
-numpy==2.4.3
- # via pandas
+ # via pyright
packaging==25.0
- # via
- # black
- # pytest
-pandas==3.0.1
- # via llama-stack-client
+ # via pytest
pathspec==1.0.3
- # via
- # black
- # mypy
-platformdirs==4.9.4
- # via
- # black
- # python-discovery
- # virtualenv
+ # via mypy
pluggy==1.6.0
# via pytest
-pre-commit==4.5.1
-prompt-toolkit==3.0.52
- # via llama-stack-client
-pyaml==26.2.1
- # via llama-stack-client
pydantic==2.12.5
- # via llama-stack-client
+ # via ogx-client
pydantic-core==2.41.5
# via pydantic
pygments==2.19.2
@@ -103,44 +67,38 @@ pygments==2.19.2
# pytest
# rich
pyright==1.1.399
-pytest==9.0.2
+pytest==8.4.2 ; python_full_version < '3.10'
# via
# pytest-asyncio
# pytest-xdist
-pytest-asyncio==1.3.0
-pytest-xdist==3.8.0
-python-dateutil==2.9.0.post0
- # via pandas
-python-discovery==1.2.1
- # via virtualenv
-pytokens==0.4.1
- # via black
-pyyaml==6.0.3
+pytest==9.0.2 ; python_full_version >= '3.10'
# via
- # pre-commit
- # pyaml
-requests==2.33.0
- # via llama-stack-client
+ # pytest-asyncio
+ # pytest-xdist
+pytest-asyncio==1.2.0 ; python_full_version < '3.10'
+pytest-asyncio==1.3.0 ; python_full_version >= '3.10'
+pytest-xdist==3.8.0
+python-dateutil==2.9.0.post0 ; python_full_version < '3.10'
+ # via time-machine
respx==0.22.0
rich==14.2.0
- # via llama-stack-client
ruff==0.14.13
-six==1.17.0
+six==1.17.0 ; python_full_version < '3.10'
# via python-dateutil
sniffio==1.3.1
- # via llama-stack-client
-termcolor==3.3.0
+ # via ogx-client
+time-machine==2.19.0 ; python_full_version < '3.10'
+time-machine==3.2.0 ; python_full_version >= '3.10'
+tomli==2.4.0 ; python_full_version < '3.11'
# via
- # fire
- # llama-stack-client
-time-machine==3.2.0
-tqdm==4.67.3
- # via llama-stack-client
+ # mypy
+ # pytest
typing-extensions==4.15.0
# via
# anyio
- # llama-stack-client
+ # exceptiongroup
# mypy
+ # ogx-client
# pydantic
# pydantic-core
# pyright
@@ -148,13 +106,5 @@ typing-extensions==4.15.0
# typing-inspection
typing-inspection==0.4.2
# via pydantic
-tzdata==2025.3 ; sys_platform == 'emscripten' or sys_platform == 'win32'
- # via pandas
-urllib3==2.6.3
- # via requests
-virtualenv==21.2.0
- # via pre-commit
-wcwidth==0.6.0
- # via prompt-toolkit
zipp==3.23.0
# via importlib-metadata
diff --git a/scripts/bootstrap b/scripts/bootstrap
index 4638ec69..5a23841b 100755
--- a/scripts/bootstrap
+++ b/scripts/bootstrap
@@ -4,7 +4,7 @@ set -e
cd "$(dirname "$0")/.."
-if [ -f "Brewfile" ] && [ "$(uname -s)" = "Darwin" ] && [ "$SKIP_BREW" != "1" ] && [ -t 0 ]; then
+if [ -f "Brewfile" ] && [ "$(uname -s)" = "Darwin" ] && [ "${SKIP_BREW:-}" != "1" ] && [ -t 0 ]; then
brew bundle check >/dev/null 2>&1 || {
echo -n "==> Install Homebrew dependencies? (y/N): "
read -r response
diff --git a/scripts/lint b/scripts/lint
index 842b1a48..0172efb4 100755
--- a/scripts/lint
+++ b/scripts/lint
@@ -16,4 +16,4 @@ fi
# uv run mypy .
echo "==> Making sure it imports"
-uv run python -c 'import llama_stack_client'
+uv run python -c 'import ogx_client'
diff --git a/scripts/mock b/scripts/mock
index 65afdeb8..8159c71d 100755
--- a/scripts/mock
+++ b/scripts/mock
@@ -29,9 +29,9 @@ echo "==> Starting mock server with file ${SPEC_PATH}"
# Run steady mock on the given spec
if [ "$1" == "--daemon" ]; then
# Pre-install the package so the download doesn't eat into the startup timeout
- npm exec --package=@stdy/cli@0.20.2 -- steady --version
+ npm exec --package=@stdy/cli@0.22.1 -- steady --version
- npm exec --package=@stdy/cli@0.20.2 -- steady --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-form-array-format=comma --validator-query-object-format=brackets --validator-form-object-format=brackets "$URL" &> .stdy.log &
+ npm exec --package=@stdy/cli@0.22.1 -- steady --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-form-array-format=comma --validator-query-object-format=brackets --validator-form-object-format=brackets "$URL" &> .stdy.log &
# Wait for server to come online via health endpoint (max 30s)
echo -n "Waiting for server"
@@ -47,5 +47,5 @@ if [ "$1" == "--daemon" ]; then
echo
else
- npm exec --package=@stdy/cli@0.20.2 -- steady --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-form-array-format=comma --validator-query-object-format=brackets --validator-form-object-format=brackets "$URL"
+ npm exec --package=@stdy/cli@0.22.1 -- steady --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-form-array-format=comma --validator-query-object-format=brackets --validator-form-object-format=brackets "$URL"
fi
diff --git a/scripts/test b/scripts/test
index 4fa15f98..e6bf8dca 100755
--- a/scripts/test
+++ b/scripts/test
@@ -47,7 +47,7 @@ elif ! prism_is_running; then
echo -e "To run the server, pass in the path or url of your OpenAPI"
echo -e "spec to the steady command:"
echo
- echo -e " \$ ${YELLOW}npm exec --package=@stdy/cli@0.20.2 -- steady path/to/your.openapi.yml --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-form-array-format=comma --validator-query-object-format=brackets --validator-form-object-format=brackets${NC}"
+ echo -e " \$ ${YELLOW}npm exec --package=@stdy/cli@0.22.1 -- steady path/to/your.openapi.yml --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-form-array-format=comma --validator-query-object-format=brackets --validator-form-object-format=brackets${NC}"
echo
exit 1
diff --git a/src/llama_stack_client/_utils/_resources_proxy.py b/src/llama_stack_client/_utils/_resources_proxy.py
deleted file mode 100644
index 364048bc..00000000
--- a/src/llama_stack_client/_utils/_resources_proxy.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-from __future__ import annotations
-
-from typing import Any
-from typing_extensions import override
-
-from ._proxy import LazyProxy
-
-
-class ResourcesProxy(LazyProxy[Any]):
- """A proxy for the `llama_stack_client.resources` module.
-
- This is used so that we can lazily import `llama_stack_client.resources` only when
- needed *and* so that users can just import `llama_stack_client` and reference `llama_stack_client.resources`
- """
-
- @override
- def __load__(self) -> Any:
- import importlib
-
- mod = importlib.import_module("llama_stack_client.resources")
- return mod
-
-
-resources = ResourcesProxy().__as_proxied__()
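The deleted `ResourcesProxy` existed purely so that `llama_stack_client.resources` was imported lazily, on first attribute access, while still being reachable as a plain attribute of the top-level package. The same effect can be had without the SDK's internal `LazyProxy`; a self-contained sketch (the target module path is illustrative):

```python
# Self-contained version of the lazy-module trick the deleted
# ResourcesProxy implemented: the real module is imported only on the
# first attribute access, not at package import time.
import importlib


class LazyModule:
    def __init__(self, module_name: str) -> None:
        self._module_name = module_name
        self._module = None

    def __getattr__(self, attr: str):
        # Only called when normal lookup fails, i.e. for everything
        # except _module_name/_module themselves.
        if self._module is None:
            self._module = importlib.import_module(self._module_name)
        return getattr(self._module, attr)


resources = LazyModule("ogx_client.resources")  # module path illustrative
```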
diff --git a/src/llama_stack_client/resources/alpha/benchmarks.py b/src/llama_stack_client/resources/alpha/benchmarks.py
deleted file mode 100644
index 9778fac1..00000000
--- a/src/llama_stack_client/resources/alpha/benchmarks.py
+++ /dev/null
@@ -1,474 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import typing_extensions
-from typing import Dict, Type, Optional, cast
-
-import httpx
-
-from ..._types import Body, Omit, Query, Headers, NoneType, NotGiven, SequenceNotStr, omit, not_given
-from ..._utils import path_template, maybe_transform, async_maybe_transform
-from ..._compat import cached_property
-from ..._resource import SyncAPIResource, AsyncAPIResource
-from ..._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ..._wrappers import DataWrapper
-from ...types.alpha import benchmark_register_params
-from ..._base_client import make_request_options
-from ...types.alpha.benchmark import Benchmark
-from ...types.alpha.benchmark_list_response import BenchmarkListResponse
-
-__all__ = ["BenchmarksResource", "AsyncBenchmarksResource"]
-
-
-class BenchmarksResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> BenchmarksResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
- """
- return BenchmarksResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> BenchmarksResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
- """
- return BenchmarksResourceWithStreamingResponse(self)
-
- def retrieve(
- self,
- benchmark_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> Benchmark:
- """
- Get a benchmark by its ID.
-
- Args:
- benchmark_id: The ID of the benchmark to get.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not benchmark_id:
- raise ValueError(f"Expected a non-empty value for `benchmark_id` but received {benchmark_id!r}")
- return self._get(
- path_template("/v1alpha/eval/benchmarks/{benchmark_id}", benchmark_id=benchmark_id),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Benchmark,
- )
-
- def list(
- self,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> BenchmarkListResponse:
- """List all benchmarks."""
- return self._get(
- "/v1alpha/eval/benchmarks",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- post_parser=DataWrapper[BenchmarkListResponse]._unwrapper,
- ),
- cast_to=cast(Type[BenchmarkListResponse], DataWrapper[BenchmarkListResponse]),
- )
-
- @typing_extensions.deprecated("deprecated")
- def register(
- self,
- *,
- benchmark_id: str,
- dataset_id: str,
- scoring_functions: SequenceNotStr[str],
- metadata: Optional[Dict[str, object]] | Omit = omit,
- provider_benchmark_id: Optional[str] | Omit = omit,
- provider_id: Optional[str] | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> None:
- """
- Register a benchmark.
-
- Args:
- benchmark_id: The ID of the benchmark to register.
-
- dataset_id: The ID of the dataset to use for the benchmark.
-
- scoring_functions: The scoring functions to use for the benchmark.
-
- metadata: The metadata to use for the benchmark.
-
- provider_benchmark_id: The ID of the provider benchmark to use for the benchmark.
-
- provider_id: The ID of the provider to use for the benchmark.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- extra_headers = {"Accept": "*/*", **(extra_headers or {})}
- return self._post(
- "/v1alpha/eval/benchmarks",
- body=maybe_transform(
- {
- "benchmark_id": benchmark_id,
- "dataset_id": dataset_id,
- "scoring_functions": scoring_functions,
- "metadata": metadata,
- "provider_benchmark_id": provider_benchmark_id,
- "provider_id": provider_id,
- },
- benchmark_register_params.BenchmarkRegisterParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=NoneType,
- )
-
- @typing_extensions.deprecated("deprecated")
- def unregister(
- self,
- benchmark_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> None:
- """
- Unregister a benchmark.
-
- Args:
- benchmark_id: The ID of the benchmark to unregister.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not benchmark_id:
- raise ValueError(f"Expected a non-empty value for `benchmark_id` but received {benchmark_id!r}")
- extra_headers = {"Accept": "*/*", **(extra_headers or {})}
- return self._delete(
- path_template("/v1alpha/eval/benchmarks/{benchmark_id}", benchmark_id=benchmark_id),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=NoneType,
- )
-
-
-class AsyncBenchmarksResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncBenchmarksResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
- """
- return AsyncBenchmarksResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncBenchmarksResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
- """
- return AsyncBenchmarksResourceWithStreamingResponse(self)
-
- async def retrieve(
- self,
- benchmark_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> Benchmark:
- """
- Get a benchmark by its ID.
-
- Args:
- benchmark_id: The ID of the benchmark to get.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not benchmark_id:
- raise ValueError(f"Expected a non-empty value for `benchmark_id` but received {benchmark_id!r}")
- return await self._get(
- path_template("/v1alpha/eval/benchmarks/{benchmark_id}", benchmark_id=benchmark_id),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Benchmark,
- )
-
- async def list(
- self,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> BenchmarkListResponse:
- """List all benchmarks."""
- return await self._get(
- "/v1alpha/eval/benchmarks",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- post_parser=DataWrapper[BenchmarkListResponse]._unwrapper,
- ),
- cast_to=cast(Type[BenchmarkListResponse], DataWrapper[BenchmarkListResponse]),
- )
-
- @typing_extensions.deprecated("deprecated")
- async def register(
- self,
- *,
- benchmark_id: str,
- dataset_id: str,
- scoring_functions: SequenceNotStr[str],
- metadata: Optional[Dict[str, object]] | Omit = omit,
- provider_benchmark_id: Optional[str] | Omit = omit,
- provider_id: Optional[str] | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> None:
- """
- Register a benchmark.
-
- Args:
- benchmark_id: The ID of the benchmark to register.
-
- dataset_id: The ID of the dataset to use for the benchmark.
-
- scoring_functions: The scoring functions to use for the benchmark.
-
- metadata: The metadata to use for the benchmark.
-
- provider_benchmark_id: The ID of the provider benchmark to use for the benchmark.
-
- provider_id: The ID of the provider to use for the benchmark.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- extra_headers = {"Accept": "*/*", **(extra_headers or {})}
- return await self._post(
- "/v1alpha/eval/benchmarks",
- body=await async_maybe_transform(
- {
- "benchmark_id": benchmark_id,
- "dataset_id": dataset_id,
- "scoring_functions": scoring_functions,
- "metadata": metadata,
- "provider_benchmark_id": provider_benchmark_id,
- "provider_id": provider_id,
- },
- benchmark_register_params.BenchmarkRegisterParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=NoneType,
- )
-
- @typing_extensions.deprecated("deprecated")
- async def unregister(
- self,
- benchmark_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> None:
- """
- Unregister a benchmark.
-
- Args:
- benchmark_id: The ID of the benchmark to unregister.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not benchmark_id:
- raise ValueError(f"Expected a non-empty value for `benchmark_id` but received {benchmark_id!r}")
- extra_headers = {"Accept": "*/*", **(extra_headers or {})}
- return await self._delete(
- path_template("/v1alpha/eval/benchmarks/{benchmark_id}", benchmark_id=benchmark_id),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=NoneType,
- )
-
-
-class BenchmarksResourceWithRawResponse:
- def __init__(self, benchmarks: BenchmarksResource) -> None:
- self._benchmarks = benchmarks
-
- self.retrieve = to_raw_response_wrapper(
- benchmarks.retrieve,
- )
- self.list = to_raw_response_wrapper(
- benchmarks.list,
- )
- self.register = ( # pyright: ignore[reportDeprecated]
- to_raw_response_wrapper(
- benchmarks.register, # pyright: ignore[reportDeprecated],
- )
- )
- self.unregister = ( # pyright: ignore[reportDeprecated]
- to_raw_response_wrapper(
- benchmarks.unregister, # pyright: ignore[reportDeprecated],
- )
- )
-
-
-class AsyncBenchmarksResourceWithRawResponse:
- def __init__(self, benchmarks: AsyncBenchmarksResource) -> None:
- self._benchmarks = benchmarks
-
- self.retrieve = async_to_raw_response_wrapper(
- benchmarks.retrieve,
- )
- self.list = async_to_raw_response_wrapper(
- benchmarks.list,
- )
- self.register = ( # pyright: ignore[reportDeprecated]
- async_to_raw_response_wrapper(
- benchmarks.register, # pyright: ignore[reportDeprecated],
- )
- )
- self.unregister = ( # pyright: ignore[reportDeprecated]
- async_to_raw_response_wrapper(
- benchmarks.unregister, # pyright: ignore[reportDeprecated],
- )
- )
-
-
-class BenchmarksResourceWithStreamingResponse:
- def __init__(self, benchmarks: BenchmarksResource) -> None:
- self._benchmarks = benchmarks
-
- self.retrieve = to_streamed_response_wrapper(
- benchmarks.retrieve,
- )
- self.list = to_streamed_response_wrapper(
- benchmarks.list,
- )
- self.register = ( # pyright: ignore[reportDeprecated]
- to_streamed_response_wrapper(
- benchmarks.register, # pyright: ignore[reportDeprecated],
- )
- )
- self.unregister = ( # pyright: ignore[reportDeprecated]
- to_streamed_response_wrapper(
- benchmarks.unregister, # pyright: ignore[reportDeprecated],
- )
- )
-
-
-class AsyncBenchmarksResourceWithStreamingResponse:
- def __init__(self, benchmarks: AsyncBenchmarksResource) -> None:
- self._benchmarks = benchmarks
-
- self.retrieve = async_to_streamed_response_wrapper(
- benchmarks.retrieve,
- )
- self.list = async_to_streamed_response_wrapper(
- benchmarks.list,
- )
- self.register = ( # pyright: ignore[reportDeprecated]
- async_to_streamed_response_wrapper(
- benchmarks.register, # pyright: ignore[reportDeprecated],
- )
- )
- self.unregister = ( # pyright: ignore[reportDeprecated]
- async_to_streamed_response_wrapper(
- benchmarks.unregister, # pyright: ignore[reportDeprecated],
- )
- )
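The removed benchmarks resource covered the full `/v1alpha/eval/benchmarks` surface: `retrieve`, `list`, and the already-deprecated `register`/`unregister`. For reference, a hedged sketch of how it was driven before this diff (client construction and base URL are assumptions; these calls no longer exist afterwards):

```python
# Pre-removal usage of the alpha benchmarks resource (sketch only; the
# resource is deleted by this diff, and the base URL is an assumption).
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")

all_benchmarks = client.alpha.benchmarks.list()
one = client.alpha.benchmarks.retrieve("my-benchmark")  # ID illustrative
```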
diff --git a/src/llama_stack_client/resources/alpha/eval/__init__.py b/src/llama_stack_client/resources/alpha/eval/__init__.py
deleted file mode 100644
index 3aa93594..00000000
--- a/src/llama_stack_client/resources/alpha/eval/__init__.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .eval import (
- EvalResource,
- AsyncEvalResource,
- EvalResourceWithRawResponse,
- AsyncEvalResourceWithRawResponse,
- EvalResourceWithStreamingResponse,
- AsyncEvalResourceWithStreamingResponse,
-)
-from .jobs import (
- JobsResource,
- AsyncJobsResource,
- JobsResourceWithRawResponse,
- AsyncJobsResourceWithRawResponse,
- JobsResourceWithStreamingResponse,
- AsyncJobsResourceWithStreamingResponse,
-)
-
-__all__ = [
- "JobsResource",
- "AsyncJobsResource",
- "JobsResourceWithRawResponse",
- "AsyncJobsResourceWithRawResponse",
- "JobsResourceWithStreamingResponse",
- "AsyncJobsResourceWithStreamingResponse",
- "EvalResource",
- "AsyncEvalResource",
- "EvalResourceWithRawResponse",
- "AsyncEvalResourceWithRawResponse",
- "EvalResourceWithStreamingResponse",
- "AsyncEvalResourceWithStreamingResponse",
-]
diff --git a/src/llama_stack_client/resources/alpha/eval/eval.py b/src/llama_stack_client/resources/alpha/eval/eval.py
deleted file mode 100644
index 7488bef9..00000000
--- a/src/llama_stack_client/resources/alpha/eval/eval.py
+++ /dev/null
@@ -1,578 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Iterable
-
-import httpx
-
-from .jobs import (
- JobsResource,
- AsyncJobsResource,
- JobsResourceWithRawResponse,
- AsyncJobsResourceWithRawResponse,
- JobsResourceWithStreamingResponse,
- AsyncJobsResourceWithStreamingResponse,
-)
-from ...._types import Body, Query, Headers, NotGiven, SequenceNotStr, not_given
-from ...._utils import path_template, maybe_transform, async_maybe_transform
-from ...._compat import cached_property
-from ...._resource import SyncAPIResource, AsyncAPIResource
-from ...._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ....types.alpha import (
- eval_run_eval_params,
- eval_evaluate_rows_params,
- eval_run_eval_alpha_params,
- eval_evaluate_rows_alpha_params,
-)
-from ...._base_client import make_request_options
-from ....types.alpha.job import Job
-from ....types.alpha.evaluate_response import EvaluateResponse
-from ....types.alpha.benchmark_config_param import BenchmarkConfigParam
-
-__all__ = ["EvalResource", "AsyncEvalResource"]
-
-
-class EvalResource(SyncAPIResource):
- """
- Llama Stack Evaluation API for running evaluations on model and agent candidates.
- """
-
- @cached_property
- def jobs(self) -> JobsResource:
- """
- Llama Stack Evaluation API for running evaluations on model and agent candidates.
- """
- return JobsResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> EvalResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
- """
- return EvalResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> EvalResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
- """
- return EvalResourceWithStreamingResponse(self)
-
- def evaluate_rows(
- self,
- benchmark_id: str,
- *,
- benchmark_config: BenchmarkConfigParam,
- input_rows: Iterable[Dict[str, object]],
- scoring_functions: SequenceNotStr[str],
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> EvaluateResponse:
- """
- Evaluate a list of rows on a benchmark.
-
- Args:
- benchmark_id: The ID of the benchmark
-
- benchmark_config: The configuration for the benchmark
-
- input_rows: The rows to evaluate
-
- scoring_functions: The scoring functions to use for the evaluation
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not benchmark_id:
- raise ValueError(f"Expected a non-empty value for `benchmark_id` but received {benchmark_id!r}")
- return self._post(
- path_template("/v1alpha/eval/benchmarks/{benchmark_id}/evaluations", benchmark_id=benchmark_id),
- body=maybe_transform(
- {
- "benchmark_config": benchmark_config,
- "input_rows": input_rows,
- "scoring_functions": scoring_functions,
- },
- eval_evaluate_rows_params.EvalEvaluateRowsParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=EvaluateResponse,
- )
-
- def evaluate_rows_alpha(
- self,
- benchmark_id: str,
- *,
- benchmark_config: BenchmarkConfigParam,
- input_rows: Iterable[Dict[str, object]],
- scoring_functions: SequenceNotStr[str],
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> EvaluateResponse:
- """
- Evaluate a list of rows on a benchmark.
-
- Args:
- benchmark_id: The ID of the benchmark
-
- benchmark_config: The configuration for the benchmark
-
- input_rows: The rows to evaluate
-
- scoring_functions: The scoring functions to use for the evaluation
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not benchmark_id:
- raise ValueError(f"Expected a non-empty value for `benchmark_id` but received {benchmark_id!r}")
- return self._post(
- path_template("/v1alpha/eval/benchmarks/{benchmark_id}/evaluations", benchmark_id=benchmark_id),
- body=maybe_transform(
- {
- "benchmark_config": benchmark_config,
- "input_rows": input_rows,
- "scoring_functions": scoring_functions,
- },
- eval_evaluate_rows_alpha_params.EvalEvaluateRowsAlphaParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=EvaluateResponse,
- )
-
- def run_eval(
- self,
- benchmark_id: str,
- *,
- benchmark_config: BenchmarkConfigParam,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> Job:
- """
- Run an evaluation on a benchmark.
-
- Args:
- benchmark_id: The ID of the benchmark
-
- benchmark_config: The configuration for the benchmark
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not benchmark_id:
- raise ValueError(f"Expected a non-empty value for `benchmark_id` but received {benchmark_id!r}")
- return self._post(
- path_template("/v1alpha/eval/benchmarks/{benchmark_id}/jobs", benchmark_id=benchmark_id),
- body=maybe_transform({"benchmark_config": benchmark_config}, eval_run_eval_params.EvalRunEvalParams),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Job,
- )
-
- def run_eval_alpha(
- self,
- benchmark_id: str,
- *,
- benchmark_config: BenchmarkConfigParam,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> Job:
- """
- Run an evaluation on a benchmark.
-
- Args:
- benchmark_id: The ID of the benchmark
-
- benchmark_config: The configuration for the benchmark
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not benchmark_id:
- raise ValueError(f"Expected a non-empty value for `benchmark_id` but received {benchmark_id!r}")
- return self._post(
- path_template("/v1alpha/eval/benchmarks/{benchmark_id}/jobs", benchmark_id=benchmark_id),
- body=maybe_transform(
- {"benchmark_config": benchmark_config}, eval_run_eval_alpha_params.EvalRunEvalAlphaParams
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Job,
- )
-
-
-class AsyncEvalResource(AsyncAPIResource):
- """
- Llama Stack Evaluation API for running evaluations on model and agent candidates.
- """
-
- @cached_property
- def jobs(self) -> AsyncJobsResource:
- """
- Llama Stack Evaluation API for running evaluations on model and agent candidates.
- """
- return AsyncJobsResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> AsyncEvalResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
- """
- return AsyncEvalResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncEvalResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
- """
- return AsyncEvalResourceWithStreamingResponse(self)
-
- async def evaluate_rows(
- self,
- benchmark_id: str,
- *,
- benchmark_config: BenchmarkConfigParam,
- input_rows: Iterable[Dict[str, object]],
- scoring_functions: SequenceNotStr[str],
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> EvaluateResponse:
- """
- Evaluate a list of rows on a benchmark.
-
- Args:
- benchmark_id: The ID of the benchmark
-
- benchmark_config: The configuration for the benchmark
-
- input_rows: The rows to evaluate
-
- scoring_functions: The scoring functions to use for the evaluation
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not benchmark_id:
- raise ValueError(f"Expected a non-empty value for `benchmark_id` but received {benchmark_id!r}")
- return await self._post(
- path_template("/v1alpha/eval/benchmarks/{benchmark_id}/evaluations", benchmark_id=benchmark_id),
- body=await async_maybe_transform(
- {
- "benchmark_config": benchmark_config,
- "input_rows": input_rows,
- "scoring_functions": scoring_functions,
- },
- eval_evaluate_rows_params.EvalEvaluateRowsParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=EvaluateResponse,
- )
-
- async def evaluate_rows_alpha(
- self,
- benchmark_id: str,
- *,
- benchmark_config: BenchmarkConfigParam,
- input_rows: Iterable[Dict[str, object]],
- scoring_functions: SequenceNotStr[str],
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> EvaluateResponse:
- """
- Evaluate a list of rows on a benchmark.
-
- Args:
- benchmark_id: The ID of the benchmark
-
- benchmark_config: The configuration for the benchmark
-
- input_rows: The rows to evaluate
-
- scoring_functions: The scoring functions to use for the evaluation
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not benchmark_id:
- raise ValueError(f"Expected a non-empty value for `benchmark_id` but received {benchmark_id!r}")
- return await self._post(
- path_template("/v1alpha/eval/benchmarks/{benchmark_id}/evaluations", benchmark_id=benchmark_id),
- body=await async_maybe_transform(
- {
- "benchmark_config": benchmark_config,
- "input_rows": input_rows,
- "scoring_functions": scoring_functions,
- },
- eval_evaluate_rows_alpha_params.EvalEvaluateRowsAlphaParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=EvaluateResponse,
- )
-
- async def run_eval(
- self,
- benchmark_id: str,
- *,
- benchmark_config: BenchmarkConfigParam,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> Job:
- """
- Run an evaluation on a benchmark.
-
- Args:
- benchmark_id: The ID of the benchmark
-
- benchmark_config: The configuration for the benchmark
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not benchmark_id:
- raise ValueError(f"Expected a non-empty value for `benchmark_id` but received {benchmark_id!r}")
- return await self._post(
- path_template("/v1alpha/eval/benchmarks/{benchmark_id}/jobs", benchmark_id=benchmark_id),
- body=await async_maybe_transform(
- {"benchmark_config": benchmark_config}, eval_run_eval_params.EvalRunEvalParams
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Job,
- )
-
- async def run_eval_alpha(
- self,
- benchmark_id: str,
- *,
- benchmark_config: BenchmarkConfigParam,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> Job:
- """
- Run an evaluation on a benchmark.
-
- Args:
- benchmark_id: The ID of the benchmark
-
- benchmark_config: The configuration for the benchmark
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not benchmark_id:
- raise ValueError(f"Expected a non-empty value for `benchmark_id` but received {benchmark_id!r}")
- return await self._post(
- path_template("/v1alpha/eval/benchmarks/{benchmark_id}/jobs", benchmark_id=benchmark_id),
- body=await async_maybe_transform(
- {"benchmark_config": benchmark_config}, eval_run_eval_alpha_params.EvalRunEvalAlphaParams
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Job,
- )
-
-
-class EvalResourceWithRawResponse:
- def __init__(self, eval: EvalResource) -> None:
- self._eval = eval
-
- self.evaluate_rows = to_raw_response_wrapper(
- eval.evaluate_rows,
- )
- self.evaluate_rows_alpha = to_raw_response_wrapper(
- eval.evaluate_rows_alpha,
- )
- self.run_eval = to_raw_response_wrapper(
- eval.run_eval,
- )
- self.run_eval_alpha = to_raw_response_wrapper(
- eval.run_eval_alpha,
- )
-
- @cached_property
- def jobs(self) -> JobsResourceWithRawResponse:
- """
- Llama Stack Evaluation API for running evaluations on model and agent candidates.
- """
- return JobsResourceWithRawResponse(self._eval.jobs)
-
-
-class AsyncEvalResourceWithRawResponse:
- def __init__(self, eval: AsyncEvalResource) -> None:
- self._eval = eval
-
- self.evaluate_rows = async_to_raw_response_wrapper(
- eval.evaluate_rows,
- )
- self.evaluate_rows_alpha = async_to_raw_response_wrapper(
- eval.evaluate_rows_alpha,
- )
- self.run_eval = async_to_raw_response_wrapper(
- eval.run_eval,
- )
- self.run_eval_alpha = async_to_raw_response_wrapper(
- eval.run_eval_alpha,
- )
-
- @cached_property
- def jobs(self) -> AsyncJobsResourceWithRawResponse:
- """
- Llama Stack Evaluation API for running evaluations on model and agent candidates.
- """
- return AsyncJobsResourceWithRawResponse(self._eval.jobs)
-
-
-class EvalResourceWithStreamingResponse:
- def __init__(self, eval: EvalResource) -> None:
- self._eval = eval
-
- self.evaluate_rows = to_streamed_response_wrapper(
- eval.evaluate_rows,
- )
- self.evaluate_rows_alpha = to_streamed_response_wrapper(
- eval.evaluate_rows_alpha,
- )
- self.run_eval = to_streamed_response_wrapper(
- eval.run_eval,
- )
- self.run_eval_alpha = to_streamed_response_wrapper(
- eval.run_eval_alpha,
- )
-
- @cached_property
- def jobs(self) -> JobsResourceWithStreamingResponse:
- """
- Llama Stack Evaluation API for running evaluations on model and agent candidates.
- """
- return JobsResourceWithStreamingResponse(self._eval.jobs)
-
-
-class AsyncEvalResourceWithStreamingResponse:
- def __init__(self, eval: AsyncEvalResource) -> None:
- self._eval = eval
-
- self.evaluate_rows = async_to_streamed_response_wrapper(
- eval.evaluate_rows,
- )
- self.evaluate_rows_alpha = async_to_streamed_response_wrapper(
- eval.evaluate_rows_alpha,
- )
- self.run_eval = async_to_streamed_response_wrapper(
- eval.run_eval,
- )
- self.run_eval_alpha = async_to_streamed_response_wrapper(
- eval.run_eval_alpha,
- )
-
- @cached_property
- def jobs(self) -> AsyncJobsResourceWithStreamingResponse:
- """
- Llama Stack Evaluation API for running evaluations on model and agent candidates.
- """
- return AsyncJobsResourceWithStreamingResponse(self._eval.jobs)
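The deleted `EvalResource` paired two inline entry points (`evaluate_rows`/`evaluate_rows_alpha`, which score a batch of rows and return an `EvaluateResponse`) with two job-launching ones (`run_eval`/`run_eval_alpha`, which return a `Job` handle). A hedged sketch of the run side as it would have looked; the shape of `benchmark_config` is only partially visible here, so the payload below is an assumption:

```python
# Pre-removal sketch of launching an evaluation job (resource deleted by
# this diff; the benchmark_config payload shape is an assumption).
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")

job = client.alpha.eval.run_eval(
    benchmark_id="my-benchmark",  # ID illustrative
    benchmark_config={},          # real configs carry eval candidates etc.
)
print(job)
```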
diff --git a/src/llama_stack_client/resources/alpha/eval/jobs.py b/src/llama_stack_client/resources/alpha/eval/jobs.py
deleted file mode 100644
index 06b59cf3..00000000
--- a/src/llama_stack_client/resources/alpha/eval/jobs.py
+++ /dev/null
@@ -1,367 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import httpx
-
-from ...._types import Body, Query, Headers, NoneType, NotGiven, not_given
-from ...._utils import path_template
-from ...._compat import cached_property
-from ...._resource import SyncAPIResource, AsyncAPIResource
-from ...._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ...._base_client import make_request_options
-from ....types.alpha.job import Job
-from ....types.alpha.evaluate_response import EvaluateResponse
-
-__all__ = ["JobsResource", "AsyncJobsResource"]
-
-
-class JobsResource(SyncAPIResource):
- """
- Llama Stack Evaluation API for running evaluations on model and agent candidates.
- """
-
- @cached_property
- def with_raw_response(self) -> JobsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
- """
- return JobsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> JobsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
- """
- return JobsResourceWithStreamingResponse(self)
-
- def retrieve(
- self,
- job_id: str,
- *,
- benchmark_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> EvaluateResponse:
- """
- Get the result of a job.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not benchmark_id:
- raise ValueError(f"Expected a non-empty value for `benchmark_id` but received {benchmark_id!r}")
- if not job_id:
- raise ValueError(f"Expected a non-empty value for `job_id` but received {job_id!r}")
- return self._get(
- path_template(
- "/v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}/result", benchmark_id=benchmark_id, job_id=job_id
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=EvaluateResponse,
- )
-
- def cancel(
- self,
- job_id: str,
- *,
- benchmark_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> None:
- """
- Cancel a job.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not benchmark_id:
- raise ValueError(f"Expected a non-empty value for `benchmark_id` but received {benchmark_id!r}")
- if not job_id:
- raise ValueError(f"Expected a non-empty value for `job_id` but received {job_id!r}")
- extra_headers = {"Accept": "*/*", **(extra_headers or {})}
- return self._delete(
- path_template(
- "/v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}", benchmark_id=benchmark_id, job_id=job_id
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=NoneType,
- )
-
- def status(
- self,
- job_id: str,
- *,
- benchmark_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> Job:
- """
- Get the status of a job.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not benchmark_id:
- raise ValueError(f"Expected a non-empty value for `benchmark_id` but received {benchmark_id!r}")
- if not job_id:
- raise ValueError(f"Expected a non-empty value for `job_id` but received {job_id!r}")
- return self._get(
- path_template(
- "/v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}", benchmark_id=benchmark_id, job_id=job_id
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Job,
- )
-
-
-class AsyncJobsResource(AsyncAPIResource):
- """
- Llama Stack Evaluation API for running evaluations on model and agent candidates.
- """
-
- @cached_property
- def with_raw_response(self) -> AsyncJobsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
- """
- return AsyncJobsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncJobsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
- """
- return AsyncJobsResourceWithStreamingResponse(self)
-
- async def retrieve(
- self,
- job_id: str,
- *,
- benchmark_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> EvaluateResponse:
- """
- Get the result of a job.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not benchmark_id:
- raise ValueError(f"Expected a non-empty value for `benchmark_id` but received {benchmark_id!r}")
- if not job_id:
- raise ValueError(f"Expected a non-empty value for `job_id` but received {job_id!r}")
- return await self._get(
- path_template(
- "/v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}/result", benchmark_id=benchmark_id, job_id=job_id
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=EvaluateResponse,
- )
-
- async def cancel(
- self,
- job_id: str,
- *,
- benchmark_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> None:
- """
- Cancel a job.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not benchmark_id:
- raise ValueError(f"Expected a non-empty value for `benchmark_id` but received {benchmark_id!r}")
- if not job_id:
- raise ValueError(f"Expected a non-empty value for `job_id` but received {job_id!r}")
- extra_headers = {"Accept": "*/*", **(extra_headers or {})}
- return await self._delete(
- path_template(
- "/v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}", benchmark_id=benchmark_id, job_id=job_id
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=NoneType,
- )
-
- async def status(
- self,
- job_id: str,
- *,
- benchmark_id: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> Job:
- """
- Get the status of a job.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not benchmark_id:
- raise ValueError(f"Expected a non-empty value for `benchmark_id` but received {benchmark_id!r}")
- if not job_id:
- raise ValueError(f"Expected a non-empty value for `job_id` but received {job_id!r}")
- return await self._get(
- path_template(
- "/v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}", benchmark_id=benchmark_id, job_id=job_id
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Job,
- )
-
-
-class JobsResourceWithRawResponse:
- def __init__(self, jobs: JobsResource) -> None:
- self._jobs = jobs
-
- self.retrieve = to_raw_response_wrapper(
- jobs.retrieve,
- )
- self.cancel = to_raw_response_wrapper(
- jobs.cancel,
- )
- self.status = to_raw_response_wrapper(
- jobs.status,
- )
-
-
-class AsyncJobsResourceWithRawResponse:
- def __init__(self, jobs: AsyncJobsResource) -> None:
- self._jobs = jobs
-
- self.retrieve = async_to_raw_response_wrapper(
- jobs.retrieve,
- )
- self.cancel = async_to_raw_response_wrapper(
- jobs.cancel,
- )
- self.status = async_to_raw_response_wrapper(
- jobs.status,
- )
-
-
-class JobsResourceWithStreamingResponse:
- def __init__(self, jobs: JobsResource) -> None:
- self._jobs = jobs
-
- self.retrieve = to_streamed_response_wrapper(
- jobs.retrieve,
- )
- self.cancel = to_streamed_response_wrapper(
- jobs.cancel,
- )
- self.status = to_streamed_response_wrapper(
- jobs.status,
- )
-
-
-class AsyncJobsResourceWithStreamingResponse:
- def __init__(self, jobs: AsyncJobsResource) -> None:
- self._jobs = jobs
-
- self.retrieve = async_to_streamed_response_wrapper(
- jobs.retrieve,
- )
- self.cancel = async_to_streamed_response_wrapper(
- jobs.cancel,
- )
- self.status = async_to_streamed_response_wrapper(
- jobs.status,
- )
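The jobs sub-resource rounded out that flow: `status` polled a `Job`, `retrieve` fetched the finished `EvaluateResponse` from the `/result` endpoint, and `cancel` issued the DELETE. A sketch of the poll-then-fetch loop as it would have looked (method names and endpoints come from the deleted code above; the terminal status values are assumptions):

```python
# Pre-removal polling loop for an eval job (sketch; the terminal status
# strings are assumptions, the method names come from the deleted code).
import time

from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")

while True:
    job = client.alpha.eval.jobs.status("job-123", benchmark_id="my-benchmark")
    if getattr(job, "status", None) in ("completed", "failed"):  # values assumed
        break
    time.sleep(2)

result = client.alpha.eval.jobs.retrieve("job-123", benchmark_id="my-benchmark")
```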
diff --git a/src/llama_stack_client/resources/beta/__init__.py b/src/llama_stack_client/resources/beta/__init__.py
deleted file mode 100644
index f247fcee..00000000
--- a/src/llama_stack_client/resources/beta/__init__.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .beta import (
- BetaResource,
- AsyncBetaResource,
- BetaResourceWithRawResponse,
- AsyncBetaResourceWithRawResponse,
- BetaResourceWithStreamingResponse,
- AsyncBetaResourceWithStreamingResponse,
-)
-from .datasets import (
- DatasetsResource,
- AsyncDatasetsResource,
- DatasetsResourceWithRawResponse,
- AsyncDatasetsResourceWithRawResponse,
- DatasetsResourceWithStreamingResponse,
- AsyncDatasetsResourceWithStreamingResponse,
-)
-
-__all__ = [
- "DatasetsResource",
- "AsyncDatasetsResource",
- "DatasetsResourceWithRawResponse",
- "AsyncDatasetsResourceWithRawResponse",
- "DatasetsResourceWithStreamingResponse",
- "AsyncDatasetsResourceWithStreamingResponse",
- "BetaResource",
- "AsyncBetaResource",
- "BetaResourceWithRawResponse",
- "AsyncBetaResourceWithRawResponse",
- "BetaResourceWithStreamingResponse",
- "AsyncBetaResourceWithStreamingResponse",
-]
diff --git a/src/llama_stack_client/resources/beta/beta.py b/src/llama_stack_client/resources/beta/beta.py
deleted file mode 100644
index 2fba0f7b..00000000
--- a/src/llama_stack_client/resources/beta/beta.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from .datasets import (
- DatasetsResource,
- AsyncDatasetsResource,
- DatasetsResourceWithRawResponse,
- AsyncDatasetsResourceWithRawResponse,
- DatasetsResourceWithStreamingResponse,
- AsyncDatasetsResourceWithStreamingResponse,
-)
-from ..._compat import cached_property
-from ..._resource import SyncAPIResource, AsyncAPIResource
-
-__all__ = ["BetaResource", "AsyncBetaResource"]
-
-
-class BetaResource(SyncAPIResource):
- @cached_property
- def datasets(self) -> DatasetsResource:
- return DatasetsResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> BetaResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
- """
- return BetaResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> BetaResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
- """
- return BetaResourceWithStreamingResponse(self)
-
-
-class AsyncBetaResource(AsyncAPIResource):
- @cached_property
- def datasets(self) -> AsyncDatasetsResource:
- return AsyncDatasetsResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> AsyncBetaResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
- """
- return AsyncBetaResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncBetaResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
- """
- return AsyncBetaResourceWithStreamingResponse(self)
-
-
-class BetaResourceWithRawResponse:
- def __init__(self, beta: BetaResource) -> None:
- self._beta = beta
-
- @cached_property
- def datasets(self) -> DatasetsResourceWithRawResponse:
- return DatasetsResourceWithRawResponse(self._beta.datasets)
-
-
-class AsyncBetaResourceWithRawResponse:
- def __init__(self, beta: AsyncBetaResource) -> None:
- self._beta = beta
-
- @cached_property
- def datasets(self) -> AsyncDatasetsResourceWithRawResponse:
- return AsyncDatasetsResourceWithRawResponse(self._beta.datasets)
-
-
-class BetaResourceWithStreamingResponse:
- def __init__(self, beta: BetaResource) -> None:
- self._beta = beta
-
- @cached_property
- def datasets(self) -> DatasetsResourceWithStreamingResponse:
- return DatasetsResourceWithStreamingResponse(self._beta.datasets)
-
-
-class AsyncBetaResourceWithStreamingResponse:
- def __init__(self, beta: AsyncBetaResource) -> None:
- self._beta = beta
-
- @cached_property
- def datasets(self) -> AsyncDatasetsResourceWithStreamingResponse:
- return AsyncDatasetsResourceWithStreamingResponse(self._beta.datasets)
diff --git a/src/llama_stack_client/resources/beta/datasets.py b/src/llama_stack_client/resources/beta/datasets.py
deleted file mode 100644
index e18e8f0e..00000000
--- a/src/llama_stack_client/resources/beta/datasets.py
+++ /dev/null
@@ -1,685 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import typing_extensions
-from typing import Dict, Type, Iterable, Optional, cast
-from typing_extensions import Literal
-
-import httpx
-
-from ..._types import Body, Omit, Query, Headers, NoneType, NotGiven, omit, not_given
-from ..._utils import path_template, maybe_transform, async_maybe_transform
-from ..._compat import cached_property
-from ..._resource import SyncAPIResource, AsyncAPIResource
-from ..._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ..._wrappers import DataWrapper
-from ...types.beta import dataset_iterrows_params, dataset_register_params, dataset_appendrows_params
-from ..._base_client import make_request_options
-from ...types.beta.dataset_list_response import DatasetListResponse
-from ...types.beta.dataset_iterrows_response import DatasetIterrowsResponse
-from ...types.beta.dataset_register_response import DatasetRegisterResponse
-from ...types.beta.dataset_retrieve_response import DatasetRetrieveResponse
-
-__all__ = ["DatasetsResource", "AsyncDatasetsResource"]
-
-
-class DatasetsResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> DatasetsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
- """
- return DatasetsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> DatasetsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
- """
- return DatasetsResourceWithStreamingResponse(self)
-
- def retrieve(
- self,
- dataset_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> DatasetRetrieveResponse:
- """
- Get a dataset by its ID.
-
- Args:
- dataset_id: The ID of the dataset to get.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not dataset_id:
- raise ValueError(f"Expected a non-empty value for `dataset_id` but received {dataset_id!r}")
- return self._get(
- path_template("/v1beta/datasets/{dataset_id}", dataset_id=dataset_id),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=DatasetRetrieveResponse,
- )
-
- def list(
- self,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> DatasetListResponse:
- """List all datasets."""
- return self._get(
- "/v1beta/datasets",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- post_parser=DataWrapper[DatasetListResponse]._unwrapper,
- ),
- cast_to=cast(Type[DatasetListResponse], DataWrapper[DatasetListResponse]),
- )
-
- def appendrows(
- self,
- dataset_id: str,
- *,
- rows: Iterable[Dict[str, object]],
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> None:
- """
- Append rows to a dataset.
-
- Args:
- dataset_id: The ID of the dataset to append the rows to.
-
- rows: The rows to append to the dataset.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not dataset_id:
- raise ValueError(f"Expected a non-empty value for `dataset_id` but received {dataset_id!r}")
- extra_headers = {"Accept": "*/*", **(extra_headers or {})}
- return self._post(
- path_template("/v1beta/datasetio/append-rows/{dataset_id}", dataset_id=dataset_id),
- body=maybe_transform({"rows": rows}, dataset_appendrows_params.DatasetAppendrowsParams),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=NoneType,
- )
-
- def iterrows(
- self,
- dataset_id: str,
- *,
- limit: Optional[int] | Omit = omit,
- start_index: Optional[int] | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> DatasetIterrowsResponse:
- """
- Get a paginated list of rows from a dataset.
-
- Uses offset-based pagination where:
-
- - start_index: The starting index (0-based). If None, starts from beginning.
- - limit: Number of items to return. If None or -1, returns all items.
-
- The response includes:
-
- - data: List of items for the current page.
- - has_more: Whether there are more items available after this set.
-
- Args:
- dataset_id: The ID of the dataset to get the rows from.
-
- limit: The number of rows to get.
-
- start_index: Index into dataset for the first row to get. Get all rows if None.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not dataset_id:
- raise ValueError(f"Expected a non-empty value for `dataset_id` but received {dataset_id!r}")
- return self._get(
- path_template("/v1beta/datasetio/iterrows/{dataset_id}", dataset_id=dataset_id),
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "limit": limit,
- "start_index": start_index,
- },
- dataset_iterrows_params.DatasetIterrowsParams,
- ),
- ),
- cast_to=DatasetIterrowsResponse,
- )
-
- @typing_extensions.deprecated("deprecated")
- def register(
- self,
- *,
- purpose: Literal["eval/question-answer", "eval/messages-answer"],
- source: dataset_register_params.Source,
- dataset_id: Optional[str] | Omit = omit,
- metadata: Optional[Dict[str, object]] | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> DatasetRegisterResponse:
- """
- Register a new dataset.
-
- Args:
- purpose: The purpose of the dataset.
-
- source: The data source of the dataset.
-
- dataset_id: The ID of the dataset. If not provided, an ID will be generated.
-
- metadata: The metadata for the dataset.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/v1beta/datasets",
- body=maybe_transform(
- {
- "purpose": purpose,
- "source": source,
- "dataset_id": dataset_id,
- "metadata": metadata,
- },
- dataset_register_params.DatasetRegisterParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=DatasetRegisterResponse,
- )
-
- @typing_extensions.deprecated("deprecated")
- def unregister(
- self,
- dataset_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> None:
- """
- Unregister a dataset by its ID.
-
- Args:
- dataset_id: The ID of the dataset to unregister.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not dataset_id:
- raise ValueError(f"Expected a non-empty value for `dataset_id` but received {dataset_id!r}")
- extra_headers = {"Accept": "*/*", **(extra_headers or {})}
- return self._delete(
- path_template("/v1beta/datasets/{dataset_id}", dataset_id=dataset_id),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=NoneType,
- )
-
-
-class AsyncDatasetsResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncDatasetsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
- """
- return AsyncDatasetsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncDatasetsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
- """
- return AsyncDatasetsResourceWithStreamingResponse(self)
-
- async def retrieve(
- self,
- dataset_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> DatasetRetrieveResponse:
- """
- Get a dataset by its ID.
-
- Args:
- dataset_id: The ID of the dataset to get.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not dataset_id:
- raise ValueError(f"Expected a non-empty value for `dataset_id` but received {dataset_id!r}")
- return await self._get(
- path_template("/v1beta/datasets/{dataset_id}", dataset_id=dataset_id),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=DatasetRetrieveResponse,
- )
-
- async def list(
- self,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> DatasetListResponse:
- """List all datasets."""
- return await self._get(
- "/v1beta/datasets",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- post_parser=DataWrapper[DatasetListResponse]._unwrapper,
- ),
- cast_to=cast(Type[DatasetListResponse], DataWrapper[DatasetListResponse]),
- )
-
- async def appendrows(
- self,
- dataset_id: str,
- *,
- rows: Iterable[Dict[str, object]],
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> None:
- """
- Append rows to a dataset.
-
- Args:
- dataset_id: The ID of the dataset to append the rows to.
-
- rows: The rows to append to the dataset.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not dataset_id:
- raise ValueError(f"Expected a non-empty value for `dataset_id` but received {dataset_id!r}")
- extra_headers = {"Accept": "*/*", **(extra_headers or {})}
- return await self._post(
- path_template("/v1beta/datasetio/append-rows/{dataset_id}", dataset_id=dataset_id),
- body=await async_maybe_transform({"rows": rows}, dataset_appendrows_params.DatasetAppendrowsParams),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=NoneType,
- )
-
- async def iterrows(
- self,
- dataset_id: str,
- *,
- limit: Optional[int] | Omit = omit,
- start_index: Optional[int] | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> DatasetIterrowsResponse:
- """
- Get a paginated list of rows from a dataset.
-
- Uses offset-based pagination where:
-
- - start_index: The starting index (0-based). If None, starts from beginning.
- - limit: Number of items to return. If None or -1, returns all items.
-
- The response includes:
-
- - data: List of items for the current page.
- - has_more: Whether there are more items available after this set.
-
- Args:
- dataset_id: The ID of the dataset to get the rows from.
-
- limit: The number of rows to get.
-
- start_index: Index into dataset for the first row to get. Get all rows if None.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not dataset_id:
- raise ValueError(f"Expected a non-empty value for `dataset_id` but received {dataset_id!r}")
- return await self._get(
- path_template("/v1beta/datasetio/iterrows/{dataset_id}", dataset_id=dataset_id),
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "limit": limit,
- "start_index": start_index,
- },
- dataset_iterrows_params.DatasetIterrowsParams,
- ),
- ),
- cast_to=DatasetIterrowsResponse,
- )
-
- @typing_extensions.deprecated("deprecated")
- async def register(
- self,
- *,
- purpose: Literal["eval/question-answer", "eval/messages-answer"],
- source: dataset_register_params.Source,
- dataset_id: Optional[str] | Omit = omit,
- metadata: Optional[Dict[str, object]] | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> DatasetRegisterResponse:
- """
- Register a new dataset.
-
- Args:
- purpose: The purpose of the dataset.
-
- source: The data source of the dataset.
-
- dataset_id: The ID of the dataset. If not provided, an ID will be generated.
-
- metadata: The metadata for the dataset.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/v1beta/datasets",
- body=await async_maybe_transform(
- {
- "purpose": purpose,
- "source": source,
- "dataset_id": dataset_id,
- "metadata": metadata,
- },
- dataset_register_params.DatasetRegisterParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=DatasetRegisterResponse,
- )
-
- @typing_extensions.deprecated("deprecated")
- async def unregister(
- self,
- dataset_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> None:
- """
- Unregister a dataset by its ID.
-
- Args:
- dataset_id: The ID of the dataset to unregister.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not dataset_id:
- raise ValueError(f"Expected a non-empty value for `dataset_id` but received {dataset_id!r}")
- extra_headers = {"Accept": "*/*", **(extra_headers or {})}
- return await self._delete(
- path_template("/v1beta/datasets/{dataset_id}", dataset_id=dataset_id),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=NoneType,
- )
-
-
-class DatasetsResourceWithRawResponse:
- def __init__(self, datasets: DatasetsResource) -> None:
- self._datasets = datasets
-
- self.retrieve = to_raw_response_wrapper(
- datasets.retrieve,
- )
- self.list = to_raw_response_wrapper(
- datasets.list,
- )
- self.appendrows = to_raw_response_wrapper(
- datasets.appendrows,
- )
- self.iterrows = to_raw_response_wrapper(
- datasets.iterrows,
- )
- self.register = ( # pyright: ignore[reportDeprecated]
- to_raw_response_wrapper(
- datasets.register, # pyright: ignore[reportDeprecated],
- )
- )
- self.unregister = ( # pyright: ignore[reportDeprecated]
- to_raw_response_wrapper(
- datasets.unregister, # pyright: ignore[reportDeprecated],
- )
- )
-
-
-class AsyncDatasetsResourceWithRawResponse:
- def __init__(self, datasets: AsyncDatasetsResource) -> None:
- self._datasets = datasets
-
- self.retrieve = async_to_raw_response_wrapper(
- datasets.retrieve,
- )
- self.list = async_to_raw_response_wrapper(
- datasets.list,
- )
- self.appendrows = async_to_raw_response_wrapper(
- datasets.appendrows,
- )
- self.iterrows = async_to_raw_response_wrapper(
- datasets.iterrows,
- )
- self.register = ( # pyright: ignore[reportDeprecated]
- async_to_raw_response_wrapper(
- datasets.register, # pyright: ignore[reportDeprecated],
- )
- )
- self.unregister = ( # pyright: ignore[reportDeprecated]
- async_to_raw_response_wrapper(
- datasets.unregister, # pyright: ignore[reportDeprecated],
- )
- )
-
-
-class DatasetsResourceWithStreamingResponse:
- def __init__(self, datasets: DatasetsResource) -> None:
- self._datasets = datasets
-
- self.retrieve = to_streamed_response_wrapper(
- datasets.retrieve,
- )
- self.list = to_streamed_response_wrapper(
- datasets.list,
- )
- self.appendrows = to_streamed_response_wrapper(
- datasets.appendrows,
- )
- self.iterrows = to_streamed_response_wrapper(
- datasets.iterrows,
- )
- self.register = ( # pyright: ignore[reportDeprecated]
- to_streamed_response_wrapper(
- datasets.register, # pyright: ignore[reportDeprecated],
- )
- )
- self.unregister = ( # pyright: ignore[reportDeprecated]
- to_streamed_response_wrapper(
- datasets.unregister, # pyright: ignore[reportDeprecated],
- )
- )
-
-
-class AsyncDatasetsResourceWithStreamingResponse:
- def __init__(self, datasets: AsyncDatasetsResource) -> None:
- self._datasets = datasets
-
- self.retrieve = async_to_streamed_response_wrapper(
- datasets.retrieve,
- )
- self.list = async_to_streamed_response_wrapper(
- datasets.list,
- )
- self.appendrows = async_to_streamed_response_wrapper(
- datasets.appendrows,
- )
- self.iterrows = async_to_streamed_response_wrapper(
- datasets.iterrows,
- )
- self.register = ( # pyright: ignore[reportDeprecated]
- async_to_streamed_response_wrapper(
- datasets.register, # pyright: ignore[reportDeprecated],
- )
- )
- self.unregister = ( # pyright: ignore[reportDeprecated]
- async_to_streamed_response_wrapper(
- datasets.unregister, # pyright: ignore[reportDeprecated],
- )
- )
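
The file above removes the entire beta datasets surface. For migration reference, a sketch of the lifecycle it supported — register, append, then page through rows — reusing the `client` from the first sketch; the dataset id, row shape, and inline-rows source shape are assumptions:

```python
# Register a dataset (already deprecated before this removal), then append rows.
client.beta.datasets.register(
    purpose="eval/question-answer",
    source={"type": "rows", "rows": []},  # assumed inline-rows source shape
    dataset_id="my-eval-dataset",
)
client.beta.datasets.appendrows(
    "my-eval-dataset",
    rows=[{"question": "What is 2 + 2?", "answer": "4"}],
)

# Offset-based pagination per the iterrows docstring: advance start_index
# by the number of rows received until has_more is False.
start = 0
while True:
    page = client.beta.datasets.iterrows("my-eval-dataset", start_index=start, limit=100)
    for row in page.data:
        ...  # process each row dict
    if not page.has_more:
        break
    start += len(page.data)
```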
diff --git a/src/llama_stack_client/resources/scoring.py b/src/llama_stack_client/resources/scoring.py
deleted file mode 100644
index c8e793b4..00000000
--- a/src/llama_stack_client/resources/scoring.py
+++ /dev/null
@@ -1,300 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Iterable, Optional
-
-import httpx
-
-from ..types import scoring_score_params, scoring_score_batch_params
-from .._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
-from .._utils import maybe_transform, async_maybe_transform
-from .._compat import cached_property
-from .._resource import SyncAPIResource, AsyncAPIResource
-from .._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from .._base_client import make_request_options
-from ..types.scoring_score_response import ScoringScoreResponse
-from ..types.scoring_score_batch_response import ScoringScoreBatchResponse
-
-__all__ = ["ScoringResource", "AsyncScoringResource"]
-
-
-class ScoringResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> ScoringResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
- """
- return ScoringResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> ScoringResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
- """
- return ScoringResourceWithStreamingResponse(self)
-
- def score(
- self,
- *,
- input_rows: Iterable[Dict[str, object]],
- scoring_functions: Dict[str, Optional[scoring_score_params.ScoringFunctions]],
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> ScoringScoreResponse:
- """
- Score a list of rows.
-
- Args:
- input_rows: The rows to score.
-
- scoring_functions: The scoring functions to use for the scoring.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/v1/scoring/score",
- body=maybe_transform(
- {
- "input_rows": input_rows,
- "scoring_functions": scoring_functions,
- },
- scoring_score_params.ScoringScoreParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ScoringScoreResponse,
- )
-
- def score_batch(
- self,
- *,
- dataset_id: str,
- scoring_functions: Dict[str, Optional[scoring_score_batch_params.ScoringFunctions]],
- save_results_dataset: bool | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> ScoringScoreBatchResponse:
- """
- Score a batch of rows.
-
- Args:
- dataset_id: The ID of the dataset to score.
-
- scoring_functions: The scoring functions to use for the scoring.
-
- save_results_dataset: Whether to save the results to a dataset.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/v1/scoring/score-batch",
- body=maybe_transform(
- {
- "dataset_id": dataset_id,
- "scoring_functions": scoring_functions,
- "save_results_dataset": save_results_dataset,
- },
- scoring_score_batch_params.ScoringScoreBatchParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ScoringScoreBatchResponse,
- )
-
-
-class AsyncScoringResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncScoringResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
- """
- return AsyncScoringResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncScoringResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
- """
- return AsyncScoringResourceWithStreamingResponse(self)
-
- async def score(
- self,
- *,
- input_rows: Iterable[Dict[str, object]],
- scoring_functions: Dict[str, Optional[scoring_score_params.ScoringFunctions]],
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> ScoringScoreResponse:
- """
- Score a list of rows.
-
- Args:
- input_rows: The rows to score.
-
- scoring_functions: The scoring functions to use for the scoring.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/v1/scoring/score",
- body=await async_maybe_transform(
- {
- "input_rows": input_rows,
- "scoring_functions": scoring_functions,
- },
- scoring_score_params.ScoringScoreParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ScoringScoreResponse,
- )
-
- async def score_batch(
- self,
- *,
- dataset_id: str,
- scoring_functions: Dict[str, Optional[scoring_score_batch_params.ScoringFunctions]],
- save_results_dataset: bool | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> ScoringScoreBatchResponse:
- """
- Score a batch of rows.
-
- Args:
- dataset_id: The ID of the dataset to score.
-
- scoring_functions: The scoring functions to use for the scoring.
-
- save_results_dataset: Whether to save the results to a dataset.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/v1/scoring/score-batch",
- body=await async_maybe_transform(
- {
- "dataset_id": dataset_id,
- "scoring_functions": scoring_functions,
- "save_results_dataset": save_results_dataset,
- },
- scoring_score_batch_params.ScoringScoreBatchParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ScoringScoreBatchResponse,
- )
-
-
-class ScoringResourceWithRawResponse:
- def __init__(self, scoring: ScoringResource) -> None:
- self._scoring = scoring
-
- self.score = to_raw_response_wrapper(
- scoring.score,
- )
- self.score_batch = to_raw_response_wrapper(
- scoring.score_batch,
- )
-
-
-class AsyncScoringResourceWithRawResponse:
- def __init__(self, scoring: AsyncScoringResource) -> None:
- self._scoring = scoring
-
- self.score = async_to_raw_response_wrapper(
- scoring.score,
- )
- self.score_batch = async_to_raw_response_wrapper(
- scoring.score_batch,
- )
-
-
-class ScoringResourceWithStreamingResponse:
- def __init__(self, scoring: ScoringResource) -> None:
- self._scoring = scoring
-
- self.score = to_streamed_response_wrapper(
- scoring.score,
- )
- self.score_batch = to_streamed_response_wrapper(
- scoring.score_batch,
- )
-
-
-class AsyncScoringResourceWithStreamingResponse:
- def __init__(self, scoring: AsyncScoringResource) -> None:
- self._scoring = scoring
-
- self.score = async_to_streamed_response_wrapper(
- scoring.score,
- )
- self.score_batch = async_to_streamed_response_wrapper(
- scoring.score_batch,
- )
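
The removed scoring resource wrapped `/v1/scoring/score` and `/v1/scoring/score-batch`. A sketch of both calls, continuing with the same `client`; the scoring-function id and the `results` field on the response are assumptions inferred from the generated types:

```python
# Score rows held in memory; None means "use the function's default params".
result = client.scoring.score(
    input_rows=[
        {"input_query": "What is 2 + 2?", "generated_answer": "4", "expected_answer": "4"},
    ],
    scoring_functions={"basic::equality": None},  # assumed function id
)
for fn_id, scores in result.results.items():  # assumed response shape
    print(fn_id, scores)

# Or score an entire registered dataset server-side.
client.scoring.score_batch(
    dataset_id="my-eval-dataset",
    scoring_functions={"basic::equality": None},
    save_results_dataset=False,
)
```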
diff --git a/src/llama_stack_client/resources/scoring_functions.py b/src/llama_stack_client/resources/scoring_functions.py
deleted file mode 100644
index 50aeaebd..00000000
--- a/src/llama_stack_client/resources/scoring_functions.py
+++ /dev/null
@@ -1,472 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import typing_extensions
-from typing import Type, Optional, cast
-
-import httpx
-
-from ..types import scoring_function_register_params
-from .._types import Body, Omit, Query, Headers, NoneType, NotGiven, omit, not_given
-from .._utils import path_template, maybe_transform, async_maybe_transform
-from .._compat import cached_property
-from .._resource import SyncAPIResource, AsyncAPIResource
-from .._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from .._wrappers import DataWrapper
-from .._base_client import make_request_options
-from ..types.scoring_fn import ScoringFn
-from ..types.scoring_function_list_response import ScoringFunctionListResponse
-
-__all__ = ["ScoringFunctionsResource", "AsyncScoringFunctionsResource"]
-
-
-class ScoringFunctionsResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> ScoringFunctionsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
- """
- return ScoringFunctionsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> ScoringFunctionsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
- """
- return ScoringFunctionsResourceWithStreamingResponse(self)
-
- def retrieve(
- self,
- scoring_fn_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> ScoringFn:
- """
- Get a scoring function by its ID.
-
- Args:
- scoring_fn_id: The ID of the scoring function to get.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not scoring_fn_id:
- raise ValueError(f"Expected a non-empty value for `scoring_fn_id` but received {scoring_fn_id!r}")
- return self._get(
- path_template("/v1/scoring-functions/{scoring_fn_id}", scoring_fn_id=scoring_fn_id),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ScoringFn,
- )
-
- def list(
- self,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> ScoringFunctionListResponse:
- """List all scoring functions."""
- return self._get(
- "/v1/scoring-functions",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- post_parser=DataWrapper[ScoringFunctionListResponse]._unwrapper,
- ),
- cast_to=cast(Type[ScoringFunctionListResponse], DataWrapper[ScoringFunctionListResponse]),
- )
-
- @typing_extensions.deprecated("deprecated")
- def register(
- self,
- *,
- description: str,
- return_type: scoring_function_register_params.ReturnType,
- scoring_fn_id: str,
- params: Optional[scoring_function_register_params.Params] | Omit = omit,
- provider_id: Optional[str] | Omit = omit,
- provider_scoring_fn_id: Optional[str] | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> None:
- """
- Register a scoring function.
-
- Args:
- description: The description of the scoring function.
-
- scoring_fn_id: The ID of the scoring function to register.
-
- params: The parameters for the scoring function for benchmark eval, these can be
- overridden for app eval.
-
- provider_id: The ID of the provider to use for the scoring function.
-
- provider_scoring_fn_id: The ID of the provider scoring function to use for the scoring function.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- extra_headers = {"Accept": "*/*", **(extra_headers or {})}
- return self._post(
- "/v1/scoring-functions",
- body=maybe_transform(
- {
- "description": description,
- "return_type": return_type,
- "scoring_fn_id": scoring_fn_id,
- "params": params,
- "provider_id": provider_id,
- "provider_scoring_fn_id": provider_scoring_fn_id,
- },
- scoring_function_register_params.ScoringFunctionRegisterParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=NoneType,
- )
-
- @typing_extensions.deprecated("deprecated")
- def unregister(
- self,
- scoring_fn_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> None:
- """
- Unregister a scoring function.
-
- Args:
- scoring_fn_id: The ID of the scoring function to unregister.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not scoring_fn_id:
- raise ValueError(f"Expected a non-empty value for `scoring_fn_id` but received {scoring_fn_id!r}")
- extra_headers = {"Accept": "*/*", **(extra_headers or {})}
- return self._delete(
- path_template("/v1/scoring-functions/{scoring_fn_id}", scoring_fn_id=scoring_fn_id),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=NoneType,
- )
-
-
-class AsyncScoringFunctionsResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncScoringFunctionsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
- """
- return AsyncScoringFunctionsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncScoringFunctionsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
- """
- return AsyncScoringFunctionsResourceWithStreamingResponse(self)
-
- async def retrieve(
- self,
- scoring_fn_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> ScoringFn:
- """
- Get a scoring function by its ID.
-
- Args:
- scoring_fn_id: The ID of the scoring function to get.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not scoring_fn_id:
- raise ValueError(f"Expected a non-empty value for `scoring_fn_id` but received {scoring_fn_id!r}")
- return await self._get(
- path_template("/v1/scoring-functions/{scoring_fn_id}", scoring_fn_id=scoring_fn_id),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ScoringFn,
- )
-
- async def list(
- self,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> ScoringFunctionListResponse:
- """List all scoring functions."""
- return await self._get(
- "/v1/scoring-functions",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- post_parser=DataWrapper[ScoringFunctionListResponse]._unwrapper,
- ),
- cast_to=cast(Type[ScoringFunctionListResponse], DataWrapper[ScoringFunctionListResponse]),
- )
-
- @typing_extensions.deprecated("deprecated")
- async def register(
- self,
- *,
- description: str,
- return_type: scoring_function_register_params.ReturnType,
- scoring_fn_id: str,
- params: Optional[scoring_function_register_params.Params] | Omit = omit,
- provider_id: Optional[str] | Omit = omit,
- provider_scoring_fn_id: Optional[str] | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> None:
- """
- Register a scoring function.
-
- Args:
- description: The description of the scoring function.
-
- scoring_fn_id: The ID of the scoring function to register.
-
- params: The parameters for the scoring function for benchmark eval, these can be
- overridden for app eval.
-
- provider_id: The ID of the provider to use for the scoring function.
-
- provider_scoring_fn_id: The ID of the provider scoring function to use for the scoring function.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- extra_headers = {"Accept": "*/*", **(extra_headers or {})}
- return await self._post(
- "/v1/scoring-functions",
- body=await async_maybe_transform(
- {
- "description": description,
- "return_type": return_type,
- "scoring_fn_id": scoring_fn_id,
- "params": params,
- "provider_id": provider_id,
- "provider_scoring_fn_id": provider_scoring_fn_id,
- },
- scoring_function_register_params.ScoringFunctionRegisterParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=NoneType,
- )
-
- @typing_extensions.deprecated("deprecated")
- async def unregister(
- self,
- scoring_fn_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> None:
- """
- Unregister a scoring function.
-
- Args:
- scoring_fn_id: The ID of the scoring function to unregister.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not scoring_fn_id:
- raise ValueError(f"Expected a non-empty value for `scoring_fn_id` but received {scoring_fn_id!r}")
- extra_headers = {"Accept": "*/*", **(extra_headers or {})}
- return await self._delete(
- path_template("/v1/scoring-functions/{scoring_fn_id}", scoring_fn_id=scoring_fn_id),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=NoneType,
- )
-
-
-class ScoringFunctionsResourceWithRawResponse:
- def __init__(self, scoring_functions: ScoringFunctionsResource) -> None:
- self._scoring_functions = scoring_functions
-
- self.retrieve = to_raw_response_wrapper(
- scoring_functions.retrieve,
- )
- self.list = to_raw_response_wrapper(
- scoring_functions.list,
- )
- self.register = ( # pyright: ignore[reportDeprecated]
- to_raw_response_wrapper(
- scoring_functions.register, # pyright: ignore[reportDeprecated],
- )
- )
- self.unregister = ( # pyright: ignore[reportDeprecated]
- to_raw_response_wrapper(
- scoring_functions.unregister, # pyright: ignore[reportDeprecated],
- )
- )
-
-
-class AsyncScoringFunctionsResourceWithRawResponse:
- def __init__(self, scoring_functions: AsyncScoringFunctionsResource) -> None:
- self._scoring_functions = scoring_functions
-
- self.retrieve = async_to_raw_response_wrapper(
- scoring_functions.retrieve,
- )
- self.list = async_to_raw_response_wrapper(
- scoring_functions.list,
- )
- self.register = ( # pyright: ignore[reportDeprecated]
- async_to_raw_response_wrapper(
- scoring_functions.register, # pyright: ignore[reportDeprecated],
- )
- )
- self.unregister = ( # pyright: ignore[reportDeprecated]
- async_to_raw_response_wrapper(
- scoring_functions.unregister, # pyright: ignore[reportDeprecated],
- )
- )
-
-
-class ScoringFunctionsResourceWithStreamingResponse:
- def __init__(self, scoring_functions: ScoringFunctionsResource) -> None:
- self._scoring_functions = scoring_functions
-
- self.retrieve = to_streamed_response_wrapper(
- scoring_functions.retrieve,
- )
- self.list = to_streamed_response_wrapper(
- scoring_functions.list,
- )
- self.register = ( # pyright: ignore[reportDeprecated]
- to_streamed_response_wrapper(
- scoring_functions.register, # pyright: ignore[reportDeprecated],
- )
- )
- self.unregister = ( # pyright: ignore[reportDeprecated]
- to_streamed_response_wrapper(
- scoring_functions.unregister, # pyright: ignore[reportDeprecated],
- )
- )
-
-
-class AsyncScoringFunctionsResourceWithStreamingResponse:
- def __init__(self, scoring_functions: AsyncScoringFunctionsResource) -> None:
- self._scoring_functions = scoring_functions
-
- self.retrieve = async_to_streamed_response_wrapper(
- scoring_functions.retrieve,
- )
- self.list = async_to_streamed_response_wrapper(
- scoring_functions.list,
- )
- self.register = ( # pyright: ignore[reportDeprecated]
- async_to_streamed_response_wrapper(
- scoring_functions.register, # pyright: ignore[reportDeprecated],
- )
- )
- self.unregister = ( # pyright: ignore[reportDeprecated]
- async_to_streamed_response_wrapper(
- scoring_functions.unregister, # pyright: ignore[reportDeprecated],
- )
- )
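
The scoring-functions resource was effectively read-only by the time of this removal (`register`/`unregister` were already deprecated). A sketch of the read path, assuming `ScoringFn` carries `identifier` and `description` fields:

```python
# Enumerate the scoring functions the server advertises.
for fn in client.scoring_functions.list():
    print(fn.identifier, "-", fn.description)  # assumed field names

# Fetch a single function by id.
equality = client.scoring_functions.retrieve("basic::equality")  # assumed id
```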
diff --git a/src/llama_stack_client/types/alpha/__init__.py b/src/llama_stack_client/types/alpha/__init__.py
deleted file mode 100644
index a1d4e7ec..00000000
--- a/src/llama_stack_client/types/alpha/__init__.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from .job import Job as Job
-from .benchmark import Benchmark as Benchmark
-from .evaluate_response import EvaluateResponse as EvaluateResponse
-from .eval_run_eval_params import EvalRunEvalParams as EvalRunEvalParams
-from .benchmark_config_param import BenchmarkConfigParam as BenchmarkConfigParam
-from .benchmark_list_response import BenchmarkListResponse as BenchmarkListResponse
-from .inference_rerank_params import InferenceRerankParams as InferenceRerankParams
-from .admin_list_routes_params import AdminListRoutesParams as AdminListRoutesParams
-from .list_benchmarks_response import ListBenchmarksResponse as ListBenchmarksResponse
-from .benchmark_register_params import BenchmarkRegisterParams as BenchmarkRegisterParams
-from .eval_evaluate_rows_params import EvalEvaluateRowsParams as EvalEvaluateRowsParams
-from .inference_rerank_response import InferenceRerankResponse as InferenceRerankResponse
-from .eval_run_eval_alpha_params import EvalRunEvalAlphaParams as EvalRunEvalAlphaParams
-from .eval_evaluate_rows_alpha_params import EvalEvaluateRowsAlphaParams as EvalEvaluateRowsAlphaParams
diff --git a/src/llama_stack_client/types/alpha/benchmark.py b/src/llama_stack_client/types/alpha/benchmark.py
deleted file mode 100644
index ff555036..00000000
--- a/src/llama_stack_client/types/alpha/benchmark.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Optional
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["Benchmark"]
-
-
-class Benchmark(BaseModel):
- """A benchmark resource for evaluating model performance."""
-
- dataset_id: str
- """Identifier of the dataset to use for the benchmark evaluation."""
-
- identifier: str
- """Unique identifier for this resource in llama stack"""
-
- provider_id: str
- """ID of the provider that owns this resource"""
-
- scoring_functions: List[str]
- """List of scoring function identifiers to apply during evaluation."""
-
- metadata: Optional[Dict[str, object]] = None
- """Metadata for this evaluation task."""
-
- provider_resource_id: Optional[str] = None
- """Unique identifier for this resource in the provider"""
-
- type: Optional[Literal["benchmark"]] = None
- """The resource type, always benchmark."""
diff --git a/src/llama_stack_client/types/alpha/benchmark_config_param.py b/src/llama_stack_client/types/alpha/benchmark_config_param.py
deleted file mode 100644
index 58259b9c..00000000
--- a/src/llama_stack_client/types/alpha/benchmark_config_param.py
+++ /dev/null
@@ -1,101 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, List, Union, Optional
-from typing_extensions import Literal, Required, TypeAlias, TypedDict
-
-from ..._types import SequenceNotStr
-from ..shared_params.system_message import SystemMessage
-from ..shared_params.sampling_params import SamplingParams
-
-__all__ = [
- "BenchmarkConfigParam",
- "EvalCandidate",
- "ScoringParams",
- "ScoringParamsLlmAsJudgeScoringFnParams",
- "ScoringParamsRegexParserScoringFnParams",
- "ScoringParamsBasicScoringFnParams",
-]
-
-
-class EvalCandidate(TypedDict, total=False):
- """The candidate to evaluate"""
-
- model: Required[str]
- """The model ID to evaluate"""
-
- sampling_params: Required[SamplingParams]
- """The sampling parameters for the model"""
-
- system_message: Optional[SystemMessage]
- """A system message providing instructions or context to the model."""
-
- type: Literal["model"]
-
-
-class ScoringParamsLlmAsJudgeScoringFnParams(TypedDict, total=False):
- """Parameters for LLM-as-judge scoring function configuration."""
-
- judge_model: Required[str]
-
- aggregation_functions: List[Literal["average", "weighted_average", "median", "categorical_count", "accuracy"]]
- """Aggregation functions to apply to the scores of each row"""
-
- judge_score_regexes: SequenceNotStr[str]
- """Regexes to extract the answer from generated response"""
-
- prompt_template: Optional[str]
-
- type: Literal["llm_as_judge"]
-
-
-class ScoringParamsRegexParserScoringFnParams(TypedDict, total=False):
- """Parameters for regex parser scoring function configuration."""
-
- aggregation_functions: List[Literal["average", "weighted_average", "median", "categorical_count", "accuracy"]]
- """Aggregation functions to apply to the scores of each row"""
-
- parsing_regexes: SequenceNotStr[str]
- """Regex to extract the answer from generated response"""
-
- type: Literal["regex_parser"]
-
-
-class ScoringParamsBasicScoringFnParams(TypedDict, total=False):
- """Parameters for basic scoring function configuration."""
-
- aggregation_functions: List[Literal["average", "weighted_average", "median", "categorical_count", "accuracy"]]
- """Aggregation functions to apply to the scores of each row"""
-
- type: Literal["basic"]
-
-
-ScoringParams: TypeAlias = Union[
- ScoringParamsLlmAsJudgeScoringFnParams, ScoringParamsRegexParserScoringFnParams, ScoringParamsBasicScoringFnParams
-]
-
-
-class BenchmarkConfigParam(TypedDict, total=False):
- """A benchmark configuration for evaluation."""
-
- eval_candidate: Required[EvalCandidate]
- """The candidate to evaluate"""
-
- num_examples: Optional[int]
- """
- Number of examples to evaluate (useful for testing), if not provided, all
- examples in the dataset will be evaluated
- """
-
- scoring_params: Dict[str, ScoringParams]
- """
- Map between scoring function id and parameters for each scoring function you
- want to run
- """
diff --git a/src/llama_stack_client/types/alpha/benchmark_list_response.py b/src/llama_stack_client/types/alpha/benchmark_list_response.py
deleted file mode 100644
index 56d7d8ba..00000000
--- a/src/llama_stack_client/types/alpha/benchmark_list_response.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-from typing_extensions import TypeAlias
-
-from .benchmark import Benchmark
-
-__all__ = ["BenchmarkListResponse"]
-
-BenchmarkListResponse: TypeAlias = List[Benchmark]
diff --git a/src/llama_stack_client/types/alpha/benchmark_register_params.py b/src/llama_stack_client/types/alpha/benchmark_register_params.py
deleted file mode 100644
index b3e09526..00000000
--- a/src/llama_stack_client/types/alpha/benchmark_register_params.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Optional
-from typing_extensions import Required, TypedDict
-
-from ..._types import SequenceNotStr
-
-__all__ = ["BenchmarkRegisterParams"]
-
-
-class BenchmarkRegisterParams(TypedDict, total=False):
- benchmark_id: Required[str]
- """The ID of the benchmark to register."""
-
- dataset_id: Required[str]
- """The ID of the dataset to use for the benchmark."""
-
- scoring_functions: Required[SequenceNotStr[str]]
- """The scoring functions to use for the benchmark."""
-
- metadata: Optional[Dict[str, object]]
- """The metadata to use for the benchmark."""
-
- provider_benchmark_id: Optional[str]
- """The ID of the provider benchmark to use for the benchmark."""
-
- provider_id: Optional[str]
- """The ID of the provider to use for the benchmark."""
diff --git a/src/llama_stack_client/types/alpha/eval_evaluate_rows_alpha_params.py b/src/llama_stack_client/types/alpha/eval_evaluate_rows_alpha_params.py
deleted file mode 100644
index 7013c4c4..00000000
--- a/src/llama_stack_client/types/alpha/eval_evaluate_rows_alpha_params.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Iterable
-from typing_extensions import Required, TypedDict
-
-from ..._types import SequenceNotStr
-from .benchmark_config_param import BenchmarkConfigParam
-
-__all__ = ["EvalEvaluateRowsAlphaParams"]
-
-
-class EvalEvaluateRowsAlphaParams(TypedDict, total=False):
- benchmark_config: Required[BenchmarkConfigParam]
- """The configuration for the benchmark"""
-
- input_rows: Required[Iterable[Dict[str, object]]]
- """The rows to evaluate"""
-
- scoring_functions: Required[SequenceNotStr[str]]
- """The scoring functions to use for the evaluation"""
diff --git a/src/llama_stack_client/types/alpha/eval_evaluate_rows_params.py b/src/llama_stack_client/types/alpha/eval_evaluate_rows_params.py
deleted file mode 100644
index ca567a83..00000000
--- a/src/llama_stack_client/types/alpha/eval_evaluate_rows_params.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Iterable
-from typing_extensions import Required, TypedDict
-
-from ..._types import SequenceNotStr
-from .benchmark_config_param import BenchmarkConfigParam
-
-__all__ = ["EvalEvaluateRowsParams"]
-
-
-class EvalEvaluateRowsParams(TypedDict, total=False):
- benchmark_config: Required[BenchmarkConfigParam]
- """The configuration for the benchmark"""
-
- input_rows: Required[Iterable[Dict[str, object]]]
- """The rows to evaluate"""
-
- scoring_functions: Required[SequenceNotStr[str]]
- """The scoring functions to use for the evaluation"""
diff --git a/src/llama_stack_client/types/alpha/eval_run_eval_alpha_params.py b/src/llama_stack_client/types/alpha/eval_run_eval_alpha_params.py
deleted file mode 100644
index 75df39f2..00000000
--- a/src/llama_stack_client/types/alpha/eval_run_eval_alpha_params.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Required, TypedDict
-
-from .benchmark_config_param import BenchmarkConfigParam
-
-__all__ = ["EvalRunEvalAlphaParams"]
-
-
-class EvalRunEvalAlphaParams(TypedDict, total=False):
- benchmark_config: Required[BenchmarkConfigParam]
- """The configuration for the benchmark"""
diff --git a/src/llama_stack_client/types/alpha/eval_run_eval_params.py b/src/llama_stack_client/types/alpha/eval_run_eval_params.py
deleted file mode 100644
index 25f9eaa0..00000000
--- a/src/llama_stack_client/types/alpha/eval_run_eval_params.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Required, TypedDict
-
-from .benchmark_config_param import BenchmarkConfigParam
-
-__all__ = ["EvalRunEvalParams"]
-
-
-class EvalRunEvalParams(TypedDict, total=False):
- benchmark_config: Required[BenchmarkConfigParam]
- """The configuration for the benchmark"""
diff --git a/src/llama_stack_client/types/alpha/evaluate_response.py b/src/llama_stack_client/types/alpha/evaluate_response.py
deleted file mode 100644
index 078ec020..00000000
--- a/src/llama_stack_client/types/alpha/evaluate_response.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List
-
-from ..._models import BaseModel
-from ..shared.scoring_result import ScoringResult
-
-__all__ = ["EvaluateResponse"]
-
-
-class EvaluateResponse(BaseModel):
- """The response from an evaluation."""
-
- generations: List[Dict[str, object]]
- """The generations from the evaluation"""
-
- scores: Dict[str, ScoringResult]
- """The scores from the evaluation. Each key in the dict is a scoring function name"""
diff --git a/src/llama_stack_client/types/alpha/job.py b/src/llama_stack_client/types/alpha/job.py
deleted file mode 100644
index 6234f1f0..00000000
--- a/src/llama_stack_client/types/alpha/job.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["Job"]
-
-
-class Job(BaseModel):
- """A job execution instance with status tracking."""
-
- job_id: str
-
- status: Literal["completed", "in_progress", "failed", "scheduled", "cancelled"]
- """Status of a job execution."""
diff --git a/src/llama_stack_client/types/alpha/list_benchmarks_response.py b/src/llama_stack_client/types/alpha/list_benchmarks_response.py
deleted file mode 100644
index 671b1b1d..00000000
--- a/src/llama_stack_client/types/alpha/list_benchmarks_response.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from ..._models import BaseModel
-from .benchmark_list_response import BenchmarkListResponse
-
-__all__ = ["ListBenchmarksResponse"]
-
-
-class ListBenchmarksResponse(BaseModel):
- """Response containing a list of benchmark objects."""
-
- data: BenchmarkListResponse
- """List of benchmark objects."""
diff --git a/src/llama_stack_client/types/beta/__init__.py b/src/llama_stack_client/types/beta/__init__.py
deleted file mode 100644
index 3f9f5033..00000000
--- a/src/llama_stack_client/types/beta/__init__.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from .dataset_list_response import DatasetListResponse as DatasetListResponse
-from .list_datasets_response import ListDatasetsResponse as ListDatasetsResponse
-from .dataset_iterrows_params import DatasetIterrowsParams as DatasetIterrowsParams
-from .dataset_register_params import DatasetRegisterParams as DatasetRegisterParams
-from .dataset_appendrows_params import DatasetAppendrowsParams as DatasetAppendrowsParams
-from .dataset_iterrows_response import DatasetIterrowsResponse as DatasetIterrowsResponse
-from .dataset_register_response import DatasetRegisterResponse as DatasetRegisterResponse
-from .dataset_retrieve_response import DatasetRetrieveResponse as DatasetRetrieveResponse
diff --git a/src/llama_stack_client/types/beta/dataset_appendrows_params.py b/src/llama_stack_client/types/beta/dataset_appendrows_params.py
deleted file mode 100644
index bc7f4aca..00000000
--- a/src/llama_stack_client/types/beta/dataset_appendrows_params.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Iterable
-from typing_extensions import Required, TypedDict
-
-__all__ = ["DatasetAppendrowsParams"]
-
-
-class DatasetAppendrowsParams(TypedDict, total=False):
- rows: Required[Iterable[Dict[str, object]]]
- """The rows to append to the dataset."""
diff --git a/src/llama_stack_client/types/beta/dataset_iterrows_params.py b/src/llama_stack_client/types/beta/dataset_iterrows_params.py
deleted file mode 100644
index 33c8f4f1..00000000
--- a/src/llama_stack_client/types/beta/dataset_iterrows_params.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Optional
-from typing_extensions import TypedDict
-
-__all__ = ["DatasetIterrowsParams"]
-
-
-class DatasetIterrowsParams(TypedDict, total=False):
- limit: Optional[int]
- """The number of rows to get."""
-
- start_index: Optional[int]
- """Index into dataset for the first row to get. Get all rows if None."""
diff --git a/src/llama_stack_client/types/beta/dataset_iterrows_response.py b/src/llama_stack_client/types/beta/dataset_iterrows_response.py
deleted file mode 100644
index ea7e1d6d..00000000
--- a/src/llama_stack_client/types/beta/dataset_iterrows_response.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Optional
-
-from ..._models import BaseModel
-
-__all__ = ["DatasetIterrowsResponse"]
-
-
-class DatasetIterrowsResponse(BaseModel):
- """A generic paginated response that follows a simple format."""
-
- data: List[Dict[str, object]]
-
- has_more: bool
-
- url: Optional[str] = None
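
Taken together, the deleted DatasetIterrowsParams and DatasetIterrowsResponse describe plain offset pagination. A sketch of draining a dataset with them, where `iterrows` is a stand-in for the removed endpoint:

```python
def iterate_all_rows(iterrows, page_size: int = 100):
    """Yield every row, advancing start_index by the size of each page."""
    start = 0
    while True:
        page = iterrows(limit=page_size, start_index=start)  # response-shaped dict
        yield from page["data"]
        if not page["has_more"] or not page["data"]:
            break
        start += len(page["data"])
```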
diff --git a/src/llama_stack_client/types/beta/dataset_list_response.py b/src/llama_stack_client/types/beta/dataset_list_response.py
deleted file mode 100644
index e28253d8..00000000
--- a/src/llama_stack_client/types/beta/dataset_list_response.py
+++ /dev/null
@@ -1,83 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Union, Optional
-from typing_extensions import Literal, Annotated, TypeAlias
-
-from ..._utils import PropertyInfo
-from ..._models import BaseModel
-
-__all__ = [
- "DatasetListResponse",
- "DatasetListResponseItem",
- "DatasetListResponseItemSource",
- "DatasetListResponseItemSourceUriDataSource",
- "DatasetListResponseItemSourceRowsDataSource",
-]
-
-
-class DatasetListResponseItemSourceUriDataSource(BaseModel):
- """A dataset that can be obtained from a URI."""
-
- uri: str
- """The dataset can be obtained from a URI.
-
- E.g. "https://mywebsite.com/mydata.jsonl", "lsfs://mydata.jsonl",
- "data:csv;base64,{base64_content}"
- """
-
- type: Optional[Literal["uri"]] = None
- """The type of data source."""
-
-
-class DatasetListResponseItemSourceRowsDataSource(BaseModel):
- """A dataset stored in rows."""
-
- rows: List[Dict[str, object]]
- """The dataset is stored in rows.
-
- E.g. [{"messages": [{"role": "user", "content": "Hello, world!"}, {"role":
- "assistant", "content": "Hello, world!"}]}]
- """
-
- type: Optional[Literal["rows"]] = None
- """The type of data source."""
-
-
-DatasetListResponseItemSource: TypeAlias = Annotated[
- Union[DatasetListResponseItemSourceUriDataSource, DatasetListResponseItemSourceRowsDataSource],
- PropertyInfo(discriminator="type"),
-]
-
-
-class DatasetListResponseItem(BaseModel):
- """Dataset resource for storing and accessing training or evaluation data."""
-
- identifier: str
- """Unique identifier for this resource in llama stack"""
-
- provider_id: str
- """ID of the provider that owns this resource"""
-
- purpose: Literal["eval/question-answer", "eval/messages-answer"]
- """Purpose of the dataset indicating its intended use"""
-
- source: DatasetListResponseItemSource
- """Data source configuration for the dataset"""
-
- metadata: Optional[Dict[str, object]] = None
- """Any additional metadata for this dataset"""
-
- provider_resource_id: Optional[str] = None
- """Unique identifier for this resource in the provider"""
-
- type: Optional[Literal["dataset"]] = None
- """Type of resource, always 'dataset' for datasets"""
-
-
-DatasetListResponse: TypeAlias = List[DatasetListResponseItem]
diff --git a/src/llama_stack_client/types/beta/dataset_register_params.py b/src/llama_stack_client/types/beta/dataset_register_params.py
deleted file mode 100644
index b743b212..00000000
--- a/src/llama_stack_client/types/beta/dataset_register_params.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Union, Iterable, Optional
-from typing_extensions import Literal, Required, TypeAlias, TypedDict
-
-__all__ = ["DatasetRegisterParams", "Source", "SourceUriDataSource", "SourceRowsDataSource"]
-
-
-class DatasetRegisterParams(TypedDict, total=False):
- purpose: Required[Literal["eval/question-answer", "eval/messages-answer"]]
- """The purpose of the dataset."""
-
- source: Required[Source]
- """The data source of the dataset."""
-
- dataset_id: Optional[str]
- """The ID of the dataset. If not provided, an ID will be generated."""
-
- metadata: Optional[Dict[str, object]]
- """The metadata for the dataset."""
-
-
-class SourceUriDataSource(TypedDict, total=False):
- """A dataset that can be obtained from a URI."""
-
- uri: Required[str]
- """The dataset can be obtained from a URI.
-
- E.g. "https://mywebsite.com/mydata.jsonl", "lsfs://mydata.jsonl",
- "data:csv;base64,{base64_content}"
- """
-
- type: Literal["uri"]
- """The type of data source."""
-
-
-class SourceRowsDataSource(TypedDict, total=False):
- """A dataset stored in rows."""
-
- rows: Required[Iterable[Dict[str, object]]]
- """The dataset is stored in rows.
-
- E.g. [{"messages": [{"role": "user", "content": "Hello, world!"}, {"role":
- "assistant", "content": "Hello, world!"}]}]
- """
-
- type: Literal["rows"]
- """The type of data source."""
-
-
-Source: TypeAlias = Union[SourceUriDataSource, SourceRowsDataSource]
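
For reference, the two Source variants the deleted DatasetRegisterParams accepted, as plain dicts; URIs, ids, and rows are illustrative.

```python
# URI-backed dataset registration, per the deleted SourceUriDataSource shape.
uri_registration = {
    "purpose": "eval/question-answer",
    "source": {"type": "uri", "uri": "https://mywebsite.com/mydata.jsonl"},
    "dataset_id": "example::qa-dataset",  # optional; generated when omitted
}

# Inline rows registration, per the deleted SourceRowsDataSource shape.
rows_registration = {
    "purpose": "eval/messages-answer",
    "source": {
        "type": "rows",
        "rows": [{"messages": [{"role": "user", "content": "Hello, world!"}]}],
    },
    "metadata": {"origin": "inline"},  # optional
}
```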
diff --git a/src/llama_stack_client/types/beta/dataset_register_response.py b/src/llama_stack_client/types/beta/dataset_register_response.py
deleted file mode 100644
index 39596092..00000000
--- a/src/llama_stack_client/types/beta/dataset_register_response.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Union, Optional
-from typing_extensions import Literal, Annotated, TypeAlias
-
-from ..._utils import PropertyInfo
-from ..._models import BaseModel
-
-__all__ = ["DatasetRegisterResponse", "Source", "SourceUriDataSource", "SourceRowsDataSource"]
-
-
-class SourceUriDataSource(BaseModel):
- """A dataset that can be obtained from a URI."""
-
- uri: str
- """The dataset can be obtained from a URI.
-
- E.g. "https://mywebsite.com/mydata.jsonl", "lsfs://mydata.jsonl",
- "data:csv;base64,{base64_content}"
- """
-
- type: Optional[Literal["uri"]] = None
- """The type of data source."""
-
-
-class SourceRowsDataSource(BaseModel):
- """A dataset stored in rows."""
-
- rows: List[Dict[str, object]]
- """The dataset is stored in rows.
-
- E.g. [{"messages": [{"role": "user", "content": "Hello, world!"}, {"role":
- "assistant", "content": "Hello, world!"}]}]
- """
-
- type: Optional[Literal["rows"]] = None
- """The type of data source."""
-
-
-Source: TypeAlias = Annotated[Union[SourceUriDataSource, SourceRowsDataSource], PropertyInfo(discriminator="type")]
-
-
-class DatasetRegisterResponse(BaseModel):
- """Dataset resource for storing and accessing training or evaluation data."""
-
- identifier: str
- """Unique identifier for this resource in llama stack"""
-
- provider_id: str
- """ID of the provider that owns this resource"""
-
- purpose: Literal["eval/question-answer", "eval/messages-answer"]
- """Purpose of the dataset indicating its intended use"""
-
- source: Source
- """Data source configuration for the dataset"""
-
- metadata: Optional[Dict[str, object]] = None
- """Any additional metadata for this dataset"""
-
- provider_resource_id: Optional[str] = None
- """Unique identifier for this resource in the provider"""
-
- type: Optional[Literal["dataset"]] = None
- """Type of resource, always 'dataset' for datasets"""
diff --git a/src/llama_stack_client/types/beta/dataset_retrieve_response.py b/src/llama_stack_client/types/beta/dataset_retrieve_response.py
deleted file mode 100644
index 053c27d8..00000000
--- a/src/llama_stack_client/types/beta/dataset_retrieve_response.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Union, Optional
-from typing_extensions import Literal, Annotated, TypeAlias
-
-from ..._utils import PropertyInfo
-from ..._models import BaseModel
-
-__all__ = ["DatasetRetrieveResponse", "Source", "SourceUriDataSource", "SourceRowsDataSource"]
-
-
-class SourceUriDataSource(BaseModel):
- """A dataset that can be obtained from a URI."""
-
- uri: str
- """The dataset can be obtained from a URI.
-
- E.g. "https://mywebsite.com/mydata.jsonl", "lsfs://mydata.jsonl",
- "data:csv;base64,{base64_content}"
- """
-
- type: Optional[Literal["uri"]] = None
- """The type of data source."""
-
-
-class SourceRowsDataSource(BaseModel):
- """A dataset stored in rows."""
-
- rows: List[Dict[str, object]]
- """The dataset is stored in rows.
-
- E.g. [{"messages": [{"role": "user", "content": "Hello, world!"}, {"role":
- "assistant", "content": "Hello, world!"}]}]
- """
-
- type: Optional[Literal["rows"]] = None
- """The type of data source."""
-
-
-Source: TypeAlias = Annotated[Union[SourceUriDataSource, SourceRowsDataSource], PropertyInfo(discriminator="type")]
-
-
-class DatasetRetrieveResponse(BaseModel):
- """Dataset resource for storing and accessing training or evaluation data."""
-
- identifier: str
- """Unique identifier for this resource in llama stack"""
-
- provider_id: str
- """ID of the provider that owns this resource"""
-
- purpose: Literal["eval/question-answer", "eval/messages-answer"]
- """Purpose of the dataset indicating its intended use"""
-
- source: Source
- """Data source configuration for the dataset"""
-
- metadata: Optional[Dict[str, object]] = None
- """Any additional metadata for this dataset"""
-
- provider_resource_id: Optional[str] = None
- """Unique identifier for this resource in the provider"""
-
- type: Optional[Literal["dataset"]] = None
- """Type of resource, always 'dataset' for datasets"""
diff --git a/src/llama_stack_client/types/beta/list_datasets_response.py b/src/llama_stack_client/types/beta/list_datasets_response.py
deleted file mode 100644
index 54d79396..00000000
--- a/src/llama_stack_client/types/beta/list_datasets_response.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from ..._models import BaseModel
-from .dataset_list_response import DatasetListResponse
-
-__all__ = ["ListDatasetsResponse"]
-
-
-class ListDatasetsResponse(BaseModel):
- """Response from listing datasets."""
-
- data: DatasetListResponse
- """List of datasets"""
diff --git a/src/llama_stack_client/types/list_scoring_functions_response.py b/src/llama_stack_client/types/list_scoring_functions_response.py
deleted file mode 100644
index b6c8ade3..00000000
--- a/src/llama_stack_client/types/list_scoring_functions_response.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .._models import BaseModel
-from .scoring_function_list_response import ScoringFunctionListResponse
-
-__all__ = ["ListScoringFunctionsResponse"]
-
-
-class ListScoringFunctionsResponse(BaseModel):
- """Response containing a list of scoring function objects."""
-
- data: ScoringFunctionListResponse
- """List of scoring function objects."""
diff --git a/src/llama_stack_client/types/model_list_response.py b/src/llama_stack_client/types/model_list_response.py
deleted file mode 100644
index b53ae421..00000000
--- a/src/llama_stack_client/types/model_list_response.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-from typing_extensions import TypeAlias
-
-from .model import Model
-
-__all__ = ["ModelListResponse"]
-
-ModelListResponse: TypeAlias = List[Model]
diff --git a/src/llama_stack_client/types/scoring_fn.py b/src/llama_stack_client/types/scoring_fn.py
deleted file mode 100644
index 19998174..00000000
--- a/src/llama_stack_client/types/scoring_fn.py
+++ /dev/null
@@ -1,114 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List, Union, Optional
-from typing_extensions import Literal, Annotated, TypeAlias
-
-from .._utils import PropertyInfo
-from .._models import BaseModel
-
-__all__ = [
- "ScoringFn",
- "ReturnType",
- "Params",
- "ParamsLlmAsJudgeScoringFnParams",
- "ParamsRegexParserScoringFnParams",
- "ParamsBasicScoringFnParams",
-]
-
-
-class ReturnType(BaseModel):
- type: Literal[
- "string",
- "number",
- "boolean",
- "array",
- "object",
- "json",
- "union",
- "chat_completion_input",
- "completion_input",
- "agent_turn_input",
- ]
-
-
-class ParamsLlmAsJudgeScoringFnParams(BaseModel):
- """Parameters for LLM-as-judge scoring function configuration."""
-
- judge_model: str
-
- aggregation_functions: Optional[
- List[Literal["average", "weighted_average", "median", "categorical_count", "accuracy"]]
- ] = None
- """Aggregation functions to apply to the scores of each row"""
-
- judge_score_regexes: Optional[List[str]] = None
- """Regexes to extract the answer from generated response"""
-
- prompt_template: Optional[str] = None
-
- type: Optional[Literal["llm_as_judge"]] = None
-
-
-class ParamsRegexParserScoringFnParams(BaseModel):
- """Parameters for regex parser scoring function configuration."""
-
- aggregation_functions: Optional[
- List[Literal["average", "weighted_average", "median", "categorical_count", "accuracy"]]
- ] = None
- """Aggregation functions to apply to the scores of each row"""
-
- parsing_regexes: Optional[List[str]] = None
- """Regex to extract the answer from generated response"""
-
- type: Optional[Literal["regex_parser"]] = None
-
-
-class ParamsBasicScoringFnParams(BaseModel):
- """Parameters for basic scoring function configuration."""
-
- aggregation_functions: Optional[
- List[Literal["average", "weighted_average", "median", "categorical_count", "accuracy"]]
- ] = None
- """Aggregation functions to apply to the scores of each row"""
-
- type: Optional[Literal["basic"]] = None
-
-
-Params: TypeAlias = Annotated[
- Union[ParamsLlmAsJudgeScoringFnParams, ParamsRegexParserScoringFnParams, ParamsBasicScoringFnParams, None],
- PropertyInfo(discriminator="type"),
-]
-
-
-class ScoringFn(BaseModel):
- """A scoring function resource for evaluating model outputs."""
-
- identifier: str
- """Unique identifier for this resource in llama stack"""
-
- provider_id: str
- """ID of the provider that owns this resource"""
-
- return_type: ReturnType
-
- description: Optional[str] = None
-
- metadata: Optional[Dict[str, object]] = None
- """Any additional metadata for this definition"""
-
- params: Optional[Params] = None
- """
-    The parameters for the scoring function for benchmark eval; these can be
- overridden for app eval
- """
-
- provider_resource_id: Optional[str] = None
- """Unique identifier for this resource in the provider"""
-
- type: Optional[Literal["scoring_function"]] = None
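
The deleted Params union is discriminated on `type` (the PropertyInfo annotation above). A sketch of the dispatch that discriminator enables, written against the plain-dict form of the same payloads:

```python
def describe_params(params: dict) -> str:
    """Branch on the 'type' discriminator of a scoring-params dict."""
    kind = params.get("type")
    if kind == "llm_as_judge":
        return f"LLM judge using {params['judge_model']}"
    if kind == "regex_parser":
        return f"regex parser with {len(params.get('parsing_regexes', []))} pattern(s)"
    if kind == "basic":
        return "aggregation-only scorer"
    raise ValueError(f"unknown scoring params type: {kind!r}")

print(describe_params({"type": "llm_as_judge", "judge_model": "example-judge"}))
```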
diff --git a/src/llama_stack_client/types/scoring_function_list_response.py b/src/llama_stack_client/types/scoring_function_list_response.py
deleted file mode 100644
index 9372fa59..00000000
--- a/src/llama_stack_client/types/scoring_function_list_response.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-from typing_extensions import TypeAlias
-
-from .scoring_fn import ScoringFn
-
-__all__ = ["ScoringFunctionListResponse"]
-
-ScoringFunctionListResponse: TypeAlias = List[ScoringFn]
diff --git a/src/llama_stack_client/types/scoring_function_register_params.py b/src/llama_stack_client/types/scoring_function_register_params.py
deleted file mode 100644
index b77da747..00000000
--- a/src/llama_stack_client/types/scoring_function_register_params.py
+++ /dev/null
@@ -1,102 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List, Union, Optional
-from typing_extensions import Literal, Required, TypeAlias, TypedDict
-
-from .._types import SequenceNotStr
-
-__all__ = [
- "ScoringFunctionRegisterParams",
- "ReturnType",
- "Params",
- "ParamsLlmAsJudgeScoringFnParams",
- "ParamsRegexParserScoringFnParams",
- "ParamsBasicScoringFnParams",
-]
-
-
-class ScoringFunctionRegisterParams(TypedDict, total=False):
- description: Required[str]
- """The description of the scoring function."""
-
- return_type: Required[ReturnType]
-
- scoring_fn_id: Required[str]
- """The ID of the scoring function to register."""
-
- params: Optional[Params]
- """
-    The parameters for the scoring function for benchmark eval; these can be
- overridden for app eval.
- """
-
- provider_id: Optional[str]
- """The ID of the provider to use for the scoring function."""
-
- provider_scoring_fn_id: Optional[str]
- """The ID of the provider scoring function to use for the scoring function."""
-
-
-class ReturnType(TypedDict, total=False):
- type: Required[
- Literal[
- "string",
- "number",
- "boolean",
- "array",
- "object",
- "json",
- "union",
- "chat_completion_input",
- "completion_input",
- "agent_turn_input",
- ]
- ]
-
-
-class ParamsLlmAsJudgeScoringFnParams(TypedDict, total=False):
- """Parameters for LLM-as-judge scoring function configuration."""
-
- judge_model: Required[str]
-
- aggregation_functions: List[Literal["average", "weighted_average", "median", "categorical_count", "accuracy"]]
- """Aggregation functions to apply to the scores of each row"""
-
- judge_score_regexes: SequenceNotStr[str]
- """Regexes to extract the answer from generated response"""
-
- prompt_template: Optional[str]
-
- type: Literal["llm_as_judge"]
-
-
-class ParamsRegexParserScoringFnParams(TypedDict, total=False):
- """Parameters for regex parser scoring function configuration."""
-
- aggregation_functions: List[Literal["average", "weighted_average", "median", "categorical_count", "accuracy"]]
- """Aggregation functions to apply to the scores of each row"""
-
- parsing_regexes: SequenceNotStr[str]
- """Regex to extract the answer from generated response"""
-
- type: Literal["regex_parser"]
-
-
-class ParamsBasicScoringFnParams(TypedDict, total=False):
- """Parameters for basic scoring function configuration."""
-
- aggregation_functions: List[Literal["average", "weighted_average", "median", "categorical_count", "accuracy"]]
- """Aggregation functions to apply to the scores of each row"""
-
- type: Literal["basic"]
-
-
-Params: TypeAlias = Union[ParamsLlmAsJudgeScoringFnParams, ParamsRegexParserScoringFnParams, ParamsBasicScoringFnParams]
diff --git a/src/llama_stack_client/types/scoring_score_batch_params.py b/src/llama_stack_client/types/scoring_score_batch_params.py
deleted file mode 100644
index 4e174c70..00000000
--- a/src/llama_stack_client/types/scoring_score_batch_params.py
+++ /dev/null
@@ -1,77 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, List, Union, Optional
-from typing_extensions import Literal, Required, TypeAlias, TypedDict
-
-from .._types import SequenceNotStr
-
-__all__ = [
- "ScoringScoreBatchParams",
- "ScoringFunctions",
- "ScoringFunctionsLlmAsJudgeScoringFnParams",
- "ScoringFunctionsRegexParserScoringFnParams",
- "ScoringFunctionsBasicScoringFnParams",
-]
-
-
-class ScoringScoreBatchParams(TypedDict, total=False):
- dataset_id: Required[str]
- """The ID of the dataset to score."""
-
- scoring_functions: Required[Dict[str, Optional[ScoringFunctions]]]
- """The scoring functions to use for the scoring."""
-
- save_results_dataset: bool
- """Whether to save the results to a dataset."""
-
-
-class ScoringFunctionsLlmAsJudgeScoringFnParams(TypedDict, total=False):
- """Parameters for LLM-as-judge scoring function configuration."""
-
- judge_model: Required[str]
-
- aggregation_functions: List[Literal["average", "weighted_average", "median", "categorical_count", "accuracy"]]
- """Aggregation functions to apply to the scores of each row"""
-
- judge_score_regexes: SequenceNotStr[str]
- """Regexes to extract the answer from generated response"""
-
- prompt_template: Optional[str]
-
- type: Literal["llm_as_judge"]
-
-
-class ScoringFunctionsRegexParserScoringFnParams(TypedDict, total=False):
- """Parameters for regex parser scoring function configuration."""
-
- aggregation_functions: List[Literal["average", "weighted_average", "median", "categorical_count", "accuracy"]]
- """Aggregation functions to apply to the scores of each row"""
-
- parsing_regexes: SequenceNotStr[str]
- """Regex to extract the answer from generated response"""
-
- type: Literal["regex_parser"]
-
-
-class ScoringFunctionsBasicScoringFnParams(TypedDict, total=False):
- """Parameters for basic scoring function configuration."""
-
- aggregation_functions: List[Literal["average", "weighted_average", "median", "categorical_count", "accuracy"]]
- """Aggregation functions to apply to the scores of each row"""
-
- type: Literal["basic"]
-
-
-ScoringFunctions: TypeAlias = Union[
- ScoringFunctionsLlmAsJudgeScoringFnParams,
- ScoringFunctionsRegexParserScoringFnParams,
- ScoringFunctionsBasicScoringFnParams,
-]
diff --git a/src/llama_stack_client/types/scoring_score_batch_response.py b/src/llama_stack_client/types/scoring_score_batch_response.py
deleted file mode 100644
index 2c06dbfa..00000000
--- a/src/llama_stack_client/types/scoring_score_batch_response.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, Optional
-
-from .._models import BaseModel
-from .shared.scoring_result import ScoringResult
-
-__all__ = ["ScoringScoreBatchResponse"]
-
-
-class ScoringScoreBatchResponse(BaseModel):
- """Response from batch scoring operations on datasets."""
-
- results: Dict[str, ScoringResult]
- """A map of scoring function name to ScoringResult"""
-
- dataset_id: Optional[str] = None
- """(Optional) The identifier of the dataset that was scored"""
diff --git a/src/llama_stack_client/types/scoring_score_params.py b/src/llama_stack_client/types/scoring_score_params.py
deleted file mode 100644
index 2d4b2167..00000000
--- a/src/llama_stack_client/types/scoring_score_params.py
+++ /dev/null
@@ -1,74 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, List, Union, Iterable, Optional
-from typing_extensions import Literal, Required, TypeAlias, TypedDict
-
-from .._types import SequenceNotStr
-
-__all__ = [
- "ScoringScoreParams",
- "ScoringFunctions",
- "ScoringFunctionsLlmAsJudgeScoringFnParams",
- "ScoringFunctionsRegexParserScoringFnParams",
- "ScoringFunctionsBasicScoringFnParams",
-]
-
-
-class ScoringScoreParams(TypedDict, total=False):
- input_rows: Required[Iterable[Dict[str, object]]]
- """The rows to score."""
-
- scoring_functions: Required[Dict[str, Optional[ScoringFunctions]]]
- """The scoring functions to use for the scoring."""
-
-
-class ScoringFunctionsLlmAsJudgeScoringFnParams(TypedDict, total=False):
- """Parameters for LLM-as-judge scoring function configuration."""
-
- judge_model: Required[str]
-
- aggregation_functions: List[Literal["average", "weighted_average", "median", "categorical_count", "accuracy"]]
- """Aggregation functions to apply to the scores of each row"""
-
- judge_score_regexes: SequenceNotStr[str]
- """Regexes to extract the answer from generated response"""
-
- prompt_template: Optional[str]
-
- type: Literal["llm_as_judge"]
-
-
-class ScoringFunctionsRegexParserScoringFnParams(TypedDict, total=False):
- """Parameters for regex parser scoring function configuration."""
-
- aggregation_functions: List[Literal["average", "weighted_average", "median", "categorical_count", "accuracy"]]
- """Aggregation functions to apply to the scores of each row"""
-
- parsing_regexes: SequenceNotStr[str]
- """Regex to extract the answer from generated response"""
-
- type: Literal["regex_parser"]
-
-
-class ScoringFunctionsBasicScoringFnParams(TypedDict, total=False):
- """Parameters for basic scoring function configuration."""
-
- aggregation_functions: List[Literal["average", "weighted_average", "median", "categorical_count", "accuracy"]]
- """Aggregation functions to apply to the scores of each row"""
-
- type: Literal["basic"]
-
-
-ScoringFunctions: TypeAlias = Union[
- ScoringFunctionsLlmAsJudgeScoringFnParams,
- ScoringFunctionsRegexParserScoringFnParams,
- ScoringFunctionsBasicScoringFnParams,
-]
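
A sketch of a payload matching the deleted ScoringScoreParams. Model names and regexes are illustrative; the dict values were Optional, so a None entry is shown too, presumably meaning "use the function's registered defaults".

```python
score_payload = {
    "input_rows": [{"question": "2 + 2?", "generated_answer": "The answer is 4"}],
    "scoring_functions": {
        "example::judge": {
            "type": "llm_as_judge",
            "judge_model": "example-judge-model",      # required for this variant
            "judge_score_regexes": [r"Score:\s*(\d)"],
            "aggregation_functions": ["average"],
        },
        "example::exact-match": None,  # Optional[...] allowed None per the schema
    },
}
```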
diff --git a/src/llama_stack_client/types/scoring_score_response.py b/src/llama_stack_client/types/scoring_score_response.py
deleted file mode 100644
index 6140a70e..00000000
--- a/src/llama_stack_client/types/scoring_score_response.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict
-
-from .._models import BaseModel
-from .shared.scoring_result import ScoringResult
-
-__all__ = ["ScoringScoreResponse"]
-
-
-class ScoringScoreResponse(BaseModel):
- """The response from scoring."""
-
- results: Dict[str, ScoringResult]
- """A map of scoring function name to ScoringResult."""
diff --git a/src/llama_stack_client/types/shared/scoring_result.py b/src/llama_stack_client/types/shared/scoring_result.py
deleted file mode 100644
index 2c4512e8..00000000
--- a/src/llama_stack_client/types/shared/scoring_result.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Dict, List
-
-from ..._models import BaseModel
-
-__all__ = ["ScoringResult"]
-
-
-class ScoringResult(BaseModel):
- """A scoring result for a single row."""
-
- aggregated_results: Dict[str, object]
- """Map of metric name to aggregated value"""
-
- score_rows: List[Dict[str, object]]
- """The scoring result for each row. Each row is a map of column name to value."""
diff --git a/src/llama_stack_client/types/shared_params/__init__.py b/src/llama_stack_client/types/shared_params/__init__.py
deleted file mode 100644
index 3cc535a5..00000000
--- a/src/llama_stack_client/types/shared_params/__init__.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .system_message import SystemMessage as SystemMessage
-from .sampling_params import SamplingParams as SamplingParams
diff --git a/src/llama_stack_client/types/shared_params/sampling_params.py b/src/llama_stack_client/types/shared_params/sampling_params.py
deleted file mode 100644
index f83ea8be..00000000
--- a/src/llama_stack_client/types/shared_params/sampling_params.py
+++ /dev/null
@@ -1,86 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Union, Optional
-from typing_extensions import Literal, Required, TypeAlias, TypedDict
-
-from ..._types import SequenceNotStr
-
-__all__ = [
- "SamplingParams",
- "Strategy",
- "StrategyGreedySamplingStrategy",
- "StrategyTopPSamplingStrategy",
- "StrategyTopKSamplingStrategy",
-]
-
-
-class StrategyGreedySamplingStrategy(TypedDict, total=False):
- """
- Greedy sampling strategy that selects the highest probability token at each step.
- """
-
- type: Literal["greedy"]
- """Must be 'greedy' to identify this sampling strategy."""
-
-
-class StrategyTopPSamplingStrategy(TypedDict, total=False):
- """
- Top-p (nucleus) sampling strategy that samples from the smallest set of tokens with cumulative probability >= p.
- """
-
- temperature: Required[float]
- """Controls randomness in sampling. Higher values increase randomness."""
-
- top_p: float
- """Cumulative probability threshold for nucleus sampling."""
-
- type: Literal["top_p"]
- """Must be 'top_p' to identify this sampling strategy."""
-
-
-class StrategyTopKSamplingStrategy(TypedDict, total=False):
- """Top-k sampling strategy that restricts sampling to the k most likely tokens."""
-
- top_k: Required[int]
- """Number of top tokens to consider for sampling. Must be at least 1."""
-
- type: Literal["top_k"]
- """Must be 'top_k' to identify this sampling strategy."""
-
-
-Strategy: TypeAlias = Union[StrategyGreedySamplingStrategy, StrategyTopPSamplingStrategy, StrategyTopKSamplingStrategy]
-
-
-class SamplingParams(TypedDict, total=False):
- """Sampling parameters for text generation."""
-
- max_tokens: Optional[int]
- """The maximum number of tokens that can be generated in the completion.
-
- The token count of your prompt plus max_tokens cannot exceed the model's context
- length.
- """
-
- repetition_penalty: Optional[float]
- """Number between -2.0 and 2.0.
-
- Positive values penalize new tokens based on whether they appear in the text so
- far.
- """
-
- stop: Optional[SequenceNotStr[str]]
- """Up to 4 sequences where the API will stop generating further tokens.
-
- The returned text will not contain the stop sequence.
- """
-
- strategy: Strategy
- """The sampling strategy to use."""
diff --git a/src/llama_stack_client/types/shared_params/system_message.py b/src/llama_stack_client/types/shared_params/system_message.py
deleted file mode 100644
index b092f9ba..00000000
--- a/src/llama_stack_client/types/shared_params/system_message.py
+++ /dev/null
@@ -1,114 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Union, Iterable, Optional
-from typing_extensions import Literal, Required, TypeAlias, TypedDict
-
-__all__ = [
- "SystemMessage",
- "Content",
- "ContentImageContentItemInput",
- "ContentImageContentItemInputImage",
- "ContentImageContentItemInputImageURL",
- "ContentTextContentItem",
- "ContentListImageContentItemInputTextContentItem",
- "ContentListImageContentItemInputTextContentItemImageContentItemInput",
- "ContentListImageContentItemInputTextContentItemImageContentItemInputImage",
- "ContentListImageContentItemInputTextContentItemImageContentItemInputImageURL",
- "ContentListImageContentItemInputTextContentItemTextContentItem",
-]
-
-
-class ContentImageContentItemInputImageURL(TypedDict, total=False):
- """A URL reference to external content."""
-
- uri: Required[str]
-
-
-class ContentImageContentItemInputImage(TypedDict, total=False):
- """A URL or a base64 encoded string"""
-
- data: Optional[str]
-
- url: Optional[ContentImageContentItemInputImageURL]
- """A URL reference to external content."""
-
-
-class ContentImageContentItemInput(TypedDict, total=False):
- """A image content item"""
-
- image: Required[ContentImageContentItemInputImage]
- """A URL or a base64 encoded string"""
-
- type: Literal["image"]
-
-
-class ContentTextContentItem(TypedDict, total=False):
- """A text content item"""
-
- text: Required[str]
-
- type: Literal["text"]
-
-
-class ContentListImageContentItemInputTextContentItemImageContentItemInputImageURL(TypedDict, total=False):
- """A URL reference to external content."""
-
- uri: Required[str]
-
-
-class ContentListImageContentItemInputTextContentItemImageContentItemInputImage(TypedDict, total=False):
- """A URL or a base64 encoded string"""
-
- data: Optional[str]
-
- url: Optional[ContentListImageContentItemInputTextContentItemImageContentItemInputImageURL]
- """A URL reference to external content."""
-
-
-class ContentListImageContentItemInputTextContentItemImageContentItemInput(TypedDict, total=False):
- """A image content item"""
-
- image: Required[ContentListImageContentItemInputTextContentItemImageContentItemInputImage]
- """A URL or a base64 encoded string"""
-
- type: Literal["image"]
-
-
-class ContentListImageContentItemInputTextContentItemTextContentItem(TypedDict, total=False):
- """A text content item"""
-
- text: Required[str]
-
- type: Literal["text"]
-
-
-ContentListImageContentItemInputTextContentItem: TypeAlias = Union[
- ContentListImageContentItemInputTextContentItemImageContentItemInput,
- ContentListImageContentItemInputTextContentItemTextContentItem,
-]
-
-Content: TypeAlias = Union[
- str, ContentImageContentItemInput, ContentTextContentItem, Iterable[ContentListImageContentItemInputTextContentItem]
-]
-
-
-class SystemMessage(TypedDict, total=False):
- """A system message providing instructions or context to the model."""
-
- content: Required[Content]
- """The content of the 'system prompt'.
-
- If multiple system messages are provided, they are concatenated. The underlying
- Llama Stack code may also add other system messages.
- """
-
- role: Literal["system"]
- """Must be 'system' to identify this as a system message."""
diff --git a/src/llama_stack_client/types/vector_stores/file_batch_create_params.py b/src/llama_stack_client/types/vector_stores/file_batch_create_params.py
deleted file mode 100644
index 920da89c..00000000
--- a/src/llama_stack_client/types/vector_stores/file_batch_create_params.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Dict, Union, Optional
-from typing_extensions import Literal, Required, TypeAlias, TypedDict
-
-from ..._types import SequenceNotStr
-
-__all__ = [
- "FileBatchCreateParams",
- "ChunkingStrategy",
- "ChunkingStrategyVectorStoreChunkingStrategyAuto",
- "ChunkingStrategyVectorStoreChunkingStrategyStatic",
- "ChunkingStrategyVectorStoreChunkingStrategyStaticStatic",
- "ChunkingStrategyVectorStoreChunkingStrategyContextual",
- "ChunkingStrategyVectorStoreChunkingStrategyContextualContextual",
-]
-
-
-class FileBatchCreateParams(TypedDict, total=False):
- file_ids: Required[SequenceNotStr[str]]
-
- attributes: Optional[Dict[str, object]]
-
- chunking_strategy: Optional[ChunkingStrategy]
- """Automatic chunking strategy for vector store files."""
-
-
-class ChunkingStrategyVectorStoreChunkingStrategyAuto(TypedDict, total=False):
- """Automatic chunking strategy for vector store files."""
-
- type: Literal["auto"]
-
-
-class ChunkingStrategyVectorStoreChunkingStrategyStaticStatic(TypedDict, total=False):
- """Configuration for static chunking strategy."""
-
- chunk_overlap_tokens: int
-
- max_chunk_size_tokens: int
-
-
-class ChunkingStrategyVectorStoreChunkingStrategyStatic(TypedDict, total=False):
- """Static chunking strategy with configurable parameters."""
-
- static: Required[ChunkingStrategyVectorStoreChunkingStrategyStaticStatic]
- """Configuration for static chunking strategy."""
-
- type: Literal["static"]
-
-
-class ChunkingStrategyVectorStoreChunkingStrategyContextualContextual(TypedDict, total=False):
- """Configuration for contextual chunking."""
-
- chunk_overlap_tokens: int
- """Tokens to overlap between adjacent chunks.
-
- Must be less than max_chunk_size_tokens.
- """
-
- context_prompt: str
- """Prompt template for contextual retrieval.
-
- Uses WHOLE_DOCUMENT and CHUNK_CONTENT placeholders wrapped in double curly
- braces.
- """
-
- max_chunk_size_tokens: int
- """Maximum tokens per chunk. Suggested ~700 to allow room for prepended context."""
-
- max_concurrency: Optional[int]
- """Maximum concurrent LLM calls. Falls back to config default if not provided."""
-
- model_id: Optional[str]
- """LLM model for generating context.
-
- Falls back to VectorStoresConfig.contextual_retrieval_params.model if not
- provided.
- """
-
- timeout_seconds: Optional[int]
- """Timeout per LLM call in seconds. Falls back to config default if not provided."""
-
-
-class ChunkingStrategyVectorStoreChunkingStrategyContextual(TypedDict, total=False):
- """
- Contextual chunking strategy that uses an LLM to situate chunks within the document.
- """
-
- contextual: Required[ChunkingStrategyVectorStoreChunkingStrategyContextualContextual]
- """Configuration for contextual chunking."""
-
- type: Literal["contextual"]
- """Strategy type identifier."""
-
-
-ChunkingStrategy: TypeAlias = Union[
- ChunkingStrategyVectorStoreChunkingStrategyAuto,
- ChunkingStrategyVectorStoreChunkingStrategyStatic,
- ChunkingStrategyVectorStoreChunkingStrategyContextual,
-]
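
A sketch of FileBatchCreateParams-shaped payloads for the static and contextual chunking strategies above; file ids and the model id are placeholders.

```python
static_batch = {
    "file_ids": ["file-abc", "file-def"],
    "chunking_strategy": {
        "type": "static",
        "static": {"max_chunk_size_tokens": 700, "chunk_overlap_tokens": 100},
    },
}

contextual_batch = {
    "file_ids": ["file-abc"],
    "chunking_strategy": {
        "type": "contextual",
        "contextual": {
            "max_chunk_size_tokens": 700,   # ~700 leaves room for prepended context
            "chunk_overlap_tokens": 100,    # must stay below max_chunk_size_tokens
            "model_id": "example-context-model",  # falls back to config default
        },
    },
}
```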
diff --git a/src/llama_stack_client/__init__.py b/src/ogx_client/__init__.py
similarity index 90%
rename from src/llama_stack_client/__init__.py
rename to src/ogx_client/__init__.py
index 572266f9..5966b3c9 100644
--- a/src/llama_stack_client/__init__.py
+++ b/src/ogx_client/__init__.py
@@ -15,12 +15,12 @@
Client,
Stream,
Timeout,
+ OgxClient,
Transport,
AsyncClient,
AsyncStream,
+ AsyncOgxClient,
RequestOptions,
- LlamaStackClient,
- AsyncLlamaStackClient,
)
from ._models import BaseModel
from ._version import __title__, __version__
@@ -31,13 +31,13 @@
ConflictError,
NotFoundError,
APIStatusError,
+ OgxClientError,
RateLimitError,
APITimeoutError,
BadRequestError,
APIConnectionError,
AuthenticationError,
InternalServerError,
- LlamaStackClientError,
PermissionDeniedError,
UnprocessableEntityError,
APIResponseValidationError,
@@ -63,7 +63,7 @@
"not_given",
"Omit",
"omit",
- "LlamaStackClientError",
+ "OgxClientError",
"APIError",
"APIStatusError",
"APITimeoutError",
@@ -83,8 +83,8 @@
"AsyncClient",
"Stream",
"AsyncStream",
- "LlamaStackClient",
- "AsyncLlamaStackClient",
+ "OgxClient",
+ "AsyncOgxClient",
"file_from_path",
"BaseModel",
"DEFAULT_TIMEOUT",
@@ -103,12 +103,12 @@
# Update the __module__ attribute for exported symbols so that
# error messages point to this module instead of the module
# it was originally defined in, e.g.
-# llama_stack_client._exceptions.NotFoundError -> llama_stack_client.NotFoundError
+# ogx_client._exceptions.NotFoundError -> ogx_client.NotFoundError
__locals = locals()
for __name in __all__:
if not __name.startswith("__"):
try:
- __locals[__name].__module__ = "llama_stack_client"
+ __locals[__name].__module__ = "ogx_client"
except (TypeError, AttributeError):
# Some of our exported symbols are builtins which we can't set attributes for.
pass
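
Downstream imports change in lockstep with this rename. A sketch assuming the renamed package is installed:

```python
# Before: from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
from ogx_client import AsyncOgxClient, OgxClient, OgxClientError

# api_key and base_url now fall back to the OGX_CLIENT_API_KEY and
# OGX_CLIENT_BASE_URL environment variables (see the _client.py diff below).
client = OgxClient()
async_client = AsyncOgxClient()
```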
diff --git a/src/llama_stack_client/_base_client.py b/src/ogx_client/_base_client.py
similarity index 99%
rename from src/llama_stack_client/_base_client.py
rename to src/ogx_client/_base_client.py
index 289bec6a..557dadb1 100644
--- a/src/llama_stack_client/_base_client.py
+++ b/src/ogx_client/_base_client.py
@@ -399,7 +399,7 @@ def __init__(
if max_retries is None: # pyright: ignore[reportUnnecessaryComparison]
raise TypeError(
- "max_retries cannot be None. If you want to disable retries, pass `0`; if you want unlimited retries, pass `math.inf` or a very high number; if you want the default behavior, pass `llama_stack_client.DEFAULT_MAX_RETRIES`"
+ "max_retries cannot be None. If you want to disable retries, pass `0`; if you want unlimited retries, pass `math.inf` or a very high number; if you want the default behavior, pass `ogx_client.DEFAULT_MAX_RETRIES`"
)
def _enforce_trailing_slash(self, url: URL) -> URL:
@@ -546,6 +546,10 @@ def _build_request(
files = cast(HttpxRequestFiles, ForceMultipartDict())
prepared_url = self._prepare_url(options.url)
+ # preserve hard-coded query params from the url
+ if params and prepared_url.query:
+ params = {**dict(prepared_url.params.items()), **params}
+ prepared_url = prepared_url.copy_with(raw_path=prepared_url.raw_path.split(b"?", 1)[0])
if "_" in prepared_url.host:
# work around https://github.com/encode/httpx/discussions/2880
kwargs["extensions"] = {"sni_hostname": prepared_url.host.replace("_", "-")}
diff --git a/src/llama_stack_client/_client.py b/src/ogx_client/_client.py
similarity index 81%
rename from src/llama_stack_client/_client.py
rename to src/ogx_client/_client.py
index de6801c6..67638be5 100644
--- a/src/llama_stack_client/_client.py
+++ b/src/ogx_client/_client.py
@@ -1,15 +1,8 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
import os
-import json
from typing import TYPE_CHECKING, Any, Mapping
from typing_extensions import Self, override
@@ -26,7 +19,11 @@
RequestOptions,
not_given,
)
-from ._utils import is_given, get_async_library
+from ._utils import (
+ is_given,
+ is_mapping_t,
+ get_async_library,
+)
from ._compat import cached_property
from ._version import __version__
from ._streaming import Stream as Stream, AsyncStream as AsyncStream
@@ -39,7 +36,6 @@
if TYPE_CHECKING:
from .resources import (
- beta,
chat,
alpha,
files,
@@ -49,7 +45,6 @@
batches,
inspect,
prompts,
- scoring,
shields,
providers,
responses,
@@ -59,16 +54,13 @@
moderations,
conversations,
vector_stores,
- scoring_functions,
)
from .resources.files import FilesResource, AsyncFilesResource
from .resources.routes import RoutesResource, AsyncRoutesResource
from .resources.safety import SafetyResource, AsyncSafetyResource
from .resources.batches import BatchesResource, AsyncBatchesResource
from .resources.inspect import InspectResource, AsyncInspectResource
- from .resources.scoring import ScoringResource, AsyncScoringResource
from .resources.shields import ShieldsResource, AsyncShieldsResource
- from .resources.beta.beta import BetaResource, AsyncBetaResource
from .resources.chat.chat import ChatResource, AsyncChatResource
from .resources.providers import ProvidersResource, AsyncProvidersResource
from .resources.vector_io import VectorIoResource, AsyncVectorIoResource
@@ -78,7 +70,6 @@
from .resources.moderations import ModerationsResource, AsyncModerationsResource
from .resources.models.models import ModelsResource, AsyncModelsResource
from .resources.prompts.prompts import PromptsResource, AsyncPromptsResource
- from .resources.scoring_functions import ScoringFunctionsResource, AsyncScoringFunctionsResource
from .resources.responses.responses import ResponsesResource, AsyncResponsesResource
from .resources.conversations.conversations import ConversationsResource, AsyncConversationsResource
from .resources.vector_stores.vector_stores import VectorStoresResource, AsyncVectorStoresResource
@@ -88,14 +79,14 @@
"Transport",
"ProxiesTypes",
"RequestOptions",
- "LlamaStackClient",
- "AsyncLlamaStackClient",
+ "OgxClient",
+ "AsyncOgxClient",
"Client",
"AsyncClient",
]
-class LlamaStackClient(SyncAPIClient):
+class OgxClient(SyncAPIClient):
# client options
api_key: str | None
@@ -121,25 +112,28 @@ def __init__(
# outlining your use-case to help us decide if it should be
# part of our public interface in the future.
_strict_response_validation: bool = False,
- provider_data: Mapping[str, Any] | None = None,
) -> None:
- """Construct a new synchronous LlamaStackClient client instance.
+ """Construct a new synchronous OgxClient client instance.
- This automatically infers the `api_key` argument from the `LLAMA_STACK_CLIENT_API_KEY` environment variable if it is not provided.
+ This automatically infers the `api_key` argument from the `OGX_CLIENT_API_KEY` environment variable if it is not provided.
"""
if api_key is None:
- api_key = os.environ.get("LLAMA_STACK_CLIENT_API_KEY")
+ api_key = os.environ.get("OGX_CLIENT_API_KEY")
self.api_key = api_key
if base_url is None:
- base_url = os.environ.get("LLAMA_STACK_CLIENT_BASE_URL")
+ base_url = os.environ.get("OGX_CLIENT_BASE_URL")
if base_url is None:
- base_url = f"http://any-hosted-llama-stack.com"
+ base_url = f"http://any-hosted-ogx.com"
- custom_headers = default_headers or {}
- custom_headers["X-LlamaStack-Client-Version"] = __version__
- if provider_data is not None:
- custom_headers["X-LlamaStack-Provider-Data"] = json.dumps(provider_data)
+ custom_headers_env = os.environ.get("OGX_CLIENT_CUSTOM_HEADERS")
+ if custom_headers_env is not None:
+ parsed: dict[str, str] = {}
+ for line in custom_headers_env.split("\n"):
+ colon = line.find(":")
+ if colon >= 0:
+ parsed[line[:colon].strip()] = line[colon + 1 :].strip()
+ default_headers = {**parsed, **(default_headers if is_mapping_t(default_headers) else {})}
super().__init__(
version=__version__,
@@ -147,7 +141,7 @@ def __init__(
max_retries=max_retries,
timeout=timeout,
http_client=http_client,
- custom_headers=custom_headers,
+ custom_headers=default_headers,
custom_query=default_query,
_strict_response_validation=_strict_response_validation,
)
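
The constructor above replaces the removed `provider_data` argument with credential and header resolution from the environment. A minimal construction sketch (the env values below and the `ogx_client` import path are assumptions for illustration, not part of the patch):

    import os

    # newline-separated "Name: value" pairs; each line is split on its first colon
    os.environ["OGX_CLIENT_CUSTOM_HEADERS"] = "X-Trace-Id: abc123\nX-Tenant: acme"
    os.environ["OGX_CLIENT_API_KEY"] = "sk-example"
    os.environ["OGX_CLIENT_BASE_URL"] = "http://localhost:8321"

    from ogx_client import OgxClient

    client = OgxClient()  # api_key and base_url are inferred from OGX_CLIENT_*

    # Explicit default_headers win over env-parsed headers, because the merge
    # above spreads the env-parsed mapping first: {**parsed, **default_headers}
    client = OgxClient(default_headers={"X-Tenant": "override"})
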
@@ -156,6 +150,9 @@ def __init__(
@cached_property
def responses(self) -> ResponsesResource:
+ """
+ OpenAI Responses API for agent orchestration with tool use, multi-turn conversations, and background processing.
+ """
from .resources.responses import ResponsesResource
return ResponsesResource(self)
@@ -177,7 +174,7 @@ def conversations(self) -> ConversationsResource:
@cached_property
def inspect(self) -> InspectResource:
"""
- APIs for inspecting the Llama Stack service, including health status, available API routes with methods and implementing providers.
+ APIs for inspecting the OGX service, including health status, available API routes with methods and implementing providers.
"""
from .resources.inspect import InspectResource
@@ -185,8 +182,7 @@ def inspect(self) -> InspectResource:
@cached_property
def embeddings(self) -> EmbeddingsResource:
- """
- Llama Stack Inference API for generating completions, chat completions, and embeddings.
+ """OGX Inference API for generating completions, chat completions, and embeddings.
This API provides the raw interface to the underlying models. Three kinds of models are supported:
- LLM models: these models generate "raw" and "chat" (conversational) completions.
@@ -205,8 +201,7 @@ def chat(self) -> ChatResource:
@cached_property
def completions(self) -> CompletionsResource:
- """
- Llama Stack Inference API for generating completions, chat completions, and embeddings.
+ """OGX Inference API for generating completions, chat completions, and embeddings.
This API provides the raw interface to the underlying models. Three kinds of models are supported:
- LLM models: these models generate "raw" and "chat" (conversational) completions.
@@ -247,7 +242,7 @@ def providers(self) -> ProvidersResource:
@cached_property
def routes(self) -> RoutesResource:
"""
- APIs for inspecting the Llama Stack service, including health status, available API routes with methods and implementing providers.
+ APIs for inspecting the OGX service, including health status, available API routes with methods and implementing providers.
"""
from .resources.routes import RoutesResource
@@ -273,23 +268,9 @@ def shields(self) -> ShieldsResource:
return ShieldsResource(self)
- @cached_property
- def scoring(self) -> ScoringResource:
- from .resources.scoring import ScoringResource
-
- return ScoringResource(self)
-
- @cached_property
- def scoring_functions(self) -> ScoringFunctionsResource:
- from .resources.scoring_functions import ScoringFunctionsResource
-
- return ScoringFunctionsResource(self)
-
@cached_property
def files(self) -> FilesResource:
- """
- This API is used to upload documents that can be used with other Llama Stack APIs.
- """
+ """This API is used to upload documents that can be used with other OGX APIs."""
from .resources.files import FilesResource
return FilesResource(self)
@@ -315,18 +296,12 @@ def alpha(self) -> AlphaResource:
return AlphaResource(self)
@cached_property
- def beta(self) -> BetaResource:
- from .resources.beta import BetaResource
-
- return BetaResource(self)
-
- @cached_property
- def with_raw_response(self) -> LlamaStackClientWithRawResponse:
- return LlamaStackClientWithRawResponse(self)
+ def with_raw_response(self) -> OgxClientWithRawResponse:
+ return OgxClientWithRawResponse(self)
@cached_property
- def with_streaming_response(self) -> LlamaStackClientWithStreamedResponse:
- return LlamaStackClientWithStreamedResponse(self)
+ def with_streaming_response(self) -> OgxClientWithStreamedResponse:
+ return OgxClientWithStreamedResponse(self)
@property
@override
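
The `with_raw_response` / `with_streaming_response` accessors keep the usual Stainless wrapper pattern. A hedged usage sketch; the resource and method names (`models.list()`) are illustrative assumptions, not confirmed by this hunk:

    from ogx_client import OgxClient

    client = OgxClient()

    # raw-response variant: inspect HTTP metadata first, then parse explicitly
    raw = client.with_raw_response.models.list()
    print(raw.headers.get("x-request-id"))
    models = raw.parse()

    # streaming-response variant: the body is read lazily inside the context
    with client.with_streaming_response.models.list() as response:
        for line in response.iter_lines():
            print(line)
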
@@ -435,7 +410,7 @@ def _make_status_error(
return APIStatusError(err_msg, response=response, body=body)
-class AsyncLlamaStackClient(AsyncAPIClient):
+class AsyncOgxClient(AsyncAPIClient):
# client options
api_key: str | None
@@ -461,25 +436,28 @@ def __init__(
# outlining your use-case to help us decide if it should be
# part of our public interface in the future.
_strict_response_validation: bool = False,
- provider_data: Mapping[str, Any] | None = None,
) -> None:
- """Construct a new async AsyncLlamaStackClient client instance.
+ """Construct a new async AsyncOgxClient client instance.
- This automatically infers the `api_key` argument from the `LLAMA_STACK_CLIENT_API_KEY` environment variable if it is not provided.
+ This automatically infers the `api_key` argument from the `OGX_CLIENT_API_KEY` environment variable if it is not provided.
"""
if api_key is None:
- api_key = os.environ.get("LLAMA_STACK_CLIENT_API_KEY")
+ api_key = os.environ.get("OGX_CLIENT_API_KEY")
self.api_key = api_key
if base_url is None:
- base_url = os.environ.get("LLAMA_STACK_CLIENT_BASE_URL")
+ base_url = os.environ.get("OGX_CLIENT_BASE_URL")
if base_url is None:
- base_url = f"http://any-hosted-llama-stack.com"
+ base_url = f"http://any-hosted-ogx.com"
- custom_headers = default_headers or {}
- custom_headers["X-LlamaStack-Client-Version"] = __version__
- if provider_data is not None:
- custom_headers["X-LlamaStack-Provider-Data"] = json.dumps(provider_data)
+ custom_headers_env = os.environ.get("OGX_CLIENT_CUSTOM_HEADERS")
+ if custom_headers_env is not None:
+ parsed: dict[str, str] = {}
+ for line in custom_headers_env.split("\n"):
+ colon = line.find(":")
+ if colon >= 0:
+ parsed[line[:colon].strip()] = line[colon + 1 :].strip()
+ default_headers = {**parsed, **(default_headers if is_mapping_t(default_headers) else {})}
super().__init__(
version=__version__,
@@ -487,7 +465,7 @@ def __init__(
max_retries=max_retries,
timeout=timeout,
http_client=http_client,
- custom_headers=custom_headers,
+ custom_headers=default_headers,
custom_query=default_query,
_strict_response_validation=_strict_response_validation,
)
@@ -496,6 +474,9 @@ def __init__(
@cached_property
def responses(self) -> AsyncResponsesResource:
+ """
+ OpenAI Responses API for agent orchestration with tool use, multi-turn conversations, and background processing.
+ """
from .resources.responses import AsyncResponsesResource
return AsyncResponsesResource(self)
@@ -517,7 +498,7 @@ def conversations(self) -> AsyncConversationsResource:
@cached_property
def inspect(self) -> AsyncInspectResource:
"""
- APIs for inspecting the Llama Stack service, including health status, available API routes with methods and implementing providers.
+ APIs for inspecting the OGX service, including health status, available API routes with methods and implementing providers.
"""
from .resources.inspect import AsyncInspectResource
@@ -525,8 +506,7 @@ def inspect(self) -> AsyncInspectResource:
@cached_property
def embeddings(self) -> AsyncEmbeddingsResource:
- """
- Llama Stack Inference API for generating completions, chat completions, and embeddings.
+ """OGX Inference API for generating completions, chat completions, and embeddings.
This API provides the raw interface to the underlying models. Three kinds of models are supported:
- LLM models: these models generate "raw" and "chat" (conversational) completions.
@@ -545,8 +525,7 @@ def chat(self) -> AsyncChatResource:
@cached_property
def completions(self) -> AsyncCompletionsResource:
- """
- Llama Stack Inference API for generating completions, chat completions, and embeddings.
+ """OGX Inference API for generating completions, chat completions, and embeddings.
This API provides the raw interface to the underlying models. Three kinds of models are supported:
- LLM models: these models generate "raw" and "chat" (conversational) completions.
@@ -587,7 +566,7 @@ def providers(self) -> AsyncProvidersResource:
@cached_property
def routes(self) -> AsyncRoutesResource:
"""
- APIs for inspecting the Llama Stack service, including health status, available API routes with methods and implementing providers.
+ APIs for inspecting the OGX service, including health status, available API routes with methods and implementing providers.
"""
from .resources.routes import AsyncRoutesResource
@@ -613,23 +592,9 @@ def shields(self) -> AsyncShieldsResource:
return AsyncShieldsResource(self)
- @cached_property
- def scoring(self) -> AsyncScoringResource:
- from .resources.scoring import AsyncScoringResource
-
- return AsyncScoringResource(self)
-
- @cached_property
- def scoring_functions(self) -> AsyncScoringFunctionsResource:
- from .resources.scoring_functions import AsyncScoringFunctionsResource
-
- return AsyncScoringFunctionsResource(self)
-
@cached_property
def files(self) -> AsyncFilesResource:
- """
- This API is used to upload documents that can be used with other Llama Stack APIs.
- """
+ """This API is used to upload documents that can be used with other OGX APIs."""
from .resources.files import AsyncFilesResource
return AsyncFilesResource(self)
@@ -655,18 +620,12 @@ def alpha(self) -> AsyncAlphaResource:
return AsyncAlphaResource(self)
@cached_property
- def beta(self) -> AsyncBetaResource:
- from .resources.beta import AsyncBetaResource
-
- return AsyncBetaResource(self)
-
- @cached_property
- def with_raw_response(self) -> AsyncLlamaStackClientWithRawResponse:
- return AsyncLlamaStackClientWithRawResponse(self)
+ def with_raw_response(self) -> AsyncOgxClientWithRawResponse:
+ return AsyncOgxClientWithRawResponse(self)
@cached_property
- def with_streaming_response(self) -> AsyncLlamaStackClientWithStreamedResponse:
- return AsyncLlamaStackClientWithStreamedResponse(self)
+ def with_streaming_response(self) -> AsyncOgxClientWithStreamedResponse:
+ return AsyncOgxClientWithStreamedResponse(self)
@property
@override
@@ -775,14 +734,17 @@ def _make_status_error(
return APIStatusError(err_msg, response=response, body=body)
-class LlamaStackClientWithRawResponse:
- _client: LlamaStackClient
+class OgxClientWithRawResponse:
+ _client: OgxClient
- def __init__(self, client: LlamaStackClient) -> None:
+ def __init__(self, client: OgxClient) -> None:
self._client = client
@cached_property
def responses(self) -> responses.ResponsesResourceWithRawResponse:
+ """
+ OpenAI Responses API for agent orchestration with tool use, multi-turn conversations, and background processing.
+ """
from .resources.responses import ResponsesResourceWithRawResponse
return ResponsesResourceWithRawResponse(self._client.responses)
@@ -804,7 +766,7 @@ def conversations(self) -> conversations.ConversationsResourceWithRawResponse:
@cached_property
def inspect(self) -> inspect.InspectResourceWithRawResponse:
"""
- APIs for inspecting the Llama Stack service, including health status, available API routes with methods and implementing providers.
+ APIs for inspecting the OGX service, including health status, available API routes with methods and implementing providers.
"""
from .resources.inspect import InspectResourceWithRawResponse
@@ -812,8 +774,7 @@ def inspect(self) -> inspect.InspectResourceWithRawResponse:
@cached_property
def embeddings(self) -> embeddings.EmbeddingsResourceWithRawResponse:
- """
- Llama Stack Inference API for generating completions, chat completions, and embeddings.
+ """OGX Inference API for generating completions, chat completions, and embeddings.
This API provides the raw interface to the underlying models. Three kinds of models are supported:
- LLM models: these models generate "raw" and "chat" (conversational) completions.
@@ -832,8 +793,7 @@ def chat(self) -> chat.ChatResourceWithRawResponse:
@cached_property
def completions(self) -> completions.CompletionsResourceWithRawResponse:
- """
- Llama Stack Inference API for generating completions, chat completions, and embeddings.
+ """OGX Inference API for generating completions, chat completions, and embeddings.
This API provides the raw interface to the underlying models. Three kinds of models are supported:
- LLM models: these models generate "raw" and "chat" (conversational) completions.
@@ -874,7 +834,7 @@ def providers(self) -> providers.ProvidersResourceWithRawResponse:
@cached_property
def routes(self) -> routes.RoutesResourceWithRawResponse:
"""
- APIs for inspecting the Llama Stack service, including health status, available API routes with methods and implementing providers.
+ APIs for inspecting the OGX service, including health status, available API routes with methods and implementing providers.
"""
from .resources.routes import RoutesResourceWithRawResponse
@@ -900,23 +860,9 @@ def shields(self) -> shields.ShieldsResourceWithRawResponse:
return ShieldsResourceWithRawResponse(self._client.shields)
- @cached_property
- def scoring(self) -> scoring.ScoringResourceWithRawResponse:
- from .resources.scoring import ScoringResourceWithRawResponse
-
- return ScoringResourceWithRawResponse(self._client.scoring)
-
- @cached_property
- def scoring_functions(self) -> scoring_functions.ScoringFunctionsResourceWithRawResponse:
- from .resources.scoring_functions import ScoringFunctionsResourceWithRawResponse
-
- return ScoringFunctionsResourceWithRawResponse(self._client.scoring_functions)
-
@cached_property
def files(self) -> files.FilesResourceWithRawResponse:
- """
- This API is used to upload documents that can be used with other Llama Stack APIs.
- """
+ """This API is used to upload documents that can be used with other OGX APIs."""
from .resources.files import FilesResourceWithRawResponse
return FilesResourceWithRawResponse(self._client.files)
@@ -941,21 +887,18 @@ def alpha(self) -> alpha.AlphaResourceWithRawResponse:
return AlphaResourceWithRawResponse(self._client.alpha)
- @cached_property
- def beta(self) -> beta.BetaResourceWithRawResponse:
- from .resources.beta import BetaResourceWithRawResponse
-
- return BetaResourceWithRawResponse(self._client.beta)
+class AsyncOgxClientWithRawResponse:
+ _client: AsyncOgxClient
-class AsyncLlamaStackClientWithRawResponse:
- _client: AsyncLlamaStackClient
-
- def __init__(self, client: AsyncLlamaStackClient) -> None:
+ def __init__(self, client: AsyncOgxClient) -> None:
self._client = client
@cached_property
def responses(self) -> responses.AsyncResponsesResourceWithRawResponse:
+ """
+ OpenAI Responses API for agent orchestration with tool use, multi-turn conversations, and background processing.
+ """
from .resources.responses import AsyncResponsesResourceWithRawResponse
return AsyncResponsesResourceWithRawResponse(self._client.responses)
@@ -977,7 +920,7 @@ def conversations(self) -> conversations.AsyncConversationsResourceWithRawRespon
@cached_property
def inspect(self) -> inspect.AsyncInspectResourceWithRawResponse:
"""
- APIs for inspecting the Llama Stack service, including health status, available API routes with methods and implementing providers.
+ APIs for inspecting the OGX service, including health status, available API routes with methods and implementing providers.
"""
from .resources.inspect import AsyncInspectResourceWithRawResponse
@@ -985,8 +928,7 @@ def inspect(self) -> inspect.AsyncInspectResourceWithRawResponse:
@cached_property
def embeddings(self) -> embeddings.AsyncEmbeddingsResourceWithRawResponse:
- """
- Llama Stack Inference API for generating completions, chat completions, and embeddings.
+ """OGX Inference API for generating completions, chat completions, and embeddings.
This API provides the raw interface to the underlying models. Three kinds of models are supported:
- LLM models: these models generate "raw" and "chat" (conversational) completions.
@@ -1005,8 +947,7 @@ def chat(self) -> chat.AsyncChatResourceWithRawResponse:
@cached_property
def completions(self) -> completions.AsyncCompletionsResourceWithRawResponse:
- """
- Llama Stack Inference API for generating completions, chat completions, and embeddings.
+ """OGX Inference API for generating completions, chat completions, and embeddings.
This API provides the raw interface to the underlying models. Three kinds of models are supported:
- LLM models: these models generate "raw" and "chat" (conversational) completions.
@@ -1047,7 +988,7 @@ def providers(self) -> providers.AsyncProvidersResourceWithRawResponse:
@cached_property
def routes(self) -> routes.AsyncRoutesResourceWithRawResponse:
"""
- APIs for inspecting the Llama Stack service, including health status, available API routes with methods and implementing providers.
+ APIs for inspecting the OGX service, including health status, available API routes with methods and implementing providers.
"""
from .resources.routes import AsyncRoutesResourceWithRawResponse
@@ -1073,23 +1014,9 @@ def shields(self) -> shields.AsyncShieldsResourceWithRawResponse:
return AsyncShieldsResourceWithRawResponse(self._client.shields)
- @cached_property
- def scoring(self) -> scoring.AsyncScoringResourceWithRawResponse:
- from .resources.scoring import AsyncScoringResourceWithRawResponse
-
- return AsyncScoringResourceWithRawResponse(self._client.scoring)
-
- @cached_property
- def scoring_functions(self) -> scoring_functions.AsyncScoringFunctionsResourceWithRawResponse:
- from .resources.scoring_functions import AsyncScoringFunctionsResourceWithRawResponse
-
- return AsyncScoringFunctionsResourceWithRawResponse(self._client.scoring_functions)
-
@cached_property
def files(self) -> files.AsyncFilesResourceWithRawResponse:
- """
- This API is used to upload documents that can be used with other Llama Stack APIs.
- """
+ """This API is used to upload documents that can be used with other OGX APIs."""
from .resources.files import AsyncFilesResourceWithRawResponse
return AsyncFilesResourceWithRawResponse(self._client.files)
@@ -1114,21 +1041,18 @@ def alpha(self) -> alpha.AsyncAlphaResourceWithRawResponse:
return AsyncAlphaResourceWithRawResponse(self._client.alpha)
- @cached_property
- def beta(self) -> beta.AsyncBetaResourceWithRawResponse:
- from .resources.beta import AsyncBetaResourceWithRawResponse
-
- return AsyncBetaResourceWithRawResponse(self._client.beta)
+class OgxClientWithStreamedResponse:
+ _client: OgxClient
-class LlamaStackClientWithStreamedResponse:
- _client: LlamaStackClient
-
- def __init__(self, client: LlamaStackClient) -> None:
+ def __init__(self, client: OgxClient) -> None:
self._client = client
@cached_property
def responses(self) -> responses.ResponsesResourceWithStreamingResponse:
+ """
+ OpenAI Responses API for agent orchestration with tool use, multi-turn conversations, and background processing.
+ """
from .resources.responses import ResponsesResourceWithStreamingResponse
return ResponsesResourceWithStreamingResponse(self._client.responses)
@@ -1150,7 +1074,7 @@ def conversations(self) -> conversations.ConversationsResourceWithStreamingRespo
@cached_property
def inspect(self) -> inspect.InspectResourceWithStreamingResponse:
"""
- APIs for inspecting the Llama Stack service, including health status, available API routes with methods and implementing providers.
+ APIs for inspecting the OGX service, including health status, available API routes with methods and implementing providers.
"""
from .resources.inspect import InspectResourceWithStreamingResponse
@@ -1158,8 +1082,7 @@ def inspect(self) -> inspect.InspectResourceWithStreamingResponse:
@cached_property
def embeddings(self) -> embeddings.EmbeddingsResourceWithStreamingResponse:
- """
- Llama Stack Inference API for generating completions, chat completions, and embeddings.
+ """OGX Inference API for generating completions, chat completions, and embeddings.
This API provides the raw interface to the underlying models. Three kinds of models are supported:
- LLM models: these models generate "raw" and "chat" (conversational) completions.
@@ -1178,8 +1101,7 @@ def chat(self) -> chat.ChatResourceWithStreamingResponse:
@cached_property
def completions(self) -> completions.CompletionsResourceWithStreamingResponse:
- """
- Llama Stack Inference API for generating completions, chat completions, and embeddings.
+ """OGX Inference API for generating completions, chat completions, and embeddings.
This API provides the raw interface to the underlying models. Three kinds of models are supported:
- LLM models: these models generate "raw" and "chat" (conversational) completions.
@@ -1220,7 +1142,7 @@ def providers(self) -> providers.ProvidersResourceWithStreamingResponse:
@cached_property
def routes(self) -> routes.RoutesResourceWithStreamingResponse:
"""
- APIs for inspecting the Llama Stack service, including health status, available API routes with methods and implementing providers.
+ APIs for inspecting the OGX service, including health status, available API routes with methods and implementing providers.
"""
from .resources.routes import RoutesResourceWithStreamingResponse
@@ -1246,23 +1168,9 @@ def shields(self) -> shields.ShieldsResourceWithStreamingResponse:
return ShieldsResourceWithStreamingResponse(self._client.shields)
- @cached_property
- def scoring(self) -> scoring.ScoringResourceWithStreamingResponse:
- from .resources.scoring import ScoringResourceWithStreamingResponse
-
- return ScoringResourceWithStreamingResponse(self._client.scoring)
-
- @cached_property
- def scoring_functions(self) -> scoring_functions.ScoringFunctionsResourceWithStreamingResponse:
- from .resources.scoring_functions import ScoringFunctionsResourceWithStreamingResponse
-
- return ScoringFunctionsResourceWithStreamingResponse(self._client.scoring_functions)
-
@cached_property
def files(self) -> files.FilesResourceWithStreamingResponse:
- """
- This API is used to upload documents that can be used with other Llama Stack APIs.
- """
+ """This API is used to upload documents that can be used with other OGX APIs."""
from .resources.files import FilesResourceWithStreamingResponse
return FilesResourceWithStreamingResponse(self._client.files)
@@ -1287,21 +1195,18 @@ def alpha(self) -> alpha.AlphaResourceWithStreamingResponse:
return AlphaResourceWithStreamingResponse(self._client.alpha)
- @cached_property
- def beta(self) -> beta.BetaResourceWithStreamingResponse:
- from .resources.beta import BetaResourceWithStreamingResponse
-
- return BetaResourceWithStreamingResponse(self._client.beta)
-
-class AsyncLlamaStackClientWithStreamedResponse:
- _client: AsyncLlamaStackClient
+class AsyncOgxClientWithStreamedResponse:
+ _client: AsyncOgxClient
- def __init__(self, client: AsyncLlamaStackClient) -> None:
+ def __init__(self, client: AsyncOgxClient) -> None:
self._client = client
@cached_property
def responses(self) -> responses.AsyncResponsesResourceWithStreamingResponse:
+ """
+ OpenAI Responses API for agent orchestration with tool use, multi-turn conversations, and background processing.
+ """
from .resources.responses import AsyncResponsesResourceWithStreamingResponse
return AsyncResponsesResourceWithStreamingResponse(self._client.responses)
@@ -1323,7 +1228,7 @@ def conversations(self) -> conversations.AsyncConversationsResourceWithStreaming
@cached_property
def inspect(self) -> inspect.AsyncInspectResourceWithStreamingResponse:
"""
- APIs for inspecting the Llama Stack service, including health status, available API routes with methods and implementing providers.
+ APIs for inspecting the OGX service, including health status, available API routes with methods and implementing providers.
"""
from .resources.inspect import AsyncInspectResourceWithStreamingResponse
@@ -1331,8 +1236,7 @@ def inspect(self) -> inspect.AsyncInspectResourceWithStreamingResponse:
@cached_property
def embeddings(self) -> embeddings.AsyncEmbeddingsResourceWithStreamingResponse:
- """
- Llama Stack Inference API for generating completions, chat completions, and embeddings.
+ """OGX Inference API for generating completions, chat completions, and embeddings.
This API provides the raw interface to the underlying models. Three kinds of models are supported:
- LLM models: these models generate "raw" and "chat" (conversational) completions.
@@ -1351,8 +1255,7 @@ def chat(self) -> chat.AsyncChatResourceWithStreamingResponse:
@cached_property
def completions(self) -> completions.AsyncCompletionsResourceWithStreamingResponse:
- """
- Llama Stack Inference API for generating completions, chat completions, and embeddings.
+ """OGX Inference API for generating completions, chat completions, and embeddings.
This API provides the raw interface to the underlying models. Three kinds of models are supported:
- LLM models: these models generate "raw" and "chat" (conversational) completions.
@@ -1393,7 +1296,7 @@ def providers(self) -> providers.AsyncProvidersResourceWithStreamingResponse:
@cached_property
def routes(self) -> routes.AsyncRoutesResourceWithStreamingResponse:
"""
- APIs for inspecting the Llama Stack service, including health status, available API routes with methods and implementing providers.
+ APIs for inspecting the OGX service, including health status, available API routes with methods and implementing providers.
"""
from .resources.routes import AsyncRoutesResourceWithStreamingResponse
@@ -1419,23 +1322,9 @@ def shields(self) -> shields.AsyncShieldsResourceWithStreamingResponse:
return AsyncShieldsResourceWithStreamingResponse(self._client.shields)
- @cached_property
- def scoring(self) -> scoring.AsyncScoringResourceWithStreamingResponse:
- from .resources.scoring import AsyncScoringResourceWithStreamingResponse
-
- return AsyncScoringResourceWithStreamingResponse(self._client.scoring)
-
- @cached_property
- def scoring_functions(self) -> scoring_functions.AsyncScoringFunctionsResourceWithStreamingResponse:
- from .resources.scoring_functions import AsyncScoringFunctionsResourceWithStreamingResponse
-
- return AsyncScoringFunctionsResourceWithStreamingResponse(self._client.scoring_functions)
-
@cached_property
def files(self) -> files.AsyncFilesResourceWithStreamingResponse:
- """
- This API is used to upload documents that can be used with other Llama Stack APIs.
- """
+ """This API is used to upload documents that can be used with other OGX APIs."""
from .resources.files import AsyncFilesResourceWithStreamingResponse
return AsyncFilesResourceWithStreamingResponse(self._client.files)
@@ -1460,13 +1349,7 @@ def alpha(self) -> alpha.AsyncAlphaResourceWithStreamingResponse:
return AsyncAlphaResourceWithStreamingResponse(self._client.alpha)
- @cached_property
- def beta(self) -> beta.AsyncBetaResourceWithStreamingResponse:
- from .resources.beta import AsyncBetaResourceWithStreamingResponse
-
- return AsyncBetaResourceWithStreamingResponse(self._client.beta)
-
-Client = LlamaStackClient
+Client = OgxClient
-AsyncClient = AsyncLlamaStackClient
+AsyncClient = AsyncOgxClient
diff --git a/src/llama_stack_client/_compat.py b/src/ogx_client/_compat.py
similarity index 100%
rename from src/llama_stack_client/_compat.py
rename to src/ogx_client/_compat.py
diff --git a/src/llama_stack_client/_constants.py b/src/ogx_client/_constants.py
similarity index 100%
rename from src/llama_stack_client/_constants.py
rename to src/ogx_client/_constants.py
diff --git a/src/llama_stack_client/_exceptions.py b/src/ogx_client/_exceptions.py
similarity index 97%
rename from src/llama_stack_client/_exceptions.py
rename to src/ogx_client/_exceptions.py
index c36e94a7..f4b70661 100644
--- a/src/llama_stack_client/_exceptions.py
+++ b/src/ogx_client/_exceptions.py
@@ -24,11 +24,11 @@
]
-class LlamaStackClientError(Exception):
+class OgxClientError(Exception):
pass
-class APIError(LlamaStackClientError):
+class APIError(OgxClientError):
message: str
request: httpx.Request
diff --git a/src/llama_stack_client/_files.py b/src/ogx_client/_files.py
similarity index 63%
rename from src/llama_stack_client/_files.py
rename to src/ogx_client/_files.py
index f368c297..d710165d 100644
--- a/src/llama_stack_client/_files.py
+++ b/src/ogx_client/_files.py
@@ -9,8 +9,8 @@
import io
import os
import pathlib
-from typing import overload
-from typing_extensions import TypeGuard
+from typing import Sequence, cast, overload
+from typing_extensions import TypeVar, TypeGuard
import anyio
@@ -23,7 +23,9 @@
HttpxFileContent,
HttpxRequestFiles,
)
-from ._utils import is_tuple_t, is_mapping_t, is_sequence_t
+from ._utils import is_list, is_mapping, is_tuple_t, is_mapping_t, is_sequence_t
+
+_T = TypeVar("_T")
def is_base64_file_input(obj: object) -> TypeGuard[Base64FileInput]:
@@ -40,7 +42,7 @@ def assert_is_file_content(obj: object, *, key: str | None = None) -> None:
if not is_file_content(obj):
prefix = f"Expected entry at `{key}`" if key is not None else f"Expected file input `{obj!r}`"
raise RuntimeError(
- f"{prefix} to be bytes, an io.IOBase instance, PathLike or a tuple but received {type(obj)} instead. See https://github.com/llamastack/llama-stack-client-python/tree/main#file-uploads"
+ f"{prefix} to be bytes, an io.IOBase instance, PathLike or a tuple but received {type(obj)} instead. See https://github.com/ogx-ai/ogx-client-python/tree/main#file-uploads"
) from None
@@ -127,3 +129,51 @@ async def async_read_file_content(file: FileContent) -> HttpxFileContent:
return await anyio.Path(file).read_bytes()
return file
+
+
+def deepcopy_with_paths(item: _T, paths: Sequence[Sequence[str]]) -> _T:
+ """Copy only the containers along the given paths.
+
+ Used to guard against mutation by extract_files without copying the entire structure.
+ Only dicts and lists that lie on a path are copied; everything else
+ is returned by reference.
+
+ For example, given paths=[["foo", "files", "file"]] and the structure:
+ {
+ "foo": {
+ "bar": {"baz": {}},
+            "files": {"file": <file>}
+ }
+ }
+ The root dict, "foo", and "files" are copied (they lie on the path).
+ "bar" and "baz" are returned by reference (off the path).
+ """
+ return _deepcopy_with_paths(item, paths, 0)
+
+
+def _deepcopy_with_paths(item: _T, paths: Sequence[Sequence[str]], index: int) -> _T:
+ if not paths:
+ return item
+ if is_mapping(item):
+ key_to_paths: dict[str, list[Sequence[str]]] = {}
+ for path in paths:
+ if index < len(path):
+ key_to_paths.setdefault(path[index], []).append(path)
+
+ # if no path continues through this mapping, it won't be mutated and copying it is redundant
+ if not key_to_paths:
+ return item
+
+ result = dict(item)
+ for key, subpaths in key_to_paths.items():
+ if key in result:
+ result[key] = _deepcopy_with_paths(result[key], subpaths, index + 1)
+ return cast(_T, result)
+ if is_list(item):
+        array_paths = [path for path in paths if index < len(path) and path[index] == "<array>"]
+
+ # if no path expects a list here, nothing will be mutated inside it - return by reference
+ if not array_paths:
+ return cast(_T, item)
+ return cast(_T, [_deepcopy_with_paths(entry, array_paths, index + 1) for entry in item])
+ return item
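
A quick behavioral sketch of `deepcopy_with_paths` (hypothetical payload; the identity checks mirror the docstring above):

    from ogx_client._files import deepcopy_with_paths  # private module path, per this patch

    payload = {
        "foo": {
            "bar": {"baz": {}},
            "files": {"file": b"raw bytes"},
        }
    }

    copied = deepcopy_with_paths(payload, [["foo", "files", "file"]])

    assert copied is not payload                            # root lies on the path: copied
    assert copied["foo"] is not payload["foo"]              # on the path: copied
    assert copied["foo"]["files"] is not payload["foo"]["files"]
    assert copied["foo"]["bar"] is payload["foo"]["bar"]    # off the path: shared
    # the leaf value itself is never copied, only the containers above it
    assert copied["foo"]["files"]["file"] is payload["foo"]["files"]["file"]
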
diff --git a/src/llama_stack_client/_models.py b/src/ogx_client/_models.py
similarity index 100%
rename from src/llama_stack_client/_models.py
rename to src/ogx_client/_models.py
diff --git a/src/llama_stack_client/_qs.py b/src/ogx_client/_qs.py
similarity index 96%
rename from src/llama_stack_client/_qs.py
rename to src/ogx_client/_qs.py
index 1a49e008..a73897c1 100644
--- a/src/llama_stack_client/_qs.py
+++ b/src/ogx_client/_qs.py
@@ -8,17 +8,13 @@
from typing import Any, List, Tuple, Union, Mapping, TypeVar
from urllib.parse import parse_qs, urlencode
-from typing_extensions import Literal, get_args
+from typing_extensions import get_args
-from ._types import NotGiven, not_given
+from ._types import NotGiven, ArrayFormat, NestedFormat, not_given
from ._utils import flatten
_T = TypeVar("_T")
-
-ArrayFormat = Literal["comma", "repeat", "indices", "brackets"]
-NestedFormat = Literal["dots", "brackets"]
-
PrimitiveData = Union[str, int, float, bool, None]
# this should be Data = Union[PrimitiveData, "List[Data]", "Tuple[Data]", "Mapping[str, Data]"]
# https://github.com/microsoft/pyright/issues/3555
diff --git a/src/llama_stack_client/_resource.py b/src/ogx_client/_resource.py
similarity index 81%
rename from src/llama_stack_client/_resource.py
rename to src/ogx_client/_resource.py
index 03b3b142..e1d86aa0 100644
--- a/src/llama_stack_client/_resource.py
+++ b/src/ogx_client/_resource.py
@@ -14,13 +14,13 @@
import anyio
if TYPE_CHECKING:
- from ._client import LlamaStackClient, AsyncLlamaStackClient
+ from ._client import OgxClient, AsyncOgxClient
class SyncAPIResource:
- _client: LlamaStackClient
+ _client: OgxClient
- def __init__(self, client: LlamaStackClient) -> None:
+ def __init__(self, client: OgxClient) -> None:
self._client = client
self._get = client.get
self._post = client.post
@@ -34,9 +34,9 @@ def _sleep(self, seconds: float) -> None:
class AsyncAPIResource:
- _client: AsyncLlamaStackClient
+ _client: AsyncOgxClient
- def __init__(self, client: AsyncLlamaStackClient) -> None:
+ def __init__(self, client: AsyncOgxClient) -> None:
self._client = client
self._get = client.get
self._post = client.post
diff --git a/src/llama_stack_client/_response.py b/src/ogx_client/_response.py
similarity index 98%
rename from src/llama_stack_client/_response.py
rename to src/ogx_client/_response.py
index bb6099cd..de2302be 100644
--- a/src/llama_stack_client/_response.py
+++ b/src/ogx_client/_response.py
@@ -35,7 +35,7 @@
from ._models import BaseModel, is_basemodel
from ._constants import RAW_RESPONSE_HEADER, OVERRIDE_CAST_TO_HEADER
from ._streaming import Stream, AsyncStream, is_stream_class_type, extract_stream_chunk_type
-from ._exceptions import LlamaStackClientError, APIResponseValidationError
+from ._exceptions import OgxClientError, APIResponseValidationError
if TYPE_CHECKING:
from ._models import FinalRequestOptions
@@ -227,7 +227,7 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T:
and issubclass(origin, pydantic.BaseModel)
):
raise TypeError(
- "Pydantic models must subclass our base model type, e.g. `from llama_stack_client import BaseModel`"
+ "Pydantic models must subclass our base model type, e.g. `from ogx_client import BaseModel`"
)
if (
@@ -294,7 +294,7 @@ def parse(self, *, to: type[_T] | None = None) -> R | _T:
the `to` argument, e.g.
```py
- from llama_stack_client import BaseModel
+ from ogx_client import BaseModel
class MyModel(BaseModel):
@@ -396,7 +396,7 @@ async def parse(self, *, to: type[_T] | None = None) -> R | _T:
the `to` argument, e.g.
```py
- from llama_stack_client import BaseModel
+ from ogx_client import BaseModel
class MyModel(BaseModel):
@@ -567,11 +567,11 @@ async def stream_to_file(
class MissingStreamClassError(TypeError):
def __init__(self) -> None:
super().__init__(
- "The `stream` argument was set to `True` but the `stream_cls` argument was not given. See `llama_stack_client._streaming` for reference",
+ "The `stream` argument was set to `True` but the `stream_cls` argument was not given. See `ogx_client._streaming` for reference",
)
-class StreamAlreadyConsumed(LlamaStackClientError):
+class StreamAlreadyConsumed(OgxClientError):
"""
Attempted to read or stream content, but the content has already
been streamed.
diff --git a/src/llama_stack_client/_streaming.py b/src/ogx_client/_streaming.py
similarity index 98%
rename from src/llama_stack_client/_streaming.py
rename to src/ogx_client/_streaming.py
index ba8992e1..761b8ea7 100644
--- a/src/llama_stack_client/_streaming.py
+++ b/src/ogx_client/_streaming.py
@@ -19,7 +19,7 @@
from ._exceptions import APIError
if TYPE_CHECKING:
- from ._client import LlamaStackClient, AsyncLlamaStackClient
+ from ._client import OgxClient, AsyncOgxClient
from ._models import FinalRequestOptions
@@ -38,7 +38,7 @@ def __init__(
*,
cast_to: type[_T],
response: httpx.Response,
- client: LlamaStackClient,
+ client: OgxClient,
options: Optional[FinalRequestOptions] = None,
) -> None:
self.response = response
@@ -121,7 +121,7 @@ def __init__(
*,
cast_to: type[_T],
response: httpx.Response,
- client: AsyncLlamaStackClient,
+ client: AsyncOgxClient,
options: Optional[FinalRequestOptions] = None,
) -> None:
self.response = response
diff --git a/src/llama_stack_client/_types.py b/src/ogx_client/_types.py
similarity index 98%
rename from src/llama_stack_client/_types.py
rename to src/ogx_client/_types.py
index 508aa414..06a420e9 100644
--- a/src/llama_stack_client/_types.py
+++ b/src/ogx_client/_types.py
@@ -53,6 +53,9 @@
ModelT = TypeVar("ModelT", bound=pydantic.BaseModel)
_T = TypeVar("_T")
+ArrayFormat = Literal["comma", "repeat", "indices", "brackets"]
+NestedFormat = Literal["dots", "brackets"]
+
# Approximates httpx internal ProxiesTypes and RequestFiles types
# while adding support for `PathLike` instances
@@ -107,7 +110,7 @@
# This unfortunately means that you will either have
# to import this type and pass it explicitly:
#
-# from llama_stack_client import NoneType
+# from ogx_client import NoneType
# client.get('/foo', cast_to=NoneType)
#
# or build it yourself:
diff --git a/src/llama_stack_client/_utils/__init__.py b/src/ogx_client/_utils/__init__.py
similarity index 98%
rename from src/llama_stack_client/_utils/__init__.py
rename to src/ogx_client/_utils/__init__.py
index fdb249b3..f82bfad7 100644
--- a/src/llama_stack_client/_utils/__init__.py
+++ b/src/ogx_client/_utils/__init__.py
@@ -30,7 +30,6 @@
coerce_integer as coerce_integer,
file_from_path as file_from_path,
strip_not_given as strip_not_given,
- deepcopy_minimal as deepcopy_minimal,
get_async_library as get_async_library,
maybe_coerce_float as maybe_coerce_float,
get_required_header as get_required_header,
diff --git a/src/llama_stack_client/_utils/_compat.py b/src/ogx_client/_utils/_compat.py
similarity index 100%
rename from src/llama_stack_client/_utils/_compat.py
rename to src/ogx_client/_utils/_compat.py
diff --git a/src/llama_stack_client/_utils/_datetime_parse.py b/src/ogx_client/_utils/_datetime_parse.py
similarity index 100%
rename from src/llama_stack_client/_utils/_datetime_parse.py
rename to src/ogx_client/_utils/_datetime_parse.py
diff --git a/src/llama_stack_client/_utils/_json.py b/src/ogx_client/_utils/_json.py
similarity index 100%
rename from src/llama_stack_client/_utils/_json.py
rename to src/ogx_client/_utils/_json.py
diff --git a/src/llama_stack_client/_utils/_logs.py b/src/ogx_client/_utils/_logs.py
similarity index 50%
rename from src/llama_stack_client/_utils/_logs.py
rename to src/ogx_client/_utils/_logs.py
index 61999301..2e932e09 100644
--- a/src/llama_stack_client/_utils/_logs.py
+++ b/src/ogx_client/_utils/_logs.py
@@ -1,29 +1,20 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
import os
import logging
-from rich.logging import RichHandler
-
-logger: logging.Logger = logging.getLogger("llama_stack_client")
+logger: logging.Logger = logging.getLogger("ogx_client")
httpx_logger: logging.Logger = logging.getLogger("httpx")
def _basic_config() -> None:
- # e.g. [2023-10-05 14:12:26 - llama_stack_client._base_client:818 - DEBUG] HTTP Request: POST http://127.0.0.1:4010/foo/bar "200 OK"
+ # e.g. [2023-10-05 14:12:26 - ogx_client._base_client:818 - DEBUG] HTTP Request: POST http://127.0.0.1:4010/foo/bar "200 OK"
logging.basicConfig(
format="[%(asctime)s - %(name)s:%(lineno)d - %(levelname)s] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
- handlers=[RichHandler(rich_tracebacks=True)],
)
def setup_logging() -> None:
- env = os.environ.get("LLAMA_STACK_CLIENT_LOG")
+ env = os.environ.get("OGX_CLIENT_LOG")
if env == "debug":
_basic_config()
logger.setLevel(logging.DEBUG)
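
Assuming `setup_logging()` is still invoked at package import time (as in the upstream generated client), debug logging is now driven entirely by the renamed env var:

    import os

    os.environ["OGX_CLIENT_LOG"] = "debug"  # must be set before importing the package

    import ogx_client  # setup_logging() reads OGX_CLIENT_LOG on import

    # requests then emit plain logging lines (RichHandler is no longer installed), e.g.
    # [2023-10-05 14:12:26 - ogx_client._base_client:818 - DEBUG] HTTP Request: POST ... "200 OK"
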
diff --git a/src/llama_stack_client/_utils/_path.py b/src/ogx_client/_utils/_path.py
similarity index 100%
rename from src/llama_stack_client/_utils/_path.py
rename to src/ogx_client/_utils/_path.py
diff --git a/src/llama_stack_client/_utils/_proxy.py b/src/ogx_client/_utils/_proxy.py
similarity index 100%
rename from src/llama_stack_client/_utils/_proxy.py
rename to src/ogx_client/_utils/_proxy.py
diff --git a/src/llama_stack_client/_utils/_reflection.py b/src/ogx_client/_utils/_reflection.py
similarity index 100%
rename from src/llama_stack_client/_utils/_reflection.py
rename to src/ogx_client/_utils/_reflection.py
diff --git a/src/ogx_client/_utils/_resources_proxy.py b/src/ogx_client/_utils/_resources_proxy.py
new file mode 100644
index 00000000..b8936f4e
--- /dev/null
+++ b/src/ogx_client/_utils/_resources_proxy.py
@@ -0,0 +1,24 @@
+from __future__ import annotations
+
+from typing import Any
+from typing_extensions import override
+
+from ._proxy import LazyProxy
+
+
+class ResourcesProxy(LazyProxy[Any]):
+ """A proxy for the `ogx_client.resources` module.
+
+ This is used so that we can lazily import `ogx_client.resources` only when
+ needed *and* so that users can just import `ogx_client` and reference `ogx_client.resources`
+ """
+
+ @override
+ def __load__(self) -> Any:
+ import importlib
+
+ mod = importlib.import_module("ogx_client.resources")
+ return mod
+
+
+resources = ResourcesProxy().__as_proxied__()
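
A sketch of what the proxy provides, assuming `ogx_client/__init__.py` re-exports this `resources` object: the top-level import stays cheap, and the subpackage import is deferred to the first attribute access.

    import sys

    import ogx_client  # does not import the (large) resources subpackage yet

    _ = ogx_client.resources.files  # first attribute access calls __load__()
    assert "ogx_client.resources" in sys.modules
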
diff --git a/src/llama_stack_client/_utils/_streams.py b/src/ogx_client/_utils/_streams.py
similarity index 100%
rename from src/llama_stack_client/_utils/_streams.py
rename to src/ogx_client/_utils/_streams.py
diff --git a/src/llama_stack_client/_utils/_sync.py b/src/ogx_client/_utils/_sync.py
similarity index 100%
rename from src/llama_stack_client/_utils/_sync.py
rename to src/ogx_client/_utils/_sync.py
diff --git a/src/llama_stack_client/_utils/_transform.py b/src/ogx_client/_utils/_transform.py
similarity index 100%
rename from src/llama_stack_client/_utils/_transform.py
rename to src/ogx_client/_utils/_transform.py
diff --git a/src/llama_stack_client/_utils/_typing.py b/src/ogx_client/_utils/_typing.py
similarity index 100%
rename from src/llama_stack_client/_utils/_typing.py
rename to src/ogx_client/_utils/_typing.py
diff --git a/src/llama_stack_client/_utils/_utils.py b/src/ogx_client/_utils/_utils.py
similarity index 86%
rename from src/llama_stack_client/_utils/_utils.py
rename to src/ogx_client/_utils/_utils.py
index b2dcaab2..9f08bd02 100644
--- a/src/llama_stack_client/_utils/_utils.py
+++ b/src/ogx_client/_utils/_utils.py
@@ -23,11 +23,11 @@
)
from pathlib import Path
from datetime import date, datetime
-from typing_extensions import TypeGuard
+from typing_extensions import TypeGuard, get_args
import sniffio
-from .._types import Omit, NotGiven, FileTypes, HeadersLike
+from .._types import Omit, NotGiven, FileTypes, ArrayFormat, HeadersLike
_T = TypeVar("_T")
_TupleT = TypeVar("_TupleT", bound=Tuple[object, ...])
@@ -46,25 +46,45 @@ def extract_files(
query: Mapping[str, object],
*,
paths: Sequence[Sequence[str]],
+ array_format: ArrayFormat = "brackets",
) -> list[tuple[str, FileTypes]]:
"""Recursively extract files from the given dictionary based on specified paths.
    A path may look like this ['foo', 'files', '<array>', 'data'].
+    ``array_format`` controls how ``<array>`` segments contribute to the emitted
+ field name. Supported values: ``"brackets"`` (``foo[]``), ``"repeat"`` and
+ ``"comma"`` (``foo``), ``"indices"`` (``foo[0]``, ``foo[1]``).
+
Note: this mutates the given dictionary.
"""
files: list[tuple[str, FileTypes]] = []
for path in paths:
- files.extend(_extract_items(query, path, index=0, flattened_key=None))
+ files.extend(_extract_items(query, path, index=0, flattened_key=None, array_format=array_format))
return files
+def _array_suffix(array_format: ArrayFormat, array_index: int) -> str:
+ if array_format == "brackets":
+ return "[]"
+ if array_format == "indices":
+ return f"[{array_index}]"
+ if array_format == "repeat" or array_format == "comma":
+ # Both repeat the bare field name for each file part; there is no
+ # meaningful way to comma-join binary parts.
+ return ""
+ raise NotImplementedError(
+ f"Unknown array_format value: {array_format}, choose from {', '.join(get_args(ArrayFormat))}"
+ )
+
+
def _extract_items(
obj: object,
path: Sequence[str],
*,
index: int,
flattened_key: str | None,
+ array_format: ArrayFormat,
) -> list[tuple[str, FileTypes]]:
try:
key = path[index]
@@ -81,9 +101,11 @@ def _extract_items(
if is_list(obj):
files: list[tuple[str, FileTypes]] = []
- for entry in obj:
- assert_is_file_content(entry, key=flattened_key + "[]" if flattened_key else "")
- files.append((flattened_key + "[]", cast(FileTypes, entry)))
+ for array_index, entry in enumerate(obj):
+ suffix = _array_suffix(array_format, array_index)
+ emitted_key = (flattened_key + suffix) if flattened_key else suffix
+ assert_is_file_content(entry, key=emitted_key)
+ files.append((emitted_key, cast(FileTypes, entry)))
return files
assert_is_file_content(obj, key=flattened_key)
@@ -92,8 +114,9 @@ def _extract_items(
index += 1
if is_dict(obj):
try:
- # We are at the last entry in the path so we must remove the field
- if (len(path)) == index:
+ # Remove the field if there are no more dict keys in the path,
+            # only "<array>" traversal markers remain, or the path has ended.
+            if all(p == "<array>" for p in path[index:]):
item = obj.pop(key)
else:
item = obj[key]
@@ -111,6 +134,7 @@ def _extract_items(
path,
index=index,
flattened_key=flattened_key,
+ array_format=array_format,
)
elif is_list(obj):
        if key != "<array>":
@@ -122,9 +146,12 @@ def _extract_items(
item,
path,
index=index,
- flattened_key=flattened_key + "[]" if flattened_key is not None else "[]",
+ flattened_key=(
+ (flattened_key if flattened_key is not None else "") + _array_suffix(array_format, array_index)
+ ),
+ array_format=array_format,
)
- for item in obj
+ for array_index, item in enumerate(obj)
]
)
@@ -182,21 +209,6 @@ def is_iterable(obj: object) -> TypeGuard[Iterable[object]]:
return isinstance(obj, Iterable)
-def deepcopy_minimal(item: _T) -> _T:
- """Minimal reimplementation of copy.deepcopy() that will only copy certain object types:
-
- - mappings, e.g. `dict`
- - list
-
- This is done for performance reasons.
- """
- if is_mapping(item):
- return cast(_T, {k: deepcopy_minimal(v) for k, v in item.items()})
- if is_list(item):
- return cast(_T, [deepcopy_minimal(entry) for entry in item])
- return item
-
-
# copied from https://github.com/Rapptz/RoboDanny
def human_join(seq: Sequence[str], *, delim: str = ", ", final: str = "or") -> str:
size = len(seq)
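
A sketch of the new `array_format` parameter on `extract_files` (hypothetical payload; `"<array>"` is the path marker used throughout this module, and the expected keys assume the unchanged upstream "foo[files]" key-flattening):

    from ogx_client._utils import extract_files  # assumed re-export, per _utils/__init__.py

    body = {"foo": {"files": [b"part-0", b"part-1"]}}

    files = extract_files(
        body,  # note: mutates body, popping the extracted file entries
        paths=[["foo", "files", "<array>"]],
        array_format="indices",
    )
    assert files == [("foo[files][0]", b"part-0"), ("foo[files][1]", b"part-1")]

    # "brackets" (the default) emits ("foo[files][]", ...) for every entry, while
    # "repeat" and "comma" emit the bare ("foo[files]", ...) key once per entry.
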
diff --git a/src/llama_stack_client/_version.py b/src/ogx_client/_version.py
similarity index 75%
rename from src/llama_stack_client/_version.py
rename to src/ogx_client/_version.py
index a515d37b..8ce6912f 100644
--- a/src/llama_stack_client/_version.py
+++ b/src/ogx_client/_version.py
@@ -6,5 +6,5 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-__title__ = "llama_stack_client"
-__version__ = "0.7.0-alpha.3" # x-release-please-version
+__title__ = "ogx_client"
+__version__ = "0.7.2-alpha.4" # x-release-please-version
diff --git a/src/llama_stack_client/_wrappers.py b/src/ogx_client/_wrappers.py
similarity index 100%
rename from src/llama_stack_client/_wrappers.py
rename to src/ogx_client/_wrappers.py
diff --git a/src/llama_stack_client/lib/.keep b/src/ogx_client/lib/.keep
similarity index 100%
rename from src/llama_stack_client/lib/.keep
rename to src/ogx_client/lib/.keep
diff --git a/src/llama_stack_client/lib/__init__.py b/src/ogx_client/lib/__init__.py
similarity index 100%
rename from src/llama_stack_client/lib/__init__.py
rename to src/ogx_client/lib/__init__.py
diff --git a/src/llama_stack_client/lib/agents/__init__.py b/src/ogx_client/lib/agents/__init__.py
similarity index 100%
rename from src/llama_stack_client/lib/agents/__init__.py
rename to src/ogx_client/lib/agents/__init__.py
diff --git a/src/llama_stack_client/lib/agents/agent.py b/src/ogx_client/lib/agents/agent.py
similarity index 99%
rename from src/llama_stack_client/lib/agents/agent.py
rename to src/ogx_client/lib/agents/agent.py
index 417a53e2..b2fc8aa2 100644
--- a/src/llama_stack_client/lib/agents/agent.py
+++ b/src/ogx_client/lib/agents/agent.py
@@ -3,36 +3,38 @@
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
+from __future__ import annotations
+
import json
import logging
+from uuid import uuid4
from typing import (
Any,
- AsyncIterator,
- Callable,
Dict,
- Iterator,
List,
- Optional,
Tuple,
Union,
+ Callable,
+ Iterator,
+ Optional,
TypedDict,
+ AsyncIterator,
)
-from uuid import uuid4
+from .types import ToolCall, ToolResponse, CompletionMessage
from ..._types import Headers
from .client_tool import ClientTool, client_tool
from .tool_parser import ToolParser
from .turn_events import (
- AgentStreamChunk,
- StepCompleted,
- StepProgress,
+ TurnFailed,
StepStarted,
+ StepProgress,
+ StepCompleted,
+ AgentStreamChunk,
ToolCallIssuedDelta,
- TurnFailed,
ToolExecutionStepResult,
)
from .event_synthesizer import TurnEventSynthesizer
-from .types import CompletionMessage, ToolCall, ToolResponse
class ToolResponsePayload(TypedDict, total=False):
diff --git a/src/llama_stack_client/lib/agents/client_tool.py b/src/ogx_client/lib/agents/client_tool.py
similarity index 99%
rename from src/llama_stack_client/lib/agents/client_tool.py
rename to src/ogx_client/lib/agents/client_tool.py
index 63a5bfd7..be49b7c6 100644
--- a/src/llama_stack_client/lib/agents/client_tool.py
+++ b/src/ogx_client/lib/agents/client_tool.py
@@ -4,24 +4,23 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
-import inspect
import json
+import inspect
from abc import abstractmethod
from typing import (
Any,
- Callable,
Dict,
+ List,
+ Union,
+ TypeVar,
+ Callable,
get_args,
get_origin,
get_type_hints,
- List,
- TypeVar,
- Union,
)
-
from typing_extensions import TypedDict
-from .types import CompletionMessage, Message, ToolDefinition, ToolResponse
+from .types import Message, ToolResponse, ToolDefinition, CompletionMessage
class JSONSchema(TypedDict, total=False):
diff --git a/src/llama_stack_client/lib/agents/event_logger.py b/src/ogx_client/lib/agents/event_logger.py
similarity index 100%
rename from src/llama_stack_client/lib/agents/event_logger.py
rename to src/ogx_client/lib/agents/event_logger.py
index 8b56f398..c7b9d109 100644
--- a/src/llama_stack_client/lib/agents/event_logger.py
+++ b/src/ogx_client/lib/agents/event_logger.py
@@ -13,16 +13,16 @@
from typing import Iterator
from .turn_events import (
- AgentStreamChunk,
- TurnStarted,
- TurnCompleted,
+ TextDelta,
TurnFailed,
StepStarted,
+ TurnStarted,
StepProgress,
StepCompleted,
- TextDelta,
- ToolCallIssuedDelta,
ToolCallDelta,
+ TurnCompleted,
+ AgentStreamChunk,
+ ToolCallIssuedDelta,
)
__all__ = ["AgentEventLogger", "EventLogger"]
diff --git a/src/llama_stack_client/lib/agents/event_synthesizer.py b/src/ogx_client/lib/agents/event_synthesizer.py
similarity index 99%
rename from src/llama_stack_client/lib/agents/event_synthesizer.py
rename to src/ogx_client/lib/agents/event_synthesizer.py
index 22ce5c24..df763cdf 100644
--- a/src/llama_stack_client/lib/agents/event_synthesizer.py
+++ b/src/ogx_client/lib/agents/event_synthesizer.py
@@ -15,26 +15,24 @@
from __future__ import annotations
import json
-from dataclasses import dataclass
-from typing import Any, Dict, Iterable, Iterator, List, Optional
-
+from typing import Any, Dict, List, Iterable, Iterator, Optional
from logging import getLogger
+from dataclasses import dataclass
from .types import ToolCall
-
from .turn_events import (
+ TextDelta,
AgentEvent,
- InferenceStepResult,
- StepCompleted,
- StepProgress,
+ TurnFailed,
StepStarted,
- TextDelta,
+ TurnStarted,
+ StepProgress,
+ StepCompleted,
ToolCallDelta,
+ TurnCompleted,
+ InferenceStepResult,
ToolCallIssuedDelta,
ToolExecutionStepResult,
- TurnCompleted,
- TurnFailed,
- TurnStarted,
)
logger = getLogger(__name__)
@@ -242,7 +240,7 @@ def process_raw_stream(self, events: Iterable[Any]) -> Iterator[AgentEvent]:
response_id = getattr(event, "response_id", None)
if response_id is None and hasattr(event, "response"):
- response = getattr(event, "response")
+ response = event.response
response_id = getattr(response, "id", None)
if response_id is not None:
current_response_id = response_id
diff --git a/src/llama_stack_client/lib/agents/react/__init__.py b/src/ogx_client/lib/agents/react/__init__.py
similarity index 100%
rename from src/llama_stack_client/lib/agents/react/__init__.py
rename to src/ogx_client/lib/agents/react/__init__.py
diff --git a/src/llama_stack_client/lib/agents/react/agent.py b/src/ogx_client/lib/agents/react/agent.py
similarity index 96%
rename from src/llama_stack_client/lib/agents/react/agent.py
rename to src/ogx_client/lib/agents/react/agent.py
index cd14c6bf..2d0d6a63 100644
--- a/src/llama_stack_client/lib/agents/react/agent.py
+++ b/src/ogx_client/lib/agents/react/agent.py
@@ -3,16 +3,18 @@
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
+from __future__ import annotations
+
import logging
+from typing import Any, Dict, List, Tuple, Union, Callable, Optional
from collections.abc import Mapping
-from typing import Any, Callable, Dict, List, Optional, Tuple, Union
-from ...._types import Headers
from ..agent import Agent, AgentUtils
-from ..client_tool import ClientTool
-from ..tool_parser import ToolParser
from .prompts import DEFAULT_REACT_AGENT_SYSTEM_PROMPT_TEMPLATE
+from ...._types import Headers
from .tool_parser import ReActToolParser
+from ..client_tool import ClientTool
+from ..tool_parser import ToolParser
logger = logging.getLogger(__name__)
diff --git a/src/llama_stack_client/lib/agents/react/prompts.py b/src/ogx_client/lib/agents/react/prompts.py
similarity index 100%
rename from src/llama_stack_client/lib/agents/react/prompts.py
rename to src/ogx_client/lib/agents/react/prompts.py
diff --git a/src/llama_stack_client/lib/agents/react/tool_parser.py b/src/ogx_client/lib/agents/react/tool_parser.py
similarity index 91%
rename from src/llama_stack_client/lib/agents/react/tool_parser.py
rename to src/ogx_client/lib/agents/react/tool_parser.py
index 9120f83d..be946231 100644
--- a/src/llama_stack_client/lib/agents/react/tool_parser.py
+++ b/src/ogx_client/lib/agents/react/tool_parser.py
@@ -6,12 +6,11 @@
import json
import uuid
-from typing import List, Optional, Union
-
-from ..types import CompletionMessage, ToolCall
+from typing import List, Union, Optional
from pydantic import BaseModel, ValidationError
+from ..types import ToolCall, CompletionMessage
from ..tool_parser import ToolParser
@@ -38,7 +37,7 @@ def get_tool_calls(self, output_message: CompletionMessage) -> List[ToolCall]:
try:
react_output = ReActOutput.model_validate_json(response_text)
except ValidationError as e:
- print(f"Error parsing action: {e}")
+ print(f"Error parsing action: {e}") # noqa: T201
return tool_calls
if react_output.answer:
diff --git a/src/llama_stack_client/lib/agents/tool_parser.py b/src/ogx_client/lib/agents/tool_parser.py
similarity index 96%
rename from src/llama_stack_client/lib/agents/tool_parser.py
rename to src/ogx_client/lib/agents/tool_parser.py
index 0e6a97ad..022a10aa 100644
--- a/src/llama_stack_client/lib/agents/tool_parser.py
+++ b/src/ogx_client/lib/agents/tool_parser.py
@@ -7,7 +7,7 @@
from abc import abstractmethod
from typing import List
-from .types import CompletionMessage, ToolCall
+from .types import ToolCall, CompletionMessage
class ToolParser:
diff --git a/src/llama_stack_client/lib/agents/turn_events.py b/src/ogx_client/lib/agents/turn_events.py
similarity index 99%
rename from src/llama_stack_client/lib/agents/turn_events.py
rename to src/ogx_client/lib/agents/turn_events.py
index cca11095..bfc3c84b 100644
--- a/src/llama_stack_client/lib/agents/turn_events.py
+++ b/src/ogx_client/lib/agents/turn_events.py
@@ -17,8 +17,8 @@
- Result: Complete output when a step finishes
"""
+from typing import Any, Dict, List, Union, Literal, Optional
from dataclasses import dataclass
-from typing import Union, List, Optional, Dict, Any, Literal
from .types import ToolCall
diff --git a/src/llama_stack_client/lib/agents/types.py b/src/ogx_client/lib/agents/types.py
similarity index 97%
rename from src/llama_stack_client/lib/agents/types.py
rename to src/ogx_client/lib/agents/types.py
index 8ec67485..b72eee98 100644
--- a/src/llama_stack_client/lib/agents/types.py
+++ b/src/ogx_client/lib/agents/types.py
@@ -8,8 +8,8 @@
from __future__ import annotations
-from dataclasses import dataclass, field
from typing import Any, Dict, List, Protocol, TypedDict
+from dataclasses import field, dataclass
@dataclass
diff --git a/src/llama_stack_client/lib/cli/__init__.py b/src/ogx_client/lib/cli/__init__.py
similarity index 100%
rename from src/llama_stack_client/lib/cli/__init__.py
rename to src/ogx_client/lib/cli/__init__.py
diff --git a/src/llama_stack_client/lib/cli/common/__init__.py b/src/ogx_client/lib/cli/common/__init__.py
similarity index 100%
rename from src/llama_stack_client/lib/cli/common/__init__.py
rename to src/ogx_client/lib/cli/common/__init__.py
diff --git a/src/llama_stack_client/lib/cli/common/utils.py b/src/ogx_client/lib/cli/common/utils.py
similarity index 100%
rename from src/llama_stack_client/lib/cli/common/utils.py
rename to src/ogx_client/lib/cli/common/utils.py
index faf9ac26..fd5bd3db 100644
--- a/src/llama_stack_client/lib/cli/common/utils.py
+++ b/src/ogx_client/lib/cli/common/utils.py
@@ -5,9 +5,9 @@
# the root directory of this source tree.
from functools import wraps
-from rich.console import Console
from rich.panel import Panel
from rich.table import Table
+from rich.console import Console
def create_bar_chart(data, labels, title=""):
diff --git a/src/llama_stack_client/lib/cli/configure.py b/src/ogx_client/lib/cli/configure.py
similarity index 96%
rename from src/llama_stack_client/lib/cli/configure.py
rename to src/ogx_client/lib/cli/configure.py
index 59554580..fa1674b4 100644
--- a/src/llama_stack_client/lib/cli/configure.py
+++ b/src/ogx_client/lib/cli/configure.py
@@ -4,14 +4,15 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
+from __future__ import annotations
+
import os
+from urllib.parse import urlparse
-import click
import yaml
+import click
from prompt_toolkit import prompt
from prompt_toolkit.validation import Validator
-from urllib.parse import urlparse
-
-from llama_stack_client.lib.cli.constants import LLAMA_STACK_CLIENT_CONFIG_DIR, get_config_file_path
+from ogx_client.lib.cli.constants import LLAMA_STACK_CLIENT_CONFIG_DIR, get_config_file_path
@@ -65,4 +66,4 @@ def configure(endpoint: str | None, api_key: str | None):
)
)
- print(f"Done! You can now use the Llama Stack Client CLI with endpoint {final_endpoint}")
+ print(f"Done! You can now use the Llama Stack Client CLI with endpoint {final_endpoint}") # noqa: T201
diff --git a/src/llama_stack_client/lib/cli/constants.py b/src/ogx_client/lib/cli/constants.py
similarity index 100%
rename from src/llama_stack_client/lib/cli/constants.py
rename to src/ogx_client/lib/cli/constants.py
diff --git a/src/llama_stack_client/lib/cli/datasets/__init__.py b/src/ogx_client/lib/cli/datasets/__init__.py
similarity index 100%
rename from src/llama_stack_client/lib/cli/datasets/__init__.py
rename to src/ogx_client/lib/cli/datasets/__init__.py
diff --git a/src/llama_stack_client/lib/cli/datasets/datasets.py b/src/ogx_client/lib/cli/datasets/datasets.py
similarity index 100%
rename from src/llama_stack_client/lib/cli/datasets/datasets.py
rename to src/ogx_client/lib/cli/datasets/datasets.py
diff --git a/src/llama_stack_client/lib/cli/datasets/list.py b/src/ogx_client/lib/cli/datasets/list.py
similarity index 100%
rename from src/llama_stack_client/lib/cli/datasets/list.py
rename to src/ogx_client/lib/cli/datasets/list.py
index 61d625c9..b7e625e8 100644
--- a/src/llama_stack_client/lib/cli/datasets/list.py
+++ b/src/ogx_client/lib/cli/datasets/list.py
@@ -5,8 +5,8 @@
# the root directory of this source tree.
import click
-from rich.console import Console
from rich.table import Table
+from rich.console import Console
from ..common.utils import handle_client_errors
diff --git a/src/llama_stack_client/lib/cli/datasets/register.py b/src/ogx_client/lib/cli/datasets/register.py
similarity index 98%
rename from src/llama_stack_client/lib/cli/datasets/register.py
rename to src/ogx_client/lib/cli/datasets/register.py
index d990e30c..d76457c6 100644
--- a/src/llama_stack_client/lib/cli/datasets/register.py
+++ b/src/ogx_client/lib/cli/datasets/register.py
@@ -3,14 +3,14 @@
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
-import base64
+import os
import json
+import base64
import mimetypes
-import os
-from typing import Optional, Literal
+from typing import Literal, Optional
-import click
import yaml
+import click
from ..common.utils import handle_client_errors
diff --git a/src/llama_stack_client/lib/cli/datasets/unregister.py b/src/ogx_client/lib/cli/datasets/unregister.py
similarity index 100%
rename from src/llama_stack_client/lib/cli/datasets/unregister.py
rename to src/ogx_client/lib/cli/datasets/unregister.py
diff --git a/src/llama_stack_client/lib/cli/eval/__init__.py b/src/ogx_client/lib/cli/eval/__init__.py
similarity index 100%
rename from src/llama_stack_client/lib/cli/eval/__init__.py
rename to src/ogx_client/lib/cli/eval/__init__.py
diff --git a/src/llama_stack_client/lib/cli/eval/eval.py b/src/ogx_client/lib/cli/eval/eval.py
similarity index 100%
rename from src/llama_stack_client/lib/cli/eval/eval.py
rename to src/ogx_client/lib/cli/eval/eval.py
index dd162809..0ce29169 100644
--- a/src/llama_stack_client/lib/cli/eval/eval.py
+++ b/src/ogx_client/lib/cli/eval/eval.py
@@ -7,8 +7,8 @@
import click
-from .run_benchmark import run_benchmark
from .run_scoring import run_scoring
+from .run_benchmark import run_benchmark
@click.group()
diff --git a/src/llama_stack_client/lib/cli/eval/run_benchmark.py b/src/ogx_client/lib/cli/eval/run_benchmark.py
similarity index 99%
rename from src/llama_stack_client/lib/cli/eval/run_benchmark.py
rename to src/ogx_client/lib/cli/eval/run_benchmark.py
index e088137e..ee3aae4f 100644
--- a/src/llama_stack_client/lib/cli/eval/run_benchmark.py
+++ b/src/ogx_client/lib/cli/eval/run_benchmark.py
@@ -4,22 +4,24 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
-import json
+from __future__ import annotations
+
import os
+import json
from typing import Optional
import click
from rich import print as rprint
from tqdm.rich import tqdm
-from ..common.utils import create_bar_chart
from .utils import (
- aggregate_accuracy,
+ aggregate_median,
aggregate_average,
+ aggregate_accuracy,
aggregate_weighted_average,
aggregate_categorical_count,
- aggregate_median,
)
+from ..common.utils import create_bar_chart
@click.command("run-benchmark")
diff --git a/src/llama_stack_client/lib/cli/eval/run_scoring.py b/src/ogx_client/lib/cli/eval/run_scoring.py
similarity index 98%
rename from src/llama_stack_client/lib/cli/eval/run_scoring.py
rename to src/ogx_client/lib/cli/eval/run_scoring.py
index a9b29bbb..e51d406c 100644
--- a/src/llama_stack_client/lib/cli/eval/run_scoring.py
+++ b/src/ogx_client/lib/cli/eval/run_scoring.py
@@ -4,8 +4,10 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
-import json
+from __future__ import annotations
+
import os
+import json
from typing import Optional
import click
@@ -114,6 +116,6 @@ def run_scoring(
output_file = os.path.join(output_dir, f"{dataset_path or dataset_id}_score_results.csv")
df = pandas.DataFrame(output_res)
df.to_csv(output_file, index=False)
- print(df)
+ print(df) # noqa: T201
rprint(f"[green]âś“[/green] Results saved to: [blue]{output_file}[/blue]!\n")
diff --git a/src/llama_stack_client/lib/cli/eval/utils.py b/src/ogx_client/lib/cli/eval/utils.py
similarity index 100%
rename from src/llama_stack_client/lib/cli/eval/utils.py
rename to src/ogx_client/lib/cli/eval/utils.py
diff --git a/src/llama_stack_client/lib/cli/eval_tasks/__init__.py b/src/ogx_client/lib/cli/eval_tasks/__init__.py
similarity index 100%
rename from src/llama_stack_client/lib/cli/eval_tasks/__init__.py
rename to src/ogx_client/lib/cli/eval_tasks/__init__.py
diff --git a/src/llama_stack_client/lib/cli/eval_tasks/eval_tasks.py b/src/ogx_client/lib/cli/eval_tasks/eval_tasks.py
similarity index 98%
rename from src/llama_stack_client/lib/cli/eval_tasks/eval_tasks.py
rename to src/ogx_client/lib/cli/eval_tasks/eval_tasks.py
index 183498fb..8eca5925 100644
--- a/src/llama_stack_client/lib/cli/eval_tasks/eval_tasks.py
+++ b/src/ogx_client/lib/cli/eval_tasks/eval_tasks.py
@@ -5,14 +5,16 @@
# the root directory of this source tree.
+from __future__ import annotations
+
import json
from typing import Optional
-import click
import yaml
+import click
-from ..common.utils import handle_client_errors
from .list import list_eval_tasks
+from ..common.utils import handle_client_errors
@click.group()
diff --git a/src/llama_stack_client/lib/cli/eval_tasks/list.py b/src/ogx_client/lib/cli/eval_tasks/list.py
similarity index 100%
rename from src/llama_stack_client/lib/cli/eval_tasks/list.py
rename to src/ogx_client/lib/cli/eval_tasks/list.py
index d7eb9c53..de4a5d14 100644
--- a/src/llama_stack_client/lib/cli/eval_tasks/list.py
+++ b/src/ogx_client/lib/cli/eval_tasks/list.py
@@ -5,8 +5,8 @@
# the root directory of this source tree.
import click
-from rich.console import Console
from rich.table import Table
+from rich.console import Console
from ..common.utils import handle_client_errors
diff --git a/src/llama_stack_client/lib/cli/inference/__init__.py b/src/ogx_client/lib/cli/inference/__init__.py
similarity index 100%
rename from src/llama_stack_client/lib/cli/inference/__init__.py
rename to src/ogx_client/lib/cli/inference/__init__.py
diff --git a/src/llama_stack_client/lib/cli/inference/inference.py b/src/ogx_client/lib/cli/inference/inference.py
similarity index 98%
rename from src/llama_stack_client/lib/cli/inference/inference.py
rename to src/ogx_client/lib/cli/inference/inference.py
index 0cc16396..721b2336 100644
--- a/src/llama_stack_client/lib/cli/inference/inference.py
+++ b/src/ogx_client/lib/cli/inference/inference.py
@@ -4,14 +4,14 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
-from typing import Optional, List, Dict
import traceback
+from typing import Dict, List, Optional
import click
from rich.console import Console
-from ...inference.event_logger import EventLogger
from ..common.utils import handle_client_errors
+from ...inference.event_logger import EventLogger
@click.group()
diff --git a/src/llama_stack_client/lib/cli/inspect/__init__.py b/src/ogx_client/lib/cli/inspect/__init__.py
similarity index 100%
rename from src/llama_stack_client/lib/cli/inspect/__init__.py
rename to src/ogx_client/lib/cli/inspect/__init__.py
diff --git a/src/llama_stack_client/lib/cli/inspect/inspect.py b/src/ogx_client/lib/cli/inspect/inspect.py
similarity index 100%
rename from src/llama_stack_client/lib/cli/inspect/inspect.py
rename to src/ogx_client/lib/cli/inspect/inspect.py
diff --git a/src/llama_stack_client/lib/cli/inspect/version.py b/src/ogx_client/lib/cli/inspect/version.py
similarity index 100%
rename from src/llama_stack_client/lib/cli/inspect/version.py
rename to src/ogx_client/lib/cli/inspect/version.py
diff --git a/src/llama_stack_client/lib/cli/llama_stack_client.py b/src/ogx_client/lib/cli/llama_stack_client.py
similarity index 94%
rename from src/llama_stack_client/lib/cli/llama_stack_client.py
rename to src/ogx_client/lib/cli/llama_stack_client.py
index 98070d17..2c7d53c2 100644
--- a/src/llama_stack_client/lib/cli/llama_stack_client.py
+++ b/src/ogx_client/lib/cli/llama_stack_client.py
@@ -4,31 +4,33 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
+from __future__ import annotations
+
import os
from importlib.metadata import version
-import click
import yaml
+import click
-from llama_stack_client import LlamaStackClient
+from ogx_client import OgxClient
+from .eval import eval
+from .models import models
+from .inspect import inspect
+from .shields import shields
+from .datasets import datasets
from .configure import configure
from .constants import get_config_file_path
-from .datasets import datasets
-from .eval import eval
-from .eval_tasks import eval_tasks
from .inference import inference
-from .inspect import inspect
-from .models import models
from .providers import providers
-from .scoring_functions import scoring_functions
-from .shields import shields
+from .eval_tasks import eval_tasks
from .vector_stores import vector_stores
+from .scoring_functions import scoring_functions
@click.group()
@click.help_option("-h", "--help")
-@click.version_option(version=version("llama-stack-client"), prog_name="llama-stack-client")
+@click.version_option(version=version("ogx-client"), prog_name="llama-stack-client")
@click.option("--endpoint", type=str, help="Llama Stack distribution endpoint", default="")
@click.option("--api-key", type=str, help="Llama Stack distribution API key", default="")
@click.option("--config", type=str, help="Path to config file", default=None)
@@ -65,7 +67,7 @@ def llama_stack_client(ctx, endpoint: str, api_key: str, config: str | None):
"Authorization": f"Bearer {api_key}",
}
- client = LlamaStackClient(
+ client = OgxClient(
base_url=endpoint,
provider_data={
"fireworks_api_key": os.environ.get("FIREWORKS_API_KEY", ""),
diff --git a/src/llama_stack_client/lib/cli/models/__init__.py b/src/ogx_client/lib/cli/models/__init__.py
similarity index 100%
rename from src/llama_stack_client/lib/cli/models/__init__.py
rename to src/ogx_client/lib/cli/models/__init__.py
diff --git a/src/llama_stack_client/lib/cli/models/models.py b/src/ogx_client/lib/cli/models/models.py
similarity index 100%
rename from src/llama_stack_client/lib/cli/models/models.py
rename to src/ogx_client/lib/cli/models/models.py
index 24662a55..f18d2530 100644
--- a/src/llama_stack_client/lib/cli/models/models.py
+++ b/src/ogx_client/lib/cli/models/models.py
@@ -8,8 +8,8 @@
from typing import Optional
import click
-from rich.console import Console
from rich.table import Table
+from rich.console import Console
from ..common.utils import handle_client_errors
diff --git a/src/llama_stack_client/lib/cli/providers/__init__.py b/src/ogx_client/lib/cli/providers/__init__.py
similarity index 100%
rename from src/llama_stack_client/lib/cli/providers/__init__.py
rename to src/ogx_client/lib/cli/providers/__init__.py
diff --git a/src/llama_stack_client/lib/cli/providers/inspect.py b/src/ogx_client/lib/cli/providers/inspect.py
similarity index 100%
rename from src/llama_stack_client/lib/cli/providers/inspect.py
rename to src/ogx_client/lib/cli/providers/inspect.py
index b70b556f..69d1d59e 100644
--- a/src/llama_stack_client/lib/cli/providers/inspect.py
+++ b/src/ogx_client/lib/cli/providers/inspect.py
@@ -4,8 +4,8 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
-import click
import yaml
+import click
from rich.console import Console
from ..common.utils import handle_client_errors
diff --git a/src/llama_stack_client/lib/cli/providers/list.py b/src/ogx_client/lib/cli/providers/list.py
similarity index 100%
rename from src/llama_stack_client/lib/cli/providers/list.py
rename to src/ogx_client/lib/cli/providers/list.py
index 708ed9c8..b3c90a64 100644
--- a/src/llama_stack_client/lib/cli/providers/list.py
+++ b/src/ogx_client/lib/cli/providers/list.py
@@ -5,8 +5,8 @@
# the root directory of this source tree.
import click
-from rich.console import Console
from rich.table import Table
+from rich.console import Console
from ..common.utils import handle_client_errors
diff --git a/src/llama_stack_client/lib/cli/providers/providers.py b/src/ogx_client/lib/cli/providers/providers.py
similarity index 100%
rename from src/llama_stack_client/lib/cli/providers/providers.py
rename to src/ogx_client/lib/cli/providers/providers.py
diff --git a/src/llama_stack_client/lib/cli/scoring_functions/__init__.py b/src/ogx_client/lib/cli/scoring_functions/__init__.py
similarity index 100%
rename from src/llama_stack_client/lib/cli/scoring_functions/__init__.py
rename to src/ogx_client/lib/cli/scoring_functions/__init__.py
diff --git a/src/llama_stack_client/lib/cli/scoring_functions/list.py b/src/ogx_client/lib/cli/scoring_functions/list.py
similarity index 100%
rename from src/llama_stack_client/lib/cli/scoring_functions/list.py
rename to src/ogx_client/lib/cli/scoring_functions/list.py
index b4bb3b70..ad69a82e 100644
--- a/src/llama_stack_client/lib/cli/scoring_functions/list.py
+++ b/src/ogx_client/lib/cli/scoring_functions/list.py
@@ -5,8 +5,8 @@
# the root directory of this source tree.
import click
-from rich.console import Console
from rich.table import Table
+from rich.console import Console
from ..common.utils import handle_client_errors
diff --git a/src/llama_stack_client/lib/cli/scoring_functions/scoring_functions.py b/src/ogx_client/lib/cli/scoring_functions/scoring_functions.py
similarity index 100%
rename from src/llama_stack_client/lib/cli/scoring_functions/scoring_functions.py
rename to src/ogx_client/lib/cli/scoring_functions/scoring_functions.py
index ba7b58eb..aea7f46a 100644
--- a/src/llama_stack_client/lib/cli/scoring_functions/scoring_functions.py
+++ b/src/ogx_client/lib/cli/scoring_functions/scoring_functions.py
@@ -7,8 +7,8 @@
import json
from typing import Optional
-import click
import yaml
+import click
from .list import list_scoring_functions
diff --git a/src/llama_stack_client/lib/cli/shields/__init__.py b/src/ogx_client/lib/cli/shields/__init__.py
similarity index 100%
rename from src/llama_stack_client/lib/cli/shields/__init__.py
rename to src/ogx_client/lib/cli/shields/__init__.py
diff --git a/src/llama_stack_client/lib/cli/shields/shields.py b/src/ogx_client/lib/cli/shields/shields.py
similarity index 100%
rename from src/llama_stack_client/lib/cli/shields/shields.py
rename to src/ogx_client/lib/cli/shields/shields.py
index 5a3177f9..49ee919d 100644
--- a/src/llama_stack_client/lib/cli/shields/shields.py
+++ b/src/ogx_client/lib/cli/shields/shields.py
@@ -6,10 +6,10 @@
from typing import Optional
-import click
import yaml
-from rich.console import Console
+import click
from rich.table import Table
+from rich.console import Console
from ..common.utils import handle_client_errors
diff --git a/src/llama_stack_client/lib/cli/vector_stores/__init__.py b/src/ogx_client/lib/cli/vector_stores/__init__.py
similarity index 100%
rename from src/llama_stack_client/lib/cli/vector_stores/__init__.py
rename to src/ogx_client/lib/cli/vector_stores/__init__.py
diff --git a/src/llama_stack_client/lib/cli/vector_stores/vector_stores.py b/src/ogx_client/lib/cli/vector_stores/vector_stores.py
similarity index 100%
rename from src/llama_stack_client/lib/cli/vector_stores/vector_stores.py
rename to src/ogx_client/lib/cli/vector_stores/vector_stores.py
index 128f3f2e..0c0408c7 100644
--- a/src/llama_stack_client/lib/cli/vector_stores/vector_stores.py
+++ b/src/ogx_client/lib/cli/vector_stores/vector_stores.py
@@ -6,10 +6,10 @@
from typing import Optional
-import click
import yaml
-from rich.console import Console
+import click
from rich.table import Table
+from rich.console import Console
from ..common.utils import handle_client_errors
diff --git a/src/llama_stack_client/lib/inference/__init__.py b/src/ogx_client/lib/inference/__init__.py
similarity index 100%
rename from src/llama_stack_client/lib/inference/__init__.py
rename to src/ogx_client/lib/inference/__init__.py
diff --git a/src/llama_stack_client/lib/inference/event_logger.py b/src/ogx_client/lib/inference/event_logger.py
similarity index 97%
rename from src/llama_stack_client/lib/inference/event_logger.py
rename to src/ogx_client/lib/inference/event_logger.py
index cbf5f680..b9f39045 100644
--- a/src/llama_stack_client/lib/inference/event_logger.py
+++ b/src/ogx_client/lib/inference/event_logger.py
@@ -4,8 +4,10 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from typing import Generator
+
from termcolor import cprint
-from llama_stack_client.types import ChatCompletionChunk
+
+from ogx_client.types import ChatCompletionChunk
class InferenceStreamPrintableEvent:
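# --- Illustrative sketch: consuming a streamed chat completion with the
# EventLogger from this module. The .log(...) iterator and .print() calls are
# assumptions based on how this logger is typically used; the exact API may
# differ.
from ogx_client import OgxClient
from ogx_client.lib.inference.event_logger import EventLogger

client = OgxClient(base_url="http://localhost:8321")  # placeholder endpoint
stream = client.chat.completions.create(
    model="example-model",  # placeholder model id
    messages=[{"role": "user", "content": "Hello"}],
    stream=True,
)
for event in EventLogger().log(stream):
    event.print()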
diff --git a/src/llama_stack_client/lib/inference/utils.py b/src/ogx_client/lib/inference/utils.py
similarity index 100%
rename from src/llama_stack_client/lib/inference/utils.py
rename to src/ogx_client/lib/inference/utils.py
index 24ed7cd1..060ce8b3 100644
--- a/src/llama_stack_client/lib/inference/utils.py
+++ b/src/ogx_client/lib/inference/utils.py
@@ -4,8 +4,8 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
-import pathlib
import base64
+import pathlib
class MessageAttachment:
diff --git a/src/llama_stack_client/lib/inline/inline.py b/src/ogx_client/lib/inline/inline.py
similarity index 100%
rename from src/llama_stack_client/lib/inline/inline.py
rename to src/ogx_client/lib/inline/inline.py
diff --git a/src/llama_stack_client/lib/stream_printer.py b/src/ogx_client/lib/stream_printer.py
similarity index 100%
rename from src/llama_stack_client/lib/stream_printer.py
rename to src/ogx_client/lib/stream_printer.py
diff --git a/src/llama_stack_client/lib/tools/mcp_oauth.py b/src/ogx_client/lib/tools/mcp_oauth.py
similarity index 99%
rename from src/llama_stack_client/lib/tools/mcp_oauth.py
rename to src/ogx_client/lib/tools/mcp_oauth.py
index 6125b873..bbc73e6d 100644
--- a/src/llama_stack_client/lib/tools/mcp_oauth.py
+++ b/src/ogx_client/lib/tools/mcp_oauth.py
@@ -4,17 +4,19 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
-import asyncio
+from __future__ import annotations
+
+import os
+import time
+import uuid
import base64
+import socket
+import asyncio
import hashlib
import logging
-import os
-import socket
import threading
-import time
import urllib.parse
-import uuid
-from http.server import BaseHTTPRequestHandler, HTTPServer
+from http.server import HTTPServer, BaseHTTPRequestHandler
import fire
import requests
diff --git a/src/llama_stack_client/pagination.py b/src/ogx_client/pagination.py
similarity index 58%
rename from src/llama_stack_client/pagination.py
rename to src/ogx_client/pagination.py
index 7ee7118d..ae8fa048 100644
--- a/src/llama_stack_client/pagination.py
+++ b/src/ogx_client/pagination.py
@@ -11,57 +11,11 @@
from ._base_client import BasePage, PageInfo, BaseSyncPage, BaseAsyncPage
-__all__ = ["SyncDatasetsIterrows", "AsyncDatasetsIterrows", "SyncOpenAICursorPage", "AsyncOpenAICursorPage"]
+__all__ = ["SyncOpenAICursorPage", "AsyncOpenAICursorPage"]
_T = TypeVar("_T")
-class SyncDatasetsIterrows(BaseSyncPage[_T], BasePage[_T], Generic[_T]):
- data: List[_T]
- next_index: Optional[int] = None
-
- @override
- def _get_page_items(self) -> List[_T]:
- data = self.data
- if not data:
- return []
- return data
-
- @override
- def next_page_info(self) -> Optional[PageInfo]:
- next_index = self.next_index
- if next_index is None:
- return None # type: ignore[unreachable]
-
- length = len(self._get_page_items())
- current_count = next_index + length
-
- return PageInfo(params={"start_index": current_count})
-
-
-class AsyncDatasetsIterrows(BaseAsyncPage[_T], BasePage[_T], Generic[_T]):
- data: List[_T]
- next_index: Optional[int] = None
-
- @override
- def _get_page_items(self) -> List[_T]:
- data = self.data
- if not data:
- return []
- return data
-
- @override
- def next_page_info(self) -> Optional[PageInfo]:
- next_index = self.next_index
- if next_index is None:
- return None # type: ignore[unreachable]
-
- length = len(self._get_page_items())
- current_count = next_index + length
-
- return PageInfo(params={"start_index": current_count})
-
-
class SyncOpenAICursorPage(BaseSyncPage[_T], BasePage[_T], Generic[_T]):
data: List[_T]
has_more: Optional[bool] = None
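# --- Illustrative sketch of the page-info pattern in the iterrows pages
# removed above: each page derives the request params for the next page, and
# auto-pagination stops once next_page_info() returns None. ExamplePage is a
# simplified stand-in, not the real BasePage machinery.
from typing import List, Optional

class ExamplePage:
    def __init__(self, data: List[int], next_index: Optional[int]) -> None:
        self.data = data
        self.next_index = next_index

    def next_page_info(self) -> Optional[dict]:
        if self.next_index is None:
            return None
        # the next request starts after the rows returned so far
        return {"start_index": self.next_index + len(self.data)}

assert ExamplePage([1, 2, 3], next_index=0).next_page_info() == {"start_index": 3}
assert ExamplePage([1, 2, 3], next_index=None).next_page_info() is None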
diff --git a/src/llama_stack_client/py.typed b/src/ogx_client/py.typed
similarity index 100%
rename from src/llama_stack_client/py.typed
rename to src/ogx_client/py.typed
diff --git a/src/llama_stack_client/resources/__init__.py b/src/ogx_client/resources/__init__.py
similarity index 85%
rename from src/llama_stack_client/resources/__init__.py
rename to src/ogx_client/resources/__init__.py
index 882b3c9d..533a4d37 100644
--- a/src/llama_stack_client/resources/__init__.py
+++ b/src/ogx_client/resources/__init__.py
@@ -6,14 +6,6 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from .beta import (
- BetaResource,
- AsyncBetaResource,
- BetaResourceWithRawResponse,
- AsyncBetaResourceWithRawResponse,
- BetaResourceWithStreamingResponse,
- AsyncBetaResourceWithStreamingResponse,
-)
from .chat import (
ChatResource,
AsyncChatResource,
@@ -86,14 +78,6 @@
PromptsResourceWithStreamingResponse,
AsyncPromptsResourceWithStreamingResponse,
)
-from .scoring import (
- ScoringResource,
- AsyncScoringResource,
- ScoringResourceWithRawResponse,
- AsyncScoringResourceWithRawResponse,
- ScoringResourceWithStreamingResponse,
- AsyncScoringResourceWithStreamingResponse,
-)
from .shields import (
ShieldsResource,
AsyncShieldsResource,
@@ -166,14 +150,6 @@
VectorStoresResourceWithStreamingResponse,
AsyncVectorStoresResourceWithStreamingResponse,
)
-from .scoring_functions import (
- ScoringFunctionsResource,
- AsyncScoringFunctionsResource,
- ScoringFunctionsResourceWithRawResponse,
- AsyncScoringFunctionsResourceWithRawResponse,
- ScoringFunctionsResourceWithStreamingResponse,
- AsyncScoringFunctionsResourceWithStreamingResponse,
-)
__all__ = [
"ResponsesResource",
@@ -266,18 +242,6 @@
"AsyncShieldsResourceWithRawResponse",
"ShieldsResourceWithStreamingResponse",
"AsyncShieldsResourceWithStreamingResponse",
- "ScoringResource",
- "AsyncScoringResource",
- "ScoringResourceWithRawResponse",
- "AsyncScoringResourceWithRawResponse",
- "ScoringResourceWithStreamingResponse",
- "AsyncScoringResourceWithStreamingResponse",
- "ScoringFunctionsResource",
- "AsyncScoringFunctionsResource",
- "ScoringFunctionsResourceWithRawResponse",
- "AsyncScoringFunctionsResourceWithRawResponse",
- "ScoringFunctionsResourceWithStreamingResponse",
- "AsyncScoringFunctionsResourceWithStreamingResponse",
"FilesResource",
"AsyncFilesResource",
"FilesResourceWithRawResponse",
@@ -296,10 +260,4 @@
"AsyncAlphaResourceWithRawResponse",
"AlphaResourceWithStreamingResponse",
"AsyncAlphaResourceWithStreamingResponse",
- "BetaResource",
- "AsyncBetaResource",
- "BetaResourceWithRawResponse",
- "AsyncBetaResourceWithRawResponse",
- "BetaResourceWithStreamingResponse",
- "AsyncBetaResourceWithStreamingResponse",
]
diff --git a/src/llama_stack_client/resources/alpha/__init__.py b/src/ogx_client/resources/alpha/__init__.py
similarity index 64%
rename from src/llama_stack_client/resources/alpha/__init__.py
rename to src/ogx_client/resources/alpha/__init__.py
index 681e5bb0..673e6393 100644
--- a/src/llama_stack_client/resources/alpha/__init__.py
+++ b/src/ogx_client/resources/alpha/__init__.py
@@ -6,14 +6,6 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from .eval import (
- EvalResource,
- AsyncEvalResource,
- EvalResourceWithRawResponse,
- AsyncEvalResourceWithRawResponse,
- EvalResourceWithStreamingResponse,
- AsyncEvalResourceWithStreamingResponse,
-)
from .admin import (
AdminResource,
AsyncAdminResource,
@@ -38,28 +30,8 @@
InferenceResourceWithStreamingResponse,
AsyncInferenceResourceWithStreamingResponse,
)
-from .benchmarks import (
- BenchmarksResource,
- AsyncBenchmarksResource,
- BenchmarksResourceWithRawResponse,
- AsyncBenchmarksResourceWithRawResponse,
- BenchmarksResourceWithStreamingResponse,
- AsyncBenchmarksResourceWithStreamingResponse,
-)
__all__ = [
- "BenchmarksResource",
- "AsyncBenchmarksResource",
- "BenchmarksResourceWithRawResponse",
- "AsyncBenchmarksResourceWithRawResponse",
- "BenchmarksResourceWithStreamingResponse",
- "AsyncBenchmarksResourceWithStreamingResponse",
- "EvalResource",
- "AsyncEvalResource",
- "EvalResourceWithRawResponse",
- "AsyncEvalResourceWithRawResponse",
- "EvalResourceWithStreamingResponse",
- "AsyncEvalResourceWithStreamingResponse",
"AdminResource",
"AsyncAdminResource",
"AdminResourceWithRawResponse",
diff --git a/src/llama_stack_client/resources/alpha/admin.py b/src/ogx_client/resources/alpha/admin.py
similarity index 96%
rename from src/llama_stack_client/resources/alpha/admin.py
rename to src/ogx_client/resources/alpha/admin.py
index 766181c6..02e94944 100644
--- a/src/llama_stack_client/resources/alpha/admin.py
+++ b/src/ogx_client/resources/alpha/admin.py
@@ -36,13 +36,15 @@
class AdminResource(SyncAPIResource):
+ """Administrative APIs for inspecting providers, routes, health, and version."""
+
@cached_property
def with_raw_response(self) -> AdminResourceWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return AdminResourceWithRawResponse(self)
@@ -51,7 +53,7 @@ def with_streaming_response(self) -> AdminResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return AdminResourceWithStreamingResponse(self)
@@ -194,13 +196,15 @@ def version(
class AsyncAdminResource(AsyncAPIResource):
+ """Administrative APIs for inspecting providers, routes, health, and version."""
+
@cached_property
def with_raw_response(self) -> AsyncAdminResourceWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return AsyncAdminResourceWithRawResponse(self)
@@ -209,7 +213,7 @@ def with_streaming_response(self) -> AsyncAdminResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return AsyncAdminResourceWithStreamingResponse(self)
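# --- Illustrative sketch of the .with_raw_response prefix documented above:
# it returns the raw HTTP response (headers, status) instead of the parsed
# model. The .version() method appears in this hunk; .headers and .parse()
# follow the raw-response pattern referenced in the README link and are
# assumptions here.
from ogx_client import OgxClient

client = OgxClient(base_url="http://localhost:8321")  # placeholder endpoint
raw = client.alpha.admin.with_raw_response.version()
print(raw.headers.get("x-request-id"))  # inspect response metadata  # noqa: T201
parsed = raw.parse()  # then deserialize into the typed model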
diff --git a/src/llama_stack_client/resources/alpha/alpha.py b/src/ogx_client/resources/alpha/alpha.py
similarity index 62%
rename from src/llama_stack_client/resources/alpha/alpha.py
rename to src/ogx_client/resources/alpha/alpha.py
index 3c471fc4..434e3a13 100644
--- a/src/llama_stack_client/resources/alpha/alpha.py
+++ b/src/ogx_client/resources/alpha/alpha.py
@@ -17,14 +17,6 @@
AsyncAdminResourceWithStreamingResponse,
)
from ..._compat import cached_property
-from .eval.eval import (
- EvalResource,
- AsyncEvalResource,
- EvalResourceWithRawResponse,
- AsyncEvalResourceWithRawResponse,
- EvalResourceWithStreamingResponse,
- AsyncEvalResourceWithStreamingResponse,
-)
from .inference import (
InferenceResource,
AsyncInferenceResource,
@@ -33,39 +25,20 @@
InferenceResourceWithStreamingResponse,
AsyncInferenceResourceWithStreamingResponse,
)
-from .benchmarks import (
- BenchmarksResource,
- AsyncBenchmarksResource,
- BenchmarksResourceWithRawResponse,
- AsyncBenchmarksResourceWithRawResponse,
- BenchmarksResourceWithStreamingResponse,
- AsyncBenchmarksResourceWithStreamingResponse,
-)
from ..._resource import SyncAPIResource, AsyncAPIResource
__all__ = ["AlphaResource", "AsyncAlphaResource"]
class AlphaResource(SyncAPIResource):
- @cached_property
- def benchmarks(self) -> BenchmarksResource:
- return BenchmarksResource(self._client)
-
- @cached_property
- def eval(self) -> EvalResource:
- """
- Llama Stack Evaluation API for running evaluations on model and agent candidates.
- """
- return EvalResource(self._client)
-
@cached_property
def admin(self) -> AdminResource:
+ """Administrative APIs for inspecting providers, routes, health, and version."""
return AdminResource(self._client)
@cached_property
def inference(self) -> InferenceResource:
- """
- Llama Stack Inference API for generating completions, chat completions, and embeddings.
+ """OGX Inference API for generating completions, chat completions, and embeddings.
This API provides the raw interface to the underlying models. Three kinds of models are supported:
- LLM models: these models generate "raw" and "chat" (conversational) completions.
@@ -80,7 +53,7 @@ def with_raw_response(self) -> AlphaResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return AlphaResourceWithRawResponse(self)
@@ -89,31 +62,20 @@ def with_streaming_response(self) -> AlphaResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return AlphaResourceWithStreamingResponse(self)
class AsyncAlphaResource(AsyncAPIResource):
- @cached_property
- def benchmarks(self) -> AsyncBenchmarksResource:
- return AsyncBenchmarksResource(self._client)
-
- @cached_property
- def eval(self) -> AsyncEvalResource:
- """
- Llama Stack Evaluation API for running evaluations on model and agent candidates.
- """
- return AsyncEvalResource(self._client)
-
@cached_property
def admin(self) -> AsyncAdminResource:
+ """Administrative APIs for inspecting providers, routes, health, and version."""
return AsyncAdminResource(self._client)
@cached_property
def inference(self) -> AsyncInferenceResource:
- """
- Llama Stack Inference API for generating completions, chat completions, and embeddings.
+ """OGX Inference API for generating completions, chat completions, and embeddings.
This API provides the raw interface to the underlying models. Three kinds of models are supported:
- LLM models: these models generate "raw" and "chat" (conversational) completions.
@@ -128,7 +90,7 @@ def with_raw_response(self) -> AsyncAlphaResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return AsyncAlphaResourceWithRawResponse(self)
@@ -137,7 +99,7 @@ def with_streaming_response(self) -> AsyncAlphaResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return AsyncAlphaResourceWithStreamingResponse(self)
@@ -146,25 +108,14 @@ class AlphaResourceWithRawResponse:
def __init__(self, alpha: AlphaResource) -> None:
self._alpha = alpha
- @cached_property
- def benchmarks(self) -> BenchmarksResourceWithRawResponse:
- return BenchmarksResourceWithRawResponse(self._alpha.benchmarks)
-
- @cached_property
- def eval(self) -> EvalResourceWithRawResponse:
- """
- Llama Stack Evaluation API for running evaluations on model and agent candidates.
- """
- return EvalResourceWithRawResponse(self._alpha.eval)
-
@cached_property
def admin(self) -> AdminResourceWithRawResponse:
+ """Administrative APIs for inspecting providers, routes, health, and version."""
return AdminResourceWithRawResponse(self._alpha.admin)
@cached_property
def inference(self) -> InferenceResourceWithRawResponse:
- """
- Llama Stack Inference API for generating completions, chat completions, and embeddings.
+ """OGX Inference API for generating completions, chat completions, and embeddings.
This API provides the raw interface to the underlying models. Three kinds of models are supported:
- LLM models: these models generate "raw" and "chat" (conversational) completions.
@@ -178,25 +129,14 @@ class AsyncAlphaResourceWithRawResponse:
def __init__(self, alpha: AsyncAlphaResource) -> None:
self._alpha = alpha
- @cached_property
- def benchmarks(self) -> AsyncBenchmarksResourceWithRawResponse:
- return AsyncBenchmarksResourceWithRawResponse(self._alpha.benchmarks)
-
- @cached_property
- def eval(self) -> AsyncEvalResourceWithRawResponse:
- """
- Llama Stack Evaluation API for running evaluations on model and agent candidates.
- """
- return AsyncEvalResourceWithRawResponse(self._alpha.eval)
-
@cached_property
def admin(self) -> AsyncAdminResourceWithRawResponse:
+ """Administrative APIs for inspecting providers, routes, health, and version."""
return AsyncAdminResourceWithRawResponse(self._alpha.admin)
@cached_property
def inference(self) -> AsyncInferenceResourceWithRawResponse:
- """
- Llama Stack Inference API for generating completions, chat completions, and embeddings.
+ """OGX Inference API for generating completions, chat completions, and embeddings.
This API provides the raw interface to the underlying models. Three kinds of models are supported:
- LLM models: these models generate "raw" and "chat" (conversational) completions.
@@ -210,25 +150,14 @@ class AlphaResourceWithStreamingResponse:
def __init__(self, alpha: AlphaResource) -> None:
self._alpha = alpha
- @cached_property
- def benchmarks(self) -> BenchmarksResourceWithStreamingResponse:
- return BenchmarksResourceWithStreamingResponse(self._alpha.benchmarks)
-
- @cached_property
- def eval(self) -> EvalResourceWithStreamingResponse:
- """
- Llama Stack Evaluation API for running evaluations on model and agent candidates.
- """
- return EvalResourceWithStreamingResponse(self._alpha.eval)
-
@cached_property
def admin(self) -> AdminResourceWithStreamingResponse:
+ """Administrative APIs for inspecting providers, routes, health, and version."""
return AdminResourceWithStreamingResponse(self._alpha.admin)
@cached_property
def inference(self) -> InferenceResourceWithStreamingResponse:
- """
- Llama Stack Inference API for generating completions, chat completions, and embeddings.
+ """OGX Inference API for generating completions, chat completions, and embeddings.
This API provides the raw interface to the underlying models. Three kinds of models are supported:
- LLM models: these models generate "raw" and "chat" (conversational) completions.
@@ -242,25 +171,14 @@ class AsyncAlphaResourceWithStreamingResponse:
def __init__(self, alpha: AsyncAlphaResource) -> None:
self._alpha = alpha
- @cached_property
- def benchmarks(self) -> AsyncBenchmarksResourceWithStreamingResponse:
- return AsyncBenchmarksResourceWithStreamingResponse(self._alpha.benchmarks)
-
- @cached_property
- def eval(self) -> AsyncEvalResourceWithStreamingResponse:
- """
- Llama Stack Evaluation API for running evaluations on model and agent candidates.
- """
- return AsyncEvalResourceWithStreamingResponse(self._alpha.eval)
-
@cached_property
def admin(self) -> AsyncAdminResourceWithStreamingResponse:
+ """Administrative APIs for inspecting providers, routes, health, and version."""
return AsyncAdminResourceWithStreamingResponse(self._alpha.admin)
@cached_property
def inference(self) -> AsyncInferenceResourceWithStreamingResponse:
- """
- Llama Stack Inference API for generating completions, chat completions, and embeddings.
+ """OGX Inference API for generating completions, chat completions, and embeddings.
This API provides the raw interface to the underlying models. Three kinds of models are supported:
- LLM models: these models generate "raw" and "chat" (conversational) completions.
diff --git a/src/llama_stack_client/resources/alpha/inference.py b/src/ogx_client/resources/alpha/inference.py
similarity index 92%
rename from src/llama_stack_client/resources/alpha/inference.py
rename to src/ogx_client/resources/alpha/inference.py
index 874e6c1e..229a8372 100644
--- a/src/llama_stack_client/resources/alpha/inference.py
+++ b/src/ogx_client/resources/alpha/inference.py
@@ -31,8 +31,7 @@
class InferenceResource(SyncAPIResource):
- """
- Llama Stack Inference API for generating completions, chat completions, and embeddings.
+ """OGX Inference API for generating completions, chat completions, and embeddings.
This API provides the raw interface to the underlying models. Three kinds of models are supported:
- LLM models: these models generate "raw" and "chat" (conversational) completions.
@@ -46,7 +45,7 @@ def with_raw_response(self) -> InferenceResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return InferenceResourceWithRawResponse(self)
@@ -55,7 +54,7 @@ def with_streaming_response(self) -> InferenceResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return InferenceResourceWithStreamingResponse(self)
@@ -118,8 +117,7 @@ def rerank(
class AsyncInferenceResource(AsyncAPIResource):
- """
- Llama Stack Inference API for generating completions, chat completions, and embeddings.
+ """OGX Inference API for generating completions, chat completions, and embeddings.
This API provides the raw interface to the underlying models. Three kinds of models are supported:
- LLM models: these models generate "raw" and "chat" (conversational) completions.
@@ -133,7 +131,7 @@ def with_raw_response(self) -> AsyncInferenceResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return AsyncInferenceResourceWithRawResponse(self)
@@ -142,7 +140,7 @@ def with_streaming_response(self) -> AsyncInferenceResourceWithStreamingResponse
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return AsyncInferenceResourceWithStreamingResponse(self)
diff --git a/src/llama_stack_client/resources/batches.py b/src/ogx_client/resources/batches.py
similarity index 97%
rename from src/llama_stack_client/resources/batches.py
rename to src/ogx_client/resources/batches.py
index fd6e6040..99b370dc 100644
--- a/src/llama_stack_client/resources/batches.py
+++ b/src/ogx_client/resources/batches.py
@@ -50,7 +50,7 @@ def with_raw_response(self) -> BatchesResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return BatchesResourceWithRawResponse(self)
@@ -59,7 +59,7 @@ def with_streaming_response(self) -> BatchesResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return BatchesResourceWithStreamingResponse(self)
@@ -252,7 +252,7 @@ def with_raw_response(self) -> AsyncBatchesResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return AsyncBatchesResourceWithRawResponse(self)
@@ -261,7 +261,7 @@ def with_streaming_response(self) -> AsyncBatchesResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return AsyncBatchesResourceWithStreamingResponse(self)
diff --git a/src/llama_stack_client/resources/chat/__init__.py b/src/ogx_client/resources/chat/__init__.py
similarity index 100%
rename from src/llama_stack_client/resources/chat/__init__.py
rename to src/ogx_client/resources/chat/__init__.py
diff --git a/src/llama_stack_client/resources/chat/chat.py b/src/ogx_client/resources/chat/chat.py
similarity index 83%
rename from src/llama_stack_client/resources/chat/chat.py
rename to src/ogx_client/resources/chat/chat.py
index ce3247ef..69a41e4a 100644
--- a/src/llama_stack_client/resources/chat/chat.py
+++ b/src/ogx_client/resources/chat/chat.py
@@ -25,8 +25,7 @@
class ChatResource(SyncAPIResource):
@cached_property
def completions(self) -> CompletionsResource:
- """
- Llama Stack Inference API for generating completions, chat completions, and embeddings.
+ """OGX Inference API for generating completions, chat completions, and embeddings.
This API provides the raw interface to the underlying models. Three kinds of models are supported:
- LLM models: these models generate "raw" and "chat" (conversational) completions.
@@ -41,7 +40,7 @@ def with_raw_response(self) -> ChatResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return ChatResourceWithRawResponse(self)
@@ -50,7 +49,7 @@ def with_streaming_response(self) -> ChatResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return ChatResourceWithStreamingResponse(self)
@@ -58,8 +57,7 @@ def with_streaming_response(self) -> ChatResourceWithStreamingResponse:
class AsyncChatResource(AsyncAPIResource):
@cached_property
def completions(self) -> AsyncCompletionsResource:
- """
- Llama Stack Inference API for generating completions, chat completions, and embeddings.
+ """OGX Inference API for generating completions, chat completions, and embeddings.
This API provides the raw interface to the underlying models. Three kinds of models are supported:
- LLM models: these models generate "raw" and "chat" (conversational) completions.
@@ -74,7 +72,7 @@ def with_raw_response(self) -> AsyncChatResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return AsyncChatResourceWithRawResponse(self)
@@ -83,7 +81,7 @@ def with_streaming_response(self) -> AsyncChatResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return AsyncChatResourceWithStreamingResponse(self)
@@ -94,8 +92,7 @@ def __init__(self, chat: ChatResource) -> None:
@cached_property
def completions(self) -> CompletionsResourceWithRawResponse:
- """
- Llama Stack Inference API for generating completions, chat completions, and embeddings.
+ """OGX Inference API for generating completions, chat completions, and embeddings.
This API provides the raw interface to the underlying models. Three kinds of models are supported:
- LLM models: these models generate "raw" and "chat" (conversational) completions.
@@ -111,8 +108,7 @@ def __init__(self, chat: AsyncChatResource) -> None:
@cached_property
def completions(self) -> AsyncCompletionsResourceWithRawResponse:
- """
- Llama Stack Inference API for generating completions, chat completions, and embeddings.
+ """OGX Inference API for generating completions, chat completions, and embeddings.
This API provides the raw interface to the underlying models. Three kinds of models are supported:
- LLM models: these models generate "raw" and "chat" (conversational) completions.
@@ -128,8 +124,7 @@ def __init__(self, chat: ChatResource) -> None:
@cached_property
def completions(self) -> CompletionsResourceWithStreamingResponse:
- """
- Llama Stack Inference API for generating completions, chat completions, and embeddings.
+ """OGX Inference API for generating completions, chat completions, and embeddings.
This API provides the raw interface to the underlying models. Three kinds of models are supported:
- LLM models: these models generate "raw" and "chat" (conversational) completions.
@@ -145,8 +140,7 @@ def __init__(self, chat: AsyncChatResource) -> None:
@cached_property
def completions(self) -> AsyncCompletionsResourceWithStreamingResponse:
- """
- Llama Stack Inference API for generating completions, chat completions, and embeddings.
+ """OGX Inference API for generating completions, chat completions, and embeddings.
This API provides the raw interface to the underlying models. Three kinds of models are supported:
- LLM models: these models generate "raw" and "chat" (conversational) completions.
diff --git a/src/llama_stack_client/resources/chat/completions.py b/src/ogx_client/resources/chat/completions.py
similarity index 98%
rename from src/llama_stack_client/resources/chat/completions.py
rename to src/ogx_client/resources/chat/completions.py
index 182c8517..7a093472 100644
--- a/src/llama_stack_client/resources/chat/completions.py
+++ b/src/ogx_client/resources/chat/completions.py
@@ -35,8 +35,7 @@
class CompletionsResource(SyncAPIResource):
- """
- Llama Stack Inference API for generating completions, chat completions, and embeddings.
+ """OGX Inference API for generating completions, chat completions, and embeddings.
This API provides the raw interface to the underlying models. Three kinds of models are supported:
- LLM models: these models generate "raw" and "chat" (conversational) completions.
@@ -50,7 +49,7 @@ def with_raw_response(self) -> CompletionsResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return CompletionsResourceWithRawResponse(self)
@@ -59,7 +58,7 @@ def with_streaming_response(self) -> CompletionsResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return CompletionsResourceWithStreamingResponse(self)
@@ -557,8 +556,7 @@ def list(
class AsyncCompletionsResource(AsyncAPIResource):
- """
- Llama Stack Inference API for generating completions, chat completions, and embeddings.
+ """OGX Inference API for generating completions, chat completions, and embeddings.
This API provides the raw interface to the underlying models. Three kinds of models are supported:
- LLM models: these models generate "raw" and "chat" (conversational) completions.
@@ -572,7 +570,7 @@ def with_raw_response(self) -> AsyncCompletionsResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return AsyncCompletionsResourceWithRawResponse(self)
@@ -581,7 +579,7 @@ def with_streaming_response(self) -> AsyncCompletionsResourceWithStreamingRespon
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return AsyncCompletionsResourceWithStreamingResponse(self)
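# --- Illustrative sketch of the .with_streaming_response alternative
# described above: unlike .with_raw_response it does not eagerly read the
# response body, so it is used as a context manager. The helper methods on
# the response object are assumptions here.
from ogx_client import OgxClient

client = OgxClient(base_url="http://localhost:8321")  # placeholder endpoint
with client.chat.completions.with_streaming_response.create(
    model="example-model",  # placeholder model id
    messages=[{"role": "user", "content": "Hello"}],
) as response:
    print(response.headers.get("content-type"))  # noqa: T201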
diff --git a/src/llama_stack_client/resources/completions.py b/src/ogx_client/resources/completions.py
similarity index 97%
rename from src/llama_stack_client/resources/completions.py
rename to src/ogx_client/resources/completions.py
index 8c2f1990..ae8189f3 100644
--- a/src/llama_stack_client/resources/completions.py
+++ b/src/ogx_client/resources/completions.py
@@ -32,8 +32,7 @@
class CompletionsResource(SyncAPIResource):
- """
- Llama Stack Inference API for generating completions, chat completions, and embeddings.
+ """OGX Inference API for generating completions, chat completions, and embeddings.
This API provides the raw interface to the underlying models. Three kinds of models are supported:
- LLM models: these models generate "raw" and "chat" (conversational) completions.
@@ -47,7 +46,7 @@ def with_raw_response(self) -> CompletionsResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return CompletionsResourceWithRawResponse(self)
@@ -56,7 +55,7 @@ def with_streaming_response(self) -> CompletionsResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return CompletionsResourceWithStreamingResponse(self)
@@ -366,8 +365,7 @@ def create(
class AsyncCompletionsResource(AsyncAPIResource):
- """
- Llama Stack Inference API for generating completions, chat completions, and embeddings.
+ """OGX Inference API for generating completions, chat completions, and embeddings.
This API provides the raw interface to the underlying models. Three kinds of models are supported:
- LLM models: these models generate "raw" and "chat" (conversational) completions.
@@ -381,7 +379,7 @@ def with_raw_response(self) -> AsyncCompletionsResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return AsyncCompletionsResourceWithRawResponse(self)
@@ -390,7 +388,7 @@ def with_streaming_response(self) -> AsyncCompletionsResourceWithStreamingRespon
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return AsyncCompletionsResourceWithStreamingResponse(self)
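
`with_streaming_response` is the lazy counterpart: it defers reading the body until it is consumed, which the docstrings above reference but never show. A sketch under the same assumptions as the previous example:

    # .with_streaming_response returns a context manager and does not read
    # the body eagerly -- useful for large or streamed payloads.
    with client.completions.with_streaming_response.create(
        model="my-model",
        prompt="Hello, world",
    ) as response:
        print(response.headers.get("content-type"))
        for line in response.iter_lines():   # consume the body incrementally
            print(line)
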
diff --git a/src/llama_stack_client/resources/conversations/__init__.py b/src/ogx_client/resources/conversations/__init__.py
similarity index 100%
rename from src/llama_stack_client/resources/conversations/__init__.py
rename to src/ogx_client/resources/conversations/__init__.py
diff --git a/src/llama_stack_client/resources/conversations/conversations.py b/src/ogx_client/resources/conversations/conversations.py
similarity index 97%
rename from src/llama_stack_client/resources/conversations/conversations.py
rename to src/ogx_client/resources/conversations/conversations.py
index d363e963..67e5fda3 100644
--- a/src/llama_stack_client/resources/conversations/conversations.py
+++ b/src/ogx_client/resources/conversations/conversations.py
@@ -52,7 +52,7 @@ def with_raw_response(self) -> ConversationsResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return ConversationsResourceWithRawResponse(self)
@@ -61,7 +61,7 @@ def with_streaming_response(self) -> ConversationsResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return ConversationsResourceWithStreamingResponse(self)
@@ -232,7 +232,7 @@ def with_raw_response(self) -> AsyncConversationsResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return AsyncConversationsResourceWithRawResponse(self)
@@ -241,7 +241,7 @@ def with_streaming_response(self) -> AsyncConversationsResourceWithStreamingResp
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return AsyncConversationsResourceWithStreamingResponse(self)
diff --git a/src/llama_stack_client/resources/conversations/items.py b/src/ogx_client/resources/conversations/items.py
similarity index 97%
rename from src/llama_stack_client/resources/conversations/items.py
rename to src/ogx_client/resources/conversations/items.py
index c337272a..0c878bbc 100644
--- a/src/llama_stack_client/resources/conversations/items.py
+++ b/src/ogx_client/resources/conversations/items.py
@@ -43,7 +43,7 @@ def with_raw_response(self) -> ItemsResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return ItemsResourceWithRawResponse(self)
@@ -52,7 +52,7 @@ def with_streaming_response(self) -> ItemsResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return ItemsResourceWithStreamingResponse(self)
@@ -260,7 +260,7 @@ def with_raw_response(self) -> AsyncItemsResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return AsyncItemsResourceWithRawResponse(self)
@@ -269,7 +269,7 @@ def with_streaming_response(self) -> AsyncItemsResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return AsyncItemsResourceWithStreamingResponse(self)
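
Only the raw/streaming plumbing of the conversations resources is visible in this diff; the calls themselves are not shown. As a purely hypothetical sketch (method names assumed to mirror OpenAI's Conversations API, not confirmed here):

    # Hypothetical usage; create()/list() signatures are assumptions.
    conversation = client.conversations.create()
    items = client.conversations.items.list(conversation_id=conversation.id)
    for item in items:
        print(item)
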
diff --git a/src/llama_stack_client/resources/embeddings.py b/src/ogx_client/resources/embeddings.py
similarity index 92%
rename from src/llama_stack_client/resources/embeddings.py
rename to src/ogx_client/resources/embeddings.py
index 57fe1ac1..689fff92 100644
--- a/src/llama_stack_client/resources/embeddings.py
+++ b/src/ogx_client/resources/embeddings.py
@@ -31,8 +31,7 @@
class EmbeddingsResource(SyncAPIResource):
- """
- Llama Stack Inference API for generating completions, chat completions, and embeddings.
+ """OGX Inference API for generating completions, chat completions, and embeddings.
This API provides the raw interface to the underlying models. Three kinds of models are supported:
- LLM models: these models generate "raw" and "chat" (conversational) completions.
@@ -46,7 +45,7 @@ def with_raw_response(self) -> EmbeddingsResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return EmbeddingsResourceWithRawResponse(self)
@@ -55,7 +54,7 @@ def with_streaming_response(self) -> EmbeddingsResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return EmbeddingsResourceWithStreamingResponse(self)
@@ -117,8 +116,7 @@ def create(
class AsyncEmbeddingsResource(AsyncAPIResource):
- """
- Llama Stack Inference API for generating completions, chat completions, and embeddings.
+ """OGX Inference API for generating completions, chat completions, and embeddings.
This API provides the raw interface to the underlying models. Three kinds of models are supported:
- LLM models: these models generate "raw" and "chat" (conversational) completions.
@@ -132,7 +130,7 @@ def with_raw_response(self) -> AsyncEmbeddingsResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return AsyncEmbeddingsResourceWithRawResponse(self)
@@ -141,7 +139,7 @@ def with_streaming_response(self) -> AsyncEmbeddingsResourceWithStreamingRespons
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return AsyncEmbeddingsResourceWithStreamingResponse(self)
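
The embeddings resource exposes the OpenAI-compatible embeddings endpoint; a short usage sketch (the model id is a placeholder):

    # OpenAI-compatible embeddings request; the model id is a placeholder.
    embedding = client.embeddings.create(
        model="my-embedding-model",
        input=["first document", "second document"],
    )
    print(len(embedding.data), "vectors returned")
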
diff --git a/src/llama_stack_client/resources/files.py b/src/ogx_client/resources/files.py
similarity index 96%
rename from src/llama_stack_client/resources/files.py
rename to src/ogx_client/resources/files.py
index 27df58c7..2e51fd9b 100644
--- a/src/llama_stack_client/resources/files.py
+++ b/src/ogx_client/resources/files.py
@@ -14,8 +14,9 @@
import httpx
from ..types import file_list_params, file_create_params
+from .._files import deepcopy_with_paths
from .._types import Body, Omit, Query, Headers, NotGiven, FileTypes, omit, not_given
-from .._utils import extract_files, path_template, maybe_transform, deepcopy_minimal, async_maybe_transform
+from .._utils import extract_files, path_template, maybe_transform, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import (
@@ -33,9 +34,7 @@
class FilesResource(SyncAPIResource):
- """
- This API is used to upload documents that can be used with other Llama Stack APIs.
- """
+ """This API is used to upload documents that can be used with other OGX APIs."""
@cached_property
def with_raw_response(self) -> FilesResourceWithRawResponse:
@@ -43,7 +42,7 @@ def with_raw_response(self) -> FilesResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return FilesResourceWithRawResponse(self)
@@ -52,7 +51,7 @@ def with_streaming_response(self) -> FilesResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return FilesResourceWithStreamingResponse(self)
@@ -87,12 +86,13 @@ def create(
timeout: Override the client-level default timeout for this request, in seconds
"""
- body = deepcopy_minimal(
+ body = deepcopy_with_paths(
{
"file": file,
"purpose": purpose,
"expires_after": expires_after,
- }
+ },
+ [["file"]],
)
files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
# It should be noted that the actual Content-Type header that will be
@@ -272,9 +272,7 @@ def content(
class AsyncFilesResource(AsyncAPIResource):
- """
- This API is used to upload documents that can be used with other Llama Stack APIs.
- """
+ """This API is used to upload documents that can be used with other OGX APIs."""
@cached_property
def with_raw_response(self) -> AsyncFilesResourceWithRawResponse:
@@ -282,7 +280,7 @@ def with_raw_response(self) -> AsyncFilesResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return AsyncFilesResourceWithRawResponse(self)
@@ -291,7 +289,7 @@ def with_streaming_response(self) -> AsyncFilesResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return AsyncFilesResourceWithStreamingResponse(self)
@@ -326,12 +324,13 @@ async def create(
timeout: Override the client-level default timeout for this request, in seconds
"""
- body = deepcopy_minimal(
+ body = deepcopy_with_paths(
{
"file": file,
"purpose": purpose,
"expires_after": expires_after,
- }
+ },
+ [["file"]],
)
files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
# It should be noted that the actual Content-Type header that will be
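
The `deepcopy_minimal` → `deepcopy_with_paths` swap above matters because the `file` value is typically an open file handle, which must be carried over by reference rather than deep-copied. The sketch below illustrates the idea for top-level keys only; it is not the SDK's actual implementation:

    import copy
    from typing import Any, Mapping, Sequence

    def deepcopy_with_paths_sketch(
        obj: Mapping[str, Any], paths: Sequence[Sequence[str]]
    ) -> dict:
        # Illustration only: deep-copy every value except those addressed by
        # `paths` (e.g. [["file"]]), which stay attached by reference so open
        # file handles survive. Handles single-segment top-level paths only.
        keep = {p[0] for p in paths if len(p) == 1}
        return {
            key: value if key in keep else copy.deepcopy(value)
            for key, value in obj.items()
        }
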
diff --git a/src/llama_stack_client/resources/inspect.py b/src/ogx_client/resources/inspect.py
similarity index 89%
rename from src/llama_stack_client/resources/inspect.py
rename to src/ogx_client/resources/inspect.py
index 872af4ef..28e0cbc7 100644
--- a/src/llama_stack_client/resources/inspect.py
+++ b/src/ogx_client/resources/inspect.py
@@ -28,7 +28,7 @@
class InspectResource(SyncAPIResource):
"""
- APIs for inspecting the Llama Stack service, including health status, available API routes with methods and implementing providers.
+ APIs for inspecting the OGX service, including health status and available API routes with their methods and implementing providers.
"""
@cached_property
@@ -37,7 +37,7 @@ def with_raw_response(self) -> InspectResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return InspectResourceWithRawResponse(self)
@@ -46,7 +46,7 @@ def with_streaming_response(self) -> InspectResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return InspectResourceWithStreamingResponse(self)
@@ -91,7 +91,7 @@ def version(
class AsyncInspectResource(AsyncAPIResource):
"""
- APIs for inspecting the Llama Stack service, including health status, available API routes with methods and implementing providers.
+ APIs for inspecting the OGX service, including health status and available API routes with their methods and implementing providers.
"""
@cached_property
@@ -100,7 +100,7 @@ def with_raw_response(self) -> AsyncInspectResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return AsyncInspectResourceWithRawResponse(self)
@@ -109,7 +109,7 @@ def with_streaming_response(self) -> AsyncInspectResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return AsyncInspectResourceWithStreamingResponse(self)
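
Of the inspect calls, only `version()` is visible in the hunk context above; `health()` is implied by the docstring but not shown, so treat it as an assumption:

    print(client.inspect.version())   # shown in the hunk context above
    print(client.inspect.health())    # inferred from the docstring; an assumption
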
diff --git a/src/llama_stack_client/resources/models/__init__.py b/src/ogx_client/resources/models/__init__.py
similarity index 100%
rename from src/llama_stack_client/resources/models/__init__.py
rename to src/ogx_client/resources/models/__init__.py
diff --git a/src/llama_stack_client/resources/models/models.py b/src/ogx_client/resources/models/models.py
similarity index 86%
rename from src/llama_stack_client/resources/models/models.py
rename to src/ogx_client/resources/models/models.py
index 4b2a3cb2..4c0a2229 100644
--- a/src/llama_stack_client/resources/models/models.py
+++ b/src/ogx_client/resources/models/models.py
@@ -8,8 +8,6 @@
from __future__ import annotations
-from typing import Type, cast
-
import httpx
from .openai import (
@@ -30,9 +28,8 @@
async_to_raw_response_wrapper,
async_to_streamed_response_wrapper,
)
-from ..._wrappers import DataWrapper
from ..._base_client import make_request_options
-from ...types.model_list_response import ModelListResponse
+from ...types.list_models_response import ListModelsResponse
from ...types.model_retrieve_response import ModelRetrieveResponse
__all__ = ["ModelsResource", "AsyncModelsResource"]
@@ -49,7 +46,7 @@ def with_raw_response(self) -> ModelsResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return ModelsResourceWithRawResponse(self)
@@ -58,7 +55,7 @@ def with_streaming_response(self) -> ModelsResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return ModelsResourceWithStreamingResponse(self)
@@ -106,18 +103,14 @@ def list(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> ModelListResponse:
+ ) -> ListModelsResponse:
"""List models using the OpenAI API."""
return self._get(
"/v1/models",
options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- post_parser=DataWrapper[ModelListResponse]._unwrapper,
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=cast(Type[ModelListResponse], DataWrapper[ModelListResponse]),
+ cast_to=ListModelsResponse,
)
@@ -132,7 +125,7 @@ def with_raw_response(self) -> AsyncModelsResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return AsyncModelsResourceWithRawResponse(self)
@@ -141,7 +134,7 @@ def with_streaming_response(self) -> AsyncModelsResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return AsyncModelsResourceWithStreamingResponse(self)
@@ -189,18 +182,14 @@ async def list(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> ModelListResponse:
+ ) -> ListModelsResponse:
"""List models using the OpenAI API."""
return await self._get(
"/v1/models",
options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- post_parser=DataWrapper[ModelListResponse]._unwrapper,
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=cast(Type[ModelListResponse], DataWrapper[ModelListResponse]),
+ cast_to=ListModelsResponse,
)
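
The substantive change in this file is the removal of the `DataWrapper` post-parser: `/v1/models` responses are now parsed as the full `ListModelsResponse` envelope rather than being unwrapped to the bare `data` list. Call sites presumably read `.data` themselves now (the attribute name is assumed from the usual OpenAI list envelope):

    # list() now returns the full envelope instead of the unwrapped list;
    # the `.data` attribute is assumed from the standard OpenAI list shape.
    models = client.models.list()
    for model in models.data:
        print(model.id)
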
diff --git a/src/llama_stack_client/resources/models/openai.py b/src/ogx_client/resources/models/openai.py
similarity index 76%
rename from src/llama_stack_client/resources/models/openai.py
rename to src/ogx_client/resources/models/openai.py
index 954d3c6b..9463a785 100644
--- a/src/llama_stack_client/resources/models/openai.py
+++ b/src/ogx_client/resources/models/openai.py
@@ -8,8 +8,6 @@
from __future__ import annotations
-from typing import Type, cast
-
import httpx
from ..._types import Body, Query, Headers, NotGiven, not_given
@@ -21,9 +19,8 @@
async_to_raw_response_wrapper,
async_to_streamed_response_wrapper,
)
-from ..._wrappers import DataWrapper
from ..._base_client import make_request_options
-from ...types.model_list_response import ModelListResponse
+from ...types.list_models_response import ListModelsResponse
__all__ = ["OpenAIResource", "AsyncOpenAIResource"]
@@ -35,7 +32,7 @@ def with_raw_response(self) -> OpenAIResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return OpenAIResourceWithRawResponse(self)
@@ -44,7 +41,7 @@ def with_streaming_response(self) -> OpenAIResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return OpenAIResourceWithStreamingResponse(self)
@@ -57,18 +54,14 @@ def list(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> ModelListResponse:
+ ) -> ListModelsResponse:
"""List models using the OpenAI API."""
return self._get(
"/v1/models",
options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- post_parser=DataWrapper[ModelListResponse]._unwrapper,
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=cast(Type[ModelListResponse], DataWrapper[ModelListResponse]),
+ cast_to=ListModelsResponse,
)
@@ -79,7 +72,7 @@ def with_raw_response(self) -> AsyncOpenAIResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return AsyncOpenAIResourceWithRawResponse(self)
@@ -88,7 +81,7 @@ def with_streaming_response(self) -> AsyncOpenAIResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return AsyncOpenAIResourceWithStreamingResponse(self)
@@ -101,18 +94,14 @@ async def list(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> ModelListResponse:
+ ) -> ListModelsResponse:
"""List models using the OpenAI API."""
return await self._get(
"/v1/models",
options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- post_parser=DataWrapper[ModelListResponse]._unwrapper,
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=cast(Type[ModelListResponse], DataWrapper[ModelListResponse]),
+ cast_to=ListModelsResponse,
)
diff --git a/src/llama_stack_client/resources/moderations.py b/src/ogx_client/resources/moderations.py
similarity index 93%
rename from src/llama_stack_client/resources/moderations.py
rename to src/ogx_client/resources/moderations.py
index bb61a3a7..2c881f1d 100644
--- a/src/llama_stack_client/resources/moderations.py
+++ b/src/ogx_client/resources/moderations.py
@@ -38,7 +38,7 @@ def with_raw_response(self) -> ModerationsResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return ModerationsResourceWithRawResponse(self)
@@ -47,7 +47,7 @@ def with_streaming_response(self) -> ModerationsResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return ModerationsResourceWithStreamingResponse(self)
@@ -106,7 +106,7 @@ def with_raw_response(self) -> AsyncModerationsResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return AsyncModerationsResourceWithRawResponse(self)
@@ -115,7 +115,7 @@ def with_streaming_response(self) -> AsyncModerationsResourceWithStreamingRespon
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return AsyncModerationsResourceWithStreamingResponse(self)
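
The moderations resource follows the OpenAI moderations shape; a sketch (the model id is a placeholder, and the response field names are assumed from the OpenAI format):

    result = client.moderations.create(
        input="some text to screen",
        model="my-safety-model",      # placeholder model id
    )
    print(result.results[0].flagged)  # field names assumed from OpenAI's format
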
diff --git a/src/llama_stack_client/resources/prompts/__init__.py b/src/ogx_client/resources/prompts/__init__.py
similarity index 100%
rename from src/llama_stack_client/resources/prompts/__init__.py
rename to src/ogx_client/resources/prompts/__init__.py
diff --git a/src/llama_stack_client/resources/prompts/prompts.py b/src/ogx_client/resources/prompts/prompts.py
similarity index 98%
rename from src/llama_stack_client/resources/prompts/prompts.py
rename to src/ogx_client/resources/prompts/prompts.py
index 85fc691f..4853d7b1 100644
--- a/src/llama_stack_client/resources/prompts/prompts.py
+++ b/src/ogx_client/resources/prompts/prompts.py
@@ -58,7 +58,7 @@ def with_raw_response(self) -> PromptsResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return PromptsResourceWithRawResponse(self)
@@ -67,7 +67,7 @@ def with_streaming_response(self) -> PromptsResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return PromptsResourceWithStreamingResponse(self)
@@ -325,7 +325,7 @@ def with_raw_response(self) -> AsyncPromptsResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return AsyncPromptsResourceWithRawResponse(self)
@@ -334,7 +334,7 @@ def with_streaming_response(self) -> AsyncPromptsResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return AsyncPromptsResourceWithStreamingResponse(self)
diff --git a/src/llama_stack_client/resources/prompts/versions.py b/src/ogx_client/resources/prompts/versions.py
similarity index 92%
rename from src/llama_stack_client/resources/prompts/versions.py
rename to src/ogx_client/resources/prompts/versions.py
index a6dbc0c7..5c890891 100644
--- a/src/llama_stack_client/resources/prompts/versions.py
+++ b/src/ogx_client/resources/prompts/versions.py
@@ -38,7 +38,7 @@ def with_raw_response(self) -> VersionsResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return VersionsResourceWithRawResponse(self)
@@ -47,7 +47,7 @@ def with_streaming_response(self) -> VersionsResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return VersionsResourceWithStreamingResponse(self)
@@ -100,7 +100,7 @@ def with_raw_response(self) -> AsyncVersionsResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return AsyncVersionsResourceWithRawResponse(self)
@@ -109,7 +109,7 @@ def with_streaming_response(self) -> AsyncVersionsResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return AsyncVersionsResourceWithStreamingResponse(self)
diff --git a/src/llama_stack_client/resources/providers.py b/src/ogx_client/resources/providers.py
similarity index 94%
rename from src/llama_stack_client/resources/providers.py
rename to src/ogx_client/resources/providers.py
index 0701883b..ef4f93de 100644
--- a/src/llama_stack_client/resources/providers.py
+++ b/src/ogx_client/resources/providers.py
@@ -41,7 +41,7 @@ def with_raw_response(self) -> ProvidersResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return ProvidersResourceWithRawResponse(self)
@@ -50,7 +50,7 @@ def with_streaming_response(self) -> ProvidersResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return ProvidersResourceWithStreamingResponse(self)
@@ -124,7 +124,7 @@ def with_raw_response(self) -> AsyncProvidersResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return AsyncProvidersResourceWithRawResponse(self)
@@ -133,7 +133,7 @@ def with_streaming_response(self) -> AsyncProvidersResourceWithStreamingResponse
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return AsyncProvidersResourceWithStreamingResponse(self)
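
Only the raw/streaming plumbing of the providers resource appears in this diff; the sketch below assumes `list()`/`retrieve()` methods and field names, which this diff does not confirm:

    # Assumed method and field names; treat as illustrative only.
    for provider in client.providers.list():
        print(provider.provider_id, provider.api)
    provider = client.providers.retrieve("my-provider-id")  # placeholder id
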
diff --git a/src/llama_stack_client/resources/responses/__init__.py b/src/ogx_client/resources/responses/__init__.py
similarity index 100%
rename from src/llama_stack_client/resources/responses/__init__.py
rename to src/ogx_client/resources/responses/__init__.py
diff --git a/src/llama_stack_client/resources/responses/input_items.py b/src/ogx_client/resources/responses/input_items.py
similarity index 92%
rename from src/llama_stack_client/resources/responses/input_items.py
rename to src/ogx_client/resources/responses/input_items.py
index 84433369..b84abc1d 100644
--- a/src/llama_stack_client/resources/responses/input_items.py
+++ b/src/ogx_client/resources/responses/input_items.py
@@ -31,13 +31,17 @@
class InputItemsResource(SyncAPIResource):
+ """
+ OpenAI Responses API for agent orchestration with tool use, multi-turn conversations, and background processing.
+ """
+
@cached_property
def with_raw_response(self) -> InputItemsResourceWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return InputItemsResourceWithRawResponse(self)
@@ -46,7 +50,7 @@ def with_streaming_response(self) -> InputItemsResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return InputItemsResourceWithStreamingResponse(self)
@@ -129,13 +133,17 @@ def list(
class AsyncInputItemsResource(AsyncAPIResource):
+ """
+ OpenAI Responses API for agent orchestration with tool use, multi-turn conversations, and background processing.
+ """
+
@cached_property
def with_raw_response(self) -> AsyncInputItemsResourceWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return AsyncInputItemsResourceWithRawResponse(self)
@@ -144,7 +152,7 @@ def with_streaming_response(self) -> AsyncInputItemsResourceWithStreamingRespons
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return AsyncInputItemsResourceWithStreamingResponse(self)
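
A sketch of the input-items listing that the docstrings above describe (the response id is a placeholder and the paginated `.data` shape is assumed):

    page = client.responses.input_items.list("resp_123")  # placeholder id
    for item in page.data:   # paginated list shape assumed
        print(item)
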
diff --git a/src/llama_stack_client/resources/responses/responses.py b/src/ogx_client/resources/responses/responses.py
similarity index 78%
rename from src/llama_stack_client/resources/responses/responses.py
rename to src/ogx_client/resources/responses/responses.py
index 6bc14161..3e2b53e5 100644
--- a/src/llama_stack_client/resources/responses/responses.py
+++ b/src/ogx_client/resources/responses/responses.py
@@ -13,7 +13,7 @@
import httpx
-from ...types import response_list_params, response_create_params
+from ...types import response_list_params, response_create_params, response_compact_params
from ..._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
from ..._utils import path_template, required_args, maybe_transform, async_maybe_transform
from ..._compat import cached_property
@@ -36,6 +36,7 @@
from ...pagination import SyncOpenAICursorPage, AsyncOpenAICursorPage
from ..._base_client import AsyncPaginator, make_request_options
from ...types.response_object import ResponseObject
+from ...types.compacted_response import CompactedResponse
from ...types.response_list_response import ResponseListResponse
from ...types.response_object_stream import ResponseObjectStream
from ...types.response_delete_response import ResponseDeleteResponse
@@ -44,8 +45,15 @@
class ResponsesResource(SyncAPIResource):
+ """
+ OpenAI Responses API for agent orchestration with tool use, multi-turn conversations, and background processing.
+ """
+
@cached_property
def input_items(self) -> InputItemsResource:
+ """
+ OpenAI Responses API for agent orchestration with tool use, multi-turn conversations, and background processing.
+ """
return InputItemsResource(self._client)
@cached_property
@@ -54,7 +62,7 @@ def with_raw_response(self) -> ResponsesResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return ResponsesResourceWithRawResponse(self)
@@ -63,7 +71,7 @@ def with_streaming_response(self) -> ResponsesResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return ResponsesResourceWithStreamingResponse(self)
@@ -74,25 +82,24 @@ def create(
input: Union[
str,
Iterable[
- response_create_params.InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponse
+ response_create_params.InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutput
],
],
model: str,
background: bool | Omit = omit,
+ context_management: Optional[Iterable[response_create_params.ContextManagement]] | Omit = omit,
conversation: Optional[str] | Omit = omit,
frequency_penalty: Optional[float] | Omit = omit,
guardrails: Optional[SequenceNotStr[response_create_params.Guardrail]] | Omit = omit,
- include: Optional[
- List[
- Literal[
- "web_search_call.action.sources",
- "code_interpreter_call.outputs",
- "computer_call_output.output.image_url",
- "file_search_call.results",
- "message.input_image.image_url",
- "message.output_text.logprobs",
- "reasoning.encrypted_content",
- ]
+ include: List[
+ Literal[
+ "web_search_call.action.sources",
+ "code_interpreter_call.outputs",
+ "computer_call_output.output.image_url",
+ "file_search_call.results",
+ "message.input_image.image_url",
+ "message.output_text.logprobs",
+ "reasoning.encrypted_content",
]
]
| Omit = omit,
@@ -109,8 +116,8 @@ def create(
reasoning: Optional[response_create_params.Reasoning] | Omit = omit,
safety_identifier: Optional[str] | Omit = omit,
service_tier: Optional[Literal["auto", "default", "flex", "priority"]] | Omit = omit,
- store: Optional[bool] | Omit = omit,
- stream: Optional[Literal[False]] | Omit = omit,
+ store: bool | Omit = omit,
+ stream: Literal[False] | Omit = omit,
stream_options: Optional[response_create_params.StreamOptions] | Omit = omit,
temperature: Optional[float] | Omit = omit,
text: Optional[response_create_params.Text] | Omit = omit,
@@ -137,6 +144,9 @@ def create(
background: Whether to run the model response in the background. When true, returns
immediately with status 'queued'.
+ context_management: Context management configuration. When set with type 'compaction', automatically
+ compacts conversation history when token count exceeds the compact_threshold.
+
conversation: Optional ID of a conversation to add the response to.
frequency_penalty: Penalizes new tokens based on their frequency in the text so far.
@@ -213,26 +223,25 @@ def create(
input: Union[
str,
Iterable[
- response_create_params.InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponse
+ response_create_params.InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutput
],
],
model: str,
stream: Literal[True],
background: bool | Omit = omit,
+ context_management: Optional[Iterable[response_create_params.ContextManagement]] | Omit = omit,
conversation: Optional[str] | Omit = omit,
frequency_penalty: Optional[float] | Omit = omit,
guardrails: Optional[SequenceNotStr[response_create_params.Guardrail]] | Omit = omit,
- include: Optional[
- List[
- Literal[
- "web_search_call.action.sources",
- "code_interpreter_call.outputs",
- "computer_call_output.output.image_url",
- "file_search_call.results",
- "message.input_image.image_url",
- "message.output_text.logprobs",
- "reasoning.encrypted_content",
- ]
+ include: List[
+ Literal[
+ "web_search_call.action.sources",
+ "code_interpreter_call.outputs",
+ "computer_call_output.output.image_url",
+ "file_search_call.results",
+ "message.input_image.image_url",
+ "message.output_text.logprobs",
+ "reasoning.encrypted_content",
]
]
| Omit = omit,
@@ -249,7 +258,7 @@ def create(
reasoning: Optional[response_create_params.Reasoning] | Omit = omit,
safety_identifier: Optional[str] | Omit = omit,
service_tier: Optional[Literal["auto", "default", "flex", "priority"]] | Omit = omit,
- store: Optional[bool] | Omit = omit,
+ store: bool | Omit = omit,
stream_options: Optional[response_create_params.StreamOptions] | Omit = omit,
temperature: Optional[float] | Omit = omit,
text: Optional[response_create_params.Text] | Omit = omit,
@@ -278,6 +287,9 @@ def create(
background: Whether to run the model response in the background. When true, returns
immediately with status 'queued'.
+ context_management: Context management configuration. When set with type 'compaction', automatically
+ compacts conversation history when token count exceeds the compact_threshold.
+
conversation: Optional ID of a conversation to add the response to.
frequency_penalty: Penalizes new tokens based on their frequency in the text so far.
@@ -352,26 +364,25 @@ def create(
input: Union[
str,
Iterable[
- response_create_params.InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponse
+ response_create_params.InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutput
],
],
model: str,
stream: bool,
background: bool | Omit = omit,
+ context_management: Optional[Iterable[response_create_params.ContextManagement]] | Omit = omit,
conversation: Optional[str] | Omit = omit,
frequency_penalty: Optional[float] | Omit = omit,
guardrails: Optional[SequenceNotStr[response_create_params.Guardrail]] | Omit = omit,
- include: Optional[
- List[
- Literal[
- "web_search_call.action.sources",
- "code_interpreter_call.outputs",
- "computer_call_output.output.image_url",
- "file_search_call.results",
- "message.input_image.image_url",
- "message.output_text.logprobs",
- "reasoning.encrypted_content",
- ]
+ include: List[
+ Literal[
+ "web_search_call.action.sources",
+ "code_interpreter_call.outputs",
+ "computer_call_output.output.image_url",
+ "file_search_call.results",
+ "message.input_image.image_url",
+ "message.output_text.logprobs",
+ "reasoning.encrypted_content",
]
]
| Omit = omit,
@@ -388,7 +399,7 @@ def create(
reasoning: Optional[response_create_params.Reasoning] | Omit = omit,
safety_identifier: Optional[str] | Omit = omit,
service_tier: Optional[Literal["auto", "default", "flex", "priority"]] | Omit = omit,
- store: Optional[bool] | Omit = omit,
+ store: bool | Omit = omit,
stream_options: Optional[response_create_params.StreamOptions] | Omit = omit,
temperature: Optional[float] | Omit = omit,
text: Optional[response_create_params.Text] | Omit = omit,
@@ -417,6 +428,9 @@ def create(
background: Whether to run the model response in the background. When true, returns
immediately with status 'queued'.
+ context_management: Context management configuration. When set with type 'compaction', automatically
+ compacts conversation history when token count exceeds the compact_threshold.
+
conversation: Optional ID of a conversation to add the response to.
frequency_penalty: Penalizes new tokens based on their frequency in the text so far.
@@ -491,25 +505,24 @@ def create(
input: Union[
str,
Iterable[
- response_create_params.InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponse
+ response_create_params.InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutput
],
],
model: str,
background: bool | Omit = omit,
+ context_management: Optional[Iterable[response_create_params.ContextManagement]] | Omit = omit,
conversation: Optional[str] | Omit = omit,
frequency_penalty: Optional[float] | Omit = omit,
guardrails: Optional[SequenceNotStr[response_create_params.Guardrail]] | Omit = omit,
- include: Optional[
- List[
- Literal[
- "web_search_call.action.sources",
- "code_interpreter_call.outputs",
- "computer_call_output.output.image_url",
- "file_search_call.results",
- "message.input_image.image_url",
- "message.output_text.logprobs",
- "reasoning.encrypted_content",
- ]
+ include: List[
+ Literal[
+ "web_search_call.action.sources",
+ "code_interpreter_call.outputs",
+ "computer_call_output.output.image_url",
+ "file_search_call.results",
+ "message.input_image.image_url",
+ "message.output_text.logprobs",
+ "reasoning.encrypted_content",
]
]
| Omit = omit,
@@ -526,8 +539,8 @@ def create(
reasoning: Optional[response_create_params.Reasoning] | Omit = omit,
safety_identifier: Optional[str] | Omit = omit,
service_tier: Optional[Literal["auto", "default", "flex", "priority"]] | Omit = omit,
- store: Optional[bool] | Omit = omit,
- stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
+ store: bool | Omit = omit,
+ stream: Literal[False] | Literal[True] | Omit = omit,
stream_options: Optional[response_create_params.StreamOptions] | Omit = omit,
temperature: Optional[float] | Omit = omit,
text: Optional[response_create_params.Text] | Omit = omit,
@@ -550,6 +563,7 @@ def create(
"input": input,
"model": model,
"background": background,
+ "context_management": context_management,
"conversation": conversation,
"frequency_penalty": frequency_penalty,
"guardrails": guardrails,
@@ -715,10 +729,100 @@ def delete(
cast_to=ResponseDeleteResponse,
)
+ def compact(
+ self,
+ *,
+ model: str,
+ input: Union[
+ str,
+ Iterable[
+ response_compact_params.InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutput
+ ],
+ None,
+ ]
+ | Omit = omit,
+ instructions: Optional[str] | Omit = omit,
+ parallel_tool_calls: Optional[bool] | Omit = omit,
+ previous_response_id: Optional[str] | Omit = omit,
+ prompt_cache_key: Optional[str] | Omit = omit,
+ reasoning: Optional[response_compact_params.Reasoning] | Omit = omit,
+ text: Optional[response_compact_params.Text] | Omit = omit,
+ tools: Optional[Iterable[response_compact_params.Tool]] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> CompactedResponse:
+ """
+ **[alpha]** Compresses conversation history into a smaller representation while
+ preserving context. This endpoint is in alpha and may change without notice.
+
+ Args:
+ model: The model to use for generating the compacted summary.
+
+ input: Input message(s) to compact.
+
+ instructions: Instructions to guide the compaction.
+
+ parallel_tool_calls: Whether to enable parallel tool calls. Accepted for compatibility but not used
+ during compaction.
+
+ previous_response_id: ID of a previous response whose history to compact.
+
+ prompt_cache_key: A key to use when reading from or writing to the prompt cache.
+
+ reasoning: Configuration for reasoning effort in OpenAI responses.
+
+ Controls how much reasoning the model performs before generating a response.
+
+ text: Text response configuration for OpenAI responses.
+
+ tools: List of tools available to the model. Accepted for compatibility but not used
+ during compaction.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._post(
+ "/v1/responses/compact",
+ body=maybe_transform(
+ {
+ "model": model,
+ "input": input,
+ "instructions": instructions,
+ "parallel_tool_calls": parallel_tool_calls,
+ "previous_response_id": previous_response_id,
+ "prompt_cache_key": prompt_cache_key,
+ "reasoning": reasoning,
+ "text": text,
+ "tools": tools,
+ },
+ response_compact_params.ResponseCompactParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=CompactedResponse,
+ )
+
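
Taken together, the new `compact()` method and the `context_management` create-parameter cover manual and automatic history compaction. A sketch against the signatures above (ids and thresholds are placeholders; the `context_management` dict shape, including the `compact_threshold` key, is inferred from the docstrings rather than shown explicitly):

    # Manual compaction of a stored response's history (alpha endpoint).
    compacted = client.responses.compact(
        model="my-model",
        previous_response_id="resp_123",   # placeholder id
        instructions="Summarize the conversation, keeping tool results.",
    )

    # Automatic compaction: dict shape inferred from the docstrings above;
    # 'compact_threshold' is an assumption.
    response = client.responses.create(
        model="my-model",
        input="Continue the task",
        context_management=[{"type": "compaction", "compact_threshold": 50000}],
    )
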
class AsyncResponsesResource(AsyncAPIResource):
+ """
+ OpenAI Responses API for agent orchestration with tool use, multi-turn conversations, and background processing.
+ """
+
@cached_property
def input_items(self) -> AsyncInputItemsResource:
+ """
+ OpenAI Responses API for agent orchestration with tool use, multi-turn conversations, and background processing.
+ """
return AsyncInputItemsResource(self._client)
@cached_property
@@ -727,7 +831,7 @@ def with_raw_response(self) -> AsyncResponsesResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return AsyncResponsesResourceWithRawResponse(self)
@@ -736,7 +840,7 @@ def with_streaming_response(self) -> AsyncResponsesResourceWithStreamingResponse
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return AsyncResponsesResourceWithStreamingResponse(self)
@@ -747,25 +851,24 @@ async def create(
input: Union[
str,
Iterable[
- response_create_params.InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponse
+ response_create_params.InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutput
],
],
model: str,
background: bool | Omit = omit,
+ context_management: Optional[Iterable[response_create_params.ContextManagement]] | Omit = omit,
conversation: Optional[str] | Omit = omit,
frequency_penalty: Optional[float] | Omit = omit,
guardrails: Optional[SequenceNotStr[response_create_params.Guardrail]] | Omit = omit,
- include: Optional[
- List[
- Literal[
- "web_search_call.action.sources",
- "code_interpreter_call.outputs",
- "computer_call_output.output.image_url",
- "file_search_call.results",
- "message.input_image.image_url",
- "message.output_text.logprobs",
- "reasoning.encrypted_content",
- ]
+ include: List[
+ Literal[
+ "web_search_call.action.sources",
+ "code_interpreter_call.outputs",
+ "computer_call_output.output.image_url",
+ "file_search_call.results",
+ "message.input_image.image_url",
+ "message.output_text.logprobs",
+ "reasoning.encrypted_content",
]
]
| Omit = omit,
@@ -782,8 +885,8 @@ async def create(
reasoning: Optional[response_create_params.Reasoning] | Omit = omit,
safety_identifier: Optional[str] | Omit = omit,
service_tier: Optional[Literal["auto", "default", "flex", "priority"]] | Omit = omit,
- store: Optional[bool] | Omit = omit,
- stream: Optional[Literal[False]] | Omit = omit,
+ store: bool | Omit = omit,
+ stream: Literal[False] | Omit = omit,
stream_options: Optional[response_create_params.StreamOptions] | Omit = omit,
temperature: Optional[float] | Omit = omit,
text: Optional[response_create_params.Text] | Omit = omit,
@@ -810,6 +913,9 @@ async def create(
background: Whether to run the model response in the background. When true, returns
immediately with status 'queued'.
+ context_management: Context management configuration. When set with type 'compaction', automatically
+ compacts conversation history when token count exceeds the compact_threshold.
+
conversation: Optional ID of a conversation to add the response to.
frequency_penalty: Penalizes new tokens based on their frequency in the text so far.
@@ -886,26 +992,25 @@ async def create(
input: Union[
str,
Iterable[
- response_create_params.InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponse
+ response_create_params.InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutput
],
],
model: str,
stream: Literal[True],
background: bool | Omit = omit,
+ context_management: Optional[Iterable[response_create_params.ContextManagement]] | Omit = omit,
conversation: Optional[str] | Omit = omit,
frequency_penalty: Optional[float] | Omit = omit,
guardrails: Optional[SequenceNotStr[response_create_params.Guardrail]] | Omit = omit,
- include: Optional[
- List[
- Literal[
- "web_search_call.action.sources",
- "code_interpreter_call.outputs",
- "computer_call_output.output.image_url",
- "file_search_call.results",
- "message.input_image.image_url",
- "message.output_text.logprobs",
- "reasoning.encrypted_content",
- ]
+ include: List[
+ Literal[
+ "web_search_call.action.sources",
+ "code_interpreter_call.outputs",
+ "computer_call_output.output.image_url",
+ "file_search_call.results",
+ "message.input_image.image_url",
+ "message.output_text.logprobs",
+ "reasoning.encrypted_content",
]
]
| Omit = omit,
@@ -922,7 +1027,7 @@ async def create(
reasoning: Optional[response_create_params.Reasoning] | Omit = omit,
safety_identifier: Optional[str] | Omit = omit,
service_tier: Optional[Literal["auto", "default", "flex", "priority"]] | Omit = omit,
- store: Optional[bool] | Omit = omit,
+ store: bool | Omit = omit,
stream_options: Optional[response_create_params.StreamOptions] | Omit = omit,
temperature: Optional[float] | Omit = omit,
text: Optional[response_create_params.Text] | Omit = omit,
@@ -951,6 +1056,9 @@ async def create(
background: Whether to run the model response in the background. When true, returns
immediately with status 'queued'.
+ context_management: Context management configuration. When set with type 'compaction', automatically
+ compacts conversation history when token count exceeds the compact_threshold.
+
conversation: Optional ID of a conversation to add the response to.
frequency_penalty: Penalizes new tokens based on their frequency in the text so far.
@@ -1025,26 +1133,25 @@ async def create(
input: Union[
str,
Iterable[
- response_create_params.InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponse
+ response_create_params.InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutput
],
],
model: str,
stream: bool,
background: bool | Omit = omit,
+ context_management: Optional[Iterable[response_create_params.ContextManagement]] | Omit = omit,
conversation: Optional[str] | Omit = omit,
frequency_penalty: Optional[float] | Omit = omit,
guardrails: Optional[SequenceNotStr[response_create_params.Guardrail]] | Omit = omit,
- include: Optional[
- List[
- Literal[
- "web_search_call.action.sources",
- "code_interpreter_call.outputs",
- "computer_call_output.output.image_url",
- "file_search_call.results",
- "message.input_image.image_url",
- "message.output_text.logprobs",
- "reasoning.encrypted_content",
- ]
+ include: List[
+ Literal[
+ "web_search_call.action.sources",
+ "code_interpreter_call.outputs",
+ "computer_call_output.output.image_url",
+ "file_search_call.results",
+ "message.input_image.image_url",
+ "message.output_text.logprobs",
+ "reasoning.encrypted_content",
]
]
| Omit = omit,
@@ -1061,7 +1168,7 @@ async def create(
reasoning: Optional[response_create_params.Reasoning] | Omit = omit,
safety_identifier: Optional[str] | Omit = omit,
service_tier: Optional[Literal["auto", "default", "flex", "priority"]] | Omit = omit,
- store: Optional[bool] | Omit = omit,
+ store: bool | Omit = omit,
stream_options: Optional[response_create_params.StreamOptions] | Omit = omit,
temperature: Optional[float] | Omit = omit,
text: Optional[response_create_params.Text] | Omit = omit,
@@ -1090,6 +1197,9 @@ async def create(
background: Whether to run the model response in the background. When true, returns
immediately with status 'queued'.
+ context_management: Context management configuration. When set with type 'compaction', automatically
+ compacts conversation history when token count exceeds the compact_threshold.
+
conversation: Optional ID of a conversation to add the response to.
frequency_penalty: Penalizes new tokens based on their frequency in the text so far.
@@ -1164,25 +1274,24 @@ async def create(
input: Union[
str,
Iterable[
- response_create_params.InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponse
+ response_create_params.InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutput
],
],
model: str,
background: bool | Omit = omit,
+ context_management: Optional[Iterable[response_create_params.ContextManagement]] | Omit = omit,
conversation: Optional[str] | Omit = omit,
frequency_penalty: Optional[float] | Omit = omit,
guardrails: Optional[SequenceNotStr[response_create_params.Guardrail]] | Omit = omit,
- include: Optional[
- List[
- Literal[
- "web_search_call.action.sources",
- "code_interpreter_call.outputs",
- "computer_call_output.output.image_url",
- "file_search_call.results",
- "message.input_image.image_url",
- "message.output_text.logprobs",
- "reasoning.encrypted_content",
- ]
+ include: List[
+ Literal[
+ "web_search_call.action.sources",
+ "code_interpreter_call.outputs",
+ "computer_call_output.output.image_url",
+ "file_search_call.results",
+ "message.input_image.image_url",
+ "message.output_text.logprobs",
+ "reasoning.encrypted_content",
]
]
| Omit = omit,
@@ -1199,8 +1308,8 @@ async def create(
reasoning: Optional[response_create_params.Reasoning] | Omit = omit,
safety_identifier: Optional[str] | Omit = omit,
service_tier: Optional[Literal["auto", "default", "flex", "priority"]] | Omit = omit,
- store: Optional[bool] | Omit = omit,
- stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
+ store: bool | Omit = omit,
+ stream: Literal[False] | Literal[True] | Omit = omit,
stream_options: Optional[response_create_params.StreamOptions] | Omit = omit,
temperature: Optional[float] | Omit = omit,
text: Optional[response_create_params.Text] | Omit = omit,
@@ -1223,6 +1332,7 @@ async def create(
"input": input,
"model": model,
"background": background,
+ "context_management": context_management,
"conversation": conversation,
"frequency_penalty": frequency_penalty,
"guardrails": guardrails,
@@ -1388,6 +1498,89 @@ async def delete(
cast_to=ResponseDeleteResponse,
)
+ async def compact(
+ self,
+ *,
+ model: str,
+ input: Union[
+ str,
+ Iterable[
+ response_compact_params.InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutput
+ ],
+ None,
+ ]
+ | Omit = omit,
+ instructions: Optional[str] | Omit = omit,
+ parallel_tool_calls: Optional[bool] | Omit = omit,
+ previous_response_id: Optional[str] | Omit = omit,
+ prompt_cache_key: Optional[str] | Omit = omit,
+ reasoning: Optional[response_compact_params.Reasoning] | Omit = omit,
+ text: Optional[response_compact_params.Text] | Omit = omit,
+ tools: Optional[Iterable[response_compact_params.Tool]] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> CompactedResponse:
+ """
+ **[alpha]** Compresses conversation history into a smaller representation while
+ preserving context. This endpoint is in alpha and may change without notice.
+
+ Args:
+ model: The model to use for generating the compacted summary.
+
+ input: Input message(s) to compact.
+
+ instructions: Instructions to guide the compaction.
+
+ parallel_tool_calls: Whether to enable parallel tool calls. Accepted for compatibility but not used
+ during compaction.
+
+ previous_response_id: ID of a previous response whose history to compact.
+
+ prompt_cache_key: A key to use when reading from or writing to the prompt cache.
+
+ reasoning: Configuration for reasoning effort in OpenAI responses.
+
+ Controls how much reasoning the model performs before generating a response.
+
+ text: Text response configuration for OpenAI responses.
+
+ tools: List of tools available to the model. Accepted for compatibility but not used
+ during compaction.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._post(
+ "/v1/responses/compact",
+ body=await async_maybe_transform(
+ {
+ "model": model,
+ "input": input,
+ "instructions": instructions,
+ "parallel_tool_calls": parallel_tool_calls,
+ "previous_response_id": previous_response_id,
+ "prompt_cache_key": prompt_cache_key,
+ "reasoning": reasoning,
+ "text": text,
+ "tools": tools,
+ },
+ response_compact_params.ResponseCompactParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=CompactedResponse,
+ )
+
class ResponsesResourceWithRawResponse:
def __init__(self, responses: ResponsesResource) -> None:
@@ -1405,9 +1598,15 @@ def __init__(self, responses: ResponsesResource) -> None:
self.delete = to_raw_response_wrapper(
responses.delete,
)
+ self.compact = to_raw_response_wrapper(
+ responses.compact,
+ )
@cached_property
def input_items(self) -> InputItemsResourceWithRawResponse:
+ """
+ OpenAI Responses API for agent orchestration with tool use, multi-turn conversations, and background processing.
+ """
return InputItemsResourceWithRawResponse(self._responses.input_items)
@@ -1427,9 +1626,15 @@ def __init__(self, responses: AsyncResponsesResource) -> None:
self.delete = async_to_raw_response_wrapper(
responses.delete,
)
+ self.compact = async_to_raw_response_wrapper(
+ responses.compact,
+ )
@cached_property
def input_items(self) -> AsyncInputItemsResourceWithRawResponse:
+ """
+ OpenAI Responses API for agent orchestration with tool use, multi-turn conversations, and background processing.
+ """
return AsyncInputItemsResourceWithRawResponse(self._responses.input_items)
@@ -1449,9 +1654,15 @@ def __init__(self, responses: ResponsesResource) -> None:
self.delete = to_streamed_response_wrapper(
responses.delete,
)
+ self.compact = to_streamed_response_wrapper(
+ responses.compact,
+ )
@cached_property
def input_items(self) -> InputItemsResourceWithStreamingResponse:
+ """
+ OpenAI Responses API for agent orchestration with tool use, multi-turn conversations, and background processing.
+ """
return InputItemsResourceWithStreamingResponse(self._responses.input_items)
@@ -1471,7 +1682,13 @@ def __init__(self, responses: AsyncResponsesResource) -> None:
self.delete = async_to_streamed_response_wrapper(
responses.delete,
)
+ self.compact = async_to_streamed_response_wrapper(
+ responses.compact,
+ )
@cached_property
def input_items(self) -> AsyncInputItemsResourceWithStreamingResponse:
+ """
+ OpenAI Responses API for agent orchestration with tool use, multi-turn conversations, and background processing.
+ """
return AsyncInputItemsResourceWithStreamingResponse(self._responses.input_items)
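Taken together, the changes to this file add two ways to keep long conversations within budget: the context_management parameter on create (automatic compaction once history passes a token threshold) and the explicit alpha compact endpoint. A minimal sketch of both follows; the client class name (OgxClient) and the exact shape of a compaction entry are assumptions based on the package rename and the docstrings above, not confirmed by this diff:

# Hypothetical usage sketch; the client class name and the field names of the
# context-management entry are assumptions, not shown in this diff.
from ogx_client import OgxClient  # assumed export name

client = OgxClient(base_url="http://localhost:8321")

# Automatic route: ask the server to compact history past ~8k tokens.
response = client.responses.create(
    model="my-model",  # placeholder model ID
    input="Summarize our discussion so far.",
    context_management=[{"type": "compaction", "compact_threshold": 8000}],
)

# Explicit route: compact a prior response's history (alpha, may change).
compacted = client.responses.compact(
    model="my-model",
    previous_response_id=response.id,
    instructions="Keep decisions and open questions; drop pleasantries.",
)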
diff --git a/src/llama_stack_client/resources/routes.py b/src/ogx_client/resources/routes.py
similarity index 90%
rename from src/llama_stack_client/resources/routes.py
rename to src/ogx_client/resources/routes.py
index 92f49c90..6ab42032 100644
--- a/src/llama_stack_client/resources/routes.py
+++ b/src/ogx_client/resources/routes.py
@@ -33,7 +33,7 @@
class RoutesResource(SyncAPIResource):
"""
- APIs for inspecting the Llama Stack service, including health status, available API routes with methods and implementing providers.
+ APIs for inspecting the OGX service, including health status, available API routes with methods and implementing providers.
"""
@cached_property
@@ -42,7 +42,7 @@ def with_raw_response(self) -> RoutesResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return RoutesResourceWithRawResponse(self)
@@ -51,7 +51,7 @@ def with_streaming_response(self) -> RoutesResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return RoutesResourceWithStreamingResponse(self)
@@ -99,7 +99,7 @@ def list(
class AsyncRoutesResource(AsyncAPIResource):
"""
- APIs for inspecting the Llama Stack service, including health status, available API routes with methods and implementing providers.
+ APIs for inspecting the OGX service, including health status, available API routes with methods and implementing providers.
"""
@cached_property
@@ -108,7 +108,7 @@ def with_raw_response(self) -> AsyncRoutesResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return AsyncRoutesResourceWithRawResponse(self)
@@ -117,7 +117,7 @@ def with_streaming_response(self) -> AsyncRoutesResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return AsyncRoutesResourceWithStreamingResponse(self)
diff --git a/src/llama_stack_client/resources/safety.py b/src/ogx_client/resources/safety.py
similarity index 92%
rename from src/llama_stack_client/resources/safety.py
rename to src/ogx_client/resources/safety.py
index 9590a875..365b1f76 100644
--- a/src/llama_stack_client/resources/safety.py
+++ b/src/ogx_client/resources/safety.py
@@ -38,7 +38,7 @@ def with_raw_response(self) -> SafetyResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return SafetyResourceWithRawResponse(self)
@@ -47,7 +47,7 @@ def with_streaming_response(self) -> SafetyResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return SafetyResourceWithStreamingResponse(self)
@@ -104,7 +104,7 @@ def with_raw_response(self) -> AsyncSafetyResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return AsyncSafetyResourceWithRawResponse(self)
@@ -113,7 +113,7 @@ def with_streaming_response(self) -> AsyncSafetyResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return AsyncSafetyResourceWithStreamingResponse(self)
diff --git a/src/llama_stack_client/resources/shields.py b/src/ogx_client/resources/shields.py
similarity index 97%
rename from src/llama_stack_client/resources/shields.py
rename to src/ogx_client/resources/shields.py
index 0358c6f0..3abc3489 100644
--- a/src/llama_stack_client/resources/shields.py
+++ b/src/ogx_client/resources/shields.py
@@ -39,7 +39,7 @@ def with_raw_response(self) -> ShieldsResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return ShieldsResourceWithRawResponse(self)
@@ -48,7 +48,7 @@ def with_streaming_response(self) -> ShieldsResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return ShieldsResourceWithStreamingResponse(self)
@@ -207,7 +207,7 @@ def with_raw_response(self) -> AsyncShieldsResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return AsyncShieldsResourceWithRawResponse(self)
@@ -216,7 +216,7 @@ def with_streaming_response(self) -> AsyncShieldsResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return AsyncShieldsResourceWithStreamingResponse(self)
diff --git a/src/llama_stack_client/resources/vector_io.py b/src/ogx_client/resources/vector_io.py
similarity index 95%
rename from src/llama_stack_client/resources/vector_io.py
rename to src/ogx_client/resources/vector_io.py
index 95b550a2..0e972ca6 100644
--- a/src/llama_stack_client/resources/vector_io.py
+++ b/src/ogx_client/resources/vector_io.py
@@ -36,7 +36,7 @@ def with_raw_response(self) -> VectorIoResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return VectorIoResourceWithRawResponse(self)
@@ -45,7 +45,7 @@ def with_streaming_response(self) -> VectorIoResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return VectorIoResourceWithStreamingResponse(self)
@@ -152,7 +152,7 @@ def with_raw_response(self) -> AsyncVectorIoResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return AsyncVectorIoResourceWithRawResponse(self)
@@ -161,7 +161,7 @@ def with_streaming_response(self) -> AsyncVectorIoResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return AsyncVectorIoResourceWithStreamingResponse(self)
diff --git a/src/llama_stack_client/resources/vector_stores/__init__.py b/src/ogx_client/resources/vector_stores/__init__.py
similarity index 100%
rename from src/llama_stack_client/resources/vector_stores/__init__.py
rename to src/ogx_client/resources/vector_stores/__init__.py
diff --git a/src/llama_stack_client/resources/vector_stores/file_batches.py b/src/ogx_client/resources/vector_stores/file_batches.py
similarity index 92%
rename from src/llama_stack_client/resources/vector_stores/file_batches.py
rename to src/ogx_client/resources/vector_stores/file_batches.py
index e9ea9e65..ea2da2c9 100644
--- a/src/llama_stack_client/resources/vector_stores/file_batches.py
+++ b/src/ogx_client/resources/vector_stores/file_batches.py
@@ -8,7 +8,7 @@
from __future__ import annotations
-from typing import Dict, Optional
+from typing import Dict, Union, Iterable, Optional
import httpx
@@ -38,7 +38,7 @@ def with_raw_response(self) -> FileBatchesResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return FileBatchesResourceWithRawResponse(self)
@@ -47,7 +47,7 @@ def with_streaming_response(self) -> FileBatchesResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return FileBatchesResourceWithStreamingResponse(self)
@@ -55,9 +55,10 @@ def create(
self,
vector_store_id: str,
*,
- file_ids: SequenceNotStr[str],
- attributes: Optional[Dict[str, object]] | Omit = omit,
+ attributes: Optional[Dict[str, Union[str, float, bool]]] | Omit = omit,
chunking_strategy: Optional[file_batch_create_params.ChunkingStrategy] | Omit = omit,
+ file_ids: SequenceNotStr[str] | Omit = omit,
+ files: Optional[Iterable[file_batch_create_params.File]] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -71,6 +72,12 @@ def create(
Args:
vector_store_id: The vector store identifier.
+ attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard. Keys are strings with a maximum
+ length of 64 characters. Values are strings with a maximum length of 512
+ characters, booleans, or numbers.
+
chunking_strategy: Automatic chunking strategy for vector store files.
extra_headers: Send extra headers
@@ -87,9 +94,10 @@ def create(
path_template("/v1/vector_stores/{vector_store_id}/file_batches", vector_store_id=vector_store_id),
body=maybe_transform(
{
- "file_ids": file_ids,
"attributes": attributes,
"chunking_strategy": chunking_strategy,
+ "file_ids": file_ids,
+ "files": files,
},
file_batch_create_params.FileBatchCreateParams,
),
@@ -268,7 +276,7 @@ def with_raw_response(self) -> AsyncFileBatchesResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return AsyncFileBatchesResourceWithRawResponse(self)
@@ -277,7 +285,7 @@ def with_streaming_response(self) -> AsyncFileBatchesResourceWithStreamingRespon
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return AsyncFileBatchesResourceWithStreamingResponse(self)
@@ -285,9 +293,10 @@ async def create(
self,
vector_store_id: str,
*,
- file_ids: SequenceNotStr[str],
- attributes: Optional[Dict[str, object]] | Omit = omit,
+ attributes: Optional[Dict[str, Union[str, float, bool]]] | Omit = omit,
chunking_strategy: Optional[file_batch_create_params.ChunkingStrategy] | Omit = omit,
+ file_ids: SequenceNotStr[str] | Omit = omit,
+ files: Optional[Iterable[file_batch_create_params.File]] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -301,6 +310,12 @@ async def create(
Args:
vector_store_id: The vector store identifier.
+ attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard. Keys are strings with a maximum
+ length of 64 characters. Values are strings with a maximum length of 512
+ characters, booleans, or numbers.
+
chunking_strategy: Automatic chunking strategy for vector store files.
extra_headers: Send extra headers
@@ -317,9 +332,10 @@ async def create(
path_template("/v1/vector_stores/{vector_store_id}/file_batches", vector_store_id=vector_store_id),
body=await async_maybe_transform(
{
- "file_ids": file_ids,
"attributes": attributes,
"chunking_strategy": chunking_strategy,
+ "file_ids": file_ids,
+ "files": files,
},
file_batch_create_params.FileBatchCreateParams,
),
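With file_ids now optional and a files entry list plus typed attributes accepted, a batch can be created from existing file IDs, per-file entries, or both. A short sketch with hypothetical IDs, reusing the client from the earlier example:

# attributes values must now be str, float, or bool, matching the new
# Dict[str, Union[str, float, bool]] annotation; the IDs are hypothetical.
batch = client.vector_stores.file_batches.create(
    vector_store_id="vs_123",
    file_ids=["file-abc", "file-def"],
    attributes={"project": "demo", "priority": 1.0, "archived": False},
)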
diff --git a/src/llama_stack_client/resources/vector_stores/files.py b/src/ogx_client/resources/vector_stores/files.py
similarity index 94%
rename from src/llama_stack_client/resources/vector_stores/files.py
rename to src/ogx_client/resources/vector_stores/files.py
index e7b486a8..e9492dd8 100644
--- a/src/llama_stack_client/resources/vector_stores/files.py
+++ b/src/ogx_client/resources/vector_stores/files.py
@@ -8,7 +8,7 @@
from __future__ import annotations
-from typing import Dict, Optional
+from typing import Dict, Union, Optional
from typing_extensions import Literal
import httpx
@@ -40,7 +40,7 @@ def with_raw_response(self) -> FilesResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return FilesResourceWithRawResponse(self)
@@ -49,7 +49,7 @@ def with_streaming_response(self) -> FilesResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return FilesResourceWithStreamingResponse(self)
@@ -58,7 +58,7 @@ def create(
vector_store_id: str,
*,
file_id: str,
- attributes: Optional[Dict[str, object]] | Omit = omit,
+ attributes: Optional[Dict[str, Union[str, float, bool]]] | Omit = omit,
chunking_strategy: Optional[file_create_params.ChunkingStrategy] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -75,7 +75,11 @@ def create(
file_id: The ID of the file to attach.
- attributes: Attributes to associate with the file.
+ attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard. Keys are strings with a maximum
+ length of 64 characters. Values are strings with a maximum length of 512
+ characters, booleans, or numbers.
chunking_strategy: Strategy for chunking the file content.
@@ -199,7 +203,7 @@ def list(
*,
after: Optional[str] | Omit = omit,
before: Optional[str] | Omit = omit,
- filter: Optional[Literal["completed", "in_progress", "cancelled", "failed"]] | Omit = omit,
+ filter: Optional[Literal["in_progress", "completed", "cancelled", "failed"]] | Omit = omit,
limit: Optional[int] | Omit = omit,
order: Optional[str] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -367,7 +371,7 @@ def with_raw_response(self) -> AsyncFilesResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return AsyncFilesResourceWithRawResponse(self)
@@ -376,7 +380,7 @@ def with_streaming_response(self) -> AsyncFilesResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return AsyncFilesResourceWithStreamingResponse(self)
@@ -385,7 +389,7 @@ async def create(
vector_store_id: str,
*,
file_id: str,
- attributes: Optional[Dict[str, object]] | Omit = omit,
+ attributes: Optional[Dict[str, Union[str, float, bool]]] | Omit = omit,
chunking_strategy: Optional[file_create_params.ChunkingStrategy] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -402,7 +406,11 @@ async def create(
file_id: The ID of the file to attach.
- attributes: Attributes to associate with the file.
+ attributes: Set of 16 key-value pairs that can be attached to an object. This can be useful
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard. Keys are strings with a maximum
+ length of 64 characters. Values are strings with a maximum length of 512
+ characters, booleans, or numbers.
chunking_strategy: Strategy for chunking the file content.
@@ -526,7 +534,7 @@ def list(
*,
after: Optional[str] | Omit = omit,
before: Optional[str] | Omit = omit,
- filter: Optional[Literal["completed", "in_progress", "cancelled", "failed"]] | Omit = omit,
+ filter: Optional[Literal["in_progress", "completed", "cancelled", "failed"]] | Omit = omit,
limit: Optional[int] | Omit = omit,
order: Optional[str] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
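The per-file create gains the same typed attributes, and list keeps the same four filter values in a reordered literal. A sketch with hypothetical IDs, continuing from the client above:

# Attach a file with structured attributes, then list files still processing.
vs_file = client.vector_stores.files.create(
    vector_store_id="vs_123",  # hypothetical ID
    file_id="file-abc",        # hypothetical ID
    attributes={"source": "upload", "page_count": 12.0, "reviewed": True},
)
in_progress = client.vector_stores.files.list(
    vector_store_id="vs_123",
    filter="in_progress",  # still one of in_progress/completed/cancelled/failed
)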
diff --git a/src/llama_stack_client/resources/vector_stores/vector_stores.py b/src/ogx_client/resources/vector_stores/vector_stores.py
similarity index 95%
rename from src/llama_stack_client/resources/vector_stores/vector_stores.py
rename to src/ogx_client/resources/vector_stores/vector_stores.py
index e6b47c7d..05550a4e 100644
--- a/src/llama_stack_client/resources/vector_stores/vector_stores.py
+++ b/src/ogx_client/resources/vector_stores/vector_stores.py
@@ -68,7 +68,7 @@ def with_raw_response(self) -> VectorStoresResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return VectorStoresResourceWithRawResponse(self)
@@ -77,7 +77,7 @@ def with_streaming_response(self) -> VectorStoresResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return VectorStoresResourceWithStreamingResponse(self)
@@ -85,7 +85,8 @@ def create(
self,
*,
chunking_strategy: Optional[vector_store_create_params.ChunkingStrategy] | Omit = omit,
- expires_after: Optional[Dict[str, object]] | Omit = omit,
+ description: Optional[str] | Omit = omit,
+ expires_after: Optional[vector_store_create_params.ExpiresAfter] | Omit = omit,
file_ids: Optional[SequenceNotStr[str]] | Omit = omit,
metadata: Optional[Dict[str, object]] | Omit = omit,
name: Optional[str] | Omit = omit,
@@ -102,6 +103,8 @@ def create(
Args:
chunking_strategy: Automatic chunking strategy for vector store files.
+ expires_after: Expiration policy for a vector store.
+
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -115,6 +118,7 @@ def create(
body=maybe_transform(
{
"chunking_strategy": chunking_strategy,
+ "description": description,
"expires_after": expires_after,
"file_ids": file_ids,
"metadata": metadata,
@@ -167,7 +171,7 @@ def update(
self,
vector_store_id: str,
*,
- expires_after: Optional[Dict[str, object]] | Omit = omit,
+ expires_after: Optional[vector_store_update_params.ExpiresAfter] | Omit = omit,
metadata: Optional[Dict[str, object]] | Omit = omit,
name: Optional[str] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -183,7 +187,7 @@ def update(
Args:
vector_store_id: The vector store identifier.
- expires_after: Expiration policy for the vector store.
+ expires_after: Expiration policy for a vector store.
metadata: Metadata to associate with the vector store.
@@ -311,9 +315,9 @@ def search(
*,
query: Union[str, SequenceNotStr[str]],
filters: Optional[Dict[str, object]] | Omit = omit,
- max_num_results: Optional[int] | Omit = omit,
+ max_num_results: int | Omit = omit,
ranking_options: Optional[vector_store_search_params.RankingOptions] | Omit = omit,
- rewrite_query: Optional[bool] | Omit = omit,
+ rewrite_query: bool | Omit = omit,
search_mode: Optional[str] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -401,7 +405,7 @@ def with_raw_response(self) -> AsyncVectorStoresResourceWithRawResponse:
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#accessing-raw-response-data-eg-headers
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#accessing-raw-response-data-eg-headers
"""
return AsyncVectorStoresResourceWithRawResponse(self)
@@ -410,7 +414,7 @@ def with_streaming_response(self) -> AsyncVectorStoresResourceWithStreamingRespo
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/llamastack/llama-stack-client-python#with_streaming_response
+ For more information, see https://www.github.com/ogx-ai/ogx-client-python#with_streaming_response
"""
return AsyncVectorStoresResourceWithStreamingResponse(self)
@@ -418,7 +422,8 @@ async def create(
self,
*,
chunking_strategy: Optional[vector_store_create_params.ChunkingStrategy] | Omit = omit,
- expires_after: Optional[Dict[str, object]] | Omit = omit,
+ description: Optional[str] | Omit = omit,
+ expires_after: Optional[vector_store_create_params.ExpiresAfter] | Omit = omit,
file_ids: Optional[SequenceNotStr[str]] | Omit = omit,
metadata: Optional[Dict[str, object]] | Omit = omit,
name: Optional[str] | Omit = omit,
@@ -435,6 +440,8 @@ async def create(
Args:
chunking_strategy: Automatic chunking strategy for vector store files.
+ expires_after: Expiration policy for a vector store.
+
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -448,6 +455,7 @@ async def create(
body=await async_maybe_transform(
{
"chunking_strategy": chunking_strategy,
+ "description": description,
"expires_after": expires_after,
"file_ids": file_ids,
"metadata": metadata,
@@ -500,7 +508,7 @@ async def update(
self,
vector_store_id: str,
*,
- expires_after: Optional[Dict[str, object]] | Omit = omit,
+ expires_after: Optional[vector_store_update_params.ExpiresAfter] | Omit = omit,
metadata: Optional[Dict[str, object]] | Omit = omit,
name: Optional[str] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -516,7 +524,7 @@ async def update(
Args:
vector_store_id: The vector store identifier.
- expires_after: Expiration policy for the vector store.
+ expires_after: Expiration policy for a vector store.
metadata: Metadata to associate with the vector store.
@@ -644,9 +652,9 @@ async def search(
*,
query: Union[str, SequenceNotStr[str]],
filters: Optional[Dict[str, object]] | Omit = omit,
- max_num_results: Optional[int] | Omit = omit,
+ max_num_results: int | Omit = omit,
ranking_options: Optional[vector_store_search_params.RankingOptions] | Omit = omit,
- rewrite_query: Optional[bool] | Omit = omit,
+ rewrite_query: bool | Omit = omit,
search_mode: Optional[str] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
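On the store itself, create gains a description and a structured expires_after, while search drops the Optional wrappers on max_num_results and rewrite_query (omit them rather than passing None). A sketch, again reusing the client from above; the anchor/days shape of the expiration policy is an assumption modeled on the OpenAI-style policy, not spelled out in this diff:

store = client.vector_stores.create(
    name="support-docs",
    description="Indexed support articles",                 # new parameter
    expires_after={"anchor": "last_active_at", "days": 7},  # assumed shape
)
results = client.vector_stores.search(
    vector_store_id=store.id,
    query="refund policy",
    max_num_results=5,   # plain int now
    rewrite_query=True,  # plain bool now
)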
diff --git a/src/llama_stack_client/types/__init__.py b/src/ogx_client/types/__init__.py
similarity index 86%
rename from src/llama_stack_client/types/__init__.py
rename to src/ogx_client/types/__init__.py
index b6abf68b..eba1d737 100644
--- a/src/llama_stack_client/types/__init__.py
+++ b/src/ogx_client/types/__init__.py
@@ -17,7 +17,6 @@
HealthInfo as HealthInfo,
VersionInfo as VersionInfo,
ProviderInfo as ProviderInfo,
- ScoringResult as ScoringResult,
SystemMessage as SystemMessage,
SamplingParams as SamplingParams,
SafetyViolation as SafetyViolation,
@@ -27,19 +26,19 @@
InterleavedContentItem as InterleavedContentItem,
)
from .shield import Shield as Shield
-from .scoring_fn import ScoringFn as ScoringFn
from .vector_store import VectorStore as VectorStore
from .create_response import CreateResponse as CreateResponse
from .response_object import ResponseObject as ResponseObject
from .file_list_params import FileListParams as FileListParams
+from .response_message import ResponseMessage as ResponseMessage
from .batch_list_params import BatchListParams as BatchListParams
from .route_list_params import RouteListParams as RouteListParams
+from .compacted_response import CompactedResponse as CompactedResponse
from .file_create_params import FileCreateParams as FileCreateParams
from .batch_create_params import BatchCreateParams as BatchCreateParams
from .batch_list_response import BatchListResponse as BatchListResponse
from .conversation_object import ConversationObject as ConversationObject
from .list_files_response import ListFilesResponse as ListFilesResponse
-from .model_list_response import ModelListResponse as ModelListResponse
from .route_list_response import RouteListResponse as RouteListResponse
from .run_shield_response import RunShieldResponse as RunShieldResponse
from .delete_file_response import DeleteFileResponse as DeleteFileResponse
@@ -48,7 +47,6 @@
from .prompt_list_response import PromptListResponse as PromptListResponse
from .prompt_update_params import PromptUpdateParams as PromptUpdateParams
from .response_list_params import ResponseListParams as ResponseListParams
-from .scoring_score_params import ScoringScoreParams as ScoringScoreParams
from .shield_list_response import ShieldListResponse as ShieldListResponse
from .batch_cancel_response import BatchCancelResponse as BatchCancelResponse
from .batch_create_response import BatchCreateResponse as BatchCreateResponse
@@ -62,12 +60,12 @@
from .response_create_params import ResponseCreateParams as ResponseCreateParams
from .response_list_response import ResponseListResponse as ResponseListResponse
from .response_object_stream import ResponseObjectStream as ResponseObjectStream
-from .scoring_score_response import ScoringScoreResponse as ScoringScoreResponse
from .shield_register_params import ShieldRegisterParams as ShieldRegisterParams
from .vector_io_query_params import VectorIoQueryParams as VectorIoQueryParams
from .batch_retrieve_response import BatchRetrieveResponse as BatchRetrieveResponse
from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams
from .model_retrieve_response import ModelRetrieveResponse as ModelRetrieveResponse
+from .response_compact_params import ResponseCompactParams as ResponseCompactParams
from .vector_io_insert_params import VectorIoInsertParams as VectorIoInsertParams
from .completion_create_params import CompletionCreateParams as CompletionCreateParams
from .moderation_create_params import ModerationCreateParams as ModerationCreateParams
@@ -78,16 +76,11 @@
from .conversation_create_params import ConversationCreateParams as ConversationCreateParams
from .conversation_update_params import ConversationUpdateParams as ConversationUpdateParams
from .create_embeddings_response import CreateEmbeddingsResponse as CreateEmbeddingsResponse
-from .scoring_score_batch_params import ScoringScoreBatchParams as ScoringScoreBatchParams
from .vector_store_create_params import VectorStoreCreateParams as VectorStoreCreateParams
from .vector_store_search_params import VectorStoreSearchParams as VectorStoreSearchParams
from .vector_store_update_params import VectorStoreUpdateParams as VectorStoreUpdateParams
from .list_vector_stores_response import ListVectorStoresResponse as ListVectorStoresResponse
from .conversation_delete_response import ConversationDeleteResponse as ConversationDeleteResponse
-from .scoring_score_batch_response import ScoringScoreBatchResponse as ScoringScoreBatchResponse
from .vector_store_delete_response import VectorStoreDeleteResponse as VectorStoreDeleteResponse
from .vector_store_search_response import VectorStoreSearchResponse as VectorStoreSearchResponse
-from .scoring_function_list_response import ScoringFunctionListResponse as ScoringFunctionListResponse
-from .list_scoring_functions_response import ListScoringFunctionsResponse as ListScoringFunctionsResponse
-from .scoring_function_register_params import ScoringFunctionRegisterParams as ScoringFunctionRegisterParams
from .prompt_set_default_version_params import PromptSetDefaultVersionParams as PromptSetDefaultVersionParams
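For downstream code, the practical effect of this re-export shuffle is that the scoring symbols disappear from the package namespace while the compaction types appear. Imports follow the renamed package path:

# These re-exports are added by this diff; the scoring imports they replace
# (ScoringFn, ScoringScoreParams, ...) would now raise ImportError.
from ogx_client.types import (
    CompactedResponse,
    ResponseCompactParams,
    ResponseMessage,
)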
diff --git a/src/llama_stack_client/types/prompts/__init__.py b/src/ogx_client/types/alpha/__init__.py
similarity index 55%
rename from src/llama_stack_client/types/prompts/__init__.py
rename to src/ogx_client/types/alpha/__init__.py
index d14ed874..cf819c05 100644
--- a/src/llama_stack_client/types/prompts/__init__.py
+++ b/src/ogx_client/types/alpha/__init__.py
@@ -7,3 +7,7 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
+
+from .inference_rerank_params import InferenceRerankParams as InferenceRerankParams
+from .admin_list_routes_params import AdminListRoutesParams as AdminListRoutesParams
+from .inference_rerank_response import InferenceRerankResponse as InferenceRerankResponse
diff --git a/src/llama_stack_client/types/alpha/admin_list_routes_params.py b/src/ogx_client/types/alpha/admin_list_routes_params.py
similarity index 100%
rename from src/llama_stack_client/types/alpha/admin_list_routes_params.py
rename to src/ogx_client/types/alpha/admin_list_routes_params.py
diff --git a/src/llama_stack_client/types/alpha/inference_rerank_params.py b/src/ogx_client/types/alpha/inference_rerank_params.py
similarity index 100%
rename from src/llama_stack_client/types/alpha/inference_rerank_params.py
rename to src/ogx_client/types/alpha/inference_rerank_params.py
diff --git a/src/llama_stack_client/types/alpha/inference_rerank_response.py b/src/ogx_client/types/alpha/inference_rerank_response.py
similarity index 100%
rename from src/llama_stack_client/types/alpha/inference_rerank_response.py
rename to src/ogx_client/types/alpha/inference_rerank_response.py
diff --git a/src/llama_stack_client/types/alpha/post_training/job_artifacts_params.py b/src/ogx_client/types/alpha/post_training/job_artifacts_params.py
similarity index 100%
rename from src/llama_stack_client/types/alpha/post_training/job_artifacts_params.py
rename to src/ogx_client/types/alpha/post_training/job_artifacts_params.py
diff --git a/src/llama_stack_client/types/alpha/post_training/job_cancel_params.py b/src/ogx_client/types/alpha/post_training/job_cancel_params.py
similarity index 100%
rename from src/llama_stack_client/types/alpha/post_training/job_cancel_params.py
rename to src/ogx_client/types/alpha/post_training/job_cancel_params.py
diff --git a/src/llama_stack_client/types/alpha/post_training/job_status_params.py b/src/ogx_client/types/alpha/post_training/job_status_params.py
similarity index 100%
rename from src/llama_stack_client/types/alpha/post_training/job_status_params.py
rename to src/ogx_client/types/alpha/post_training/job_status_params.py
diff --git a/src/llama_stack_client/types/batch_cancel_response.py b/src/ogx_client/types/batch_cancel_response.py
similarity index 89%
rename from src/llama_stack_client/types/batch_cancel_response.py
rename to src/ogx_client/types/batch_cancel_response.py
index 97d8070d..e74b5b1d 100644
--- a/src/llama_stack_client/types/batch_cancel_response.py
+++ b/src/ogx_client/types/batch_cancel_response.py
@@ -66,6 +66,8 @@ def __getattr__(self, attr: str) -> builtins.object: ...
class RequestCounts(BaseModel):
+ """The request counts for different statuses within the batch."""
+
completed: int
failed: int
@@ -86,6 +88,8 @@ def __getattr__(self, attr: str) -> object: ...
class UsageInputTokensDetails(BaseModel):
+ """A detailed breakdown of the input tokens."""
+
cached_tokens: int
if TYPE_CHECKING:
@@ -102,6 +106,8 @@ def __getattr__(self, attr: str) -> object: ...
class UsageOutputTokensDetails(BaseModel):
+ """A detailed breakdown of the output tokens."""
+
reasoning_tokens: int
if TYPE_CHECKING:
@@ -118,13 +124,21 @@ def __getattr__(self, attr: str) -> object: ...
class Usage(BaseModel):
+ """
+ Represents token usage details including input tokens, output tokens, a
+ breakdown of output tokens, and the total tokens used. Only populated on
+ batches created after September 7, 2025.
+ """
+
input_tokens: int
input_tokens_details: UsageInputTokensDetails
+ """A detailed breakdown of the input tokens."""
output_tokens: int
output_tokens_details: UsageOutputTokensDetails
+ """A detailed breakdown of the output tokens."""
total_tokens: int
@@ -185,8 +199,14 @@ class BatchCancelResponse(BaseModel):
output_file_id: Optional[str] = None
request_counts: Optional[RequestCounts] = None
+ """The request counts for different statuses within the batch."""
usage: Optional[Usage] = None
+ """
+ Represents token usage details including input tokens, output tokens, a
+ breakdown of output tokens, and the total tokens used. Only populated on batches
+ created after September 7, 2025.
+ """
if TYPE_CHECKING:
# Some versions of Pydantic <2.8.0 have a bug and don’t allow assigning a
diff --git a/src/llama_stack_client/types/batch_create_params.py b/src/ogx_client/types/batch_create_params.py
similarity index 100%
rename from src/llama_stack_client/types/batch_create_params.py
rename to src/ogx_client/types/batch_create_params.py
diff --git a/src/llama_stack_client/types/batch_create_response.py b/src/ogx_client/types/batch_create_response.py
similarity index 89%
rename from src/llama_stack_client/types/batch_create_response.py
rename to src/ogx_client/types/batch_create_response.py
index a3f12f93..bfcf49f5 100644
--- a/src/llama_stack_client/types/batch_create_response.py
+++ b/src/ogx_client/types/batch_create_response.py
@@ -66,6 +66,8 @@ def __getattr__(self, attr: str) -> builtins.object: ...
class RequestCounts(BaseModel):
+ """The request counts for different statuses within the batch."""
+
completed: int
failed: int
@@ -86,6 +88,8 @@ def __getattr__(self, attr: str) -> object: ...
class UsageInputTokensDetails(BaseModel):
+ """A detailed breakdown of the input tokens."""
+
cached_tokens: int
if TYPE_CHECKING:
@@ -102,6 +106,8 @@ def __getattr__(self, attr: str) -> object: ...
class UsageOutputTokensDetails(BaseModel):
+ """A detailed breakdown of the output tokens."""
+
reasoning_tokens: int
if TYPE_CHECKING:
@@ -118,13 +124,21 @@ def __getattr__(self, attr: str) -> object: ...
class Usage(BaseModel):
+ """
+ Represents token usage details including input tokens, output tokens, a
+ breakdown of output tokens, and the total tokens used. Only populated on
+ batches created after September 7, 2025.
+ """
+
input_tokens: int
input_tokens_details: UsageInputTokensDetails
+ """A detailed breakdown of the input tokens."""
output_tokens: int
output_tokens_details: UsageOutputTokensDetails
+ """A detailed breakdown of the output tokens."""
total_tokens: int
@@ -185,8 +199,14 @@ class BatchCreateResponse(BaseModel):
output_file_id: Optional[str] = None
request_counts: Optional[RequestCounts] = None
+ """The request counts for different statuses within the batch."""
usage: Optional[Usage] = None
+ """
+ Represents token usage details including input tokens, output tokens, a
+ breakdown of output tokens, and the total tokens used. Only populated on batches
+ created after September 7, 2025.
+ """
if TYPE_CHECKING:
# Some versions of Pydantic <2.8.0 have a bug and don’t allow assigning a
diff --git a/src/llama_stack_client/types/batch_list_params.py b/src/ogx_client/types/batch_list_params.py
similarity index 100%
rename from src/llama_stack_client/types/batch_list_params.py
rename to src/ogx_client/types/batch_list_params.py
diff --git a/src/llama_stack_client/types/batch_list_response.py b/src/ogx_client/types/batch_list_response.py
similarity index 89%
rename from src/llama_stack_client/types/batch_list_response.py
rename to src/ogx_client/types/batch_list_response.py
index 236ec213..9f176456 100644
--- a/src/llama_stack_client/types/batch_list_response.py
+++ b/src/ogx_client/types/batch_list_response.py
@@ -66,6 +66,8 @@ def __getattr__(self, attr: str) -> builtins.object: ...
class RequestCounts(BaseModel):
+ """The request counts for different statuses within the batch."""
+
completed: int
failed: int
@@ -86,6 +88,8 @@ def __getattr__(self, attr: str) -> object: ...
class UsageInputTokensDetails(BaseModel):
+ """A detailed breakdown of the input tokens."""
+
cached_tokens: int
if TYPE_CHECKING:
@@ -102,6 +106,8 @@ def __getattr__(self, attr: str) -> object: ...
class UsageOutputTokensDetails(BaseModel):
+ """A detailed breakdown of the output tokens."""
+
reasoning_tokens: int
if TYPE_CHECKING:
@@ -118,13 +124,21 @@ def __getattr__(self, attr: str) -> object: ...
class Usage(BaseModel):
+ """
+ Represents token usage details including input tokens, output tokens, a
+ breakdown of output tokens, and the total tokens used. Only populated on
+ batches created after September 7, 2025.
+ """
+
input_tokens: int
input_tokens_details: UsageInputTokensDetails
+ """A detailed breakdown of the input tokens."""
output_tokens: int
output_tokens_details: UsageOutputTokensDetails
+ """A detailed breakdown of the output tokens."""
total_tokens: int
@@ -185,8 +199,14 @@ class BatchListResponse(BaseModel):
output_file_id: Optional[str] = None
request_counts: Optional[RequestCounts] = None
+ """The request counts for different statuses within the batch."""
usage: Optional[Usage] = None
+ """
+ Represents token usage details including input tokens, output tokens, a
+ breakdown of output tokens, and the total tokens used. Only populated on batches
+ created after September 7, 2025.
+ """
if TYPE_CHECKING:
# Some versions of Pydantic <2.8.0 have a bug and don’t allow assigning a
diff --git a/src/llama_stack_client/types/batch_retrieve_response.py b/src/ogx_client/types/batch_retrieve_response.py
similarity index 89%
rename from src/llama_stack_client/types/batch_retrieve_response.py
rename to src/ogx_client/types/batch_retrieve_response.py
index 429d4f60..02859462 100644
--- a/src/llama_stack_client/types/batch_retrieve_response.py
+++ b/src/ogx_client/types/batch_retrieve_response.py
@@ -66,6 +66,8 @@ def __getattr__(self, attr: str) -> builtins.object: ...
class RequestCounts(BaseModel):
+ """The request counts for different statuses within the batch."""
+
completed: int
failed: int
@@ -86,6 +88,8 @@ def __getattr__(self, attr: str) -> object: ...
class UsageInputTokensDetails(BaseModel):
+ """A detailed breakdown of the input tokens."""
+
cached_tokens: int
if TYPE_CHECKING:
@@ -102,6 +106,8 @@ def __getattr__(self, attr: str) -> object: ...
class UsageOutputTokensDetails(BaseModel):
+ """A detailed breakdown of the output tokens."""
+
reasoning_tokens: int
if TYPE_CHECKING:
@@ -118,13 +124,21 @@ def __getattr__(self, attr: str) -> object: ...
class Usage(BaseModel):
+ """
+ Represents token usage details including input tokens, output tokens, a
+ breakdown of output tokens, and the total tokens used. Only populated on
+ batches created after September 7, 2025.
+ """
+
input_tokens: int
input_tokens_details: UsageInputTokensDetails
+ """A detailed breakdown of the input tokens."""
output_tokens: int
output_tokens_details: UsageOutputTokensDetails
+ """A detailed breakdown of the output tokens."""
total_tokens: int
@@ -185,8 +199,14 @@ class BatchRetrieveResponse(BaseModel):
output_file_id: Optional[str] = None
request_counts: Optional[RequestCounts] = None
+ """The request counts for different statuses within the batch."""
usage: Optional[Usage] = None
+ """
+ Represents token usage details including input tokens, output tokens, a
+ breakdown of output tokens, and the total tokens used. Only populated on batches
+ created after September 7, 2025.
+ """
if TYPE_CHECKING:
# Some versions of Pydantic <2.8.0 have a bug and don’t allow assigning a
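Since usage and request_counts are Optional on every batch model, and usage is only populated on batches created after September 7, 2025, callers should guard before reading the token breakdowns. A sketch, assuming the batches resource exposes the usual retrieve(batch_id), which this section of the diff does not show:

batch = client.batches.retrieve("batch_123")  # hypothetical ID; method shape assumed
if batch.request_counts is not None:
    print("completed:", batch.request_counts.completed, "failed:", batch.request_counts.failed)
if batch.usage is not None:  # None for batches created before 2025-09-07
    print("input tokens:", batch.usage.input_tokens)
    print("cached:", batch.usage.input_tokens_details.cached_tokens)
    print("reasoning:", batch.usage.output_tokens_details.reasoning_tokens)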
diff --git a/src/llama_stack_client/types/chat/__init__.py b/src/ogx_client/types/chat/__init__.py
similarity index 100%
rename from src/llama_stack_client/types/chat/__init__.py
rename to src/ogx_client/types/chat/__init__.py
diff --git a/src/llama_stack_client/types/chat/completion_create_params.py b/src/ogx_client/types/chat/completion_create_params.py
similarity index 100%
rename from src/llama_stack_client/types/chat/completion_create_params.py
rename to src/ogx_client/types/chat/completion_create_params.py
diff --git a/src/llama_stack_client/types/chat/completion_create_response.py b/src/ogx_client/types/chat/completion_create_response.py
similarity index 100%
rename from src/llama_stack_client/types/chat/completion_create_response.py
rename to src/ogx_client/types/chat/completion_create_response.py
diff --git a/src/llama_stack_client/types/chat/completion_list_params.py b/src/ogx_client/types/chat/completion_list_params.py
similarity index 100%
rename from src/llama_stack_client/types/chat/completion_list_params.py
rename to src/ogx_client/types/chat/completion_list_params.py
diff --git a/src/llama_stack_client/types/chat/completion_list_response.py b/src/ogx_client/types/chat/completion_list_response.py
similarity index 100%
rename from src/llama_stack_client/types/chat/completion_list_response.py
rename to src/ogx_client/types/chat/completion_list_response.py
diff --git a/src/llama_stack_client/types/chat/completion_retrieve_response.py b/src/ogx_client/types/chat/completion_retrieve_response.py
similarity index 100%
rename from src/llama_stack_client/types/chat/completion_retrieve_response.py
rename to src/ogx_client/types/chat/completion_retrieve_response.py
diff --git a/src/llama_stack_client/types/chat_completion_chunk.py b/src/ogx_client/types/chat_completion_chunk.py
similarity index 100%
rename from src/llama_stack_client/types/chat_completion_chunk.py
rename to src/ogx_client/types/chat_completion_chunk.py
diff --git a/src/ogx_client/types/compacted_response.py b/src/ogx_client/types/compacted_response.py
new file mode 100644
index 00000000..e0e18b5b
--- /dev/null
+++ b/src/ogx_client/types/compacted_response.py
@@ -0,0 +1,578 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List, Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from .._utils import PropertyInfo
+from .._models import BaseModel
+
+__all__ = [
+ "CompactedResponse",
+ "Output",
+ "OutputOpenAIResponseMessageOutput",
+ "OutputOpenAIResponseMessageOutputContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFile",
+ "OutputOpenAIResponseMessageOutputContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentText",
+ "OutputOpenAIResponseMessageOutputContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentImage",
+ "OutputOpenAIResponseMessageOutputContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentFile",
+ "OutputOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusal",
+ "OutputOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextOutput",
+ "OutputOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextOutputAnnotation",
+ "OutputOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextOutputAnnotationOpenAIResponseAnnotationFileCitation",
+ "OutputOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextOutputAnnotationOpenAIResponseAnnotationCitation",
+ "OutputOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextOutputAnnotationOpenAIResponseAnnotationContainerFileCitation",
+ "OutputOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextOutputAnnotationOpenAIResponseAnnotationFilePath",
+ "OutputOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextOutputLogprob",
+ "OutputOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextOutputLogprobTopLogprob",
+ "OutputOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusalOpenAIResponseContentPartRefusal",
+ "OutputOpenAIResponseOutputMessageWebSearchToolCall",
+ "OutputOpenAIResponseOutputMessageFileSearchToolCall",
+ "OutputOpenAIResponseOutputMessageFileSearchToolCallResult",
+ "OutputOpenAIResponseOutputMessageFunctionToolCall",
+ "OutputOpenAIResponseOutputMessageMcpCall",
+ "OutputOpenAIResponseOutputMessageMcpListTools",
+ "OutputOpenAIResponseOutputMessageMcpListToolsTool",
+ "OutputOpenAIResponseMcpApprovalRequest",
+ "OutputOpenAIResponseOutputMessageReasoningItem",
+ "OutputOpenAIResponseOutputMessageReasoningItemSummary",
+ "OutputOpenAIResponseOutputMessageReasoningItemContent",
+ "OutputOpenAIResponseInputFunctionToolCallOutput",
+ "OutputOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFile",
+ "OutputOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentText",
+ "OutputOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentImage",
+ "OutputOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentFile",
+ "OutputOpenAIResponseMcpApprovalResponse",
+ "OutputOpenAIResponseCompaction",
+ "Usage",
+ "UsageInputTokensDetails",
+ "UsageOutputTokensDetails",
+]
+
+
+class OutputOpenAIResponseMessageOutputContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentText(
+ BaseModel
+):
+ """Text content for input messages in OpenAI response format."""
+
+ text: str
+
+ type: Optional[Literal["input_text"]] = None
+
+
+class OutputOpenAIResponseMessageOutputContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentImage(
+ BaseModel
+):
+ """Image content for input messages in OpenAI response format."""
+
+ detail: Optional[Literal["low", "high", "auto"]] = None
+
+ file_id: Optional[str] = None
+
+ image_url: Optional[str] = None
+
+ type: Optional[Literal["input_image"]] = None
+
+
+class OutputOpenAIResponseMessageOutputContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentFile(
+ BaseModel
+):
+ """File content for input messages in OpenAI response format."""
+
+ file_data: Optional[str] = None
+
+ file_id: Optional[str] = None
+
+ file_url: Optional[str] = None
+
+ filename: Optional[str] = None
+
+ type: Optional[Literal["input_file"]] = None
+
+
+OutputOpenAIResponseMessageOutputContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFile: TypeAlias = Annotated[
+ Union[
+ OutputOpenAIResponseMessageOutputContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentText,
+ OutputOpenAIResponseMessageOutputContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentImage,
+ OutputOpenAIResponseMessageOutputContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentFile,
+ ],
+ PropertyInfo(discriminator="type"),
+]
+
+
+class OutputOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextOutputAnnotationOpenAIResponseAnnotationFileCitation(
+ BaseModel
+):
+ """File citation annotation for referencing specific files in response content."""
+
+ file_id: str
+
+ filename: str
+
+ index: int
+
+ type: Optional[Literal["file_citation"]] = None
+
+
+class OutputOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextOutputAnnotationOpenAIResponseAnnotationCitation(
+ BaseModel
+):
+ """URL citation annotation for referencing external web resources."""
+
+ end_index: int
+
+ start_index: int
+
+ title: str
+
+ url: str
+
+ type: Optional[Literal["url_citation"]] = None
+
+
+class OutputOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextOutputAnnotationOpenAIResponseAnnotationContainerFileCitation(
+ BaseModel
+):
+ """Container file citation annotation referencing a file within a container."""
+
+ container_id: str
+
+ end_index: int
+
+ file_id: str
+
+ filename: str
+
+ start_index: int
+
+ type: Optional[Literal["container_file_citation"]] = None
+
+
+class OutputOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextOutputAnnotationOpenAIResponseAnnotationFilePath(
+ BaseModel
+):
+ """File path annotation referencing a generated file in response content."""
+
+ file_id: str
+
+ index: int
+
+ type: Optional[Literal["file_path"]] = None
+
+
+OutputOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextOutputAnnotation: TypeAlias = Annotated[
+ Union[
+ OutputOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextOutputAnnotationOpenAIResponseAnnotationFileCitation,
+ OutputOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextOutputAnnotationOpenAIResponseAnnotationCitation,
+ OutputOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextOutputAnnotationOpenAIResponseAnnotationContainerFileCitation,
+ OutputOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextOutputAnnotationOpenAIResponseAnnotationFilePath,
+ ],
+ PropertyInfo(discriminator="type"),
+]
+
+
+class OutputOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextOutputLogprobTopLogprob(
+ BaseModel
+):
+ """
+ The top log probability for a token from an OpenAI-compatible chat completion response.
+ """
+
+ token: str
+ """The token."""
+
+ logprob: float
+ """The log probability of the token."""
+
+ bytes: Optional[List[int]] = None
+ """The bytes for the token."""
+
+
+class OutputOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextOutputLogprob(
+ BaseModel
+):
+ """
+ The log probability for a token from an OpenAI-compatible chat completion response.
+ """
+
+ token: str
+ """The token."""
+
+ logprob: float
+ """The log probability of the token."""
+
+ bytes: Optional[List[int]] = None
+ """The bytes for the token."""
+
+ top_logprobs: Optional[
+ List[
+ OutputOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextOutputLogprobTopLogprob
+ ]
+ ] = None
+ """The top log probabilities for the token."""
+
+
+class OutputOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextOutput(
+ BaseModel
+):
+ """Text content within an output message of an OpenAI response."""
+
+ text: str
+
+ annotations: Optional[
+ List[
+ OutputOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextOutputAnnotation
+ ]
+ ] = None
+
+ logprobs: Optional[
+ List[
+ OutputOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextOutputLogprob
+ ]
+ ] = None
+
+ type: Optional[Literal["output_text"]] = None
+
+
+class OutputOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusalOpenAIResponseContentPartRefusal(
+ BaseModel
+):
+ """Refusal content within a streamed response part."""
+
+ refusal: str
+
+ type: Optional[Literal["refusal"]] = None
+
+
+OutputOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusal: TypeAlias = Annotated[
+ Union[
+ OutputOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextOutput,
+ OutputOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusalOpenAIResponseContentPartRefusal,
+ ],
+ PropertyInfo(discriminator="type"),
+]
+
+
+class OutputOpenAIResponseMessageOutput(BaseModel):
+ """
+ Corresponds to the various Message types in the Responses API.
+ They are all under one type because the Responses API gives them all
+ the same "type" value, and there is no way to tell them apart in certain
+ scenarios.
+ """
+
+ content: Union[
+ str,
+ List[
+ OutputOpenAIResponseMessageOutputContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFile
+ ],
+ List[
+ OutputOpenAIResponseMessageOutputContentListOpenAIResponseOutputMessageContentOutputTextOutputOpenAIResponseContentPartRefusal
+ ],
+ ]
+
+ role: Literal["system", "developer", "user", "assistant"]
+
+ id: Optional[str] = None
+
+ status: Optional[str] = None
+
+ type: Optional[Literal["message"]] = None
+
+
+class OutputOpenAIResponseOutputMessageWebSearchToolCall(BaseModel):
+ """Web search tool call output message for OpenAI responses."""
+
+ id: str
+
+ status: str
+
+ type: Optional[Literal["web_search_call"]] = None
+
+
+class OutputOpenAIResponseOutputMessageFileSearchToolCallResult(BaseModel):
+ """Search results returned by the file search operation."""
+
+ attributes: Dict[str, object]
+
+ file_id: str
+
+ filename: str
+
+ score: float
+
+ text: str
+
+
+class OutputOpenAIResponseOutputMessageFileSearchToolCall(BaseModel):
+ """File search tool call output message for OpenAI responses."""
+
+ id: str
+
+ queries: List[str]
+
+ status: str
+
+ results: Optional[List[OutputOpenAIResponseOutputMessageFileSearchToolCallResult]] = None
+
+ type: Optional[Literal["file_search_call"]] = None
+
+
+class OutputOpenAIResponseOutputMessageFunctionToolCall(BaseModel):
+ """Function tool call output message for OpenAI responses."""
+
+ arguments: str
+
+ call_id: str
+
+ name: str
+
+ id: Optional[str] = None
+
+ status: Optional[str] = None
+
+ type: Optional[Literal["function_call"]] = None
+
+
+class OutputOpenAIResponseOutputMessageMcpCall(BaseModel):
+ """Model Context Protocol (MCP) call output message for OpenAI responses."""
+
+ id: str
+
+ arguments: str
+
+ name: str
+
+ server_label: str
+
+ error: Optional[str] = None
+
+ output: Optional[str] = None
+
+ type: Optional[Literal["mcp_call"]] = None
+
+
+class OutputOpenAIResponseOutputMessageMcpListToolsTool(BaseModel):
+ """Tool definition returned by MCP list tools operation."""
+
+ input_schema: Dict[str, object]
+
+ name: str
+
+ description: Optional[str] = None
+
+
+class OutputOpenAIResponseOutputMessageMcpListTools(BaseModel):
+ """MCP list tools output message containing available tools from an MCP server."""
+
+ id: str
+
+ server_label: str
+
+ tools: List[OutputOpenAIResponseOutputMessageMcpListToolsTool]
+
+ type: Optional[Literal["mcp_list_tools"]] = None
+
+
+class OutputOpenAIResponseMcpApprovalRequest(BaseModel):
+ """A request for human approval of a tool invocation."""
+
+ id: str
+
+ arguments: str
+
+ name: str
+
+ server_label: str
+
+ type: Optional[Literal["mcp_approval_request"]] = None
+
+
+class OutputOpenAIResponseOutputMessageReasoningItemSummary(BaseModel):
+ """A summary of reasoning output from the model."""
+
+ text: str
+ """The summary text of the reasoning output."""
+
+ type: Optional[Literal["summary_text"]] = None
+ """The type identifier, always 'summary_text'."""
+
+
+class OutputOpenAIResponseOutputMessageReasoningItemContent(BaseModel):
+ """Reasoning text from the model."""
+
+ text: str
+ """The reasoning text content from the model."""
+
+ type: Optional[Literal["reasoning_text"]] = None
+ """The type identifier, always 'reasoning_text'."""
+
+
+class OutputOpenAIResponseOutputMessageReasoningItem(BaseModel):
+ """Reasoning output from the model, representing the model's thinking process."""
+
+ id: str
+ """Unique identifier for the reasoning output item."""
+
+ summary: List[OutputOpenAIResponseOutputMessageReasoningItemSummary]
+ """Summary of the reasoning output."""
+
+ content: Optional[List[OutputOpenAIResponseOutputMessageReasoningItemContent]] = None
+ """The reasoning content from the model."""
+
+ status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
+ """The status of the reasoning output."""
+
+ type: Optional[Literal["reasoning"]] = None
+ """The type identifier, always 'reasoning'."""
+
+
+class OutputOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentText(
+ BaseModel
+):
+ """Text content for input messages in OpenAI response format."""
+
+ text: str
+
+ type: Optional[Literal["input_text"]] = None
+
+
+class OutputOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentImage(
+ BaseModel
+):
+ """Image content for input messages in OpenAI response format."""
+
+ detail: Optional[Literal["low", "high", "auto"]] = None
+
+ file_id: Optional[str] = None
+
+ image_url: Optional[str] = None
+
+ type: Optional[Literal["input_image"]] = None
+
+
+class OutputOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentFile(
+ BaseModel
+):
+ """File content for input messages in OpenAI response format."""
+
+ file_data: Optional[str] = None
+
+ file_id: Optional[str] = None
+
+ file_url: Optional[str] = None
+
+ filename: Optional[str] = None
+
+ type: Optional[Literal["input_file"]] = None
+
+
+OutputOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFile: TypeAlias = Annotated[
+ Union[
+ OutputOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentText,
+ OutputOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentImage,
+ OutputOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentFile,
+ ],
+ PropertyInfo(discriminator="type"),
+]
+
+
+class OutputOpenAIResponseInputFunctionToolCallOutput(BaseModel):
+ """
+ This represents the output of a function call that gets passed back to the model.
+ """
+
+ call_id: str
+
+ output: Union[
+ str,
+ List[
+ OutputOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFile
+ ],
+ ]
+
+ id: Optional[str] = None
+
+ status: Optional[str] = None
+
+ type: Optional[Literal["function_call_output"]] = None
+
+
+class OutputOpenAIResponseMcpApprovalResponse(BaseModel):
+ """A response to an MCP approval request."""
+
+ approval_request_id: str
+
+ approve: bool
+
+ id: Optional[str] = None
+
+ reason: Optional[str] = None
+
+ type: Optional[Literal["mcp_approval_response"]] = None
+
+
+class OutputOpenAIResponseCompaction(BaseModel):
+ """A compaction item that summarizes prior conversation context."""
+
+ encrypted_content: str
+
+ id: Optional[str] = None
+
+ type: Optional[Literal["compaction"]] = None
+
+
+Output: TypeAlias = Union[
+ OutputOpenAIResponseMessageOutput,
+ OutputOpenAIResponseOutputMessageWebSearchToolCall,
+ OutputOpenAIResponseOutputMessageFileSearchToolCall,
+ OutputOpenAIResponseOutputMessageFunctionToolCall,
+ OutputOpenAIResponseOutputMessageMcpCall,
+ OutputOpenAIResponseOutputMessageMcpListTools,
+ OutputOpenAIResponseMcpApprovalRequest,
+ OutputOpenAIResponseOutputMessageReasoningItem,
+ OutputOpenAIResponseInputFunctionToolCallOutput,
+ OutputOpenAIResponseMcpApprovalResponse,
+ OutputOpenAIResponseCompaction,
+]
+
+
+class UsageInputTokensDetails(BaseModel):
+ """Token details for input tokens in OpenAI response usage."""
+
+ cached_tokens: int
+
+
+class UsageOutputTokensDetails(BaseModel):
+ """Token details for output tokens in OpenAI response usage."""
+
+ reasoning_tokens: int
+
+
+class Usage(BaseModel):
+ """Usage information for OpenAI response."""
+
+ input_tokens: int
+
+ input_tokens_details: UsageInputTokensDetails
+ """Token details for input tokens in OpenAI response usage."""
+
+ output_tokens: int
+
+ output_tokens_details: UsageOutputTokensDetails
+ """Token details for output tokens in OpenAI response usage."""
+
+ total_tokens: int
+
+
+class CompactedResponse(BaseModel):
+ """Response from compacting a conversation."""
+
+ id: str
+
+ created_at: int
+
+ output: List[Output]
+
+ usage: Usage
+ """Usage information for OpenAI response."""
+
+ object: Optional[Literal["response.compaction"]] = None
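A minimal parsing sketch for the new `CompactedResponse` model: `Output` is a plain `Union` whose members carry a `type` tag, so items can be narrowed by checking `type` after validation. The payload values are placeholders, and `model_validate` assumes the Pydantic v2 code path:

    from ogx_client.types.compacted_response import CompactedResponse

    payload = {  # placeholder values for illustration
        "id": "resp_compact_123",
        "created_at": 1730000000,
        "output": [
            {"type": "compaction", "encrypted_content": "opaque-summary-blob"},
        ],
        "usage": {
            "input_tokens": 1200,
            "input_tokens_details": {"cached_tokens": 0},
            "output_tokens": 80,
            "output_tokens_details": {"reasoning_tokens": 0},
            "total_tokens": 1280,
        },
    }

    resp = CompactedResponse.model_validate(payload)  # Pydantic v2 API
    for item in resp.output:
        if item.type == "compaction":
            print("compacted context:", item.encrypted_content)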
diff --git a/src/llama_stack_client/types/completion_create_params.py b/src/ogx_client/types/completion_create_params.py
similarity index 100%
rename from src/llama_stack_client/types/completion_create_params.py
rename to src/ogx_client/types/completion_create_params.py
diff --git a/src/llama_stack_client/types/completion_create_response.py b/src/ogx_client/types/completion_create_response.py
similarity index 100%
rename from src/llama_stack_client/types/completion_create_response.py
rename to src/ogx_client/types/completion_create_response.py
diff --git a/src/llama_stack_client/types/conversation_create_params.py b/src/ogx_client/types/conversation_create_params.py
similarity index 100%
rename from src/llama_stack_client/types/conversation_create_params.py
rename to src/ogx_client/types/conversation_create_params.py
diff --git a/src/llama_stack_client/types/conversation_delete_response.py b/src/ogx_client/types/conversation_delete_response.py
similarity index 100%
rename from src/llama_stack_client/types/conversation_delete_response.py
rename to src/ogx_client/types/conversation_delete_response.py
diff --git a/src/llama_stack_client/types/conversation_object.py b/src/ogx_client/types/conversation_object.py
similarity index 100%
rename from src/llama_stack_client/types/conversation_object.py
rename to src/ogx_client/types/conversation_object.py
diff --git a/src/llama_stack_client/types/conversation_update_params.py b/src/ogx_client/types/conversation_update_params.py
similarity index 100%
rename from src/llama_stack_client/types/conversation_update_params.py
rename to src/ogx_client/types/conversation_update_params.py
diff --git a/src/llama_stack_client/types/conversations/__init__.py b/src/ogx_client/types/conversations/__init__.py
similarity index 100%
rename from src/llama_stack_client/types/conversations/__init__.py
rename to src/ogx_client/types/conversations/__init__.py
diff --git a/src/llama_stack_client/types/conversations/item_create_params.py b/src/ogx_client/types/conversations/item_create_params.py
similarity index 100%
rename from src/llama_stack_client/types/conversations/item_create_params.py
rename to src/ogx_client/types/conversations/item_create_params.py
diff --git a/src/llama_stack_client/types/conversations/item_create_response.py b/src/ogx_client/types/conversations/item_create_response.py
similarity index 100%
rename from src/llama_stack_client/types/conversations/item_create_response.py
rename to src/ogx_client/types/conversations/item_create_response.py
diff --git a/src/llama_stack_client/types/conversations/item_delete_response.py b/src/ogx_client/types/conversations/item_delete_response.py
similarity index 100%
rename from src/llama_stack_client/types/conversations/item_delete_response.py
rename to src/ogx_client/types/conversations/item_delete_response.py
diff --git a/src/llama_stack_client/types/conversations/item_get_response.py b/src/ogx_client/types/conversations/item_get_response.py
similarity index 100%
rename from src/llama_stack_client/types/conversations/item_get_response.py
rename to src/ogx_client/types/conversations/item_get_response.py
diff --git a/src/llama_stack_client/types/conversations/item_list_params.py b/src/ogx_client/types/conversations/item_list_params.py
similarity index 100%
rename from src/llama_stack_client/types/conversations/item_list_params.py
rename to src/ogx_client/types/conversations/item_list_params.py
diff --git a/src/llama_stack_client/types/conversations/item_list_response.py b/src/ogx_client/types/conversations/item_list_response.py
similarity index 100%
rename from src/llama_stack_client/types/conversations/item_list_response.py
rename to src/ogx_client/types/conversations/item_list_response.py
diff --git a/src/llama_stack_client/types/create_embeddings_response.py b/src/ogx_client/types/create_embeddings_response.py
similarity index 100%
rename from src/llama_stack_client/types/create_embeddings_response.py
rename to src/ogx_client/types/create_embeddings_response.py
diff --git a/src/llama_stack_client/types/create_response.py b/src/ogx_client/types/create_response.py
similarity index 100%
rename from src/llama_stack_client/types/create_response.py
rename to src/ogx_client/types/create_response.py
diff --git a/src/llama_stack_client/types/delete_file_response.py b/src/ogx_client/types/delete_file_response.py
similarity index 100%
rename from src/llama_stack_client/types/delete_file_response.py
rename to src/ogx_client/types/delete_file_response.py
diff --git a/src/llama_stack_client/types/embedding_create_params.py b/src/ogx_client/types/embedding_create_params.py
similarity index 100%
rename from src/llama_stack_client/types/embedding_create_params.py
rename to src/ogx_client/types/embedding_create_params.py
diff --git a/src/llama_stack_client/types/file.py b/src/ogx_client/types/file.py
similarity index 79%
rename from src/llama_stack_client/types/file.py
rename to src/ogx_client/types/file.py
index e79a7ea8..ceba18af 100644
--- a/src/llama_stack_client/types/file.py
+++ b/src/ogx_client/types/file.py
@@ -32,6 +32,16 @@ class File(BaseModel):
purpose: Literal["assistants", "batch"]
"""The intended purpose of the file."""
+ status: Literal["uploaded", "processed", "error"]
+ """Deprecated. The current status of the file."""
+
+ status_details: str
+ """Deprecated.
+
+ For details on why a fine-tuning training file failed validation, see the error
+ field on fine_tuning.job.
+ """
+
expires_at: Optional[int] = None
"""The Unix timestamp (in seconds) for when the file expires."""
diff --git a/src/llama_stack_client/types/file_content_response.py b/src/ogx_client/types/file_content_response.py
similarity index 100%
rename from src/llama_stack_client/types/file_content_response.py
rename to src/ogx_client/types/file_content_response.py
diff --git a/src/llama_stack_client/types/file_create_params.py b/src/ogx_client/types/file_create_params.py
similarity index 100%
rename from src/llama_stack_client/types/file_create_params.py
rename to src/ogx_client/types/file_create_params.py
diff --git a/src/llama_stack_client/types/file_list_params.py b/src/ogx_client/types/file_list_params.py
similarity index 100%
rename from src/llama_stack_client/types/file_list_params.py
rename to src/ogx_client/types/file_list_params.py
diff --git a/src/llama_stack_client/types/list_files_response.py b/src/ogx_client/types/list_files_response.py
similarity index 100%
rename from src/llama_stack_client/types/list_files_response.py
rename to src/ogx_client/types/list_files_response.py
diff --git a/src/llama_stack_client/types/list_models_response.py b/src/ogx_client/types/list_models_response.py
similarity index 74%
rename from src/llama_stack_client/types/list_models_response.py
rename to src/ogx_client/types/list_models_response.py
index 4bd5980e..d82c3782 100644
--- a/src/llama_stack_client/types/list_models_response.py
+++ b/src/ogx_client/types/list_models_response.py
@@ -6,8 +6,11 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+from typing import List, Optional
+from typing_extensions import Literal
+
+from .model import Model
from .._models import BaseModel
-from .model_list_response import ModelListResponse
__all__ = ["ListModelsResponse"]
@@ -15,5 +18,7 @@
class ListModelsResponse(BaseModel):
"""Response containing a list of OpenAI model objects."""
- data: ModelListResponse
+ data: List[Model]
"""List of OpenAI model objects."""
+
+ object: Optional[Literal["list"]] = None
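With `data` now typed as `List[Model]` and the new optional `object` discriminator, the wrapper can be consumed directly. That `client.models.list()` returns this wrapper is an assumption for illustration:

    from ogx_client import OgxClient  # assumed client name post-rename

    client = OgxClient()
    models = client.models.list()  # assumed to return ListModelsResponse

    assert models.object in (None, "list")  # optional, and "list" when present
    for model in models.data:               # a plain List[Model] after this change
        print(model.id)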
diff --git a/src/llama_stack_client/types/list_prompts_response.py b/src/ogx_client/types/list_prompts_response.py
similarity index 100%
rename from src/llama_stack_client/types/list_prompts_response.py
rename to src/ogx_client/types/list_prompts_response.py
diff --git a/src/llama_stack_client/types/list_shields_response.py b/src/ogx_client/types/list_shields_response.py
similarity index 100%
rename from src/llama_stack_client/types/list_shields_response.py
rename to src/ogx_client/types/list_shields_response.py
diff --git a/src/llama_stack_client/types/list_vector_stores_response.py b/src/ogx_client/types/list_vector_stores_response.py
similarity index 80%
rename from src/llama_stack_client/types/list_vector_stores_response.py
rename to src/ogx_client/types/list_vector_stores_response.py
index 590321e7..c4a544f0 100644
--- a/src/llama_stack_client/types/list_vector_stores_response.py
+++ b/src/ogx_client/types/list_vector_stores_response.py
@@ -7,6 +7,7 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import List, Optional
+from typing_extensions import Literal
from .._models import BaseModel
from .vector_store import VectorStore
@@ -19,10 +20,10 @@ class ListVectorStoresResponse(BaseModel):
data: List[VectorStore]
- first_id: Optional[str] = None
+ first_id: str
- has_more: Optional[bool] = None
+ has_more: bool
- last_id: Optional[str] = None
+ last_id: str
- object: Optional[str] = None
+ object: Optional[Literal["list"]] = None
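Because `first_id`, `has_more`, and `last_id` are now required, cursor pagination no longer needs None checks. A sketch, assuming `vector_stores.list` accepts an `after` cursor (not shown in this hunk):

    from ogx_client import OgxClient  # assumed client name post-rename

    client = OgxClient()
    page = client.vector_stores.list()
    stores = list(page.data)
    while page.has_more:  # a required bool after this change
        page = client.vector_stores.list(after=page.last_id)  # `after` is assumed
        stores.extend(page.data)
    print(f"fetched {len(stores)} vector stores")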
diff --git a/src/llama_stack_client/types/model.py b/src/ogx_client/types/model.py
similarity index 94%
rename from src/llama_stack_client/types/model.py
rename to src/ogx_client/types/model.py
index 85f6ef73..8d5a8eb8 100644
--- a/src/llama_stack_client/types/model.py
+++ b/src/ogx_client/types/model.py
@@ -21,7 +21,7 @@ class Model(BaseModel):
:object: The object type, which will be "model"
:created: The Unix timestamp in seconds when the model was created
:owned_by: The owner of the model
- :custom_metadata: Llama Stack-specific metadata including model_type, provider info, and additional metadata
+ :custom_metadata: OGX-specific metadata including model_type, provider info, and additional metadata
"""
id: str
diff --git a/src/llama_stack_client/types/model_retrieve_response.py b/src/ogx_client/types/model_retrieve_response.py
similarity index 67%
rename from src/llama_stack_client/types/model_retrieve_response.py
rename to src/ogx_client/types/model_retrieve_response.py
index 756dc8ce..5594305c 100644
--- a/src/llama_stack_client/types/model_retrieve_response.py
+++ b/src/ogx_client/types/model_retrieve_response.py
@@ -6,6 +6,7 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+import builtins
from typing import Dict, Optional
from typing_extensions import Literal
@@ -17,19 +18,28 @@
class ModelRetrieveResponse(BaseModel):
- """A model resource representing an AI model registered in Llama Stack."""
+ """A model resource representing an AI model registered in OGX."""
+
+ id: str
+ """The model identifier (OpenAI-compatible alias for identifier)."""
identifier: str
- """Unique identifier for this resource in llama stack"""
+ """Unique identifier for this resource in ogx"""
+
+ object: Literal["model"]
+ """The object type, always 'model'."""
provider_id: str
"""ID of the provider that owns this resource"""
- metadata: Optional[Dict[str, object]] = None
+ created: Optional[int] = None
+ """The Unix timestamp in seconds when the model was created."""
+
+ metadata: Optional[Dict[str, builtins.object]] = None
"""Any additional metadata for this model"""
api_model_type: Optional[Literal["llm", "embedding", "rerank"]] = FieldInfo(alias="model_type", default=None)
- """Enumeration of supported model types in Llama Stack."""
+ """Enumeration of supported model types in OGX."""
api_model_validation: Optional[bool] = FieldInfo(alias="model_validation", default=None)
"""Enable model availability check during registration.
@@ -38,6 +48,9 @@ class ModelRetrieveResponse(BaseModel):
during provider refresh.
"""
+ owned_by: Optional[str] = None
+ """The owner of the model."""
+
provider_resource_id: Optional[str] = None
"""Unique identifier for this resource in the provider"""
diff --git a/src/llama_stack_client/types/alpha/eval/__init__.py b/src/ogx_client/types/models/__init__.py
similarity index 100%
rename from src/llama_stack_client/types/alpha/eval/__init__.py
rename to src/ogx_client/types/models/__init__.py
diff --git a/src/llama_stack_client/types/moderation_create_params.py b/src/ogx_client/types/moderation_create_params.py
similarity index 100%
rename from src/llama_stack_client/types/moderation_create_params.py
rename to src/ogx_client/types/moderation_create_params.py
diff --git a/src/llama_stack_client/types/prompt.py b/src/ogx_client/types/prompt.py
similarity index 96%
rename from src/llama_stack_client/types/prompt.py
rename to src/ogx_client/types/prompt.py
index b3da67f8..e56d15e3 100644
--- a/src/llama_stack_client/types/prompt.py
+++ b/src/ogx_client/types/prompt.py
@@ -15,7 +15,7 @@
class Prompt(BaseModel):
"""
- A prompt resource representing a stored OpenAI Compatible prompt template in Llama Stack.
+ A prompt resource representing a stored OpenAI Compatible prompt template in OGX.
"""
prompt_id: str
diff --git a/src/llama_stack_client/types/prompt_create_params.py b/src/ogx_client/types/prompt_create_params.py
similarity index 100%
rename from src/llama_stack_client/types/prompt_create_params.py
rename to src/ogx_client/types/prompt_create_params.py
diff --git a/src/llama_stack_client/types/prompt_list_response.py b/src/ogx_client/types/prompt_list_response.py
similarity index 100%
rename from src/llama_stack_client/types/prompt_list_response.py
rename to src/ogx_client/types/prompt_list_response.py
diff --git a/src/llama_stack_client/types/prompt_retrieve_params.py b/src/ogx_client/types/prompt_retrieve_params.py
similarity index 100%
rename from src/llama_stack_client/types/prompt_retrieve_params.py
rename to src/ogx_client/types/prompt_retrieve_params.py
diff --git a/src/llama_stack_client/types/prompt_set_default_version_params.py b/src/ogx_client/types/prompt_set_default_version_params.py
similarity index 100%
rename from src/llama_stack_client/types/prompt_set_default_version_params.py
rename to src/ogx_client/types/prompt_set_default_version_params.py
diff --git a/src/llama_stack_client/types/prompt_update_params.py b/src/ogx_client/types/prompt_update_params.py
similarity index 100%
rename from src/llama_stack_client/types/prompt_update_params.py
rename to src/ogx_client/types/prompt_update_params.py
diff --git a/src/llama_stack_client/types/models/__init__.py b/src/ogx_client/types/prompts/__init__.py
similarity index 100%
rename from src/llama_stack_client/types/models/__init__.py
rename to src/ogx_client/types/prompts/__init__.py
diff --git a/src/llama_stack_client/types/provider_list_response.py b/src/ogx_client/types/provider_list_response.py
similarity index 100%
rename from src/llama_stack_client/types/provider_list_response.py
rename to src/ogx_client/types/provider_list_response.py
diff --git a/src/llama_stack_client/types/query_chunks_response.py b/src/ogx_client/types/query_chunks_response.py
similarity index 100%
rename from src/llama_stack_client/types/query_chunks_response.py
rename to src/ogx_client/types/query_chunks_response.py
diff --git a/src/ogx_client/types/response_compact_params.py b/src/ogx_client/types/response_compact_params.py
new file mode 100644
index 00000000..b6e556b9
--- /dev/null
+++ b/src/ogx_client/types/response_compact_params.py
@@ -0,0 +1,804 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Union, Iterable, Optional
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from .._types import SequenceNotStr
+
+__all__ = [
+ "ResponseCompactParams",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutput",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInput",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFile",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentText",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentImage",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentFile",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusal",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInput",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotation",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotationOpenAIResponseAnnotationFileCitation",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotationOpenAIResponseAnnotationCitation",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotationOpenAIResponseAnnotationContainerFileCitation",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotationOpenAIResponseAnnotationFilePath",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputLogprob",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputLogprobTopLogprob",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseContentPartRefusal",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageWebSearchToolCall",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageFileSearchToolCall",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageFileSearchToolCallResult",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageFunctionToolCall",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageMcpCall",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageMcpListTools",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageMcpListToolsTool",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalRequest",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageReasoningItem",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageReasoningItemSummary",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageReasoningItemContent",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseInputFunctionToolCallOutput",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFile",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentText",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentImage",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentFile",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponse",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseCompaction",
+ "Reasoning",
+ "Text",
+ "TextFormat",
+ "Tool",
+ "ToolOpenAIResponseInputToolWebSearch",
+ "ToolOpenAIResponseInputToolFileSearch",
+ "ToolOpenAIResponseInputToolFileSearchRankingOptions",
+ "ToolOpenAIResponseInputToolFunction",
+ "ToolOpenAIResponseInputToolMcp",
+ "ToolOpenAIResponseInputToolMcpAllowedTools",
+ "ToolOpenAIResponseInputToolMcpAllowedToolsAllowedToolsFilter",
+ "ToolOpenAIResponseInputToolMcpRequireApproval",
+ "ToolOpenAIResponseInputToolMcpRequireApprovalApprovalFilter",
+]
+
+
+class ResponseCompactParams(TypedDict, total=False):
+ model: Required[str]
+ """The model to use for generating the compacted summary."""
+
+ input: Union[str, Iterable[InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutput], None]
+ """Input message(s) to compact."""
+
+ instructions: Optional[str]
+ """Instructions to guide the compaction."""
+
+ parallel_tool_calls: Optional[bool]
+ """Whether to enable parallel tool calls.
+
+ Accepted for compatibility but not used during compaction.
+ """
+
+ previous_response_id: Optional[str]
+ """ID of a previous response whose history to compact."""
+
+ prompt_cache_key: Optional[str]
+ """A key to use when reading from or writing to the prompt cache."""
+
+ reasoning: Optional[Reasoning]
+ """Configuration for reasoning effort in OpenAI responses.
+
+ Controls how much reasoning the model performs before generating a response.
+ """
+
+ text: Optional[Text]
+ """Text response configuration for OpenAI responses."""
+
+ tools: Optional[Iterable[Tool]]
+ """List of tools available to the model.
+
+ Accepted for compatibility but not used during compaction.
+ """
+
+
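+# A construction sketch in comment form (illustrative only): every key below
+# is declared on ResponseCompactParams above, while the consuming call, e.g.
+# `client.responses.compact(**params)`, is an assumption about the SDK
+# surface rather than something defined in this file.
+#
+#     params: ResponseCompactParams = {
+#         "model": "llama-3-8b",               # required
+#         "previous_response_id": "resp_123",  # compact that response's history
+#         "instructions": "Preserve tool call results verbatim.",
+#     }
+
+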
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentText(
+ TypedDict, total=False
+):
+ """Text content for input messages in OpenAI response format."""
+
+ text: Required[str]
+
+ type: Literal["input_text"]
+
+
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentImage(
+ TypedDict, total=False
+):
+ """Image content for input messages in OpenAI response format."""
+
+ detail: Literal["low", "high", "auto"]
+
+ file_id: Optional[str]
+
+ image_url: Optional[str]
+
+ type: Literal["input_image"]
+
+
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentFile(
+ TypedDict, total=False
+):
+ """File content for input messages in OpenAI response format."""
+
+ file_data: Optional[str]
+
+ file_id: Optional[str]
+
+ file_url: Optional[str]
+
+ filename: Optional[str]
+
+ type: Literal["input_file"]
+
+
+InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFile: TypeAlias = Union[
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentText,
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentImage,
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentFile,
+]
+
+
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotationOpenAIResponseAnnotationFileCitation(
+ TypedDict, total=False
+):
+ """File citation annotation for referencing specific files in response content."""
+
+ file_id: Required[str]
+
+ filename: Required[str]
+
+ index: Required[int]
+
+ type: Literal["file_citation"]
+
+
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotationOpenAIResponseAnnotationCitation(
+ TypedDict, total=False
+):
+ """URL citation annotation for referencing external web resources."""
+
+ end_index: Required[int]
+
+ start_index: Required[int]
+
+ title: Required[str]
+
+ url: Required[str]
+
+ type: Literal["url_citation"]
+
+
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotationOpenAIResponseAnnotationContainerFileCitation(
+ TypedDict, total=False
+):
+ """Container file citation annotation referencing a file within a container."""
+
+ container_id: Required[str]
+
+ end_index: Required[int]
+
+ file_id: Required[str]
+
+ filename: Required[str]
+
+ start_index: Required[int]
+
+ type: Literal["container_file_citation"]
+
+
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotationOpenAIResponseAnnotationFilePath(
+ TypedDict, total=False
+):
+ """File path annotation referencing a generated file in response content."""
+
+ file_id: Required[str]
+
+ index: Required[int]
+
+ type: Literal["file_path"]
+
+
+InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotation: TypeAlias = Union[
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotationOpenAIResponseAnnotationFileCitation,
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotationOpenAIResponseAnnotationCitation,
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotationOpenAIResponseAnnotationContainerFileCitation,
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotationOpenAIResponseAnnotationFilePath,
+]
+
+
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputLogprobTopLogprob(
+ TypedDict, total=False
+):
+ """
+ The top log probability for a token from an OpenAI-compatible chat completion response.
+ """
+
+ token: Required[str]
+ """The token."""
+
+ logprob: Required[float]
+ """The log probability of the token."""
+
+ bytes: Optional[Iterable[int]]
+ """The bytes for the token."""
+
+
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputLogprob(
+ TypedDict, total=False
+):
+ """
+ The log probability for a token from an OpenAI-compatible chat completion response.
+ """
+
+ token: Required[str]
+ """The token."""
+
+ logprob: Required[float]
+ """The log probability of the token."""
+
+ bytes: Optional[Iterable[int]]
+ """The bytes for the token."""
+
+ top_logprobs: Optional[
+ Iterable[
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputLogprobTopLogprob
+ ]
+ ]
+ """The top log probabilities for the token."""
+
+
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInput(
+ TypedDict, total=False
+):
+ """Text content within an output message of an OpenAI response."""
+
+ text: Required[str]
+
+ annotations: Iterable[
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotation
+ ]
+
+ logprobs: Optional[
+ Iterable[
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputLogprob
+ ]
+ ]
+
+ type: Literal["output_text"]
+
+
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseContentPartRefusal(
+ TypedDict, total=False
+):
+ """Refusal content within a streamed response part."""
+
+ refusal: Required[str]
+
+ type: Literal["refusal"]
+
+
+InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusal: TypeAlias = Union[
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInput,
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseContentPartRefusal,
+]
+
+
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInput(
+ TypedDict, total=False
+):
+ """
+ Corresponds to the various Message types in the Responses API.
+ They are all under one type because the Responses API gives them all
+ the same "type" value, and there is no way to tell them apart in certain
+ scenarios.
+ """
+
+ content: Required[
+ Union[
+ str,
+ Iterable[
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFile
+ ],
+ Iterable[
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusal
+ ],
+ ]
+ ]
+
+ role: Required[Literal["system", "developer", "user", "assistant"]]
+
+ id: Optional[str]
+
+ status: Optional[str]
+
+ type: Literal["message"]
+
+
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageWebSearchToolCall(
+ TypedDict, total=False
+):
+ """Web search tool call output message for OpenAI responses."""
+
+ id: Required[str]
+
+ status: Required[str]
+
+ type: Literal["web_search_call"]
+
+
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageFileSearchToolCallResult(
+ TypedDict, total=False
+):
+ """Search results returned by the file search operation."""
+
+ attributes: Required[Dict[str, object]]
+
+ file_id: Required[str]
+
+ filename: Required[str]
+
+ score: Required[float]
+
+ text: Required[str]
+
+
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageFileSearchToolCall(
+ TypedDict, total=False
+):
+ """File search tool call output message for OpenAI responses."""
+
+ id: Required[str]
+
+ queries: Required[SequenceNotStr[str]]
+
+ status: Required[str]
+
+ results: Optional[
+ Iterable[
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageFileSearchToolCallResult
+ ]
+ ]
+
+ type: Literal["file_search_call"]
+
+
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageFunctionToolCall(
+ TypedDict, total=False
+):
+ """Function tool call output message for OpenAI responses."""
+
+ arguments: Required[str]
+
+ call_id: Required[str]
+
+ name: Required[str]
+
+ id: Optional[str]
+
+ status: Optional[str]
+
+ type: Literal["function_call"]
+
+
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageMcpCall(
+ TypedDict, total=False
+):
+ """Model Context Protocol (MCP) call output message for OpenAI responses."""
+
+ id: Required[str]
+
+ arguments: Required[str]
+
+ name: Required[str]
+
+ server_label: Required[str]
+
+ error: Optional[str]
+
+ output: Optional[str]
+
+ type: Literal["mcp_call"]
+
+
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageMcpListToolsTool(
+ TypedDict, total=False
+):
+ """Tool definition returned by MCP list tools operation."""
+
+ input_schema: Required[Dict[str, object]]
+
+ name: Required[str]
+
+ description: Optional[str]
+
+
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageMcpListTools(
+ TypedDict, total=False
+):
+ """MCP list tools output message containing available tools from an MCP server."""
+
+ id: Required[str]
+
+ server_label: Required[str]
+
+ tools: Required[
+ Iterable[
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageMcpListToolsTool
+ ]
+ ]
+
+ type: Literal["mcp_list_tools"]
+
+
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalRequest(
+ TypedDict, total=False
+):
+ """A request for human approval of a tool invocation."""
+
+ id: Required[str]
+
+ arguments: Required[str]
+
+ name: Required[str]
+
+ server_label: Required[str]
+
+ type: Literal["mcp_approval_request"]
+
+
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageReasoningItemSummary(
+ TypedDict, total=False
+):
+ """A summary of reasoning output from the model."""
+
+ text: Required[str]
+ """The summary text of the reasoning output."""
+
+ type: Literal["summary_text"]
+ """The type identifier, always 'summary_text'."""
+
+
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageReasoningItemContent(
+ TypedDict, total=False
+):
+ """Reasoning text from the model."""
+
+ text: Required[str]
+ """The reasoning text content from the model."""
+
+ type: Literal["reasoning_text"]
+ """The type identifier, always 'reasoning_text'."""
+
+
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageReasoningItem(
+ TypedDict, total=False
+):
+ """Reasoning output from the model, representing the model's thinking process."""
+
+ id: Required[str]
+ """Unique identifier for the reasoning output item."""
+
+ summary: Required[
+ Iterable[
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageReasoningItemSummary
+ ]
+ ]
+ """Summary of the reasoning output."""
+
+ content: Optional[
+ Iterable[
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageReasoningItemContent
+ ]
+ ]
+ """The reasoning content from the model."""
+
+ status: Optional[Literal["in_progress", "completed", "incomplete"]]
+ """The status of the reasoning output."""
+
+ type: Literal["reasoning"]
+ """The type identifier, always 'reasoning'."""
+
+
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentText(
+ TypedDict, total=False
+):
+ """Text content for input messages in OpenAI response format."""
+
+ text: Required[str]
+
+ type: Literal["input_text"]
+
+
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentImage(
+ TypedDict, total=False
+):
+ """Image content for input messages in OpenAI response format."""
+
+ detail: Literal["low", "high", "auto"]
+
+ file_id: Optional[str]
+
+ image_url: Optional[str]
+
+ type: Literal["input_image"]
+
+
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentFile(
+ TypedDict, total=False
+):
+ """File content for input messages in OpenAI response format."""
+
+ file_data: Optional[str]
+
+ file_id: Optional[str]
+
+ file_url: Optional[str]
+
+ filename: Optional[str]
+
+ type: Literal["input_file"]
+
+
+InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFile: TypeAlias = Union[
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentText,
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentImage,
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentFile,
+]
+
+
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseInputFunctionToolCallOutput(
+ TypedDict, total=False
+):
+ """
+ This represents the output of a function call that gets passed back to the model.
+ """
+
+ call_id: Required[str]
+
+ output: Required[
+ Union[
+ str,
+ Iterable[
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFile
+ ],
+ ]
+ ]
+
+ id: Optional[str]
+
+ status: Optional[str]
+
+ type: Literal["function_call_output"]
+
+
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponse(
+ TypedDict, total=False
+):
+ """A response to an MCP approval request."""
+
+ approval_request_id: Required[str]
+
+ approve: Required[bool]
+
+ id: Optional[str]
+
+ reason: Optional[str]
+
+ type: Literal["mcp_approval_response"]
+
+
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseCompaction(
+ TypedDict, total=False
+):
+ """A compaction item that summarizes prior conversation context."""
+
+ encrypted_content: Required[str]
+
+ id: Optional[str]
+
+ type: Literal["compaction"]
+
+
+InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutput: TypeAlias = Union[
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInput,
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageWebSearchToolCall,
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageFileSearchToolCall,
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageFunctionToolCall,
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageMcpCall,
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageMcpListTools,
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalRequest,
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageReasoningItem,
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseInputFunctionToolCallOutput,
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponse,
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseCompaction,
+]
+
+
+class Reasoning(TypedDict, total=False):
+ """Configuration for reasoning effort in OpenAI responses.
+
+ Controls how much reasoning the model performs before generating a response.
+ """
+
+ effort: Optional[Literal["none", "minimal", "low", "medium", "high", "xhigh"]]
+
+ summary: Optional[Literal["auto", "concise", "detailed"]]
+ """Summary mode for reasoning output. One of 'auto', 'concise', or 'detailed'."""
+
+
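(Editorial note: a minimal sketch of the `Reasoning` configuration above, as a plain dict; both fields are optional.)

```python
# Request high reasoning effort with an auto-generated summary.
reasoning = {
    "effort": "high",
    "summary": "auto",
}
```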
+class TextFormat(TypedDict, total=False):
+ """Configuration for Responses API text format."""
+
+ description: Optional[str]
+
+ name: Optional[str]
+
+ schema: Optional[Dict[str, object]]
+
+ strict: Optional[bool]
+
+ type: Literal["text", "json_schema", "json_object"]
+
+
+class Text(TypedDict, total=False):
+ """Text response configuration for OpenAI responses."""
+
+ format: Optional[TextFormat]
+ """Configuration for Responses API text format."""
+
+ verbosity: Optional[Literal["low", "medium", "high"]]
+
+
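(Editorial note: a sketch of the `Text`/`TextFormat` shapes above, constraining output to a JSON schema; the schema contents are illustrative, and `schema` holds an ordinary JSON Schema dict.)

```python
# Structured-output configuration: a named JSON schema with strict adherence.
text = {
    "format": {
        "type": "json_schema",
        "name": "weather_report",
        "schema": {
            "type": "object",
            "properties": {"city": {"type": "string"}, "temp_c": {"type": "number"}},
            "required": ["city", "temp_c"],
        },
        "strict": True,
    },
    "verbosity": "low",
}
```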
+class ToolOpenAIResponseInputToolWebSearch(TypedDict, total=False):
+ """Web search tool configuration for OpenAI response inputs."""
+
+ search_context_size: Optional[str]
+
+ type: Literal["web_search", "web_search_preview", "web_search_preview_2025_03_11", "web_search_2025_08_26"]
+
+
+class ToolOpenAIResponseInputToolFileSearchRankingOptions(TypedDict, total=False):
+ """Options for ranking and filtering search results.
+
+ This class configures how search results are ranked and filtered. You can use algorithm-based
+ rerankers (weighted, RRF) or neural rerankers. Defaults from VectorStoresConfig are
+ used when parameters are not provided.
+
+ Examples:
+ # Weighted ranker with custom alpha
+ SearchRankingOptions(ranker="weighted", alpha=0.7)
+
+ # RRF ranker with custom impact factor
+ SearchRankingOptions(ranker="rrf", impact_factor=50.0)
+
+ # Use config defaults (just specify ranker type)
+ SearchRankingOptions(ranker="weighted") # Uses alpha from VectorStoresConfig
+
+ # Score threshold filtering
+ SearchRankingOptions(ranker="weighted", score_threshold=0.5)
+ """
+
+ alpha: Optional[float]
+ """Weight factor for weighted ranker"""
+
+ impact_factor: Optional[float]
+ """Impact factor for RRF algorithm"""
+
+ model: Optional[str]
+ """Model identifier for neural reranker"""
+
+ ranker: Optional[str]
+
+ score_threshold: Optional[float]
+
+ weights: Optional[Dict[str, float]]
+ """Weights for combining vector, keyword, and neural scores.
+
+ Keys: 'vector', 'keyword', 'neural'
+ """
+
+
+class ToolOpenAIResponseInputToolFileSearch(TypedDict, total=False):
+ """File search tool configuration for OpenAI response inputs."""
+
+ vector_store_ids: Required[SequenceNotStr[str]]
+
+ filters: Optional[Dict[str, object]]
+
+ max_num_results: Optional[int]
+
+ ranking_options: Optional[ToolOpenAIResponseInputToolFileSearchRankingOptions]
+ """Options for ranking and filtering search results.
+
+ This class configures how search results are ranked and filtered. You can use
+ algorithm-based rerankers (weighted, RRF) or neural rerankers. Defaults from
+ VectorStoresConfig are used when parameters are not provided.
+
+ Examples:
+ # Weighted ranker with custom alpha
+ SearchRankingOptions(ranker="weighted", alpha=0.7)
+
+ # RRF ranker with custom impact factor
+ SearchRankingOptions(ranker="rrf", impact_factor=50.0)
+
+ # Use config defaults (just specify ranker type)
+ SearchRankingOptions(ranker="weighted") # Uses alpha from VectorStoresConfig
+
+ # Score threshold filtering
+ SearchRankingOptions(ranker="weighted", score_threshold=0.5)
+ """
+
+ type: Literal["file_search"]
+
+
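(Editorial note: the `SearchRankingOptions(...)` examples in the docstrings use server-side constructor syntax; on the client side the same options are plain dicts matching the TypedDicts above. A sketch, with the vector store ID made up for illustration:)

```python
# File search tool with a weighted reranker and a score cutoff.
# "vs_123" is a placeholder vector store ID.
file_search_tool = {
    "type": "file_search",
    "vector_store_ids": ["vs_123"],
    "max_num_results": 5,
    "ranking_options": {
        "ranker": "weighted",
        "alpha": 0.7,
        "score_threshold": 0.5,
    },
}
```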
+class ToolOpenAIResponseInputToolFunction(TypedDict, total=False):
+ """Function tool configuration for OpenAI response inputs."""
+
+ name: Required[str]
+
+ parameters: Required[Optional[Dict[str, object]]]
+
+ description: Optional[str]
+
+ strict: Optional[bool]
+
+ type: Literal["function"]
+
+
+class ToolOpenAIResponseInputToolMcpAllowedToolsAllowedToolsFilter(TypedDict, total=False):
+ """Filter configuration for restricting which MCP tools can be used."""
+
+ tool_names: Optional[SequenceNotStr[str]]
+
+
+ToolOpenAIResponseInputToolMcpAllowedTools: TypeAlias = Union[
+ SequenceNotStr[str], ToolOpenAIResponseInputToolMcpAllowedToolsAllowedToolsFilter
+]
+
+
+class ToolOpenAIResponseInputToolMcpRequireApprovalApprovalFilter(TypedDict, total=False):
+ """Filter configuration for MCP tool approval requirements."""
+
+ always: Optional[SequenceNotStr[str]]
+
+ never: Optional[SequenceNotStr[str]]
+
+
+ToolOpenAIResponseInputToolMcpRequireApproval: TypeAlias = Union[
+ Literal["always", "never"], ToolOpenAIResponseInputToolMcpRequireApprovalApprovalFilter
+]
+
+
+class ToolOpenAIResponseInputToolMcp(TypedDict, total=False):
+ """Model Context Protocol (MCP) tool configuration for OpenAI response inputs."""
+
+ server_label: Required[str]
+
+ allowed_tools: Optional[ToolOpenAIResponseInputToolMcpAllowedTools]
+ """Filter configuration for restricting which MCP tools can be used."""
+
+ authorization: Optional[str]
+
+ connector_id: Optional[str]
+
+ headers: Optional[Dict[str, object]]
+
+ require_approval: ToolOpenAIResponseInputToolMcpRequireApproval
+ """Filter configuration for MCP tool approval requirements."""
+
+ server_url: Optional[str]
+
+ type: Literal["mcp"]
+
+
+Tool: TypeAlias = Union[
+ ToolOpenAIResponseInputToolWebSearch,
+ ToolOpenAIResponseInputToolFileSearch,
+ ToolOpenAIResponseInputToolFunction,
+ ToolOpenAIResponseInputToolMcp,
+]
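
(Editorial note: the `Tool` alias accepts any of the four tool shapes defined above. A sketch of a `tools` list mixing web search with an MCP server, using the approval-filter form of `require_approval`; the server label, URL, and tool names are illustrative.)

```python
# Two tools: plain web search, and an MCP server where "deploy" always
# needs human approval while "rollback" never does.
tools = [
    {"type": "web_search"},
    {
        "type": "mcp",
        "server_label": "deploy-tools",
        "server_url": "https://mcp.example.com",
        "allowed_tools": ["deploy", "rollback"],
        "require_approval": {"always": ["deploy"], "never": ["rollback"]},
    },
]
```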
diff --git a/src/llama_stack_client/types/response_create_params.py b/src/ogx_client/types/response_create_params.py
similarity index 62%
rename from src/llama_stack_client/types/response_create_params.py
rename to src/ogx_client/types/response_create_params.py
index 5ce3f825..2bf5c284 100644
--- a/src/llama_stack_client/types/response_create_params.py
+++ b/src/ogx_client/types/response_create_params.py
@@ -15,39 +15,41 @@
__all__ = [
"ResponseCreateParamsBase",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutput",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInput",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFile",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentText",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentImage",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentFile",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusal",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInput",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotation",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotationOpenAIResponseAnnotationFileCitation",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotationOpenAIResponseAnnotationCitation",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotationOpenAIResponseAnnotationContainerFileCitation",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotationOpenAIResponseAnnotationFilePath",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputLogprob",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputLogprobTopLogprob",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseContentPartRefusal",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageWebSearchToolCall",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageFileSearchToolCall",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageFileSearchToolCallResult",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageFunctionToolCall",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageMcpCall",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageMcpListTools",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageMcpListToolsTool",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalRequest",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageReasoningItem",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageReasoningItemSummary",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageReasoningItemContent",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseInputFunctionToolCallOutput",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFile",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentText",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentImage",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentFile",
"InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponse",
- "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMessageInput",
- "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMessageInputContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFile",
- "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMessageInputContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentText",
- "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMessageInputContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentImage",
- "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMessageInputContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentFile",
- "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusal",
- "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInput",
- "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotation",
- "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotationOpenAIResponseAnnotationFileCitation",
- "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotationOpenAIResponseAnnotationCitation",
- "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotationOpenAIResponseAnnotationContainerFileCitation",
- "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotationOpenAIResponseAnnotationFilePath",
- "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputLogprob",
- "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputLogprobTopLogprob",
- "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseContentPartRefusal",
- "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseOutputMessageWebSearchToolCall",
- "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseOutputMessageFileSearchToolCall",
- "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseOutputMessageFileSearchToolCallResult",
- "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseOutputMessageFunctionToolCall",
- "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseOutputMessageMcpCall",
- "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseOutputMessageMcpListTools",
- "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseOutputMessageMcpListToolsTool",
- "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMcpApprovalRequest",
- "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseOutputMessageReasoningItem",
- "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseOutputMessageReasoningItemSummary",
- "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseOutputMessageReasoningItemContent",
- "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseInputFunctionToolCallOutput",
- "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFile",
- "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentText",
- "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentImage",
- "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentFile",
- "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMcpApprovalResponse",
+ "InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseCompaction",
+ "ContextManagement",
"Guardrail",
"GuardrailResponseGuardrailSpec",
"Prompt",
@@ -82,14 +84,7 @@
class ResponseCreateParamsBase(TypedDict, total=False):
- input: Required[
- Union[
- str,
- Iterable[
- InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponse
- ],
- ]
- ]
+ input: Required[Union[str, Iterable[InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutput]]]
"""Input message(s) to create the response."""
model: Required[str]
@@ -101,6 +96,13 @@ class ResponseCreateParamsBase(TypedDict, total=False):
When true, returns immediately with status 'queued'.
"""
+ context_management: Optional[Iterable[ContextManagement]]
+ """Context management configuration.
+
+ When set with type 'compaction', automatically compacts conversation history
+ when token count exceeds the compact_threshold.
+ """
+
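(Editorial note: the `ContextManagement` shape is defined elsewhere in this file; going only by the docstring above, a compaction entry would look roughly like the following, with the threshold value and field spelling assumed rather than confirmed by this hunk.)

```python
# Assumed shape based on the docstring above: compact conversation history
# once the token count exceeds compact_threshold.
context_management = [
    {"type": "compaction", "compact_threshold": 100_000},
]
```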
conversation: Optional[str]
"""Optional ID of a conversation to add the response to."""
@@ -110,17 +112,15 @@ class ResponseCreateParamsBase(TypedDict, total=False):
guardrails: Optional[SequenceNotStr[Guardrail]]
"""List of guardrails to apply during response generation."""
- include: Optional[
- List[
- Literal[
- "web_search_call.action.sources",
- "code_interpreter_call.outputs",
- "computer_call_output.output.image_url",
- "file_search_call.results",
- "message.input_image.image_url",
- "message.output_text.logprobs",
- "reasoning.encrypted_content",
- ]
+ include: List[
+ Literal[
+ "web_search_call.action.sources",
+ "code_interpreter_call.outputs",
+ "computer_call_output.output.image_url",
+ "file_search_call.results",
+ "message.input_image.image_url",
+ "message.output_text.logprobs",
+ "reasoning.encrypted_content",
]
]
"""Additional fields to include in the response."""
@@ -169,7 +169,7 @@ class ResponseCreateParamsBase(TypedDict, total=False):
service_tier: Optional[Literal["auto", "default", "flex", "priority"]]
"""The service tier for the request."""
- store: Optional[bool]
+ store: bool
"""Whether to store the response in the database."""
stream_options: Optional[StreamOptions]
@@ -206,7 +206,7 @@ class ResponseCreateParamsBase(TypedDict, total=False):
"""
-class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMessageInputContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentText(
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentText(
TypedDict, total=False
):
"""Text content for input messages in OpenAI response format."""
@@ -216,7 +216,7 @@ class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutp
type: Literal["input_text"]
-class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMessageInputContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentImage(
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentImage(
TypedDict, total=False
):
"""Image content for input messages in OpenAI response format."""
@@ -230,7 +230,7 @@ class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutp
type: Literal["input_image"]
-class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMessageInputContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentFile(
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentFile(
TypedDict, total=False
):
"""File content for input messages in OpenAI response format."""
@@ -246,14 +246,14 @@ class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutp
type: Literal["input_file"]
-InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMessageInputContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFile: TypeAlias = Union[
- InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMessageInputContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentText,
- InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMessageInputContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentImage,
- InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMessageInputContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentFile,
+InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFile: TypeAlias = Union[
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentText,
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentImage,
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentFile,
]
-class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotationOpenAIResponseAnnotationFileCitation(
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotationOpenAIResponseAnnotationFileCitation(
TypedDict, total=False
):
"""File citation annotation for referencing specific files in response content."""
@@ -267,7 +267,7 @@ class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutp
type: Literal["file_citation"]
-class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotationOpenAIResponseAnnotationCitation(
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotationOpenAIResponseAnnotationCitation(
TypedDict, total=False
):
"""URL citation annotation for referencing external web resources."""
@@ -283,7 +283,7 @@ class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutp
type: Literal["url_citation"]
-class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotationOpenAIResponseAnnotationContainerFileCitation(
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotationOpenAIResponseAnnotationContainerFileCitation(
TypedDict, total=False
):
"""Container file citation annotation referencing a file within a container."""
@@ -301,7 +301,7 @@ class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutp
type: Literal["container_file_citation"]
-class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotationOpenAIResponseAnnotationFilePath(
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotationOpenAIResponseAnnotationFilePath(
TypedDict, total=False
):
"""File path annotation referencing a generated file in response content."""
@@ -313,15 +313,15 @@ class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutp
type: Literal["file_path"]
-InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotation: TypeAlias = Union[
- InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotationOpenAIResponseAnnotationFileCitation,
- InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotationOpenAIResponseAnnotationCitation,
- InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotationOpenAIResponseAnnotationContainerFileCitation,
- InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotationOpenAIResponseAnnotationFilePath,
+InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotation: TypeAlias = Union[
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotationOpenAIResponseAnnotationFileCitation,
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotationOpenAIResponseAnnotationCitation,
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotationOpenAIResponseAnnotationContainerFileCitation,
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotationOpenAIResponseAnnotationFilePath,
]
-class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputLogprobTopLogprob(
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputLogprobTopLogprob(
TypedDict, total=False
):
"""
@@ -338,7 +338,7 @@ class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutp
"""The bytes for the token."""
-class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputLogprob(
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputLogprob(
TypedDict, total=False
):
"""
@@ -356,13 +356,13 @@ class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutp
top_logprobs: Optional[
Iterable[
- InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputLogprobTopLogprob
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputLogprobTopLogprob
]
]
"""The top log probabilities for the token."""
-class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInput(
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInput(
TypedDict, total=False
):
"""Text content within an output message of an OpenAI response."""
@@ -370,19 +370,19 @@ class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutp
text: Required[str]
annotations: Iterable[
- InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotation
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputAnnotation
]
logprobs: Optional[
Iterable[
- InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputLogprob
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInputLogprob
]
]
type: Literal["output_text"]
-class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseContentPartRefusal(
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseContentPartRefusal(
TypedDict, total=False
):
"""Refusal content within a streamed response part."""
@@ -392,13 +392,13 @@ class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutp
type: Literal["refusal"]
-InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusal: TypeAlias = Union[
- InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInput,
- InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseContentPartRefusal,
+InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusal: TypeAlias = Union[
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextInput,
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusalOpenAIResponseContentPartRefusal,
]
-class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMessageInput(
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInput(
TypedDict, total=False
):
"""
@@ -412,10 +412,10 @@ class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutp
Union[
str,
Iterable[
- InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMessageInputContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFile
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFile
],
Iterable[
- InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusal
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInputContentListOpenAIResponseOutputMessageContentOutputTextInputOpenAIResponseContentPartRefusal
],
]
]
@@ -429,7 +429,7 @@ class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutp
type: Literal["message"]
-class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseOutputMessageWebSearchToolCall(
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageWebSearchToolCall(
TypedDict, total=False
):
"""Web search tool call output message for OpenAI responses."""
@@ -441,7 +441,7 @@ class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutp
type: Literal["web_search_call"]
-class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseOutputMessageFileSearchToolCallResult(
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageFileSearchToolCallResult(
TypedDict, total=False
):
"""Search results returned by the file search operation."""
@@ -457,7 +457,7 @@ class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutp
text: Required[str]
-class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseOutputMessageFileSearchToolCall(
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageFileSearchToolCall(
TypedDict, total=False
):
"""File search tool call output message for OpenAI responses."""
@@ -470,14 +470,14 @@ class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutp
results: Optional[
Iterable[
- InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseOutputMessageFileSearchToolCallResult
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageFileSearchToolCallResult
]
]
type: Literal["file_search_call"]
-class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseOutputMessageFunctionToolCall(
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageFunctionToolCall(
TypedDict, total=False
):
"""Function tool call output message for OpenAI responses."""
@@ -495,7 +495,7 @@ class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutp
type: Literal["function_call"]
-class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseOutputMessageMcpCall(
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageMcpCall(
TypedDict, total=False
):
"""Model Context Protocol (MCP) call output message for OpenAI responses."""
@@ -515,7 +515,7 @@ class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutp
type: Literal["mcp_call"]
-class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseOutputMessageMcpListToolsTool(
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageMcpListToolsTool(
TypedDict, total=False
):
"""Tool definition returned by MCP list tools operation."""
@@ -527,7 +527,7 @@ class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutp
description: Optional[str]
-class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseOutputMessageMcpListTools(
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageMcpListTools(
TypedDict, total=False
):
"""MCP list tools output message containing available tools from an MCP server."""
@@ -538,14 +538,14 @@ class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutp
tools: Required[
Iterable[
- InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseOutputMessageMcpListToolsTool
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageMcpListToolsTool
]
]
type: Literal["mcp_list_tools"]
-class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMcpApprovalRequest(
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalRequest(
TypedDict, total=False
):
"""A request for human approval of a tool invocation."""
@@ -561,7 +561,7 @@ class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutp
type: Literal["mcp_approval_request"]
-class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseOutputMessageReasoningItemSummary(
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageReasoningItemSummary(
TypedDict, total=False
):
"""A summary of reasoning output from the model."""
@@ -573,7 +573,7 @@ class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutp
"""The type identifier, always 'summary_text'."""
-class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseOutputMessageReasoningItemContent(
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageReasoningItemContent(
TypedDict, total=False
):
"""Reasoning text from the model."""
@@ -585,7 +585,7 @@ class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutp
"""The type identifier, always 'reasoning_text'."""
-class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseOutputMessageReasoningItem(
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageReasoningItem(
TypedDict, total=False
):
"""Reasoning output from the model, representing the model's thinking process."""
@@ -595,14 +595,14 @@ class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutp
summary: Required[
Iterable[
- InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseOutputMessageReasoningItemSummary
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageReasoningItemSummary
]
]
"""Summary of the reasoning output."""
content: Optional[
Iterable[
- InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseOutputMessageReasoningItemContent
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageReasoningItemContent
]
]
"""The reasoning content from the model."""
@@ -614,7 +614,7 @@ class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutp
"""The type identifier, always 'reasoning'."""
-class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentText(
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentText(
TypedDict, total=False
):
"""Text content for input messages in OpenAI response format."""
@@ -624,7 +624,7 @@ class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutp
type: Literal["input_text"]
-class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentImage(
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentImage(
TypedDict, total=False
):
"""Image content for input messages in OpenAI response format."""
@@ -638,7 +638,7 @@ class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutp
type: Literal["input_image"]
-class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentFile(
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentFile(
TypedDict, total=False
):
"""File content for input messages in OpenAI response format."""
@@ -654,14 +654,14 @@ class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutp
type: Literal["input_file"]
-InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFile: TypeAlias = Union[
- InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentText,
- InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentImage,
- InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentFile,
+InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFile: TypeAlias = Union[
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentText,
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentImage,
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentFile,
]
-class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseInputFunctionToolCallOutput(
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseInputFunctionToolCallOutput(
TypedDict, total=False
):
"""
@@ -674,7 +674,7 @@ class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutp
Union[
str,
Iterable[
- InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFile
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFile
],
]
]
@@ -686,7 +686,7 @@ class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutp
type: Literal["function_call_output"]
-class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMcpApprovalResponse(
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponse(
TypedDict, total=False
):
"""A response to an MCP approval request."""
@@ -702,20 +702,43 @@ class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutp
type: Literal["mcp_approval_response"]
-InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponse: TypeAlias = Union[
- InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMessageInput,
- InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseOutputMessageWebSearchToolCall,
- InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseOutputMessageFileSearchToolCall,
- InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseOutputMessageFunctionToolCall,
- InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseOutputMessageMcpCall,
- InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseOutputMessageMcpListTools,
- InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMcpApprovalRequest,
- InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseOutputMessageReasoningItem,
- InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseInputFunctionToolCallOutput,
- InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponseOpenAIResponseMcpApprovalResponse,
+class InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseCompaction(
+ TypedDict, total=False
+):
+ """A compaction item that summarizes prior conversation context."""
+
+ encrypted_content: Required[str]
+
+ id: Optional[str]
+
+ type: Literal["compaction"]
+
+
+InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutput: TypeAlias = Union[
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMessageInput,
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageWebSearchToolCall,
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageFileSearchToolCall,
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageFunctionToolCall,
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageMcpCall,
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageMcpListTools,
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalRequest,
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseOutputMessageReasoningItem,
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseInputFunctionToolCallOutput,
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseMcpApprovalResponse,
+ InputListOpenAIResponseMessageUnionOpenAIResponseInputFunctionToolCallOutputOpenAIResponseCompaction,
]
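
A usage note on the union above: the request-side variants are TypedDicts, so at runtime an input item is a plain dict whose "type" field selects the variant. A minimal sketch of the new compaction variant (values are illustrative; "encrypted_content" is presumably an opaque payload carried over from a prior response):

# Sketch of a compaction input item matching the TypedDict above.
compaction_item = {
    "type": "compaction",
    "encrypted_content": "opaque-blob-from-a-prior-response",  # illustrative value
    "id": "item_123",  # optional identifier
}
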
+class ContextManagement(TypedDict, total=False):
+ """Configuration for automatic context management during response generation."""
+
+ type: Required[Literal["compaction"]]
+ """The context management entry type. Currently only 'compaction' is supported."""
+
+ compact_threshold: Optional[int]
+ """Token threshold at which compaction should be triggered."""
+
+
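
ContextManagement is likewise a TypedDict, so the request side can pass a plain dict. A sketch, taking the docstring at its word that the threshold is counted in tokens (the number is illustrative):

context_management = {
    "type": "compaction",          # required; currently the only supported entry type
    "compact_threshold": 128_000,  # illustrative token threshold
}
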
class GuardrailResponseGuardrailSpec(TypedDict, total=False):
"""Specification for a guardrail to apply during response generation."""
@@ -815,6 +838,8 @@ class Text(TypedDict, total=False):
format: Optional[TextFormat]
"""Configuration for Responses API text format."""
+ verbosity: Optional[Literal["low", "medium", "high"]]
+
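
Since Text is declared with total=False, a caller can set only the new field. A minimal sketch:

text_config = {
    "verbosity": "low",  # one of "low" | "medium" | "high"
}
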
class ToolChoiceOpenAIResponseInputToolChoiceAllowedTools(TypedDict, total=False):
"""Constrains the tools available to the model to a pre-defined set."""
@@ -1025,7 +1050,7 @@ class ToolOpenAIResponseInputToolMcp(TypedDict, total=False):
class ResponseCreateParamsNonStreaming(ResponseCreateParamsBase, total=False):
- stream: Optional[Literal[False]]
+ stream: Literal[False]
"""Whether to stream the response."""
diff --git a/src/llama_stack_client/types/response_delete_response.py b/src/ogx_client/types/response_delete_response.py
similarity index 100%
rename from src/llama_stack_client/types/response_delete_response.py
rename to src/ogx_client/types/response_delete_response.py
diff --git a/src/llama_stack_client/types/response_list_params.py b/src/ogx_client/types/response_list_params.py
similarity index 100%
rename from src/llama_stack_client/types/response_list_params.py
rename to src/ogx_client/types/response_list_params.py
diff --git a/src/llama_stack_client/types/response_list_response.py b/src/ogx_client/types/response_list_response.py
similarity index 99%
rename from src/llama_stack_client/types/response_list_response.py
rename to src/ogx_client/types/response_list_response.py
index 16b7778b..c308ebf2 100644
--- a/src/llama_stack_client/types/response_list_response.py
+++ b/src/ogx_client/types/response_list_response.py
@@ -49,6 +49,7 @@
"InputOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentImage",
"InputOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentFile",
"InputOpenAIResponseMcpApprovalResponse",
+ "InputOpenAIResponseCompaction",
"Output",
"OutputOpenAIResponseMessageOutput",
"OutputOpenAIResponseMessageOutputContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFile",
@@ -569,6 +570,16 @@ class InputOpenAIResponseMcpApprovalResponse(BaseModel):
type: Optional[Literal["mcp_approval_response"]] = None
+class InputOpenAIResponseCompaction(BaseModel):
+ """A compaction item that summarizes prior conversation context."""
+
+ encrypted_content: str
+
+ id: Optional[str] = None
+
+ type: Optional[Literal["compaction"]] = None
+
+
Input: TypeAlias = Union[
InputOpenAIResponseMessageOutput,
InputOpenAIResponseOutputMessageWebSearchToolCall,
@@ -580,7 +591,7 @@ class InputOpenAIResponseMcpApprovalResponse(BaseModel):
InputOpenAIResponseOutputMessageReasoningItem,
InputOpenAIResponseInputFunctionToolCallOutput,
InputOpenAIResponseMcpApprovalResponse,
- InputOpenAIResponseMessageOutput,
+ InputOpenAIResponseCompaction,
]
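
Note that the second occurrence above was previously a duplicate of InputOpenAIResponseMessageOutput; it is now the compaction variant. A parsing sketch, assuming the generated BaseModel exposes pydantic v2's model_validate (values illustrative):

from ogx_client.types.response_list_response import InputOpenAIResponseCompaction

item = InputOpenAIResponseCompaction.model_validate(
    {"type": "compaction", "encrypted_content": "opaque-blob"}
)
assert item.type == "compaction" and item.id is None  # id defaults to None
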
@@ -1076,6 +1087,8 @@ class Text(BaseModel):
format: Optional[TextFormat] = None
"""Configuration for Responses API text format."""
+ verbosity: Optional[Literal["low", "medium", "high"]] = None
+
class ToolChoiceOpenAIResponseInputToolChoiceAllowedTools(BaseModel):
"""Constrains the tools available to the model to a pre-defined set."""
diff --git a/src/ogx_client/types/response_message.py b/src/ogx_client/types/response_message.py
new file mode 100644
index 00000000..b45439cf
--- /dev/null
+++ b/src/ogx_client/types/response_message.py
@@ -0,0 +1,253 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from .._utils import PropertyInfo
+from .._models import BaseModel
+
+__all__ = [
+ "ResponseMessage",
+ "ContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFile",
+ "ContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentText",
+ "ContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentImage",
+ "ContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentFile",
+ "ContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusal",
+ "ContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputText",
+ "ContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextAnnotation",
+ "ContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextAnnotationOpenAIResponseAnnotationFileCitation",
+ "ContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextAnnotationOpenAIResponseAnnotationCitation",
+ "ContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextAnnotationOpenAIResponseAnnotationContainerFileCitation",
+ "ContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextAnnotationOpenAIResponseAnnotationFilePath",
+ "ContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextLogprob",
+ "ContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextLogprobTopLogprob",
+ "ContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseContentPartRefusal",
+]
+
+
+class ContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentText(
+ BaseModel
+):
+ """Text content for input messages in OpenAI response format."""
+
+ text: str
+
+ type: Optional[Literal["input_text"]] = None
+
+
+class ContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentImage(
+ BaseModel
+):
+ """Image content for input messages in OpenAI response format."""
+
+ detail: Optional[Literal["low", "high", "auto"]] = None
+
+ file_id: Optional[str] = None
+
+ image_url: Optional[str] = None
+
+ type: Optional[Literal["input_image"]] = None
+
+
+class ContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentFile(
+ BaseModel
+):
+ """File content for input messages in OpenAI response format."""
+
+ file_data: Optional[str] = None
+
+ file_id: Optional[str] = None
+
+ file_url: Optional[str] = None
+
+ filename: Optional[str] = None
+
+ type: Optional[Literal["input_file"]] = None
+
+
+ContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFile: TypeAlias = Annotated[
+ Union[
+ ContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentText,
+ ContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentImage,
+ ContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentFile,
+ ],
+ PropertyInfo(discriminator="type"),
+]
+
+
+class ContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextAnnotationOpenAIResponseAnnotationFileCitation(
+ BaseModel
+):
+ """File citation annotation for referencing specific files in response content."""
+
+ file_id: str
+
+ filename: str
+
+ index: int
+
+ type: Optional[Literal["file_citation"]] = None
+
+
+class ContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextAnnotationOpenAIResponseAnnotationCitation(
+ BaseModel
+):
+ """URL citation annotation for referencing external web resources."""
+
+ end_index: int
+
+ start_index: int
+
+ title: str
+
+ url: str
+
+ type: Optional[Literal["url_citation"]] = None
+
+
+class ContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextAnnotationOpenAIResponseAnnotationContainerFileCitation(
+ BaseModel
+):
+ """Container file citation annotation referencing a file within a container."""
+
+ container_id: str
+
+ end_index: int
+
+ file_id: str
+
+ filename: str
+
+ start_index: int
+
+ type: Optional[Literal["container_file_citation"]] = None
+
+
+class ContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextAnnotationOpenAIResponseAnnotationFilePath(
+ BaseModel
+):
+ """File path annotation referencing a generated file in response content."""
+
+ file_id: str
+
+ index: int
+
+ type: Optional[Literal["file_path"]] = None
+
+
+ContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextAnnotation: TypeAlias = Annotated[
+ Union[
+ ContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextAnnotationOpenAIResponseAnnotationFileCitation,
+ ContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextAnnotationOpenAIResponseAnnotationCitation,
+ ContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextAnnotationOpenAIResponseAnnotationContainerFileCitation,
+ ContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextAnnotationOpenAIResponseAnnotationFilePath,
+ ],
+ PropertyInfo(discriminator="type"),
+]
+
+
+class ContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextLogprobTopLogprob(
+ BaseModel
+):
+ """
+ The top log probability for a token from an OpenAI-compatible chat completion response.
+ """
+
+ token: str
+ """The token."""
+
+ logprob: float
+ """The log probability of the token."""
+
+ bytes: Optional[List[int]] = None
+ """The bytes for the token."""
+
+
+class ContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextLogprob(
+ BaseModel
+):
+ """
+ The log probability for a token from an OpenAI-compatible chat completion response.
+ """
+
+ token: str
+ """The token."""
+
+ logprob: float
+ """The log probability of the token."""
+
+ bytes: Optional[List[int]] = None
+ """The bytes for the token."""
+
+ top_logprobs: Optional[
+ List[
+ ContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextLogprobTopLogprob
+ ]
+ ] = None
+ """The top log probabilities for the token."""
+
+
+class ContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputText(
+ BaseModel
+):
+ """Text content within an output message of an OpenAI response."""
+
+ text: str
+
+ annotations: Optional[
+ List[
+ ContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextAnnotation
+ ]
+ ] = None
+
+ logprobs: Optional[
+ List[
+ ContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextLogprob
+ ]
+ ] = None
+
+ type: Optional[Literal["output_text"]] = None
+
+
+class ContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseContentPartRefusal(
+ BaseModel
+):
+ """Refusal content within a streamed response part."""
+
+ refusal: str
+
+ type: Optional[Literal["refusal"]] = None
+
+
+ContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusal: TypeAlias = Annotated[
+ Union[
+ ContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputText,
+ ContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseContentPartRefusal,
+ ],
+ PropertyInfo(discriminator="type"),
+]
+
+
+class ResponseMessage(BaseModel):
+ """
+ Corresponds to the various Message types in the Responses API.
+ They are all under one type because the Responses API gives them all
+ the same "type" value, and there is no way to tell them apart in certain
+ scenarios.
+ """
+
+ content: Union[
+ str,
+ List[
+ ContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFile
+ ],
+ List[ContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusal],
+ ]
+
+ role: Literal["system", "developer", "user", "assistant"]
+
+ id: Optional[str] = None
+
+ status: Optional[str] = None
+
+ type: Optional[Literal["message"]] = None
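
This shared ResponseMessage model replaces the per-event duplicates deleted from response_object_stream.py below. A round-trip sketch, again assuming the generated BaseModel exposes pydantic v2's model_validate:

from ogx_client.types.response_message import ResponseMessage

msg = ResponseMessage.model_validate(
    {
        "id": "msg_1",  # illustrative
        "role": "assistant",
        "type": "message",
        "content": [{"type": "output_text", "text": "Hello!"}],
    }
)
part = msg.content[0]  # content parts are discriminated on their "type" field
assert part.type == "output_text" and part.text == "Hello!"
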
diff --git a/src/llama_stack_client/types/response_object.py b/src/ogx_client/types/response_object.py
similarity index 99%
rename from src/llama_stack_client/types/response_object.py
rename to src/ogx_client/types/response_object.py
index d582225e..15f740c2 100644
--- a/src/llama_stack_client/types/response_object.py
+++ b/src/ogx_client/types/response_object.py
@@ -566,6 +566,8 @@ class Text(BaseModel):
format: Optional[TextFormat] = None
"""Configuration for Responses API text format."""
+ verbosity: Optional[Literal["low", "medium", "high"]] = None
+
class ToolChoiceOpenAIResponseInputToolChoiceAllowedTools(BaseModel):
"""Constrains the tools available to the model to a pre-defined set."""
diff --git a/src/llama_stack_client/types/response_object_stream.py b/src/ogx_client/types/response_object_stream.py
similarity index 62%
rename from src/llama_stack_client/types/response_object_stream.py
rename to src/ogx_client/types/response_object_stream.py
index 6e84d189..f24b32dc 100644
--- a/src/llama_stack_client/types/response_object_stream.py
+++ b/src/ogx_client/types/response_object_stream.py
@@ -12,6 +12,7 @@
from .._utils import PropertyInfo
from .._models import BaseModel
from .response_object import ResponseObject
+from .response_message import ResponseMessage
__all__ = [
"ResponseObjectStream",
@@ -19,21 +20,6 @@
"OpenAIResponseObjectStreamResponseInProgress",
"OpenAIResponseObjectStreamResponseOutputItemAdded",
"OpenAIResponseObjectStreamResponseOutputItemAddedItem",
- "OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessage",
- "OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFile",
- "OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentText",
- "OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentImage",
- "OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentFile",
- "OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusal",
- "OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputText",
- "OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextAnnotation",
- "OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextAnnotationOpenAIResponseAnnotationFileCitation",
- "OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextAnnotationOpenAIResponseAnnotationCitation",
- "OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextAnnotationOpenAIResponseAnnotationContainerFileCitation",
- "OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextAnnotationOpenAIResponseAnnotationFilePath",
- "OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextLogprob",
- "OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextLogprobTopLogprob",
- "OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseContentPartRefusal",
"OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseOutputMessageWebSearchToolCall",
"OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseOutputMessageFileSearchToolCall",
"OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseOutputMessageFileSearchToolCallResult",
@@ -47,21 +33,6 @@
"OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseOutputMessageReasoningItemContent",
"OpenAIResponseObjectStreamResponseOutputItemDone",
"OpenAIResponseObjectStreamResponseOutputItemDoneItem",
- "OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessage",
- "OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFile",
- "OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentText",
- "OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentImage",
- "OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentFile",
- "OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusal",
- "OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputText",
- "OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextAnnotation",
- "OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextAnnotationOpenAIResponseAnnotationFileCitation",
- "OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextAnnotationOpenAIResponseAnnotationCitation",
- "OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextAnnotationOpenAIResponseAnnotationContainerFileCitation",
- "OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextAnnotationOpenAIResponseAnnotationFilePath",
- "OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextLogprob",
- "OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextLogprobTopLogprob",
- "OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseContentPartRefusal",
"OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseOutputMessageWebSearchToolCall",
"OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseOutputMessageFileSearchToolCall",
"OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseOutputMessageFileSearchToolCallResult",
@@ -136,6 +107,7 @@
"OpenAIResponseObjectStreamResponseIncomplete",
"OpenAIResponseObjectStreamResponseFailed",
"OpenAIResponseObjectStreamResponseCompleted",
+ "OpenAIResponseObjectStreamError",
]
@@ -161,236 +133,6 @@ class OpenAIResponseObjectStreamResponseInProgress(BaseModel):
type: Optional[Literal["response.in_progress"]] = None
-class OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentText(
- BaseModel
-):
- """Text content for input messages in OpenAI response format."""
-
- text: str
-
- type: Optional[Literal["input_text"]] = None
-
-
-class OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentImage(
- BaseModel
-):
- """Image content for input messages in OpenAI response format."""
-
- detail: Optional[Literal["low", "high", "auto"]] = None
-
- file_id: Optional[str] = None
-
- image_url: Optional[str] = None
-
- type: Optional[Literal["input_image"]] = None
-
-
-class OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentFile(
- BaseModel
-):
- """File content for input messages in OpenAI response format."""
-
- file_data: Optional[str] = None
-
- file_id: Optional[str] = None
-
- file_url: Optional[str] = None
-
- filename: Optional[str] = None
-
- type: Optional[Literal["input_file"]] = None
-
-
-OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFile: TypeAlias = Annotated[
- Union[
- OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentText,
- OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentImage,
- OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentFile,
- ],
- PropertyInfo(discriminator="type"),
-]
-
-
-class OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextAnnotationOpenAIResponseAnnotationFileCitation(
- BaseModel
-):
- """File citation annotation for referencing specific files in response content."""
-
- file_id: str
-
- filename: str
-
- index: int
-
- type: Optional[Literal["file_citation"]] = None
-
-
-class OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextAnnotationOpenAIResponseAnnotationCitation(
- BaseModel
-):
- """URL citation annotation for referencing external web resources."""
-
- end_index: int
-
- start_index: int
-
- title: str
-
- url: str
-
- type: Optional[Literal["url_citation"]] = None
-
-
-class OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextAnnotationOpenAIResponseAnnotationContainerFileCitation(
- BaseModel
-):
- """Container file citation annotation referencing a file within a container."""
-
- container_id: str
-
- end_index: int
-
- file_id: str
-
- filename: str
-
- start_index: int
-
- type: Optional[Literal["container_file_citation"]] = None
-
-
-class OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextAnnotationOpenAIResponseAnnotationFilePath(
- BaseModel
-):
- """File path annotation referencing a generated file in response content."""
-
- file_id: str
-
- index: int
-
- type: Optional[Literal["file_path"]] = None
-
-
-OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextAnnotation: TypeAlias = Annotated[
- Union[
- OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextAnnotationOpenAIResponseAnnotationFileCitation,
- OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextAnnotationOpenAIResponseAnnotationCitation,
- OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextAnnotationOpenAIResponseAnnotationContainerFileCitation,
- OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextAnnotationOpenAIResponseAnnotationFilePath,
- ],
- PropertyInfo(discriminator="type"),
-]
-
-
-class OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextLogprobTopLogprob(
- BaseModel
-):
- """
- The top log probability for a token from an OpenAI-compatible chat completion response.
- """
-
- token: str
- """The token."""
-
- logprob: float
- """The log probability of the token."""
-
- bytes: Optional[List[int]] = None
- """The bytes for the token."""
-
-
-class OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextLogprob(
- BaseModel
-):
- """
- The log probability for a token from an OpenAI-compatible chat completion response.
- """
-
- token: str
- """The token."""
-
- logprob: float
- """The log probability of the token."""
-
- bytes: Optional[List[int]] = None
- """The bytes for the token."""
-
- top_logprobs: Optional[
- List[
- OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextLogprobTopLogprob
- ]
- ] = None
- """The top log probabilities for the token."""
-
-
-class OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputText(
- BaseModel
-):
- """Text content within an output message of an OpenAI response."""
-
- text: str
-
- annotations: Optional[
- List[
- OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextAnnotation
- ]
- ] = None
-
- logprobs: Optional[
- List[
- OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextLogprob
- ]
- ] = None
-
- type: Optional[Literal["output_text"]] = None
-
-
-class OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseContentPartRefusal(
- BaseModel
-):
- """Refusal content within a streamed response part."""
-
- refusal: str
-
- type: Optional[Literal["refusal"]] = None
-
-
-OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusal: TypeAlias = Annotated[
- Union[
- OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputText,
- OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseContentPartRefusal,
- ],
- PropertyInfo(discriminator="type"),
-]
-
-
-class OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessage(BaseModel):
- """
- Corresponds to the various Message types in the Responses API.
- They are all under one type because the Responses API gives them all
- the same "type" value, and there is no way to tell them apart in certain
- scenarios.
- """
-
- content: Union[
- str,
- List[
- OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFile
- ],
- List[
- OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusal
- ],
- ]
-
- role: Literal["system", "developer", "user", "assistant"]
-
- id: Optional[str] = None
-
- status: Optional[str] = None
-
- type: Optional[Literal["message"]] = None
-
-
class OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseOutputMessageWebSearchToolCall(BaseModel):
"""Web search tool call output message for OpenAI responses."""
@@ -546,7 +288,7 @@ class OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseOutputM
OpenAIResponseObjectStreamResponseOutputItemAddedItem: TypeAlias = Annotated[
Union[
- OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseMessage,
+ ResponseMessage,
OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseOutputMessageWebSearchToolCall,
OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseOutputMessageFileSearchToolCall,
OpenAIResponseObjectStreamResponseOutputItemAddedItemOpenAIResponseOutputMessageFunctionToolCall,
@@ -578,236 +320,6 @@ class OpenAIResponseObjectStreamResponseOutputItemAdded(BaseModel):
type: Optional[Literal["response.output_item.added"]] = None
-class OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentText(
- BaseModel
-):
- """Text content for input messages in OpenAI response format."""
-
- text: str
-
- type: Optional[Literal["input_text"]] = None
-
-
-class OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentImage(
- BaseModel
-):
- """Image content for input messages in OpenAI response format."""
-
- detail: Optional[Literal["low", "high", "auto"]] = None
-
- file_id: Optional[str] = None
-
- image_url: Optional[str] = None
-
- type: Optional[Literal["input_image"]] = None
-
-
-class OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentFile(
- BaseModel
-):
- """File content for input messages in OpenAI response format."""
-
- file_data: Optional[str] = None
-
- file_id: Optional[str] = None
-
- file_url: Optional[str] = None
-
- filename: Optional[str] = None
-
- type: Optional[Literal["input_file"]] = None
-
-
-OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFile: TypeAlias = Annotated[
- Union[
- OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentText,
- OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentImage,
- OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentFile,
- ],
- PropertyInfo(discriminator="type"),
-]
-
-
-class OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextAnnotationOpenAIResponseAnnotationFileCitation(
- BaseModel
-):
- """File citation annotation for referencing specific files in response content."""
-
- file_id: str
-
- filename: str
-
- index: int
-
- type: Optional[Literal["file_citation"]] = None
-
-
-class OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextAnnotationOpenAIResponseAnnotationCitation(
- BaseModel
-):
- """URL citation annotation for referencing external web resources."""
-
- end_index: int
-
- start_index: int
-
- title: str
-
- url: str
-
- type: Optional[Literal["url_citation"]] = None
-
-
-class OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextAnnotationOpenAIResponseAnnotationContainerFileCitation(
- BaseModel
-):
- """Container file citation annotation referencing a file within a container."""
-
- container_id: str
-
- end_index: int
-
- file_id: str
-
- filename: str
-
- start_index: int
-
- type: Optional[Literal["container_file_citation"]] = None
-
-
-class OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextAnnotationOpenAIResponseAnnotationFilePath(
- BaseModel
-):
- """File path annotation referencing a generated file in response content."""
-
- file_id: str
-
- index: int
-
- type: Optional[Literal["file_path"]] = None
-
-
-OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextAnnotation: TypeAlias = Annotated[
- Union[
- OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextAnnotationOpenAIResponseAnnotationFileCitation,
- OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextAnnotationOpenAIResponseAnnotationCitation,
- OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextAnnotationOpenAIResponseAnnotationContainerFileCitation,
- OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextAnnotationOpenAIResponseAnnotationFilePath,
- ],
- PropertyInfo(discriminator="type"),
-]
-
-
-class OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextLogprobTopLogprob(
- BaseModel
-):
- """
- The top log probability for a token from an OpenAI-compatible chat completion response.
- """
-
- token: str
- """The token."""
-
- logprob: float
- """The log probability of the token."""
-
- bytes: Optional[List[int]] = None
- """The bytes for the token."""
-
-
-class OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextLogprob(
- BaseModel
-):
- """
- The log probability for a token from an OpenAI-compatible chat completion response.
- """
-
- token: str
- """The token."""
-
- logprob: float
- """The log probability of the token."""
-
- bytes: Optional[List[int]] = None
- """The bytes for the token."""
-
- top_logprobs: Optional[
- List[
- OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextLogprobTopLogprob
- ]
- ] = None
- """The top log probabilities for the token."""
-
-
-class OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputText(
- BaseModel
-):
- """Text content within an output message of an OpenAI response."""
-
- text: str
-
- annotations: Optional[
- List[
- OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextAnnotation
- ]
- ] = None
-
- logprobs: Optional[
- List[
- OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputTextLogprob
- ]
- ] = None
-
- type: Optional[Literal["output_text"]] = None
-
-
-class OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseContentPartRefusal(
- BaseModel
-):
- """Refusal content within a streamed response part."""
-
- refusal: str
-
- type: Optional[Literal["refusal"]] = None
-
-
-OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusal: TypeAlias = Annotated[
- Union[
- OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseOutputMessageContentOutputText,
- OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusalOpenAIResponseContentPartRefusal,
- ],
- PropertyInfo(discriminator="type"),
-]
-
-
-class OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessage(BaseModel):
- """
- Corresponds to the various Message types in the Responses API.
- They are all under one type because the Responses API gives them all
- the same "type" value, and there is no way to tell them apart in certain
- scenarios.
- """
-
- content: Union[
- str,
- List[
- OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFile
- ],
- List[
- OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessageContentListOpenAIResponseOutputMessageContentOutputTextOpenAIResponseContentPartRefusal
- ],
- ]
-
- role: Literal["system", "developer", "user", "assistant"]
-
- id: Optional[str] = None
-
- status: Optional[str] = None
-
- type: Optional[Literal["message"]] = None
-
-
class OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseOutputMessageWebSearchToolCall(BaseModel):
"""Web search tool call output message for OpenAI responses."""
@@ -963,7 +475,7 @@ class OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseOutputMe
OpenAIResponseObjectStreamResponseOutputItemDoneItem: TypeAlias = Annotated[
Union[
- OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseMessage,
+ ResponseMessage,
OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseOutputMessageWebSearchToolCall,
OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseOutputMessageFileSearchToolCall,
OpenAIResponseObjectStreamResponseOutputItemDoneItemOpenAIResponseOutputMessageFunctionToolCall,
@@ -1845,6 +1357,24 @@ class OpenAIResponseObjectStreamResponseCompleted(BaseModel):
type: Optional[Literal["response.completed"]] = None
+class OpenAIResponseObjectStreamError(BaseModel):
+ """Standalone error event emitted during streaming when an error occurs.
+
+ This is distinct from response.failed, which is a response lifecycle event.
+ The error event signals transport/infrastructure-level errors to the client.
+ """
+
+ message: str
+
+ sequence_number: int
+
+ code: Optional[str] = None
+
+ param: Optional[str] = None
+
+ type: Optional[Literal["error"]] = None
+
+
ResponseObjectStream: TypeAlias = Annotated[
Union[
OpenAIResponseObjectStreamResponseCreated,
@@ -1883,6 +1413,7 @@ class OpenAIResponseObjectStreamResponseCompleted(BaseModel):
OpenAIResponseObjectStreamResponseIncomplete,
OpenAIResponseObjectStreamResponseFailed,
OpenAIResponseObjectStreamResponseCompleted,
+ OpenAIResponseObjectStreamError,
],
PropertyInfo(discriminator="type"),
]
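
With the standalone error event added to the ResponseObjectStream union, consumers can branch on it separately from the response.failed lifecycle event. A hedged sketch of a dispatch helper (the event iterator feeding it is assumed to come from a streaming create call):

from ogx_client.types.response_object_stream import (
    OpenAIResponseObjectStreamError,
    ResponseObjectStream,
)

def handle_event(event: ResponseObjectStream) -> None:
    # "error" signals a transport/infrastructure-level failure, distinct
    # from the "response.failed" lifecycle event.
    if isinstance(event, OpenAIResponseObjectStreamError):
        raise RuntimeError(f"stream error ({event.code or 'unknown'}): {event.message}")
    if event.type == "response.completed":
        print("response finished")
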
diff --git a/src/llama_stack_client/types/responses/__init__.py b/src/ogx_client/types/responses/__init__.py
similarity index 100%
rename from src/llama_stack_client/types/responses/__init__.py
rename to src/ogx_client/types/responses/__init__.py
diff --git a/src/llama_stack_client/types/responses/input_item_list_params.py b/src/ogx_client/types/responses/input_item_list_params.py
similarity index 100%
rename from src/llama_stack_client/types/responses/input_item_list_params.py
rename to src/ogx_client/types/responses/input_item_list_params.py
diff --git a/src/llama_stack_client/types/responses/input_item_list_response.py b/src/ogx_client/types/responses/input_item_list_response.py
similarity index 98%
rename from src/llama_stack_client/types/responses/input_item_list_response.py
rename to src/ogx_client/types/responses/input_item_list_response.py
index 9591b758..9fa4b637 100644
--- a/src/llama_stack_client/types/responses/input_item_list_response.py
+++ b/src/ogx_client/types/responses/input_item_list_response.py
@@ -47,6 +47,7 @@
"DataOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentImage",
"DataOpenAIResponseInputFunctionToolCallOutputOutputListOpenAIResponseInputMessageContentTextOpenAIResponseInputMessageContentImageOpenAIResponseInputMessageContentFileOpenAIResponseInputMessageContentFile",
"DataOpenAIResponseMcpApprovalResponse",
+ "DataOpenAIResponseCompaction",
]
@@ -512,6 +513,16 @@ class DataOpenAIResponseMcpApprovalResponse(BaseModel):
type: Optional[Literal["mcp_approval_response"]] = None
+class DataOpenAIResponseCompaction(BaseModel):
+ """A compaction item that summarizes prior conversation context."""
+
+ encrypted_content: str
+
+ id: Optional[str] = None
+
+ type: Optional[Literal["compaction"]] = None
+
+
Data: TypeAlias = Union[
DataOpenAIResponseMessageOutput,
DataOpenAIResponseOutputMessageWebSearchToolCall,
@@ -523,7 +534,7 @@ class DataOpenAIResponseMcpApprovalResponse(BaseModel):
DataOpenAIResponseOutputMessageReasoningItem,
DataOpenAIResponseInputFunctionToolCallOutput,
DataOpenAIResponseMcpApprovalResponse,
- DataOpenAIResponseMessageOutput,
+ DataOpenAIResponseCompaction,
]
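
A short consumer-side sketch of the new compaction variant; `client.responses.input_items.list(...)` is an assumed call shape inferred from the module path, and only `type`, `id`, and `encrypted_content` come from this diff.

    page = client.responses.input_items.list("resp_123")  # assumed call shape
    for item in page.data:
        if item.type == "compaction":
            # DataOpenAIResponseCompaction carries an opaque, encrypted summary
            # of prior conversation context rather than raw messages.
            print(f"compaction item {item.id}: {len(item.encrypted_content)} chars")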
diff --git a/src/llama_stack_client/types/route_list_params.py b/src/ogx_client/types/route_list_params.py
similarity index 100%
rename from src/llama_stack_client/types/route_list_params.py
rename to src/ogx_client/types/route_list_params.py
diff --git a/src/llama_stack_client/types/route_list_response.py b/src/ogx_client/types/route_list_response.py
similarity index 100%
rename from src/llama_stack_client/types/route_list_response.py
rename to src/ogx_client/types/route_list_response.py
diff --git a/src/llama_stack_client/types/run_shield_response.py b/src/ogx_client/types/run_shield_response.py
similarity index 100%
rename from src/llama_stack_client/types/run_shield_response.py
rename to src/ogx_client/types/run_shield_response.py
diff --git a/src/llama_stack_client/types/safety_run_shield_params.py b/src/ogx_client/types/safety_run_shield_params.py
similarity index 100%
rename from src/llama_stack_client/types/safety_run_shield_params.py
rename to src/ogx_client/types/safety_run_shield_params.py
diff --git a/src/llama_stack_client/types/scoring_fn_params.py b/src/ogx_client/types/scoring_fn_params.py
similarity index 100%
rename from src/llama_stack_client/types/scoring_fn_params.py
rename to src/ogx_client/types/scoring_fn_params.py
diff --git a/src/llama_stack_client/types/scoring_fn_params_param.py b/src/ogx_client/types/scoring_fn_params_param.py
similarity index 100%
rename from src/llama_stack_client/types/scoring_fn_params_param.py
rename to src/ogx_client/types/scoring_fn_params_param.py
diff --git a/src/llama_stack_client/types/shared/__init__.py b/src/ogx_client/types/shared/__init__.py
similarity index 94%
rename from src/llama_stack_client/types/shared/__init__.py
rename to src/ogx_client/types/shared/__init__.py
index c8fb8fd0..cc6b1615 100644
--- a/src/llama_stack_client/types/shared/__init__.py
+++ b/src/ogx_client/types/shared/__init__.py
@@ -11,7 +11,6 @@
from .health_info import HealthInfo as HealthInfo
from .version_info import VersionInfo as VersionInfo
from .provider_info import ProviderInfo as ProviderInfo
-from .scoring_result import ScoringResult as ScoringResult
from .system_message import SystemMessage as SystemMessage
from .sampling_params import SamplingParams as SamplingParams
from .safety_violation import SafetyViolation as SafetyViolation
diff --git a/src/llama_stack_client/types/shared/health_info.py b/src/ogx_client/types/shared/health_info.py
similarity index 100%
rename from src/llama_stack_client/types/shared/health_info.py
rename to src/ogx_client/types/shared/health_info.py
diff --git a/src/llama_stack_client/types/shared/interleaved_content.py b/src/ogx_client/types/shared/interleaved_content.py
similarity index 100%
rename from src/llama_stack_client/types/shared/interleaved_content.py
rename to src/ogx_client/types/shared/interleaved_content.py
diff --git a/src/llama_stack_client/types/shared/interleaved_content_item.py b/src/ogx_client/types/shared/interleaved_content_item.py
similarity index 100%
rename from src/llama_stack_client/types/shared/interleaved_content_item.py
rename to src/ogx_client/types/shared/interleaved_content_item.py
diff --git a/src/llama_stack_client/types/shared/list_providers_response.py b/src/ogx_client/types/shared/list_providers_response.py
similarity index 100%
rename from src/llama_stack_client/types/shared/list_providers_response.py
rename to src/ogx_client/types/shared/list_providers_response.py
diff --git a/src/llama_stack_client/types/shared/list_routes_response.py b/src/ogx_client/types/shared/list_routes_response.py
similarity index 100%
rename from src/llama_stack_client/types/shared/list_routes_response.py
rename to src/ogx_client/types/shared/list_routes_response.py
diff --git a/src/llama_stack_client/types/shared/param_type.py b/src/ogx_client/types/shared/param_type.py
similarity index 100%
rename from src/llama_stack_client/types/shared/param_type.py
rename to src/ogx_client/types/shared/param_type.py
diff --git a/src/llama_stack_client/types/shared/provider_info.py b/src/ogx_client/types/shared/provider_info.py
similarity index 100%
rename from src/llama_stack_client/types/shared/provider_info.py
rename to src/ogx_client/types/shared/provider_info.py
diff --git a/src/llama_stack_client/types/shared/route_info.py b/src/ogx_client/types/shared/route_info.py
similarity index 100%
rename from src/llama_stack_client/types/shared/route_info.py
rename to src/ogx_client/types/shared/route_info.py
diff --git a/src/llama_stack_client/types/shared/safety_violation.py b/src/ogx_client/types/shared/safety_violation.py
similarity index 100%
rename from src/llama_stack_client/types/shared/safety_violation.py
rename to src/ogx_client/types/shared/safety_violation.py
diff --git a/src/llama_stack_client/types/shared/sampling_params.py b/src/ogx_client/types/shared/sampling_params.py
similarity index 100%
rename from src/llama_stack_client/types/shared/sampling_params.py
rename to src/ogx_client/types/shared/sampling_params.py
diff --git a/src/llama_stack_client/types/shared/system_message.py b/src/ogx_client/types/shared/system_message.py
similarity index 52%
rename from src/llama_stack_client/types/shared/system_message.py
rename to src/ogx_client/types/shared/system_message.py
index a50fa60c..7cc6976f 100644
--- a/src/llama_stack_client/types/shared/system_message.py
+++ b/src/ogx_client/types/shared/system_message.py
@@ -15,37 +15,37 @@
__all__ = [
"SystemMessage",
"Content",
- "ContentImageContentItemInput",
- "ContentImageContentItemInputImage",
- "ContentImageContentItemInputImageURL",
+ "ContentImageContentItem",
+ "ContentImageContentItemImage",
+ "ContentImageContentItemImageURL",
"ContentTextContentItem",
- "ContentListImageContentItemInputTextContentItem",
- "ContentListImageContentItemInputTextContentItemImageContentItemInput",
- "ContentListImageContentItemInputTextContentItemImageContentItemInputImage",
- "ContentListImageContentItemInputTextContentItemImageContentItemInputImageURL",
- "ContentListImageContentItemInputTextContentItemTextContentItem",
+ "ContentListImageContentItemTextContentItem",
+ "ContentListImageContentItemTextContentItemImageContentItem",
+ "ContentListImageContentItemTextContentItemImageContentItemImage",
+ "ContentListImageContentItemTextContentItemImageContentItemImageURL",
+ "ContentListImageContentItemTextContentItemTextContentItem",
]
-class ContentImageContentItemInputImageURL(BaseModel):
+class ContentImageContentItemImageURL(BaseModel):
"""A URL reference to external content."""
uri: str
-class ContentImageContentItemInputImage(BaseModel):
+class ContentImageContentItemImage(BaseModel):
"""A URL or a base64 encoded string"""
data: Optional[str] = None
- url: Optional[ContentImageContentItemInputImageURL] = None
+ url: Optional[ContentImageContentItemImageURL] = None
"""A URL reference to external content."""
-class ContentImageContentItemInput(BaseModel):
+class ContentImageContentItem(BaseModel):
"""A image content item"""
- image: ContentImageContentItemInputImage
+ image: ContentImageContentItemImage
"""A URL or a base64 encoded string"""
type: Optional[Literal["image"]] = None
@@ -59,31 +59,31 @@ class ContentTextContentItem(BaseModel):
type: Optional[Literal["text"]] = None
-class ContentListImageContentItemInputTextContentItemImageContentItemInputImageURL(BaseModel):
+class ContentListImageContentItemTextContentItemImageContentItemImageURL(BaseModel):
"""A URL reference to external content."""
uri: str
-class ContentListImageContentItemInputTextContentItemImageContentItemInputImage(BaseModel):
+class ContentListImageContentItemTextContentItemImageContentItemImage(BaseModel):
"""A URL or a base64 encoded string"""
data: Optional[str] = None
- url: Optional[ContentListImageContentItemInputTextContentItemImageContentItemInputImageURL] = None
+ url: Optional[ContentListImageContentItemTextContentItemImageContentItemImageURL] = None
"""A URL reference to external content."""
-class ContentListImageContentItemInputTextContentItemImageContentItemInput(BaseModel):
+class ContentListImageContentItemTextContentItemImageContentItem(BaseModel):
"""A image content item"""
- image: ContentListImageContentItemInputTextContentItemImageContentItemInputImage
+ image: ContentListImageContentItemTextContentItemImageContentItemImage
"""A URL or a base64 encoded string"""
type: Optional[Literal["image"]] = None
-class ContentListImageContentItemInputTextContentItemTextContentItem(BaseModel):
+class ContentListImageContentItemTextContentItemTextContentItem(BaseModel):
"""A text content item"""
text: str
@@ -91,16 +91,16 @@ class ContentListImageContentItemInputTextContentItemTextContentItem(BaseModel):
type: Optional[Literal["text"]] = None
-ContentListImageContentItemInputTextContentItem: TypeAlias = Annotated[
+ContentListImageContentItemTextContentItem: TypeAlias = Annotated[
Union[
- ContentListImageContentItemInputTextContentItemImageContentItemInput,
- ContentListImageContentItemInputTextContentItemTextContentItem,
+ ContentListImageContentItemTextContentItemImageContentItem,
+ ContentListImageContentItemTextContentItemTextContentItem,
],
PropertyInfo(discriminator="type"),
]
Content: TypeAlias = Union[
- str, ContentImageContentItemInput, ContentTextContentItem, List[ContentListImageContentItemInputTextContentItem]
+ str, ContentImageContentItem, ContentTextContentItem, List[ContentListImageContentItemTextContentItem]
]
@@ -111,7 +111,7 @@ class SystemMessage(BaseModel):
"""The content of the 'system prompt'.
If multiple system messages are provided, they are concatenated. The underlying
- Llama Stack code may also add other system messages.
+ OGX code may also add other system messages.
"""
role: Optional[Literal["system"]] = None
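
Since the rename is purely mechanical, the `Content` union still accepts the same shapes as before. An illustrative sketch: the import is confirmed by the shared `__init__` earlier in this diff, while the dict-to-model coercion is standard pydantic behavior and assumed here.

    from ogx_client.types.shared import SystemMessage

    SystemMessage(content="You are terse.")                       # plain str
    SystemMessage(content={"type": "text", "text": "Be terse."})  # ContentTextContentItem
    SystemMessage(content=[                                       # list form
        {"type": "text", "text": "Describe this image."},
        {"type": "image", "image": {"url": {"uri": "https://example.com/cat.png"}}},
    ])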
diff --git a/src/llama_stack_client/types/shared/version_info.py b/src/ogx_client/types/shared/version_info.py
similarity index 100%
rename from src/llama_stack_client/types/shared/version_info.py
rename to src/ogx_client/types/shared/version_info.py
diff --git a/src/llama_stack_client/types/shield.py b/src/ogx_client/types/shield.py
similarity index 93%
rename from src/llama_stack_client/types/shield.py
rename to src/ogx_client/types/shield.py
index 12a1530b..53cc44bb 100644
--- a/src/llama_stack_client/types/shield.py
+++ b/src/ogx_client/types/shield.py
@@ -18,7 +18,7 @@ class Shield(BaseModel):
"""A safety shield resource that can be used to check content."""
identifier: str
- """Unique identifier for this resource in llama stack"""
+ """Unique identifier for this resource in ogx"""
provider_id: str
"""ID of the provider that owns this resource"""
diff --git a/src/llama_stack_client/types/shield_list_response.py b/src/ogx_client/types/shield_list_response.py
similarity index 100%
rename from src/llama_stack_client/types/shield_list_response.py
rename to src/ogx_client/types/shield_list_response.py
diff --git a/src/llama_stack_client/types/shield_register_params.py b/src/ogx_client/types/shield_register_params.py
similarity index 100%
rename from src/llama_stack_client/types/shield_register_params.py
rename to src/ogx_client/types/shield_register_params.py
diff --git a/src/llama_stack_client/types/vector_io_insert_params.py b/src/ogx_client/types/vector_io_insert_params.py
similarity index 100%
rename from src/llama_stack_client/types/vector_io_insert_params.py
rename to src/ogx_client/types/vector_io_insert_params.py
diff --git a/src/llama_stack_client/types/vector_io_query_params.py b/src/ogx_client/types/vector_io_query_params.py
similarity index 100%
rename from src/llama_stack_client/types/vector_io_query_params.py
rename to src/ogx_client/types/vector_io_query_params.py
diff --git a/src/llama_stack_client/types/vector_store.py b/src/ogx_client/types/vector_store.py
similarity index 61%
rename from src/llama_stack_client/types/vector_store.py
rename to src/ogx_client/types/vector_store.py
index 18404114..4b1e3a2d 100644
--- a/src/llama_stack_client/types/vector_store.py
+++ b/src/ogx_client/types/vector_store.py
@@ -7,10 +7,11 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Dict, Optional
+from typing_extensions import Literal
from .._models import BaseModel
-__all__ = ["VectorStore", "FileCounts"]
+__all__ = ["VectorStore", "FileCounts", "ExpiresAfter"]
class FileCounts(BaseModel):
@@ -27,6 +28,16 @@ class FileCounts(BaseModel):
total: int
+class ExpiresAfter(BaseModel):
+ """Expiration policy for a vector store."""
+
+ anchor: Literal["last_active_at"]
+ """Anchor timestamp after which the expiration policy applies."""
+
+ days: int
+ """The number of days after the anchor time that the vector store will expire."""
+
+
class VectorStore(BaseModel):
"""OpenAI Vector Store object."""
@@ -37,7 +48,10 @@ class VectorStore(BaseModel):
file_counts: FileCounts
"""File processing status counts for a vector store."""
- expires_after: Optional[Dict[str, object]] = None
+ status: Literal["expired", "in_progress", "completed"]
+
+ expires_after: Optional[ExpiresAfter] = None
+ """Expiration policy for a vector store."""
expires_at: Optional[int] = None
@@ -47,8 +61,6 @@ class VectorStore(BaseModel):
name: Optional[str] = None
- object: Optional[str] = None
-
- status: Optional[str] = None
+ object: Optional[Literal["vector_store"]] = None
usage_bytes: Optional[int] = None
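
On the reading side, the tightened model means `status` can be branched on without a None check. A hedged sketch; `store` is assumed to be an already-parsed `VectorStore`, and only field names from this diff are used.

    if store.status == "expired":
        print("store is gone; recreate it before querying")
    elif store.expires_after is not None:
        # The policy is anchored to last_active_at; when the server has computed
        # a concrete deadline it also populates expires_at (epoch seconds).
        print(f"expires {store.expires_after.days} days after last activity")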
diff --git a/src/llama_stack_client/types/vector_store_create_params.py b/src/ogx_client/types/vector_store_create_params.py
similarity index 88%
rename from src/llama_stack_client/types/vector_store_create_params.py
rename to src/ogx_client/types/vector_store_create_params.py
index 73225b86..af2a1d69 100644
--- a/src/llama_stack_client/types/vector_store_create_params.py
+++ b/src/ogx_client/types/vector_store_create_params.py
@@ -21,6 +21,7 @@
"ChunkingStrategyVectorStoreChunkingStrategyStaticStatic",
"ChunkingStrategyVectorStoreChunkingStrategyContextual",
"ChunkingStrategyVectorStoreChunkingStrategyContextualContextual",
+ "ExpiresAfter",
]
@@ -28,7 +29,10 @@ class VectorStoreCreateParams(TypedDict, total=False):
chunking_strategy: Optional[ChunkingStrategy]
"""Automatic chunking strategy for vector store files."""
- expires_after: Optional[Dict[str, object]]
+ description: Optional[str]
+
+ expires_after: Optional[ExpiresAfter]
+ """Expiration policy for a vector store."""
file_ids: Optional[SequenceNotStr[str]]
@@ -110,3 +114,13 @@ class ChunkingStrategyVectorStoreChunkingStrategyContextual(TypedDict, total=Fal
ChunkingStrategyVectorStoreChunkingStrategyStatic,
ChunkingStrategyVectorStoreChunkingStrategyContextual,
]
+
+
+class ExpiresAfter(TypedDict, total=False):
+ """Expiration policy for a vector store."""
+
+ anchor: Required[Literal["last_active_at"]]
+ """Anchor timestamp after which the expiration policy applies."""
+
+ days: Required[int]
+ """The number of days after the anchor time that the vector store will expire."""
diff --git a/src/llama_stack_client/types/vector_store_delete_response.py b/src/ogx_client/types/vector_store_delete_response.py
similarity index 81%
rename from src/llama_stack_client/types/vector_store_delete_response.py
rename to src/ogx_client/types/vector_store_delete_response.py
index 6c68b2e2..adb2580f 100644
--- a/src/llama_stack_client/types/vector_store_delete_response.py
+++ b/src/ogx_client/types/vector_store_delete_response.py
@@ -7,6 +7,7 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
+from typing_extensions import Literal
from .._models import BaseModel
@@ -18,6 +19,6 @@ class VectorStoreDeleteResponse(BaseModel):
id: str
- deleted: Optional[bool] = None
+ deleted: bool
- object: Optional[str] = None
+ object: Optional[Literal["vector_store.deleted"]] = None
diff --git a/src/llama_stack_client/types/vector_store_list_params.py b/src/ogx_client/types/vector_store_list_params.py
similarity index 100%
rename from src/llama_stack_client/types/vector_store_list_params.py
rename to src/ogx_client/types/vector_store_list_params.py
diff --git a/src/llama_stack_client/types/vector_store_search_params.py b/src/ogx_client/types/vector_store_search_params.py
similarity index 97%
rename from src/llama_stack_client/types/vector_store_search_params.py
rename to src/ogx_client/types/vector_store_search_params.py
index 0136463a..fd70aacd 100644
--- a/src/llama_stack_client/types/vector_store_search_params.py
+++ b/src/ogx_client/types/vector_store_search_params.py
@@ -23,7 +23,7 @@ class VectorStoreSearchParams(TypedDict, total=False):
filters: Optional[Dict[str, object]]
"""Filters to apply to the search."""
- max_num_results: Optional[int]
+ max_num_results: int
"""Maximum number of results to return."""
ranking_options: Optional[RankingOptions]
@@ -46,7 +46,7 @@ class VectorStoreSearchParams(TypedDict, total=False):
SearchRankingOptions(ranker="weighted", score_threshold=0.5)
"""
- rewrite_query: Optional[bool]
+ rewrite_query: bool
"""Whether to rewrite the query for better results."""
search_mode: Optional[str]
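
Because `max_num_results` and `rewrite_query` are now plain `int`/`bool` rather than `Optional`, callers should pass a value or omit the key instead of sending None. A hedged sketch; the `search` method name and `query` parameter are assumptions for illustration.

    results = client.vector_stores.search(  # assumed call shape
        "vs_123",
        query="refund policy",
        max_num_results=5,   # pass an int or omit the key; None is no longer typed
        rewrite_query=True,  # likewise a plain bool now
    )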
diff --git a/src/llama_stack_client/types/vector_store_search_response.py b/src/ogx_client/types/vector_store_search_response.py
similarity index 96%
rename from src/llama_stack_client/types/vector_store_search_response.py
rename to src/ogx_client/types/vector_store_search_response.py
index 5f799ac7..b87e8240 100644
--- a/src/llama_stack_client/types/vector_store_search_response.py
+++ b/src/ogx_client/types/vector_store_search_response.py
@@ -82,10 +82,10 @@ class VectorStoreSearchResponse(BaseModel):
data: List[Data]
- search_query: List[str]
+ has_more: bool
- has_more: Optional[bool] = None
+ search_query: List[str]
next_page: Optional[str] = None
- object: Optional[str] = None
+ object: Optional[Literal["vector_store.search_results.page"]] = None
diff --git a/src/llama_stack_client/types/vector_store_update_params.py b/src/ogx_client/types/vector_store_update_params.py
similarity index 52%
rename from src/llama_stack_client/types/vector_store_update_params.py
rename to src/ogx_client/types/vector_store_update_params.py
index f08612d8..b440373f 100644
--- a/src/llama_stack_client/types/vector_store_update_params.py
+++ b/src/ogx_client/types/vector_store_update_params.py
@@ -9,17 +9,27 @@
from __future__ import annotations
from typing import Dict, Optional
-from typing_extensions import TypedDict
+from typing_extensions import Literal, Required, TypedDict
-__all__ = ["VectorStoreUpdateParams"]
+__all__ = ["VectorStoreUpdateParams", "ExpiresAfter"]
class VectorStoreUpdateParams(TypedDict, total=False):
- expires_after: Optional[Dict[str, object]]
- """Expiration policy for the vector store."""
+ expires_after: Optional[ExpiresAfter]
+ """Expiration policy for a vector store."""
metadata: Optional[Dict[str, object]]
"""Metadata to associate with the vector store."""
name: Optional[str]
"""The new name for the vector store."""
+
+
+class ExpiresAfter(TypedDict, total=False):
+ """Expiration policy for a vector store."""
+
+ anchor: Required[Literal["last_active_at"]]
+ """Anchor timestamp after which the expiration policy applies."""
+
+ days: Required[int]
+ """The number of days after the anchor time that the vector store will expire."""
diff --git a/src/llama_stack_client/types/vector_stores/__init__.py b/src/ogx_client/types/vector_stores/__init__.py
similarity index 100%
rename from src/llama_stack_client/types/vector_stores/__init__.py
rename to src/ogx_client/types/vector_stores/__init__.py
diff --git a/src/ogx_client/types/vector_stores/file_batch_create_params.py b/src/ogx_client/types/vector_stores/file_batch_create_params.py
new file mode 100644
index 00000000..aca32191
--- /dev/null
+++ b/src/ogx_client/types/vector_stores/file_batch_create_params.py
@@ -0,0 +1,217 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Union, Iterable, Optional
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from ..._types import SequenceNotStr
+
+__all__ = [
+ "FileBatchCreateParams",
+ "ChunkingStrategy",
+ "ChunkingStrategyVectorStoreChunkingStrategyAuto",
+ "ChunkingStrategyVectorStoreChunkingStrategyStatic",
+ "ChunkingStrategyVectorStoreChunkingStrategyStaticStatic",
+ "ChunkingStrategyVectorStoreChunkingStrategyContextual",
+ "ChunkingStrategyVectorStoreChunkingStrategyContextualContextual",
+ "File",
+ "FileChunkingStrategy",
+ "FileChunkingStrategyVectorStoreChunkingStrategyAuto",
+ "FileChunkingStrategyVectorStoreChunkingStrategyStatic",
+ "FileChunkingStrategyVectorStoreChunkingStrategyStaticStatic",
+ "FileChunkingStrategyVectorStoreChunkingStrategyContextual",
+ "FileChunkingStrategyVectorStoreChunkingStrategyContextualContextual",
+]
+
+
+class FileBatchCreateParams(TypedDict, total=False):
+ attributes: Optional[Dict[str, Union[str, float, bool]]]
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard. Keys are
+ strings with a maximum length of 64 characters. Values are strings with a
+ maximum length of 512 characters, booleans, or numbers.
+ """
+
+ chunking_strategy: Optional[ChunkingStrategy]
+ """Automatic chunking strategy for vector store files."""
+
+ file_ids: SequenceNotStr[str]
+
+ files: Optional[Iterable[File]]
+
+
+class ChunkingStrategyVectorStoreChunkingStrategyAuto(TypedDict, total=False):
+ """Automatic chunking strategy for vector store files."""
+
+ type: Literal["auto"]
+
+
+class ChunkingStrategyVectorStoreChunkingStrategyStaticStatic(TypedDict, total=False):
+ """Configuration for static chunking strategy."""
+
+ chunk_overlap_tokens: int
+
+ max_chunk_size_tokens: int
+
+
+class ChunkingStrategyVectorStoreChunkingStrategyStatic(TypedDict, total=False):
+ """Static chunking strategy with configurable parameters."""
+
+ static: Required[ChunkingStrategyVectorStoreChunkingStrategyStaticStatic]
+ """Configuration for static chunking strategy."""
+
+ type: Literal["static"]
+
+
+class ChunkingStrategyVectorStoreChunkingStrategyContextualContextual(TypedDict, total=False):
+ """Configuration for contextual chunking."""
+
+ chunk_overlap_tokens: int
+ """Tokens to overlap between adjacent chunks.
+
+ Must be less than max_chunk_size_tokens.
+ """
+
+ context_prompt: str
+ """Prompt template for contextual retrieval.
+
+ Uses WHOLE_DOCUMENT and CHUNK_CONTENT placeholders wrapped in double curly
+ braces.
+ """
+
+ max_chunk_size_tokens: int
+ """Maximum tokens per chunk. Suggested ~700 to allow room for prepended context."""
+
+ max_concurrency: Optional[int]
+ """Maximum concurrent LLM calls. Falls back to config default if not provided."""
+
+ model_id: Optional[str]
+ """LLM model for generating context.
+
+ Falls back to VectorStoresConfig.contextual_retrieval_params.model if not
+ provided.
+ """
+
+ timeout_seconds: Optional[int]
+ """Timeout per LLM call in seconds. Falls back to config default if not provided."""
+
+
+class ChunkingStrategyVectorStoreChunkingStrategyContextual(TypedDict, total=False):
+ """
+ Contextual chunking strategy that uses an LLM to situate chunks within the document.
+ """
+
+ contextual: Required[ChunkingStrategyVectorStoreChunkingStrategyContextualContextual]
+ """Configuration for contextual chunking."""
+
+ type: Literal["contextual"]
+ """Strategy type identifier."""
+
+
+ChunkingStrategy: TypeAlias = Union[
+ ChunkingStrategyVectorStoreChunkingStrategyAuto,
+ ChunkingStrategyVectorStoreChunkingStrategyStatic,
+ ChunkingStrategyVectorStoreChunkingStrategyContextual,
+]
+
+
+class FileChunkingStrategyVectorStoreChunkingStrategyAuto(TypedDict, total=False):
+ """Automatic chunking strategy for vector store files."""
+
+ type: Literal["auto"]
+
+
+class FileChunkingStrategyVectorStoreChunkingStrategyStaticStatic(TypedDict, total=False):
+ """Configuration for static chunking strategy."""
+
+ chunk_overlap_tokens: int
+
+ max_chunk_size_tokens: int
+
+
+class FileChunkingStrategyVectorStoreChunkingStrategyStatic(TypedDict, total=False):
+ """Static chunking strategy with configurable parameters."""
+
+ static: Required[FileChunkingStrategyVectorStoreChunkingStrategyStaticStatic]
+ """Configuration for static chunking strategy."""
+
+ type: Literal["static"]
+
+
+class FileChunkingStrategyVectorStoreChunkingStrategyContextualContextual(TypedDict, total=False):
+ """Configuration for contextual chunking."""
+
+ chunk_overlap_tokens: int
+ """Tokens to overlap between adjacent chunks.
+
+ Must be less than max_chunk_size_tokens.
+ """
+
+ context_prompt: str
+ """Prompt template for contextual retrieval.
+
+ Uses WHOLE_DOCUMENT and CHUNK_CONTENT placeholders wrapped in double curly
+ braces.
+ """
+
+ max_chunk_size_tokens: int
+ """Maximum tokens per chunk. Suggested ~700 to allow room for prepended context."""
+
+ max_concurrency: Optional[int]
+ """Maximum concurrent LLM calls. Falls back to config default if not provided."""
+
+ model_id: Optional[str]
+ """LLM model for generating context.
+
+ Falls back to VectorStoresConfig.contextual_retrieval_params.model if not
+ provided.
+ """
+
+ timeout_seconds: Optional[int]
+ """Timeout per LLM call in seconds. Falls back to config default if not provided."""
+
+
+class FileChunkingStrategyVectorStoreChunkingStrategyContextual(TypedDict, total=False):
+ """
+ Contextual chunking strategy that uses an LLM to situate chunks within the document.
+ """
+
+ contextual: Required[FileChunkingStrategyVectorStoreChunkingStrategyContextualContextual]
+ """Configuration for contextual chunking."""
+
+ type: Literal["contextual"]
+ """Strategy type identifier."""
+
+
+FileChunkingStrategy: TypeAlias = Union[
+ FileChunkingStrategyVectorStoreChunkingStrategyAuto,
+ FileChunkingStrategyVectorStoreChunkingStrategyStatic,
+ FileChunkingStrategyVectorStoreChunkingStrategyContextual,
+]
+
+
+class File(TypedDict, total=False):
+ """A file entry for creating a vector store file batch with per-file options."""
+
+ file_id: Required[str]
+
+ attributes: Optional[Dict[str, Union[str, float, bool]]]
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard. Keys are
+ strings with a maximum length of 64 characters. Values are strings with a
+ maximum length of 512 characters, booleans, or numbers.
+ """
+
+ chunking_strategy: Optional[FileChunkingStrategy]
+ """Automatic chunking strategy for vector store files."""
diff --git a/src/llama_stack_client/types/vector_stores/file_batch_list_files_params.py b/src/ogx_client/types/vector_stores/file_batch_list_files_params.py
similarity index 100%
rename from src/llama_stack_client/types/vector_stores/file_batch_list_files_params.py
rename to src/ogx_client/types/vector_stores/file_batch_list_files_params.py
diff --git a/src/llama_stack_client/types/vector_stores/file_content_params.py b/src/ogx_client/types/vector_stores/file_content_params.py
similarity index 100%
rename from src/llama_stack_client/types/vector_stores/file_content_params.py
rename to src/ogx_client/types/vector_stores/file_content_params.py
diff --git a/src/llama_stack_client/types/vector_stores/file_content_response.py b/src/ogx_client/types/vector_stores/file_content_response.py
similarity index 98%
rename from src/llama_stack_client/types/vector_stores/file_content_response.py
rename to src/ogx_client/types/vector_stores/file_content_response.py
index 3c1393c6..3ee107cf 100644
--- a/src/llama_stack_client/types/vector_stores/file_content_response.py
+++ b/src/ogx_client/types/vector_stores/file_content_response.py
@@ -68,7 +68,7 @@ class FileContentResponse(BaseModel):
data: List[Data]
- has_more: Optional[bool] = None
+ has_more: bool
next_page: Optional[str] = None
diff --git a/src/llama_stack_client/types/vector_stores/file_create_params.py b/src/ogx_client/types/vector_stores/file_create_params.py
similarity index 88%
rename from src/llama_stack_client/types/vector_stores/file_create_params.py
rename to src/ogx_client/types/vector_stores/file_create_params.py
index be49a9d2..55a90979 100644
--- a/src/llama_stack_client/types/vector_stores/file_create_params.py
+++ b/src/ogx_client/types/vector_stores/file_create_params.py
@@ -26,8 +26,14 @@ class FileCreateParams(TypedDict, total=False):
file_id: Required[str]
"""The ID of the file to attach."""
- attributes: Optional[Dict[str, object]]
- """Attributes to associate with the file."""
+ attributes: Optional[Dict[str, Union[str, float, bool]]]
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard. Keys are
+ strings with a maximum length of 64 characters. Values are strings with a
+ maximum length of 512 characters, booleans, or numbers.
+ """
chunking_strategy: Optional[ChunkingStrategy]
"""Strategy for chunking the file content."""
diff --git a/src/llama_stack_client/types/vector_stores/file_delete_response.py b/src/ogx_client/types/vector_stores/file_delete_response.py
similarity index 80%
rename from src/llama_stack_client/types/vector_stores/file_delete_response.py
rename to src/ogx_client/types/vector_stores/file_delete_response.py
index 7e5ad372..3fdbaab2 100644
--- a/src/llama_stack_client/types/vector_stores/file_delete_response.py
+++ b/src/ogx_client/types/vector_stores/file_delete_response.py
@@ -7,6 +7,7 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
+from typing_extensions import Literal
from ..._models import BaseModel
@@ -18,6 +19,6 @@ class FileDeleteResponse(BaseModel):
id: str
- deleted: Optional[bool] = None
+ deleted: bool
- object: Optional[str] = None
+ object: Optional[Literal["vector_store.file.deleted"]] = None
diff --git a/src/llama_stack_client/types/vector_stores/file_list_params.py b/src/ogx_client/types/vector_stores/file_list_params.py
similarity index 92%
rename from src/llama_stack_client/types/vector_stores/file_list_params.py
rename to src/ogx_client/types/vector_stores/file_list_params.py
index e17c4390..6dad1c50 100644
--- a/src/llama_stack_client/types/vector_stores/file_list_params.py
+++ b/src/ogx_client/types/vector_stores/file_list_params.py
@@ -21,7 +21,7 @@ class FileListParams(TypedDict, total=False):
before: Optional[str]
"""Pagination cursor (before)."""
- filter: Optional[Literal["completed", "in_progress", "cancelled", "failed"]]
+ filter: Optional[Literal["in_progress", "completed", "cancelled", "failed"]]
"""Filter by file status."""
limit: Optional[int]
diff --git a/src/llama_stack_client/types/vector_stores/file_update_params.py b/src/ogx_client/types/vector_stores/file_update_params.py
similarity index 100%
rename from src/llama_stack_client/types/vector_stores/file_update_params.py
rename to src/ogx_client/types/vector_stores/file_update_params.py
diff --git a/src/llama_stack_client/types/vector_stores/list_vector_store_files_in_batch_response.py b/src/ogx_client/types/vector_stores/list_vector_store_files_in_batch_response.py
similarity index 81%
rename from src/llama_stack_client/types/vector_stores/list_vector_store_files_in_batch_response.py
rename to src/ogx_client/types/vector_stores/list_vector_store_files_in_batch_response.py
index 2b9b8de1..938ab517 100644
--- a/src/llama_stack_client/types/vector_stores/list_vector_store_files_in_batch_response.py
+++ b/src/ogx_client/types/vector_stores/list_vector_store_files_in_batch_response.py
@@ -7,6 +7,7 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import List, Optional
+from typing_extensions import Literal
from ..._models import BaseModel
from .vector_store_file import VectorStoreFile
@@ -19,10 +20,10 @@ class ListVectorStoreFilesInBatchResponse(BaseModel):
data: List[VectorStoreFile]
- first_id: Optional[str] = None
+ first_id: str
- has_more: Optional[bool] = None
+ has_more: bool
- last_id: Optional[str] = None
+ last_id: str
- object: Optional[str] = None
+ object: Optional[Literal["list"]] = None
diff --git a/src/llama_stack_client/types/vector_stores/vector_store_file.py b/src/ogx_client/types/vector_stores/vector_store_file.py
similarity index 95%
rename from src/llama_stack_client/types/vector_stores/vector_store_file.py
rename to src/ogx_client/types/vector_stores/vector_store_file.py
index 02f4bc24..bfc0d452 100644
--- a/src/llama_stack_client/types/vector_stores/vector_store_file.py
+++ b/src/ogx_client/types/vector_stores/vector_store_file.py
@@ -107,7 +107,7 @@ class ChunkingStrategyVectorStoreChunkingStrategyContextual(BaseModel):
class LastError(BaseModel):
"""Error information for failed vector store file processing."""
- code: Literal["server_error", "rate_limit_exceeded"]
+ code: Literal["server_error", "unsupported_file", "invalid_file"]
message: str
@@ -122,7 +122,7 @@ class VectorStoreFile(BaseModel):
created_at: int
- status: Literal["completed", "in_progress", "cancelled", "failed"]
+ status: Literal["in_progress", "completed", "cancelled", "failed"]
vector_store_id: str
@@ -138,6 +138,6 @@ class VectorStoreFile(BaseModel):
last_error: Optional[LastError] = None
"""Error information for failed vector store file processing."""
- object: Optional[str] = None
+ object: Optional[Literal["vector_store.file"]] = None
usage_bytes: Optional[int] = None
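
The narrower `last_error.code` literal changes what callers should branch on; "rate_limit_exceeded" is gone in favor of file-specific codes. A hedged sketch using only fields from this diff (the `retry` helper is hypothetical):

    if file.status == "failed" and file.last_error is not None:
        if file.last_error.code in ("unsupported_file", "invalid_file"):
            print("permanent failure:", file.last_error.message)
        else:  # "server_error" may be transient
            retry(file)  # hypothetical retry helper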
diff --git a/src/llama_stack_client/types/vector_stores/vector_store_file_batches.py b/src/ogx_client/types/vector_stores/vector_store_file_batches.py
similarity index 87%
rename from src/llama_stack_client/types/vector_stores/vector_store_file_batches.py
rename to src/ogx_client/types/vector_stores/vector_store_file_batches.py
index b599f629..169a3ee0 100644
--- a/src/llama_stack_client/types/vector_stores/vector_store_file_batches.py
+++ b/src/ogx_client/types/vector_stores/vector_store_file_batches.py
@@ -38,8 +38,8 @@ class VectorStoreFileBatches(BaseModel):
file_counts: FileCounts
"""File processing status counts for a vector store."""
- status: Literal["completed", "in_progress", "cancelled", "failed"]
+ status: Literal["in_progress", "completed", "cancelled", "failed"]
vector_store_id: str
- object: Optional[str] = None
+ object: Optional[Literal["vector_store.files_batch"]] = None
diff --git a/tests/api_resources/alpha/eval/__init__.py b/tests/api_resources/alpha/eval/__init__.py
deleted file mode 100644
index 6a8e62e9..00000000
--- a/tests/api_resources/alpha/eval/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/alpha/eval/test_jobs.py b/tests/api_resources/alpha/eval/test_jobs.py
deleted file mode 100644
index 42844d80..00000000
--- a/tests/api_resources/alpha/eval/test_jobs.py
+++ /dev/null
@@ -1,318 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client.types.alpha import Job, EvaluateResponse
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestJobs:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @parametrize
- def test_method_retrieve(self, client: LlamaStackClient) -> None:
- job = client.alpha.eval.jobs.retrieve(
- job_id="job_id",
- benchmark_id="benchmark_id",
- )
- assert_matches_type(EvaluateResponse, job, path=["response"])
-
- @parametrize
- def test_raw_response_retrieve(self, client: LlamaStackClient) -> None:
- response = client.alpha.eval.jobs.with_raw_response.retrieve(
- job_id="job_id",
- benchmark_id="benchmark_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- job = response.parse()
- assert_matches_type(EvaluateResponse, job, path=["response"])
-
- @parametrize
- def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None:
- with client.alpha.eval.jobs.with_streaming_response.retrieve(
- job_id="job_id",
- benchmark_id="benchmark_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- job = response.parse()
- assert_matches_type(EvaluateResponse, job, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_path_params_retrieve(self, client: LlamaStackClient) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `benchmark_id` but received ''"):
- client.alpha.eval.jobs.with_raw_response.retrieve(
- job_id="job_id",
- benchmark_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `job_id` but received ''"):
- client.alpha.eval.jobs.with_raw_response.retrieve(
- job_id="",
- benchmark_id="benchmark_id",
- )
-
- @parametrize
- def test_method_cancel(self, client: LlamaStackClient) -> None:
- job = client.alpha.eval.jobs.cancel(
- job_id="job_id",
- benchmark_id="benchmark_id",
- )
- assert job is None
-
- @parametrize
- def test_raw_response_cancel(self, client: LlamaStackClient) -> None:
- response = client.alpha.eval.jobs.with_raw_response.cancel(
- job_id="job_id",
- benchmark_id="benchmark_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- job = response.parse()
- assert job is None
-
- @parametrize
- def test_streaming_response_cancel(self, client: LlamaStackClient) -> None:
- with client.alpha.eval.jobs.with_streaming_response.cancel(
- job_id="job_id",
- benchmark_id="benchmark_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- job = response.parse()
- assert job is None
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_path_params_cancel(self, client: LlamaStackClient) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `benchmark_id` but received ''"):
- client.alpha.eval.jobs.with_raw_response.cancel(
- job_id="job_id",
- benchmark_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `job_id` but received ''"):
- client.alpha.eval.jobs.with_raw_response.cancel(
- job_id="",
- benchmark_id="benchmark_id",
- )
-
- @parametrize
- def test_method_status(self, client: LlamaStackClient) -> None:
- job = client.alpha.eval.jobs.status(
- job_id="job_id",
- benchmark_id="benchmark_id",
- )
- assert_matches_type(Job, job, path=["response"])
-
- @parametrize
- def test_raw_response_status(self, client: LlamaStackClient) -> None:
- response = client.alpha.eval.jobs.with_raw_response.status(
- job_id="job_id",
- benchmark_id="benchmark_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- job = response.parse()
- assert_matches_type(Job, job, path=["response"])
-
- @parametrize
- def test_streaming_response_status(self, client: LlamaStackClient) -> None:
- with client.alpha.eval.jobs.with_streaming_response.status(
- job_id="job_id",
- benchmark_id="benchmark_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- job = response.parse()
- assert_matches_type(Job, job, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_path_params_status(self, client: LlamaStackClient) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `benchmark_id` but received ''"):
- client.alpha.eval.jobs.with_raw_response.status(
- job_id="job_id",
- benchmark_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `job_id` but received ''"):
- client.alpha.eval.jobs.with_raw_response.status(
- job_id="",
- benchmark_id="benchmark_id",
- )
-
-
-class TestAsyncJobs:
- parametrize = pytest.mark.parametrize(
- "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
- )
-
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
- job = await async_client.alpha.eval.jobs.retrieve(
- job_id="job_id",
- benchmark_id="benchmark_id",
- )
- assert_matches_type(EvaluateResponse, job, path=["response"])
-
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
- response = await async_client.alpha.eval.jobs.with_raw_response.retrieve(
- job_id="job_id",
- benchmark_id="benchmark_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- job = await response.parse()
- assert_matches_type(EvaluateResponse, job, path=["response"])
-
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
- async with async_client.alpha.eval.jobs.with_streaming_response.retrieve(
- job_id="job_id",
- benchmark_id="benchmark_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- job = await response.parse()
- assert_matches_type(EvaluateResponse, job, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `benchmark_id` but received ''"):
- await async_client.alpha.eval.jobs.with_raw_response.retrieve(
- job_id="job_id",
- benchmark_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `job_id` but received ''"):
- await async_client.alpha.eval.jobs.with_raw_response.retrieve(
- job_id="",
- benchmark_id="benchmark_id",
- )
-
- @parametrize
- async def test_method_cancel(self, async_client: AsyncLlamaStackClient) -> None:
- job = await async_client.alpha.eval.jobs.cancel(
- job_id="job_id",
- benchmark_id="benchmark_id",
- )
- assert job is None
-
- @parametrize
- async def test_raw_response_cancel(self, async_client: AsyncLlamaStackClient) -> None:
- response = await async_client.alpha.eval.jobs.with_raw_response.cancel(
- job_id="job_id",
- benchmark_id="benchmark_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- job = await response.parse()
- assert job is None
-
- @parametrize
- async def test_streaming_response_cancel(self, async_client: AsyncLlamaStackClient) -> None:
- async with async_client.alpha.eval.jobs.with_streaming_response.cancel(
- job_id="job_id",
- benchmark_id="benchmark_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- job = await response.parse()
- assert job is None
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_path_params_cancel(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `benchmark_id` but received ''"):
- await async_client.alpha.eval.jobs.with_raw_response.cancel(
- job_id="job_id",
- benchmark_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `job_id` but received ''"):
- await async_client.alpha.eval.jobs.with_raw_response.cancel(
- job_id="",
- benchmark_id="benchmark_id",
- )
-
- @parametrize
- async def test_method_status(self, async_client: AsyncLlamaStackClient) -> None:
- job = await async_client.alpha.eval.jobs.status(
- job_id="job_id",
- benchmark_id="benchmark_id",
- )
- assert_matches_type(Job, job, path=["response"])
-
- @parametrize
- async def test_raw_response_status(self, async_client: AsyncLlamaStackClient) -> None:
- response = await async_client.alpha.eval.jobs.with_raw_response.status(
- job_id="job_id",
- benchmark_id="benchmark_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- job = await response.parse()
- assert_matches_type(Job, job, path=["response"])
-
- @parametrize
- async def test_streaming_response_status(self, async_client: AsyncLlamaStackClient) -> None:
- async with async_client.alpha.eval.jobs.with_streaming_response.status(
- job_id="job_id",
- benchmark_id="benchmark_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- job = await response.parse()
- assert_matches_type(Job, job, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_path_params_status(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `benchmark_id` but received ''"):
- await async_client.alpha.eval.jobs.with_raw_response.status(
- job_id="job_id",
- benchmark_id="",
- )
-
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `job_id` but received ''"):
- await async_client.alpha.eval.jobs.with_raw_response.status(
- job_id="",
- benchmark_id="benchmark_id",
- )
diff --git a/tests/api_resources/alpha/test_admin.py b/tests/api_resources/alpha/test_admin.py
index c43ea2c1..093c6f1c 100644
--- a/tests/api_resources/alpha/test_admin.py
+++ b/tests/api_resources/alpha/test_admin.py
@@ -13,10 +13,10 @@
import pytest
+from ogx_client import OgxClient, AsyncOgxClient
from tests.utils import assert_matches_type
-from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client.types import RouteListResponse, ProviderListResponse
-from llama_stack_client.types.shared import HealthInfo, VersionInfo, ProviderInfo
+from ogx_client.types import RouteListResponse, ProviderListResponse
+from ogx_client.types.shared import HealthInfo, VersionInfo, ProviderInfo
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -25,12 +25,12 @@ class TestAdmin:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@parametrize
- def test_method_health(self, client: LlamaStackClient) -> None:
+ def test_method_health(self, client: OgxClient) -> None:
admin = client.alpha.admin.health()
assert_matches_type(HealthInfo, admin, path=["response"])
@parametrize
- def test_raw_response_health(self, client: LlamaStackClient) -> None:
+ def test_raw_response_health(self, client: OgxClient) -> None:
response = client.alpha.admin.with_raw_response.health()
assert response.is_closed is True
@@ -39,7 +39,7 @@ def test_raw_response_health(self, client: LlamaStackClient) -> None:
assert_matches_type(HealthInfo, admin, path=["response"])
@parametrize
- def test_streaming_response_health(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_health(self, client: OgxClient) -> None:
with client.alpha.admin.with_streaming_response.health() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -50,14 +50,14 @@ def test_streaming_response_health(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_method_inspect_provider(self, client: LlamaStackClient) -> None:
+ def test_method_inspect_provider(self, client: OgxClient) -> None:
admin = client.alpha.admin.inspect_provider(
"provider_id",
)
assert_matches_type(ProviderInfo, admin, path=["response"])
@parametrize
- def test_raw_response_inspect_provider(self, client: LlamaStackClient) -> None:
+ def test_raw_response_inspect_provider(self, client: OgxClient) -> None:
response = client.alpha.admin.with_raw_response.inspect_provider(
"provider_id",
)
@@ -68,7 +68,7 @@ def test_raw_response_inspect_provider(self, client: LlamaStackClient) -> None:
assert_matches_type(ProviderInfo, admin, path=["response"])
@parametrize
- def test_streaming_response_inspect_provider(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_inspect_provider(self, client: OgxClient) -> None:
with client.alpha.admin.with_streaming_response.inspect_provider(
"provider_id",
) as response:
@@ -81,19 +81,19 @@ def test_streaming_response_inspect_provider(self, client: LlamaStackClient) ->
assert cast(Any, response.is_closed) is True
@parametrize
- def test_path_params_inspect_provider(self, client: LlamaStackClient) -> None:
+ def test_path_params_inspect_provider(self, client: OgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `provider_id` but received ''"):
client.alpha.admin.with_raw_response.inspect_provider(
"",
)
@parametrize
- def test_method_list_providers(self, client: LlamaStackClient) -> None:
+ def test_method_list_providers(self, client: OgxClient) -> None:
admin = client.alpha.admin.list_providers()
assert_matches_type(ProviderListResponse, admin, path=["response"])
@parametrize
- def test_raw_response_list_providers(self, client: LlamaStackClient) -> None:
+ def test_raw_response_list_providers(self, client: OgxClient) -> None:
response = client.alpha.admin.with_raw_response.list_providers()
assert response.is_closed is True
@@ -102,7 +102,7 @@ def test_raw_response_list_providers(self, client: LlamaStackClient) -> None:
assert_matches_type(ProviderListResponse, admin, path=["response"])
@parametrize
- def test_streaming_response_list_providers(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_list_providers(self, client: OgxClient) -> None:
with client.alpha.admin.with_streaming_response.list_providers() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -113,19 +113,19 @@ def test_streaming_response_list_providers(self, client: LlamaStackClient) -> No
assert cast(Any, response.is_closed) is True
@parametrize
- def test_method_list_routes(self, client: LlamaStackClient) -> None:
+ def test_method_list_routes(self, client: OgxClient) -> None:
admin = client.alpha.admin.list_routes()
assert_matches_type(RouteListResponse, admin, path=["response"])
@parametrize
- def test_method_list_routes_with_all_params(self, client: LlamaStackClient) -> None:
+ def test_method_list_routes_with_all_params(self, client: OgxClient) -> None:
admin = client.alpha.admin.list_routes(
api_filter="v1",
)
assert_matches_type(RouteListResponse, admin, path=["response"])
@parametrize
- def test_raw_response_list_routes(self, client: LlamaStackClient) -> None:
+ def test_raw_response_list_routes(self, client: OgxClient) -> None:
response = client.alpha.admin.with_raw_response.list_routes()
assert response.is_closed is True
@@ -134,7 +134,7 @@ def test_raw_response_list_routes(self, client: LlamaStackClient) -> None:
assert_matches_type(RouteListResponse, admin, path=["response"])
@parametrize
- def test_streaming_response_list_routes(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_list_routes(self, client: OgxClient) -> None:
with client.alpha.admin.with_streaming_response.list_routes() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -145,12 +145,12 @@ def test_streaming_response_list_routes(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_method_version(self, client: LlamaStackClient) -> None:
+ def test_method_version(self, client: OgxClient) -> None:
admin = client.alpha.admin.version()
assert_matches_type(VersionInfo, admin, path=["response"])
@parametrize
- def test_raw_response_version(self, client: LlamaStackClient) -> None:
+ def test_raw_response_version(self, client: OgxClient) -> None:
response = client.alpha.admin.with_raw_response.version()
assert response.is_closed is True
@@ -159,7 +159,7 @@ def test_raw_response_version(self, client: LlamaStackClient) -> None:
assert_matches_type(VersionInfo, admin, path=["response"])
@parametrize
- def test_streaming_response_version(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_version(self, client: OgxClient) -> None:
with client.alpha.admin.with_streaming_response.version() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -176,12 +176,12 @@ class TestAsyncAdmin:
)
@parametrize
- async def test_method_health(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_health(self, async_client: AsyncOgxClient) -> None:
admin = await async_client.alpha.admin.health()
assert_matches_type(HealthInfo, admin, path=["response"])
@parametrize
- async def test_raw_response_health(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_health(self, async_client: AsyncOgxClient) -> None:
response = await async_client.alpha.admin.with_raw_response.health()
assert response.is_closed is True
@@ -190,7 +190,7 @@ async def test_raw_response_health(self, async_client: AsyncLlamaStackClient) ->
assert_matches_type(HealthInfo, admin, path=["response"])
@parametrize
- async def test_streaming_response_health(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_health(self, async_client: AsyncOgxClient) -> None:
async with async_client.alpha.admin.with_streaming_response.health() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -201,14 +201,14 @@ async def test_streaming_response_health(self, async_client: AsyncLlamaStackClie
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_method_inspect_provider(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_inspect_provider(self, async_client: AsyncOgxClient) -> None:
admin = await async_client.alpha.admin.inspect_provider(
"provider_id",
)
assert_matches_type(ProviderInfo, admin, path=["response"])
@parametrize
- async def test_raw_response_inspect_provider(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_inspect_provider(self, async_client: AsyncOgxClient) -> None:
response = await async_client.alpha.admin.with_raw_response.inspect_provider(
"provider_id",
)
@@ -219,7 +219,7 @@ async def test_raw_response_inspect_provider(self, async_client: AsyncLlamaStack
assert_matches_type(ProviderInfo, admin, path=["response"])
@parametrize
- async def test_streaming_response_inspect_provider(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_inspect_provider(self, async_client: AsyncOgxClient) -> None:
async with async_client.alpha.admin.with_streaming_response.inspect_provider(
"provider_id",
) as response:
@@ -232,19 +232,19 @@ async def test_streaming_response_inspect_provider(self, async_client: AsyncLlam
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_path_params_inspect_provider(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_path_params_inspect_provider(self, async_client: AsyncOgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `provider_id` but received ''"):
await async_client.alpha.admin.with_raw_response.inspect_provider(
"",
)
@parametrize
- async def test_method_list_providers(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_list_providers(self, async_client: AsyncOgxClient) -> None:
admin = await async_client.alpha.admin.list_providers()
assert_matches_type(ProviderListResponse, admin, path=["response"])
@parametrize
- async def test_raw_response_list_providers(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_list_providers(self, async_client: AsyncOgxClient) -> None:
response = await async_client.alpha.admin.with_raw_response.list_providers()
assert response.is_closed is True
@@ -253,7 +253,7 @@ async def test_raw_response_list_providers(self, async_client: AsyncLlamaStackCl
assert_matches_type(ProviderListResponse, admin, path=["response"])
@parametrize
- async def test_streaming_response_list_providers(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_list_providers(self, async_client: AsyncOgxClient) -> None:
async with async_client.alpha.admin.with_streaming_response.list_providers() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -264,19 +264,19 @@ async def test_streaming_response_list_providers(self, async_client: AsyncLlamaS
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_method_list_routes(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_list_routes(self, async_client: AsyncOgxClient) -> None:
admin = await async_client.alpha.admin.list_routes()
assert_matches_type(RouteListResponse, admin, path=["response"])
@parametrize
- async def test_method_list_routes_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_list_routes_with_all_params(self, async_client: AsyncOgxClient) -> None:
admin = await async_client.alpha.admin.list_routes(
api_filter="v1",
)
assert_matches_type(RouteListResponse, admin, path=["response"])
@parametrize
- async def test_raw_response_list_routes(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_list_routes(self, async_client: AsyncOgxClient) -> None:
response = await async_client.alpha.admin.with_raw_response.list_routes()
assert response.is_closed is True
@@ -285,7 +285,7 @@ async def test_raw_response_list_routes(self, async_client: AsyncLlamaStackClien
assert_matches_type(RouteListResponse, admin, path=["response"])
@parametrize
- async def test_streaming_response_list_routes(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_list_routes(self, async_client: AsyncOgxClient) -> None:
async with async_client.alpha.admin.with_streaming_response.list_routes() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -296,12 +296,12 @@ async def test_streaming_response_list_routes(self, async_client: AsyncLlamaStac
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_method_version(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_version(self, async_client: AsyncOgxClient) -> None:
admin = await async_client.alpha.admin.version()
assert_matches_type(VersionInfo, admin, path=["response"])
@parametrize
- async def test_raw_response_version(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_version(self, async_client: AsyncOgxClient) -> None:
response = await async_client.alpha.admin.with_raw_response.version()
assert response.is_closed is True
@@ -310,7 +310,7 @@ async def test_raw_response_version(self, async_client: AsyncLlamaStackClient) -
assert_matches_type(VersionInfo, admin, path=["response"])
@parametrize
- async def test_streaming_response_version(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_version(self, async_client: AsyncOgxClient) -> None:
async with async_client.alpha.admin.with_streaming_response.version() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
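
The hunks above touch only type annotations, so the surface they cover is easy to lose sight of. Here is a minimal usage sketch of the renamed clients against the same admin endpoints the tests exercise; it assumes ogx_client keeps the base_url constructor argument and async context-manager support these generated tests rely on, and the URL is the TEST_API_BASE_URL default used throughout the suite:

import asyncio
import os

from ogx_client import OgxClient, AsyncOgxClient

base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")

# Sync client: the endpoints exercised by the sync tests above.
client = OgxClient(base_url=base_url)
client.alpha.admin.health()
client.alpha.admin.version()
client.alpha.admin.list_providers()
client.alpha.admin.list_routes(api_filter="v1")
client.alpha.admin.inspect_provider("provider_id")

# Async client: the same surface, as in TestAsyncAdmin.
async def main() -> None:
    async with AsyncOgxClient(base_url=base_url) as async_client:
        await async_client.alpha.admin.version()

asyncio.run(main())
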
diff --git a/tests/api_resources/alpha/test_benchmarks.py b/tests/api_resources/alpha/test_benchmarks.py
deleted file mode 100644
index dcb89351..00000000
--- a/tests/api_resources/alpha/test_benchmarks.py
+++ /dev/null
@@ -1,354 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client.types.alpha import Benchmark, BenchmarkListResponse
-
-# pyright: reportDeprecated=false
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestBenchmarks:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @parametrize
- def test_method_retrieve(self, client: LlamaStackClient) -> None:
- benchmark = client.alpha.benchmarks.retrieve(
- "benchmark_id",
- )
- assert_matches_type(Benchmark, benchmark, path=["response"])
-
- @parametrize
- def test_raw_response_retrieve(self, client: LlamaStackClient) -> None:
- response = client.alpha.benchmarks.with_raw_response.retrieve(
- "benchmark_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- benchmark = response.parse()
- assert_matches_type(Benchmark, benchmark, path=["response"])
-
- @parametrize
- def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None:
- with client.alpha.benchmarks.with_streaming_response.retrieve(
- "benchmark_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- benchmark = response.parse()
- assert_matches_type(Benchmark, benchmark, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_path_params_retrieve(self, client: LlamaStackClient) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `benchmark_id` but received ''"):
- client.alpha.benchmarks.with_raw_response.retrieve(
- "",
- )
-
- @parametrize
- def test_method_list(self, client: LlamaStackClient) -> None:
- benchmark = client.alpha.benchmarks.list()
- assert_matches_type(BenchmarkListResponse, benchmark, path=["response"])
-
- @parametrize
- def test_raw_response_list(self, client: LlamaStackClient) -> None:
- response = client.alpha.benchmarks.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- benchmark = response.parse()
- assert_matches_type(BenchmarkListResponse, benchmark, path=["response"])
-
- @parametrize
- def test_streaming_response_list(self, client: LlamaStackClient) -> None:
- with client.alpha.benchmarks.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- benchmark = response.parse()
- assert_matches_type(BenchmarkListResponse, benchmark, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_method_register(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- benchmark = client.alpha.benchmarks.register(
- benchmark_id="benchmark_id",
- dataset_id="dataset_id",
- scoring_functions=["string"],
- )
-
- assert benchmark is None
-
- @parametrize
- def test_method_register_with_all_params(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- benchmark = client.alpha.benchmarks.register(
- benchmark_id="benchmark_id",
- dataset_id="dataset_id",
- scoring_functions=["string"],
- metadata={"foo": "bar"},
- provider_benchmark_id="provider_benchmark_id",
- provider_id="provider_id",
- )
-
- assert benchmark is None
-
- @parametrize
- def test_raw_response_register(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- response = client.alpha.benchmarks.with_raw_response.register(
- benchmark_id="benchmark_id",
- dataset_id="dataset_id",
- scoring_functions=["string"],
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- benchmark = response.parse()
- assert benchmark is None
-
- @parametrize
- def test_streaming_response_register(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- with client.alpha.benchmarks.with_streaming_response.register(
- benchmark_id="benchmark_id",
- dataset_id="dataset_id",
- scoring_functions=["string"],
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- benchmark = response.parse()
- assert benchmark is None
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_method_unregister(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- benchmark = client.alpha.benchmarks.unregister(
- "benchmark_id",
- )
-
- assert benchmark is None
-
- @parametrize
- def test_raw_response_unregister(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- response = client.alpha.benchmarks.with_raw_response.unregister(
- "benchmark_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- benchmark = response.parse()
- assert benchmark is None
-
- @parametrize
- def test_streaming_response_unregister(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- with client.alpha.benchmarks.with_streaming_response.unregister(
- "benchmark_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- benchmark = response.parse()
- assert benchmark is None
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_path_params_unregister(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `benchmark_id` but received ''"):
- client.alpha.benchmarks.with_raw_response.unregister(
- "",
- )
-
-
-class TestAsyncBenchmarks:
- parametrize = pytest.mark.parametrize(
- "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
- )
-
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
- benchmark = await async_client.alpha.benchmarks.retrieve(
- "benchmark_id",
- )
- assert_matches_type(Benchmark, benchmark, path=["response"])
-
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
- response = await async_client.alpha.benchmarks.with_raw_response.retrieve(
- "benchmark_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- benchmark = await response.parse()
- assert_matches_type(Benchmark, benchmark, path=["response"])
-
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
- async with async_client.alpha.benchmarks.with_streaming_response.retrieve(
- "benchmark_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- benchmark = await response.parse()
- assert_matches_type(Benchmark, benchmark, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `benchmark_id` but received ''"):
- await async_client.alpha.benchmarks.with_raw_response.retrieve(
- "",
- )
-
- @parametrize
- async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None:
- benchmark = await async_client.alpha.benchmarks.list()
- assert_matches_type(BenchmarkListResponse, benchmark, path=["response"])
-
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None:
- response = await async_client.alpha.benchmarks.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- benchmark = await response.parse()
- assert_matches_type(BenchmarkListResponse, benchmark, path=["response"])
-
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None:
- async with async_client.alpha.benchmarks.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- benchmark = await response.parse()
- assert_matches_type(BenchmarkListResponse, benchmark, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_method_register(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- benchmark = await async_client.alpha.benchmarks.register(
- benchmark_id="benchmark_id",
- dataset_id="dataset_id",
- scoring_functions=["string"],
- )
-
- assert benchmark is None
-
- @parametrize
- async def test_method_register_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- benchmark = await async_client.alpha.benchmarks.register(
- benchmark_id="benchmark_id",
- dataset_id="dataset_id",
- scoring_functions=["string"],
- metadata={"foo": "bar"},
- provider_benchmark_id="provider_benchmark_id",
- provider_id="provider_id",
- )
-
- assert benchmark is None
-
- @parametrize
- async def test_raw_response_register(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- response = await async_client.alpha.benchmarks.with_raw_response.register(
- benchmark_id="benchmark_id",
- dataset_id="dataset_id",
- scoring_functions=["string"],
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- benchmark = await response.parse()
- assert benchmark is None
-
- @parametrize
- async def test_streaming_response_register(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- async with async_client.alpha.benchmarks.with_streaming_response.register(
- benchmark_id="benchmark_id",
- dataset_id="dataset_id",
- scoring_functions=["string"],
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- benchmark = await response.parse()
- assert benchmark is None
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_method_unregister(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- benchmark = await async_client.alpha.benchmarks.unregister(
- "benchmark_id",
- )
-
- assert benchmark is None
-
- @parametrize
- async def test_raw_response_unregister(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- response = await async_client.alpha.benchmarks.with_raw_response.unregister(
- "benchmark_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- benchmark = await response.parse()
- assert benchmark is None
-
- @parametrize
- async def test_streaming_response_unregister(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- async with async_client.alpha.benchmarks.with_streaming_response.unregister(
- "benchmark_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- benchmark = await response.parse()
- assert benchmark is None
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_path_params_unregister(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `benchmark_id` but received ''"):
- await async_client.alpha.benchmarks.with_raw_response.unregister(
- "",
- )
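
For anyone auditing what this deletion removes rather than renames: the file covered the already-deprecated benchmarks resource. A reconstruction of the call shapes from the deleted tests, assuming the pre-rename llama_stack_client package is still installed:

import warnings

from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://127.0.0.1:4010")

# register/unregister emitted DeprecationWarning (the tests asserted on it).
with warnings.catch_warnings():
    warnings.simplefilter("ignore", DeprecationWarning)
    client.alpha.benchmarks.register(
        benchmark_id="benchmark_id",
        dataset_id="dataset_id",
        scoring_functions=["string"],
        metadata={"foo": "bar"},  # optional, per the *_with_all_params variant
    )
    client.alpha.benchmarks.unregister("benchmark_id")

# retrieve and list were not deprecated and needed no warning handling.
benchmark = client.alpha.benchmarks.retrieve("benchmark_id")
benchmarks = client.alpha.benchmarks.list()
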
diff --git a/tests/api_resources/alpha/test_eval.py b/tests/api_resources/alpha/test_eval.py
deleted file mode 100644
index a0fa702b..00000000
--- a/tests/api_resources/alpha/test_eval.py
+++ /dev/null
@@ -1,833 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client.types.alpha import (
- Job,
- EvaluateResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestEval:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @parametrize
- def test_method_evaluate_rows(self, client: LlamaStackClient) -> None:
- eval = client.alpha.eval.evaluate_rows(
- benchmark_id="benchmark_id",
- benchmark_config={
- "eval_candidate": {
- "model": "x",
- "sampling_params": {},
- }
- },
- input_rows=[{"foo": "bar"}],
- scoring_functions=["string"],
- )
- assert_matches_type(EvaluateResponse, eval, path=["response"])
-
- @parametrize
- def test_method_evaluate_rows_with_all_params(self, client: LlamaStackClient) -> None:
- eval = client.alpha.eval.evaluate_rows(
- benchmark_id="benchmark_id",
- benchmark_config={
- "eval_candidate": {
- "model": "x",
- "sampling_params": {
- "max_tokens": 1,
- "repetition_penalty": -2,
- "stop": ["string"],
- "strategy": {"type": "greedy"},
- },
- "system_message": {
- "content": "string",
- "role": "system",
- },
- "type": "model",
- },
- "num_examples": 1,
- "scoring_params": {
- "foo": {
- "judge_model": "judge_model",
- "aggregation_functions": ["average"],
- "judge_score_regexes": ["string"],
- "prompt_template": "prompt_template",
- "type": "llm_as_judge",
- }
- },
- },
- input_rows=[{"foo": "bar"}],
- scoring_functions=["string"],
- )
- assert_matches_type(EvaluateResponse, eval, path=["response"])
-
- @parametrize
- def test_raw_response_evaluate_rows(self, client: LlamaStackClient) -> None:
- response = client.alpha.eval.with_raw_response.evaluate_rows(
- benchmark_id="benchmark_id",
- benchmark_config={
- "eval_candidate": {
- "model": "x",
- "sampling_params": {},
- }
- },
- input_rows=[{"foo": "bar"}],
- scoring_functions=["string"],
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- eval = response.parse()
- assert_matches_type(EvaluateResponse, eval, path=["response"])
-
- @parametrize
- def test_streaming_response_evaluate_rows(self, client: LlamaStackClient) -> None:
- with client.alpha.eval.with_streaming_response.evaluate_rows(
- benchmark_id="benchmark_id",
- benchmark_config={
- "eval_candidate": {
- "model": "x",
- "sampling_params": {},
- }
- },
- input_rows=[{"foo": "bar"}],
- scoring_functions=["string"],
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- eval = response.parse()
- assert_matches_type(EvaluateResponse, eval, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_path_params_evaluate_rows(self, client: LlamaStackClient) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `benchmark_id` but received ''"):
- client.alpha.eval.with_raw_response.evaluate_rows(
- benchmark_id="",
- benchmark_config={
- "eval_candidate": {
- "model": "x",
- "sampling_params": {},
- }
- },
- input_rows=[{"foo": "bar"}],
- scoring_functions=["string"],
- )
-
- @parametrize
- def test_method_evaluate_rows_alpha(self, client: LlamaStackClient) -> None:
- eval = client.alpha.eval.evaluate_rows_alpha(
- benchmark_id="benchmark_id",
- benchmark_config={
- "eval_candidate": {
- "model": "x",
- "sampling_params": {},
- }
- },
- input_rows=[{"foo": "bar"}],
- scoring_functions=["string"],
- )
- assert_matches_type(EvaluateResponse, eval, path=["response"])
-
- @parametrize
- def test_method_evaluate_rows_alpha_with_all_params(self, client: LlamaStackClient) -> None:
- eval = client.alpha.eval.evaluate_rows_alpha(
- benchmark_id="benchmark_id",
- benchmark_config={
- "eval_candidate": {
- "model": "x",
- "sampling_params": {
- "max_tokens": 1,
- "repetition_penalty": -2,
- "stop": ["string"],
- "strategy": {"type": "greedy"},
- },
- "system_message": {
- "content": "string",
- "role": "system",
- },
- "type": "model",
- },
- "num_examples": 1,
- "scoring_params": {
- "foo": {
- "judge_model": "judge_model",
- "aggregation_functions": ["average"],
- "judge_score_regexes": ["string"],
- "prompt_template": "prompt_template",
- "type": "llm_as_judge",
- }
- },
- },
- input_rows=[{"foo": "bar"}],
- scoring_functions=["string"],
- )
- assert_matches_type(EvaluateResponse, eval, path=["response"])
-
- @parametrize
- def test_raw_response_evaluate_rows_alpha(self, client: LlamaStackClient) -> None:
- response = client.alpha.eval.with_raw_response.evaluate_rows_alpha(
- benchmark_id="benchmark_id",
- benchmark_config={
- "eval_candidate": {
- "model": "x",
- "sampling_params": {},
- }
- },
- input_rows=[{"foo": "bar"}],
- scoring_functions=["string"],
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- eval = response.parse()
- assert_matches_type(EvaluateResponse, eval, path=["response"])
-
- @parametrize
- def test_streaming_response_evaluate_rows_alpha(self, client: LlamaStackClient) -> None:
- with client.alpha.eval.with_streaming_response.evaluate_rows_alpha(
- benchmark_id="benchmark_id",
- benchmark_config={
- "eval_candidate": {
- "model": "x",
- "sampling_params": {},
- }
- },
- input_rows=[{"foo": "bar"}],
- scoring_functions=["string"],
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- eval = response.parse()
- assert_matches_type(EvaluateResponse, eval, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_path_params_evaluate_rows_alpha(self, client: LlamaStackClient) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `benchmark_id` but received ''"):
- client.alpha.eval.with_raw_response.evaluate_rows_alpha(
- benchmark_id="",
- benchmark_config={
- "eval_candidate": {
- "model": "x",
- "sampling_params": {},
- }
- },
- input_rows=[{"foo": "bar"}],
- scoring_functions=["string"],
- )
-
- @parametrize
- def test_method_run_eval(self, client: LlamaStackClient) -> None:
- eval = client.alpha.eval.run_eval(
- benchmark_id="benchmark_id",
- benchmark_config={
- "eval_candidate": {
- "model": "x",
- "sampling_params": {},
- }
- },
- )
- assert_matches_type(Job, eval, path=["response"])
-
- @parametrize
- def test_method_run_eval_with_all_params(self, client: LlamaStackClient) -> None:
- eval = client.alpha.eval.run_eval(
- benchmark_id="benchmark_id",
- benchmark_config={
- "eval_candidate": {
- "model": "x",
- "sampling_params": {
- "max_tokens": 1,
- "repetition_penalty": -2,
- "stop": ["string"],
- "strategy": {"type": "greedy"},
- },
- "system_message": {
- "content": "string",
- "role": "system",
- },
- "type": "model",
- },
- "num_examples": 1,
- "scoring_params": {
- "foo": {
- "judge_model": "judge_model",
- "aggregation_functions": ["average"],
- "judge_score_regexes": ["string"],
- "prompt_template": "prompt_template",
- "type": "llm_as_judge",
- }
- },
- },
- )
- assert_matches_type(Job, eval, path=["response"])
-
- @parametrize
- def test_raw_response_run_eval(self, client: LlamaStackClient) -> None:
- response = client.alpha.eval.with_raw_response.run_eval(
- benchmark_id="benchmark_id",
- benchmark_config={
- "eval_candidate": {
- "model": "x",
- "sampling_params": {},
- }
- },
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- eval = response.parse()
- assert_matches_type(Job, eval, path=["response"])
-
- @parametrize
- def test_streaming_response_run_eval(self, client: LlamaStackClient) -> None:
- with client.alpha.eval.with_streaming_response.run_eval(
- benchmark_id="benchmark_id",
- benchmark_config={
- "eval_candidate": {
- "model": "x",
- "sampling_params": {},
- }
- },
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- eval = response.parse()
- assert_matches_type(Job, eval, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_path_params_run_eval(self, client: LlamaStackClient) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `benchmark_id` but received ''"):
- client.alpha.eval.with_raw_response.run_eval(
- benchmark_id="",
- benchmark_config={
- "eval_candidate": {
- "model": "x",
- "sampling_params": {},
- }
- },
- )
-
- @parametrize
- def test_method_run_eval_alpha(self, client: LlamaStackClient) -> None:
- eval = client.alpha.eval.run_eval_alpha(
- benchmark_id="benchmark_id",
- benchmark_config={
- "eval_candidate": {
- "model": "x",
- "sampling_params": {},
- }
- },
- )
- assert_matches_type(Job, eval, path=["response"])
-
- @parametrize
- def test_method_run_eval_alpha_with_all_params(self, client: LlamaStackClient) -> None:
- eval = client.alpha.eval.run_eval_alpha(
- benchmark_id="benchmark_id",
- benchmark_config={
- "eval_candidate": {
- "model": "x",
- "sampling_params": {
- "max_tokens": 1,
- "repetition_penalty": -2,
- "stop": ["string"],
- "strategy": {"type": "greedy"},
- },
- "system_message": {
- "content": "string",
- "role": "system",
- },
- "type": "model",
- },
- "num_examples": 1,
- "scoring_params": {
- "foo": {
- "judge_model": "judge_model",
- "aggregation_functions": ["average"],
- "judge_score_regexes": ["string"],
- "prompt_template": "prompt_template",
- "type": "llm_as_judge",
- }
- },
- },
- )
- assert_matches_type(Job, eval, path=["response"])
-
- @parametrize
- def test_raw_response_run_eval_alpha(self, client: LlamaStackClient) -> None:
- response = client.alpha.eval.with_raw_response.run_eval_alpha(
- benchmark_id="benchmark_id",
- benchmark_config={
- "eval_candidate": {
- "model": "x",
- "sampling_params": {},
- }
- },
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- eval = response.parse()
- assert_matches_type(Job, eval, path=["response"])
-
- @parametrize
- def test_streaming_response_run_eval_alpha(self, client: LlamaStackClient) -> None:
- with client.alpha.eval.with_streaming_response.run_eval_alpha(
- benchmark_id="benchmark_id",
- benchmark_config={
- "eval_candidate": {
- "model": "x",
- "sampling_params": {},
- }
- },
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- eval = response.parse()
- assert_matches_type(Job, eval, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_path_params_run_eval_alpha(self, client: LlamaStackClient) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `benchmark_id` but received ''"):
- client.alpha.eval.with_raw_response.run_eval_alpha(
- benchmark_id="",
- benchmark_config={
- "eval_candidate": {
- "model": "x",
- "sampling_params": {},
- }
- },
- )
-
-
-class TestAsyncEval:
- parametrize = pytest.mark.parametrize(
- "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
- )
-
- @parametrize
- async def test_method_evaluate_rows(self, async_client: AsyncLlamaStackClient) -> None:
- eval = await async_client.alpha.eval.evaluate_rows(
- benchmark_id="benchmark_id",
- benchmark_config={
- "eval_candidate": {
- "model": "x",
- "sampling_params": {},
- }
- },
- input_rows=[{"foo": "bar"}],
- scoring_functions=["string"],
- )
- assert_matches_type(EvaluateResponse, eval, path=["response"])
-
- @parametrize
- async def test_method_evaluate_rows_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
- eval = await async_client.alpha.eval.evaluate_rows(
- benchmark_id="benchmark_id",
- benchmark_config={
- "eval_candidate": {
- "model": "x",
- "sampling_params": {
- "max_tokens": 1,
- "repetition_penalty": -2,
- "stop": ["string"],
- "strategy": {"type": "greedy"},
- },
- "system_message": {
- "content": "string",
- "role": "system",
- },
- "type": "model",
- },
- "num_examples": 1,
- "scoring_params": {
- "foo": {
- "judge_model": "judge_model",
- "aggregation_functions": ["average"],
- "judge_score_regexes": ["string"],
- "prompt_template": "prompt_template",
- "type": "llm_as_judge",
- }
- },
- },
- input_rows=[{"foo": "bar"}],
- scoring_functions=["string"],
- )
- assert_matches_type(EvaluateResponse, eval, path=["response"])
-
- @parametrize
- async def test_raw_response_evaluate_rows(self, async_client: AsyncLlamaStackClient) -> None:
- response = await async_client.alpha.eval.with_raw_response.evaluate_rows(
- benchmark_id="benchmark_id",
- benchmark_config={
- "eval_candidate": {
- "model": "x",
- "sampling_params": {},
- }
- },
- input_rows=[{"foo": "bar"}],
- scoring_functions=["string"],
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- eval = await response.parse()
- assert_matches_type(EvaluateResponse, eval, path=["response"])
-
- @parametrize
- async def test_streaming_response_evaluate_rows(self, async_client: AsyncLlamaStackClient) -> None:
- async with async_client.alpha.eval.with_streaming_response.evaluate_rows(
- benchmark_id="benchmark_id",
- benchmark_config={
- "eval_candidate": {
- "model": "x",
- "sampling_params": {},
- }
- },
- input_rows=[{"foo": "bar"}],
- scoring_functions=["string"],
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- eval = await response.parse()
- assert_matches_type(EvaluateResponse, eval, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_path_params_evaluate_rows(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `benchmark_id` but received ''"):
- await async_client.alpha.eval.with_raw_response.evaluate_rows(
- benchmark_id="",
- benchmark_config={
- "eval_candidate": {
- "model": "x",
- "sampling_params": {},
- }
- },
- input_rows=[{"foo": "bar"}],
- scoring_functions=["string"],
- )
-
- @parametrize
- async def test_method_evaluate_rows_alpha(self, async_client: AsyncLlamaStackClient) -> None:
- eval = await async_client.alpha.eval.evaluate_rows_alpha(
- benchmark_id="benchmark_id",
- benchmark_config={
- "eval_candidate": {
- "model": "x",
- "sampling_params": {},
- }
- },
- input_rows=[{"foo": "bar"}],
- scoring_functions=["string"],
- )
- assert_matches_type(EvaluateResponse, eval, path=["response"])
-
- @parametrize
- async def test_method_evaluate_rows_alpha_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
- eval = await async_client.alpha.eval.evaluate_rows_alpha(
- benchmark_id="benchmark_id",
- benchmark_config={
- "eval_candidate": {
- "model": "x",
- "sampling_params": {
- "max_tokens": 1,
- "repetition_penalty": -2,
- "stop": ["string"],
- "strategy": {"type": "greedy"},
- },
- "system_message": {
- "content": "string",
- "role": "system",
- },
- "type": "model",
- },
- "num_examples": 1,
- "scoring_params": {
- "foo": {
- "judge_model": "judge_model",
- "aggregation_functions": ["average"],
- "judge_score_regexes": ["string"],
- "prompt_template": "prompt_template",
- "type": "llm_as_judge",
- }
- },
- },
- input_rows=[{"foo": "bar"}],
- scoring_functions=["string"],
- )
- assert_matches_type(EvaluateResponse, eval, path=["response"])
-
- @parametrize
- async def test_raw_response_evaluate_rows_alpha(self, async_client: AsyncLlamaStackClient) -> None:
- response = await async_client.alpha.eval.with_raw_response.evaluate_rows_alpha(
- benchmark_id="benchmark_id",
- benchmark_config={
- "eval_candidate": {
- "model": "x",
- "sampling_params": {},
- }
- },
- input_rows=[{"foo": "bar"}],
- scoring_functions=["string"],
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- eval = await response.parse()
- assert_matches_type(EvaluateResponse, eval, path=["response"])
-
- @parametrize
- async def test_streaming_response_evaluate_rows_alpha(self, async_client: AsyncLlamaStackClient) -> None:
- async with async_client.alpha.eval.with_streaming_response.evaluate_rows_alpha(
- benchmark_id="benchmark_id",
- benchmark_config={
- "eval_candidate": {
- "model": "x",
- "sampling_params": {},
- }
- },
- input_rows=[{"foo": "bar"}],
- scoring_functions=["string"],
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- eval = await response.parse()
- assert_matches_type(EvaluateResponse, eval, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_path_params_evaluate_rows_alpha(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `benchmark_id` but received ''"):
- await async_client.alpha.eval.with_raw_response.evaluate_rows_alpha(
- benchmark_id="",
- benchmark_config={
- "eval_candidate": {
- "model": "x",
- "sampling_params": {},
- }
- },
- input_rows=[{"foo": "bar"}],
- scoring_functions=["string"],
- )
-
- @parametrize
- async def test_method_run_eval(self, async_client: AsyncLlamaStackClient) -> None:
- eval = await async_client.alpha.eval.run_eval(
- benchmark_id="benchmark_id",
- benchmark_config={
- "eval_candidate": {
- "model": "x",
- "sampling_params": {},
- }
- },
- )
- assert_matches_type(Job, eval, path=["response"])
-
- @parametrize
- async def test_method_run_eval_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
- eval = await async_client.alpha.eval.run_eval(
- benchmark_id="benchmark_id",
- benchmark_config={
- "eval_candidate": {
- "model": "x",
- "sampling_params": {
- "max_tokens": 1,
- "repetition_penalty": -2,
- "stop": ["string"],
- "strategy": {"type": "greedy"},
- },
- "system_message": {
- "content": "string",
- "role": "system",
- },
- "type": "model",
- },
- "num_examples": 1,
- "scoring_params": {
- "foo": {
- "judge_model": "judge_model",
- "aggregation_functions": ["average"],
- "judge_score_regexes": ["string"],
- "prompt_template": "prompt_template",
- "type": "llm_as_judge",
- }
- },
- },
- )
- assert_matches_type(Job, eval, path=["response"])
-
- @parametrize
- async def test_raw_response_run_eval(self, async_client: AsyncLlamaStackClient) -> None:
- response = await async_client.alpha.eval.with_raw_response.run_eval(
- benchmark_id="benchmark_id",
- benchmark_config={
- "eval_candidate": {
- "model": "x",
- "sampling_params": {},
- }
- },
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- eval = await response.parse()
- assert_matches_type(Job, eval, path=["response"])
-
- @parametrize
- async def test_streaming_response_run_eval(self, async_client: AsyncLlamaStackClient) -> None:
- async with async_client.alpha.eval.with_streaming_response.run_eval(
- benchmark_id="benchmark_id",
- benchmark_config={
- "eval_candidate": {
- "model": "x",
- "sampling_params": {},
- }
- },
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- eval = await response.parse()
- assert_matches_type(Job, eval, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_path_params_run_eval(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `benchmark_id` but received ''"):
- await async_client.alpha.eval.with_raw_response.run_eval(
- benchmark_id="",
- benchmark_config={
- "eval_candidate": {
- "model": "x",
- "sampling_params": {},
- }
- },
- )
-
- @parametrize
- async def test_method_run_eval_alpha(self, async_client: AsyncLlamaStackClient) -> None:
- eval = await async_client.alpha.eval.run_eval_alpha(
- benchmark_id="benchmark_id",
- benchmark_config={
- "eval_candidate": {
- "model": "x",
- "sampling_params": {},
- }
- },
- )
- assert_matches_type(Job, eval, path=["response"])
-
- @parametrize
- async def test_method_run_eval_alpha_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
- eval = await async_client.alpha.eval.run_eval_alpha(
- benchmark_id="benchmark_id",
- benchmark_config={
- "eval_candidate": {
- "model": "x",
- "sampling_params": {
- "max_tokens": 1,
- "repetition_penalty": -2,
- "stop": ["string"],
- "strategy": {"type": "greedy"},
- },
- "system_message": {
- "content": "string",
- "role": "system",
- },
- "type": "model",
- },
- "num_examples": 1,
- "scoring_params": {
- "foo": {
- "judge_model": "judge_model",
- "aggregation_functions": ["average"],
- "judge_score_regexes": ["string"],
- "prompt_template": "prompt_template",
- "type": "llm_as_judge",
- }
- },
- },
- )
- assert_matches_type(Job, eval, path=["response"])
-
- @parametrize
- async def test_raw_response_run_eval_alpha(self, async_client: AsyncLlamaStackClient) -> None:
- response = await async_client.alpha.eval.with_raw_response.run_eval_alpha(
- benchmark_id="benchmark_id",
- benchmark_config={
- "eval_candidate": {
- "model": "x",
- "sampling_params": {},
- }
- },
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- eval = await response.parse()
- assert_matches_type(Job, eval, path=["response"])
-
- @parametrize
- async def test_streaming_response_run_eval_alpha(self, async_client: AsyncLlamaStackClient) -> None:
- async with async_client.alpha.eval.with_streaming_response.run_eval_alpha(
- benchmark_id="benchmark_id",
- benchmark_config={
- "eval_candidate": {
- "model": "x",
- "sampling_params": {},
- }
- },
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- eval = await response.parse()
- assert_matches_type(Job, eval, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_path_params_run_eval_alpha(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `benchmark_id` but received ''"):
- await async_client.alpha.eval.with_raw_response.run_eval_alpha(
- benchmark_id="",
- benchmark_config={
- "eval_candidate": {
- "model": "x",
- "sampling_params": {},
- }
- },
- )
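
The eval resource deleted here reused one benchmark_config shape across evaluate_rows, evaluate_rows_alpha, run_eval, and run_eval_alpha. A sketch reconstructed from the removed tests, again against the pre-rename package and the mock server's placeholder values:

from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://127.0.0.1:4010")

benchmark_config = {
    "eval_candidate": {
        "model": "x",
        "sampling_params": {},  # the all-params tests also set max_tokens, stop, strategy, ...
    }
}

# Inline evaluation of caller-supplied rows returned an EvaluateResponse.
result = client.alpha.eval.evaluate_rows(
    benchmark_id="benchmark_id",
    benchmark_config=benchmark_config,
    input_rows=[{"foo": "bar"}],
    scoring_functions=["string"],
)

# Starting a server-side run returned a Job handle instead.
job = client.alpha.eval.run_eval(
    benchmark_id="benchmark_id",
    benchmark_config=benchmark_config,
)
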
diff --git a/tests/api_resources/alpha/test_inference.py b/tests/api_resources/alpha/test_inference.py
index dd8d893d..9720a689 100644
--- a/tests/api_resources/alpha/test_inference.py
+++ b/tests/api_resources/alpha/test_inference.py
@@ -13,9 +13,9 @@
import pytest
+from ogx_client import OgxClient, AsyncOgxClient
from tests.utils import assert_matches_type
-from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client.types.alpha import InferenceRerankResponse
+from ogx_client.types.alpha import InferenceRerankResponse
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -24,7 +24,7 @@ class TestInference:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@parametrize
- def test_method_rerank(self, client: LlamaStackClient) -> None:
+ def test_method_rerank(self, client: OgxClient) -> None:
inference = client.alpha.inference.rerank(
items=["string"],
model="model",
@@ -33,7 +33,7 @@ def test_method_rerank(self, client: LlamaStackClient) -> None:
assert_matches_type(InferenceRerankResponse, inference, path=["response"])
@parametrize
- def test_method_rerank_with_all_params(self, client: LlamaStackClient) -> None:
+ def test_method_rerank_with_all_params(self, client: OgxClient) -> None:
inference = client.alpha.inference.rerank(
items=["string"],
model="model",
@@ -43,7 +43,7 @@ def test_method_rerank_with_all_params(self, client: LlamaStackClient) -> None:
assert_matches_type(InferenceRerankResponse, inference, path=["response"])
@parametrize
- def test_raw_response_rerank(self, client: LlamaStackClient) -> None:
+ def test_raw_response_rerank(self, client: OgxClient) -> None:
response = client.alpha.inference.with_raw_response.rerank(
items=["string"],
model="model",
@@ -56,7 +56,7 @@ def test_raw_response_rerank(self, client: LlamaStackClient) -> None:
assert_matches_type(InferenceRerankResponse, inference, path=["response"])
@parametrize
- def test_streaming_response_rerank(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_rerank(self, client: OgxClient) -> None:
with client.alpha.inference.with_streaming_response.rerank(
items=["string"],
model="model",
@@ -77,7 +77,7 @@ class TestAsyncInference:
)
@parametrize
- async def test_method_rerank(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_rerank(self, async_client: AsyncOgxClient) -> None:
inference = await async_client.alpha.inference.rerank(
items=["string"],
model="model",
@@ -86,7 +86,7 @@ async def test_method_rerank(self, async_client: AsyncLlamaStackClient) -> None:
assert_matches_type(InferenceRerankResponse, inference, path=["response"])
@parametrize
- async def test_method_rerank_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_rerank_with_all_params(self, async_client: AsyncOgxClient) -> None:
inference = await async_client.alpha.inference.rerank(
items=["string"],
model="model",
@@ -96,7 +96,7 @@ async def test_method_rerank_with_all_params(self, async_client: AsyncLlamaStack
assert_matches_type(InferenceRerankResponse, inference, path=["response"])
@parametrize
- async def test_raw_response_rerank(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_rerank(self, async_client: AsyncOgxClient) -> None:
response = await async_client.alpha.inference.with_raw_response.rerank(
items=["string"],
model="model",
@@ -109,7 +109,7 @@ async def test_raw_response_rerank(self, async_client: AsyncLlamaStackClient) ->
assert_matches_type(InferenceRerankResponse, inference, path=["response"])
@parametrize
- async def test_streaming_response_rerank(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_rerank(self, async_client: AsyncOgxClient) -> None:
async with async_client.alpha.inference.with_streaming_response.rerank(
items=["string"],
model="model",
diff --git a/tests/api_resources/beta/__init__.py b/tests/api_resources/beta/__init__.py
deleted file mode 100644
index 6a8e62e9..00000000
--- a/tests/api_resources/beta/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/beta/test_datasets.py b/tests/api_resources/beta/test_datasets.py
deleted file mode 100644
index 8ddd7646..00000000
--- a/tests/api_resources/beta/test_datasets.py
+++ /dev/null
@@ -1,551 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client.types.beta import (
- DatasetListResponse,
- DatasetIterrowsResponse,
- DatasetRegisterResponse,
- DatasetRetrieveResponse,
-)
-
-# pyright: reportDeprecated=false
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestDatasets:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @parametrize
- def test_method_retrieve(self, client: LlamaStackClient) -> None:
- dataset = client.beta.datasets.retrieve(
- "dataset_id",
- )
- assert_matches_type(DatasetRetrieveResponse, dataset, path=["response"])
-
- @parametrize
- def test_raw_response_retrieve(self, client: LlamaStackClient) -> None:
- response = client.beta.datasets.with_raw_response.retrieve(
- "dataset_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- dataset = response.parse()
- assert_matches_type(DatasetRetrieveResponse, dataset, path=["response"])
-
- @parametrize
- def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None:
- with client.beta.datasets.with_streaming_response.retrieve(
- "dataset_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- dataset = response.parse()
- assert_matches_type(DatasetRetrieveResponse, dataset, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_path_params_retrieve(self, client: LlamaStackClient) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_id` but received ''"):
- client.beta.datasets.with_raw_response.retrieve(
- "",
- )
-
- @parametrize
- def test_method_list(self, client: LlamaStackClient) -> None:
- dataset = client.beta.datasets.list()
- assert_matches_type(DatasetListResponse, dataset, path=["response"])
-
- @parametrize
- def test_raw_response_list(self, client: LlamaStackClient) -> None:
- response = client.beta.datasets.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- dataset = response.parse()
- assert_matches_type(DatasetListResponse, dataset, path=["response"])
-
- @parametrize
- def test_streaming_response_list(self, client: LlamaStackClient) -> None:
- with client.beta.datasets.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- dataset = response.parse()
- assert_matches_type(DatasetListResponse, dataset, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_method_appendrows(self, client: LlamaStackClient) -> None:
- dataset = client.beta.datasets.appendrows(
- dataset_id="dataset_id",
- rows=[{"foo": "bar"}],
- )
- assert dataset is None
-
- @parametrize
- def test_raw_response_appendrows(self, client: LlamaStackClient) -> None:
- response = client.beta.datasets.with_raw_response.appendrows(
- dataset_id="dataset_id",
- rows=[{"foo": "bar"}],
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- dataset = response.parse()
- assert dataset is None
-
- @parametrize
- def test_streaming_response_appendrows(self, client: LlamaStackClient) -> None:
- with client.beta.datasets.with_streaming_response.appendrows(
- dataset_id="dataset_id",
- rows=[{"foo": "bar"}],
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- dataset = response.parse()
- assert dataset is None
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_path_params_appendrows(self, client: LlamaStackClient) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_id` but received ''"):
- client.beta.datasets.with_raw_response.appendrows(
- dataset_id="",
- rows=[{"foo": "bar"}],
- )
-
- @parametrize
- def test_method_iterrows(self, client: LlamaStackClient) -> None:
- dataset = client.beta.datasets.iterrows(
- dataset_id="dataset_id",
- )
- assert_matches_type(DatasetIterrowsResponse, dataset, path=["response"])
-
- @parametrize
- def test_method_iterrows_with_all_params(self, client: LlamaStackClient) -> None:
- dataset = client.beta.datasets.iterrows(
- dataset_id="dataset_id",
- limit=0,
- start_index=0,
- )
- assert_matches_type(DatasetIterrowsResponse, dataset, path=["response"])
-
- @parametrize
- def test_raw_response_iterrows(self, client: LlamaStackClient) -> None:
- response = client.beta.datasets.with_raw_response.iterrows(
- dataset_id="dataset_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- dataset = response.parse()
- assert_matches_type(DatasetIterrowsResponse, dataset, path=["response"])
-
- @parametrize
- def test_streaming_response_iterrows(self, client: LlamaStackClient) -> None:
- with client.beta.datasets.with_streaming_response.iterrows(
- dataset_id="dataset_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- dataset = response.parse()
- assert_matches_type(DatasetIterrowsResponse, dataset, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_path_params_iterrows(self, client: LlamaStackClient) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_id` but received ''"):
- client.beta.datasets.with_raw_response.iterrows(
- dataset_id="",
- )
-
- @parametrize
- def test_method_register(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- dataset = client.beta.datasets.register(
- purpose="eval/question-answer",
- source={
- "uri": "uri",
- "type": "uri",
- },
- )
-
- assert_matches_type(DatasetRegisterResponse, dataset, path=["response"])
-
- @parametrize
- def test_method_register_with_all_params(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- dataset = client.beta.datasets.register(
- purpose="eval/question-answer",
- source={
- "uri": "uri",
- "type": "uri",
- },
- dataset_id="dataset_id",
- metadata={"foo": "bar"},
- )
-
- assert_matches_type(DatasetRegisterResponse, dataset, path=["response"])
-
- @parametrize
- def test_raw_response_register(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- response = client.beta.datasets.with_raw_response.register(
- purpose="eval/question-answer",
- source={
- "uri": "uri",
- "type": "uri",
- },
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- dataset = response.parse()
- assert_matches_type(DatasetRegisterResponse, dataset, path=["response"])
-
- @parametrize
- def test_streaming_response_register(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- with client.beta.datasets.with_streaming_response.register(
- purpose="eval/question-answer",
- source={
- "uri": "uri",
- "type": "uri",
- },
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- dataset = response.parse()
- assert_matches_type(DatasetRegisterResponse, dataset, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_method_unregister(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- dataset = client.beta.datasets.unregister(
- "dataset_id",
- )
-
- assert dataset is None
-
- @parametrize
- def test_raw_response_unregister(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- response = client.beta.datasets.with_raw_response.unregister(
- "dataset_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- dataset = response.parse()
- assert dataset is None
-
- @parametrize
- def test_streaming_response_unregister(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- with client.beta.datasets.with_streaming_response.unregister(
- "dataset_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- dataset = response.parse()
- assert dataset is None
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_path_params_unregister(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_id` but received ''"):
- client.beta.datasets.with_raw_response.unregister(
- "",
- )
-
-
-class TestAsyncDatasets:
- parametrize = pytest.mark.parametrize(
- "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
- )
-
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
- dataset = await async_client.beta.datasets.retrieve(
- "dataset_id",
- )
- assert_matches_type(DatasetRetrieveResponse, dataset, path=["response"])
-
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
- response = await async_client.beta.datasets.with_raw_response.retrieve(
- "dataset_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- dataset = await response.parse()
- assert_matches_type(DatasetRetrieveResponse, dataset, path=["response"])
-
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
- async with async_client.beta.datasets.with_streaming_response.retrieve(
- "dataset_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- dataset = await response.parse()
- assert_matches_type(DatasetRetrieveResponse, dataset, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_id` but received ''"):
- await async_client.beta.datasets.with_raw_response.retrieve(
- "",
- )
-
- @parametrize
- async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None:
- dataset = await async_client.beta.datasets.list()
- assert_matches_type(DatasetListResponse, dataset, path=["response"])
-
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None:
- response = await async_client.beta.datasets.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- dataset = await response.parse()
- assert_matches_type(DatasetListResponse, dataset, path=["response"])
-
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None:
- async with async_client.beta.datasets.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- dataset = await response.parse()
- assert_matches_type(DatasetListResponse, dataset, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_method_appendrows(self, async_client: AsyncLlamaStackClient) -> None:
- dataset = await async_client.beta.datasets.appendrows(
- dataset_id="dataset_id",
- rows=[{"foo": "bar"}],
- )
- assert dataset is None
-
- @parametrize
- async def test_raw_response_appendrows(self, async_client: AsyncLlamaStackClient) -> None:
- response = await async_client.beta.datasets.with_raw_response.appendrows(
- dataset_id="dataset_id",
- rows=[{"foo": "bar"}],
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- dataset = await response.parse()
- assert dataset is None
-
- @parametrize
- async def test_streaming_response_appendrows(self, async_client: AsyncLlamaStackClient) -> None:
- async with async_client.beta.datasets.with_streaming_response.appendrows(
- dataset_id="dataset_id",
- rows=[{"foo": "bar"}],
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- dataset = await response.parse()
- assert dataset is None
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_path_params_appendrows(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_id` but received ''"):
- await async_client.beta.datasets.with_raw_response.appendrows(
- dataset_id="",
- rows=[{"foo": "bar"}],
- )
-
- @parametrize
- async def test_method_iterrows(self, async_client: AsyncLlamaStackClient) -> None:
- dataset = await async_client.beta.datasets.iterrows(
- dataset_id="dataset_id",
- )
- assert_matches_type(DatasetIterrowsResponse, dataset, path=["response"])
-
- @parametrize
- async def test_method_iterrows_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
- dataset = await async_client.beta.datasets.iterrows(
- dataset_id="dataset_id",
- limit=0,
- start_index=0,
- )
- assert_matches_type(DatasetIterrowsResponse, dataset, path=["response"])
-
- @parametrize
- async def test_raw_response_iterrows(self, async_client: AsyncLlamaStackClient) -> None:
- response = await async_client.beta.datasets.with_raw_response.iterrows(
- dataset_id="dataset_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- dataset = await response.parse()
- assert_matches_type(DatasetIterrowsResponse, dataset, path=["response"])
-
- @parametrize
- async def test_streaming_response_iterrows(self, async_client: AsyncLlamaStackClient) -> None:
- async with async_client.beta.datasets.with_streaming_response.iterrows(
- dataset_id="dataset_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- dataset = await response.parse()
- assert_matches_type(DatasetIterrowsResponse, dataset, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_path_params_iterrows(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_id` but received ''"):
- await async_client.beta.datasets.with_raw_response.iterrows(
- dataset_id="",
- )
-
- @parametrize
- async def test_method_register(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- dataset = await async_client.beta.datasets.register(
- purpose="eval/question-answer",
- source={
- "uri": "uri",
- "type": "uri",
- },
- )
-
- assert_matches_type(DatasetRegisterResponse, dataset, path=["response"])
-
- @parametrize
- async def test_method_register_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- dataset = await async_client.beta.datasets.register(
- purpose="eval/question-answer",
- source={
- "uri": "uri",
- "type": "uri",
- },
- dataset_id="dataset_id",
- metadata={"foo": "bar"},
- )
-
- assert_matches_type(DatasetRegisterResponse, dataset, path=["response"])
-
- @parametrize
- async def test_raw_response_register(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- response = await async_client.beta.datasets.with_raw_response.register(
- purpose="eval/question-answer",
- source={
- "uri": "uri",
- "type": "uri",
- },
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- dataset = await response.parse()
- assert_matches_type(DatasetRegisterResponse, dataset, path=["response"])
-
- @parametrize
- async def test_streaming_response_register(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- async with async_client.beta.datasets.with_streaming_response.register(
- purpose="eval/question-answer",
- source={
- "uri": "uri",
- "type": "uri",
- },
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- dataset = await response.parse()
- assert_matches_type(DatasetRegisterResponse, dataset, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_method_unregister(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- dataset = await async_client.beta.datasets.unregister(
- "dataset_id",
- )
-
- assert dataset is None
-
- @parametrize
- async def test_raw_response_unregister(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- response = await async_client.beta.datasets.with_raw_response.unregister(
- "dataset_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- dataset = await response.parse()
- assert dataset is None
-
- @parametrize
- async def test_streaming_response_unregister(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- async with async_client.beta.datasets.with_streaming_response.unregister(
- "dataset_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- dataset = await response.parse()
- assert dataset is None
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_path_params_unregister(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `dataset_id` but received ''"):
- await async_client.beta.datasets.with_raw_response.unregister(
- "",
- )
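Note: the entire tests/api_resources/beta/test_datasets.py suite is deleted along with the beta datasets resource. For reference, the deprecation-assertion pattern it relied on is plain pytest: pytest.warns() fails the test unless the wrapped call emits the expected warning. A minimal, self-contained sketch of that pattern (register_dataset here is a hypothetical stand-in, not an ogx_client API):

import warnings

import pytest


def register_dataset(purpose: str) -> dict:
    # Hypothetical deprecated helper mirroring the removed beta.datasets.register().
    warnings.warn("register is deprecated", DeprecationWarning, stacklevel=2)
    return {"purpose": purpose}


def test_register_warns() -> None:
    # pytest.warns() asserts the DeprecationWarning is actually raised.
    with pytest.warns(DeprecationWarning):
        dataset = register_dataset(purpose="eval/question-answer")
    assert dataset["purpose"] == "eval/question-answer"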
diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py
index ba690c2f..f80d8c08 100644
--- a/tests/api_resources/chat/test_completions.py
+++ b/tests/api_resources/chat/test_completions.py
@@ -13,9 +13,9 @@
import pytest
+from ogx_client import OgxClient, AsyncOgxClient
from tests.utils import assert_matches_type
-from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client.types.chat import (
+from ogx_client.types.chat import (
CompletionListResponse,
CompletionCreateResponse,
CompletionRetrieveResponse,
@@ -28,7 +28,7 @@ class TestCompletions:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@parametrize
- def test_method_create_overload_1(self, client: LlamaStackClient) -> None:
+ def test_method_create_overload_1(self, client: OgxClient) -> None:
completion = client.chat.completions.create(
messages=[
{
@@ -41,7 +41,7 @@ def test_method_create_overload_1(self, client: LlamaStackClient) -> None:
assert_matches_type(CompletionCreateResponse, completion, path=["response"])
@parametrize
- def test_method_create_with_all_params_overload_1(self, client: LlamaStackClient) -> None:
+ def test_method_create_with_all_params_overload_1(self, client: OgxClient) -> None:
completion = client.chat.completions.create(
messages=[
{
@@ -80,7 +80,7 @@ def test_method_create_with_all_params_overload_1(self, client: LlamaStackClient
assert_matches_type(CompletionCreateResponse, completion, path=["response"])
@parametrize
- def test_raw_response_create_overload_1(self, client: LlamaStackClient) -> None:
+ def test_raw_response_create_overload_1(self, client: OgxClient) -> None:
response = client.chat.completions.with_raw_response.create(
messages=[
{
@@ -97,7 +97,7 @@ def test_raw_response_create_overload_1(self, client: LlamaStackClient) -> None:
assert_matches_type(CompletionCreateResponse, completion, path=["response"])
@parametrize
- def test_streaming_response_create_overload_1(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_create_overload_1(self, client: OgxClient) -> None:
with client.chat.completions.with_streaming_response.create(
messages=[
{
@@ -116,7 +116,7 @@ def test_streaming_response_create_overload_1(self, client: LlamaStackClient) ->
assert cast(Any, response.is_closed) is True
@parametrize
- def test_method_create_overload_2(self, client: LlamaStackClient) -> None:
+ def test_method_create_overload_2(self, client: OgxClient) -> None:
completion_stream = client.chat.completions.create(
messages=[
{
@@ -130,7 +130,7 @@ def test_method_create_overload_2(self, client: LlamaStackClient) -> None:
completion_stream.response.close()
@parametrize
- def test_method_create_with_all_params_overload_2(self, client: LlamaStackClient) -> None:
+ def test_method_create_with_all_params_overload_2(self, client: OgxClient) -> None:
completion_stream = client.chat.completions.create(
messages=[
{
@@ -169,7 +169,7 @@ def test_method_create_with_all_params_overload_2(self, client: LlamaStackClient
completion_stream.response.close()
@parametrize
- def test_raw_response_create_overload_2(self, client: LlamaStackClient) -> None:
+ def test_raw_response_create_overload_2(self, client: OgxClient) -> None:
response = client.chat.completions.with_raw_response.create(
messages=[
{
@@ -186,7 +186,7 @@ def test_raw_response_create_overload_2(self, client: LlamaStackClient) -> None:
stream.close()
@parametrize
- def test_streaming_response_create_overload_2(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_create_overload_2(self, client: OgxClient) -> None:
with client.chat.completions.with_streaming_response.create(
messages=[
{
@@ -206,14 +206,14 @@ def test_streaming_response_create_overload_2(self, client: LlamaStackClient) ->
assert cast(Any, response.is_closed) is True
@parametrize
- def test_method_retrieve(self, client: LlamaStackClient) -> None:
+ def test_method_retrieve(self, client: OgxClient) -> None:
completion = client.chat.completions.retrieve(
"completion_id",
)
assert_matches_type(CompletionRetrieveResponse, completion, path=["response"])
@parametrize
- def test_raw_response_retrieve(self, client: LlamaStackClient) -> None:
+ def test_raw_response_retrieve(self, client: OgxClient) -> None:
response = client.chat.completions.with_raw_response.retrieve(
"completion_id",
)
@@ -224,7 +224,7 @@ def test_raw_response_retrieve(self, client: LlamaStackClient) -> None:
assert_matches_type(CompletionRetrieveResponse, completion, path=["response"])
@parametrize
- def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_retrieve(self, client: OgxClient) -> None:
with client.chat.completions.with_streaming_response.retrieve(
"completion_id",
) as response:
@@ -237,19 +237,19 @@ def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_path_params_retrieve(self, client: LlamaStackClient) -> None:
+ def test_path_params_retrieve(self, client: OgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"):
client.chat.completions.with_raw_response.retrieve(
"",
)
@parametrize
- def test_method_list(self, client: LlamaStackClient) -> None:
+ def test_method_list(self, client: OgxClient) -> None:
completion = client.chat.completions.list()
assert_matches_type(CompletionListResponse, completion, path=["response"])
@parametrize
- def test_method_list_with_all_params(self, client: LlamaStackClient) -> None:
+ def test_method_list_with_all_params(self, client: OgxClient) -> None:
completion = client.chat.completions.list(
after="after",
limit=0,
@@ -259,7 +259,7 @@ def test_method_list_with_all_params(self, client: LlamaStackClient) -> None:
assert_matches_type(CompletionListResponse, completion, path=["response"])
@parametrize
- def test_raw_response_list(self, client: LlamaStackClient) -> None:
+ def test_raw_response_list(self, client: OgxClient) -> None:
response = client.chat.completions.with_raw_response.list()
assert response.is_closed is True
@@ -268,7 +268,7 @@ def test_raw_response_list(self, client: LlamaStackClient) -> None:
assert_matches_type(CompletionListResponse, completion, path=["response"])
@parametrize
- def test_streaming_response_list(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_list(self, client: OgxClient) -> None:
with client.chat.completions.with_streaming_response.list() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -285,7 +285,7 @@ class TestAsyncCompletions:
)
@parametrize
- async def test_method_create_overload_1(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_create_overload_1(self, async_client: AsyncOgxClient) -> None:
completion = await async_client.chat.completions.create(
messages=[
{
@@ -298,7 +298,7 @@ async def test_method_create_overload_1(self, async_client: AsyncLlamaStackClien
assert_matches_type(CompletionCreateResponse, completion, path=["response"])
@parametrize
- async def test_method_create_with_all_params_overload_1(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_create_with_all_params_overload_1(self, async_client: AsyncOgxClient) -> None:
completion = await async_client.chat.completions.create(
messages=[
{
@@ -337,7 +337,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
assert_matches_type(CompletionCreateResponse, completion, path=["response"])
@parametrize
- async def test_raw_response_create_overload_1(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_create_overload_1(self, async_client: AsyncOgxClient) -> None:
response = await async_client.chat.completions.with_raw_response.create(
messages=[
{
@@ -354,7 +354,7 @@ async def test_raw_response_create_overload_1(self, async_client: AsyncLlamaStac
assert_matches_type(CompletionCreateResponse, completion, path=["response"])
@parametrize
- async def test_streaming_response_create_overload_1(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_create_overload_1(self, async_client: AsyncOgxClient) -> None:
async with async_client.chat.completions.with_streaming_response.create(
messages=[
{
@@ -373,7 +373,7 @@ async def test_streaming_response_create_overload_1(self, async_client: AsyncLla
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_method_create_overload_2(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_create_overload_2(self, async_client: AsyncOgxClient) -> None:
completion_stream = await async_client.chat.completions.create(
messages=[
{
@@ -387,7 +387,7 @@ async def test_method_create_overload_2(self, async_client: AsyncLlamaStackClien
await completion_stream.response.aclose()
@parametrize
- async def test_method_create_with_all_params_overload_2(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_create_with_all_params_overload_2(self, async_client: AsyncOgxClient) -> None:
completion_stream = await async_client.chat.completions.create(
messages=[
{
@@ -426,7 +426,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
await completion_stream.response.aclose()
@parametrize
- async def test_raw_response_create_overload_2(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_create_overload_2(self, async_client: AsyncOgxClient) -> None:
response = await async_client.chat.completions.with_raw_response.create(
messages=[
{
@@ -443,7 +443,7 @@ async def test_raw_response_create_overload_2(self, async_client: AsyncLlamaStac
await stream.close()
@parametrize
- async def test_streaming_response_create_overload_2(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_create_overload_2(self, async_client: AsyncOgxClient) -> None:
async with async_client.chat.completions.with_streaming_response.create(
messages=[
{
@@ -463,14 +463,14 @@ async def test_streaming_response_create_overload_2(self, async_client: AsyncLla
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_method_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_retrieve(self, async_client: AsyncOgxClient) -> None:
completion = await async_client.chat.completions.retrieve(
"completion_id",
)
assert_matches_type(CompletionRetrieveResponse, completion, path=["response"])
@parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_retrieve(self, async_client: AsyncOgxClient) -> None:
response = await async_client.chat.completions.with_raw_response.retrieve(
"completion_id",
)
@@ -481,7 +481,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncLlamaStackClient)
assert_matches_type(CompletionRetrieveResponse, completion, path=["response"])
@parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_retrieve(self, async_client: AsyncOgxClient) -> None:
async with async_client.chat.completions.with_streaming_response.retrieve(
"completion_id",
) as response:
@@ -494,19 +494,19 @@ async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackCl
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_path_params_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_path_params_retrieve(self, async_client: AsyncOgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"):
await async_client.chat.completions.with_raw_response.retrieve(
"",
)
@parametrize
- async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_list(self, async_client: AsyncOgxClient) -> None:
completion = await async_client.chat.completions.list()
assert_matches_type(CompletionListResponse, completion, path=["response"])
@parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_list_with_all_params(self, async_client: AsyncOgxClient) -> None:
completion = await async_client.chat.completions.list(
after="after",
limit=0,
@@ -516,7 +516,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncLlamaStackCl
assert_matches_type(CompletionListResponse, completion, path=["response"])
@parametrize
- async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_list(self, async_client: AsyncOgxClient) -> None:
response = await async_client.chat.completions.with_raw_response.list()
assert response.is_closed is True
@@ -525,7 +525,7 @@ async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> N
assert_matches_type(CompletionListResponse, completion, path=["response"])
@parametrize
- async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_list(self, async_client: AsyncOgxClient) -> None:
async with async_client.chat.completions.with_streaming_response.list() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
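Note: the chat completions suite is a pure rename — every LlamaStackClient annotation becomes OgxClient and imports move to the ogx_client package. A sketch of the renamed surface these tests exercise, assuming the constructor and method signatures carry over unchanged from the pre-rename SDK; the message payload and model name below are representative placeholders, not values from this diff:

from ogx_client import OgxClient

# The tests run against a local mock server; reusing that address here.
client = OgxClient(base_url="http://127.0.0.1:4010")

completion = client.chat.completions.create(
    messages=[{"role": "user", "content": "string"}],
    model="model",
)
print(completion)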
diff --git a/tests/api_resources/conversations/test_items.py b/tests/api_resources/conversations/test_items.py
index 7938cdc4..70248f7c 100644
--- a/tests/api_resources/conversations/test_items.py
+++ b/tests/api_resources/conversations/test_items.py
@@ -13,10 +13,10 @@
import pytest
+from ogx_client import OgxClient, AsyncOgxClient
from tests.utils import assert_matches_type
-from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client.pagination import SyncOpenAICursorPage, AsyncOpenAICursorPage
-from llama_stack_client.types.conversations import (
+from ogx_client.pagination import SyncOpenAICursorPage, AsyncOpenAICursorPage
+from ogx_client.types.conversations import (
ItemGetResponse,
ItemListResponse,
ItemCreateResponse,
@@ -30,7 +30,7 @@ class TestItems:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@parametrize
- def test_method_create(self, client: LlamaStackClient) -> None:
+ def test_method_create(self, client: OgxClient) -> None:
item = client.conversations.items.create(
conversation_id="conversation_id",
items=[
@@ -44,7 +44,7 @@ def test_method_create(self, client: LlamaStackClient) -> None:
assert_matches_type(ItemCreateResponse, item, path=["response"])
@parametrize
- def test_raw_response_create(self, client: LlamaStackClient) -> None:
+ def test_raw_response_create(self, client: OgxClient) -> None:
response = client.conversations.items.with_raw_response.create(
conversation_id="conversation_id",
items=[
@@ -62,7 +62,7 @@ def test_raw_response_create(self, client: LlamaStackClient) -> None:
assert_matches_type(ItemCreateResponse, item, path=["response"])
@parametrize
- def test_streaming_response_create(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_create(self, client: OgxClient) -> None:
with client.conversations.items.with_streaming_response.create(
conversation_id="conversation_id",
items=[
@@ -82,7 +82,7 @@ def test_streaming_response_create(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_path_params_create(self, client: LlamaStackClient) -> None:
+ def test_path_params_create(self, client: OgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
client.conversations.items.with_raw_response.create(
conversation_id="",
@@ -96,14 +96,14 @@ def test_path_params_create(self, client: LlamaStackClient) -> None:
)
@parametrize
- def test_method_list(self, client: LlamaStackClient) -> None:
+ def test_method_list(self, client: OgxClient) -> None:
item = client.conversations.items.list(
conversation_id="conversation_id",
)
assert_matches_type(SyncOpenAICursorPage[ItemListResponse], item, path=["response"])
@parametrize
- def test_method_list_with_all_params(self, client: LlamaStackClient) -> None:
+ def test_method_list_with_all_params(self, client: OgxClient) -> None:
item = client.conversations.items.list(
conversation_id="conversation_id",
after="after",
@@ -114,7 +114,7 @@ def test_method_list_with_all_params(self, client: LlamaStackClient) -> None:
assert_matches_type(SyncOpenAICursorPage[ItemListResponse], item, path=["response"])
@parametrize
- def test_raw_response_list(self, client: LlamaStackClient) -> None:
+ def test_raw_response_list(self, client: OgxClient) -> None:
response = client.conversations.items.with_raw_response.list(
conversation_id="conversation_id",
)
@@ -125,7 +125,7 @@ def test_raw_response_list(self, client: LlamaStackClient) -> None:
assert_matches_type(SyncOpenAICursorPage[ItemListResponse], item, path=["response"])
@parametrize
- def test_streaming_response_list(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_list(self, client: OgxClient) -> None:
with client.conversations.items.with_streaming_response.list(
conversation_id="conversation_id",
) as response:
@@ -138,14 +138,14 @@ def test_streaming_response_list(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_path_params_list(self, client: LlamaStackClient) -> None:
+ def test_path_params_list(self, client: OgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
client.conversations.items.with_raw_response.list(
conversation_id="",
)
@parametrize
- def test_method_delete(self, client: LlamaStackClient) -> None:
+ def test_method_delete(self, client: OgxClient) -> None:
item = client.conversations.items.delete(
item_id="item_id",
conversation_id="conversation_id",
@@ -153,7 +153,7 @@ def test_method_delete(self, client: LlamaStackClient) -> None:
assert_matches_type(ItemDeleteResponse, item, path=["response"])
@parametrize
- def test_raw_response_delete(self, client: LlamaStackClient) -> None:
+ def test_raw_response_delete(self, client: OgxClient) -> None:
response = client.conversations.items.with_raw_response.delete(
item_id="item_id",
conversation_id="conversation_id",
@@ -165,7 +165,7 @@ def test_raw_response_delete(self, client: LlamaStackClient) -> None:
assert_matches_type(ItemDeleteResponse, item, path=["response"])
@parametrize
- def test_streaming_response_delete(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_delete(self, client: OgxClient) -> None:
with client.conversations.items.with_streaming_response.delete(
item_id="item_id",
conversation_id="conversation_id",
@@ -179,7 +179,7 @@ def test_streaming_response_delete(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_path_params_delete(self, client: LlamaStackClient) -> None:
+ def test_path_params_delete(self, client: OgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
client.conversations.items.with_raw_response.delete(
item_id="item_id",
@@ -193,7 +193,7 @@ def test_path_params_delete(self, client: LlamaStackClient) -> None:
)
@parametrize
- def test_method_get(self, client: LlamaStackClient) -> None:
+ def test_method_get(self, client: OgxClient) -> None:
item = client.conversations.items.get(
item_id="item_id",
conversation_id="conversation_id",
@@ -201,7 +201,7 @@ def test_method_get(self, client: LlamaStackClient) -> None:
assert_matches_type(ItemGetResponse, item, path=["response"])
@parametrize
- def test_raw_response_get(self, client: LlamaStackClient) -> None:
+ def test_raw_response_get(self, client: OgxClient) -> None:
response = client.conversations.items.with_raw_response.get(
item_id="item_id",
conversation_id="conversation_id",
@@ -213,7 +213,7 @@ def test_raw_response_get(self, client: LlamaStackClient) -> None:
assert_matches_type(ItemGetResponse, item, path=["response"])
@parametrize
- def test_streaming_response_get(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_get(self, client: OgxClient) -> None:
with client.conversations.items.with_streaming_response.get(
item_id="item_id",
conversation_id="conversation_id",
@@ -227,7 +227,7 @@ def test_streaming_response_get(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_path_params_get(self, client: LlamaStackClient) -> None:
+ def test_path_params_get(self, client: OgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
client.conversations.items.with_raw_response.get(
item_id="item_id",
@@ -247,7 +247,7 @@ class TestAsyncItems:
)
@parametrize
- async def test_method_create(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_create(self, async_client: AsyncOgxClient) -> None:
item = await async_client.conversations.items.create(
conversation_id="conversation_id",
items=[
@@ -261,7 +261,7 @@ async def test_method_create(self, async_client: AsyncLlamaStackClient) -> None:
assert_matches_type(ItemCreateResponse, item, path=["response"])
@parametrize
- async def test_raw_response_create(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_create(self, async_client: AsyncOgxClient) -> None:
response = await async_client.conversations.items.with_raw_response.create(
conversation_id="conversation_id",
items=[
@@ -279,7 +279,7 @@ async def test_raw_response_create(self, async_client: AsyncLlamaStackClient) ->
assert_matches_type(ItemCreateResponse, item, path=["response"])
@parametrize
- async def test_streaming_response_create(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_create(self, async_client: AsyncOgxClient) -> None:
async with async_client.conversations.items.with_streaming_response.create(
conversation_id="conversation_id",
items=[
@@ -299,7 +299,7 @@ async def test_streaming_response_create(self, async_client: AsyncLlamaStackClie
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_path_params_create(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_path_params_create(self, async_client: AsyncOgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
await async_client.conversations.items.with_raw_response.create(
conversation_id="",
@@ -313,14 +313,14 @@ async def test_path_params_create(self, async_client: AsyncLlamaStackClient) ->
)
@parametrize
- async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_list(self, async_client: AsyncOgxClient) -> None:
item = await async_client.conversations.items.list(
conversation_id="conversation_id",
)
assert_matches_type(AsyncOpenAICursorPage[ItemListResponse], item, path=["response"])
@parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_list_with_all_params(self, async_client: AsyncOgxClient) -> None:
item = await async_client.conversations.items.list(
conversation_id="conversation_id",
after="after",
@@ -331,7 +331,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncLlamaStackCl
assert_matches_type(AsyncOpenAICursorPage[ItemListResponse], item, path=["response"])
@parametrize
- async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_list(self, async_client: AsyncOgxClient) -> None:
response = await async_client.conversations.items.with_raw_response.list(
conversation_id="conversation_id",
)
@@ -342,7 +342,7 @@ async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> N
assert_matches_type(AsyncOpenAICursorPage[ItemListResponse], item, path=["response"])
@parametrize
- async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_list(self, async_client: AsyncOgxClient) -> None:
async with async_client.conversations.items.with_streaming_response.list(
conversation_id="conversation_id",
) as response:
@@ -355,14 +355,14 @@ async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_path_params_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_path_params_list(self, async_client: AsyncOgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
await async_client.conversations.items.with_raw_response.list(
conversation_id="",
)
@parametrize
- async def test_method_delete(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_delete(self, async_client: AsyncOgxClient) -> None:
item = await async_client.conversations.items.delete(
item_id="item_id",
conversation_id="conversation_id",
@@ -370,7 +370,7 @@ async def test_method_delete(self, async_client: AsyncLlamaStackClient) -> None:
assert_matches_type(ItemDeleteResponse, item, path=["response"])
@parametrize
- async def test_raw_response_delete(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_delete(self, async_client: AsyncOgxClient) -> None:
response = await async_client.conversations.items.with_raw_response.delete(
item_id="item_id",
conversation_id="conversation_id",
@@ -382,7 +382,7 @@ async def test_raw_response_delete(self, async_client: AsyncLlamaStackClient) ->
assert_matches_type(ItemDeleteResponse, item, path=["response"])
@parametrize
- async def test_streaming_response_delete(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_delete(self, async_client: AsyncOgxClient) -> None:
async with async_client.conversations.items.with_streaming_response.delete(
item_id="item_id",
conversation_id="conversation_id",
@@ -396,7 +396,7 @@ async def test_streaming_response_delete(self, async_client: AsyncLlamaStackClie
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_path_params_delete(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_path_params_delete(self, async_client: AsyncOgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
await async_client.conversations.items.with_raw_response.delete(
item_id="item_id",
@@ -410,7 +410,7 @@ async def test_path_params_delete(self, async_client: AsyncLlamaStackClient) ->
)
@parametrize
- async def test_method_get(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_get(self, async_client: AsyncOgxClient) -> None:
item = await async_client.conversations.items.get(
item_id="item_id",
conversation_id="conversation_id",
@@ -418,7 +418,7 @@ async def test_method_get(self, async_client: AsyncLlamaStackClient) -> None:
assert_matches_type(ItemGetResponse, item, path=["response"])
@parametrize
- async def test_raw_response_get(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_get(self, async_client: AsyncOgxClient) -> None:
response = await async_client.conversations.items.with_raw_response.get(
item_id="item_id",
conversation_id="conversation_id",
@@ -430,7 +430,7 @@ async def test_raw_response_get(self, async_client: AsyncLlamaStackClient) -> No
assert_matches_type(ItemGetResponse, item, path=["response"])
@parametrize
- async def test_streaming_response_get(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_get(self, async_client: AsyncOgxClient) -> None:
async with async_client.conversations.items.with_streaming_response.get(
item_id="item_id",
conversation_id="conversation_id",
@@ -444,7 +444,7 @@ async def test_streaming_response_get(self, async_client: AsyncLlamaStackClient)
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_path_params_get(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_path_params_get(self, async_client: AsyncOgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
await async_client.conversations.items.with_raw_response.get(
item_id="item_id",
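Note: the items list tests now assert SyncOpenAICursorPage / AsyncOpenAICursorPage from ogx_client.pagination. A sketch of how such a cursor page is typically consumed, assuming the Stainless auto-pagination behavior is unchanged by the rename:

from ogx_client import OgxClient

client = OgxClient(base_url="http://127.0.0.1:4010")

page = client.conversations.items.list(
    conversation_id="conversation_id",
    limit=10,
)
for item in page:
    # Iterating the page transparently follows the `after` cursor and
    # requests the next page when the current one is exhausted.
    print(item)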
diff --git a/tests/api_resources/models/test_openai.py b/tests/api_resources/models/test_openai.py
index 6a9acf23..b8a83462 100644
--- a/tests/api_resources/models/test_openai.py
+++ b/tests/api_resources/models/test_openai.py
@@ -13,9 +13,9 @@
import pytest
+from ogx_client import OgxClient, AsyncOgxClient
from tests.utils import assert_matches_type
-from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client.types import ModelListResponse
+from ogx_client.types import ListModelsResponse
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -24,27 +24,27 @@ class TestOpenAI:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@parametrize
- def test_method_list(self, client: LlamaStackClient) -> None:
+ def test_method_list(self, client: OgxClient) -> None:
openai = client.models.openai.list()
- assert_matches_type(ModelListResponse, openai, path=["response"])
+ assert_matches_type(ListModelsResponse, openai, path=["response"])
@parametrize
- def test_raw_response_list(self, client: LlamaStackClient) -> None:
+ def test_raw_response_list(self, client: OgxClient) -> None:
response = client.models.openai.with_raw_response.list()
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
openai = response.parse()
- assert_matches_type(ModelListResponse, openai, path=["response"])
+ assert_matches_type(ListModelsResponse, openai, path=["response"])
@parametrize
- def test_streaming_response_list(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_list(self, client: OgxClient) -> None:
with client.models.openai.with_streaming_response.list() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
openai = response.parse()
- assert_matches_type(ModelListResponse, openai, path=["response"])
+ assert_matches_type(ListModelsResponse, openai, path=["response"])
assert cast(Any, response.is_closed) is True
@@ -55,26 +55,26 @@ class TestAsyncOpenAI:
)
@parametrize
- async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_list(self, async_client: AsyncOgxClient) -> None:
openai = await async_client.models.openai.list()
- assert_matches_type(ModelListResponse, openai, path=["response"])
+ assert_matches_type(ListModelsResponse, openai, path=["response"])
@parametrize
- async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_list(self, async_client: AsyncOgxClient) -> None:
response = await async_client.models.openai.with_raw_response.list()
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
openai = await response.parse()
- assert_matches_type(ModelListResponse, openai, path=["response"])
+ assert_matches_type(ListModelsResponse, openai, path=["response"])
@parametrize
- async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_list(self, async_client: AsyncOgxClient) -> None:
async with async_client.models.openai.with_streaming_response.list() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
openai = await response.parse()
- assert_matches_type(ModelListResponse, openai, path=["response"])
+ assert_matches_type(ListModelsResponse, openai, path=["response"])
assert cast(Any, response.is_closed) is True
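Note: beyond the client rename, this file picks up a response-type rename — ModelListResponse becomes ListModelsResponse, imported from ogx_client.types. Call sites are untouched, so downstream code should only need import and annotation updates. A sketch under that assumption:

from ogx_client import OgxClient
from ogx_client.types import ListModelsResponse

client = OgxClient(base_url="http://127.0.0.1:4010")

models = client.models.openai.list()
# The typed response model replaces the old ModelListResponse one-for-one.
assert isinstance(models, ListModelsResponse)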
diff --git a/tests/api_resources/prompts/test_versions.py b/tests/api_resources/prompts/test_versions.py
index 00e9b805..d1737b6a 100644
--- a/tests/api_resources/prompts/test_versions.py
+++ b/tests/api_resources/prompts/test_versions.py
@@ -13,9 +13,9 @@
import pytest
+from ogx_client import OgxClient, AsyncOgxClient
from tests.utils import assert_matches_type
-from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client.types import PromptListResponse
+from ogx_client.types import PromptListResponse
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -24,14 +24,14 @@ class TestVersions:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@parametrize
- def test_method_list(self, client: LlamaStackClient) -> None:
+ def test_method_list(self, client: OgxClient) -> None:
version = client.prompts.versions.list(
"prompt_id",
)
assert_matches_type(PromptListResponse, version, path=["response"])
@parametrize
- def test_raw_response_list(self, client: LlamaStackClient) -> None:
+ def test_raw_response_list(self, client: OgxClient) -> None:
response = client.prompts.versions.with_raw_response.list(
"prompt_id",
)
@@ -42,7 +42,7 @@ def test_raw_response_list(self, client: LlamaStackClient) -> None:
assert_matches_type(PromptListResponse, version, path=["response"])
@parametrize
- def test_streaming_response_list(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_list(self, client: OgxClient) -> None:
with client.prompts.versions.with_streaming_response.list(
"prompt_id",
) as response:
@@ -55,7 +55,7 @@ def test_streaming_response_list(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_path_params_list(self, client: LlamaStackClient) -> None:
+ def test_path_params_list(self, client: OgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `prompt_id` but received ''"):
client.prompts.versions.with_raw_response.list(
"",
@@ -68,14 +68,14 @@ class TestAsyncVersions:
)
@parametrize
- async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_list(self, async_client: AsyncOgxClient) -> None:
version = await async_client.prompts.versions.list(
"prompt_id",
)
assert_matches_type(PromptListResponse, version, path=["response"])
@parametrize
- async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_list(self, async_client: AsyncOgxClient) -> None:
response = await async_client.prompts.versions.with_raw_response.list(
"prompt_id",
)
@@ -86,7 +86,7 @@ async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> N
assert_matches_type(PromptListResponse, version, path=["response"])
@parametrize
- async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_list(self, async_client: AsyncOgxClient) -> None:
async with async_client.prompts.versions.with_streaming_response.list(
"prompt_id",
) as response:
@@ -99,7 +99,7 @@ async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_path_params_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_path_params_list(self, async_client: AsyncOgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `prompt_id` but received ''"):
await async_client.prompts.versions.with_raw_response.list(
"",
diff --git a/tests/api_resources/responses/test_input_items.py b/tests/api_resources/responses/test_input_items.py
index a641c52d..ba30155b 100644
--- a/tests/api_resources/responses/test_input_items.py
+++ b/tests/api_resources/responses/test_input_items.py
@@ -13,9 +13,9 @@
import pytest
+from ogx_client import OgxClient, AsyncOgxClient
from tests.utils import assert_matches_type
-from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client.types.responses import InputItemListResponse
+from ogx_client.types.responses import InputItemListResponse
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -24,14 +24,14 @@ class TestInputItems:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@parametrize
- def test_method_list(self, client: LlamaStackClient) -> None:
+ def test_method_list(self, client: OgxClient) -> None:
input_item = client.responses.input_items.list(
response_id="response_id",
)
assert_matches_type(InputItemListResponse, input_item, path=["response"])
@parametrize
- def test_method_list_with_all_params(self, client: LlamaStackClient) -> None:
+ def test_method_list_with_all_params(self, client: OgxClient) -> None:
input_item = client.responses.input_items.list(
response_id="response_id",
after="after",
@@ -43,7 +43,7 @@ def test_method_list_with_all_params(self, client: LlamaStackClient) -> None:
assert_matches_type(InputItemListResponse, input_item, path=["response"])
@parametrize
- def test_raw_response_list(self, client: LlamaStackClient) -> None:
+ def test_raw_response_list(self, client: OgxClient) -> None:
response = client.responses.input_items.with_raw_response.list(
response_id="response_id",
)
@@ -54,7 +54,7 @@ def test_raw_response_list(self, client: LlamaStackClient) -> None:
assert_matches_type(InputItemListResponse, input_item, path=["response"])
@parametrize
- def test_streaming_response_list(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_list(self, client: OgxClient) -> None:
with client.responses.input_items.with_streaming_response.list(
response_id="response_id",
) as response:
@@ -67,7 +67,7 @@ def test_streaming_response_list(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_path_params_list(self, client: LlamaStackClient) -> None:
+ def test_path_params_list(self, client: OgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"):
client.responses.input_items.with_raw_response.list(
response_id="",
@@ -80,14 +80,14 @@ class TestAsyncInputItems:
)
@parametrize
- async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_list(self, async_client: AsyncOgxClient) -> None:
input_item = await async_client.responses.input_items.list(
response_id="response_id",
)
assert_matches_type(InputItemListResponse, input_item, path=["response"])
@parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_list_with_all_params(self, async_client: AsyncOgxClient) -> None:
input_item = await async_client.responses.input_items.list(
response_id="response_id",
after="after",
@@ -99,7 +99,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncLlamaStackCl
assert_matches_type(InputItemListResponse, input_item, path=["response"])
@parametrize
- async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_list(self, async_client: AsyncOgxClient) -> None:
response = await async_client.responses.input_items.with_raw_response.list(
response_id="response_id",
)
@@ -110,7 +110,7 @@ async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> N
assert_matches_type(InputItemListResponse, input_item, path=["response"])
@parametrize
- async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_list(self, async_client: AsyncOgxClient) -> None:
async with async_client.responses.input_items.with_streaming_response.list(
response_id="response_id",
) as response:
@@ -123,7 +123,7 @@ async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_path_params_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_path_params_list(self, async_client: AsyncOgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"):
await async_client.responses.input_items.with_raw_response.list(
response_id="",
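Note: the streaming-response wrapper rounds out the three access patterns: it is a context manager that keeps the connection open until the body is read and closes it on exit, which is what the paired is_closed assertions above verify. A sketch under the same carry-over assumption:

from ogx_client import OgxClient

client = OgxClient(base_url="http://127.0.0.1:4010")

with client.responses.input_items.with_streaming_response.list(
    response_id="response_id",
) as response:
    assert not response.is_closed  # body not consumed yet
    input_items = response.parse()

assert response.is_closed  # connection closed when the context manager exits
print(input_items)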
diff --git a/tests/api_resources/test_batches.py b/tests/api_resources/test_batches.py
index 0364d9c4..29986c0b 100644
--- a/tests/api_resources/test_batches.py
+++ b/tests/api_resources/test_batches.py
@@ -13,15 +13,15 @@
import pytest
+from ogx_client import OgxClient, AsyncOgxClient
from tests.utils import assert_matches_type
-from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client.types import (
+from ogx_client.types import (
BatchListResponse,
BatchCancelResponse,
BatchCreateResponse,
BatchRetrieveResponse,
)
-from llama_stack_client.pagination import SyncOpenAICursorPage, AsyncOpenAICursorPage
+from ogx_client.pagination import SyncOpenAICursorPage, AsyncOpenAICursorPage
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -30,7 +30,7 @@ class TestBatches:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@parametrize
- def test_method_create(self, client: LlamaStackClient) -> None:
+ def test_method_create(self, client: OgxClient) -> None:
batch = client.batches.create(
completion_window="24h",
endpoint="endpoint",
@@ -39,7 +39,7 @@ def test_method_create(self, client: LlamaStackClient) -> None:
assert_matches_type(BatchCreateResponse, batch, path=["response"])
@parametrize
- def test_method_create_with_all_params(self, client: LlamaStackClient) -> None:
+ def test_method_create_with_all_params(self, client: OgxClient) -> None:
batch = client.batches.create(
completion_window="24h",
endpoint="endpoint",
@@ -50,7 +50,7 @@ def test_method_create_with_all_params(self, client: LlamaStackClient) -> None:
assert_matches_type(BatchCreateResponse, batch, path=["response"])
@parametrize
- def test_raw_response_create(self, client: LlamaStackClient) -> None:
+ def test_raw_response_create(self, client: OgxClient) -> None:
response = client.batches.with_raw_response.create(
completion_window="24h",
endpoint="endpoint",
@@ -63,7 +63,7 @@ def test_raw_response_create(self, client: LlamaStackClient) -> None:
assert_matches_type(BatchCreateResponse, batch, path=["response"])
@parametrize
- def test_streaming_response_create(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_create(self, client: OgxClient) -> None:
with client.batches.with_streaming_response.create(
completion_window="24h",
endpoint="endpoint",
@@ -78,14 +78,14 @@ def test_streaming_response_create(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_method_retrieve(self, client: LlamaStackClient) -> None:
+ def test_method_retrieve(self, client: OgxClient) -> None:
batch = client.batches.retrieve(
"batch_id",
)
assert_matches_type(BatchRetrieveResponse, batch, path=["response"])
@parametrize
- def test_raw_response_retrieve(self, client: LlamaStackClient) -> None:
+ def test_raw_response_retrieve(self, client: OgxClient) -> None:
response = client.batches.with_raw_response.retrieve(
"batch_id",
)
@@ -96,7 +96,7 @@ def test_raw_response_retrieve(self, client: LlamaStackClient) -> None:
assert_matches_type(BatchRetrieveResponse, batch, path=["response"])
@parametrize
- def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_retrieve(self, client: OgxClient) -> None:
with client.batches.with_streaming_response.retrieve(
"batch_id",
) as response:
@@ -109,19 +109,19 @@ def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_path_params_retrieve(self, client: LlamaStackClient) -> None:
+ def test_path_params_retrieve(self, client: OgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"):
client.batches.with_raw_response.retrieve(
"",
)
@parametrize
- def test_method_list(self, client: LlamaStackClient) -> None:
+ def test_method_list(self, client: OgxClient) -> None:
batch = client.batches.list()
assert_matches_type(SyncOpenAICursorPage[BatchListResponse], batch, path=["response"])
@parametrize
- def test_method_list_with_all_params(self, client: LlamaStackClient) -> None:
+ def test_method_list_with_all_params(self, client: OgxClient) -> None:
batch = client.batches.list(
after="after",
limit=0,
@@ -129,7 +129,7 @@ def test_method_list_with_all_params(self, client: LlamaStackClient) -> None:
assert_matches_type(SyncOpenAICursorPage[BatchListResponse], batch, path=["response"])
@parametrize
- def test_raw_response_list(self, client: LlamaStackClient) -> None:
+ def test_raw_response_list(self, client: OgxClient) -> None:
response = client.batches.with_raw_response.list()
assert response.is_closed is True
@@ -138,7 +138,7 @@ def test_raw_response_list(self, client: LlamaStackClient) -> None:
assert_matches_type(SyncOpenAICursorPage[BatchListResponse], batch, path=["response"])
@parametrize
- def test_streaming_response_list(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_list(self, client: OgxClient) -> None:
with client.batches.with_streaming_response.list() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -149,14 +149,14 @@ def test_streaming_response_list(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_method_cancel(self, client: LlamaStackClient) -> None:
+ def test_method_cancel(self, client: OgxClient) -> None:
batch = client.batches.cancel(
"batch_id",
)
assert_matches_type(BatchCancelResponse, batch, path=["response"])
@parametrize
- def test_raw_response_cancel(self, client: LlamaStackClient) -> None:
+ def test_raw_response_cancel(self, client: OgxClient) -> None:
response = client.batches.with_raw_response.cancel(
"batch_id",
)
@@ -167,7 +167,7 @@ def test_raw_response_cancel(self, client: LlamaStackClient) -> None:
assert_matches_type(BatchCancelResponse, batch, path=["response"])
@parametrize
- def test_streaming_response_cancel(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_cancel(self, client: OgxClient) -> None:
with client.batches.with_streaming_response.cancel(
"batch_id",
) as response:
@@ -180,7 +180,7 @@ def test_streaming_response_cancel(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_path_params_cancel(self, client: LlamaStackClient) -> None:
+ def test_path_params_cancel(self, client: OgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"):
client.batches.with_raw_response.cancel(
"",
@@ -193,7 +193,7 @@ class TestAsyncBatches:
)
@parametrize
- async def test_method_create(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_create(self, async_client: AsyncOgxClient) -> None:
batch = await async_client.batches.create(
completion_window="24h",
endpoint="endpoint",
@@ -202,7 +202,7 @@ async def test_method_create(self, async_client: AsyncLlamaStackClient) -> None:
assert_matches_type(BatchCreateResponse, batch, path=["response"])
@parametrize
- async def test_method_create_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_create_with_all_params(self, async_client: AsyncOgxClient) -> None:
batch = await async_client.batches.create(
completion_window="24h",
endpoint="endpoint",
@@ -213,7 +213,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncLlamaStack
assert_matches_type(BatchCreateResponse, batch, path=["response"])
@parametrize
- async def test_raw_response_create(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_create(self, async_client: AsyncOgxClient) -> None:
response = await async_client.batches.with_raw_response.create(
completion_window="24h",
endpoint="endpoint",
@@ -226,7 +226,7 @@ async def test_raw_response_create(self, async_client: AsyncLlamaStackClient) ->
assert_matches_type(BatchCreateResponse, batch, path=["response"])
@parametrize
- async def test_streaming_response_create(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_create(self, async_client: AsyncOgxClient) -> None:
async with async_client.batches.with_streaming_response.create(
completion_window="24h",
endpoint="endpoint",
@@ -241,14 +241,14 @@ async def test_streaming_response_create(self, async_client: AsyncLlamaStackClie
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_method_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_retrieve(self, async_client: AsyncOgxClient) -> None:
batch = await async_client.batches.retrieve(
"batch_id",
)
assert_matches_type(BatchRetrieveResponse, batch, path=["response"])
@parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_retrieve(self, async_client: AsyncOgxClient) -> None:
response = await async_client.batches.with_raw_response.retrieve(
"batch_id",
)
@@ -259,7 +259,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncLlamaStackClient)
assert_matches_type(BatchRetrieveResponse, batch, path=["response"])
@parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_retrieve(self, async_client: AsyncOgxClient) -> None:
async with async_client.batches.with_streaming_response.retrieve(
"batch_id",
) as response:
@@ -272,19 +272,19 @@ async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackCl
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_path_params_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_path_params_retrieve(self, async_client: AsyncOgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"):
await async_client.batches.with_raw_response.retrieve(
"",
)
@parametrize
- async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_list(self, async_client: AsyncOgxClient) -> None:
batch = await async_client.batches.list()
assert_matches_type(AsyncOpenAICursorPage[BatchListResponse], batch, path=["response"])
@parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_list_with_all_params(self, async_client: AsyncOgxClient) -> None:
batch = await async_client.batches.list(
after="after",
limit=0,
@@ -292,7 +292,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncLlamaStackCl
assert_matches_type(AsyncOpenAICursorPage[BatchListResponse], batch, path=["response"])
@parametrize
- async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_list(self, async_client: AsyncOgxClient) -> None:
response = await async_client.batches.with_raw_response.list()
assert response.is_closed is True
@@ -301,7 +301,7 @@ async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> N
assert_matches_type(AsyncOpenAICursorPage[BatchListResponse], batch, path=["response"])
@parametrize
- async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_list(self, async_client: AsyncOgxClient) -> None:
async with async_client.batches.with_streaming_response.list() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -312,14 +312,14 @@ async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_method_cancel(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_cancel(self, async_client: AsyncOgxClient) -> None:
batch = await async_client.batches.cancel(
"batch_id",
)
assert_matches_type(BatchCancelResponse, batch, path=["response"])
@parametrize
- async def test_raw_response_cancel(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_cancel(self, async_client: AsyncOgxClient) -> None:
response = await async_client.batches.with_raw_response.cancel(
"batch_id",
)
@@ -330,7 +330,7 @@ async def test_raw_response_cancel(self, async_client: AsyncLlamaStackClient) ->
assert_matches_type(BatchCancelResponse, batch, path=["response"])
@parametrize
- async def test_streaming_response_cancel(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_cancel(self, async_client: AsyncOgxClient) -> None:
async with async_client.batches.with_streaming_response.cancel(
"batch_id",
) as response:
@@ -343,7 +343,7 @@ async def test_streaming_response_cancel(self, async_client: AsyncLlamaStackClie
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_path_params_cancel(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_path_params_cancel(self, async_client: AsyncOgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"):
await async_client.batches.with_raw_response.cancel(
"",
diff --git a/tests/api_resources/test_completions.py b/tests/api_resources/test_completions.py
index 68586952..c38a9bcf 100644
--- a/tests/api_resources/test_completions.py
+++ b/tests/api_resources/test_completions.py
@@ -13,9 +13,9 @@
import pytest
+from ogx_client import OgxClient, AsyncOgxClient
from tests.utils import assert_matches_type
-from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client.types import CompletionCreateResponse
+from ogx_client.types import CompletionCreateResponse
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -24,7 +24,7 @@ class TestCompletions:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@parametrize
- def test_method_create_overload_1(self, client: LlamaStackClient) -> None:
+ def test_method_create_overload_1(self, client: OgxClient) -> None:
completion = client.completions.create(
model="model",
prompt="string",
@@ -32,7 +32,7 @@ def test_method_create_overload_1(self, client: LlamaStackClient) -> None:
assert_matches_type(CompletionCreateResponse, completion, path=["response"])
@parametrize
- def test_method_create_with_all_params_overload_1(self, client: LlamaStackClient) -> None:
+ def test_method_create_with_all_params_overload_1(self, client: OgxClient) -> None:
completion = client.completions.create(
model="model",
prompt="string",
@@ -56,7 +56,7 @@ def test_method_create_with_all_params_overload_1(self, client: LlamaStackClient
assert_matches_type(CompletionCreateResponse, completion, path=["response"])
@parametrize
- def test_raw_response_create_overload_1(self, client: LlamaStackClient) -> None:
+ def test_raw_response_create_overload_1(self, client: OgxClient) -> None:
response = client.completions.with_raw_response.create(
model="model",
prompt="string",
@@ -68,7 +68,7 @@ def test_raw_response_create_overload_1(self, client: LlamaStackClient) -> None:
assert_matches_type(CompletionCreateResponse, completion, path=["response"])
@parametrize
- def test_streaming_response_create_overload_1(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_create_overload_1(self, client: OgxClient) -> None:
with client.completions.with_streaming_response.create(
model="model",
prompt="string",
@@ -82,7 +82,7 @@ def test_streaming_response_create_overload_1(self, client: LlamaStackClient) ->
assert cast(Any, response.is_closed) is True
@parametrize
- def test_method_create_overload_2(self, client: LlamaStackClient) -> None:
+ def test_method_create_overload_2(self, client: OgxClient) -> None:
completion_stream = client.completions.create(
model="model",
prompt="string",
@@ -91,7 +91,7 @@ def test_method_create_overload_2(self, client: LlamaStackClient) -> None:
completion_stream.response.close()
@parametrize
- def test_method_create_with_all_params_overload_2(self, client: LlamaStackClient) -> None:
+ def test_method_create_with_all_params_overload_2(self, client: OgxClient) -> None:
completion_stream = client.completions.create(
model="model",
prompt="string",
@@ -115,7 +115,7 @@ def test_method_create_with_all_params_overload_2(self, client: LlamaStackClient
completion_stream.response.close()
@parametrize
- def test_raw_response_create_overload_2(self, client: LlamaStackClient) -> None:
+ def test_raw_response_create_overload_2(self, client: OgxClient) -> None:
response = client.completions.with_raw_response.create(
model="model",
prompt="string",
@@ -127,7 +127,7 @@ def test_raw_response_create_overload_2(self, client: LlamaStackClient) -> None:
stream.close()
@parametrize
- def test_streaming_response_create_overload_2(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_create_overload_2(self, client: OgxClient) -> None:
with client.completions.with_streaming_response.create(
model="model",
prompt="string",
@@ -148,7 +148,7 @@ class TestAsyncCompletions:
)
@parametrize
- async def test_method_create_overload_1(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_create_overload_1(self, async_client: AsyncOgxClient) -> None:
completion = await async_client.completions.create(
model="model",
prompt="string",
@@ -156,7 +156,7 @@ async def test_method_create_overload_1(self, async_client: AsyncLlamaStackClien
assert_matches_type(CompletionCreateResponse, completion, path=["response"])
@parametrize
- async def test_method_create_with_all_params_overload_1(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_create_with_all_params_overload_1(self, async_client: AsyncOgxClient) -> None:
completion = await async_client.completions.create(
model="model",
prompt="string",
@@ -180,7 +180,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
assert_matches_type(CompletionCreateResponse, completion, path=["response"])
@parametrize
- async def test_raw_response_create_overload_1(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_create_overload_1(self, async_client: AsyncOgxClient) -> None:
response = await async_client.completions.with_raw_response.create(
model="model",
prompt="string",
@@ -192,7 +192,7 @@ async def test_raw_response_create_overload_1(self, async_client: AsyncLlamaStac
assert_matches_type(CompletionCreateResponse, completion, path=["response"])
@parametrize
- async def test_streaming_response_create_overload_1(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_create_overload_1(self, async_client: AsyncOgxClient) -> None:
async with async_client.completions.with_streaming_response.create(
model="model",
prompt="string",
@@ -206,7 +206,7 @@ async def test_streaming_response_create_overload_1(self, async_client: AsyncLla
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_method_create_overload_2(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_create_overload_2(self, async_client: AsyncOgxClient) -> None:
completion_stream = await async_client.completions.create(
model="model",
prompt="string",
@@ -215,7 +215,7 @@ async def test_method_create_overload_2(self, async_client: AsyncLlamaStackClien
await completion_stream.response.aclose()
@parametrize
- async def test_method_create_with_all_params_overload_2(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_create_with_all_params_overload_2(self, async_client: AsyncOgxClient) -> None:
completion_stream = await async_client.completions.create(
model="model",
prompt="string",
@@ -239,7 +239,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
await completion_stream.response.aclose()
@parametrize
- async def test_raw_response_create_overload_2(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_create_overload_2(self, async_client: AsyncOgxClient) -> None:
response = await async_client.completions.with_raw_response.create(
model="model",
prompt="string",
@@ -251,7 +251,7 @@ async def test_raw_response_create_overload_2(self, async_client: AsyncLlamaStac
await stream.close()
@parametrize
- async def test_streaming_response_create_overload_2(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_create_overload_2(self, async_client: AsyncOgxClient) -> None:
async with async_client.completions.with_streaming_response.create(
model="model",
prompt="string",
diff --git a/tests/api_resources/test_conversations.py b/tests/api_resources/test_conversations.py
index dbe50dd1..be14b7a3 100644
--- a/tests/api_resources/test_conversations.py
+++ b/tests/api_resources/test_conversations.py
@@ -13,9 +13,9 @@
import pytest
+from ogx_client import OgxClient, AsyncOgxClient
from tests.utils import assert_matches_type
-from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client.types import (
+from ogx_client.types import (
ConversationObject,
ConversationDeleteResponse,
)
@@ -27,12 +27,12 @@ class TestConversations:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@parametrize
- def test_method_create(self, client: LlamaStackClient) -> None:
+ def test_method_create(self, client: OgxClient) -> None:
conversation = client.conversations.create()
assert_matches_type(ConversationObject, conversation, path=["response"])
@parametrize
- def test_method_create_with_all_params(self, client: LlamaStackClient) -> None:
+ def test_method_create_with_all_params(self, client: OgxClient) -> None:
conversation = client.conversations.create(
items=[
{
@@ -48,7 +48,7 @@ def test_method_create_with_all_params(self, client: LlamaStackClient) -> None:
assert_matches_type(ConversationObject, conversation, path=["response"])
@parametrize
- def test_raw_response_create(self, client: LlamaStackClient) -> None:
+ def test_raw_response_create(self, client: OgxClient) -> None:
response = client.conversations.with_raw_response.create()
assert response.is_closed is True
@@ -57,7 +57,7 @@ def test_raw_response_create(self, client: LlamaStackClient) -> None:
assert_matches_type(ConversationObject, conversation, path=["response"])
@parametrize
- def test_streaming_response_create(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_create(self, client: OgxClient) -> None:
with client.conversations.with_streaming_response.create() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -68,14 +68,14 @@ def test_streaming_response_create(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_method_retrieve(self, client: LlamaStackClient) -> None:
+ def test_method_retrieve(self, client: OgxClient) -> None:
conversation = client.conversations.retrieve(
"conversation_id",
)
assert_matches_type(ConversationObject, conversation, path=["response"])
@parametrize
- def test_raw_response_retrieve(self, client: LlamaStackClient) -> None:
+ def test_raw_response_retrieve(self, client: OgxClient) -> None:
response = client.conversations.with_raw_response.retrieve(
"conversation_id",
)
@@ -86,7 +86,7 @@ def test_raw_response_retrieve(self, client: LlamaStackClient) -> None:
assert_matches_type(ConversationObject, conversation, path=["response"])
@parametrize
- def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_retrieve(self, client: OgxClient) -> None:
with client.conversations.with_streaming_response.retrieve(
"conversation_id",
) as response:
@@ -99,14 +99,14 @@ def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_path_params_retrieve(self, client: LlamaStackClient) -> None:
+ def test_path_params_retrieve(self, client: OgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
client.conversations.with_raw_response.retrieve(
"",
)
@parametrize
- def test_method_update(self, client: LlamaStackClient) -> None:
+ def test_method_update(self, client: OgxClient) -> None:
conversation = client.conversations.update(
conversation_id="conversation_id",
metadata={"foo": "string"},
@@ -114,7 +114,7 @@ def test_method_update(self, client: LlamaStackClient) -> None:
assert_matches_type(ConversationObject, conversation, path=["response"])
@parametrize
- def test_raw_response_update(self, client: LlamaStackClient) -> None:
+ def test_raw_response_update(self, client: OgxClient) -> None:
response = client.conversations.with_raw_response.update(
conversation_id="conversation_id",
metadata={"foo": "string"},
@@ -126,7 +126,7 @@ def test_raw_response_update(self, client: LlamaStackClient) -> None:
assert_matches_type(ConversationObject, conversation, path=["response"])
@parametrize
- def test_streaming_response_update(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_update(self, client: OgxClient) -> None:
with client.conversations.with_streaming_response.update(
conversation_id="conversation_id",
metadata={"foo": "string"},
@@ -140,7 +140,7 @@ def test_streaming_response_update(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_path_params_update(self, client: LlamaStackClient) -> None:
+ def test_path_params_update(self, client: OgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
client.conversations.with_raw_response.update(
conversation_id="",
@@ -148,14 +148,14 @@ def test_path_params_update(self, client: LlamaStackClient) -> None:
)
@parametrize
- def test_method_delete(self, client: LlamaStackClient) -> None:
+ def test_method_delete(self, client: OgxClient) -> None:
conversation = client.conversations.delete(
"conversation_id",
)
assert_matches_type(ConversationDeleteResponse, conversation, path=["response"])
@parametrize
- def test_raw_response_delete(self, client: LlamaStackClient) -> None:
+ def test_raw_response_delete(self, client: OgxClient) -> None:
response = client.conversations.with_raw_response.delete(
"conversation_id",
)
@@ -166,7 +166,7 @@ def test_raw_response_delete(self, client: LlamaStackClient) -> None:
assert_matches_type(ConversationDeleteResponse, conversation, path=["response"])
@parametrize
- def test_streaming_response_delete(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_delete(self, client: OgxClient) -> None:
with client.conversations.with_streaming_response.delete(
"conversation_id",
) as response:
@@ -179,7 +179,7 @@ def test_streaming_response_delete(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_path_params_delete(self, client: LlamaStackClient) -> None:
+ def test_path_params_delete(self, client: OgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
client.conversations.with_raw_response.delete(
"",
@@ -192,12 +192,12 @@ class TestAsyncConversations:
)
@parametrize
- async def test_method_create(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_create(self, async_client: AsyncOgxClient) -> None:
conversation = await async_client.conversations.create()
assert_matches_type(ConversationObject, conversation, path=["response"])
@parametrize
- async def test_method_create_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_create_with_all_params(self, async_client: AsyncOgxClient) -> None:
conversation = await async_client.conversations.create(
items=[
{
@@ -213,7 +213,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncLlamaStack
assert_matches_type(ConversationObject, conversation, path=["response"])
@parametrize
- async def test_raw_response_create(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_create(self, async_client: AsyncOgxClient) -> None:
response = await async_client.conversations.with_raw_response.create()
assert response.is_closed is True
@@ -222,7 +222,7 @@ async def test_raw_response_create(self, async_client: AsyncLlamaStackClient) ->
assert_matches_type(ConversationObject, conversation, path=["response"])
@parametrize
- async def test_streaming_response_create(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_create(self, async_client: AsyncOgxClient) -> None:
async with async_client.conversations.with_streaming_response.create() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -233,14 +233,14 @@ async def test_streaming_response_create(self, async_client: AsyncLlamaStackClie
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_method_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_retrieve(self, async_client: AsyncOgxClient) -> None:
conversation = await async_client.conversations.retrieve(
"conversation_id",
)
assert_matches_type(ConversationObject, conversation, path=["response"])
@parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_retrieve(self, async_client: AsyncOgxClient) -> None:
response = await async_client.conversations.with_raw_response.retrieve(
"conversation_id",
)
@@ -251,7 +251,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncLlamaStackClient)
assert_matches_type(ConversationObject, conversation, path=["response"])
@parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_retrieve(self, async_client: AsyncOgxClient) -> None:
async with async_client.conversations.with_streaming_response.retrieve(
"conversation_id",
) as response:
@@ -264,14 +264,14 @@ async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackCl
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_path_params_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_path_params_retrieve(self, async_client: AsyncOgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
await async_client.conversations.with_raw_response.retrieve(
"",
)
@parametrize
- async def test_method_update(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_update(self, async_client: AsyncOgxClient) -> None:
conversation = await async_client.conversations.update(
conversation_id="conversation_id",
metadata={"foo": "string"},
@@ -279,7 +279,7 @@ async def test_method_update(self, async_client: AsyncLlamaStackClient) -> None:
assert_matches_type(ConversationObject, conversation, path=["response"])
@parametrize
- async def test_raw_response_update(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_update(self, async_client: AsyncOgxClient) -> None:
response = await async_client.conversations.with_raw_response.update(
conversation_id="conversation_id",
metadata={"foo": "string"},
@@ -291,7 +291,7 @@ async def test_raw_response_update(self, async_client: AsyncLlamaStackClient) ->
assert_matches_type(ConversationObject, conversation, path=["response"])
@parametrize
- async def test_streaming_response_update(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_update(self, async_client: AsyncOgxClient) -> None:
async with async_client.conversations.with_streaming_response.update(
conversation_id="conversation_id",
metadata={"foo": "string"},
@@ -305,7 +305,7 @@ async def test_streaming_response_update(self, async_client: AsyncLlamaStackClie
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_path_params_update(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_path_params_update(self, async_client: AsyncOgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
await async_client.conversations.with_raw_response.update(
conversation_id="",
@@ -313,14 +313,14 @@ async def test_path_params_update(self, async_client: AsyncLlamaStackClient) ->
)
@parametrize
- async def test_method_delete(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_delete(self, async_client: AsyncOgxClient) -> None:
conversation = await async_client.conversations.delete(
"conversation_id",
)
assert_matches_type(ConversationDeleteResponse, conversation, path=["response"])
@parametrize
- async def test_raw_response_delete(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_delete(self, async_client: AsyncOgxClient) -> None:
response = await async_client.conversations.with_raw_response.delete(
"conversation_id",
)
@@ -331,7 +331,7 @@ async def test_raw_response_delete(self, async_client: AsyncLlamaStackClient) ->
assert_matches_type(ConversationDeleteResponse, conversation, path=["response"])
@parametrize
- async def test_streaming_response_delete(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_delete(self, async_client: AsyncOgxClient) -> None:
async with async_client.conversations.with_streaming_response.delete(
"conversation_id",
) as response:
@@ -344,7 +344,7 @@ async def test_streaming_response_delete(self, async_client: AsyncLlamaStackClie
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_path_params_delete(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_path_params_delete(self, async_client: AsyncOgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
await async_client.conversations.with_raw_response.delete(
"",
diff --git a/tests/api_resources/test_embeddings.py b/tests/api_resources/test_embeddings.py
index 3406fb6e..c1867e0a 100644
--- a/tests/api_resources/test_embeddings.py
+++ b/tests/api_resources/test_embeddings.py
@@ -13,9 +13,9 @@
import pytest
+from ogx_client import OgxClient, AsyncOgxClient
from tests.utils import assert_matches_type
-from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client.types import CreateEmbeddingsResponse
+from ogx_client.types import CreateEmbeddingsResponse
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -24,7 +24,7 @@ class TestEmbeddings:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@parametrize
- def test_method_create(self, client: LlamaStackClient) -> None:
+ def test_method_create(self, client: OgxClient) -> None:
embedding = client.embeddings.create(
input="string",
model="model",
@@ -32,7 +32,7 @@ def test_method_create(self, client: LlamaStackClient) -> None:
assert_matches_type(CreateEmbeddingsResponse, embedding, path=["response"])
@parametrize
- def test_method_create_with_all_params(self, client: LlamaStackClient) -> None:
+ def test_method_create_with_all_params(self, client: OgxClient) -> None:
embedding = client.embeddings.create(
input="string",
model="model",
@@ -43,7 +43,7 @@ def test_method_create_with_all_params(self, client: LlamaStackClient) -> None:
assert_matches_type(CreateEmbeddingsResponse, embedding, path=["response"])
@parametrize
- def test_raw_response_create(self, client: LlamaStackClient) -> None:
+ def test_raw_response_create(self, client: OgxClient) -> None:
response = client.embeddings.with_raw_response.create(
input="string",
model="model",
@@ -55,7 +55,7 @@ def test_raw_response_create(self, client: LlamaStackClient) -> None:
assert_matches_type(CreateEmbeddingsResponse, embedding, path=["response"])
@parametrize
- def test_streaming_response_create(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_create(self, client: OgxClient) -> None:
with client.embeddings.with_streaming_response.create(
input="string",
model="model",
@@ -75,7 +75,7 @@ class TestAsyncEmbeddings:
)
@parametrize
- async def test_method_create(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_create(self, async_client: AsyncOgxClient) -> None:
embedding = await async_client.embeddings.create(
input="string",
model="model",
@@ -83,7 +83,7 @@ async def test_method_create(self, async_client: AsyncLlamaStackClient) -> None:
assert_matches_type(CreateEmbeddingsResponse, embedding, path=["response"])
@parametrize
- async def test_method_create_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_create_with_all_params(self, async_client: AsyncOgxClient) -> None:
embedding = await async_client.embeddings.create(
input="string",
model="model",
@@ -94,7 +94,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncLlamaStack
assert_matches_type(CreateEmbeddingsResponse, embedding, path=["response"])
@parametrize
- async def test_raw_response_create(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_create(self, async_client: AsyncOgxClient) -> None:
response = await async_client.embeddings.with_raw_response.create(
input="string",
model="model",
@@ -106,7 +106,7 @@ async def test_raw_response_create(self, async_client: AsyncLlamaStackClient) ->
assert_matches_type(CreateEmbeddingsResponse, embedding, path=["response"])
@parametrize
- async def test_streaming_response_create(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_create(self, async_client: AsyncOgxClient) -> None:
async with async_client.embeddings.with_streaming_response.create(
input="string",
model="model",
diff --git a/tests/api_resources/test_files.py b/tests/api_resources/test_files.py
index 368b492f..23849909 100644
--- a/tests/api_resources/test_files.py
+++ b/tests/api_resources/test_files.py
@@ -13,10 +13,10 @@
import pytest
+from ogx_client import OgxClient, AsyncOgxClient
from tests.utils import assert_matches_type
-from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client.types import File, DeleteFileResponse
-from llama_stack_client.pagination import SyncOpenAICursorPage, AsyncOpenAICursorPage
+from ogx_client.types import File, DeleteFileResponse
+from ogx_client.pagination import SyncOpenAICursorPage, AsyncOpenAICursorPage
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -25,7 +25,7 @@ class TestFiles:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@parametrize
- def test_method_create(self, client: LlamaStackClient) -> None:
+ def test_method_create(self, client: OgxClient) -> None:
file = client.files.create(
file=b"Example data",
purpose="assistants",
@@ -33,7 +33,7 @@ def test_method_create(self, client: LlamaStackClient) -> None:
assert_matches_type(File, file, path=["response"])
@parametrize
- def test_method_create_with_all_params(self, client: LlamaStackClient) -> None:
+ def test_method_create_with_all_params(self, client: OgxClient) -> None:
file = client.files.create(
file=b"Example data",
purpose="assistants",
@@ -45,7 +45,7 @@ def test_method_create_with_all_params(self, client: LlamaStackClient) -> None:
assert_matches_type(File, file, path=["response"])
@parametrize
- def test_raw_response_create(self, client: LlamaStackClient) -> None:
+ def test_raw_response_create(self, client: OgxClient) -> None:
response = client.files.with_raw_response.create(
file=b"Example data",
purpose="assistants",
@@ -57,7 +57,7 @@ def test_raw_response_create(self, client: LlamaStackClient) -> None:
assert_matches_type(File, file, path=["response"])
@parametrize
- def test_streaming_response_create(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_create(self, client: OgxClient) -> None:
with client.files.with_streaming_response.create(
file=b"Example data",
purpose="assistants",
@@ -71,14 +71,14 @@ def test_streaming_response_create(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_method_retrieve(self, client: LlamaStackClient) -> None:
+ def test_method_retrieve(self, client: OgxClient) -> None:
file = client.files.retrieve(
"file_id",
)
assert_matches_type(File, file, path=["response"])
@parametrize
- def test_raw_response_retrieve(self, client: LlamaStackClient) -> None:
+ def test_raw_response_retrieve(self, client: OgxClient) -> None:
response = client.files.with_raw_response.retrieve(
"file_id",
)
@@ -89,7 +89,7 @@ def test_raw_response_retrieve(self, client: LlamaStackClient) -> None:
assert_matches_type(File, file, path=["response"])
@parametrize
- def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_retrieve(self, client: OgxClient) -> None:
with client.files.with_streaming_response.retrieve(
"file_id",
) as response:
@@ -102,19 +102,19 @@ def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_path_params_retrieve(self, client: LlamaStackClient) -> None:
+ def test_path_params_retrieve(self, client: OgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
client.files.with_raw_response.retrieve(
"",
)
@parametrize
- def test_method_list(self, client: LlamaStackClient) -> None:
+ def test_method_list(self, client: OgxClient) -> None:
file = client.files.list()
assert_matches_type(SyncOpenAICursorPage[File], file, path=["response"])
@parametrize
- def test_method_list_with_all_params(self, client: LlamaStackClient) -> None:
+ def test_method_list_with_all_params(self, client: OgxClient) -> None:
file = client.files.list(
after="after",
limit=0,
@@ -124,7 +124,7 @@ def test_method_list_with_all_params(self, client: LlamaStackClient) -> None:
assert_matches_type(SyncOpenAICursorPage[File], file, path=["response"])
@parametrize
- def test_raw_response_list(self, client: LlamaStackClient) -> None:
+ def test_raw_response_list(self, client: OgxClient) -> None:
response = client.files.with_raw_response.list()
assert response.is_closed is True
@@ -133,7 +133,7 @@ def test_raw_response_list(self, client: LlamaStackClient) -> None:
assert_matches_type(SyncOpenAICursorPage[File], file, path=["response"])
@parametrize
- def test_streaming_response_list(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_list(self, client: OgxClient) -> None:
with client.files.with_streaming_response.list() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -144,14 +144,14 @@ def test_streaming_response_list(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_method_delete(self, client: LlamaStackClient) -> None:
+ def test_method_delete(self, client: OgxClient) -> None:
file = client.files.delete(
"file_id",
)
assert_matches_type(DeleteFileResponse, file, path=["response"])
@parametrize
- def test_raw_response_delete(self, client: LlamaStackClient) -> None:
+ def test_raw_response_delete(self, client: OgxClient) -> None:
response = client.files.with_raw_response.delete(
"file_id",
)
@@ -162,7 +162,7 @@ def test_raw_response_delete(self, client: LlamaStackClient) -> None:
assert_matches_type(DeleteFileResponse, file, path=["response"])
@parametrize
- def test_streaming_response_delete(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_delete(self, client: OgxClient) -> None:
with client.files.with_streaming_response.delete(
"file_id",
) as response:
@@ -175,21 +175,21 @@ def test_streaming_response_delete(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_path_params_delete(self, client: LlamaStackClient) -> None:
+ def test_path_params_delete(self, client: OgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
client.files.with_raw_response.delete(
"",
)
@parametrize
- def test_method_content(self, client: LlamaStackClient) -> None:
+ def test_method_content(self, client: OgxClient) -> None:
file = client.files.content(
"file_id",
)
assert_matches_type(str, file, path=["response"])
@parametrize
- def test_raw_response_content(self, client: LlamaStackClient) -> None:
+ def test_raw_response_content(self, client: OgxClient) -> None:
response = client.files.with_raw_response.content(
"file_id",
)
@@ -200,7 +200,7 @@ def test_raw_response_content(self, client: LlamaStackClient) -> None:
assert_matches_type(str, file, path=["response"])
@parametrize
- def test_streaming_response_content(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_content(self, client: OgxClient) -> None:
with client.files.with_streaming_response.content(
"file_id",
) as response:
@@ -213,7 +213,7 @@ def test_streaming_response_content(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_path_params_content(self, client: LlamaStackClient) -> None:
+ def test_path_params_content(self, client: OgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
client.files.with_raw_response.content(
"",
@@ -226,7 +226,7 @@ class TestAsyncFiles:
)
@parametrize
- async def test_method_create(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_create(self, async_client: AsyncOgxClient) -> None:
file = await async_client.files.create(
file=b"Example data",
purpose="assistants",
@@ -234,7 +234,7 @@ async def test_method_create(self, async_client: AsyncLlamaStackClient) -> None:
assert_matches_type(File, file, path=["response"])
@parametrize
- async def test_method_create_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_create_with_all_params(self, async_client: AsyncOgxClient) -> None:
file = await async_client.files.create(
file=b"Example data",
purpose="assistants",
@@ -246,7 +246,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncLlamaStack
assert_matches_type(File, file, path=["response"])
@parametrize
- async def test_raw_response_create(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_create(self, async_client: AsyncOgxClient) -> None:
response = await async_client.files.with_raw_response.create(
file=b"Example data",
purpose="assistants",
@@ -258,7 +258,7 @@ async def test_raw_response_create(self, async_client: AsyncLlamaStackClient) ->
assert_matches_type(File, file, path=["response"])
@parametrize
- async def test_streaming_response_create(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_create(self, async_client: AsyncOgxClient) -> None:
async with async_client.files.with_streaming_response.create(
file=b"Example data",
purpose="assistants",
@@ -272,14 +272,14 @@ async def test_streaming_response_create(self, async_client: AsyncLlamaStackClie
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_method_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_retrieve(self, async_client: AsyncOgxClient) -> None:
file = await async_client.files.retrieve(
"file_id",
)
assert_matches_type(File, file, path=["response"])
@parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_retrieve(self, async_client: AsyncOgxClient) -> None:
response = await async_client.files.with_raw_response.retrieve(
"file_id",
)
@@ -290,7 +290,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncLlamaStackClient)
assert_matches_type(File, file, path=["response"])
@parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_retrieve(self, async_client: AsyncOgxClient) -> None:
async with async_client.files.with_streaming_response.retrieve(
"file_id",
) as response:
@@ -303,19 +303,19 @@ async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackCl
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_path_params_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_path_params_retrieve(self, async_client: AsyncOgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
await async_client.files.with_raw_response.retrieve(
"",
)
@parametrize
- async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_list(self, async_client: AsyncOgxClient) -> None:
file = await async_client.files.list()
assert_matches_type(AsyncOpenAICursorPage[File], file, path=["response"])
@parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_list_with_all_params(self, async_client: AsyncOgxClient) -> None:
file = await async_client.files.list(
after="after",
limit=0,
@@ -325,7 +325,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncLlamaStackCl
assert_matches_type(AsyncOpenAICursorPage[File], file, path=["response"])
@parametrize
- async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_list(self, async_client: AsyncOgxClient) -> None:
response = await async_client.files.with_raw_response.list()
assert response.is_closed is True
@@ -334,7 +334,7 @@ async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> N
assert_matches_type(AsyncOpenAICursorPage[File], file, path=["response"])
@parametrize
- async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_list(self, async_client: AsyncOgxClient) -> None:
async with async_client.files.with_streaming_response.list() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -345,14 +345,14 @@ async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_method_delete(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_delete(self, async_client: AsyncOgxClient) -> None:
file = await async_client.files.delete(
"file_id",
)
assert_matches_type(DeleteFileResponse, file, path=["response"])
@parametrize
- async def test_raw_response_delete(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_delete(self, async_client: AsyncOgxClient) -> None:
response = await async_client.files.with_raw_response.delete(
"file_id",
)
@@ -363,7 +363,7 @@ async def test_raw_response_delete(self, async_client: AsyncLlamaStackClient) ->
assert_matches_type(DeleteFileResponse, file, path=["response"])
@parametrize
- async def test_streaming_response_delete(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_delete(self, async_client: AsyncOgxClient) -> None:
async with async_client.files.with_streaming_response.delete(
"file_id",
) as response:
@@ -376,21 +376,21 @@ async def test_streaming_response_delete(self, async_client: AsyncLlamaStackClie
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_path_params_delete(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_path_params_delete(self, async_client: AsyncOgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
await async_client.files.with_raw_response.delete(
"",
)
@parametrize
- async def test_method_content(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_content(self, async_client: AsyncOgxClient) -> None:
file = await async_client.files.content(
"file_id",
)
assert_matches_type(str, file, path=["response"])
@parametrize
- async def test_raw_response_content(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_content(self, async_client: AsyncOgxClient) -> None:
response = await async_client.files.with_raw_response.content(
"file_id",
)
@@ -401,7 +401,7 @@ async def test_raw_response_content(self, async_client: AsyncLlamaStackClient) -
assert_matches_type(str, file, path=["response"])
@parametrize
- async def test_streaming_response_content(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_content(self, async_client: AsyncOgxClient) -> None:
async with async_client.files.with_streaming_response.content(
"file_id",
) as response:
@@ -414,7 +414,7 @@ async def test_streaming_response_content(self, async_client: AsyncLlamaStackCli
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_path_params_content(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_path_params_content(self, async_client: AsyncOgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
await async_client.files.with_raw_response.content(
"",
diff --git a/tests/api_resources/test_inspect.py b/tests/api_resources/test_inspect.py
index 67896956..e5048398 100644
--- a/tests/api_resources/test_inspect.py
+++ b/tests/api_resources/test_inspect.py
@@ -13,9 +13,9 @@
import pytest
+from ogx_client import OgxClient, AsyncOgxClient
from tests.utils import assert_matches_type
-from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client.types.shared import HealthInfo, VersionInfo
+from ogx_client.types.shared import HealthInfo, VersionInfo
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -24,12 +24,12 @@ class TestInspect:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@parametrize
- def test_method_health(self, client: LlamaStackClient) -> None:
+ def test_method_health(self, client: OgxClient) -> None:
inspect = client.inspect.health()
assert_matches_type(HealthInfo, inspect, path=["response"])
@parametrize
- def test_raw_response_health(self, client: LlamaStackClient) -> None:
+ def test_raw_response_health(self, client: OgxClient) -> None:
response = client.inspect.with_raw_response.health()
assert response.is_closed is True
@@ -38,7 +38,7 @@ def test_raw_response_health(self, client: LlamaStackClient) -> None:
assert_matches_type(HealthInfo, inspect, path=["response"])
@parametrize
- def test_streaming_response_health(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_health(self, client: OgxClient) -> None:
with client.inspect.with_streaming_response.health() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -49,12 +49,12 @@ def test_streaming_response_health(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_method_version(self, client: LlamaStackClient) -> None:
+ def test_method_version(self, client: OgxClient) -> None:
inspect = client.inspect.version()
assert_matches_type(VersionInfo, inspect, path=["response"])
@parametrize
- def test_raw_response_version(self, client: LlamaStackClient) -> None:
+ def test_raw_response_version(self, client: OgxClient) -> None:
response = client.inspect.with_raw_response.version()
assert response.is_closed is True
@@ -63,7 +63,7 @@ def test_raw_response_version(self, client: LlamaStackClient) -> None:
assert_matches_type(VersionInfo, inspect, path=["response"])
@parametrize
- def test_streaming_response_version(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_version(self, client: OgxClient) -> None:
with client.inspect.with_streaming_response.version() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -80,12 +80,12 @@ class TestAsyncInspect:
)
@parametrize
- async def test_method_health(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_health(self, async_client: AsyncOgxClient) -> None:
inspect = await async_client.inspect.health()
assert_matches_type(HealthInfo, inspect, path=["response"])
@parametrize
- async def test_raw_response_health(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_health(self, async_client: AsyncOgxClient) -> None:
response = await async_client.inspect.with_raw_response.health()
assert response.is_closed is True
@@ -94,7 +94,7 @@ async def test_raw_response_health(self, async_client: AsyncLlamaStackClient) ->
assert_matches_type(HealthInfo, inspect, path=["response"])
@parametrize
- async def test_streaming_response_health(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_health(self, async_client: AsyncOgxClient) -> None:
async with async_client.inspect.with_streaming_response.health() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -105,12 +105,12 @@ async def test_streaming_response_health(self, async_client: AsyncLlamaStackClie
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_method_version(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_version(self, async_client: AsyncOgxClient) -> None:
inspect = await async_client.inspect.version()
assert_matches_type(VersionInfo, inspect, path=["response"])
@parametrize
- async def test_raw_response_version(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_version(self, async_client: AsyncOgxClient) -> None:
response = await async_client.inspect.with_raw_response.version()
assert response.is_closed is True
@@ -119,7 +119,7 @@ async def test_raw_response_version(self, async_client: AsyncLlamaStackClient) -
assert_matches_type(VersionInfo, inspect, path=["response"])
@parametrize
- async def test_streaming_response_version(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_version(self, async_client: AsyncOgxClient) -> None:
async with async_client.inspect.with_streaming_response.version() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
diff --git a/tests/api_resources/test_models.py b/tests/api_resources/test_models.py
index a50d3905..17e3f06c 100644
--- a/tests/api_resources/test_models.py
+++ b/tests/api_resources/test_models.py
@@ -13,9 +13,9 @@
import pytest
+from ogx_client import OgxClient, AsyncOgxClient
from tests.utils import assert_matches_type
-from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client.types import ModelListResponse, ModelRetrieveResponse
+from ogx_client.types import ListModelsResponse, ModelRetrieveResponse
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -24,14 +24,14 @@ class TestModels:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@parametrize
- def test_method_retrieve(self, client: LlamaStackClient) -> None:
+ def test_method_retrieve(self, client: OgxClient) -> None:
model = client.models.retrieve(
"model_id",
)
assert_matches_type(ModelRetrieveResponse, model, path=["response"])
@parametrize
- def test_raw_response_retrieve(self, client: LlamaStackClient) -> None:
+ def test_raw_response_retrieve(self, client: OgxClient) -> None:
response = client.models.with_raw_response.retrieve(
"model_id",
)
@@ -42,7 +42,7 @@ def test_raw_response_retrieve(self, client: LlamaStackClient) -> None:
assert_matches_type(ModelRetrieveResponse, model, path=["response"])
@parametrize
- def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_retrieve(self, client: OgxClient) -> None:
with client.models.with_streaming_response.retrieve(
"model_id",
) as response:
@@ -55,34 +55,34 @@ def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_path_params_retrieve(self, client: LlamaStackClient) -> None:
+ def test_path_params_retrieve(self, client: OgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `model_id` but received ''"):
client.models.with_raw_response.retrieve(
"",
)
@parametrize
- def test_method_list(self, client: LlamaStackClient) -> None:
+ def test_method_list(self, client: OgxClient) -> None:
model = client.models.list()
- assert_matches_type(ModelListResponse, model, path=["response"])
+ assert_matches_type(ListModelsResponse, model, path=["response"])
@parametrize
- def test_raw_response_list(self, client: LlamaStackClient) -> None:
+ def test_raw_response_list(self, client: OgxClient) -> None:
response = client.models.with_raw_response.list()
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
model = response.parse()
- assert_matches_type(ModelListResponse, model, path=["response"])
+ assert_matches_type(ListModelsResponse, model, path=["response"])
@parametrize
- def test_streaming_response_list(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_list(self, client: OgxClient) -> None:
with client.models.with_streaming_response.list() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
model = response.parse()
- assert_matches_type(ModelListResponse, model, path=["response"])
+ assert_matches_type(ListModelsResponse, model, path=["response"])
assert cast(Any, response.is_closed) is True
@@ -93,14 +93,14 @@ class TestAsyncModels:
)
@parametrize
- async def test_method_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_retrieve(self, async_client: AsyncOgxClient) -> None:
model = await async_client.models.retrieve(
"model_id",
)
assert_matches_type(ModelRetrieveResponse, model, path=["response"])
@parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_retrieve(self, async_client: AsyncOgxClient) -> None:
response = await async_client.models.with_raw_response.retrieve(
"model_id",
)
@@ -111,7 +111,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncLlamaStackClient)
assert_matches_type(ModelRetrieveResponse, model, path=["response"])
@parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_retrieve(self, async_client: AsyncOgxClient) -> None:
async with async_client.models.with_streaming_response.retrieve(
"model_id",
) as response:
@@ -124,33 +124,33 @@ async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackCl
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_path_params_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_path_params_retrieve(self, async_client: AsyncOgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `model_id` but received ''"):
await async_client.models.with_raw_response.retrieve(
"",
)
@parametrize
- async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_list(self, async_client: AsyncOgxClient) -> None:
model = await async_client.models.list()
- assert_matches_type(ModelListResponse, model, path=["response"])
+ assert_matches_type(ListModelsResponse, model, path=["response"])
@parametrize
- async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_list(self, async_client: AsyncOgxClient) -> None:
response = await async_client.models.with_raw_response.list()
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
model = await response.parse()
- assert_matches_type(ModelListResponse, model, path=["response"])
+ assert_matches_type(ListModelsResponse, model, path=["response"])
@parametrize
- async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_list(self, async_client: AsyncOgxClient) -> None:
async with async_client.models.with_streaming_response.list() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
model = await response.parse()
- assert_matches_type(ModelListResponse, model, path=["response"])
+ assert_matches_type(ListModelsResponse, model, path=["response"])
assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/test_moderations.py b/tests/api_resources/test_moderations.py
index 48b2ece3..8f6dd5dd 100644
--- a/tests/api_resources/test_moderations.py
+++ b/tests/api_resources/test_moderations.py
@@ -13,9 +13,9 @@
import pytest
+from ogx_client import OgxClient, AsyncOgxClient
from tests.utils import assert_matches_type
-from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client.types import CreateResponse
+from ogx_client.types import CreateResponse
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -24,14 +24,14 @@ class TestModerations:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@parametrize
- def test_method_create(self, client: LlamaStackClient) -> None:
+ def test_method_create(self, client: OgxClient) -> None:
moderation = client.moderations.create(
input="string",
)
assert_matches_type(CreateResponse, moderation, path=["response"])
@parametrize
- def test_method_create_with_all_params(self, client: LlamaStackClient) -> None:
+ def test_method_create_with_all_params(self, client: OgxClient) -> None:
moderation = client.moderations.create(
input="string",
model="model",
@@ -39,7 +39,7 @@ def test_method_create_with_all_params(self, client: LlamaStackClient) -> None:
assert_matches_type(CreateResponse, moderation, path=["response"])
@parametrize
- def test_raw_response_create(self, client: LlamaStackClient) -> None:
+ def test_raw_response_create(self, client: OgxClient) -> None:
response = client.moderations.with_raw_response.create(
input="string",
)
@@ -50,7 +50,7 @@ def test_raw_response_create(self, client: LlamaStackClient) -> None:
assert_matches_type(CreateResponse, moderation, path=["response"])
@parametrize
- def test_streaming_response_create(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_create(self, client: OgxClient) -> None:
with client.moderations.with_streaming_response.create(
input="string",
) as response:
@@ -69,14 +69,14 @@ class TestAsyncModerations:
)
@parametrize
- async def test_method_create(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_create(self, async_client: AsyncOgxClient) -> None:
moderation = await async_client.moderations.create(
input="string",
)
assert_matches_type(CreateResponse, moderation, path=["response"])
@parametrize
- async def test_method_create_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_create_with_all_params(self, async_client: AsyncOgxClient) -> None:
moderation = await async_client.moderations.create(
input="string",
model="model",
@@ -84,7 +84,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncLlamaStack
assert_matches_type(CreateResponse, moderation, path=["response"])
@parametrize
- async def test_raw_response_create(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_create(self, async_client: AsyncOgxClient) -> None:
response = await async_client.moderations.with_raw_response.create(
input="string",
)
@@ -95,7 +95,7 @@ async def test_raw_response_create(self, async_client: AsyncLlamaStackClient) ->
assert_matches_type(CreateResponse, moderation, path=["response"])
@parametrize
- async def test_streaming_response_create(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_create(self, async_client: AsyncOgxClient) -> None:
async with async_client.moderations.with_streaming_response.create(
input="string",
) as response:
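The moderations surface is a single call; a sketch under the same assumptions, using the placeholder values the tests themselves pass:

from ogx_client import OgxClient

client = OgxClient(base_url="http://127.0.0.1:4010")  # base_url kwarg assumed, as above

# `model` is optional: test_method_create omits it, the all-params variant sets it.
moderation = client.moderations.create(input="string", model="model")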
diff --git a/tests/api_resources/test_prompts.py b/tests/api_resources/test_prompts.py
index 5b7a81c9..35cbff8e 100644
--- a/tests/api_resources/test_prompts.py
+++ b/tests/api_resources/test_prompts.py
@@ -13,9 +13,9 @@
import pytest
+from ogx_client import OgxClient, AsyncOgxClient
from tests.utils import assert_matches_type
-from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client.types import (
+from ogx_client.types import (
Prompt,
PromptListResponse,
)
@@ -27,14 +27,14 @@ class TestPrompts:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@parametrize
- def test_method_create(self, client: LlamaStackClient) -> None:
+ def test_method_create(self, client: OgxClient) -> None:
prompt = client.prompts.create(
prompt="prompt",
)
assert_matches_type(Prompt, prompt, path=["response"])
@parametrize
- def test_method_create_with_all_params(self, client: LlamaStackClient) -> None:
+ def test_method_create_with_all_params(self, client: OgxClient) -> None:
prompt = client.prompts.create(
prompt="prompt",
variables=["string"],
@@ -42,7 +42,7 @@ def test_method_create_with_all_params(self, client: LlamaStackClient) -> None:
assert_matches_type(Prompt, prompt, path=["response"])
@parametrize
- def test_raw_response_create(self, client: LlamaStackClient) -> None:
+ def test_raw_response_create(self, client: OgxClient) -> None:
response = client.prompts.with_raw_response.create(
prompt="prompt",
)
@@ -53,7 +53,7 @@ def test_raw_response_create(self, client: LlamaStackClient) -> None:
assert_matches_type(Prompt, prompt, path=["response"])
@parametrize
- def test_streaming_response_create(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_create(self, client: OgxClient) -> None:
with client.prompts.with_streaming_response.create(
prompt="prompt",
) as response:
@@ -66,14 +66,14 @@ def test_streaming_response_create(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_method_retrieve(self, client: LlamaStackClient) -> None:
+ def test_method_retrieve(self, client: OgxClient) -> None:
prompt = client.prompts.retrieve(
prompt_id="prompt_id",
)
assert_matches_type(Prompt, prompt, path=["response"])
@parametrize
- def test_method_retrieve_with_all_params(self, client: LlamaStackClient) -> None:
+ def test_method_retrieve_with_all_params(self, client: OgxClient) -> None:
prompt = client.prompts.retrieve(
prompt_id="prompt_id",
version=0,
@@ -81,7 +81,7 @@ def test_method_retrieve_with_all_params(self, client: LlamaStackClient) -> None
assert_matches_type(Prompt, prompt, path=["response"])
@parametrize
- def test_raw_response_retrieve(self, client: LlamaStackClient) -> None:
+ def test_raw_response_retrieve(self, client: OgxClient) -> None:
response = client.prompts.with_raw_response.retrieve(
prompt_id="prompt_id",
)
@@ -92,7 +92,7 @@ def test_raw_response_retrieve(self, client: LlamaStackClient) -> None:
assert_matches_type(Prompt, prompt, path=["response"])
@parametrize
- def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_retrieve(self, client: OgxClient) -> None:
with client.prompts.with_streaming_response.retrieve(
prompt_id="prompt_id",
) as response:
@@ -105,14 +105,14 @@ def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_path_params_retrieve(self, client: LlamaStackClient) -> None:
+ def test_path_params_retrieve(self, client: OgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `prompt_id` but received ''"):
client.prompts.with_raw_response.retrieve(
prompt_id="",
)
@parametrize
- def test_method_update(self, client: LlamaStackClient) -> None:
+ def test_method_update(self, client: OgxClient) -> None:
prompt = client.prompts.update(
prompt_id="prompt_id",
prompt="prompt",
@@ -121,7 +121,7 @@ def test_method_update(self, client: LlamaStackClient) -> None:
assert_matches_type(Prompt, prompt, path=["response"])
@parametrize
- def test_method_update_with_all_params(self, client: LlamaStackClient) -> None:
+ def test_method_update_with_all_params(self, client: OgxClient) -> None:
prompt = client.prompts.update(
prompt_id="prompt_id",
prompt="prompt",
@@ -132,7 +132,7 @@ def test_method_update_with_all_params(self, client: LlamaStackClient) -> None:
assert_matches_type(Prompt, prompt, path=["response"])
@parametrize
- def test_raw_response_update(self, client: LlamaStackClient) -> None:
+ def test_raw_response_update(self, client: OgxClient) -> None:
response = client.prompts.with_raw_response.update(
prompt_id="prompt_id",
prompt="prompt",
@@ -145,7 +145,7 @@ def test_raw_response_update(self, client: LlamaStackClient) -> None:
assert_matches_type(Prompt, prompt, path=["response"])
@parametrize
- def test_streaming_response_update(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_update(self, client: OgxClient) -> None:
with client.prompts.with_streaming_response.update(
prompt_id="prompt_id",
prompt="prompt",
@@ -160,7 +160,7 @@ def test_streaming_response_update(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_path_params_update(self, client: LlamaStackClient) -> None:
+ def test_path_params_update(self, client: OgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `prompt_id` but received ''"):
client.prompts.with_raw_response.update(
prompt_id="",
@@ -169,12 +169,12 @@ def test_path_params_update(self, client: LlamaStackClient) -> None:
)
@parametrize
- def test_method_list(self, client: LlamaStackClient) -> None:
+ def test_method_list(self, client: OgxClient) -> None:
prompt = client.prompts.list()
assert_matches_type(PromptListResponse, prompt, path=["response"])
@parametrize
- def test_raw_response_list(self, client: LlamaStackClient) -> None:
+ def test_raw_response_list(self, client: OgxClient) -> None:
response = client.prompts.with_raw_response.list()
assert response.is_closed is True
@@ -183,7 +183,7 @@ def test_raw_response_list(self, client: LlamaStackClient) -> None:
assert_matches_type(PromptListResponse, prompt, path=["response"])
@parametrize
- def test_streaming_response_list(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_list(self, client: OgxClient) -> None:
with client.prompts.with_streaming_response.list() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -194,14 +194,14 @@ def test_streaming_response_list(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_method_delete(self, client: LlamaStackClient) -> None:
+ def test_method_delete(self, client: OgxClient) -> None:
prompt = client.prompts.delete(
"prompt_id",
)
assert prompt is None
@parametrize
- def test_raw_response_delete(self, client: LlamaStackClient) -> None:
+ def test_raw_response_delete(self, client: OgxClient) -> None:
response = client.prompts.with_raw_response.delete(
"prompt_id",
)
@@ -212,7 +212,7 @@ def test_raw_response_delete(self, client: LlamaStackClient) -> None:
assert prompt is None
@parametrize
- def test_streaming_response_delete(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_delete(self, client: OgxClient) -> None:
with client.prompts.with_streaming_response.delete(
"prompt_id",
) as response:
@@ -225,14 +225,14 @@ def test_streaming_response_delete(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_path_params_delete(self, client: LlamaStackClient) -> None:
+ def test_path_params_delete(self, client: OgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `prompt_id` but received ''"):
client.prompts.with_raw_response.delete(
"",
)
@parametrize
- def test_method_set_default_version(self, client: LlamaStackClient) -> None:
+ def test_method_set_default_version(self, client: OgxClient) -> None:
prompt = client.prompts.set_default_version(
prompt_id="prompt_id",
version=0,
@@ -240,7 +240,7 @@ def test_method_set_default_version(self, client: LlamaStackClient) -> None:
assert_matches_type(Prompt, prompt, path=["response"])
@parametrize
- def test_raw_response_set_default_version(self, client: LlamaStackClient) -> None:
+ def test_raw_response_set_default_version(self, client: OgxClient) -> None:
response = client.prompts.with_raw_response.set_default_version(
prompt_id="prompt_id",
version=0,
@@ -252,7 +252,7 @@ def test_raw_response_set_default_version(self, client: LlamaStackClient) -> Non
assert_matches_type(Prompt, prompt, path=["response"])
@parametrize
- def test_streaming_response_set_default_version(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_set_default_version(self, client: OgxClient) -> None:
with client.prompts.with_streaming_response.set_default_version(
prompt_id="prompt_id",
version=0,
@@ -266,7 +266,7 @@ def test_streaming_response_set_default_version(self, client: LlamaStackClient)
assert cast(Any, response.is_closed) is True
@parametrize
- def test_path_params_set_default_version(self, client: LlamaStackClient) -> None:
+ def test_path_params_set_default_version(self, client: OgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `prompt_id` but received ''"):
client.prompts.with_raw_response.set_default_version(
prompt_id="",
@@ -280,14 +280,14 @@ class TestAsyncPrompts:
)
@parametrize
- async def test_method_create(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_create(self, async_client: AsyncOgxClient) -> None:
prompt = await async_client.prompts.create(
prompt="prompt",
)
assert_matches_type(Prompt, prompt, path=["response"])
@parametrize
- async def test_method_create_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_create_with_all_params(self, async_client: AsyncOgxClient) -> None:
prompt = await async_client.prompts.create(
prompt="prompt",
variables=["string"],
@@ -295,7 +295,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncLlamaStack
assert_matches_type(Prompt, prompt, path=["response"])
@parametrize
- async def test_raw_response_create(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_create(self, async_client: AsyncOgxClient) -> None:
response = await async_client.prompts.with_raw_response.create(
prompt="prompt",
)
@@ -306,7 +306,7 @@ async def test_raw_response_create(self, async_client: AsyncLlamaStackClient) ->
assert_matches_type(Prompt, prompt, path=["response"])
@parametrize
- async def test_streaming_response_create(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_create(self, async_client: AsyncOgxClient) -> None:
async with async_client.prompts.with_streaming_response.create(
prompt="prompt",
) as response:
@@ -319,14 +319,14 @@ async def test_streaming_response_create(self, async_client: AsyncLlamaStackClie
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_method_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_retrieve(self, async_client: AsyncOgxClient) -> None:
prompt = await async_client.prompts.retrieve(
prompt_id="prompt_id",
)
assert_matches_type(Prompt, prompt, path=["response"])
@parametrize
- async def test_method_retrieve_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_retrieve_with_all_params(self, async_client: AsyncOgxClient) -> None:
prompt = await async_client.prompts.retrieve(
prompt_id="prompt_id",
version=0,
@@ -334,7 +334,7 @@ async def test_method_retrieve_with_all_params(self, async_client: AsyncLlamaSta
assert_matches_type(Prompt, prompt, path=["response"])
@parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_retrieve(self, async_client: AsyncOgxClient) -> None:
response = await async_client.prompts.with_raw_response.retrieve(
prompt_id="prompt_id",
)
@@ -345,7 +345,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncLlamaStackClient)
assert_matches_type(Prompt, prompt, path=["response"])
@parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_retrieve(self, async_client: AsyncOgxClient) -> None:
async with async_client.prompts.with_streaming_response.retrieve(
prompt_id="prompt_id",
) as response:
@@ -358,14 +358,14 @@ async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackCl
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_path_params_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_path_params_retrieve(self, async_client: AsyncOgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `prompt_id` but received ''"):
await async_client.prompts.with_raw_response.retrieve(
prompt_id="",
)
@parametrize
- async def test_method_update(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_update(self, async_client: AsyncOgxClient) -> None:
prompt = await async_client.prompts.update(
prompt_id="prompt_id",
prompt="prompt",
@@ -374,7 +374,7 @@ async def test_method_update(self, async_client: AsyncLlamaStackClient) -> None:
assert_matches_type(Prompt, prompt, path=["response"])
@parametrize
- async def test_method_update_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_update_with_all_params(self, async_client: AsyncOgxClient) -> None:
prompt = await async_client.prompts.update(
prompt_id="prompt_id",
prompt="prompt",
@@ -385,7 +385,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncLlamaStack
assert_matches_type(Prompt, prompt, path=["response"])
@parametrize
- async def test_raw_response_update(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_update(self, async_client: AsyncOgxClient) -> None:
response = await async_client.prompts.with_raw_response.update(
prompt_id="prompt_id",
prompt="prompt",
@@ -398,7 +398,7 @@ async def test_raw_response_update(self, async_client: AsyncLlamaStackClient) ->
assert_matches_type(Prompt, prompt, path=["response"])
@parametrize
- async def test_streaming_response_update(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_update(self, async_client: AsyncOgxClient) -> None:
async with async_client.prompts.with_streaming_response.update(
prompt_id="prompt_id",
prompt="prompt",
@@ -413,7 +413,7 @@ async def test_streaming_response_update(self, async_client: AsyncLlamaStackClie
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_path_params_update(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_path_params_update(self, async_client: AsyncOgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `prompt_id` but received ''"):
await async_client.prompts.with_raw_response.update(
prompt_id="",
@@ -422,12 +422,12 @@ async def test_path_params_update(self, async_client: AsyncLlamaStackClient) ->
)
@parametrize
- async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_list(self, async_client: AsyncOgxClient) -> None:
prompt = await async_client.prompts.list()
assert_matches_type(PromptListResponse, prompt, path=["response"])
@parametrize
- async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_list(self, async_client: AsyncOgxClient) -> None:
response = await async_client.prompts.with_raw_response.list()
assert response.is_closed is True
@@ -436,7 +436,7 @@ async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> N
assert_matches_type(PromptListResponse, prompt, path=["response"])
@parametrize
- async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_list(self, async_client: AsyncOgxClient) -> None:
async with async_client.prompts.with_streaming_response.list() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -447,14 +447,14 @@ async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_method_delete(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_delete(self, async_client: AsyncOgxClient) -> None:
prompt = await async_client.prompts.delete(
"prompt_id",
)
assert prompt is None
@parametrize
- async def test_raw_response_delete(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_delete(self, async_client: AsyncOgxClient) -> None:
response = await async_client.prompts.with_raw_response.delete(
"prompt_id",
)
@@ -465,7 +465,7 @@ async def test_raw_response_delete(self, async_client: AsyncLlamaStackClient) ->
assert prompt is None
@parametrize
- async def test_streaming_response_delete(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_delete(self, async_client: AsyncOgxClient) -> None:
async with async_client.prompts.with_streaming_response.delete(
"prompt_id",
) as response:
@@ -478,14 +478,14 @@ async def test_streaming_response_delete(self, async_client: AsyncLlamaStackClie
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_path_params_delete(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_path_params_delete(self, async_client: AsyncOgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `prompt_id` but received ''"):
await async_client.prompts.with_raw_response.delete(
"",
)
@parametrize
- async def test_method_set_default_version(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_set_default_version(self, async_client: AsyncOgxClient) -> None:
prompt = await async_client.prompts.set_default_version(
prompt_id="prompt_id",
version=0,
@@ -493,7 +493,7 @@ async def test_method_set_default_version(self, async_client: AsyncLlamaStackCli
assert_matches_type(Prompt, prompt, path=["response"])
@parametrize
- async def test_raw_response_set_default_version(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_set_default_version(self, async_client: AsyncOgxClient) -> None:
response = await async_client.prompts.with_raw_response.set_default_version(
prompt_id="prompt_id",
version=0,
@@ -505,7 +505,7 @@ async def test_raw_response_set_default_version(self, async_client: AsyncLlamaSt
assert_matches_type(Prompt, prompt, path=["response"])
@parametrize
- async def test_streaming_response_set_default_version(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_set_default_version(self, async_client: AsyncOgxClient) -> None:
async with async_client.prompts.with_streaming_response.set_default_version(
prompt_id="prompt_id",
version=0,
@@ -519,7 +519,7 @@ async def test_streaming_response_set_default_version(self, async_client: AsyncL
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_path_params_set_default_version(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_path_params_set_default_version(self, async_client: AsyncOgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `prompt_id` but received ''"):
await async_client.prompts.with_raw_response.set_default_version(
prompt_id="",
diff --git a/tests/api_resources/test_providers.py b/tests/api_resources/test_providers.py
index 80301144..d78e5620 100644
--- a/tests/api_resources/test_providers.py
+++ b/tests/api_resources/test_providers.py
@@ -13,10 +13,10 @@
import pytest
+from ogx_client import OgxClient, AsyncOgxClient
from tests.utils import assert_matches_type
-from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client.types import ProviderListResponse
-from llama_stack_client.types.shared import ProviderInfo
+from ogx_client.types import ProviderListResponse
+from ogx_client.types.shared import ProviderInfo
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -25,14 +25,14 @@ class TestProviders:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@parametrize
- def test_method_retrieve(self, client: LlamaStackClient) -> None:
+ def test_method_retrieve(self, client: OgxClient) -> None:
provider = client.providers.retrieve(
"provider_id",
)
assert_matches_type(ProviderInfo, provider, path=["response"])
@parametrize
- def test_raw_response_retrieve(self, client: LlamaStackClient) -> None:
+ def test_raw_response_retrieve(self, client: OgxClient) -> None:
response = client.providers.with_raw_response.retrieve(
"provider_id",
)
@@ -43,7 +43,7 @@ def test_raw_response_retrieve(self, client: LlamaStackClient) -> None:
assert_matches_type(ProviderInfo, provider, path=["response"])
@parametrize
- def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_retrieve(self, client: OgxClient) -> None:
with client.providers.with_streaming_response.retrieve(
"provider_id",
) as response:
@@ -56,19 +56,19 @@ def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_path_params_retrieve(self, client: LlamaStackClient) -> None:
+ def test_path_params_retrieve(self, client: OgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `provider_id` but received ''"):
client.providers.with_raw_response.retrieve(
"",
)
@parametrize
- def test_method_list(self, client: LlamaStackClient) -> None:
+ def test_method_list(self, client: OgxClient) -> None:
provider = client.providers.list()
assert_matches_type(ProviderListResponse, provider, path=["response"])
@parametrize
- def test_raw_response_list(self, client: LlamaStackClient) -> None:
+ def test_raw_response_list(self, client: OgxClient) -> None:
response = client.providers.with_raw_response.list()
assert response.is_closed is True
@@ -77,7 +77,7 @@ def test_raw_response_list(self, client: LlamaStackClient) -> None:
assert_matches_type(ProviderListResponse, provider, path=["response"])
@parametrize
- def test_streaming_response_list(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_list(self, client: OgxClient) -> None:
with client.providers.with_streaming_response.list() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -94,14 +94,14 @@ class TestAsyncProviders:
)
@parametrize
- async def test_method_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_retrieve(self, async_client: AsyncOgxClient) -> None:
provider = await async_client.providers.retrieve(
"provider_id",
)
assert_matches_type(ProviderInfo, provider, path=["response"])
@parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_retrieve(self, async_client: AsyncOgxClient) -> None:
response = await async_client.providers.with_raw_response.retrieve(
"provider_id",
)
@@ -112,7 +112,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncLlamaStackClient)
assert_matches_type(ProviderInfo, provider, path=["response"])
@parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_retrieve(self, async_client: AsyncOgxClient) -> None:
async with async_client.providers.with_streaming_response.retrieve(
"provider_id",
) as response:
@@ -125,19 +125,19 @@ async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackCl
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_path_params_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_path_params_retrieve(self, async_client: AsyncOgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `provider_id` but received ''"):
await async_client.providers.with_raw_response.retrieve(
"",
)
@parametrize
- async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_list(self, async_client: AsyncOgxClient) -> None:
provider = await async_client.providers.list()
assert_matches_type(ProviderListResponse, provider, path=["response"])
@parametrize
- async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_list(self, async_client: AsyncOgxClient) -> None:
response = await async_client.providers.with_raw_response.list()
assert response.is_closed is True
@@ -146,7 +146,7 @@ async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> N
assert_matches_type(ProviderListResponse, provider, path=["response"])
@parametrize
- async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_list(self, async_client: AsyncOgxClient) -> None:
async with async_client.providers.with_streaming_response.list() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
diff --git a/tests/api_resources/test_responses.py b/tests/api_resources/test_responses.py
index 97e3084d..4781c912 100644
--- a/tests/api_resources/test_responses.py
+++ b/tests/api_resources/test_responses.py
@@ -13,14 +13,15 @@
import pytest
+from ogx_client import OgxClient, AsyncOgxClient
from tests.utils import assert_matches_type
-from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client.types import (
+from ogx_client.types import (
ResponseObject,
+ CompactedResponse,
ResponseListResponse,
ResponseDeleteResponse,
)
-from llama_stack_client.pagination import SyncOpenAICursorPage, AsyncOpenAICursorPage
+from ogx_client.pagination import SyncOpenAICursorPage, AsyncOpenAICursorPage
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -29,7 +30,7 @@ class TestResponses:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@parametrize
- def test_method_create_overload_1(self, client: LlamaStackClient) -> None:
+ def test_method_create_overload_1(self, client: OgxClient) -> None:
response = client.responses.create(
input="string",
model="model",
@@ -37,11 +38,17 @@ def test_method_create_overload_1(self, client: LlamaStackClient) -> None:
assert_matches_type(ResponseObject, response, path=["response"])
@parametrize
- def test_method_create_with_all_params_overload_1(self, client: LlamaStackClient) -> None:
+ def test_method_create_with_all_params_overload_1(self, client: OgxClient) -> None:
response = client.responses.create(
input="string",
model="model",
background=True,
+ context_management=[
+ {
+ "type": "compaction",
+ "compact_threshold": 0,
+ }
+ ],
conversation="conversation",
frequency_penalty=-2,
guardrails=["string"],
@@ -82,7 +89,8 @@ def test_method_create_with_all_params_overload_1(self, client: LlamaStackClient
"schema": {"foo": "bar"},
"strict": True,
"type": "text",
- }
+ },
+ "verbosity": "low",
},
tool_choice="auto",
tools=[
@@ -98,7 +106,7 @@ def test_method_create_with_all_params_overload_1(self, client: LlamaStackClient
assert_matches_type(ResponseObject, response, path=["response"])
@parametrize
- def test_raw_response_create_overload_1(self, client: LlamaStackClient) -> None:
+ def test_raw_response_create_overload_1(self, client: OgxClient) -> None:
http_response = client.responses.with_raw_response.create(
input="string",
model="model",
@@ -110,7 +118,7 @@ def test_raw_response_create_overload_1(self, client: LlamaStackClient) -> None:
assert_matches_type(ResponseObject, response, path=["response"])
@parametrize
- def test_streaming_response_create_overload_1(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_create_overload_1(self, client: OgxClient) -> None:
with client.responses.with_streaming_response.create(
input="string",
model="model",
@@ -124,7 +132,7 @@ def test_streaming_response_create_overload_1(self, client: LlamaStackClient) ->
assert cast(Any, http_response.is_closed) is True
@parametrize
- def test_method_create_overload_2(self, client: LlamaStackClient) -> None:
+ def test_method_create_overload_2(self, client: OgxClient) -> None:
response_stream = client.responses.create(
input="string",
model="model",
@@ -133,12 +141,18 @@ def test_method_create_overload_2(self, client: LlamaStackClient) -> None:
response_stream.response.close()
@parametrize
- def test_method_create_with_all_params_overload_2(self, client: LlamaStackClient) -> None:
+ def test_method_create_with_all_params_overload_2(self, client: OgxClient) -> None:
response_stream = client.responses.create(
input="string",
model="model",
stream=True,
background=True,
+ context_management=[
+ {
+ "type": "compaction",
+ "compact_threshold": 0,
+ }
+ ],
conversation="conversation",
frequency_penalty=-2,
guardrails=["string"],
@@ -178,7 +192,8 @@ def test_method_create_with_all_params_overload_2(self, client: LlamaStackClient
"schema": {"foo": "bar"},
"strict": True,
"type": "text",
- }
+ },
+ "verbosity": "low",
},
tool_choice="auto",
tools=[
@@ -194,7 +209,7 @@ def test_method_create_with_all_params_overload_2(self, client: LlamaStackClient
response_stream.response.close()
@parametrize
- def test_raw_response_create_overload_2(self, client: LlamaStackClient) -> None:
+ def test_raw_response_create_overload_2(self, client: OgxClient) -> None:
response = client.responses.with_raw_response.create(
input="string",
model="model",
@@ -206,7 +221,7 @@ def test_raw_response_create_overload_2(self, client: LlamaStackClient) -> None:
stream.close()
@parametrize
- def test_streaming_response_create_overload_2(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_create_overload_2(self, client: OgxClient) -> None:
with client.responses.with_streaming_response.create(
input="string",
model="model",
@@ -221,14 +236,14 @@ def test_streaming_response_create_overload_2(self, client: LlamaStackClient) ->
assert cast(Any, response.is_closed) is True
@parametrize
- def test_method_retrieve(self, client: LlamaStackClient) -> None:
+ def test_method_retrieve(self, client: OgxClient) -> None:
response = client.responses.retrieve(
"response_id",
)
assert_matches_type(ResponseObject, response, path=["response"])
@parametrize
- def test_raw_response_retrieve(self, client: LlamaStackClient) -> None:
+ def test_raw_response_retrieve(self, client: OgxClient) -> None:
http_response = client.responses.with_raw_response.retrieve(
"response_id",
)
@@ -239,7 +254,7 @@ def test_raw_response_retrieve(self, client: LlamaStackClient) -> None:
assert_matches_type(ResponseObject, response, path=["response"])
@parametrize
- def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_retrieve(self, client: OgxClient) -> None:
with client.responses.with_streaming_response.retrieve(
"response_id",
) as http_response:
@@ -252,19 +267,19 @@ def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None:
assert cast(Any, http_response.is_closed) is True
@parametrize
- def test_path_params_retrieve(self, client: LlamaStackClient) -> None:
+ def test_path_params_retrieve(self, client: OgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"):
client.responses.with_raw_response.retrieve(
"",
)
@parametrize
- def test_method_list(self, client: LlamaStackClient) -> None:
+ def test_method_list(self, client: OgxClient) -> None:
response = client.responses.list()
assert_matches_type(SyncOpenAICursorPage[ResponseListResponse], response, path=["response"])
@parametrize
- def test_method_list_with_all_params(self, client: LlamaStackClient) -> None:
+ def test_method_list_with_all_params(self, client: OgxClient) -> None:
response = client.responses.list(
after="after",
limit=0,
@@ -274,7 +289,7 @@ def test_method_list_with_all_params(self, client: LlamaStackClient) -> None:
assert_matches_type(SyncOpenAICursorPage[ResponseListResponse], response, path=["response"])
@parametrize
- def test_raw_response_list(self, client: LlamaStackClient) -> None:
+ def test_raw_response_list(self, client: OgxClient) -> None:
http_response = client.responses.with_raw_response.list()
assert http_response.is_closed is True
@@ -283,7 +298,7 @@ def test_raw_response_list(self, client: LlamaStackClient) -> None:
assert_matches_type(SyncOpenAICursorPage[ResponseListResponse], response, path=["response"])
@parametrize
- def test_streaming_response_list(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_list(self, client: OgxClient) -> None:
with client.responses.with_streaming_response.list() as http_response:
assert not http_response.is_closed
assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -294,14 +309,14 @@ def test_streaming_response_list(self, client: LlamaStackClient) -> None:
assert cast(Any, http_response.is_closed) is True
@parametrize
- def test_method_delete(self, client: LlamaStackClient) -> None:
+ def test_method_delete(self, client: OgxClient) -> None:
response = client.responses.delete(
"response_id",
)
assert_matches_type(ResponseDeleteResponse, response, path=["response"])
@parametrize
- def test_raw_response_delete(self, client: LlamaStackClient) -> None:
+ def test_raw_response_delete(self, client: OgxClient) -> None:
http_response = client.responses.with_raw_response.delete(
"response_id",
)
@@ -312,7 +327,7 @@ def test_raw_response_delete(self, client: LlamaStackClient) -> None:
assert_matches_type(ResponseDeleteResponse, response, path=["response"])
@parametrize
- def test_streaming_response_delete(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_delete(self, client: OgxClient) -> None:
with client.responses.with_streaming_response.delete(
"response_id",
) as http_response:
@@ -325,12 +340,75 @@ def test_streaming_response_delete(self, client: LlamaStackClient) -> None:
assert cast(Any, http_response.is_closed) is True
@parametrize
- def test_path_params_delete(self, client: LlamaStackClient) -> None:
+ def test_path_params_delete(self, client: OgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"):
client.responses.with_raw_response.delete(
"",
)
+ @parametrize
+ def test_method_compact(self, client: OgxClient) -> None:
+ response = client.responses.compact(
+ model="model",
+ )
+ assert_matches_type(CompactedResponse, response, path=["response"])
+
+ @parametrize
+ def test_method_compact_with_all_params(self, client: OgxClient) -> None:
+ response = client.responses.compact(
+ model="model",
+ input="string",
+ instructions="instructions",
+ parallel_tool_calls=True,
+ previous_response_id="previous_response_id",
+ prompt_cache_key="prompt_cache_key",
+ reasoning={
+ "effort": "none",
+ "summary": "auto",
+ },
+ text={
+ "format": {
+ "description": "description",
+ "name": "name",
+ "schema": {"foo": "bar"},
+ "strict": True,
+ "type": "text",
+ },
+ "verbosity": "low",
+ },
+ tools=[
+ {
+ "search_context_size": 'S?oC"high',
+ "type": "web_search",
+ }
+ ],
+ )
+ assert_matches_type(CompactedResponse, response, path=["response"])
+
+ @parametrize
+ def test_raw_response_compact(self, client: OgxClient) -> None:
+ http_response = client.responses.with_raw_response.compact(
+ model="model",
+ )
+
+ assert http_response.is_closed is True
+ assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
+ response = http_response.parse()
+ assert_matches_type(CompactedResponse, response, path=["response"])
+
+ @parametrize
+ def test_streaming_response_compact(self, client: OgxClient) -> None:
+ with client.responses.with_streaming_response.compact(
+ model="model",
+ ) as http_response:
+ assert not http_response.is_closed
+ assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ response = http_response.parse()
+ assert_matches_type(CompactedResponse, response, path=["response"])
+
+ assert cast(Any, http_response.is_closed) is True
+
class TestAsyncResponses:
parametrize = pytest.mark.parametrize(
@@ -338,7 +416,7 @@ class TestAsyncResponses:
)
@parametrize
- async def test_method_create_overload_1(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_create_overload_1(self, async_client: AsyncOgxClient) -> None:
response = await async_client.responses.create(
input="string",
model="model",
@@ -346,11 +424,17 @@ async def test_method_create_overload_1(self, async_client: AsyncLlamaStackClien
assert_matches_type(ResponseObject, response, path=["response"])
@parametrize
- async def test_method_create_with_all_params_overload_1(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_create_with_all_params_overload_1(self, async_client: AsyncOgxClient) -> None:
response = await async_client.responses.create(
input="string",
model="model",
background=True,
+ context_management=[
+ {
+ "type": "compaction",
+ "compact_threshold": 0,
+ }
+ ],
conversation="conversation",
frequency_penalty=-2,
guardrails=["string"],
@@ -391,7 +475,8 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
"schema": {"foo": "bar"},
"strict": True,
"type": "text",
- }
+ },
+ "verbosity": "low",
},
tool_choice="auto",
tools=[
@@ -407,7 +492,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
assert_matches_type(ResponseObject, response, path=["response"])
@parametrize
- async def test_raw_response_create_overload_1(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_create_overload_1(self, async_client: AsyncOgxClient) -> None:
http_response = await async_client.responses.with_raw_response.create(
input="string",
model="model",
@@ -419,7 +504,7 @@ async def test_raw_response_create_overload_1(self, async_client: AsyncLlamaStac
assert_matches_type(ResponseObject, response, path=["response"])
@parametrize
- async def test_streaming_response_create_overload_1(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_create_overload_1(self, async_client: AsyncOgxClient) -> None:
async with async_client.responses.with_streaming_response.create(
input="string",
model="model",
@@ -433,7 +518,7 @@ async def test_streaming_response_create_overload_1(self, async_client: AsyncLla
assert cast(Any, http_response.is_closed) is True
@parametrize
- async def test_method_create_overload_2(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_create_overload_2(self, async_client: AsyncOgxClient) -> None:
response_stream = await async_client.responses.create(
input="string",
model="model",
@@ -442,12 +527,18 @@ async def test_method_create_overload_2(self, async_client: AsyncLlamaStackClien
await response_stream.response.aclose()
@parametrize
- async def test_method_create_with_all_params_overload_2(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_create_with_all_params_overload_2(self, async_client: AsyncOgxClient) -> None:
response_stream = await async_client.responses.create(
input="string",
model="model",
stream=True,
background=True,
+ context_management=[
+ {
+ "type": "compaction",
+ "compact_threshold": 0,
+ }
+ ],
conversation="conversation",
frequency_penalty=-2,
guardrails=["string"],
@@ -487,7 +578,8 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
"schema": {"foo": "bar"},
"strict": True,
"type": "text",
- }
+ },
+ "verbosity": "low",
},
tool_choice="auto",
tools=[
@@ -503,7 +595,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
await response_stream.response.aclose()
@parametrize
- async def test_raw_response_create_overload_2(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_create_overload_2(self, async_client: AsyncOgxClient) -> None:
response = await async_client.responses.with_raw_response.create(
input="string",
model="model",
@@ -515,7 +607,7 @@ async def test_raw_response_create_overload_2(self, async_client: AsyncLlamaStac
await stream.close()
@parametrize
- async def test_streaming_response_create_overload_2(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_create_overload_2(self, async_client: AsyncOgxClient) -> None:
async with async_client.responses.with_streaming_response.create(
input="string",
model="model",
@@ -530,14 +622,14 @@ async def test_streaming_response_create_overload_2(self, async_client: AsyncLla
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_method_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_retrieve(self, async_client: AsyncOgxClient) -> None:
response = await async_client.responses.retrieve(
"response_id",
)
assert_matches_type(ResponseObject, response, path=["response"])
@parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_retrieve(self, async_client: AsyncOgxClient) -> None:
http_response = await async_client.responses.with_raw_response.retrieve(
"response_id",
)
@@ -548,7 +640,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncLlamaStackClient)
assert_matches_type(ResponseObject, response, path=["response"])
@parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_retrieve(self, async_client: AsyncOgxClient) -> None:
async with async_client.responses.with_streaming_response.retrieve(
"response_id",
) as http_response:
@@ -561,19 +653,19 @@ async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackCl
assert cast(Any, http_response.is_closed) is True
@parametrize
- async def test_path_params_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_path_params_retrieve(self, async_client: AsyncOgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"):
await async_client.responses.with_raw_response.retrieve(
"",
)
@parametrize
- async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_list(self, async_client: AsyncOgxClient) -> None:
response = await async_client.responses.list()
assert_matches_type(AsyncOpenAICursorPage[ResponseListResponse], response, path=["response"])
@parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_list_with_all_params(self, async_client: AsyncOgxClient) -> None:
response = await async_client.responses.list(
after="after",
limit=0,
@@ -583,7 +675,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncLlamaStackCl
assert_matches_type(AsyncOpenAICursorPage[ResponseListResponse], response, path=["response"])
@parametrize
- async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_list(self, async_client: AsyncOgxClient) -> None:
http_response = await async_client.responses.with_raw_response.list()
assert http_response.is_closed is True
@@ -592,7 +684,7 @@ async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> N
assert_matches_type(AsyncOpenAICursorPage[ResponseListResponse], response, path=["response"])
@parametrize
- async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_list(self, async_client: AsyncOgxClient) -> None:
async with async_client.responses.with_streaming_response.list() as http_response:
assert not http_response.is_closed
assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -603,14 +695,14 @@ async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient
assert cast(Any, http_response.is_closed) is True
@parametrize
- async def test_method_delete(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_delete(self, async_client: AsyncOgxClient) -> None:
response = await async_client.responses.delete(
"response_id",
)
assert_matches_type(ResponseDeleteResponse, response, path=["response"])
@parametrize
- async def test_raw_response_delete(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_delete(self, async_client: AsyncOgxClient) -> None:
http_response = await async_client.responses.with_raw_response.delete(
"response_id",
)
@@ -621,7 +713,7 @@ async def test_raw_response_delete(self, async_client: AsyncLlamaStackClient) ->
assert_matches_type(ResponseDeleteResponse, response, path=["response"])
@parametrize
- async def test_streaming_response_delete(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_delete(self, async_client: AsyncOgxClient) -> None:
async with async_client.responses.with_streaming_response.delete(
"response_id",
) as http_response:
@@ -634,8 +726,71 @@ async def test_streaming_response_delete(self, async_client: AsyncLlamaStackClie
assert cast(Any, http_response.is_closed) is True
@parametrize
- async def test_path_params_delete(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_path_params_delete(self, async_client: AsyncOgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"):
await async_client.responses.with_raw_response.delete(
"",
)
+
+ @parametrize
+ async def test_method_compact(self, async_client: AsyncOgxClient) -> None:
+ response = await async_client.responses.compact(
+ model="model",
+ )
+ assert_matches_type(CompactedResponse, response, path=["response"])
+
+ @parametrize
+ async def test_method_compact_with_all_params(self, async_client: AsyncOgxClient) -> None:
+ response = await async_client.responses.compact(
+ model="model",
+ input="string",
+ instructions="instructions",
+ parallel_tool_calls=True,
+ previous_response_id="previous_response_id",
+ prompt_cache_key="prompt_cache_key",
+ reasoning={
+ "effort": "none",
+ "summary": "auto",
+ },
+ text={
+ "format": {
+ "description": "description",
+ "name": "name",
+ "schema": {"foo": "bar"},
+ "strict": True,
+ "type": "text",
+ },
+ "verbosity": "low",
+ },
+ tools=[
+ {
+ "search_context_size": 'S?oC"high',
+ "type": "web_search",
+ }
+ ],
+ )
+ assert_matches_type(CompactedResponse, response, path=["response"])
+
+ @parametrize
+ async def test_raw_response_compact(self, async_client: AsyncOgxClient) -> None:
+ http_response = await async_client.responses.with_raw_response.compact(
+ model="model",
+ )
+
+ assert http_response.is_closed is True
+ assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
+ response = await http_response.parse()
+ assert_matches_type(CompactedResponse, response, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_compact(self, async_client: AsyncOgxClient) -> None:
+ async with async_client.responses.with_streaming_response.compact(
+ model="model",
+ ) as http_response:
+ assert not http_response.is_closed
+ assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ response = await http_response.parse()
+ assert_matches_type(CompactedResponse, response, path=["response"])
+
+ assert cast(Any, http_response.is_closed) is True
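responses.compact is the genuinely new endpoint in this file, alongside the context_management knob on create. A sketch of both, restricted to the parameters these tests establish (model is the sole required argument to compact):

from ogx_client import OgxClient

client = OgxClient(base_url="http://127.0.0.1:4010")  # base_url kwarg assumed

# Inline compaction policy on create: compact once the context passes a threshold.
response = client.responses.create(
    input="string",
    model="model",
    context_management=[{"type": "compaction", "compact_threshold": 0}],
)

# Explicit compaction against an earlier response chain; returns a CompactedResponse.
compacted = client.responses.compact(
    model="model",
    previous_response_id="previous_response_id",
)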
diff --git a/tests/api_resources/test_routes.py b/tests/api_resources/test_routes.py
index 58ab8ad9..d2b6c232 100644
--- a/tests/api_resources/test_routes.py
+++ b/tests/api_resources/test_routes.py
@@ -13,9 +13,9 @@
import pytest
+from ogx_client import OgxClient, AsyncOgxClient
from tests.utils import assert_matches_type
-from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client.types import RouteListResponse
+from ogx_client.types import RouteListResponse
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -24,19 +24,19 @@ class TestRoutes:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@parametrize
- def test_method_list(self, client: LlamaStackClient) -> None:
+ def test_method_list(self, client: OgxClient) -> None:
route = client.routes.list()
assert_matches_type(RouteListResponse, route, path=["response"])
@parametrize
- def test_method_list_with_all_params(self, client: LlamaStackClient) -> None:
+ def test_method_list_with_all_params(self, client: OgxClient) -> None:
route = client.routes.list(
api_filter="v1",
)
assert_matches_type(RouteListResponse, route, path=["response"])
@parametrize
- def test_raw_response_list(self, client: LlamaStackClient) -> None:
+ def test_raw_response_list(self, client: OgxClient) -> None:
response = client.routes.with_raw_response.list()
assert response.is_closed is True
@@ -45,7 +45,7 @@ def test_raw_response_list(self, client: LlamaStackClient) -> None:
assert_matches_type(RouteListResponse, route, path=["response"])
@parametrize
- def test_streaming_response_list(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_list(self, client: OgxClient) -> None:
with client.routes.with_streaming_response.list() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -62,19 +62,19 @@ class TestAsyncRoutes:
)
@parametrize
- async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_list(self, async_client: AsyncOgxClient) -> None:
route = await async_client.routes.list()
assert_matches_type(RouteListResponse, route, path=["response"])
@parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_list_with_all_params(self, async_client: AsyncOgxClient) -> None:
route = await async_client.routes.list(
api_filter="v1",
)
assert_matches_type(RouteListResponse, route, path=["response"])
@parametrize
- async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_list(self, async_client: AsyncOgxClient) -> None:
response = await async_client.routes.with_raw_response.list()
assert response.is_closed is True
@@ -83,7 +83,7 @@ async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> N
assert_matches_type(RouteListResponse, route, path=["response"])
@parametrize
- async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_list(self, async_client: AsyncOgxClient) -> None:
async with async_client.routes.with_streaming_response.list() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
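Routes round out the read-only surfaces; the only parameter the tests exercise is an API-version filter:

from ogx_client import OgxClient

client = OgxClient(base_url="http://127.0.0.1:4010")  # base_url kwarg assumed

routes = client.routes.list(api_filter="v1")  # RouteListResponse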
diff --git a/tests/api_resources/test_safety.py b/tests/api_resources/test_safety.py
index 7f3be9e4..a85dccdc 100644
--- a/tests/api_resources/test_safety.py
+++ b/tests/api_resources/test_safety.py
@@ -13,9 +13,9 @@
import pytest
+from ogx_client import OgxClient, AsyncOgxClient
from tests.utils import assert_matches_type
-from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client.types import RunShieldResponse
+from ogx_client.types import RunShieldResponse
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -24,7 +24,7 @@ class TestSafety:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@parametrize
- def test_method_run_shield(self, client: LlamaStackClient) -> None:
+ def test_method_run_shield(self, client: OgxClient) -> None:
safety = client.safety.run_shield(
messages=[
{
@@ -37,7 +37,7 @@ def test_method_run_shield(self, client: LlamaStackClient) -> None:
assert_matches_type(RunShieldResponse, safety, path=["response"])
@parametrize
- def test_raw_response_run_shield(self, client: LlamaStackClient) -> None:
+ def test_raw_response_run_shield(self, client: OgxClient) -> None:
response = client.safety.with_raw_response.run_shield(
messages=[
{
@@ -54,7 +54,7 @@ def test_raw_response_run_shield(self, client: LlamaStackClient) -> None:
assert_matches_type(RunShieldResponse, safety, path=["response"])
@parametrize
- def test_streaming_response_run_shield(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_run_shield(self, client: OgxClient) -> None:
with client.safety.with_streaming_response.run_shield(
messages=[
{
@@ -79,7 +79,7 @@ class TestAsyncSafety:
)
@parametrize
- async def test_method_run_shield(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_run_shield(self, async_client: AsyncOgxClient) -> None:
safety = await async_client.safety.run_shield(
messages=[
{
@@ -92,7 +92,7 @@ async def test_method_run_shield(self, async_client: AsyncLlamaStackClient) -> N
assert_matches_type(RunShieldResponse, safety, path=["response"])
@parametrize
- async def test_raw_response_run_shield(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_run_shield(self, async_client: AsyncOgxClient) -> None:
response = await async_client.safety.with_raw_response.run_shield(
messages=[
{
@@ -109,7 +109,7 @@ async def test_raw_response_run_shield(self, async_client: AsyncLlamaStackClient
assert_matches_type(RunShieldResponse, safety, path=["response"])
@parametrize
- async def test_streaming_response_run_shield(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_run_shield(self, async_client: AsyncOgxClient) -> None:
async with async_client.safety.with_streaming_response.run_shield(
messages=[
{
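The safety tests swap client types but leave `run_shield` intact. A sync sketch of the call; the message bodies in the hunks above are truncated, so the message shape and the `shield_id` parameter here are assumptions carried over from the upstream Llama Stack API:

    from ogx_client import OgxClient
    from ogx_client.types import RunShieldResponse

    client = OgxClient(base_url="http://127.0.0.1:4010")  # assumed constructor
    result: RunShieldResponse = client.safety.run_shield(
        shield_id="shield_id",  # hypothetical identifier
        messages=[{"role": "user", "content": "hello"}],  # illustrative shape
    )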
diff --git a/tests/api_resources/test_scoring.py b/tests/api_resources/test_scoring.py
deleted file mode 100644
index 634348a8..00000000
--- a/tests/api_resources/test_scoring.py
+++ /dev/null
@@ -1,263 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client.types import (
- ScoringScoreResponse,
- ScoringScoreBatchResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestScoring:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @parametrize
- def test_method_score(self, client: LlamaStackClient) -> None:
- scoring = client.scoring.score(
- input_rows=[{"foo": "bar"}],
- scoring_functions={
- "foo": {
- "judge_model": "judge_model",
- "type": "llm_as_judge",
- }
- },
- )
- assert_matches_type(ScoringScoreResponse, scoring, path=["response"])
-
- @parametrize
- def test_raw_response_score(self, client: LlamaStackClient) -> None:
- response = client.scoring.with_raw_response.score(
- input_rows=[{"foo": "bar"}],
- scoring_functions={
- "foo": {
- "judge_model": "judge_model",
- "type": "llm_as_judge",
- }
- },
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- scoring = response.parse()
- assert_matches_type(ScoringScoreResponse, scoring, path=["response"])
-
- @parametrize
- def test_streaming_response_score(self, client: LlamaStackClient) -> None:
- with client.scoring.with_streaming_response.score(
- input_rows=[{"foo": "bar"}],
- scoring_functions={
- "foo": {
- "judge_model": "judge_model",
- "type": "llm_as_judge",
- }
- },
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- scoring = response.parse()
- assert_matches_type(ScoringScoreResponse, scoring, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_method_score_batch(self, client: LlamaStackClient) -> None:
- scoring = client.scoring.score_batch(
- dataset_id="dataset_id",
- scoring_functions={
- "foo": {
- "judge_model": "judge_model",
- "type": "llm_as_judge",
- }
- },
- )
- assert_matches_type(ScoringScoreBatchResponse, scoring, path=["response"])
-
- @parametrize
- def test_method_score_batch_with_all_params(self, client: LlamaStackClient) -> None:
- scoring = client.scoring.score_batch(
- dataset_id="dataset_id",
- scoring_functions={
- "foo": {
- "judge_model": "judge_model",
- "aggregation_functions": ["average"],
- "judge_score_regexes": ["string"],
- "prompt_template": "prompt_template",
- "type": "llm_as_judge",
- }
- },
- save_results_dataset=True,
- )
- assert_matches_type(ScoringScoreBatchResponse, scoring, path=["response"])
-
- @parametrize
- def test_raw_response_score_batch(self, client: LlamaStackClient) -> None:
- response = client.scoring.with_raw_response.score_batch(
- dataset_id="dataset_id",
- scoring_functions={
- "foo": {
- "judge_model": "judge_model",
- "type": "llm_as_judge",
- }
- },
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- scoring = response.parse()
- assert_matches_type(ScoringScoreBatchResponse, scoring, path=["response"])
-
- @parametrize
- def test_streaming_response_score_batch(self, client: LlamaStackClient) -> None:
- with client.scoring.with_streaming_response.score_batch(
- dataset_id="dataset_id",
- scoring_functions={
- "foo": {
- "judge_model": "judge_model",
- "type": "llm_as_judge",
- }
- },
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- scoring = response.parse()
- assert_matches_type(ScoringScoreBatchResponse, scoring, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
-
-class TestAsyncScoring:
- parametrize = pytest.mark.parametrize(
- "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
- )
-
- @parametrize
- async def test_method_score(self, async_client: AsyncLlamaStackClient) -> None:
- scoring = await async_client.scoring.score(
- input_rows=[{"foo": "bar"}],
- scoring_functions={
- "foo": {
- "judge_model": "judge_model",
- "type": "llm_as_judge",
- }
- },
- )
- assert_matches_type(ScoringScoreResponse, scoring, path=["response"])
-
- @parametrize
- async def test_raw_response_score(self, async_client: AsyncLlamaStackClient) -> None:
- response = await async_client.scoring.with_raw_response.score(
- input_rows=[{"foo": "bar"}],
- scoring_functions={
- "foo": {
- "judge_model": "judge_model",
- "type": "llm_as_judge",
- }
- },
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- scoring = await response.parse()
- assert_matches_type(ScoringScoreResponse, scoring, path=["response"])
-
- @parametrize
- async def test_streaming_response_score(self, async_client: AsyncLlamaStackClient) -> None:
- async with async_client.scoring.with_streaming_response.score(
- input_rows=[{"foo": "bar"}],
- scoring_functions={
- "foo": {
- "judge_model": "judge_model",
- "type": "llm_as_judge",
- }
- },
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- scoring = await response.parse()
- assert_matches_type(ScoringScoreResponse, scoring, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_method_score_batch(self, async_client: AsyncLlamaStackClient) -> None:
- scoring = await async_client.scoring.score_batch(
- dataset_id="dataset_id",
- scoring_functions={
- "foo": {
- "judge_model": "judge_model",
- "type": "llm_as_judge",
- }
- },
- )
- assert_matches_type(ScoringScoreBatchResponse, scoring, path=["response"])
-
- @parametrize
- async def test_method_score_batch_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
- scoring = await async_client.scoring.score_batch(
- dataset_id="dataset_id",
- scoring_functions={
- "foo": {
- "judge_model": "judge_model",
- "aggregation_functions": ["average"],
- "judge_score_regexes": ["string"],
- "prompt_template": "prompt_template",
- "type": "llm_as_judge",
- }
- },
- save_results_dataset=True,
- )
- assert_matches_type(ScoringScoreBatchResponse, scoring, path=["response"])
-
- @parametrize
- async def test_raw_response_score_batch(self, async_client: AsyncLlamaStackClient) -> None:
- response = await async_client.scoring.with_raw_response.score_batch(
- dataset_id="dataset_id",
- scoring_functions={
- "foo": {
- "judge_model": "judge_model",
- "type": "llm_as_judge",
- }
- },
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- scoring = await response.parse()
- assert_matches_type(ScoringScoreBatchResponse, scoring, path=["response"])
-
- @parametrize
- async def test_streaming_response_score_batch(self, async_client: AsyncLlamaStackClient) -> None:
- async with async_client.scoring.with_streaming_response.score_batch(
- dataset_id="dataset_id",
- scoring_functions={
- "foo": {
- "judge_model": "judge_model",
- "type": "llm_as_judge",
- }
- },
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- scoring = await response.parse()
- assert_matches_type(ScoringScoreBatchResponse, scoring, path=["response"])
-
- assert cast(Any, response.is_closed) is True
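test_scoring.py is deleted outright rather than renamed, which suggests the scoring resource no longer exists on the generated client. A defensive sketch for downstream code, assuming only that a removed resource disappears as a client attribute:

    from ogx_client import OgxClient

    client = OgxClient(base_url="http://127.0.0.1:4010")
    # Guard callers that still expect client.scoring from the old SDK.
    if getattr(client, "scoring", None) is None:
        raise RuntimeError("scoring endpoints were removed; migrate callers")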
diff --git a/tests/api_resources/test_scoring_functions.py b/tests/api_resources/test_scoring_functions.py
deleted file mode 100644
index 6b0aa723..00000000
--- a/tests/api_resources/test_scoring_functions.py
+++ /dev/null
@@ -1,366 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client.types import ScoringFn, ScoringFunctionListResponse
-
-# pyright: reportDeprecated=false
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestScoringFunctions:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @parametrize
- def test_method_retrieve(self, client: LlamaStackClient) -> None:
- scoring_function = client.scoring_functions.retrieve(
- "scoring_fn_id",
- )
- assert_matches_type(ScoringFn, scoring_function, path=["response"])
-
- @parametrize
- def test_raw_response_retrieve(self, client: LlamaStackClient) -> None:
- response = client.scoring_functions.with_raw_response.retrieve(
- "scoring_fn_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- scoring_function = response.parse()
- assert_matches_type(ScoringFn, scoring_function, path=["response"])
-
- @parametrize
- def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None:
- with client.scoring_functions.with_streaming_response.retrieve(
- "scoring_fn_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- scoring_function = response.parse()
- assert_matches_type(ScoringFn, scoring_function, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_path_params_retrieve(self, client: LlamaStackClient) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `scoring_fn_id` but received ''"):
- client.scoring_functions.with_raw_response.retrieve(
- "",
- )
-
- @parametrize
- def test_method_list(self, client: LlamaStackClient) -> None:
- scoring_function = client.scoring_functions.list()
- assert_matches_type(ScoringFunctionListResponse, scoring_function, path=["response"])
-
- @parametrize
- def test_raw_response_list(self, client: LlamaStackClient) -> None:
- response = client.scoring_functions.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- scoring_function = response.parse()
- assert_matches_type(ScoringFunctionListResponse, scoring_function, path=["response"])
-
- @parametrize
- def test_streaming_response_list(self, client: LlamaStackClient) -> None:
- with client.scoring_functions.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- scoring_function = response.parse()
- assert_matches_type(ScoringFunctionListResponse, scoring_function, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_method_register(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- scoring_function = client.scoring_functions.register(
- description="description",
- return_type={"type": "string"},
- scoring_fn_id="scoring_fn_id",
- )
-
- assert scoring_function is None
-
- @parametrize
- def test_method_register_with_all_params(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- scoring_function = client.scoring_functions.register(
- description="description",
- return_type={"type": "string"},
- scoring_fn_id="scoring_fn_id",
- params={
- "judge_model": "judge_model",
- "aggregation_functions": ["average"],
- "judge_score_regexes": ["string"],
- "prompt_template": "prompt_template",
- "type": "llm_as_judge",
- },
- provider_id="provider_id",
- provider_scoring_fn_id="provider_scoring_fn_id",
- )
-
- assert scoring_function is None
-
- @parametrize
- def test_raw_response_register(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- response = client.scoring_functions.with_raw_response.register(
- description="description",
- return_type={"type": "string"},
- scoring_fn_id="scoring_fn_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- scoring_function = response.parse()
- assert scoring_function is None
-
- @parametrize
- def test_streaming_response_register(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- with client.scoring_functions.with_streaming_response.register(
- description="description",
- return_type={"type": "string"},
- scoring_fn_id="scoring_fn_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- scoring_function = response.parse()
- assert scoring_function is None
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_method_unregister(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- scoring_function = client.scoring_functions.unregister(
- "scoring_fn_id",
- )
-
- assert scoring_function is None
-
- @parametrize
- def test_raw_response_unregister(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- response = client.scoring_functions.with_raw_response.unregister(
- "scoring_fn_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- scoring_function = response.parse()
- assert scoring_function is None
-
- @parametrize
- def test_streaming_response_unregister(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- with client.scoring_functions.with_streaming_response.unregister(
- "scoring_fn_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- scoring_function = response.parse()
- assert scoring_function is None
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_path_params_unregister(self, client: LlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `scoring_fn_id` but received ''"):
- client.scoring_functions.with_raw_response.unregister(
- "",
- )
-
-
-class TestAsyncScoringFunctions:
- parametrize = pytest.mark.parametrize(
- "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
- )
-
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
- scoring_function = await async_client.scoring_functions.retrieve(
- "scoring_fn_id",
- )
- assert_matches_type(ScoringFn, scoring_function, path=["response"])
-
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
- response = await async_client.scoring_functions.with_raw_response.retrieve(
- "scoring_fn_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- scoring_function = await response.parse()
- assert_matches_type(ScoringFn, scoring_function, path=["response"])
-
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
- async with async_client.scoring_functions.with_streaming_response.retrieve(
- "scoring_fn_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- scoring_function = await response.parse()
- assert_matches_type(ScoringFn, scoring_function, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `scoring_fn_id` but received ''"):
- await async_client.scoring_functions.with_raw_response.retrieve(
- "",
- )
-
- @parametrize
- async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None:
- scoring_function = await async_client.scoring_functions.list()
- assert_matches_type(ScoringFunctionListResponse, scoring_function, path=["response"])
-
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None:
- response = await async_client.scoring_functions.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- scoring_function = await response.parse()
- assert_matches_type(ScoringFunctionListResponse, scoring_function, path=["response"])
-
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None:
- async with async_client.scoring_functions.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- scoring_function = await response.parse()
- assert_matches_type(ScoringFunctionListResponse, scoring_function, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_method_register(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- scoring_function = await async_client.scoring_functions.register(
- description="description",
- return_type={"type": "string"},
- scoring_fn_id="scoring_fn_id",
- )
-
- assert scoring_function is None
-
- @parametrize
- async def test_method_register_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- scoring_function = await async_client.scoring_functions.register(
- description="description",
- return_type={"type": "string"},
- scoring_fn_id="scoring_fn_id",
- params={
- "judge_model": "judge_model",
- "aggregation_functions": ["average"],
- "judge_score_regexes": ["string"],
- "prompt_template": "prompt_template",
- "type": "llm_as_judge",
- },
- provider_id="provider_id",
- provider_scoring_fn_id="provider_scoring_fn_id",
- )
-
- assert scoring_function is None
-
- @parametrize
- async def test_raw_response_register(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- response = await async_client.scoring_functions.with_raw_response.register(
- description="description",
- return_type={"type": "string"},
- scoring_fn_id="scoring_fn_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- scoring_function = await response.parse()
- assert scoring_function is None
-
- @parametrize
- async def test_streaming_response_register(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- async with async_client.scoring_functions.with_streaming_response.register(
- description="description",
- return_type={"type": "string"},
- scoring_fn_id="scoring_fn_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- scoring_function = await response.parse()
- assert scoring_function is None
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_method_unregister(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- scoring_function = await async_client.scoring_functions.unregister(
- "scoring_fn_id",
- )
-
- assert scoring_function is None
-
- @parametrize
- async def test_raw_response_unregister(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- response = await async_client.scoring_functions.with_raw_response.unregister(
- "scoring_fn_id",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- scoring_function = await response.parse()
- assert scoring_function is None
-
- @parametrize
- async def test_streaming_response_unregister(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- async with async_client.scoring_functions.with_streaming_response.unregister(
- "scoring_fn_id",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- scoring_function = await response.parse()
- assert scoring_function is None
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_path_params_unregister(self, async_client: AsyncLlamaStackClient) -> None:
- with pytest.warns(DeprecationWarning):
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `scoring_fn_id` but received ''"):
- await async_client.scoring_functions.with_raw_response.unregister(
- "",
- )
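test_scoring_functions.py goes the same way, taking its `ScoringFn` and `ScoringFunctionListResponse` imports with it. A hypothetical regression guard, valid only under the assumption that those types were dropped from `ogx_client.types` along with the tests:

    import pytest

    def test_scoring_function_types_removed() -> None:
        # Passes only if ScoringFn really was removed from the package.
        with pytest.raises(ImportError):
            from ogx_client.types import ScoringFn  # noqa: F401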
diff --git a/tests/api_resources/test_shields.py b/tests/api_resources/test_shields.py
index afcc128b..938c0745 100644
--- a/tests/api_resources/test_shields.py
+++ b/tests/api_resources/test_shields.py
@@ -13,9 +13,9 @@
import pytest
+from ogx_client import OgxClient, AsyncOgxClient
from tests.utils import assert_matches_type
-from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client.types import Shield, ShieldListResponse
+from ogx_client.types import Shield, ShieldListResponse
# pyright: reportDeprecated=false
@@ -26,14 +26,14 @@ class TestShields:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@parametrize
- def test_method_retrieve(self, client: LlamaStackClient) -> None:
+ def test_method_retrieve(self, client: OgxClient) -> None:
shield = client.shields.retrieve(
"identifier",
)
assert_matches_type(Shield, shield, path=["response"])
@parametrize
- def test_raw_response_retrieve(self, client: LlamaStackClient) -> None:
+ def test_raw_response_retrieve(self, client: OgxClient) -> None:
response = client.shields.with_raw_response.retrieve(
"identifier",
)
@@ -44,7 +44,7 @@ def test_raw_response_retrieve(self, client: LlamaStackClient) -> None:
assert_matches_type(Shield, shield, path=["response"])
@parametrize
- def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_retrieve(self, client: OgxClient) -> None:
with client.shields.with_streaming_response.retrieve(
"identifier",
) as response:
@@ -57,19 +57,19 @@ def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_path_params_retrieve(self, client: LlamaStackClient) -> None:
+ def test_path_params_retrieve(self, client: OgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `identifier` but received ''"):
client.shields.with_raw_response.retrieve(
"",
)
@parametrize
- def test_method_list(self, client: LlamaStackClient) -> None:
+ def test_method_list(self, client: OgxClient) -> None:
shield = client.shields.list()
assert_matches_type(ShieldListResponse, shield, path=["response"])
@parametrize
- def test_raw_response_list(self, client: LlamaStackClient) -> None:
+ def test_raw_response_list(self, client: OgxClient) -> None:
response = client.shields.with_raw_response.list()
assert response.is_closed is True
@@ -78,7 +78,7 @@ def test_raw_response_list(self, client: LlamaStackClient) -> None:
assert_matches_type(ShieldListResponse, shield, path=["response"])
@parametrize
- def test_streaming_response_list(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_list(self, client: OgxClient) -> None:
with client.shields.with_streaming_response.list() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -89,7 +89,7 @@ def test_streaming_response_list(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_method_delete(self, client: LlamaStackClient) -> None:
+ def test_method_delete(self, client: OgxClient) -> None:
with pytest.warns(DeprecationWarning):
shield = client.shields.delete(
"identifier",
@@ -98,7 +98,7 @@ def test_method_delete(self, client: LlamaStackClient) -> None:
assert shield is None
@parametrize
- def test_raw_response_delete(self, client: LlamaStackClient) -> None:
+ def test_raw_response_delete(self, client: OgxClient) -> None:
with pytest.warns(DeprecationWarning):
response = client.shields.with_raw_response.delete(
"identifier",
@@ -110,7 +110,7 @@ def test_raw_response_delete(self, client: LlamaStackClient) -> None:
assert shield is None
@parametrize
- def test_streaming_response_delete(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_delete(self, client: OgxClient) -> None:
with pytest.warns(DeprecationWarning):
with client.shields.with_streaming_response.delete(
"identifier",
@@ -124,7 +124,7 @@ def test_streaming_response_delete(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_path_params_delete(self, client: LlamaStackClient) -> None:
+ def test_path_params_delete(self, client: OgxClient) -> None:
with pytest.warns(DeprecationWarning):
with pytest.raises(ValueError, match=r"Expected a non-empty value for `identifier` but received ''"):
client.shields.with_raw_response.delete(
@@ -132,7 +132,7 @@ def test_path_params_delete(self, client: LlamaStackClient) -> None:
)
@parametrize
- def test_method_register(self, client: LlamaStackClient) -> None:
+ def test_method_register(self, client: OgxClient) -> None:
with pytest.warns(DeprecationWarning):
shield = client.shields.register(
shield_id="shield_id",
@@ -141,7 +141,7 @@ def test_method_register(self, client: LlamaStackClient) -> None:
assert_matches_type(Shield, shield, path=["response"])
@parametrize
- def test_method_register_with_all_params(self, client: LlamaStackClient) -> None:
+ def test_method_register_with_all_params(self, client: OgxClient) -> None:
with pytest.warns(DeprecationWarning):
shield = client.shields.register(
shield_id="shield_id",
@@ -153,7 +153,7 @@ def test_method_register_with_all_params(self, client: LlamaStackClient) -> None
assert_matches_type(Shield, shield, path=["response"])
@parametrize
- def test_raw_response_register(self, client: LlamaStackClient) -> None:
+ def test_raw_response_register(self, client: OgxClient) -> None:
with pytest.warns(DeprecationWarning):
response = client.shields.with_raw_response.register(
shield_id="shield_id",
@@ -165,7 +165,7 @@ def test_raw_response_register(self, client: LlamaStackClient) -> None:
assert_matches_type(Shield, shield, path=["response"])
@parametrize
- def test_streaming_response_register(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_register(self, client: OgxClient) -> None:
with pytest.warns(DeprecationWarning):
with client.shields.with_streaming_response.register(
shield_id="shield_id",
@@ -185,14 +185,14 @@ class TestAsyncShields:
)
@parametrize
- async def test_method_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_retrieve(self, async_client: AsyncOgxClient) -> None:
shield = await async_client.shields.retrieve(
"identifier",
)
assert_matches_type(Shield, shield, path=["response"])
@parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_retrieve(self, async_client: AsyncOgxClient) -> None:
response = await async_client.shields.with_raw_response.retrieve(
"identifier",
)
@@ -203,7 +203,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncLlamaStackClient)
assert_matches_type(Shield, shield, path=["response"])
@parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_retrieve(self, async_client: AsyncOgxClient) -> None:
async with async_client.shields.with_streaming_response.retrieve(
"identifier",
) as response:
@@ -216,19 +216,19 @@ async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackCl
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_path_params_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_path_params_retrieve(self, async_client: AsyncOgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `identifier` but received ''"):
await async_client.shields.with_raw_response.retrieve(
"",
)
@parametrize
- async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_list(self, async_client: AsyncOgxClient) -> None:
shield = await async_client.shields.list()
assert_matches_type(ShieldListResponse, shield, path=["response"])
@parametrize
- async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_list(self, async_client: AsyncOgxClient) -> None:
response = await async_client.shields.with_raw_response.list()
assert response.is_closed is True
@@ -237,7 +237,7 @@ async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> N
assert_matches_type(ShieldListResponse, shield, path=["response"])
@parametrize
- async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_list(self, async_client: AsyncOgxClient) -> None:
async with async_client.shields.with_streaming_response.list() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -248,7 +248,7 @@ async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_method_delete(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_delete(self, async_client: AsyncOgxClient) -> None:
with pytest.warns(DeprecationWarning):
shield = await async_client.shields.delete(
"identifier",
@@ -257,7 +257,7 @@ async def test_method_delete(self, async_client: AsyncLlamaStackClient) -> None:
assert shield is None
@parametrize
- async def test_raw_response_delete(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_delete(self, async_client: AsyncOgxClient) -> None:
with pytest.warns(DeprecationWarning):
response = await async_client.shields.with_raw_response.delete(
"identifier",
@@ -269,7 +269,7 @@ async def test_raw_response_delete(self, async_client: AsyncLlamaStackClient) ->
assert shield is None
@parametrize
- async def test_streaming_response_delete(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_delete(self, async_client: AsyncOgxClient) -> None:
with pytest.warns(DeprecationWarning):
async with async_client.shields.with_streaming_response.delete(
"identifier",
@@ -283,7 +283,7 @@ async def test_streaming_response_delete(self, async_client: AsyncLlamaStackClie
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_path_params_delete(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_path_params_delete(self, async_client: AsyncOgxClient) -> None:
with pytest.warns(DeprecationWarning):
with pytest.raises(ValueError, match=r"Expected a non-empty value for `identifier` but received ''"):
await async_client.shields.with_raw_response.delete(
@@ -291,7 +291,7 @@ async def test_path_params_delete(self, async_client: AsyncLlamaStackClient) ->
)
@parametrize
- async def test_method_register(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_register(self, async_client: AsyncOgxClient) -> None:
with pytest.warns(DeprecationWarning):
shield = await async_client.shields.register(
shield_id="shield_id",
@@ -300,7 +300,7 @@ async def test_method_register(self, async_client: AsyncLlamaStackClient) -> Non
assert_matches_type(Shield, shield, path=["response"])
@parametrize
- async def test_method_register_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_register_with_all_params(self, async_client: AsyncOgxClient) -> None:
with pytest.warns(DeprecationWarning):
shield = await async_client.shields.register(
shield_id="shield_id",
@@ -312,7 +312,7 @@ async def test_method_register_with_all_params(self, async_client: AsyncLlamaSta
assert_matches_type(Shield, shield, path=["response"])
@parametrize
- async def test_raw_response_register(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_register(self, async_client: AsyncOgxClient) -> None:
with pytest.warns(DeprecationWarning):
response = await async_client.shields.with_raw_response.register(
shield_id="shield_id",
@@ -324,7 +324,7 @@ async def test_raw_response_register(self, async_client: AsyncLlamaStackClient)
assert_matches_type(Shield, shield, path=["response"])
@parametrize
- async def test_streaming_response_register(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_register(self, async_client: AsyncOgxClient) -> None:
with pytest.warns(DeprecationWarning):
async with async_client.shields.with_streaming_response.register(
shield_id="shield_id",
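The shields tests keep their `pytest.warns(DeprecationWarning)` wrappers through the rename, so `register` and `delete` still function but are deprecated. A sketch of that pattern against the renamed client, reusing the placeholder identifiers the tests themselves pass:

    import pytest

    from ogx_client import OgxClient

    client = OgxClient(base_url="http://127.0.0.1:4010")  # assumed constructor

    with pytest.warns(DeprecationWarning):
        shield = client.shields.register(shield_id="shield_id")
    with pytest.warns(DeprecationWarning):
        client.shields.delete("identifier")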
diff --git a/tests/api_resources/test_vector_io.py b/tests/api_resources/test_vector_io.py
index 42c0808d..1b93fbca 100644
--- a/tests/api_resources/test_vector_io.py
+++ b/tests/api_resources/test_vector_io.py
@@ -13,9 +13,9 @@
import pytest
+from ogx_client import OgxClient, AsyncOgxClient
from tests.utils import assert_matches_type
-from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client.types import QueryChunksResponse
+from ogx_client.types import QueryChunksResponse
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -24,7 +24,7 @@ class TestVectorIo:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@parametrize
- def test_method_insert(self, client: LlamaStackClient) -> None:
+ def test_method_insert(self, client: OgxClient) -> None:
vector_io = client.vector_io.insert(
chunks=[
{
@@ -41,7 +41,7 @@ def test_method_insert(self, client: LlamaStackClient) -> None:
assert vector_io is None
@parametrize
- def test_method_insert_with_all_params(self, client: LlamaStackClient) -> None:
+ def test_method_insert_with_all_params(self, client: OgxClient) -> None:
vector_io = client.vector_io.insert(
chunks=[
{
@@ -70,7 +70,7 @@ def test_method_insert_with_all_params(self, client: LlamaStackClient) -> None:
assert vector_io is None
@parametrize
- def test_raw_response_insert(self, client: LlamaStackClient) -> None:
+ def test_raw_response_insert(self, client: OgxClient) -> None:
response = client.vector_io.with_raw_response.insert(
chunks=[
{
@@ -91,7 +91,7 @@ def test_raw_response_insert(self, client: LlamaStackClient) -> None:
assert vector_io is None
@parametrize
- def test_streaming_response_insert(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_insert(self, client: OgxClient) -> None:
with client.vector_io.with_streaming_response.insert(
chunks=[
{
@@ -114,7 +114,7 @@ def test_streaming_response_insert(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_method_query(self, client: LlamaStackClient) -> None:
+ def test_method_query(self, client: OgxClient) -> None:
vector_io = client.vector_io.query(
query="string",
vector_store_id="vector_store_id",
@@ -122,7 +122,7 @@ def test_method_query(self, client: LlamaStackClient) -> None:
assert_matches_type(QueryChunksResponse, vector_io, path=["response"])
@parametrize
- def test_method_query_with_all_params(self, client: LlamaStackClient) -> None:
+ def test_method_query_with_all_params(self, client: OgxClient) -> None:
vector_io = client.vector_io.query(
query="string",
vector_store_id="vector_store_id",
@@ -131,7 +131,7 @@ def test_method_query_with_all_params(self, client: LlamaStackClient) -> None:
assert_matches_type(QueryChunksResponse, vector_io, path=["response"])
@parametrize
- def test_raw_response_query(self, client: LlamaStackClient) -> None:
+ def test_raw_response_query(self, client: OgxClient) -> None:
response = client.vector_io.with_raw_response.query(
query="string",
vector_store_id="vector_store_id",
@@ -143,7 +143,7 @@ def test_raw_response_query(self, client: LlamaStackClient) -> None:
assert_matches_type(QueryChunksResponse, vector_io, path=["response"])
@parametrize
- def test_streaming_response_query(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_query(self, client: OgxClient) -> None:
with client.vector_io.with_streaming_response.query(
query="string",
vector_store_id="vector_store_id",
@@ -163,7 +163,7 @@ class TestAsyncVectorIo:
)
@parametrize
- async def test_method_insert(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_insert(self, async_client: AsyncOgxClient) -> None:
vector_io = await async_client.vector_io.insert(
chunks=[
{
@@ -180,7 +180,7 @@ async def test_method_insert(self, async_client: AsyncLlamaStackClient) -> None:
assert vector_io is None
@parametrize
- async def test_method_insert_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_insert_with_all_params(self, async_client: AsyncOgxClient) -> None:
vector_io = await async_client.vector_io.insert(
chunks=[
{
@@ -209,7 +209,7 @@ async def test_method_insert_with_all_params(self, async_client: AsyncLlamaStack
assert vector_io is None
@parametrize
- async def test_raw_response_insert(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_insert(self, async_client: AsyncOgxClient) -> None:
response = await async_client.vector_io.with_raw_response.insert(
chunks=[
{
@@ -230,7 +230,7 @@ async def test_raw_response_insert(self, async_client: AsyncLlamaStackClient) ->
assert vector_io is None
@parametrize
- async def test_streaming_response_insert(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_insert(self, async_client: AsyncOgxClient) -> None:
async with async_client.vector_io.with_streaming_response.insert(
chunks=[
{
@@ -253,7 +253,7 @@ async def test_streaming_response_insert(self, async_client: AsyncLlamaStackClie
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_method_query(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_query(self, async_client: AsyncOgxClient) -> None:
vector_io = await async_client.vector_io.query(
query="string",
vector_store_id="vector_store_id",
@@ -261,7 +261,7 @@ async def test_method_query(self, async_client: AsyncLlamaStackClient) -> None:
assert_matches_type(QueryChunksResponse, vector_io, path=["response"])
@parametrize
- async def test_method_query_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_query_with_all_params(self, async_client: AsyncOgxClient) -> None:
vector_io = await async_client.vector_io.query(
query="string",
vector_store_id="vector_store_id",
@@ -270,7 +270,7 @@ async def test_method_query_with_all_params(self, async_client: AsyncLlamaStackC
assert_matches_type(QueryChunksResponse, vector_io, path=["response"])
@parametrize
- async def test_raw_response_query(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_query(self, async_client: AsyncOgxClient) -> None:
response = await async_client.vector_io.with_raw_response.query(
query="string",
vector_store_id="vector_store_id",
@@ -282,7 +282,7 @@ async def test_raw_response_query(self, async_client: AsyncLlamaStackClient) ->
assert_matches_type(QueryChunksResponse, vector_io, path=["response"])
@parametrize
- async def test_streaming_response_query(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_query(self, async_client: AsyncOgxClient) -> None:
async with async_client.vector_io.with_streaming_response.query(
query="string",
vector_store_id="vector_store_id",
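vector_io keeps `insert` and `query` with unchanged parameters; only the imports move. A minimal sync sketch mirroring the query tests above:

    from ogx_client import OgxClient
    from ogx_client.types import QueryChunksResponse

    client = OgxClient(base_url="http://127.0.0.1:4010")  # assumed constructor
    chunks: QueryChunksResponse = client.vector_io.query(
        query="string",
        vector_store_id="vector_store_id",  # placeholder id from the tests
    )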
diff --git a/tests/api_resources/test_vector_stores.py b/tests/api_resources/test_vector_stores.py
index e769f15b..8f3677b4 100644
--- a/tests/api_resources/test_vector_stores.py
+++ b/tests/api_resources/test_vector_stores.py
@@ -13,14 +13,14 @@
import pytest
+from ogx_client import OgxClient, AsyncOgxClient
from tests.utils import assert_matches_type
-from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client.types import (
+from ogx_client.types import (
VectorStore,
VectorStoreDeleteResponse,
VectorStoreSearchResponse,
)
-from llama_stack_client.pagination import SyncOpenAICursorPage, AsyncOpenAICursorPage
+from ogx_client.pagination import SyncOpenAICursorPage, AsyncOpenAICursorPage
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -29,15 +29,19 @@ class TestVectorStores:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@parametrize
- def test_method_create(self, client: LlamaStackClient) -> None:
+ def test_method_create(self, client: OgxClient) -> None:
vector_store = client.vector_stores.create()
assert_matches_type(VectorStore, vector_store, path=["response"])
@parametrize
- def test_method_create_with_all_params(self, client: LlamaStackClient) -> None:
+ def test_method_create_with_all_params(self, client: OgxClient) -> None:
vector_store = client.vector_stores.create(
chunking_strategy={"type": "auto"},
- expires_after={"foo": "bar"},
+ description="description",
+ expires_after={
+ "anchor": "last_active_at",
+ "days": 1,
+ },
file_ids=["string"],
metadata={"foo": "bar"},
name="name",
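Beyond the rename, this hunk tightens `expires_after` from a free-form `{"foo": "bar"}` mapping to a structured anchor/days object and adds a `description` field. A sketch of a create call using exactly the shape the updated test exercises:

    from ogx_client import OgxClient

    client = OgxClient(base_url="http://127.0.0.1:4010")  # assumed constructor
    store = client.vector_stores.create(
        description="description",
        expires_after={"anchor": "last_active_at", "days": 1},
        name="name",
    )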
@@ -45,7 +49,7 @@ def test_method_create_with_all_params(self, client: LlamaStackClient) -> None:
assert_matches_type(VectorStore, vector_store, path=["response"])
@parametrize
- def test_raw_response_create(self, client: LlamaStackClient) -> None:
+ def test_raw_response_create(self, client: OgxClient) -> None:
response = client.vector_stores.with_raw_response.create()
assert response.is_closed is True
@@ -54,7 +58,7 @@ def test_raw_response_create(self, client: LlamaStackClient) -> None:
assert_matches_type(VectorStore, vector_store, path=["response"])
@parametrize
- def test_streaming_response_create(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_create(self, client: OgxClient) -> None:
with client.vector_stores.with_streaming_response.create() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -65,14 +69,14 @@ def test_streaming_response_create(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_method_retrieve(self, client: LlamaStackClient) -> None:
+ def test_method_retrieve(self, client: OgxClient) -> None:
vector_store = client.vector_stores.retrieve(
"vector_store_id",
)
assert_matches_type(VectorStore, vector_store, path=["response"])
@parametrize
- def test_raw_response_retrieve(self, client: LlamaStackClient) -> None:
+ def test_raw_response_retrieve(self, client: OgxClient) -> None:
response = client.vector_stores.with_raw_response.retrieve(
"vector_store_id",
)
@@ -83,7 +87,7 @@ def test_raw_response_retrieve(self, client: LlamaStackClient) -> None:
assert_matches_type(VectorStore, vector_store, path=["response"])
@parametrize
- def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_retrieve(self, client: OgxClient) -> None:
with client.vector_stores.with_streaming_response.retrieve(
"vector_store_id",
) as response:
@@ -96,31 +100,34 @@ def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_path_params_retrieve(self, client: LlamaStackClient) -> None:
+ def test_path_params_retrieve(self, client: OgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
client.vector_stores.with_raw_response.retrieve(
"",
)
@parametrize
- def test_method_update(self, client: LlamaStackClient) -> None:
+ def test_method_update(self, client: OgxClient) -> None:
vector_store = client.vector_stores.update(
vector_store_id="vector_store_id",
)
assert_matches_type(VectorStore, vector_store, path=["response"])
@parametrize
- def test_method_update_with_all_params(self, client: LlamaStackClient) -> None:
+ def test_method_update_with_all_params(self, client: OgxClient) -> None:
vector_store = client.vector_stores.update(
vector_store_id="vector_store_id",
- expires_after={"foo": "bar"},
+ expires_after={
+ "anchor": "last_active_at",
+ "days": 1,
+ },
metadata={"foo": "bar"},
name="name",
)
assert_matches_type(VectorStore, vector_store, path=["response"])
@parametrize
- def test_raw_response_update(self, client: LlamaStackClient) -> None:
+ def test_raw_response_update(self, client: OgxClient) -> None:
response = client.vector_stores.with_raw_response.update(
vector_store_id="vector_store_id",
)
@@ -131,7 +138,7 @@ def test_raw_response_update(self, client: LlamaStackClient) -> None:
assert_matches_type(VectorStore, vector_store, path=["response"])
@parametrize
- def test_streaming_response_update(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_update(self, client: OgxClient) -> None:
with client.vector_stores.with_streaming_response.update(
vector_store_id="vector_store_id",
) as response:
@@ -144,19 +151,19 @@ def test_streaming_response_update(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_path_params_update(self, client: LlamaStackClient) -> None:
+ def test_path_params_update(self, client: OgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
client.vector_stores.with_raw_response.update(
vector_store_id="",
)
@parametrize
- def test_method_list(self, client: LlamaStackClient) -> None:
+ def test_method_list(self, client: OgxClient) -> None:
vector_store = client.vector_stores.list()
assert_matches_type(SyncOpenAICursorPage[VectorStore], vector_store, path=["response"])
@parametrize
- def test_method_list_with_all_params(self, client: LlamaStackClient) -> None:
+ def test_method_list_with_all_params(self, client: OgxClient) -> None:
vector_store = client.vector_stores.list(
after="after",
before="before",
@@ -166,7 +173,7 @@ def test_method_list_with_all_params(self, client: LlamaStackClient) -> None:
assert_matches_type(SyncOpenAICursorPage[VectorStore], vector_store, path=["response"])
@parametrize
- def test_raw_response_list(self, client: LlamaStackClient) -> None:
+ def test_raw_response_list(self, client: OgxClient) -> None:
response = client.vector_stores.with_raw_response.list()
assert response.is_closed is True
@@ -175,7 +182,7 @@ def test_raw_response_list(self, client: LlamaStackClient) -> None:
assert_matches_type(SyncOpenAICursorPage[VectorStore], vector_store, path=["response"])
@parametrize
- def test_streaming_response_list(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_list(self, client: OgxClient) -> None:
with client.vector_stores.with_streaming_response.list() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -186,14 +193,14 @@ def test_streaming_response_list(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_method_delete(self, client: LlamaStackClient) -> None:
+ def test_method_delete(self, client: OgxClient) -> None:
vector_store = client.vector_stores.delete(
"vector_store_id",
)
assert_matches_type(VectorStoreDeleteResponse, vector_store, path=["response"])
@parametrize
- def test_raw_response_delete(self, client: LlamaStackClient) -> None:
+ def test_raw_response_delete(self, client: OgxClient) -> None:
response = client.vector_stores.with_raw_response.delete(
"vector_store_id",
)
@@ -204,7 +211,7 @@ def test_raw_response_delete(self, client: LlamaStackClient) -> None:
assert_matches_type(VectorStoreDeleteResponse, vector_store, path=["response"])
@parametrize
- def test_streaming_response_delete(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_delete(self, client: OgxClient) -> None:
with client.vector_stores.with_streaming_response.delete(
"vector_store_id",
) as response:
@@ -217,14 +224,14 @@ def test_streaming_response_delete(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_path_params_delete(self, client: LlamaStackClient) -> None:
+ def test_path_params_delete(self, client: OgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
client.vector_stores.with_raw_response.delete(
"",
)
@parametrize
- def test_method_search(self, client: LlamaStackClient) -> None:
+ def test_method_search(self, client: OgxClient) -> None:
vector_store = client.vector_stores.search(
vector_store_id="vector_store_id",
query="string",
@@ -232,12 +239,12 @@ def test_method_search(self, client: LlamaStackClient) -> None:
assert_matches_type(VectorStoreSearchResponse, vector_store, path=["response"])
@parametrize
- def test_method_search_with_all_params(self, client: LlamaStackClient) -> None:
+ def test_method_search_with_all_params(self, client: OgxClient) -> None:
vector_store = client.vector_stores.search(
vector_store_id="vector_store_id",
query="string",
filters={"foo": "bar"},
- max_num_results=0,
+ max_num_results=1,
ranking_options={
"alpha": 0,
"impact_factor": 0,
@@ -252,7 +259,7 @@ def test_method_search_with_all_params(self, client: LlamaStackClient) -> None:
assert_matches_type(VectorStoreSearchResponse, vector_store, path=["response"])
@parametrize
- def test_raw_response_search(self, client: LlamaStackClient) -> None:
+ def test_raw_response_search(self, client: OgxClient) -> None:
response = client.vector_stores.with_raw_response.search(
vector_store_id="vector_store_id",
query="string",
@@ -264,7 +271,7 @@ def test_raw_response_search(self, client: LlamaStackClient) -> None:
assert_matches_type(VectorStoreSearchResponse, vector_store, path=["response"])
@parametrize
- def test_streaming_response_search(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_search(self, client: OgxClient) -> None:
with client.vector_stores.with_streaming_response.search(
vector_store_id="vector_store_id",
query="string",
@@ -278,7 +285,7 @@ def test_streaming_response_search(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_path_params_search(self, client: LlamaStackClient) -> None:
+ def test_path_params_search(self, client: OgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
client.vector_stores.with_raw_response.search(
vector_store_id="",
@@ -292,15 +299,19 @@ class TestAsyncVectorStores:
)
@parametrize
- async def test_method_create(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_create(self, async_client: AsyncOgxClient) -> None:
vector_store = await async_client.vector_stores.create()
assert_matches_type(VectorStore, vector_store, path=["response"])
@parametrize
- async def test_method_create_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_create_with_all_params(self, async_client: AsyncOgxClient) -> None:
vector_store = await async_client.vector_stores.create(
chunking_strategy={"type": "auto"},
- expires_after={"foo": "bar"},
+ description="description",
+ expires_after={
+ "anchor": "last_active_at",
+ "days": 1,
+ },
file_ids=["string"],
metadata={"foo": "bar"},
name="name",
@@ -308,7 +319,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncLlamaStack
assert_matches_type(VectorStore, vector_store, path=["response"])
@parametrize
- async def test_raw_response_create(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_create(self, async_client: AsyncOgxClient) -> None:
response = await async_client.vector_stores.with_raw_response.create()
assert response.is_closed is True
@@ -317,7 +328,7 @@ async def test_raw_response_create(self, async_client: AsyncLlamaStackClient) ->
assert_matches_type(VectorStore, vector_store, path=["response"])
@parametrize
- async def test_streaming_response_create(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_create(self, async_client: AsyncOgxClient) -> None:
async with async_client.vector_stores.with_streaming_response.create() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -328,14 +339,14 @@ async def test_streaming_response_create(self, async_client: AsyncLlamaStackClie
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_method_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_retrieve(self, async_client: AsyncOgxClient) -> None:
vector_store = await async_client.vector_stores.retrieve(
"vector_store_id",
)
assert_matches_type(VectorStore, vector_store, path=["response"])
@parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_retrieve(self, async_client: AsyncOgxClient) -> None:
response = await async_client.vector_stores.with_raw_response.retrieve(
"vector_store_id",
)
@@ -346,7 +357,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncLlamaStackClient)
assert_matches_type(VectorStore, vector_store, path=["response"])
@parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_retrieve(self, async_client: AsyncOgxClient) -> None:
async with async_client.vector_stores.with_streaming_response.retrieve(
"vector_store_id",
) as response:
@@ -359,31 +370,34 @@ async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackCl
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_path_params_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_path_params_retrieve(self, async_client: AsyncOgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
await async_client.vector_stores.with_raw_response.retrieve(
"",
)
@parametrize
- async def test_method_update(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_update(self, async_client: AsyncOgxClient) -> None:
vector_store = await async_client.vector_stores.update(
vector_store_id="vector_store_id",
)
assert_matches_type(VectorStore, vector_store, path=["response"])
@parametrize
- async def test_method_update_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_update_with_all_params(self, async_client: AsyncOgxClient) -> None:
vector_store = await async_client.vector_stores.update(
vector_store_id="vector_store_id",
- expires_after={"foo": "bar"},
+ expires_after={
+ "anchor": "last_active_at",
+ "days": 1,
+ },
metadata={"foo": "bar"},
name="name",
)
assert_matches_type(VectorStore, vector_store, path=["response"])
@parametrize
- async def test_raw_response_update(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_update(self, async_client: AsyncOgxClient) -> None:
response = await async_client.vector_stores.with_raw_response.update(
vector_store_id="vector_store_id",
)
@@ -394,7 +408,7 @@ async def test_raw_response_update(self, async_client: AsyncLlamaStackClient) ->
assert_matches_type(VectorStore, vector_store, path=["response"])
@parametrize
- async def test_streaming_response_update(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_update(self, async_client: AsyncOgxClient) -> None:
async with async_client.vector_stores.with_streaming_response.update(
vector_store_id="vector_store_id",
) as response:
@@ -407,19 +421,19 @@ async def test_streaming_response_update(self, async_client: AsyncLlamaStackClie
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_path_params_update(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_path_params_update(self, async_client: AsyncOgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
await async_client.vector_stores.with_raw_response.update(
vector_store_id="",
)
@parametrize
- async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_list(self, async_client: AsyncOgxClient) -> None:
vector_store = await async_client.vector_stores.list()
assert_matches_type(AsyncOpenAICursorPage[VectorStore], vector_store, path=["response"])
@parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_list_with_all_params(self, async_client: AsyncOgxClient) -> None:
vector_store = await async_client.vector_stores.list(
after="after",
before="before",
@@ -429,7 +443,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncLlamaStackCl
assert_matches_type(AsyncOpenAICursorPage[VectorStore], vector_store, path=["response"])
@parametrize
- async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_list(self, async_client: AsyncOgxClient) -> None:
response = await async_client.vector_stores.with_raw_response.list()
assert response.is_closed is True
@@ -438,7 +452,7 @@ async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> N
assert_matches_type(AsyncOpenAICursorPage[VectorStore], vector_store, path=["response"])
@parametrize
- async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_list(self, async_client: AsyncOgxClient) -> None:
async with async_client.vector_stores.with_streaming_response.list() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -449,14 +463,14 @@ async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_method_delete(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_delete(self, async_client: AsyncOgxClient) -> None:
vector_store = await async_client.vector_stores.delete(
"vector_store_id",
)
assert_matches_type(VectorStoreDeleteResponse, vector_store, path=["response"])
@parametrize
- async def test_raw_response_delete(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_delete(self, async_client: AsyncOgxClient) -> None:
response = await async_client.vector_stores.with_raw_response.delete(
"vector_store_id",
)
@@ -467,7 +481,7 @@ async def test_raw_response_delete(self, async_client: AsyncLlamaStackClient) ->
assert_matches_type(VectorStoreDeleteResponse, vector_store, path=["response"])
@parametrize
- async def test_streaming_response_delete(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_delete(self, async_client: AsyncOgxClient) -> None:
async with async_client.vector_stores.with_streaming_response.delete(
"vector_store_id",
) as response:
@@ -480,14 +494,14 @@ async def test_streaming_response_delete(self, async_client: AsyncLlamaStackClie
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_path_params_delete(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_path_params_delete(self, async_client: AsyncOgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
await async_client.vector_stores.with_raw_response.delete(
"",
)
@parametrize
- async def test_method_search(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_search(self, async_client: AsyncOgxClient) -> None:
vector_store = await async_client.vector_stores.search(
vector_store_id="vector_store_id",
query="string",
@@ -495,12 +509,12 @@ async def test_method_search(self, async_client: AsyncLlamaStackClient) -> None:
assert_matches_type(VectorStoreSearchResponse, vector_store, path=["response"])
@parametrize
- async def test_method_search_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_search_with_all_params(self, async_client: AsyncOgxClient) -> None:
vector_store = await async_client.vector_stores.search(
vector_store_id="vector_store_id",
query="string",
filters={"foo": "bar"},
- max_num_results=0,
+ max_num_results=1,
ranking_options={
"alpha": 0,
"impact_factor": 0,
@@ -515,7 +529,7 @@ async def test_method_search_with_all_params(self, async_client: AsyncLlamaStack
assert_matches_type(VectorStoreSearchResponse, vector_store, path=["response"])
@parametrize
- async def test_raw_response_search(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_search(self, async_client: AsyncOgxClient) -> None:
response = await async_client.vector_stores.with_raw_response.search(
vector_store_id="vector_store_id",
query="string",
@@ -527,7 +541,7 @@ async def test_raw_response_search(self, async_client: AsyncLlamaStackClient) ->
assert_matches_type(VectorStoreSearchResponse, vector_store, path=["response"])
@parametrize
- async def test_streaming_response_search(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_search(self, async_client: AsyncOgxClient) -> None:
async with async_client.vector_stores.with_streaming_response.search(
vector_store_id="vector_store_id",
query="string",
@@ -541,7 +555,7 @@ async def test_streaming_response_search(self, async_client: AsyncLlamaStackClie
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_path_params_search(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_path_params_search(self, async_client: AsyncOgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
await async_client.vector_stores.with_raw_response.search(
vector_store_id="",
diff --git a/tests/api_resources/vector_stores/test_file_batches.py b/tests/api_resources/vector_stores/test_file_batches.py
index 166c43d9..58584126 100644
--- a/tests/api_resources/vector_stores/test_file_batches.py
+++ b/tests/api_resources/vector_stores/test_file_batches.py
@@ -13,10 +13,10 @@
import pytest
+from ogx_client import OgxClient, AsyncOgxClient
from tests.utils import assert_matches_type
-from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client.pagination import SyncOpenAICursorPage, AsyncOpenAICursorPage
-from llama_stack_client.types.vector_stores import (
+from ogx_client.pagination import SyncOpenAICursorPage, AsyncOpenAICursorPage
+from ogx_client.types.vector_stores import (
VectorStoreFile,
VectorStoreFileBatches,
)
@@ -28,28 +28,33 @@ class TestFileBatches:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@parametrize
- def test_method_create(self, client: LlamaStackClient) -> None:
+ def test_method_create(self, client: OgxClient) -> None:
file_batch = client.vector_stores.file_batches.create(
vector_store_id="vector_store_id",
- file_ids=["string"],
)
assert_matches_type(VectorStoreFileBatches, file_batch, path=["response"])
@parametrize
- def test_method_create_with_all_params(self, client: LlamaStackClient) -> None:
+ def test_method_create_with_all_params(self, client: OgxClient) -> None:
file_batch = client.vector_stores.file_batches.create(
vector_store_id="vector_store_id",
- file_ids=["string"],
- attributes={"foo": "bar"},
+ attributes={"foo": "string"},
chunking_strategy={"type": "auto"},
+ file_ids=["string"],
+ files=[
+ {
+ "file_id": "file_id",
+ "attributes": {"foo": "string"},
+ "chunking_strategy": {"type": "auto"},
+ }
+ ],
)
assert_matches_type(VectorStoreFileBatches, file_batch, path=["response"])
@parametrize
- def test_raw_response_create(self, client: LlamaStackClient) -> None:
+ def test_raw_response_create(self, client: OgxClient) -> None:
response = client.vector_stores.file_batches.with_raw_response.create(
vector_store_id="vector_store_id",
- file_ids=["string"],
)
assert response.is_closed is True
@@ -58,10 +63,9 @@ def test_raw_response_create(self, client: LlamaStackClient) -> None:
assert_matches_type(VectorStoreFileBatches, file_batch, path=["response"])
@parametrize
- def test_streaming_response_create(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_create(self, client: OgxClient) -> None:
with client.vector_stores.file_batches.with_streaming_response.create(
vector_store_id="vector_store_id",
- file_ids=["string"],
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -72,15 +76,14 @@ def test_streaming_response_create(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_path_params_create(self, client: LlamaStackClient) -> None:
+ def test_path_params_create(self, client: OgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
client.vector_stores.file_batches.with_raw_response.create(
vector_store_id="",
- file_ids=["string"],
)
@parametrize
- def test_method_retrieve(self, client: LlamaStackClient) -> None:
+ def test_method_retrieve(self, client: OgxClient) -> None:
file_batch = client.vector_stores.file_batches.retrieve(
batch_id="batch_id",
vector_store_id="vector_store_id",
@@ -88,7 +91,7 @@ def test_method_retrieve(self, client: LlamaStackClient) -> None:
assert_matches_type(VectorStoreFileBatches, file_batch, path=["response"])
@parametrize
- def test_raw_response_retrieve(self, client: LlamaStackClient) -> None:
+ def test_raw_response_retrieve(self, client: OgxClient) -> None:
response = client.vector_stores.file_batches.with_raw_response.retrieve(
batch_id="batch_id",
vector_store_id="vector_store_id",
@@ -100,7 +103,7 @@ def test_raw_response_retrieve(self, client: LlamaStackClient) -> None:
assert_matches_type(VectorStoreFileBatches, file_batch, path=["response"])
@parametrize
- def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_retrieve(self, client: OgxClient) -> None:
with client.vector_stores.file_batches.with_streaming_response.retrieve(
batch_id="batch_id",
vector_store_id="vector_store_id",
@@ -114,7 +117,7 @@ def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_path_params_retrieve(self, client: LlamaStackClient) -> None:
+ def test_path_params_retrieve(self, client: OgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
client.vector_stores.file_batches.with_raw_response.retrieve(
batch_id="batch_id",
@@ -128,7 +131,7 @@ def test_path_params_retrieve(self, client: LlamaStackClient) -> None:
)
@parametrize
- def test_method_cancel(self, client: LlamaStackClient) -> None:
+ def test_method_cancel(self, client: OgxClient) -> None:
file_batch = client.vector_stores.file_batches.cancel(
batch_id="batch_id",
vector_store_id="vector_store_id",
@@ -136,7 +139,7 @@ def test_method_cancel(self, client: LlamaStackClient) -> None:
assert_matches_type(VectorStoreFileBatches, file_batch, path=["response"])
@parametrize
- def test_raw_response_cancel(self, client: LlamaStackClient) -> None:
+ def test_raw_response_cancel(self, client: OgxClient) -> None:
response = client.vector_stores.file_batches.with_raw_response.cancel(
batch_id="batch_id",
vector_store_id="vector_store_id",
@@ -148,7 +151,7 @@ def test_raw_response_cancel(self, client: LlamaStackClient) -> None:
assert_matches_type(VectorStoreFileBatches, file_batch, path=["response"])
@parametrize
- def test_streaming_response_cancel(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_cancel(self, client: OgxClient) -> None:
with client.vector_stores.file_batches.with_streaming_response.cancel(
batch_id="batch_id",
vector_store_id="vector_store_id",
@@ -162,7 +165,7 @@ def test_streaming_response_cancel(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_path_params_cancel(self, client: LlamaStackClient) -> None:
+ def test_path_params_cancel(self, client: OgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
client.vector_stores.file_batches.with_raw_response.cancel(
batch_id="batch_id",
@@ -176,7 +179,7 @@ def test_path_params_cancel(self, client: LlamaStackClient) -> None:
)
@parametrize
- def test_method_list_files(self, client: LlamaStackClient) -> None:
+ def test_method_list_files(self, client: OgxClient) -> None:
file_batch = client.vector_stores.file_batches.list_files(
batch_id="batch_id",
vector_store_id="vector_store_id",
@@ -184,7 +187,7 @@ def test_method_list_files(self, client: LlamaStackClient) -> None:
assert_matches_type(SyncOpenAICursorPage[VectorStoreFile], file_batch, path=["response"])
@parametrize
- def test_method_list_files_with_all_params(self, client: LlamaStackClient) -> None:
+ def test_method_list_files_with_all_params(self, client: OgxClient) -> None:
file_batch = client.vector_stores.file_batches.list_files(
batch_id="batch_id",
vector_store_id="vector_store_id",
@@ -197,7 +200,7 @@ def test_method_list_files_with_all_params(self, client: LlamaStackClient) -> No
assert_matches_type(SyncOpenAICursorPage[VectorStoreFile], file_batch, path=["response"])
@parametrize
- def test_raw_response_list_files(self, client: LlamaStackClient) -> None:
+ def test_raw_response_list_files(self, client: OgxClient) -> None:
response = client.vector_stores.file_batches.with_raw_response.list_files(
batch_id="batch_id",
vector_store_id="vector_store_id",
@@ -209,7 +212,7 @@ def test_raw_response_list_files(self, client: LlamaStackClient) -> None:
assert_matches_type(SyncOpenAICursorPage[VectorStoreFile], file_batch, path=["response"])
@parametrize
- def test_streaming_response_list_files(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_list_files(self, client: OgxClient) -> None:
with client.vector_stores.file_batches.with_streaming_response.list_files(
batch_id="batch_id",
vector_store_id="vector_store_id",
@@ -223,7 +226,7 @@ def test_streaming_response_list_files(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_path_params_list_files(self, client: LlamaStackClient) -> None:
+ def test_path_params_list_files(self, client: OgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
client.vector_stores.file_batches.with_raw_response.list_files(
batch_id="batch_id",
@@ -243,28 +246,33 @@ class TestAsyncFileBatches:
)
@parametrize
- async def test_method_create(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_create(self, async_client: AsyncOgxClient) -> None:
file_batch = await async_client.vector_stores.file_batches.create(
vector_store_id="vector_store_id",
- file_ids=["string"],
)
assert_matches_type(VectorStoreFileBatches, file_batch, path=["response"])
@parametrize
- async def test_method_create_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_create_with_all_params(self, async_client: AsyncOgxClient) -> None:
file_batch = await async_client.vector_stores.file_batches.create(
vector_store_id="vector_store_id",
- file_ids=["string"],
- attributes={"foo": "bar"},
+ attributes={"foo": "string"},
chunking_strategy={"type": "auto"},
+ file_ids=["string"],
+ files=[
+ {
+ "file_id": "file_id",
+ "attributes": {"foo": "string"},
+ "chunking_strategy": {"type": "auto"},
+ }
+ ],
)
assert_matches_type(VectorStoreFileBatches, file_batch, path=["response"])
@parametrize
- async def test_raw_response_create(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_create(self, async_client: AsyncOgxClient) -> None:
response = await async_client.vector_stores.file_batches.with_raw_response.create(
vector_store_id="vector_store_id",
- file_ids=["string"],
)
assert response.is_closed is True
@@ -273,10 +281,9 @@ async def test_raw_response_create(self, async_client: AsyncLlamaStackClient) ->
assert_matches_type(VectorStoreFileBatches, file_batch, path=["response"])
@parametrize
- async def test_streaming_response_create(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_create(self, async_client: AsyncOgxClient) -> None:
async with async_client.vector_stores.file_batches.with_streaming_response.create(
vector_store_id="vector_store_id",
- file_ids=["string"],
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -287,15 +294,14 @@ async def test_streaming_response_create(self, async_client: AsyncLlamaStackClie
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_path_params_create(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_path_params_create(self, async_client: AsyncOgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
await async_client.vector_stores.file_batches.with_raw_response.create(
vector_store_id="",
- file_ids=["string"],
)
@parametrize
- async def test_method_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_retrieve(self, async_client: AsyncOgxClient) -> None:
file_batch = await async_client.vector_stores.file_batches.retrieve(
batch_id="batch_id",
vector_store_id="vector_store_id",
@@ -303,7 +309,7 @@ async def test_method_retrieve(self, async_client: AsyncLlamaStackClient) -> Non
assert_matches_type(VectorStoreFileBatches, file_batch, path=["response"])
@parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_retrieve(self, async_client: AsyncOgxClient) -> None:
response = await async_client.vector_stores.file_batches.with_raw_response.retrieve(
batch_id="batch_id",
vector_store_id="vector_store_id",
@@ -315,7 +321,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncLlamaStackClient)
assert_matches_type(VectorStoreFileBatches, file_batch, path=["response"])
@parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_retrieve(self, async_client: AsyncOgxClient) -> None:
async with async_client.vector_stores.file_batches.with_streaming_response.retrieve(
batch_id="batch_id",
vector_store_id="vector_store_id",
@@ -329,7 +335,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackCl
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_path_params_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_path_params_retrieve(self, async_client: AsyncOgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
await async_client.vector_stores.file_batches.with_raw_response.retrieve(
batch_id="batch_id",
@@ -343,7 +349,7 @@ async def test_path_params_retrieve(self, async_client: AsyncLlamaStackClient) -
)
@parametrize
- async def test_method_cancel(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_cancel(self, async_client: AsyncOgxClient) -> None:
file_batch = await async_client.vector_stores.file_batches.cancel(
batch_id="batch_id",
vector_store_id="vector_store_id",
@@ -351,7 +357,7 @@ async def test_method_cancel(self, async_client: AsyncLlamaStackClient) -> None:
assert_matches_type(VectorStoreFileBatches, file_batch, path=["response"])
@parametrize
- async def test_raw_response_cancel(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_cancel(self, async_client: AsyncOgxClient) -> None:
response = await async_client.vector_stores.file_batches.with_raw_response.cancel(
batch_id="batch_id",
vector_store_id="vector_store_id",
@@ -363,7 +369,7 @@ async def test_raw_response_cancel(self, async_client: AsyncLlamaStackClient) ->
assert_matches_type(VectorStoreFileBatches, file_batch, path=["response"])
@parametrize
- async def test_streaming_response_cancel(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_cancel(self, async_client: AsyncOgxClient) -> None:
async with async_client.vector_stores.file_batches.with_streaming_response.cancel(
batch_id="batch_id",
vector_store_id="vector_store_id",
@@ -377,7 +383,7 @@ async def test_streaming_response_cancel(self, async_client: AsyncLlamaStackClie
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_path_params_cancel(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_path_params_cancel(self, async_client: AsyncOgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
await async_client.vector_stores.file_batches.with_raw_response.cancel(
batch_id="batch_id",
@@ -391,7 +397,7 @@ async def test_path_params_cancel(self, async_client: AsyncLlamaStackClient) ->
)
@parametrize
- async def test_method_list_files(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_list_files(self, async_client: AsyncOgxClient) -> None:
file_batch = await async_client.vector_stores.file_batches.list_files(
batch_id="batch_id",
vector_store_id="vector_store_id",
@@ -399,7 +405,7 @@ async def test_method_list_files(self, async_client: AsyncLlamaStackClient) -> N
assert_matches_type(AsyncOpenAICursorPage[VectorStoreFile], file_batch, path=["response"])
@parametrize
- async def test_method_list_files_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_list_files_with_all_params(self, async_client: AsyncOgxClient) -> None:
file_batch = await async_client.vector_stores.file_batches.list_files(
batch_id="batch_id",
vector_store_id="vector_store_id",
@@ -412,7 +418,7 @@ async def test_method_list_files_with_all_params(self, async_client: AsyncLlamaS
assert_matches_type(AsyncOpenAICursorPage[VectorStoreFile], file_batch, path=["response"])
@parametrize
- async def test_raw_response_list_files(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_list_files(self, async_client: AsyncOgxClient) -> None:
response = await async_client.vector_stores.file_batches.with_raw_response.list_files(
batch_id="batch_id",
vector_store_id="vector_store_id",
@@ -424,7 +430,7 @@ async def test_raw_response_list_files(self, async_client: AsyncLlamaStackClient
assert_matches_type(AsyncOpenAICursorPage[VectorStoreFile], file_batch, path=["response"])
@parametrize
- async def test_streaming_response_list_files(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_list_files(self, async_client: AsyncOgxClient) -> None:
async with async_client.vector_stores.file_batches.with_streaming_response.list_files(
batch_id="batch_id",
vector_store_id="vector_store_id",
@@ -438,7 +444,7 @@ async def test_streaming_response_list_files(self, async_client: AsyncLlamaStack
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_path_params_list_files(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_path_params_list_files(self, async_client: AsyncOgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
await async_client.vector_stores.file_batches.with_raw_response.list_files(
batch_id="batch_id",
diff --git a/tests/api_resources/vector_stores/test_files.py b/tests/api_resources/vector_stores/test_files.py
index 9a0e94b6..a2c927a0 100644
--- a/tests/api_resources/vector_stores/test_files.py
+++ b/tests/api_resources/vector_stores/test_files.py
@@ -13,10 +13,10 @@
import pytest
+from ogx_client import OgxClient, AsyncOgxClient
from tests.utils import assert_matches_type
-from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client.pagination import SyncOpenAICursorPage, AsyncOpenAICursorPage
-from llama_stack_client.types.vector_stores import (
+from ogx_client.pagination import SyncOpenAICursorPage, AsyncOpenAICursorPage
+from ogx_client.types.vector_stores import (
VectorStoreFile,
FileDeleteResponse,
FileContentResponse,
@@ -29,7 +29,7 @@ class TestFiles:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@parametrize
- def test_method_create(self, client: LlamaStackClient) -> None:
+ def test_method_create(self, client: OgxClient) -> None:
file = client.vector_stores.files.create(
vector_store_id="vector_store_id",
file_id="file_id",
@@ -37,17 +37,17 @@ def test_method_create(self, client: LlamaStackClient) -> None:
assert_matches_type(VectorStoreFile, file, path=["response"])
@parametrize
- def test_method_create_with_all_params(self, client: LlamaStackClient) -> None:
+ def test_method_create_with_all_params(self, client: OgxClient) -> None:
file = client.vector_stores.files.create(
vector_store_id="vector_store_id",
file_id="file_id",
- attributes={"foo": "bar"},
+ attributes={"foo": "string"},
chunking_strategy={"type": "auto"},
)
assert_matches_type(VectorStoreFile, file, path=["response"])
@parametrize
- def test_raw_response_create(self, client: LlamaStackClient) -> None:
+ def test_raw_response_create(self, client: OgxClient) -> None:
response = client.vector_stores.files.with_raw_response.create(
vector_store_id="vector_store_id",
file_id="file_id",
@@ -59,7 +59,7 @@ def test_raw_response_create(self, client: LlamaStackClient) -> None:
assert_matches_type(VectorStoreFile, file, path=["response"])
@parametrize
- def test_streaming_response_create(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_create(self, client: OgxClient) -> None:
with client.vector_stores.files.with_streaming_response.create(
vector_store_id="vector_store_id",
file_id="file_id",
@@ -73,7 +73,7 @@ def test_streaming_response_create(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_path_params_create(self, client: LlamaStackClient) -> None:
+ def test_path_params_create(self, client: OgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
client.vector_stores.files.with_raw_response.create(
vector_store_id="",
@@ -81,7 +81,7 @@ def test_path_params_create(self, client: LlamaStackClient) -> None:
)
@parametrize
- def test_method_retrieve(self, client: LlamaStackClient) -> None:
+ def test_method_retrieve(self, client: OgxClient) -> None:
file = client.vector_stores.files.retrieve(
file_id="file_id",
vector_store_id="vector_store_id",
@@ -89,7 +89,7 @@ def test_method_retrieve(self, client: LlamaStackClient) -> None:
assert_matches_type(VectorStoreFile, file, path=["response"])
@parametrize
- def test_raw_response_retrieve(self, client: LlamaStackClient) -> None:
+ def test_raw_response_retrieve(self, client: OgxClient) -> None:
response = client.vector_stores.files.with_raw_response.retrieve(
file_id="file_id",
vector_store_id="vector_store_id",
@@ -101,7 +101,7 @@ def test_raw_response_retrieve(self, client: LlamaStackClient) -> None:
assert_matches_type(VectorStoreFile, file, path=["response"])
@parametrize
- def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_retrieve(self, client: OgxClient) -> None:
with client.vector_stores.files.with_streaming_response.retrieve(
file_id="file_id",
vector_store_id="vector_store_id",
@@ -115,7 +115,7 @@ def test_streaming_response_retrieve(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_path_params_retrieve(self, client: LlamaStackClient) -> None:
+ def test_path_params_retrieve(self, client: OgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
client.vector_stores.files.with_raw_response.retrieve(
file_id="file_id",
@@ -129,7 +129,7 @@ def test_path_params_retrieve(self, client: LlamaStackClient) -> None:
)
@parametrize
- def test_method_update(self, client: LlamaStackClient) -> None:
+ def test_method_update(self, client: OgxClient) -> None:
file = client.vector_stores.files.update(
file_id="file_id",
vector_store_id="vector_store_id",
@@ -138,7 +138,7 @@ def test_method_update(self, client: LlamaStackClient) -> None:
assert_matches_type(VectorStoreFile, file, path=["response"])
@parametrize
- def test_raw_response_update(self, client: LlamaStackClient) -> None:
+ def test_raw_response_update(self, client: OgxClient) -> None:
response = client.vector_stores.files.with_raw_response.update(
file_id="file_id",
vector_store_id="vector_store_id",
@@ -151,7 +151,7 @@ def test_raw_response_update(self, client: LlamaStackClient) -> None:
assert_matches_type(VectorStoreFile, file, path=["response"])
@parametrize
- def test_streaming_response_update(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_update(self, client: OgxClient) -> None:
with client.vector_stores.files.with_streaming_response.update(
file_id="file_id",
vector_store_id="vector_store_id",
@@ -166,7 +166,7 @@ def test_streaming_response_update(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_path_params_update(self, client: LlamaStackClient) -> None:
+ def test_path_params_update(self, client: OgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
client.vector_stores.files.with_raw_response.update(
file_id="file_id",
@@ -182,26 +182,26 @@ def test_path_params_update(self, client: LlamaStackClient) -> None:
)
@parametrize
- def test_method_list(self, client: LlamaStackClient) -> None:
+ def test_method_list(self, client: OgxClient) -> None:
file = client.vector_stores.files.list(
vector_store_id="vector_store_id",
)
assert_matches_type(SyncOpenAICursorPage[VectorStoreFile], file, path=["response"])
@parametrize
- def test_method_list_with_all_params(self, client: LlamaStackClient) -> None:
+ def test_method_list_with_all_params(self, client: OgxClient) -> None:
file = client.vector_stores.files.list(
vector_store_id="vector_store_id",
after="after",
before="before",
- filter="completed",
+ filter="in_progress",
limit=1,
order="order",
)
assert_matches_type(SyncOpenAICursorPage[VectorStoreFile], file, path=["response"])
@parametrize
- def test_raw_response_list(self, client: LlamaStackClient) -> None:
+ def test_raw_response_list(self, client: OgxClient) -> None:
response = client.vector_stores.files.with_raw_response.list(
vector_store_id="vector_store_id",
)
@@ -212,7 +212,7 @@ def test_raw_response_list(self, client: LlamaStackClient) -> None:
assert_matches_type(SyncOpenAICursorPage[VectorStoreFile], file, path=["response"])
@parametrize
- def test_streaming_response_list(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_list(self, client: OgxClient) -> None:
with client.vector_stores.files.with_streaming_response.list(
vector_store_id="vector_store_id",
) as response:
@@ -225,14 +225,14 @@ def test_streaming_response_list(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_path_params_list(self, client: LlamaStackClient) -> None:
+ def test_path_params_list(self, client: OgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
client.vector_stores.files.with_raw_response.list(
vector_store_id="",
)
@parametrize
- def test_method_delete(self, client: LlamaStackClient) -> None:
+ def test_method_delete(self, client: OgxClient) -> None:
file = client.vector_stores.files.delete(
file_id="file_id",
vector_store_id="vector_store_id",
@@ -240,7 +240,7 @@ def test_method_delete(self, client: LlamaStackClient) -> None:
assert_matches_type(FileDeleteResponse, file, path=["response"])
@parametrize
- def test_raw_response_delete(self, client: LlamaStackClient) -> None:
+ def test_raw_response_delete(self, client: OgxClient) -> None:
response = client.vector_stores.files.with_raw_response.delete(
file_id="file_id",
vector_store_id="vector_store_id",
@@ -252,7 +252,7 @@ def test_raw_response_delete(self, client: LlamaStackClient) -> None:
assert_matches_type(FileDeleteResponse, file, path=["response"])
@parametrize
- def test_streaming_response_delete(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_delete(self, client: OgxClient) -> None:
with client.vector_stores.files.with_streaming_response.delete(
file_id="file_id",
vector_store_id="vector_store_id",
@@ -266,7 +266,7 @@ def test_streaming_response_delete(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_path_params_delete(self, client: LlamaStackClient) -> None:
+ def test_path_params_delete(self, client: OgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
client.vector_stores.files.with_raw_response.delete(
file_id="file_id",
@@ -280,7 +280,7 @@ def test_path_params_delete(self, client: LlamaStackClient) -> None:
)
@parametrize
- def test_method_content(self, client: LlamaStackClient) -> None:
+ def test_method_content(self, client: OgxClient) -> None:
file = client.vector_stores.files.content(
file_id="file_id",
vector_store_id="vector_store_id",
@@ -288,7 +288,7 @@ def test_method_content(self, client: LlamaStackClient) -> None:
assert_matches_type(FileContentResponse, file, path=["response"])
@parametrize
- def test_method_content_with_all_params(self, client: LlamaStackClient) -> None:
+ def test_method_content_with_all_params(self, client: OgxClient) -> None:
file = client.vector_stores.files.content(
file_id="file_id",
vector_store_id="vector_store_id",
@@ -298,7 +298,7 @@ def test_method_content_with_all_params(self, client: LlamaStackClient) -> None:
assert_matches_type(FileContentResponse, file, path=["response"])
@parametrize
- def test_raw_response_content(self, client: LlamaStackClient) -> None:
+ def test_raw_response_content(self, client: OgxClient) -> None:
response = client.vector_stores.files.with_raw_response.content(
file_id="file_id",
vector_store_id="vector_store_id",
@@ -310,7 +310,7 @@ def test_raw_response_content(self, client: LlamaStackClient) -> None:
assert_matches_type(FileContentResponse, file, path=["response"])
@parametrize
- def test_streaming_response_content(self, client: LlamaStackClient) -> None:
+ def test_streaming_response_content(self, client: OgxClient) -> None:
with client.vector_stores.files.with_streaming_response.content(
file_id="file_id",
vector_store_id="vector_store_id",
@@ -324,7 +324,7 @@ def test_streaming_response_content(self, client: LlamaStackClient) -> None:
assert cast(Any, response.is_closed) is True
@parametrize
- def test_path_params_content(self, client: LlamaStackClient) -> None:
+ def test_path_params_content(self, client: OgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
client.vector_stores.files.with_raw_response.content(
file_id="file_id",
@@ -344,7 +344,7 @@ class TestAsyncFiles:
)
@parametrize
- async def test_method_create(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_create(self, async_client: AsyncOgxClient) -> None:
file = await async_client.vector_stores.files.create(
vector_store_id="vector_store_id",
file_id="file_id",
@@ -352,17 +352,17 @@ async def test_method_create(self, async_client: AsyncLlamaStackClient) -> None:
assert_matches_type(VectorStoreFile, file, path=["response"])
@parametrize
- async def test_method_create_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_create_with_all_params(self, async_client: AsyncOgxClient) -> None:
file = await async_client.vector_stores.files.create(
vector_store_id="vector_store_id",
file_id="file_id",
- attributes={"foo": "bar"},
+ attributes={"foo": "string"},
chunking_strategy={"type": "auto"},
)
assert_matches_type(VectorStoreFile, file, path=["response"])
@parametrize
- async def test_raw_response_create(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_create(self, async_client: AsyncOgxClient) -> None:
response = await async_client.vector_stores.files.with_raw_response.create(
vector_store_id="vector_store_id",
file_id="file_id",
@@ -374,7 +374,7 @@ async def test_raw_response_create(self, async_client: AsyncLlamaStackClient) ->
assert_matches_type(VectorStoreFile, file, path=["response"])
@parametrize
- async def test_streaming_response_create(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_create(self, async_client: AsyncOgxClient) -> None:
async with async_client.vector_stores.files.with_streaming_response.create(
vector_store_id="vector_store_id",
file_id="file_id",
@@ -388,7 +388,7 @@ async def test_streaming_response_create(self, async_client: AsyncLlamaStackClie
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_path_params_create(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_path_params_create(self, async_client: AsyncOgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
await async_client.vector_stores.files.with_raw_response.create(
vector_store_id="",
@@ -396,7 +396,7 @@ async def test_path_params_create(self, async_client: AsyncLlamaStackClient) ->
)
@parametrize
- async def test_method_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_retrieve(self, async_client: AsyncOgxClient) -> None:
file = await async_client.vector_stores.files.retrieve(
file_id="file_id",
vector_store_id="vector_store_id",
@@ -404,7 +404,7 @@ async def test_method_retrieve(self, async_client: AsyncLlamaStackClient) -> Non
assert_matches_type(VectorStoreFile, file, path=["response"])
@parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_retrieve(self, async_client: AsyncOgxClient) -> None:
response = await async_client.vector_stores.files.with_raw_response.retrieve(
file_id="file_id",
vector_store_id="vector_store_id",
@@ -416,7 +416,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncLlamaStackClient)
assert_matches_type(VectorStoreFile, file, path=["response"])
@parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_retrieve(self, async_client: AsyncOgxClient) -> None:
async with async_client.vector_stores.files.with_streaming_response.retrieve(
file_id="file_id",
vector_store_id="vector_store_id",
@@ -430,7 +430,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncLlamaStackCl
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_path_params_retrieve(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_path_params_retrieve(self, async_client: AsyncOgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
await async_client.vector_stores.files.with_raw_response.retrieve(
file_id="file_id",
@@ -444,7 +444,7 @@ async def test_path_params_retrieve(self, async_client: AsyncLlamaStackClient) -
)
@parametrize
- async def test_method_update(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_update(self, async_client: AsyncOgxClient) -> None:
file = await async_client.vector_stores.files.update(
file_id="file_id",
vector_store_id="vector_store_id",
@@ -453,7 +453,7 @@ async def test_method_update(self, async_client: AsyncLlamaStackClient) -> None:
assert_matches_type(VectorStoreFile, file, path=["response"])
@parametrize
- async def test_raw_response_update(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_update(self, async_client: AsyncOgxClient) -> None:
response = await async_client.vector_stores.files.with_raw_response.update(
file_id="file_id",
vector_store_id="vector_store_id",
@@ -466,7 +466,7 @@ async def test_raw_response_update(self, async_client: AsyncLlamaStackClient) ->
assert_matches_type(VectorStoreFile, file, path=["response"])
@parametrize
- async def test_streaming_response_update(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_update(self, async_client: AsyncOgxClient) -> None:
async with async_client.vector_stores.files.with_streaming_response.update(
file_id="file_id",
vector_store_id="vector_store_id",
@@ -481,7 +481,7 @@ async def test_streaming_response_update(self, async_client: AsyncLlamaStackClie
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_path_params_update(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_path_params_update(self, async_client: AsyncOgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
await async_client.vector_stores.files.with_raw_response.update(
file_id="file_id",
@@ -497,26 +497,26 @@ async def test_path_params_update(self, async_client: AsyncLlamaStackClient) ->
)
@parametrize
- async def test_method_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_list(self, async_client: AsyncOgxClient) -> None:
file = await async_client.vector_stores.files.list(
vector_store_id="vector_store_id",
)
assert_matches_type(AsyncOpenAICursorPage[VectorStoreFile], file, path=["response"])
@parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_list_with_all_params(self, async_client: AsyncOgxClient) -> None:
file = await async_client.vector_stores.files.list(
vector_store_id="vector_store_id",
after="after",
before="before",
- filter="completed",
+ filter="in_progress",
limit=1,
order="order",
)
assert_matches_type(AsyncOpenAICursorPage[VectorStoreFile], file, path=["response"])
@parametrize
- async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_list(self, async_client: AsyncOgxClient) -> None:
response = await async_client.vector_stores.files.with_raw_response.list(
vector_store_id="vector_store_id",
)
@@ -527,7 +527,7 @@ async def test_raw_response_list(self, async_client: AsyncLlamaStackClient) -> N
assert_matches_type(AsyncOpenAICursorPage[VectorStoreFile], file, path=["response"])
@parametrize
- async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_list(self, async_client: AsyncOgxClient) -> None:
async with async_client.vector_stores.files.with_streaming_response.list(
vector_store_id="vector_store_id",
) as response:
@@ -540,14 +540,14 @@ async def test_streaming_response_list(self, async_client: AsyncLlamaStackClient
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_path_params_list(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_path_params_list(self, async_client: AsyncOgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
await async_client.vector_stores.files.with_raw_response.list(
vector_store_id="",
)
@parametrize
- async def test_method_delete(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_delete(self, async_client: AsyncOgxClient) -> None:
file = await async_client.vector_stores.files.delete(
file_id="file_id",
vector_store_id="vector_store_id",
@@ -555,7 +555,7 @@ async def test_method_delete(self, async_client: AsyncLlamaStackClient) -> None:
assert_matches_type(FileDeleteResponse, file, path=["response"])
@parametrize
- async def test_raw_response_delete(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_delete(self, async_client: AsyncOgxClient) -> None:
response = await async_client.vector_stores.files.with_raw_response.delete(
file_id="file_id",
vector_store_id="vector_store_id",
@@ -567,7 +567,7 @@ async def test_raw_response_delete(self, async_client: AsyncLlamaStackClient) ->
assert_matches_type(FileDeleteResponse, file, path=["response"])
@parametrize
- async def test_streaming_response_delete(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_delete(self, async_client: AsyncOgxClient) -> None:
async with async_client.vector_stores.files.with_streaming_response.delete(
file_id="file_id",
vector_store_id="vector_store_id",
@@ -581,7 +581,7 @@ async def test_streaming_response_delete(self, async_client: AsyncLlamaStackClie
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_path_params_delete(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_path_params_delete(self, async_client: AsyncOgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
await async_client.vector_stores.files.with_raw_response.delete(
file_id="file_id",
@@ -595,7 +595,7 @@ async def test_path_params_delete(self, async_client: AsyncLlamaStackClient) ->
)
@parametrize
- async def test_method_content(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_content(self, async_client: AsyncOgxClient) -> None:
file = await async_client.vector_stores.files.content(
file_id="file_id",
vector_store_id="vector_store_id",
@@ -603,7 +603,7 @@ async def test_method_content(self, async_client: AsyncLlamaStackClient) -> None
assert_matches_type(FileContentResponse, file, path=["response"])
@parametrize
- async def test_method_content_with_all_params(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_method_content_with_all_params(self, async_client: AsyncOgxClient) -> None:
file = await async_client.vector_stores.files.content(
file_id="file_id",
vector_store_id="vector_store_id",
@@ -613,7 +613,7 @@ async def test_method_content_with_all_params(self, async_client: AsyncLlamaStac
assert_matches_type(FileContentResponse, file, path=["response"])
@parametrize
- async def test_raw_response_content(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_content(self, async_client: AsyncOgxClient) -> None:
response = await async_client.vector_stores.files.with_raw_response.content(
file_id="file_id",
vector_store_id="vector_store_id",
@@ -625,7 +625,7 @@ async def test_raw_response_content(self, async_client: AsyncLlamaStackClient) -
assert_matches_type(FileContentResponse, file, path=["response"])
@parametrize
- async def test_streaming_response_content(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_streaming_response_content(self, async_client: AsyncOgxClient) -> None:
async with async_client.vector_stores.files.with_streaming_response.content(
file_id="file_id",
vector_store_id="vector_store_id",
@@ -639,7 +639,7 @@ async def test_streaming_response_content(self, async_client: AsyncLlamaStackCli
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_path_params_content(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_path_params_content(self, async_client: AsyncOgxClient) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
await async_client.vector_stores.files.with_raw_response.content(
file_id="file_id",
diff --git a/tests/conftest.py b/tests/conftest.py
index 58374a34..7b9e895d 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -16,15 +16,15 @@
import pytest
from pytest_asyncio import is_async_test
-from llama_stack_client import LlamaStackClient, DefaultAioHttpClient, AsyncLlamaStackClient
-from llama_stack_client._utils import is_dict
+from ogx_client import OgxClient, AsyncOgxClient, DefaultAioHttpClient
+from ogx_client._utils import is_dict
if TYPE_CHECKING:
from _pytest.fixtures import FixtureRequest # pyright: ignore[reportPrivateImportUsage]
pytest.register_assert_rewrite("tests.utils")
-logging.getLogger("llama_stack_client").setLevel(logging.DEBUG)
+logging.getLogger("ogx_client").setLevel(logging.DEBUG)
# automatically add `pytest.mark.asyncio()` to all of our async tests
@@ -53,17 +53,17 @@ def pytest_collection_modifyitems(items: list[pytest.Function]) -> None:
@pytest.fixture(scope="session")
-def client(request: FixtureRequest) -> Iterator[LlamaStackClient]:
+def client(request: FixtureRequest) -> Iterator[OgxClient]:
strict = getattr(request, "param", True)
if not isinstance(strict, bool):
raise TypeError(f"Unexpected fixture parameter type {type(strict)}, expected {bool}")
- with LlamaStackClient(base_url=base_url, _strict_response_validation=strict) as client:
+ with OgxClient(base_url=base_url, _strict_response_validation=strict) as client:
yield client
@pytest.fixture(scope="session")
-async def async_client(request: FixtureRequest) -> AsyncIterator[AsyncLlamaStackClient]:
+async def async_client(request: FixtureRequest) -> AsyncIterator[AsyncOgxClient]:
param = getattr(request, "param", True)
# defaults
@@ -82,7 +82,5 @@ async def async_client(request: FixtureRequest) -> AsyncIterator[AsyncLlamaStack
else:
raise TypeError(f"Unexpected fixture parameter type {type(param)}, expected bool or dict")
- async with AsyncLlamaStackClient(
- base_url=base_url, _strict_response_validation=strict, http_client=http_client
- ) as client:
+ async with AsyncOgxClient(base_url=base_url, _strict_response_validation=strict, http_client=http_client) as client:
yield client
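# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the diff): how a test consumes the
# session-scoped `client` fixture defined in conftest.py above. Indirect
# parametrization runs each test twice, once with
# `_strict_response_validation=False` ("loose") and once with True ("strict"),
# matching the `parametrize` markers used throughout the test files in this
# diff.
import pytest

from ogx_client import OgxClient

parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])


@parametrize
def test_smoke(client: OgxClient) -> None:
    # `client` is yielded by the conftest fixture with the requested strictness.
    assert client.max_retries >= 0
# ---------------------------------------------------------------------------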
diff --git a/tests/test_client.py b/tests/test_client.py
index bd48b0cf..fb1ebc8e 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -25,13 +25,13 @@
from respx import MockRouter
from pydantic import ValidationError
-from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient, APIResponseValidationError
-from llama_stack_client._types import Omit
-from llama_stack_client._utils import asyncify
-from llama_stack_client._models import BaseModel, FinalRequestOptions
-from llama_stack_client._streaming import Stream, AsyncStream
-from llama_stack_client._exceptions import APIStatusError, APITimeoutError, APIResponseValidationError
-from llama_stack_client._base_client import (
+from ogx_client import OgxClient, AsyncOgxClient, APIResponseValidationError
+from ogx_client._types import Omit
+from ogx_client._utils import asyncify
+from ogx_client._models import BaseModel, FinalRequestOptions
+from ogx_client._streaming import Stream, AsyncStream
+from ogx_client._exceptions import APIStatusError, APITimeoutError, APIResponseValidationError
+from ogx_client._base_client import (
DEFAULT_TIMEOUT,
HTTPX_DEFAULT_TIMEOUT,
BaseClient,
@@ -109,7 +109,7 @@ async def _make_async_iterator(iterable: Iterable[T], counter: Optional[Counter]
yield item
-def _get_open_connections(client: LlamaStackClient | AsyncLlamaStackClient) -> int:
+def _get_open_connections(client: OgxClient | AsyncOgxClient) -> int:
transport = client._client._transport
assert isinstance(transport, httpx.HTTPTransport) or isinstance(transport, httpx.AsyncHTTPTransport)
@@ -117,9 +117,9 @@ def _get_open_connections(client: LlamaStackClient | AsyncLlamaStackClient) -> i
return len(pool._requests)
-class TestLlamaStackClient:
+class TestOgxClient:
@pytest.mark.respx(base_url=base_url)
- def test_raw_response(self, respx_mock: MockRouter, client: LlamaStackClient) -> None:
+ def test_raw_response(self, respx_mock: MockRouter, client: OgxClient) -> None:
respx_mock.post("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
response = client.post("/foo", cast_to=httpx.Response)
@@ -128,7 +128,7 @@ def test_raw_response(self, respx_mock: MockRouter, client: LlamaStackClient) ->
assert response.json() == {"foo": "bar"}
@pytest.mark.respx(base_url=base_url)
- def test_raw_response_for_binary(self, respx_mock: MockRouter, client: LlamaStackClient) -> None:
+ def test_raw_response_for_binary(self, respx_mock: MockRouter, client: OgxClient) -> None:
respx_mock.post("/foo").mock(
return_value=httpx.Response(200, headers={"Content-Type": "application/binary"}, content='{"foo": "bar"}')
)
@@ -138,11 +138,11 @@ def test_raw_response_for_binary(self, respx_mock: MockRouter, client: LlamaStac
assert isinstance(response, httpx.Response)
assert response.json() == {"foo": "bar"}
- def test_copy(self, client: LlamaStackClient) -> None:
+ def test_copy(self, client: OgxClient) -> None:
copied = client.copy()
assert id(copied) != id(client)
- def test_copy_default_options(self, client: LlamaStackClient) -> None:
+ def test_copy_default_options(self, client: OgxClient) -> None:
# options that have a default are overridden correctly
copied = client.copy(max_retries=7)
assert copied.max_retries == 7
@@ -159,7 +159,7 @@ def test_copy_default_options(self, client: LlamaStackClient) -> None:
assert isinstance(client.timeout, httpx.Timeout)
def test_copy_default_headers(self) -> None:
- client = LlamaStackClient(base_url=base_url, _strict_response_validation=True, default_headers={"X-Foo": "bar"})
+ client = OgxClient(base_url=base_url, _strict_response_validation=True, default_headers={"X-Foo": "bar"})
assert client.default_headers["X-Foo"] == "bar"
# does not override the already given value when not specified
@@ -192,7 +192,7 @@ def test_copy_default_headers(self) -> None:
client.close()
def test_copy_default_query(self) -> None:
- client = LlamaStackClient(base_url=base_url, _strict_response_validation=True, default_query={"foo": "bar"})
+ client = OgxClient(base_url=base_url, _strict_response_validation=True, default_query={"foo": "bar"})
assert _get_params(client)["foo"] == "bar"
# does not override the already given value when not specified
@@ -227,7 +227,7 @@ def test_copy_default_query(self) -> None:
client.close()
- def test_copy_signature(self, client: LlamaStackClient) -> None:
+ def test_copy_signature(self, client: OgxClient) -> None:
# ensure the same parameters that can be passed to the client are defined in the `.copy()` method
init_signature = inspect.signature(
# mypy doesn't like that we access the `__init__` property.
@@ -244,7 +244,7 @@ def test_copy_signature(self, client: LlamaStackClient) -> None:
assert copy_param is not None, f"copy() signature is missing the {name} param"
@pytest.mark.skipif(sys.version_info >= (3, 10), reason="fails because of a memory leak that started from 3.12")
- def test_copy_build_request(self, client: LlamaStackClient) -> None:
+ def test_copy_build_request(self, client: OgxClient) -> None:
options = FinalRequestOptions(method="get", url="/foo")
def build_request(options: FinalRequestOptions) -> None:
@@ -284,10 +284,10 @@ def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.Statistic
# to_raw_response_wrapper leaks through the @functools.wraps() decorator.
#
# removing the decorator fixes the leak for reasons we don't understand.
- "llama_stack_client/_legacy_response.py",
- "llama_stack_client/_response.py",
+ "ogx_client/_legacy_response.py",
+ "ogx_client/_response.py",
# pydantic.BaseModel.model_dump || pydantic.BaseModel.dict leak memory for some reason.
- "llama_stack_client/_compat.py",
+ "ogx_client/_compat.py",
# Standard library leaks we don't care about.
"/logging/__init__.py",
]
@@ -306,7 +306,7 @@ def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.Statistic
print(frame)
raise AssertionError()
- def test_request_timeout(self, client: LlamaStackClient) -> None:
+ def test_request_timeout(self, client: OgxClient) -> None:
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore
assert timeout == DEFAULT_TIMEOUT
@@ -316,7 +316,7 @@ def test_request_timeout(self, client: LlamaStackClient) -> None:
assert timeout == httpx.Timeout(100.0)
def test_client_timeout_option(self) -> None:
- client = LlamaStackClient(base_url=base_url, _strict_response_validation=True, timeout=httpx.Timeout(0))
+ client = OgxClient(base_url=base_url, _strict_response_validation=True, timeout=httpx.Timeout(0))
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore
@@ -327,7 +327,7 @@ def test_client_timeout_option(self) -> None:
def test_http_client_timeout_option(self) -> None:
# custom timeout given to the httpx client should be used
with httpx.Client(timeout=None) as http_client:
- client = LlamaStackClient(base_url=base_url, _strict_response_validation=True, http_client=http_client)
+ client = OgxClient(base_url=base_url, _strict_response_validation=True, http_client=http_client)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore
@@ -337,7 +337,7 @@ def test_http_client_timeout_option(self) -> None:
# no timeout given to the httpx client should not use the httpx default
with httpx.Client() as http_client:
- client = LlamaStackClient(base_url=base_url, _strict_response_validation=True, http_client=http_client)
+ client = OgxClient(base_url=base_url, _strict_response_validation=True, http_client=http_client)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore
@@ -347,7 +347,7 @@ def test_http_client_timeout_option(self) -> None:
# explicitly passing the default timeout currently results in it being ignored
with httpx.Client(timeout=HTTPX_DEFAULT_TIMEOUT) as http_client:
- client = LlamaStackClient(base_url=base_url, _strict_response_validation=True, http_client=http_client)
+ client = OgxClient(base_url=base_url, _strict_response_validation=True, http_client=http_client)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore
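The three `http_client` timeout cases above encode a non-obvious rule: a timeout configured on the user's httpx client is honored, but httpx's *default* timeout is treated as "never configured", so the SDK default wins even when the user passes it explicitly. A minimal sketch of that heuristic for illustration only — `effective_timeout` and `SDK_DEFAULT_TIMEOUT` are made-up names, not SDK API:

```python
import httpx

HTTPX_DEFAULT_TIMEOUT = httpx.Timeout(timeout=5.0)  # httpx's built-in default
SDK_DEFAULT_TIMEOUT = httpx.Timeout(60.0)  # placeholder value for this sketch

def effective_timeout(http_client: httpx.Client) -> httpx.Timeout:
    # The httpx default is indistinguishable from "unset", which is why the
    # last test case documents that passing it explicitly is ignored.
    if http_client.timeout == HTTPX_DEFAULT_TIMEOUT:
        return SDK_DEFAULT_TIMEOUT
    return http_client.timeout

print(effective_timeout(httpx.Client(timeout=None)))  # Timeout(timeout=None) -> honored
print(effective_timeout(httpx.Client()))              # falls back to the SDK default
```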
@@ -358,19 +358,15 @@ def test_http_client_timeout_option(self) -> None:
async def test_invalid_http_client(self) -> None:
with pytest.raises(TypeError, match="Invalid `http_client` arg"):
async with httpx.AsyncClient() as http_client:
- LlamaStackClient(
- base_url=base_url, _strict_response_validation=True, http_client=cast(Any, http_client)
- )
+ OgxClient(base_url=base_url, _strict_response_validation=True, http_client=cast(Any, http_client))
def test_default_headers_option(self) -> None:
- test_client = LlamaStackClient(
- base_url=base_url, _strict_response_validation=True, default_headers={"X-Foo": "bar"}
- )
+ test_client = OgxClient(base_url=base_url, _strict_response_validation=True, default_headers={"X-Foo": "bar"})
request = test_client._build_request(FinalRequestOptions(method="get", url="/foo"))
assert request.headers.get("x-foo") == "bar"
assert request.headers.get("x-stainless-lang") == "python"
- test_client2 = LlamaStackClient(
+ test_client2 = OgxClient(
base_url=base_url,
_strict_response_validation=True,
default_headers={
@@ -386,9 +382,7 @@ def test_default_headers_option(self) -> None:
test_client2.close()
def test_default_query_option(self) -> None:
- client = LlamaStackClient(
- base_url=base_url, _strict_response_validation=True, default_query={"query_param": "bar"}
- )
+ client = OgxClient(base_url=base_url, _strict_response_validation=True, default_query={"query_param": "bar"})
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
url = httpx.URL(request.url)
assert dict(url.params) == {"query_param": "bar"}
@@ -405,7 +399,31 @@ def test_default_query_option(self) -> None:
client.close()
- def test_request_extra_json(self, client: LlamaStackClient) -> None:
+ def test_hardcoded_query_params_in_url(self, client: OgxClient) -> None:
+ request = client._build_request(FinalRequestOptions(method="get", url="/foo?beta=true"))
+ url = httpx.URL(request.url)
+ assert dict(url.params) == {"beta": "true"}
+
+ request = client._build_request(
+ FinalRequestOptions(
+ method="get",
+ url="/foo?beta=true",
+ params={"limit": "10", "page": "abc"},
+ )
+ )
+ url = httpx.URL(request.url)
+ assert dict(url.params) == {"beta": "true", "limit": "10", "page": "abc"}
+
+ request = client._build_request(
+ FinalRequestOptions(
+ method="get",
+ url="/files/a%2Fb?beta=true",
+ params={"limit": "10"},
+ )
+ )
+ assert request.url.raw_path == b"/files/a%2Fb?beta=true&limit=10"
+
+ def test_request_extra_json(self, client: OgxClient) -> None:
request = client._build_request(
FinalRequestOptions(
method="post",
@@ -439,7 +457,7 @@ def test_request_extra_json(self, client: LlamaStackClient) -> None:
data = json.loads(request.content.decode("utf-8"))
assert data == {"foo": "bar", "baz": None}
- def test_request_extra_headers(self, client: LlamaStackClient) -> None:
+ def test_request_extra_headers(self, client: OgxClient) -> None:
request = client._build_request(
FinalRequestOptions(
method="post",
@@ -461,7 +479,7 @@ def test_request_extra_headers(self, client: LlamaStackClient) -> None:
)
assert request.headers.get("X-Bar") == "false"
- def test_request_extra_query(self, client: LlamaStackClient) -> None:
+ def test_request_extra_query(self, client: OgxClient) -> None:
request = client._build_request(
FinalRequestOptions(
method="post",
@@ -502,7 +520,7 @@ def test_request_extra_query(self, client: LlamaStackClient) -> None:
params = dict(request.url.params)
assert params == {"foo": "2"}
- def test_multipart_repeating_array(self, client: LlamaStackClient) -> None:
+ def test_multipart_repeating_array(self, client: OgxClient) -> None:
request = client._build_request(
FinalRequestOptions.construct(
method="post",
@@ -532,7 +550,7 @@ def test_multipart_repeating_array(self, client: LlamaStackClient) -> None:
]
@pytest.mark.respx(base_url=base_url)
- def test_binary_content_upload(self, respx_mock: MockRouter, client: LlamaStackClient) -> None:
+ def test_binary_content_upload(self, respx_mock: MockRouter, client: OgxClient) -> None:
respx_mock.post("/upload").mock(side_effect=mirror_request_content)
file_content = b"Hello, this is a test file."
@@ -557,7 +575,7 @@ def mock_handler(request: httpx.Request) -> httpx.Response:
assert counter.value == 0, "the request body should not have been read"
return httpx.Response(200, content=request.read())
- with LlamaStackClient(
+ with OgxClient(
base_url=base_url,
_strict_response_validation=True,
http_client=httpx.Client(transport=MockTransport(handler=mock_handler)),
@@ -575,9 +593,7 @@ def mock_handler(request: httpx.Request) -> httpx.Response:
assert counter.value == 1
@pytest.mark.respx(base_url=base_url)
- def test_binary_content_upload_with_body_is_deprecated(
- self, respx_mock: MockRouter, client: LlamaStackClient
- ) -> None:
+ def test_binary_content_upload_with_body_is_deprecated(self, respx_mock: MockRouter, client: OgxClient) -> None:
respx_mock.post("/upload").mock(side_effect=mirror_request_content)
file_content = b"Hello, this is a test file."
@@ -597,7 +613,7 @@ def test_binary_content_upload_with_body_is_deprecated(
assert response.content == file_content
@pytest.mark.respx(base_url=base_url)
- def test_basic_union_response(self, respx_mock: MockRouter, client: LlamaStackClient) -> None:
+ def test_basic_union_response(self, respx_mock: MockRouter, client: OgxClient) -> None:
class Model1(BaseModel):
name: str
@@ -611,7 +627,7 @@ class Model2(BaseModel):
assert response.foo == "bar"
@pytest.mark.respx(base_url=base_url)
- def test_union_response_different_types(self, respx_mock: MockRouter, client: LlamaStackClient) -> None:
+ def test_union_response_different_types(self, respx_mock: MockRouter, client: OgxClient) -> None:
"""Union of objects with the same field name using a different type"""
class Model1(BaseModel):
@@ -633,9 +649,7 @@ class Model2(BaseModel):
assert response.foo == 1
@pytest.mark.respx(base_url=base_url)
- def test_non_application_json_content_type_for_json_data(
- self, respx_mock: MockRouter, client: LlamaStackClient
- ) -> None:
+ def test_non_application_json_content_type_for_json_data(self, respx_mock: MockRouter, client: OgxClient) -> None:
"""
Response that sets Content-Type to something other than application/json but returns json data
"""
@@ -656,7 +670,7 @@ class Model(BaseModel):
assert response.foo == 2
def test_base_url_setter(self) -> None:
- client = LlamaStackClient(base_url="https://example.com/from_init", _strict_response_validation=True)
+ client = OgxClient(base_url="https://example.com/from_init", _strict_response_validation=True)
assert client.base_url == "https://example.com/from_init/"
client.base_url = "https://example.com/from_setter" # type: ignore[assignment]
@@ -666,15 +680,15 @@ def test_base_url_setter(self) -> None:
client.close()
def test_base_url_env(self) -> None:
- with update_env(LLAMA_STACK_CLIENT_BASE_URL="http://localhost:5000/from/env"):
- client = LlamaStackClient(_strict_response_validation=True)
+ with update_env(OGX_CLIENT_BASE_URL="http://localhost:5000/from/env"):
+ client = OgxClient(_strict_response_validation=True)
assert client.base_url == "http://localhost:5000/from/env/"
@pytest.mark.parametrize(
"client",
[
- LlamaStackClient(base_url="http://localhost:5000/custom/path/", _strict_response_validation=True),
- LlamaStackClient(
+ OgxClient(base_url="http://localhost:5000/custom/path/", _strict_response_validation=True),
+ OgxClient(
base_url="http://localhost:5000/custom/path/",
_strict_response_validation=True,
http_client=httpx.Client(),
@@ -682,7 +696,7 @@ def test_base_url_env(self) -> None:
],
ids=["standard", "custom http client"],
)
- def test_base_url_trailing_slash(self, client: LlamaStackClient) -> None:
+ def test_base_url_trailing_slash(self, client: OgxClient) -> None:
request = client._build_request(
FinalRequestOptions(
method="post",
@@ -696,8 +710,8 @@ def test_base_url_trailing_slash(self, client: LlamaStackClient) -> None:
@pytest.mark.parametrize(
"client",
[
- LlamaStackClient(base_url="http://localhost:5000/custom/path/", _strict_response_validation=True),
- LlamaStackClient(
+ OgxClient(base_url="http://localhost:5000/custom/path/", _strict_response_validation=True),
+ OgxClient(
base_url="http://localhost:5000/custom/path/",
_strict_response_validation=True,
http_client=httpx.Client(),
@@ -705,7 +719,7 @@ def test_base_url_trailing_slash(self, client: LlamaStackClient) -> None:
],
ids=["standard", "custom http client"],
)
- def test_base_url_no_trailing_slash(self, client: LlamaStackClient) -> None:
+ def test_base_url_no_trailing_slash(self, client: OgxClient) -> None:
request = client._build_request(
FinalRequestOptions(
method="post",
@@ -719,8 +733,8 @@ def test_base_url_no_trailing_slash(self, client: LlamaStackClient) -> None:
@pytest.mark.parametrize(
"client",
[
- LlamaStackClient(base_url="http://localhost:5000/custom/path/", _strict_response_validation=True),
- LlamaStackClient(
+ OgxClient(base_url="http://localhost:5000/custom/path/", _strict_response_validation=True),
+ OgxClient(
base_url="http://localhost:5000/custom/path/",
_strict_response_validation=True,
http_client=httpx.Client(),
@@ -728,7 +742,7 @@ def test_base_url_no_trailing_slash(self, client: LlamaStackClient) -> None:
],
ids=["standard", "custom http client"],
)
- def test_absolute_request_url(self, client: LlamaStackClient) -> None:
+ def test_absolute_request_url(self, client: OgxClient) -> None:
request = client._build_request(
FinalRequestOptions(
method="post",
@@ -740,7 +754,7 @@ def test_absolute_request_url(self, client: LlamaStackClient) -> None:
client.close()
def test_copied_client_does_not_close_http(self) -> None:
- test_client = LlamaStackClient(base_url=base_url, _strict_response_validation=True)
+ test_client = OgxClient(base_url=base_url, _strict_response_validation=True)
assert not test_client.is_closed()
copied = test_client.copy()
@@ -751,7 +765,7 @@ def test_copied_client_does_not_close_http(self) -> None:
assert not test_client.is_closed()
def test_client_context_manager(self) -> None:
- test_client = LlamaStackClient(base_url=base_url, _strict_response_validation=True)
+ test_client = OgxClient(base_url=base_url, _strict_response_validation=True)
with test_client as c2:
assert c2 is test_client
assert not c2.is_closed()
@@ -759,7 +773,7 @@ def test_client_context_manager(self) -> None:
assert test_client.is_closed()
@pytest.mark.respx(base_url=base_url)
- def test_client_response_validation_error(self, respx_mock: MockRouter, client: LlamaStackClient) -> None:
+ def test_client_response_validation_error(self, respx_mock: MockRouter, client: OgxClient) -> None:
class Model(BaseModel):
foo: str
@@ -772,10 +786,10 @@ class Model(BaseModel):
def test_client_max_retries_validation(self) -> None:
with pytest.raises(TypeError, match=r"max_retries cannot be None"):
- LlamaStackClient(base_url=base_url, _strict_response_validation=True, max_retries=cast(Any, None))
+ OgxClient(base_url=base_url, _strict_response_validation=True, max_retries=cast(Any, None))
@pytest.mark.respx(base_url=base_url)
- def test_default_stream_cls(self, respx_mock: MockRouter, client: LlamaStackClient) -> None:
+ def test_default_stream_cls(self, respx_mock: MockRouter, client: OgxClient) -> None:
class Model(BaseModel):
name: str
@@ -792,12 +806,12 @@ class Model(BaseModel):
respx_mock.get("/foo").mock(return_value=httpx.Response(200, text="my-custom-format"))
- strict_client = LlamaStackClient(base_url=base_url, _strict_response_validation=True)
+ strict_client = OgxClient(base_url=base_url, _strict_response_validation=True)
with pytest.raises(APIResponseValidationError):
strict_client.get("/foo", cast_to=Model)
- non_strict_client = LlamaStackClient(base_url=base_url, _strict_response_validation=False)
+ non_strict_client = OgxClient(base_url=base_url, _strict_response_validation=False)
response = non_strict_client.get("/foo", cast_to=Model)
assert isinstance(response, str) # type: ignore[unreachable]
@@ -828,16 +842,16 @@ class Model(BaseModel):
)
@mock.patch("time.time", mock.MagicMock(return_value=1696004797))
def test_parse_retry_after_header(
- self, remaining_retries: int, retry_after: str, timeout: float, client: LlamaStackClient
+ self, remaining_retries: int, retry_after: str, timeout: float, client: OgxClient
) -> None:
headers = httpx.Headers({"retry-after": retry_after})
options = FinalRequestOptions(method="get", url="/foo", max_retries=3)
calculated = client._calculate_retry_timeout(remaining_retries, options, headers)
assert calculated == pytest.approx(timeout, 0.5 * 0.875) # pyright: ignore[reportUnknownMemberType]
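`test_parse_retry_after_header` (its parametrize table is elided in this hunk) covers both forms the `retry-after` header can take. A hedged standard-library sketch consistent with those cases — `parse_retry_after` is an illustrative helper, not the SDK's function:

```python
import email.utils
import time
from typing import Optional

def parse_retry_after(value: str) -> Optional[float]:
    try:
        return float(value)  # integer-seconds form, e.g. "20"
    except ValueError:
        pass
    try:
        # HTTP-date form; the test mocks time.time() == 1696004797 so the
        # computed delay is deterministic.
        dt = email.utils.parsedate_to_datetime(value)
    except (TypeError, ValueError):
        return None
    return dt.timestamp() - time.time()

print(parse_retry_after("20"))  # 20.0
```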
- @mock.patch("llama_stack_client._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+ @mock.patch("ogx_client._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
- def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter, client: LlamaStackClient) -> None:
+ def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter, client: OgxClient) -> None:
respx_mock.post("/v1/chat/completions").mock(side_effect=httpx.TimeoutException("Test timeout error"))
with pytest.raises(APITimeoutError):
@@ -853,9 +867,9 @@ def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter, clien
assert _get_open_connections(client) == 0
- @mock.patch("llama_stack_client._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+ @mock.patch("ogx_client._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
- def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, client: LlamaStackClient) -> None:
+ def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, client: OgxClient) -> None:
respx_mock.post("/v1/chat/completions").mock(return_value=httpx.Response(500))
with pytest.raises(APIStatusError):
@@ -871,12 +885,12 @@ def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, client
assert _get_open_connections(client) == 0
@pytest.mark.parametrize("failures_before_success", [0, 2, 4])
- @mock.patch("llama_stack_client._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+ @mock.patch("ogx_client._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
@pytest.mark.parametrize("failure_mode", ["status", "exception"])
def test_retries_taken(
self,
- client: LlamaStackClient,
+ client: OgxClient,
failures_before_success: int,
failure_mode: Literal["status", "exception"],
respx_mock: MockRouter,
@@ -910,10 +924,10 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success
@pytest.mark.parametrize("failures_before_success", [0, 2, 4])
- @mock.patch("llama_stack_client._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+ @mock.patch("ogx_client._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
def test_omit_retry_count_header(
- self, client: LlamaStackClient, failures_before_success: int, respx_mock: MockRouter
+ self, client: OgxClient, failures_before_success: int, respx_mock: MockRouter
) -> None:
client = client.with_options(max_retries=4)
@@ -942,10 +956,10 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0
@pytest.mark.parametrize("failures_before_success", [0, 2, 4])
- @mock.patch("llama_stack_client._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+ @mock.patch("ogx_client._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
def test_overwrite_retry_count_header(
- self, client: LlamaStackClient, failures_before_success: int, respx_mock: MockRouter
+ self, client: OgxClient, failures_before_success: int, respx_mock: MockRouter
) -> None:
client = client.with_options(max_retries=4)
@@ -1004,7 +1018,7 @@ def test_default_client_creation(self) -> None:
)
@pytest.mark.respx(base_url=base_url)
- def test_follow_redirects(self, respx_mock: MockRouter, client: LlamaStackClient) -> None:
+ def test_follow_redirects(self, respx_mock: MockRouter, client: OgxClient) -> None:
# Test that the default follow_redirects=True allows following redirects
respx_mock.post("/redirect").mock(
return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"})
@@ -1016,7 +1030,7 @@ def test_follow_redirects(self, respx_mock: MockRouter, client: LlamaStackClient
assert response.json() == {"status": "ok"}
@pytest.mark.respx(base_url=base_url)
- def test_follow_redirects_disabled(self, respx_mock: MockRouter, client: LlamaStackClient) -> None:
+ def test_follow_redirects_disabled(self, respx_mock: MockRouter, client: OgxClient) -> None:
# Test that follow_redirects=False prevents following redirects
respx_mock.post("/redirect").mock(
return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"})
@@ -1029,9 +1043,9 @@ def test_follow_redirects_disabled(self, respx_mock: MockRouter, client: LlamaSt
assert exc_info.value.response.headers["Location"] == f"{base_url}/redirected"
-class TestAsyncLlamaStackClient:
+class TestAsyncOgxClient:
@pytest.mark.respx(base_url=base_url)
- async def test_raw_response(self, respx_mock: MockRouter, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response(self, respx_mock: MockRouter, async_client: AsyncOgxClient) -> None:
respx_mock.post("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
response = await async_client.post("/foo", cast_to=httpx.Response)
@@ -1040,7 +1054,7 @@ async def test_raw_response(self, respx_mock: MockRouter, async_client: AsyncLla
assert response.json() == {"foo": "bar"}
@pytest.mark.respx(base_url=base_url)
- async def test_raw_response_for_binary(self, respx_mock: MockRouter, async_client: AsyncLlamaStackClient) -> None:
+ async def test_raw_response_for_binary(self, respx_mock: MockRouter, async_client: AsyncOgxClient) -> None:
respx_mock.post("/foo").mock(
return_value=httpx.Response(200, headers={"Content-Type": "application/binary"}, content='{"foo": "bar"}')
)
@@ -1050,11 +1064,11 @@ async def test_raw_response_for_binary(self, respx_mock: MockRouter, async_clien
assert isinstance(response, httpx.Response)
assert response.json() == {"foo": "bar"}
- def test_copy(self, async_client: AsyncLlamaStackClient) -> None:
+ def test_copy(self, async_client: AsyncOgxClient) -> None:
copied = async_client.copy()
assert id(copied) != id(async_client)
- def test_copy_default_options(self, async_client: AsyncLlamaStackClient) -> None:
+ def test_copy_default_options(self, async_client: AsyncOgxClient) -> None:
# options that have a default are overridden correctly
copied = async_client.copy(max_retries=7)
assert copied.max_retries == 7
@@ -1071,9 +1085,7 @@ def test_copy_default_options(self, async_client: AsyncLlamaStackClient) -> None
assert isinstance(async_client.timeout, httpx.Timeout)
async def test_copy_default_headers(self) -> None:
- client = AsyncLlamaStackClient(
- base_url=base_url, _strict_response_validation=True, default_headers={"X-Foo": "bar"}
- )
+ client = AsyncOgxClient(base_url=base_url, _strict_response_validation=True, default_headers={"X-Foo": "bar"})
assert client.default_headers["X-Foo"] == "bar"
# does not override the already given value when not specified
@@ -1106,9 +1118,7 @@ async def test_copy_default_headers(self) -> None:
await client.close()
async def test_copy_default_query(self) -> None:
- client = AsyncLlamaStackClient(
- base_url=base_url, _strict_response_validation=True, default_query={"foo": "bar"}
- )
+ client = AsyncOgxClient(base_url=base_url, _strict_response_validation=True, default_query={"foo": "bar"})
assert _get_params(client)["foo"] == "bar"
# does not override the already given value when not specified
@@ -1143,7 +1153,7 @@ async def test_copy_default_query(self) -> None:
await client.close()
- def test_copy_signature(self, async_client: AsyncLlamaStackClient) -> None:
+ def test_copy_signature(self, async_client: AsyncOgxClient) -> None:
# ensure the same parameters that can be passed to the client are defined in the `.copy()` method
init_signature = inspect.signature(
# mypy doesn't like that we access the `__init__` property.
@@ -1160,7 +1170,7 @@ def test_copy_signature(self, async_client: AsyncLlamaStackClient) -> None:
assert copy_param is not None, f"copy() signature is missing the {name} param"
@pytest.mark.skipif(sys.version_info >= (3, 10), reason="fails because of a memory leak that started from 3.12")
- def test_copy_build_request(self, async_client: AsyncLlamaStackClient) -> None:
+ def test_copy_build_request(self, async_client: AsyncOgxClient) -> None:
options = FinalRequestOptions(method="get", url="/foo")
def build_request(options: FinalRequestOptions) -> None:
@@ -1200,10 +1210,10 @@ def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.Statistic
# to_raw_response_wrapper leaks through the @functools.wraps() decorator.
#
# removing the decorator fixes the leak for reasons we don't understand.
- "llama_stack_client/_legacy_response.py",
- "llama_stack_client/_response.py",
+ "ogx_client/_legacy_response.py",
+ "ogx_client/_response.py",
# pydantic.BaseModel.model_dump || pydantic.BaseModel.dict leak memory for some reason.
- "llama_stack_client/_compat.py",
+ "ogx_client/_compat.py",
# Standard library leaks we don't care about.
"/logging/__init__.py",
]
@@ -1222,7 +1232,7 @@ def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.Statistic
print(frame)
raise AssertionError()
- async def test_request_timeout(self, async_client: AsyncLlamaStackClient) -> None:
+ async def test_request_timeout(self, async_client: AsyncOgxClient) -> None:
request = async_client._build_request(FinalRequestOptions(method="get", url="/foo"))
timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore
assert timeout == DEFAULT_TIMEOUT
@@ -1234,7 +1244,7 @@ async def test_request_timeout(self, async_client: AsyncLlamaStackClient) -> Non
assert timeout == httpx.Timeout(100.0)
async def test_client_timeout_option(self) -> None:
- client = AsyncLlamaStackClient(base_url=base_url, _strict_response_validation=True, timeout=httpx.Timeout(0))
+ client = AsyncOgxClient(base_url=base_url, _strict_response_validation=True, timeout=httpx.Timeout(0))
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore
@@ -1245,7 +1255,7 @@ async def test_client_timeout_option(self) -> None:
async def test_http_client_timeout_option(self) -> None:
# custom timeout given to the httpx client should be used
async with httpx.AsyncClient(timeout=None) as http_client:
- client = AsyncLlamaStackClient(base_url=base_url, _strict_response_validation=True, http_client=http_client)
+ client = AsyncOgxClient(base_url=base_url, _strict_response_validation=True, http_client=http_client)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore
@@ -1255,7 +1265,7 @@ async def test_http_client_timeout_option(self) -> None:
# no timeout given to the httpx client should not use the httpx default
async with httpx.AsyncClient() as http_client:
- client = AsyncLlamaStackClient(base_url=base_url, _strict_response_validation=True, http_client=http_client)
+ client = AsyncOgxClient(base_url=base_url, _strict_response_validation=True, http_client=http_client)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore
@@ -1265,7 +1275,7 @@ async def test_http_client_timeout_option(self) -> None:
# explicitly passing the default timeout currently results in it being ignored
async with httpx.AsyncClient(timeout=HTTPX_DEFAULT_TIMEOUT) as http_client:
- client = AsyncLlamaStackClient(base_url=base_url, _strict_response_validation=True, http_client=http_client)
+ client = AsyncOgxClient(base_url=base_url, _strict_response_validation=True, http_client=http_client)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore
@@ -1276,19 +1286,17 @@ async def test_http_client_timeout_option(self) -> None:
def test_invalid_http_client(self) -> None:
with pytest.raises(TypeError, match="Invalid `http_client` arg"):
with httpx.Client() as http_client:
- AsyncLlamaStackClient(
- base_url=base_url, _strict_response_validation=True, http_client=cast(Any, http_client)
- )
+ AsyncOgxClient(base_url=base_url, _strict_response_validation=True, http_client=cast(Any, http_client))
async def test_default_headers_option(self) -> None:
- test_client = AsyncLlamaStackClient(
+ test_client = AsyncOgxClient(
base_url=base_url, _strict_response_validation=True, default_headers={"X-Foo": "bar"}
)
request = test_client._build_request(FinalRequestOptions(method="get", url="/foo"))
assert request.headers.get("x-foo") == "bar"
assert request.headers.get("x-stainless-lang") == "python"
- test_client2 = AsyncLlamaStackClient(
+ test_client2 = AsyncOgxClient(
base_url=base_url,
_strict_response_validation=True,
default_headers={
@@ -1304,7 +1312,7 @@ async def test_default_headers_option(self) -> None:
await test_client2.close()
async def test_default_query_option(self) -> None:
- client = AsyncLlamaStackClient(
+ client = AsyncOgxClient(
base_url=base_url, _strict_response_validation=True, default_query={"query_param": "bar"}
)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
@@ -1323,7 +1331,31 @@ async def test_default_query_option(self) -> None:
await client.close()
- def test_request_extra_json(self, client: LlamaStackClient) -> None:
+ async def test_hardcoded_query_params_in_url(self, async_client: AsyncOgxClient) -> None:
+ request = async_client._build_request(FinalRequestOptions(method="get", url="/foo?beta=true"))
+ url = httpx.URL(request.url)
+ assert dict(url.params) == {"beta": "true"}
+
+ request = async_client._build_request(
+ FinalRequestOptions(
+ method="get",
+ url="/foo?beta=true",
+ params={"limit": "10", "page": "abc"},
+ )
+ )
+ url = httpx.URL(request.url)
+ assert dict(url.params) == {"beta": "true", "limit": "10", "page": "abc"}
+
+ request = async_client._build_request(
+ FinalRequestOptions(
+ method="get",
+ url="/files/a%2Fb?beta=true",
+ params={"limit": "10"},
+ )
+ )
+ assert request.url.raw_path == b"/files/a%2Fb?beta=true&limit=10"
+
+ def test_request_extra_json(self, client: OgxClient) -> None:
request = client._build_request(
FinalRequestOptions(
method="post",
@@ -1357,7 +1389,7 @@ def test_request_extra_json(self, client: LlamaStackClient) -> None:
data = json.loads(request.content.decode("utf-8"))
assert data == {"foo": "bar", "baz": None}
- def test_request_extra_headers(self, client: LlamaStackClient) -> None:
+ def test_request_extra_headers(self, client: OgxClient) -> None:
request = client._build_request(
FinalRequestOptions(
method="post",
@@ -1379,7 +1411,7 @@ def test_request_extra_headers(self, client: LlamaStackClient) -> None:
)
assert request.headers.get("X-Bar") == "false"
- def test_request_extra_query(self, client: LlamaStackClient) -> None:
+ def test_request_extra_query(self, client: OgxClient) -> None:
request = client._build_request(
FinalRequestOptions(
method="post",
@@ -1420,7 +1452,7 @@ def test_request_extra_query(self, client: LlamaStackClient) -> None:
params = dict(request.url.params)
assert params == {"foo": "2"}
- def test_multipart_repeating_array(self, async_client: AsyncLlamaStackClient) -> None:
+ def test_multipart_repeating_array(self, async_client: AsyncOgxClient) -> None:
request = async_client._build_request(
FinalRequestOptions.construct(
method="post",
@@ -1450,7 +1482,7 @@ def test_multipart_repeating_array(self, async_client: AsyncLlamaStackClient) ->
]
@pytest.mark.respx(base_url=base_url)
- async def test_binary_content_upload(self, respx_mock: MockRouter, async_client: AsyncLlamaStackClient) -> None:
+ async def test_binary_content_upload(self, respx_mock: MockRouter, async_client: AsyncOgxClient) -> None:
respx_mock.post("/upload").mock(side_effect=mirror_request_content)
file_content = b"Hello, this is a test file."
@@ -1475,7 +1507,7 @@ async def mock_handler(request: httpx.Request) -> httpx.Response:
assert counter.value == 0, "the request body should not have been read"
return httpx.Response(200, content=await request.aread())
- async with AsyncLlamaStackClient(
+ async with AsyncOgxClient(
base_url=base_url,
_strict_response_validation=True,
http_client=httpx.AsyncClient(transport=MockTransport(handler=mock_handler)),
@@ -1494,7 +1526,7 @@ async def mock_handler(request: httpx.Request) -> httpx.Response:
@pytest.mark.respx(base_url=base_url)
async def test_binary_content_upload_with_body_is_deprecated(
- self, respx_mock: MockRouter, async_client: AsyncLlamaStackClient
+ self, respx_mock: MockRouter, async_client: AsyncOgxClient
) -> None:
respx_mock.post("/upload").mock(side_effect=mirror_request_content)
@@ -1515,7 +1547,7 @@ async def test_binary_content_upload_with_body_is_deprecated(
assert response.content == file_content
@pytest.mark.respx(base_url=base_url)
- async def test_basic_union_response(self, respx_mock: MockRouter, async_client: AsyncLlamaStackClient) -> None:
+ async def test_basic_union_response(self, respx_mock: MockRouter, async_client: AsyncOgxClient) -> None:
class Model1(BaseModel):
name: str
@@ -1529,9 +1561,7 @@ class Model2(BaseModel):
assert response.foo == "bar"
@pytest.mark.respx(base_url=base_url)
- async def test_union_response_different_types(
- self, respx_mock: MockRouter, async_client: AsyncLlamaStackClient
- ) -> None:
+ async def test_union_response_different_types(self, respx_mock: MockRouter, async_client: AsyncOgxClient) -> None:
"""Union of objects with the same field name using a different type"""
class Model1(BaseModel):
@@ -1554,7 +1584,7 @@ class Model2(BaseModel):
@pytest.mark.respx(base_url=base_url)
async def test_non_application_json_content_type_for_json_data(
- self, respx_mock: MockRouter, async_client: AsyncLlamaStackClient
+ self, respx_mock: MockRouter, async_client: AsyncOgxClient
) -> None:
"""
Response that sets Content-Type to something other than application/json but returns json data
@@ -1576,7 +1606,7 @@ class Model(BaseModel):
assert response.foo == 2
async def test_base_url_setter(self) -> None:
- client = AsyncLlamaStackClient(base_url="https://example.com/from_init", _strict_response_validation=True)
+ client = AsyncOgxClient(base_url="https://example.com/from_init", _strict_response_validation=True)
assert client.base_url == "https://example.com/from_init/"
client.base_url = "https://example.com/from_setter" # type: ignore[assignment]
@@ -1586,15 +1616,15 @@ async def test_base_url_setter(self) -> None:
await client.close()
async def test_base_url_env(self) -> None:
- with update_env(LLAMA_STACK_CLIENT_BASE_URL="http://localhost:5000/from/env"):
- client = AsyncLlamaStackClient(_strict_response_validation=True)
+ with update_env(OGX_CLIENT_BASE_URL="http://localhost:5000/from/env"):
+ client = AsyncOgxClient(_strict_response_validation=True)
assert client.base_url == "http://localhost:5000/from/env/"
@pytest.mark.parametrize(
"client",
[
- AsyncLlamaStackClient(base_url="http://localhost:5000/custom/path/", _strict_response_validation=True),
- AsyncLlamaStackClient(
+ AsyncOgxClient(base_url="http://localhost:5000/custom/path/", _strict_response_validation=True),
+ AsyncOgxClient(
base_url="http://localhost:5000/custom/path/",
_strict_response_validation=True,
http_client=httpx.AsyncClient(),
@@ -1602,7 +1632,7 @@ async def test_base_url_env(self) -> None:
],
ids=["standard", "custom http client"],
)
- async def test_base_url_trailing_slash(self, client: AsyncLlamaStackClient) -> None:
+ async def test_base_url_trailing_slash(self, client: AsyncOgxClient) -> None:
request = client._build_request(
FinalRequestOptions(
method="post",
@@ -1616,8 +1646,8 @@ async def test_base_url_trailing_slash(self, client: AsyncLlamaStackClient) -> N
@pytest.mark.parametrize(
"client",
[
- AsyncLlamaStackClient(base_url="http://localhost:5000/custom/path/", _strict_response_validation=True),
- AsyncLlamaStackClient(
+ AsyncOgxClient(base_url="http://localhost:5000/custom/path/", _strict_response_validation=True),
+ AsyncOgxClient(
base_url="http://localhost:5000/custom/path/",
_strict_response_validation=True,
http_client=httpx.AsyncClient(),
@@ -1625,7 +1655,7 @@ async def test_base_url_trailing_slash(self, client: AsyncLlamaStackClient) -> N
],
ids=["standard", "custom http client"],
)
- async def test_base_url_no_trailing_slash(self, client: AsyncLlamaStackClient) -> None:
+ async def test_base_url_no_trailing_slash(self, client: AsyncOgxClient) -> None:
request = client._build_request(
FinalRequestOptions(
method="post",
@@ -1639,8 +1669,8 @@ async def test_base_url_no_trailing_slash(self, client: AsyncLlamaStackClient) -
@pytest.mark.parametrize(
"client",
[
- AsyncLlamaStackClient(base_url="http://localhost:5000/custom/path/", _strict_response_validation=True),
- AsyncLlamaStackClient(
+ AsyncOgxClient(base_url="http://localhost:5000/custom/path/", _strict_response_validation=True),
+ AsyncOgxClient(
base_url="http://localhost:5000/custom/path/",
_strict_response_validation=True,
http_client=httpx.AsyncClient(),
@@ -1648,7 +1678,7 @@ async def test_base_url_no_trailing_slash(self, client: AsyncLlamaStackClient) -
],
ids=["standard", "custom http client"],
)
- async def test_absolute_request_url(self, client: AsyncLlamaStackClient) -> None:
+ async def test_absolute_request_url(self, client: AsyncOgxClient) -> None:
request = client._build_request(
FinalRequestOptions(
method="post",
@@ -1660,7 +1690,7 @@ async def test_absolute_request_url(self, client: AsyncLlamaStackClient) -> None
await client.close()
async def test_copied_client_does_not_close_http(self) -> None:
- test_client = AsyncLlamaStackClient(base_url=base_url, _strict_response_validation=True)
+ test_client = AsyncOgxClient(base_url=base_url, _strict_response_validation=True)
assert not test_client.is_closed()
copied = test_client.copy()
@@ -1672,7 +1702,7 @@ async def test_copied_client_does_not_close_http(self) -> None:
assert not test_client.is_closed()
async def test_client_context_manager(self) -> None:
- test_client = AsyncLlamaStackClient(base_url=base_url, _strict_response_validation=True)
+ test_client = AsyncOgxClient(base_url=base_url, _strict_response_validation=True)
async with test_client as c2:
assert c2 is test_client
assert not c2.is_closed()
@@ -1680,9 +1710,7 @@ async def test_client_context_manager(self) -> None:
assert test_client.is_closed()
@pytest.mark.respx(base_url=base_url)
- async def test_client_response_validation_error(
- self, respx_mock: MockRouter, async_client: AsyncLlamaStackClient
- ) -> None:
+ async def test_client_response_validation_error(self, respx_mock: MockRouter, async_client: AsyncOgxClient) -> None:
class Model(BaseModel):
foo: str
@@ -1695,10 +1723,10 @@ class Model(BaseModel):
async def test_client_max_retries_validation(self) -> None:
with pytest.raises(TypeError, match=r"max_retries cannot be None"):
- AsyncLlamaStackClient(base_url=base_url, _strict_response_validation=True, max_retries=cast(Any, None))
+ AsyncOgxClient(base_url=base_url, _strict_response_validation=True, max_retries=cast(Any, None))
@pytest.mark.respx(base_url=base_url)
- async def test_default_stream_cls(self, respx_mock: MockRouter, async_client: AsyncLlamaStackClient) -> None:
+ async def test_default_stream_cls(self, respx_mock: MockRouter, async_client: AsyncOgxClient) -> None:
class Model(BaseModel):
name: str
@@ -1715,12 +1743,12 @@ class Model(BaseModel):
respx_mock.get("/foo").mock(return_value=httpx.Response(200, text="my-custom-format"))
- strict_client = AsyncLlamaStackClient(base_url=base_url, _strict_response_validation=True)
+ strict_client = AsyncOgxClient(base_url=base_url, _strict_response_validation=True)
with pytest.raises(APIResponseValidationError):
await strict_client.get("/foo", cast_to=Model)
- non_strict_client = AsyncLlamaStackClient(base_url=base_url, _strict_response_validation=False)
+ non_strict_client = AsyncOgxClient(base_url=base_url, _strict_response_validation=False)
response = await non_strict_client.get("/foo", cast_to=Model)
assert isinstance(response, str) # type: ignore[unreachable]
@@ -1751,17 +1779,17 @@ class Model(BaseModel):
)
@mock.patch("time.time", mock.MagicMock(return_value=1696004797))
async def test_parse_retry_after_header(
- self, remaining_retries: int, retry_after: str, timeout: float, async_client: AsyncLlamaStackClient
+ self, remaining_retries: int, retry_after: str, timeout: float, async_client: AsyncOgxClient
) -> None:
headers = httpx.Headers({"retry-after": retry_after})
options = FinalRequestOptions(method="get", url="/foo", max_retries=3)
calculated = async_client._calculate_retry_timeout(remaining_retries, options, headers)
assert calculated == pytest.approx(timeout, 0.5 * 0.875) # pyright: ignore[reportUnknownMemberType]
- @mock.patch("llama_stack_client._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+ @mock.patch("ogx_client._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
async def test_retrying_timeout_errors_doesnt_leak(
- self, respx_mock: MockRouter, async_client: AsyncLlamaStackClient
+ self, respx_mock: MockRouter, async_client: AsyncOgxClient
) -> None:
respx_mock.post("/v1/chat/completions").mock(side_effect=httpx.TimeoutException("Test timeout error"))
@@ -1778,10 +1806,10 @@ async def test_retrying_timeout_errors_doesnt_leak(
assert _get_open_connections(async_client) == 0
- @mock.patch("llama_stack_client._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+ @mock.patch("ogx_client._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
async def test_retrying_status_errors_doesnt_leak(
- self, respx_mock: MockRouter, async_client: AsyncLlamaStackClient
+ self, respx_mock: MockRouter, async_client: AsyncOgxClient
) -> None:
respx_mock.post("/v1/chat/completions").mock(return_value=httpx.Response(500))
@@ -1798,12 +1826,12 @@ async def test_retrying_status_errors_doesnt_leak(
assert _get_open_connections(async_client) == 0
@pytest.mark.parametrize("failures_before_success", [0, 2, 4])
- @mock.patch("llama_stack_client._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+ @mock.patch("ogx_client._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
@pytest.mark.parametrize("failure_mode", ["status", "exception"])
async def test_retries_taken(
self,
- async_client: AsyncLlamaStackClient,
+ async_client: AsyncOgxClient,
failures_before_success: int,
failure_mode: Literal["status", "exception"],
respx_mock: MockRouter,
@@ -1837,10 +1865,10 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success
@pytest.mark.parametrize("failures_before_success", [0, 2, 4])
- @mock.patch("llama_stack_client._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+ @mock.patch("ogx_client._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
async def test_omit_retry_count_header(
- self, async_client: AsyncLlamaStackClient, failures_before_success: int, respx_mock: MockRouter
+ self, async_client: AsyncOgxClient, failures_before_success: int, respx_mock: MockRouter
) -> None:
client = async_client.with_options(max_retries=4)
@@ -1869,10 +1897,10 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0
@pytest.mark.parametrize("failures_before_success", [0, 2, 4])
- @mock.patch("llama_stack_client._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+ @mock.patch("ogx_client._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
async def test_overwrite_retry_count_header(
- self, async_client: AsyncLlamaStackClient, failures_before_success: int, respx_mock: MockRouter
+ self, async_client: AsyncOgxClient, failures_before_success: int, respx_mock: MockRouter
) -> None:
client = async_client.with_options(max_retries=4)
@@ -1935,7 +1963,7 @@ async def test_default_client_creation(self) -> None:
)
@pytest.mark.respx(base_url=base_url)
- async def test_follow_redirects(self, respx_mock: MockRouter, async_client: AsyncLlamaStackClient) -> None:
+ async def test_follow_redirects(self, respx_mock: MockRouter, async_client: AsyncOgxClient) -> None:
# Test that the default follow_redirects=True allows following redirects
respx_mock.post("/redirect").mock(
return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"})
@@ -1947,7 +1975,7 @@ async def test_follow_redirects(self, respx_mock: MockRouter, async_client: Asyn
assert response.json() == {"status": "ok"}
@pytest.mark.respx(base_url=base_url)
- async def test_follow_redirects_disabled(self, respx_mock: MockRouter, async_client: AsyncLlamaStackClient) -> None:
+ async def test_follow_redirects_disabled(self, respx_mock: MockRouter, async_client: AsyncOgxClient) -> None:
# Test that follow_redirects=False prevents following redirects
respx_mock.post("/redirect").mock(
return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"})
diff --git a/tests/test_deepcopy.py b/tests/test_deepcopy.py
deleted file mode 100644
index d63fb98d..00000000
--- a/tests/test_deepcopy.py
+++ /dev/null
@@ -1,64 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-from llama_stack_client._utils import deepcopy_minimal
-
-
-def assert_different_identities(obj1: object, obj2: object) -> None:
- assert obj1 == obj2
- assert id(obj1) != id(obj2)
-
-
-def test_simple_dict() -> None:
- obj1 = {"foo": "bar"}
- obj2 = deepcopy_minimal(obj1)
- assert_different_identities(obj1, obj2)
-
-
-def test_nested_dict() -> None:
- obj1 = {"foo": {"bar": True}}
- obj2 = deepcopy_minimal(obj1)
- assert_different_identities(obj1, obj2)
- assert_different_identities(obj1["foo"], obj2["foo"])
-
-
-def test_complex_nested_dict() -> None:
- obj1 = {"foo": {"bar": [{"hello": "world"}]}}
- obj2 = deepcopy_minimal(obj1)
- assert_different_identities(obj1, obj2)
- assert_different_identities(obj1["foo"], obj2["foo"])
- assert_different_identities(obj1["foo"]["bar"], obj2["foo"]["bar"])
- assert_different_identities(obj1["foo"]["bar"][0], obj2["foo"]["bar"][0])
-
-
-def test_simple_list() -> None:
- obj1 = ["a", "b", "c"]
- obj2 = deepcopy_minimal(obj1)
- assert_different_identities(obj1, obj2)
-
-
-def test_nested_list() -> None:
- obj1 = ["a", [1, 2, 3]]
- obj2 = deepcopy_minimal(obj1)
- assert_different_identities(obj1, obj2)
- assert_different_identities(obj1[1], obj2[1])
-
-
-class MyObject: ...
-
-
-def test_ignores_other_types() -> None:
- # custom classes
- my_obj = MyObject()
- obj1 = {"foo": my_obj}
- obj2 = deepcopy_minimal(obj1)
- assert_different_identities(obj1, obj2)
- assert obj1["foo"] is my_obj
-
- # tuples
- obj3 = ("a", "b")
- obj4 = deepcopy_minimal(obj3)
- assert obj3 is obj4
diff --git a/tests/test_extract_files.py b/tests/test_extract_files.py
index a597a04f..f0377b44 100644
--- a/tests/test_extract_files.py
+++ b/tests/test_extract_files.py
@@ -10,8 +10,8 @@
import pytest
-from llama_stack_client._types import FileTypes
-from llama_stack_client._utils import extract_files
+from ogx_client._types import FileTypes, ArrayFormat
+from ogx_client._utils import extract_files
def test_removes_files_from_input() -> None:
@@ -41,6 +41,12 @@ def test_multiple_files() -> None:
assert query == {"documents": [{}, {}]}
+def test_top_level_file_array() -> None:
+ query = {"files": [b"file one", b"file two"], "title": "hello"}
+ assert extract_files(query, paths=[["files", ""]]) == [("files[]", b"file one"), ("files[]", b"file two")]
+ assert query == {"title": "hello"}
+
+
@pytest.mark.parametrize(
"query,paths,expected",
[
@@ -68,3 +74,24 @@ def test_ignores_incorrect_paths(
expected: list[tuple[str, FileTypes]],
) -> None:
assert extract_files(query, paths=paths) == expected
+
+
+@pytest.mark.parametrize(
+ "array_format,expected_top_level,expected_nested",
+ [
+ ("brackets", [("files[]", b"a"), ("files[]", b"b")], [("items[][file]", b"a"), ("items[][file]", b"b")]),
+ ("repeat", [("files", b"a"), ("files", b"b")], [("items[file]", b"a"), ("items[file]", b"b")]),
+ ("comma", [("files", b"a"), ("files", b"b")], [("items[file]", b"a"), ("items[file]", b"b")]),
+ ("indices", [("files[0]", b"a"), ("files[1]", b"b")], [("items[0][file]", b"a"), ("items[1][file]", b"b")]),
+ ],
+)
+def test_array_format_controls_file_field_names(
+ array_format: ArrayFormat,
+ expected_top_level: list[tuple[str, FileTypes]],
+ expected_nested: list[tuple[str, FileTypes]],
+) -> None:
+ top_level = {"files": [b"a", b"b"]}
+ assert extract_files(top_level, paths=[["files", ""]], array_format=array_format) == expected_top_level
+
+ nested = {"items": [{"file": b"a"}, {"file": b"b"}]}
+ assert extract_files(nested, paths=[["items", "", "file"]], array_format=array_format) == expected_nested
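The new `array_format` parameter controls how repeated file fields are named in multipart bodies. A usage sketch grounded in the parametrize table above (note `extract_files` mutates its input, so each call gets a fresh dict; the loop values stand in for the `ArrayFormat` literal):

```python
from ogx_client._utils import extract_files

for fmt in ("brackets", "repeat", "comma", "indices"):
    pairs = extract_files({"files": [b"a", b"b"]}, paths=[["files", ""]], array_format=fmt)
    print(fmt, pairs)
# brackets [('files[]', b'a'), ('files[]', b'b')]
# repeat   [('files', b'a'), ('files', b'b')]
# comma    [('files', b'a'), ('files', b'b')]
# indices  [('files[0]', b'a'), ('files[1]', b'b')]
```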
diff --git a/tests/test_files.py b/tests/test_files.py
index 91008825..ea02a4b0 100644
--- a/tests/test_files.py
+++ b/tests/test_files.py
@@ -10,7 +10,8 @@
import pytest
from dirty_equals import IsDict, IsList, IsBytes, IsTuple
-from llama_stack_client._files import to_httpx_files, async_to_httpx_files
+from ogx_client._files import to_httpx_files, deepcopy_with_paths, async_to_httpx_files
+from ogx_client._utils import extract_files
readme_path = Path(__file__).parent.parent.joinpath("README.md")
@@ -55,3 +56,99 @@ def test_string_not_allowed() -> None:
"file": "foo", # type: ignore
}
)
+
+
+def assert_different_identities(obj1: object, obj2: object) -> None:
+ assert obj1 == obj2
+ assert obj1 is not obj2
+
+
+class TestDeepcopyWithPaths:
+ def test_copies_top_level_dict(self) -> None:
+ original = {"file": b"data", "other": "value"}
+ result = deepcopy_with_paths(original, [["file"]])
+ assert_different_identities(result, original)
+
+ def test_file_value_is_same_reference(self) -> None:
+ file_bytes = b"contents"
+ original = {"file": file_bytes}
+ result = deepcopy_with_paths(original, [["file"]])
+ assert_different_identities(result, original)
+ assert result["file"] is file_bytes
+
+ def test_list_popped_wholesale(self) -> None:
+ files = [b"f1", b"f2"]
+ original = {"files": files, "title": "t"}
+ result = deepcopy_with_paths(original, [["files", ""]])
+ assert_different_identities(result, original)
+ result_files = result["files"]
+ assert isinstance(result_files, list)
+ assert_different_identities(result_files, files)
+
+ def test_nested_array_path_copies_list_and_elements(self) -> None:
+ elem1 = {"file": b"f1", "extra": 1}
+ elem2 = {"file": b"f2", "extra": 2}
+ original = {"items": [elem1, elem2]}
+ result = deepcopy_with_paths(original, [["items", "", "file"]])
+ assert_different_identities(result, original)
+ result_items = result["items"]
+ assert isinstance(result_items, list)
+ assert_different_identities(result_items, original["items"])
+ assert_different_identities(result_items[0], elem1)
+ assert_different_identities(result_items[1], elem2)
+
+ def test_empty_paths_returns_same_object(self) -> None:
+ original = {"foo": "bar"}
+ result = deepcopy_with_paths(original, [])
+ assert result is original
+
+ def test_multiple_paths(self) -> None:
+ f1 = b"file1"
+ f2 = b"file2"
+ original = {"a": f1, "b": f2, "c": "unchanged"}
+ result = deepcopy_with_paths(original, [["a"], ["b"]])
+ assert_different_identities(result, original)
+ assert result["a"] is f1
+ assert result["b"] is f2
+ assert result["c"] is original["c"]
+
+ def test_extract_files_does_not_mutate_original_top_level(self) -> None:
+ file_bytes = b"contents"
+ original = {"file": file_bytes, "other": "value"}
+
+ copied = deepcopy_with_paths(original, [["file"]])
+ extracted = extract_files(copied, paths=[["file"]])
+
+ assert extracted == [("file", file_bytes)]
+ assert original == {"file": file_bytes, "other": "value"}
+ assert copied == {"other": "value"}
+
+ def test_extract_files_does_not_mutate_original_nested_array_path(self) -> None:
+ file1 = b"f1"
+ file2 = b"f2"
+ original = {
+ "items": [
+ {"file": file1, "extra": 1},
+ {"file": file2, "extra": 2},
+ ],
+ "title": "example",
+ }
+
+ copied = deepcopy_with_paths(original, [["items", "", "file"]])
+ extracted = extract_files(copied, paths=[["items", "", "file"]])
+
+ assert [entry for _, entry in extracted] == [file1, file2]
+ assert original == {
+ "items": [
+ {"file": file1, "extra": 1},
+ {"file": file2, "extra": 2},
+ ],
+ "title": "example",
+ }
+ assert copied == {
+ "items": [
+ {"extra": 1},
+ {"extra": 2},
+ ],
+ "title": "example",
+ }
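Taken together, the two `test_extract_files_does_not_mutate_*` cases document the intended calling pattern: copy only along the file paths with `deepcopy_with_paths`, then destructively `extract_files` from the copy, so the caller's dict survives and the (possibly large) file bytes are shared rather than duplicated. A condensed sketch of that pattern:

```python
from ogx_client._files import deepcopy_with_paths
from ogx_client._utils import extract_files

params = {"file": b"contents", "purpose": "example"}

safe = deepcopy_with_paths(params, [["file"]])  # copies the dict, shares the file bytes
files = extract_files(safe, paths=[["file"]])   # mutates the copy only

assert files == [("file", b"contents")]
assert params == {"file": b"contents", "purpose": "example"}  # caller's dict intact
assert safe == {"purpose": "example"}
```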
diff --git a/tests/test_models.py b/tests/test_models.py
index 790cabf9..5e379c35 100644
--- a/tests/test_models.py
+++ b/tests/test_models.py
@@ -13,9 +13,9 @@
import pydantic
from pydantic import Field
-from llama_stack_client._utils import PropertyInfo
-from llama_stack_client._compat import PYDANTIC_V1, parse_obj, model_dump, model_json
-from llama_stack_client._models import DISCRIMINATOR_CACHE, BaseModel, construct_type
+from ogx_client._utils import PropertyInfo
+from ogx_client._compat import PYDANTIC_V1, parse_obj, model_dump, model_json
+from ogx_client._models import DISCRIMINATOR_CACHE, BaseModel, construct_type
class BasicModel(BaseModel):
diff --git a/tests/test_qs.py b/tests/test_qs.py
index 838c4549..9447f7e6 100644
--- a/tests/test_qs.py
+++ b/tests/test_qs.py
@@ -10,7 +10,7 @@
import pytest
-from llama_stack_client._qs import Querystring, stringify
+from ogx_client._qs import Querystring, stringify
def test_empty() -> None:
diff --git a/tests/test_required_args.py b/tests/test_required_args.py
index e01f4d70..2e48e9af 100644
--- a/tests/test_required_args.py
+++ b/tests/test_required_args.py
@@ -8,7 +8,7 @@
import pytest
-from llama_stack_client._utils import required_args
+from ogx_client._utils import required_args
def test_too_many_positional_params() -> None:
diff --git a/tests/test_response.py b/tests/test_response.py
index bf3796ed..34c3e163 100644
--- a/tests/test_response.py
+++ b/tests/test_response.py
@@ -12,8 +12,8 @@
import pytest
import pydantic
-from llama_stack_client import BaseModel, LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client._response import (
+from ogx_client import BaseModel, OgxClient, AsyncOgxClient
+from ogx_client._response import (
APIResponse,
BaseAPIResponse,
AsyncAPIResponse,
@@ -21,8 +21,8 @@
AsyncBinaryAPIResponse,
extract_response_type,
)
-from llama_stack_client._streaming import Stream
-from llama_stack_client._base_client import FinalRequestOptions
+from ogx_client._streaming import Stream
+from ogx_client._base_client import FinalRequestOptions
class ConcreteBaseAPIResponse(APIResponse[bytes]): ...
@@ -43,7 +43,7 @@ def test_extract_response_type_direct_classes() -> None:
def test_extract_response_type_direct_class_missing_type_arg() -> None:
with pytest.raises(
RuntimeError,
- match="Expected type to have a type argument at index 0 but it did not",
+ match="Expected type to have a type argument at index 0 but it did not",
):
extract_response_type(AsyncAPIResponse)
@@ -62,7 +62,7 @@ def test_extract_response_type_binary_response() -> None:
class PydanticModel(pydantic.BaseModel): ...
-def test_response_parse_mismatched_basemodel(client: LlamaStackClient) -> None:
+def test_response_parse_mismatched_basemodel(client: OgxClient) -> None:
response = APIResponse(
raw=httpx.Response(200, content=b"foo"),
client=client,
@@ -74,13 +74,13 @@ def test_response_parse_mismatched_basemodel(client: LlamaStackClient) -> None:
with pytest.raises(
TypeError,
- match="Pydantic models must subclass our base model type, e.g. `from llama_stack_client import BaseModel`",
+ match="Pydantic models must subclass our base model type, e.g. `from ogx_client import BaseModel`",
):
response.parse(to=PydanticModel)
@pytest.mark.asyncio
-async def test_async_response_parse_mismatched_basemodel(async_client: AsyncLlamaStackClient) -> None:
+async def test_async_response_parse_mismatched_basemodel(async_client: AsyncOgxClient) -> None:
response = AsyncAPIResponse(
raw=httpx.Response(200, content=b"foo"),
client=async_client,
@@ -92,12 +92,12 @@ async def test_async_response_parse_mismatched_basemodel(async_client: AsyncLlam
with pytest.raises(
TypeError,
- match="Pydantic models must subclass our base model type, e.g. `from llama_stack_client import BaseModel`",
+ match="Pydantic models must subclass our base model type, e.g. `from ogx_client import BaseModel`",
):
await response.parse(to=PydanticModel)
-def test_response_parse_custom_stream(client: LlamaStackClient) -> None:
+def test_response_parse_custom_stream(client: OgxClient) -> None:
response = APIResponse(
raw=httpx.Response(200, content=b"foo"),
client=client,
@@ -112,7 +112,7 @@ def test_response_parse_custom_stream(client: LlamaStackClient) -> None:
@pytest.mark.asyncio
-async def test_async_response_parse_custom_stream(async_client: AsyncLlamaStackClient) -> None:
+async def test_async_response_parse_custom_stream(async_client: AsyncOgxClient) -> None:
response = AsyncAPIResponse(
raw=httpx.Response(200, content=b"foo"),
client=async_client,
@@ -131,7 +131,7 @@ class CustomModel(BaseModel):
bar: int
-def test_response_parse_custom_model(client: LlamaStackClient) -> None:
+def test_response_parse_custom_model(client: OgxClient) -> None:
response = APIResponse(
raw=httpx.Response(200, content=json.dumps({"foo": "hello!", "bar": 2})),
client=client,
@@ -147,7 +147,7 @@ def test_response_parse_custom_model(client: LlamaStackClient) -> None:
@pytest.mark.asyncio
-async def test_async_response_parse_custom_model(async_client: AsyncLlamaStackClient) -> None:
+async def test_async_response_parse_custom_model(async_client: AsyncOgxClient) -> None:
response = AsyncAPIResponse(
raw=httpx.Response(200, content=json.dumps({"foo": "hello!", "bar": 2})),
client=async_client,
@@ -162,7 +162,7 @@ async def test_async_response_parse_custom_model(async_client: AsyncLlamaStackCl
assert obj.bar == 2
-def test_response_parse_annotated_type(client: LlamaStackClient) -> None:
+def test_response_parse_annotated_type(client: OgxClient) -> None:
response = APIResponse(
raw=httpx.Response(200, content=json.dumps({"foo": "hello!", "bar": 2})),
client=client,
@@ -179,7 +179,7 @@ def test_response_parse_annotated_type(client: LlamaStackClient) -> None:
assert obj.bar == 2
-async def test_async_response_parse_annotated_type(async_client: AsyncLlamaStackClient) -> None:
+async def test_async_response_parse_annotated_type(async_client: AsyncOgxClient) -> None:
response = AsyncAPIResponse(
raw=httpx.Response(200, content=json.dumps({"foo": "hello!", "bar": 2})),
client=async_client,
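The annotated-type tests above check that `.parse(to=...)` unwraps `Annotated[...]` metadata before validating. A sketch of the asserted shape, reusing `response` and `CustomModel` as constructed in the tests (an assumption carried over from the surrounding hunks):

    from typing import cast
    from typing_extensions import Annotated

    # Annotation metadata is ignored; parsing behaves exactly as with CustomModel.
    obj = response.parse(to=cast("type[CustomModel]", Annotated[CustomModel, "metadata"]))
    assert obj.foo == "hello!"
    assert obj.bar == 2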
@@ -207,7 +207,7 @@ async def test_async_response_parse_annotated_type(async_client: AsyncLlamaStack
("FalSe", False),
],
)
-def test_response_parse_bool(client: LlamaStackClient, content: str, expected: bool) -> None:
+def test_response_parse_bool(client: OgxClient, content: str, expected: bool) -> None:
response = APIResponse(
raw=httpx.Response(200, content=content),
client=client,
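The parametrized cases here assert case-insensitive boolean coercion of the raw body. A standalone sketch under the assumption that `APIResponse` takes the same constructor kwargs elided from the hunks above (`cast_to`, `options`, `stream`, `stream_cls`) and that `client` is the usual test fixture:

    import httpx
    from ogx_client._response import APIResponse
    from ogx_client._base_client import FinalRequestOptions

    response = APIResponse(
        raw=httpx.Response(200, content="TrUe"),
        client=client,
        stream=False,
        stream_cls=None,
        cast_to=str,
        options=FinalRequestOptions.construct(method="get", url="/foo"),
    )
    assert response.parse(to=bool) is True  # matching is case-insensitive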
@@ -232,7 +232,7 @@ def test_response_parse_bool(client: LlamaStackClient, content: str, expected: b
("FalSe", False),
],
)
-async def test_async_response_parse_bool(client: AsyncLlamaStackClient, content: str, expected: bool) -> None:
+async def test_async_response_parse_bool(client: AsyncOgxClient, content: str, expected: bool) -> None:
response = AsyncAPIResponse(
raw=httpx.Response(200, content=content),
client=client,
@@ -251,7 +251,7 @@ class OtherModel(BaseModel):
@pytest.mark.parametrize("client", [False], indirect=True) # loose validation
-def test_response_parse_expect_model_union_non_json_content(client: LlamaStackClient) -> None:
+def test_response_parse_expect_model_union_non_json_content(client: OgxClient) -> None:
response = APIResponse(
raw=httpx.Response(200, content=b"foo", headers={"Content-Type": "application/text"}),
client=client,
@@ -268,7 +268,7 @@ def test_response_parse_expect_model_union_non_json_content(client: LlamaStackCl
@pytest.mark.asyncio
@pytest.mark.parametrize("async_client", [False], indirect=True) # loose validation
-async def test_async_response_parse_expect_model_union_non_json_content(async_client: AsyncLlamaStackClient) -> None:
+async def test_async_response_parse_expect_model_union_non_json_content(async_client: AsyncOgxClient) -> None:
response = AsyncAPIResponse(
raw=httpx.Response(200, content=b"foo", headers={"Content-Type": "application/text"}),
client=async_client,
diff --git a/tests/test_streaming.py b/tests/test_streaming.py
index e41edce5..907653b0 100644
--- a/tests/test_streaming.py
+++ b/tests/test_streaming.py
@@ -11,13 +11,13 @@
import httpx
import pytest
-from llama_stack_client import LlamaStackClient, AsyncLlamaStackClient
-from llama_stack_client._streaming import Stream, AsyncStream, ServerSentEvent
+from ogx_client import OgxClient, AsyncOgxClient
+from ogx_client._streaming import Stream, AsyncStream, ServerSentEvent
@pytest.mark.asyncio
@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"])
-async def test_basic(sync: bool, client: LlamaStackClient, async_client: AsyncLlamaStackClient) -> None:
+async def test_basic(sync: bool, client: OgxClient, async_client: AsyncOgxClient) -> None:
def body() -> Iterator[bytes]:
yield b"event: completion\n"
yield b'data: {"foo":true}\n'
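A self-contained sketch of the decode path these streaming tests drive, built the same way as the `make_event_iterator` helper at the bottom of this file; the `client` fixture construction is assumed from conftest:

    import httpx
    from ogx_client._streaming import Stream

    def body():
        yield b"event: completion\n"
        yield b'data: {"foo":true}\n'
        yield b"\n"

    events = Stream(
        cast_to=object, client=client, response=httpx.Response(200, content=body())
    )._iter_events()
    sse = next(events)
    assert sse.event == "completion"
    assert sse.json() == {"foo": True}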
@@ -34,7 +34,7 @@ def body() -> Iterator[bytes]:
@pytest.mark.asyncio
@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"])
-async def test_data_missing_event(sync: bool, client: LlamaStackClient, async_client: AsyncLlamaStackClient) -> None:
+async def test_data_missing_event(sync: bool, client: OgxClient, async_client: AsyncOgxClient) -> None:
def body() -> Iterator[bytes]:
yield b'data: {"foo":true}\n'
yield b"\n"
@@ -50,7 +50,7 @@ def body() -> Iterator[bytes]:
@pytest.mark.asyncio
@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"])
-async def test_event_missing_data(sync: bool, client: LlamaStackClient, async_client: AsyncLlamaStackClient) -> None:
+async def test_event_missing_data(sync: bool, client: OgxClient, async_client: AsyncOgxClient) -> None:
def body() -> Iterator[bytes]:
yield b"event: ping\n"
yield b"\n"
@@ -66,7 +66,7 @@ def body() -> Iterator[bytes]:
@pytest.mark.asyncio
@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"])
-async def test_multiple_events(sync: bool, client: LlamaStackClient, async_client: AsyncLlamaStackClient) -> None:
+async def test_multiple_events(sync: bool, client: OgxClient, async_client: AsyncOgxClient) -> None:
def body() -> Iterator[bytes]:
yield b"event: ping\n"
yield b"\n"
@@ -88,9 +88,7 @@ def body() -> Iterator[bytes]:
@pytest.mark.asyncio
@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"])
-async def test_multiple_events_with_data(
- sync: bool, client: LlamaStackClient, async_client: AsyncLlamaStackClient
-) -> None:
+async def test_multiple_events_with_data(sync: bool, client: OgxClient, async_client: AsyncOgxClient) -> None:
def body() -> Iterator[bytes]:
yield b"event: ping\n"
yield b'data: {"foo":true}\n'
@@ -114,9 +112,7 @@ def body() -> Iterator[bytes]:
@pytest.mark.asyncio
@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"])
-async def test_multiple_data_lines_with_empty_line(
- sync: bool, client: LlamaStackClient, async_client: AsyncLlamaStackClient
-) -> None:
+async def test_multiple_data_lines_with_empty_line(sync: bool, client: OgxClient, async_client: AsyncOgxClient) -> None:
def body() -> Iterator[bytes]:
yield b"event: ping\n"
yield b"data: {\n"
@@ -138,9 +134,7 @@ def body() -> Iterator[bytes]:
@pytest.mark.asyncio
@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"])
-async def test_data_json_escaped_double_new_line(
- sync: bool, client: LlamaStackClient, async_client: AsyncLlamaStackClient
-) -> None:
+async def test_data_json_escaped_double_new_line(sync: bool, client: OgxClient, async_client: AsyncOgxClient) -> None:
def body() -> Iterator[bytes]:
yield b"event: ping\n"
yield b'data: {"foo": "my long\\n\\ncontent"}'
@@ -157,7 +151,7 @@ def body() -> Iterator[bytes]:
@pytest.mark.asyncio
@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"])
-async def test_multiple_data_lines(sync: bool, client: LlamaStackClient, async_client: AsyncLlamaStackClient) -> None:
+async def test_multiple_data_lines(sync: bool, client: OgxClient, async_client: AsyncOgxClient) -> None:
def body() -> Iterator[bytes]:
yield b"event: ping\n"
yield b"data: {\n"
@@ -177,8 +171,8 @@ def body() -> Iterator[bytes]:
@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"])
async def test_special_new_line_character(
sync: bool,
- client: LlamaStackClient,
- async_client: AsyncLlamaStackClient,
+ client: OgxClient,
+ async_client: AsyncOgxClient,
) -> None:
def body() -> Iterator[bytes]:
yield b'data: {"content":" culpa"}\n'
@@ -208,8 +202,8 @@ def body() -> Iterator[bytes]:
@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"])
async def test_multi_byte_character_multiple_chunks(
sync: bool,
- client: LlamaStackClient,
- async_client: AsyncLlamaStackClient,
+ client: OgxClient,
+ async_client: AsyncOgxClient,
) -> None:
def body() -> Iterator[bytes]:
yield b'data: {"content":"'
@@ -249,8 +243,8 @@ def make_event_iterator(
content: Iterator[bytes],
*,
sync: bool,
- client: LlamaStackClient,
- async_client: AsyncLlamaStackClient,
+ client: OgxClient,
+ async_client: AsyncOgxClient,
) -> Iterator[ServerSentEvent] | AsyncIterator[ServerSentEvent]:
if sync:
return Stream(cast_to=object, client=client, response=httpx.Response(200, content=content))._iter_events()
diff --git a/tests/test_transform.py b/tests/test_transform.py
index d309e42c..bc70e9f5 100644
--- a/tests/test_transform.py
+++ b/tests/test_transform.py
@@ -14,15 +14,15 @@
import pytest
-from llama_stack_client._types import Base64FileInput, omit, not_given
-from llama_stack_client._utils import (
+from ogx_client._types import Base64FileInput, omit, not_given
+from ogx_client._utils import (
PropertyInfo,
transform as _transform,
parse_datetime,
async_transform as _async_transform,
)
-from llama_stack_client._compat import PYDANTIC_V1
-from llama_stack_client._models import BaseModel
+from ogx_client._compat import PYDANTIC_V1
+from ogx_client._models import BaseModel
_T = TypeVar("_T")
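For context on what `transform` and `PropertyInfo` are exercised for in this file, a minimal sketch assuming the conventional alias behavior; `CardParams` and its field are hypothetical:

    from typing_extensions import Annotated, TypedDict
    from ogx_client._utils import PropertyInfo, transform as _transform

    class CardParams(TypedDict, total=False):
        card_id: Annotated[str, PropertyInfo(alias="cardID")]

    # transform maps the pythonic key onto its wire-format alias
    assert _transform({"card_id": "abc"}, CardParams) == {"cardID": "abc"}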
diff --git a/tests/test_utils/test_datetime_parse.py b/tests/test_utils/test_datetime_parse.py
index 9a3f30ed..53b1c320 100644
--- a/tests/test_utils/test_datetime_parse.py
+++ b/tests/test_utils/test_datetime_parse.py
@@ -14,7 +14,7 @@
import pytest
-from llama_stack_client._utils import parse_date, parse_datetime
+from ogx_client._utils import parse_date, parse_datetime
def create_tz(minutes: int) -> timezone:
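The `create_tz` helper above builds fixed-offset timezones for these assertions. A minimal sketch of the parsing behavior under test:

    from datetime import date, datetime, timedelta, timezone
    from ogx_client._utils import parse_date, parse_datetime

    assert parse_date("2022-01-02") == date(2022, 1, 2)
    assert parse_datetime("2022-01-02T03:04:05+06:00") == datetime(
        2022, 1, 2, 3, 4, 5, tzinfo=timezone(timedelta(minutes=360))
    )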
diff --git a/tests/test_utils/test_json.py b/tests/test_utils/test_json.py
index c86a4e4c..b901338b 100644
--- a/tests/test_utils/test_json.py
+++ b/tests/test_utils/test_json.py
@@ -11,8 +11,8 @@
import pydantic
-from llama_stack_client import _compat
-from llama_stack_client._utils._json import openapi_dumps
+from ogx_client import _compat
+from ogx_client._utils._json import openapi_dumps
class TestOpenapiDumps:
diff --git a/tests/test_utils/test_path.py b/tests/test_utils/test_path.py
index cd223d37..31731fb7 100644
--- a/tests/test_utils/test_path.py
+++ b/tests/test_utils/test_path.py
@@ -10,7 +10,7 @@
import pytest
-from llama_stack_client._utils._path import path_template
+from ogx_client._utils._path import path_template
@pytest.mark.parametrize(
diff --git a/tests/test_utils/test_proxy.py b/tests/test_utils/test_proxy.py
index c99cdc11..7613e853 100644
--- a/tests/test_utils/test_proxy.py
+++ b/tests/test_utils/test_proxy.py
@@ -8,7 +8,7 @@
from typing import Any
from typing_extensions import override
-from llama_stack_client._utils import LazyProxy
+from ogx_client._utils import LazyProxy
class RecursiveLazyProxy(LazyProxy[Any]):
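`LazyProxy` defers construction of the proxied object until it is dereferenced. A minimal sketch, assuming the abstract `__load__` hook shown by `RecursiveLazyProxy` above; `Greeter` is hypothetical:

    from typing_extensions import override
    from ogx_client._utils import LazyProxy

    class Greeter:
        def greet(self) -> str:
            return "hello"

    class LazyGreeter(LazyProxy[Greeter]):
        @override
        def __load__(self) -> Greeter:
            # invoked when the proxy is dereferenced; the base class
            # does not cache the result between accesses
            return Greeter()

    assert LazyGreeter().greet() == "hello"  # attribute access triggers __load__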
diff --git a/tests/test_utils/test_typing.py b/tests/test_utils/test_typing.py
index 07ce18a2..8223086a 100644
--- a/tests/test_utils/test_typing.py
+++ b/tests/test_utils/test_typing.py
@@ -8,7 +8,7 @@
from typing import Generic, TypeVar, cast
-from llama_stack_client._utils import extract_type_var_from_base
+from ogx_client._utils import extract_type_var_from_base
_T = TypeVar("_T")
_T2 = TypeVar("_T2")
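A sketch of what `extract_type_var_from_base` resolves, assuming the keyword-only `generic_bases`/`index` signature used by sibling SDKs; all names below are hypothetical:

    from typing import Generic, TypeVar
    from ogx_client._utils import extract_type_var_from_base

    _R = TypeVar("_R")

    class Response(Generic[_R]): ...
    class IntResponse(Response[int]): ...

    # Walk the bases of IntResponse and pull out the type argument bound to _R.
    assert extract_type_var_from_base(IntResponse, generic_bases=(Response,), index=0) is int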
diff --git a/tests/utils.py b/tests/utils.py
index 82710945..2558e2da 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -14,8 +14,8 @@
from datetime import date, datetime
from typing_extensions import Literal, get_args, get_origin, assert_type
-from llama_stack_client._types import Omit, NoneType
-from llama_stack_client._utils import (
+from ogx_client._types import Omit, NoneType
+from ogx_client._utils import (
is_dict,
is_list,
is_list_type,
@@ -25,8 +25,8 @@
is_annotated_type,
is_type_alias_type,
)
-from llama_stack_client._compat import PYDANTIC_V1, field_outer_type, get_model_fields
-from llama_stack_client._models import BaseModel
+from ogx_client._compat import PYDANTIC_V1, field_outer_type, get_model_fields
+from ogx_client._models import BaseModel
BaseModelT = TypeVar("BaseModelT", bound=BaseModel)
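The type predicates imported above drive the assertion helpers in this file. A minimal sketch of the two most common checks, assuming they behave as simple `get_origin`-style inspections:

    from typing import List
    from typing_extensions import Annotated
    from ogx_client._utils import is_list_type, is_annotated_type

    assert is_list_type(List[int])
    assert not is_list_type(int)
    assert is_annotated_type(Annotated[int, "wire metadata"])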
diff --git a/uv.lock b/uv.lock
index 857e7067..838aa116 100644
--- a/uv.lock
+++ b/uv.lock
@@ -1,17 +1,18 @@
version = 1
revision = 3
-requires-python = ">=3.12"
+requires-python = ">=3.9"
resolution-markers = [
- "python_full_version >= '3.14' and sys_platform == 'win32'",
- "python_full_version >= '3.14' and sys_platform == 'emscripten'",
- "python_full_version >= '3.14' and sys_platform != 'emscripten' and sys_platform != 'win32'",
- "python_full_version < '3.14' and sys_platform == 'win32'",
- "python_full_version < '3.14' and sys_platform == 'emscripten'",
- "python_full_version < '3.14' and sys_platform != 'emscripten' and sys_platform != 'win32'",
+ "python_full_version >= '3.14' and extra != 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2'",
+ "python_full_version >= '3.10' and python_full_version < '3.14' and extra != 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2'",
+ "python_full_version < '3.10' and extra != 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2'",
+ "python_full_version >= '3.10' and extra == 'group-10-ogx-client-pydantic-v1' and extra != 'group-10-ogx-client-pydantic-v2'",
+ "python_full_version < '3.10' and extra == 'group-10-ogx-client-pydantic-v1' and extra != 'group-10-ogx-client-pydantic-v2'",
+ "python_full_version >= '3.10' and extra != 'group-10-ogx-client-pydantic-v1' and extra != 'group-10-ogx-client-pydantic-v2'",
+ "python_full_version < '3.10' and extra != 'group-10-ogx-client-pydantic-v1' and extra != 'group-10-ogx-client-pydantic-v2'",
]
conflicts = [[
- { package = "llama-stack-client", group = "pydantic-v1" },
- { package = "llama-stack-client", group = "pydantic-v2" },
+ { package = "ogx-client", group = "pydantic-v1" },
+ { package = "ogx-client", group = "pydantic-v2" },
]]
[[package]]
@@ -30,6 +31,7 @@ source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "aiohappyeyeballs" },
{ name = "aiosignal" },
+ { name = "async-timeout", marker = "python_full_version < '3.11' or (extra == 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2')" },
{ name = "attrs" },
{ name = "frozenlist" },
{ name = "multidict" },
@@ -38,6 +40,40 @@ dependencies = [
]
sdist = { url = "https://files.pythonhosted.org/packages/50/42/32cf8e7704ceb4481406eb87161349abb46a57fee3f008ba9cb610968646/aiohttp-3.13.3.tar.gz", hash = "sha256:a949eee43d3782f2daae4f4a2819b2cb9b0c5d3b7f7a927067cc84dafdbb9f88", size = 7844556, upload-time = "2026-01-03T17:33:05.204Z" }
wheels = [
+ { url = "https://files.pythonhosted.org/packages/36/d6/5aec9313ee6ea9c7cde8b891b69f4ff4001416867104580670a31daeba5b/aiohttp-3.13.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d5a372fd5afd301b3a89582817fdcdb6c34124787c70dbcc616f259013e7eef7", size = 738950, upload-time = "2026-01-03T17:29:13.002Z" },
+ { url = "https://files.pythonhosted.org/packages/68/03/8fa90a7e6d11ff20a18837a8e2b5dd23db01aabc475aa9271c8ad33299f5/aiohttp-3.13.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:147e422fd1223005c22b4fe080f5d93ced44460f5f9c105406b753612b587821", size = 496099, upload-time = "2026-01-03T17:29:15.268Z" },
+ { url = "https://files.pythonhosted.org/packages/d2/23/b81f744d402510a8366b74eb420fc0cc1170d0c43daca12d10814df85f10/aiohttp-3.13.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:859bd3f2156e81dd01432f5849fc73e2243d4a487c4fd26609b1299534ee1845", size = 491072, upload-time = "2026-01-03T17:29:16.922Z" },
+ { url = "https://files.pythonhosted.org/packages/d5/e1/56d1d1c0dd334cd203dd97706ce004c1aa24b34a813b0b8daf3383039706/aiohttp-3.13.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:dca68018bf48c251ba17c72ed479f4dafe9dbd5a73707ad8d28a38d11f3d42af", size = 1671588, upload-time = "2026-01-03T17:29:18.539Z" },
+ { url = "https://files.pythonhosted.org/packages/5f/34/8d7f962604f4bc2b4e39eb1220dac7d4e4cba91fb9ba0474b4ecd67db165/aiohttp-3.13.3-cp310-cp310-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:fee0c6bc7db1de362252affec009707a17478a00ec69f797d23ca256e36d5940", size = 1640334, upload-time = "2026-01-03T17:29:21.028Z" },
+ { url = "https://files.pythonhosted.org/packages/94/1d/fcccf2c668d87337ddeef9881537baee13c58d8f01f12ba8a24215f2b804/aiohttp-3.13.3-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c048058117fd649334d81b4b526e94bde3ccaddb20463a815ced6ecbb7d11160", size = 1722656, upload-time = "2026-01-03T17:29:22.531Z" },
+ { url = "https://files.pythonhosted.org/packages/aa/98/c6f3b081c4c606bc1e5f2ec102e87d6411c73a9ef3616fea6f2d5c98c062/aiohttp-3.13.3-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:215a685b6fbbfcf71dfe96e3eba7a6f58f10da1dfdf4889c7dd856abe430dca7", size = 1817625, upload-time = "2026-01-03T17:29:24.276Z" },
+ { url = "https://files.pythonhosted.org/packages/2c/c0/cfcc3d2e11b477f86e1af2863f3858c8850d751ce8dc39c4058a072c9e54/aiohttp-3.13.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:de2c184bb1fe2cbd2cefba613e9db29a5ab559323f994b6737e370d3da0ac455", size = 1672604, upload-time = "2026-01-03T17:29:26.099Z" },
+ { url = "https://files.pythonhosted.org/packages/1e/77/6b4ffcbcac4c6a5d041343a756f34a6dd26174ae07f977a64fe028dda5b0/aiohttp-3.13.3-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:75ca857eba4e20ce9f546cd59c7007b33906a4cd48f2ff6ccf1ccfc3b646f279", size = 1554370, upload-time = "2026-01-03T17:29:28.121Z" },
+ { url = "https://files.pythonhosted.org/packages/f2/f0/e3ddfa93f17d689dbe014ba048f18e0c9f9b456033b70e94349a2e9048be/aiohttp-3.13.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:81e97251d9298386c2b7dbeb490d3d1badbdc69107fb8c9299dd04eb39bddc0e", size = 1642023, upload-time = "2026-01-03T17:29:30.002Z" },
+ { url = "https://files.pythonhosted.org/packages/eb/45/c14019c9ec60a8e243d06d601b33dcc4fd92379424bde3021725859d7f99/aiohttp-3.13.3-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:c0e2d366af265797506f0283487223146af57815b388623f0357ef7eac9b209d", size = 1649680, upload-time = "2026-01-03T17:29:31.782Z" },
+ { url = "https://files.pythonhosted.org/packages/9c/fd/09c9451dae5aa5c5ed756df95ff9ef549d45d4be663bafd1e4954fd836f0/aiohttp-3.13.3-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:4e239d501f73d6db1522599e14b9b321a7e3b1de66ce33d53a765d975e9f4808", size = 1692407, upload-time = "2026-01-03T17:29:33.392Z" },
+ { url = "https://files.pythonhosted.org/packages/a6/81/938bc2ec33c10efd6637ccb3d22f9f3160d08e8f3aa2587a2c2d5ab578eb/aiohttp-3.13.3-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:0db318f7a6f065d84cb1e02662c526294450b314a02bd9e2a8e67f0d8564ce40", size = 1543047, upload-time = "2026-01-03T17:29:34.855Z" },
+ { url = "https://files.pythonhosted.org/packages/f7/23/80488ee21c8d567c83045e412e1d9b7077d27171591a4eb7822586e8c06a/aiohttp-3.13.3-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:bfc1cc2fe31a6026a8a88e4ecfb98d7f6b1fec150cfd708adbfd1d2f42257c29", size = 1715264, upload-time = "2026-01-03T17:29:36.389Z" },
+ { url = "https://files.pythonhosted.org/packages/e2/83/259a8da6683182768200b368120ab3deff5370bed93880fb9a3a86299f34/aiohttp-3.13.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:af71fff7bac6bb7508956696dce8f6eec2bbb045eceb40343944b1ae62b5ef11", size = 1657275, upload-time = "2026-01-03T17:29:38.162Z" },
+ { url = "https://files.pythonhosted.org/packages/3f/4f/2c41f800a0b560785c10fb316216ac058c105f9be50bdc6a285de88db625/aiohttp-3.13.3-cp310-cp310-win32.whl", hash = "sha256:37da61e244d1749798c151421602884db5270faf479cf0ef03af0ff68954c9dd", size = 434053, upload-time = "2026-01-03T17:29:40.074Z" },
+ { url = "https://files.pythonhosted.org/packages/80/df/29cd63c7ecfdb65ccc12f7d808cac4fa2a19544660c06c61a4a48462de0c/aiohttp-3.13.3-cp310-cp310-win_amd64.whl", hash = "sha256:7e63f210bc1b57ef699035f2b4b6d9ce096b5914414a49b0997c839b2bd2223c", size = 456687, upload-time = "2026-01-03T17:29:41.819Z" },
+ { url = "https://files.pythonhosted.org/packages/f1/4c/a164164834f03924d9a29dc3acd9e7ee58f95857e0b467f6d04298594ebb/aiohttp-3.13.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5b6073099fb654e0a068ae678b10feff95c5cae95bbfcbfa7af669d361a8aa6b", size = 746051, upload-time = "2026-01-03T17:29:43.287Z" },
+ { url = "https://files.pythonhosted.org/packages/82/71/d5c31390d18d4f58115037c432b7e0348c60f6f53b727cad33172144a112/aiohttp-3.13.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1cb93e166e6c28716c8c6aeb5f99dfb6d5ccf482d29fe9bf9a794110e6d0ab64", size = 499234, upload-time = "2026-01-03T17:29:44.822Z" },
+ { url = "https://files.pythonhosted.org/packages/0e/c9/741f8ac91e14b1d2e7100690425a5b2b919a87a5075406582991fb7de920/aiohttp-3.13.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:28e027cf2f6b641693a09f631759b4d9ce9165099d2b5d92af9bd4e197690eea", size = 494979, upload-time = "2026-01-03T17:29:46.405Z" },
+ { url = "https://files.pythonhosted.org/packages/75/b5/31d4d2e802dfd59f74ed47eba48869c1c21552c586d5e81a9d0d5c2ad640/aiohttp-3.13.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3b61b7169ababd7802f9568ed96142616a9118dd2be0d1866e920e77ec8fa92a", size = 1748297, upload-time = "2026-01-03T17:29:48.083Z" },
+ { url = "https://files.pythonhosted.org/packages/1a/3e/eefad0ad42959f226bb79664826883f2687d602a9ae2941a18e0484a74d3/aiohttp-3.13.3-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:80dd4c21b0f6237676449c6baaa1039abae86b91636b6c91a7f8e61c87f89540", size = 1707172, upload-time = "2026-01-03T17:29:49.648Z" },
+ { url = "https://files.pythonhosted.org/packages/c5/3a/54a64299fac2891c346cdcf2aa6803f994a2e4beeaf2e5a09dcc54acc842/aiohttp-3.13.3-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:65d2ccb7eabee90ce0503c17716fc77226be026dcc3e65cce859a30db715025b", size = 1805405, upload-time = "2026-01-03T17:29:51.244Z" },
+ { url = "https://files.pythonhosted.org/packages/6c/70/ddc1b7169cf64075e864f64595a14b147a895a868394a48f6a8031979038/aiohttp-3.13.3-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5b179331a481cb5529fca8b432d8d3c7001cb217513c94cd72d668d1248688a3", size = 1899449, upload-time = "2026-01-03T17:29:53.938Z" },
+ { url = "https://files.pythonhosted.org/packages/a1/7e/6815aab7d3a56610891c76ef79095677b8b5be6646aaf00f69b221765021/aiohttp-3.13.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9d4c940f02f49483b18b079d1c27ab948721852b281f8b015c058100e9421dd1", size = 1748444, upload-time = "2026-01-03T17:29:55.484Z" },
+ { url = "https://files.pythonhosted.org/packages/6b/f2/073b145c4100da5511f457dc0f7558e99b2987cf72600d42b559db856fbc/aiohttp-3.13.3-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:f9444f105664c4ce47a2a7171a2418bce5b7bae45fb610f4e2c36045d85911d3", size = 1606038, upload-time = "2026-01-03T17:29:57.179Z" },
+ { url = "https://files.pythonhosted.org/packages/0a/c1/778d011920cae03ae01424ec202c513dc69243cf2db303965615b81deeea/aiohttp-3.13.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:694976222c711d1d00ba131904beb60534f93966562f64440d0c9d41b8cdb440", size = 1724156, upload-time = "2026-01-03T17:29:58.914Z" },
+ { url = "https://files.pythonhosted.org/packages/0e/cb/3419eabf4ec1e9ec6f242c32b689248365a1cf621891f6f0386632525494/aiohttp-3.13.3-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:f33ed1a2bf1997a36661874b017f5c4b760f41266341af36febaf271d179f6d7", size = 1722340, upload-time = "2026-01-03T17:30:01.962Z" },
+ { url = "https://files.pythonhosted.org/packages/7a/e5/76cf77bdbc435bf233c1f114edad39ed4177ccbfab7c329482b179cff4f4/aiohttp-3.13.3-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:e636b3c5f61da31a92bf0d91da83e58fdfa96f178ba682f11d24f31944cdd28c", size = 1783041, upload-time = "2026-01-03T17:30:03.609Z" },
+ { url = "https://files.pythonhosted.org/packages/9d/d4/dd1ca234c794fd29c057ce8c0566b8ef7fd6a51069de5f06fa84b9a1971c/aiohttp-3.13.3-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:5d2d94f1f5fcbe40838ac51a6ab5704a6f9ea42e72ceda48de5e6b898521da51", size = 1596024, upload-time = "2026-01-03T17:30:05.132Z" },
+ { url = "https://files.pythonhosted.org/packages/55/58/4345b5f26661a6180afa686c473620c30a66afdf120ed3dd545bbc809e85/aiohttp-3.13.3-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:2be0e9ccf23e8a94f6f0650ce06042cefc6ac703d0d7ab6c7a917289f2539ad4", size = 1804590, upload-time = "2026-01-03T17:30:07.135Z" },
+ { url = "https://files.pythonhosted.org/packages/7b/06/05950619af6c2df7e0a431d889ba2813c9f0129cec76f663e547a5ad56f2/aiohttp-3.13.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9af5e68ee47d6534d36791bbe9b646d2a7c7deb6fc24d7943628edfbb3581f29", size = 1740355, upload-time = "2026-01-03T17:30:09.083Z" },
+ { url = "https://files.pythonhosted.org/packages/3e/80/958f16de79ba0422d7c1e284b2abd0c84bc03394fbe631d0a39ffa10e1eb/aiohttp-3.13.3-cp311-cp311-win32.whl", hash = "sha256:a2212ad43c0833a873d0fb3c63fa1bacedd4cf6af2fee62bf4b739ceec3ab239", size = 433701, upload-time = "2026-01-03T17:30:10.869Z" },
+ { url = "https://files.pythonhosted.org/packages/dc/f2/27cdf04c9851712d6c1b99df6821a6623c3c9e55956d4b1e318c337b5a48/aiohttp-3.13.3-cp311-cp311-win_amd64.whl", hash = "sha256:642f752c3eb117b105acbd87e2c143de710987e09860d674e068c4c2c441034f", size = 457678, upload-time = "2026-01-03T17:30:12.719Z" },
{ url = "https://files.pythonhosted.org/packages/a0/be/4fc11f202955a69e0db803a12a062b8379c970c7c84f4882b6da17337cc1/aiohttp-3.13.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:b903a4dfee7d347e2d87697d0713be59e0b87925be030c9178c5faa58ea58d5c", size = 739732, upload-time = "2026-01-03T17:30:14.23Z" },
{ url = "https://files.pythonhosted.org/packages/97/2c/621d5b851f94fa0bb7430d6089b3aa970a9d9b75196bc93bb624b0db237a/aiohttp-3.13.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:a45530014d7a1e09f4a55f4f43097ba0fd155089372e105e4bff4ca76cb1b168", size = 494293, upload-time = "2026-01-03T17:30:15.96Z" },
{ url = "https://files.pythonhosted.org/packages/5d/43/4be01406b78e1be8320bb8316dc9c42dbab553d281c40364e0f862d5661c/aiohttp-3.13.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:27234ef6d85c914f9efeb77ff616dbf4ad2380be0cda40b4db086ffc7ddd1b7d", size = 493533, upload-time = "2026-01-03T17:30:17.431Z" },
@@ -106,6 +142,23 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/3c/5f/24155e30ba7f8c96918af1350eb0663e2430aad9e001c0489d89cd708ab1/aiohttp-3.13.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:fc353029f176fd2b3ec6cfc71be166aba1936fe5d73dd1992ce289ca6647a9aa", size = 1769801, upload-time = "2026-01-03T17:32:20.25Z" },
{ url = "https://files.pythonhosted.org/packages/eb/f8/7314031ff5c10e6ece114da79b338ec17eeff3a079e53151f7e9f43c4723/aiohttp-3.13.3-cp314-cp314t-win32.whl", hash = "sha256:2e41b18a58da1e474a057b3d35248d8320029f61d70a37629535b16a0c8f3767", size = 466523, upload-time = "2026-01-03T17:32:22.215Z" },
{ url = "https://files.pythonhosted.org/packages/b4/63/278a98c715ae467624eafe375542d8ba9b4383a016df8fdefe0ae28382a7/aiohttp-3.13.3-cp314-cp314t-win_amd64.whl", hash = "sha256:44531a36aa2264a1860089ffd4dce7baf875ee5a6079d5fb42e261c704ef7344", size = 499694, upload-time = "2026-01-03T17:32:24.546Z" },
+ { url = "https://files.pythonhosted.org/packages/bf/79/446655656861d3e7e2c32bfcf160c7aa9e9dc63776a691b124dba65cdd77/aiohttp-3.13.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:31a83ea4aead760dfcb6962efb1d861db48c34379f2ff72db9ddddd4cda9ea2e", size = 741433, upload-time = "2026-01-03T17:32:26.453Z" },
+ { url = "https://files.pythonhosted.org/packages/cb/49/773c4b310b5140d2fb5e79bb0bf40b7b41dad80a288ca1a8759f5f72bda9/aiohttp-3.13.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:988a8c5e317544fdf0d39871559e67b6341065b87fceac641108c2096d5506b7", size = 497332, upload-time = "2026-01-03T17:32:28.37Z" },
+ { url = "https://files.pythonhosted.org/packages/bc/31/1dcbc4b83a4e6f76a0ad883f07f21ffbfe29750c89db97381701508c9f45/aiohttp-3.13.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9b174f267b5cfb9a7dba9ee6859cecd234e9a681841eb85068059bc867fb8f02", size = 492365, upload-time = "2026-01-03T17:32:30.234Z" },
+ { url = "https://files.pythonhosted.org/packages/5a/b5/b50657496c8754482cd7964e50aaf3aa84b3db61ed45daec4c1aec5b94b4/aiohttp-3.13.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:947c26539750deeaee933b000fb6517cc770bbd064bad6033f1cff4803881e43", size = 1660440, upload-time = "2026-01-03T17:32:32.586Z" },
+ { url = "https://files.pythonhosted.org/packages/2a/73/9b69e5139d89d75127569298931444ad78ea86a5befd5599780b1e9a6880/aiohttp-3.13.3-cp39-cp39-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:9ebf57d09e131f5323464bd347135a88622d1c0976e88ce15b670e7ad57e4bd6", size = 1632740, upload-time = "2026-01-03T17:32:34.793Z" },
+ { url = "https://files.pythonhosted.org/packages/ef/fe/3ea9b5af694b4e3aec0d0613a806132ca744747146fca68e96bf056f61a7/aiohttp-3.13.3-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:4ae5b5a0e1926e504c81c5b84353e7a5516d8778fbbff00429fe7b05bb25cbce", size = 1719782, upload-time = "2026-01-03T17:32:37.737Z" },
+ { url = "https://files.pythonhosted.org/packages/fb/c2/46b3b06e60851cbb71efb0f79a3267279cbef7b12c58e68a1e897f269cca/aiohttp-3.13.3-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2ba0eea45eb5cc3172dbfc497c066f19c41bac70963ea1a67d51fc92e4cf9a80", size = 1813527, upload-time = "2026-01-03T17:32:39.973Z" },
+ { url = "https://files.pythonhosted.org/packages/36/23/71ceb78c769ed65fe4c697692de232b63dab399210678d2b00961ccb0619/aiohttp-3.13.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bae5c2ed2eae26cc382020edad80d01f36cb8e746da40b292e68fec40421dc6a", size = 1661268, upload-time = "2026-01-03T17:32:42.082Z" },
+ { url = "https://files.pythonhosted.org/packages/c4/8d/86e929523d955e85ebab7c0e2b9e0cb63604cfc27dc3280e10d0063cf682/aiohttp-3.13.3-cp39-cp39-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:8a60e60746623925eab7d25823329941aee7242d559baa119ca2b253c88a7bd6", size = 1552742, upload-time = "2026-01-03T17:32:44.622Z" },
+ { url = "https://files.pythonhosted.org/packages/3a/ea/3f5987cba1bab6bd151f0d97aa60f0ce04d3c83316692a6bb6ba2fb69f92/aiohttp-3.13.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:e50a2e1404f063427c9d027378472316201a2290959a295169bcf25992d04558", size = 1632918, upload-time = "2026-01-03T17:32:46.749Z" },
+ { url = "https://files.pythonhosted.org/packages/be/2c/7e1e85121f2e31ee938cb83a8f32dfafd4908530c10fabd6d46761c12ac7/aiohttp-3.13.3-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:9a9dc347e5a3dc7dfdbc1f82da0ef29e388ddb2ed281bfce9dd8248a313e62b7", size = 1644446, upload-time = "2026-01-03T17:32:49.063Z" },
+ { url = "https://files.pythonhosted.org/packages/5d/35/ce6133d423ad0e8ca976a7c848f7146bca3520eea4ccf6b95e2d077c9d20/aiohttp-3.13.3-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:b46020d11d23fe16551466c77823df9cc2f2c1e63cc965daf67fa5eec6ca1877", size = 1689487, upload-time = "2026-01-03T17:32:51.113Z" },
+ { url = "https://files.pythonhosted.org/packages/50/f7/ff7a27c15603d460fd1366b3c22054f7ae4fa9310aca40b43bde35867fcd/aiohttp-3.13.3-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:69c56fbc1993fa17043e24a546959c0178fe2b5782405ad4559e6c13975c15e3", size = 1540715, upload-time = "2026-01-03T17:32:53.38Z" },
+ { url = "https://files.pythonhosted.org/packages/17/02/053f11346e5b962e6d8a1c4f8c70c29d5970a1b4b8e7894c68e12c27a57f/aiohttp-3.13.3-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:b99281b0704c103d4e11e72a76f1b543d4946fea7dd10767e7e1b5f00d4e5704", size = 1711835, upload-time = "2026-01-03T17:32:56.088Z" },
+ { url = "https://files.pythonhosted.org/packages/fb/71/9b9761ddf276fd6708d13720197cbac19b8d67ecfa9116777924056cfcaa/aiohttp-3.13.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:40c5e40ecc29ba010656c18052b877a1c28f84344825efa106705e835c28530f", size = 1649593, upload-time = "2026-01-03T17:32:58.181Z" },
+ { url = "https://files.pythonhosted.org/packages/ae/72/5d817e9ea218acae12a5e3b9ad1178cf0c12fc3570c0b47eea2daf95f9ea/aiohttp-3.13.3-cp39-cp39-win32.whl", hash = "sha256:56339a36b9f1fc708260c76c87e593e2afb30d26de9ae1eb445b5e051b98a7a1", size = 434831, upload-time = "2026-01-03T17:33:00.577Z" },
+ { url = "https://files.pythonhosted.org/packages/39/cb/22659d9bf3149b7a2927bc2769cc9c8f8f5a80eba098398e03c199a43a85/aiohttp-3.13.3-cp39-cp39-win_amd64.whl", hash = "sha256:c6b8568a3bb5819a0ad087f16d40e5a3fb6099f39ea1d5625a3edc1e923fc538", size = 457697, upload-time = "2026-01-03T17:33:03.167Z" },
]
[[package]]
@@ -114,7 +167,7 @@ version = "1.4.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "frozenlist" },
- { name = "typing-extensions", marker = "python_full_version < '3.13' or (extra == 'group-18-llama-stack-client-pydantic-v1' and extra == 'group-18-llama-stack-client-pydantic-v2')" },
+ { name = "typing-extensions", marker = "python_full_version < '3.13' or (extra == 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2')" },
]
sdist = { url = "https://files.pythonhosted.org/packages/61/62/06741b579156360248d1ec624842ad0edf697050bbaf7c3e46394e106ad1/aiosignal-1.4.0.tar.gz", hash = "sha256:f47eecd9468083c2029cc99945502cb7708b082c232f9aca65da147157b251c7", size = 25007, upload-time = "2025-07-03T22:54:43.528Z" }
wheels = [
@@ -135,14 +188,24 @@ name = "anyio"
version = "4.12.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
+ { name = "exceptiongroup", marker = "python_full_version < '3.11' or (extra == 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2')" },
{ name = "idna" },
- { name = "typing-extensions", marker = "python_full_version < '3.13' or (extra == 'group-18-llama-stack-client-pydantic-v1' and extra == 'group-18-llama-stack-client-pydantic-v2')" },
+ { name = "typing-extensions", marker = "python_full_version < '3.13' or (extra == 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2')" },
]
sdist = { url = "https://files.pythonhosted.org/packages/96/f0/5eb65b2bb0d09ac6776f2eb54adee6abe8228ea05b20a5ad0e4945de8aac/anyio-4.12.1.tar.gz", hash = "sha256:41cfcc3a4c85d3f05c932da7c26d0201ac36f72abd4435ba90d0464a3ffed703", size = 228685, upload-time = "2026-01-06T11:45:21.246Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/38/0e/27be9fdef66e72d64c0cdc3cc2823101b80585f8119b5c112c2e8f5f7dab/anyio-4.12.1-py3-none-any.whl", hash = "sha256:d405828884fc140aa80a3c667b8beed277f1dfedec42ba031bd6ac3db606ab6c", size = 113592, upload-time = "2026-01-06T11:45:19.497Z" },
]
+[[package]]
+name = "async-timeout"
+version = "5.0.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/a5/ae/136395dfbfe00dfc94da3f3e136d0b13f394cba8f4841120e34226265780/async_timeout-5.0.1.tar.gz", hash = "sha256:d9321a7a3d5a6a5e187e824d2fa0793ce379a202935782d555d6e9d2735677d3", size = 9274, upload-time = "2024-11-06T16:41:39.6Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/fe/ba/e2081de779ca30d473f21f5b30e0e737c438205440784c7dfc81efc2b029/async_timeout-5.0.1-py3-none-any.whl", hash = "sha256:39e3809566ff85354557ec2398b55e096c8364bacac9405a7a1fa429e77fe76c", size = 6233, upload-time = "2024-11-06T16:41:37.9Z" },
+]
+
[[package]]
name = "attrs"
version = "25.4.0"
@@ -153,35 +216,12 @@ wheels = [
]
[[package]]
-name = "black"
-version = "26.3.1"
+name = "backports-asyncio-runner"
+version = "1.2.0"
source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "click" },
- { name = "mypy-extensions" },
- { name = "packaging" },
- { name = "pathspec" },
- { name = "platformdirs" },
- { name = "pytokens" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/e1/c5/61175d618685d42b005847464b8fb4743a67b1b8fdb75e50e5a96c31a27a/black-26.3.1.tar.gz", hash = "sha256:2c50f5063a9641c7eed7795014ba37b0f5fa227f3d408b968936e24bc0566b07", size = 666155, upload-time = "2026-03-12T03:36:03.593Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/8e/ff/70dca7d7cb1cbc0edb2c6cc0c38b65cba36cccc491eca64cabd5fe7f8670/backports_asyncio_runner-1.2.0.tar.gz", hash = "sha256:a5aa7b2b7d8f8bfcaa2b57313f70792df84e32a2a746f585213373f900b42162", size = 69893, upload-time = "2025-07-02T02:27:15.685Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/dc/f8/da5eae4fc75e78e6dceb60624e1b9662ab00d6b452996046dfa9b8a6025b/black-26.3.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b5e6f89631eb88a7302d416594a32faeee9fb8fb848290da9d0a5f2903519fc1", size = 1895920, upload-time = "2026-03-12T03:40:13.921Z" },
- { url = "https://files.pythonhosted.org/packages/2c/9f/04e6f26534da2e1629b2b48255c264cabf5eedc5141d04516d9d68a24111/black-26.3.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:41cd2012d35b47d589cb8a16faf8a32ef7a336f56356babd9fcf70939ad1897f", size = 1718499, upload-time = "2026-03-12T03:40:15.239Z" },
- { url = "https://files.pythonhosted.org/packages/04/91/a5935b2a63e31b331060c4a9fdb5a6c725840858c599032a6f3aac94055f/black-26.3.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0f76ff19ec5297dd8e66eb64deda23631e642c9393ab592826fd4bdc97a4bce7", size = 1794994, upload-time = "2026-03-12T03:40:17.124Z" },
- { url = "https://files.pythonhosted.org/packages/e7/0a/86e462cdd311a3c2a8ece708d22aba17d0b2a0d5348ca34b40cdcbea512e/black-26.3.1-cp312-cp312-win_amd64.whl", hash = "sha256:ddb113db38838eb9f043623ba274cfaf7d51d5b0c22ecb30afe58b1bb8322983", size = 1420867, upload-time = "2026-03-12T03:40:18.83Z" },
- { url = "https://files.pythonhosted.org/packages/5b/e5/22515a19cb7eaee3440325a6b0d95d2c0e88dd180cb011b12ae488e031d1/black-26.3.1-cp312-cp312-win_arm64.whl", hash = "sha256:dfdd51fc3e64ea4f35873d1b3fb25326773d55d2329ff8449139ebaad7357efb", size = 1230124, upload-time = "2026-03-12T03:40:20.425Z" },
- { url = "https://files.pythonhosted.org/packages/f5/77/5728052a3c0450c53d9bb3945c4c46b91baa62b2cafab6801411b6271e45/black-26.3.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:855822d90f884905362f602880ed8b5df1b7e3ee7d0db2502d4388a954cc8c54", size = 1895034, upload-time = "2026-03-12T03:40:21.813Z" },
- { url = "https://files.pythonhosted.org/packages/52/73/7cae55fdfdfbe9d19e9a8d25d145018965fe2079fa908101c3733b0c55a0/black-26.3.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:8a33d657f3276328ce00e4d37fe70361e1ec7614da5d7b6e78de5426cb56332f", size = 1718503, upload-time = "2026-03-12T03:40:23.666Z" },
- { url = "https://files.pythonhosted.org/packages/e1/87/af89ad449e8254fdbc74654e6467e3c9381b61472cc532ee350d28cfdafb/black-26.3.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f1cd08e99d2f9317292a311dfe578fd2a24b15dbce97792f9c4d752275c1fa56", size = 1793557, upload-time = "2026-03-12T03:40:25.497Z" },
- { url = "https://files.pythonhosted.org/packages/43/10/d6c06a791d8124b843bf325ab4ac7d2f5b98731dff84d6064eafd687ded1/black-26.3.1-cp313-cp313-win_amd64.whl", hash = "sha256:c7e72339f841b5a237ff14f7d3880ddd0fc7f98a1199e8c4327f9a4f478c1839", size = 1422766, upload-time = "2026-03-12T03:40:27.14Z" },
- { url = "https://files.pythonhosted.org/packages/59/4f/40a582c015f2d841ac24fed6390bd68f0fc896069ff3a886317959c9daf8/black-26.3.1-cp313-cp313-win_arm64.whl", hash = "sha256:afc622538b430aa4c8c853f7f63bc582b3b8030fd8c80b70fb5fa5b834e575c2", size = 1232140, upload-time = "2026-03-12T03:40:28.882Z" },
- { url = "https://files.pythonhosted.org/packages/d5/da/e36e27c9cebc1311b7579210df6f1c86e50f2d7143ae4fcf8a5017dc8809/black-26.3.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:2d6bfaf7fd0993b420bed691f20f9492d53ce9a2bcccea4b797d34e947318a78", size = 1889234, upload-time = "2026-03-12T03:40:30.964Z" },
- { url = "https://files.pythonhosted.org/packages/0e/7b/9871acf393f64a5fa33668c19350ca87177b181f44bb3d0c33b2d534f22c/black-26.3.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:f89f2ab047c76a9c03f78d0d66ca519e389519902fa27e7a91117ef7611c0568", size = 1720522, upload-time = "2026-03-12T03:40:32.346Z" },
- { url = "https://files.pythonhosted.org/packages/03/87/e766c7f2e90c07fb7586cc787c9ae6462b1eedab390191f2b7fc7f6170a9/black-26.3.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b07fc0dab849d24a80a29cfab8d8a19187d1c4685d8a5e6385a5ce323c1f015f", size = 1787824, upload-time = "2026-03-12T03:40:33.636Z" },
- { url = "https://files.pythonhosted.org/packages/ac/94/2424338fb2d1875e9e83eed4c8e9c67f6905ec25afd826a911aea2b02535/black-26.3.1-cp314-cp314-win_amd64.whl", hash = "sha256:0126ae5b7c09957da2bdbd91a9ba1207453feada9e9fe51992848658c6c8e01c", size = 1445855, upload-time = "2026-03-12T03:40:35.442Z" },
- { url = "https://files.pythonhosted.org/packages/86/43/0c3338bd928afb8ee7471f1a4eec3bdbe2245ccb4a646092a222e8669840/black-26.3.1-cp314-cp314-win_arm64.whl", hash = "sha256:92c0ec1f2cc149551a2b7b47efc32c866406b6891b0ee4625e95967c8f4acfb1", size = 1258109, upload-time = "2026-03-12T03:40:36.832Z" },
- { url = "https://files.pythonhosted.org/packages/8e/0d/52d98722666d6fc6c3dd4c76df339501d6efd40e0ff95e6186a7b7f0befd/black-26.3.1-py3-none-any.whl", hash = "sha256:2bd5aa94fc267d38bb21a70d7410a89f1a1d318841855f698746f8e7f51acd1b", size = 207542, upload-time = "2026-03-12T03:36:01.668Z" },
+ { url = "https://files.pythonhosted.org/packages/a0/59/76ab57e3fe74484f48a53f8e337171b4a2349e506eabe136d7e01d059086/backports_asyncio_runner-1.2.0-py3-none-any.whl", hash = "sha256:0da0a936a8aeb554eccb426dc55af3ba63bcdc69fa1a600b5bb305413a4477b5", size = 12313, upload-time = "2025-07-02T02:27:14.263Z" },
]
[[package]]
@@ -193,100 +233,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/e6/ad/3cc14f097111b4de0040c83a525973216457bbeeb63739ef1ed275c1c021/certifi-2026.1.4-py3-none-any.whl", hash = "sha256:9943707519e4add1115f44c2bc244f782c0249876bf51b6599fee1ffbedd685c", size = 152900, upload-time = "2026-01-04T02:42:40.15Z" },
]
-[[package]]
-name = "cfgv"
-version = "3.5.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/4e/b5/721b8799b04bf9afe054a3899c6cf4e880fcf8563cc71c15610242490a0c/cfgv-3.5.0.tar.gz", hash = "sha256:d5b1034354820651caa73ede66a6294d6e95c1b00acc5e9b098e917404669132", size = 7334, upload-time = "2025-11-19T20:55:51.612Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/db/3c/33bac158f8ab7f89b2e59426d5fe2e4f63f7ed25df84c036890172b412b5/cfgv-3.5.0-py2.py3-none-any.whl", hash = "sha256:a8dc6b26ad22ff227d2634a65cb388215ce6cc96bbcc5cfde7641ae87e8dacc0", size = 7445, upload-time = "2025-11-19T20:55:50.744Z" },
-]
-
-[[package]]
-name = "charset-normalizer"
-version = "3.4.6"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/7b/60/e3bec1881450851b087e301bedc3daa9377a4d45f1c26aa90b0b235e38aa/charset_normalizer-3.4.6.tar.gz", hash = "sha256:1ae6b62897110aa7c79ea2f5dd38d1abca6db663687c0b1ad9aed6f6bae3d9d6", size = 143363, upload-time = "2026-03-15T18:53:25.478Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/e5/62/c0815c992c9545347aeea7859b50dc9044d147e2e7278329c6e02ac9a616/charset_normalizer-3.4.6-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:2ef7fedc7a6ecbe99969cd09632516738a97eeb8bd7258bf8a0f23114c057dab", size = 295154, upload-time = "2026-03-15T18:50:50.88Z" },
- { url = "https://files.pythonhosted.org/packages/a8/37/bdca6613c2e3c58c7421891d80cc3efa1d32e882f7c4a7ee6039c3fc951a/charset_normalizer-3.4.6-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a4ea868bc28109052790eb2b52a9ab33f3aa7adc02f96673526ff47419490e21", size = 199191, upload-time = "2026-03-15T18:50:52.658Z" },
- { url = "https://files.pythonhosted.org/packages/6c/92/9934d1bbd69f7f398b38c5dae1cbf9cc672e7c34a4adf7b17c0a9c17d15d/charset_normalizer-3.4.6-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:836ab36280f21fc1a03c99cd05c6b7af70d2697e374c7af0b61ed271401a72a2", size = 218674, upload-time = "2026-03-15T18:50:54.102Z" },
- { url = "https://files.pythonhosted.org/packages/af/90/25f6ab406659286be929fd89ab0e78e38aa183fc374e03aa3c12d730af8a/charset_normalizer-3.4.6-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f1ce721c8a7dfec21fcbdfe04e8f68174183cf4e8188e0645e92aa23985c57ff", size = 215259, upload-time = "2026-03-15T18:50:55.616Z" },
- { url = "https://files.pythonhosted.org/packages/4e/ef/79a463eb0fff7f96afa04c1d4c51f8fc85426f918db467854bfb6a569ce3/charset_normalizer-3.4.6-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0e28d62a8fc7a1fa411c43bd65e346f3bce9716dc51b897fbe930c5987b402d5", size = 207276, upload-time = "2026-03-15T18:50:57.054Z" },
- { url = "https://files.pythonhosted.org/packages/f7/72/d0426afec4b71dc159fa6b4e68f868cd5a3ecd918fec5813a15d292a7d10/charset_normalizer-3.4.6-cp312-cp312-manylinux_2_31_armv7l.whl", hash = "sha256:530d548084c4a9f7a16ed4a294d459b4f229db50df689bfe92027452452943a0", size = 195161, upload-time = "2026-03-15T18:50:58.686Z" },
- { url = "https://files.pythonhosted.org/packages/bf/18/c82b06a68bfcb6ce55e508225d210c7e6a4ea122bfc0748892f3dc4e8e11/charset_normalizer-3.4.6-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:30f445ae60aad5e1f8bdbb3108e39f6fbc09f4ea16c815c66578878325f8f15a", size = 203452, upload-time = "2026-03-15T18:51:00.196Z" },
- { url = "https://files.pythonhosted.org/packages/44/d6/0c25979b92f8adafdbb946160348d8d44aa60ce99afdc27df524379875cb/charset_normalizer-3.4.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ac2393c73378fea4e52aa56285a3d64be50f1a12395afef9cce47772f60334c2", size = 202272, upload-time = "2026-03-15T18:51:01.703Z" },
- { url = "https://files.pythonhosted.org/packages/2e/3d/7fea3e8fe84136bebbac715dd1221cc25c173c57a699c030ab9b8900cbb7/charset_normalizer-3.4.6-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:90ca27cd8da8118b18a52d5f547859cc1f8354a00cd1e8e5120df3e30d6279e5", size = 195622, upload-time = "2026-03-15T18:51:03.526Z" },
- { url = "https://files.pythonhosted.org/packages/57/8a/d6f7fd5cb96c58ef2f681424fbca01264461336d2a7fc875e4446b1f1346/charset_normalizer-3.4.6-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8e5a94886bedca0f9b78fecd6afb6629142fd2605aa70a125d49f4edc6037ee6", size = 220056, upload-time = "2026-03-15T18:51:05.269Z" },
- { url = "https://files.pythonhosted.org/packages/16/50/478cdda782c8c9c3fb5da3cc72dd7f331f031e7f1363a893cdd6ca0f8de0/charset_normalizer-3.4.6-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:695f5c2823691a25f17bc5d5ffe79fa90972cc34b002ac6c843bb8a1720e950d", size = 203751, upload-time = "2026-03-15T18:51:06.858Z" },
- { url = "https://files.pythonhosted.org/packages/75/fc/cc2fcac943939c8e4d8791abfa139f685e5150cae9f94b60f12520feaa9b/charset_normalizer-3.4.6-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:231d4da14bcd9301310faf492051bee27df11f2bc7549bc0bb41fef11b82daa2", size = 216563, upload-time = "2026-03-15T18:51:08.564Z" },
- { url = "https://files.pythonhosted.org/packages/a8/b7/a4add1d9a5f68f3d037261aecca83abdb0ab15960a3591d340e829b37298/charset_normalizer-3.4.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a056d1ad2633548ca18ffa2f85c202cfb48b68615129143915b8dc72a806a923", size = 209265, upload-time = "2026-03-15T18:51:10.312Z" },
- { url = "https://files.pythonhosted.org/packages/6c/18/c094561b5d64a24277707698e54b7f67bd17a4f857bbfbb1072bba07c8bf/charset_normalizer-3.4.6-cp312-cp312-win32.whl", hash = "sha256:c2274ca724536f173122f36c98ce188fd24ce3dad886ec2b7af859518ce008a4", size = 144229, upload-time = "2026-03-15T18:51:11.694Z" },
- { url = "https://files.pythonhosted.org/packages/ab/20/0567efb3a8fd481b8f34f739ebddc098ed062a59fed41a8d193a61939e8f/charset_normalizer-3.4.6-cp312-cp312-win_amd64.whl", hash = "sha256:c8ae56368f8cc97c7e40a7ee18e1cedaf8e780cd8bc5ed5ac8b81f238614facb", size = 154277, upload-time = "2026-03-15T18:51:13.004Z" },
- { url = "https://files.pythonhosted.org/packages/15/57/28d79b44b51933119e21f65479d0864a8d5893e494cf5daab15df0247c17/charset_normalizer-3.4.6-cp312-cp312-win_arm64.whl", hash = "sha256:899d28f422116b08be5118ef350c292b36fc15ec2daeb9ea987c89281c7bb5c4", size = 142817, upload-time = "2026-03-15T18:51:14.408Z" },
- { url = "https://files.pythonhosted.org/packages/1e/1d/4fdabeef4e231153b6ed7567602f3b68265ec4e5b76d6024cf647d43d981/charset_normalizer-3.4.6-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:11afb56037cbc4b1555a34dd69151e8e069bee82e613a73bef6e714ce733585f", size = 294823, upload-time = "2026-03-15T18:51:15.755Z" },
- { url = "https://files.pythonhosted.org/packages/47/7b/20e809b89c69d37be748d98e84dce6820bf663cf19cf6b942c951a3e8f41/charset_normalizer-3.4.6-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:423fb7e748a08f854a08a222b983f4df1912b1daedce51a72bd24fe8f26a1843", size = 198527, upload-time = "2026-03-15T18:51:17.177Z" },
- { url = "https://files.pythonhosted.org/packages/37/a6/4f8d27527d59c039dce6f7622593cdcd3d70a8504d87d09eb11e9fdc6062/charset_normalizer-3.4.6-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:d73beaac5e90173ac3deb9928a74763a6d230f494e4bfb422c217a0ad8e629bf", size = 218388, upload-time = "2026-03-15T18:51:18.934Z" },
- { url = "https://files.pythonhosted.org/packages/f6/9b/4770ccb3e491a9bacf1c46cc8b812214fe367c86a96353ccc6daf87b01ec/charset_normalizer-3.4.6-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d60377dce4511655582e300dc1e5a5f24ba0cb229005a1d5c8d0cb72bb758ab8", size = 214563, upload-time = "2026-03-15T18:51:20.374Z" },
- { url = "https://files.pythonhosted.org/packages/2b/58/a199d245894b12db0b957d627516c78e055adc3a0d978bc7f65ddaf7c399/charset_normalizer-3.4.6-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:530e8cebeea0d76bdcf93357aa5e41336f48c3dc709ac52da2bb167c5b8271d9", size = 206587, upload-time = "2026-03-15T18:51:21.807Z" },
- { url = "https://files.pythonhosted.org/packages/7e/70/3def227f1ec56f5c69dfc8392b8bd63b11a18ca8178d9211d7cc5e5e4f27/charset_normalizer-3.4.6-cp313-cp313-manylinux_2_31_armv7l.whl", hash = "sha256:a26611d9987b230566f24a0a125f17fe0de6a6aff9f25c9f564aaa2721a5fb88", size = 194724, upload-time = "2026-03-15T18:51:23.508Z" },
- { url = "https://files.pythonhosted.org/packages/58/ab/9318352e220c05efd31c2779a23b50969dc94b985a2efa643ed9077bfca5/charset_normalizer-3.4.6-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:34315ff4fc374b285ad7f4a0bf7dcbfe769e1b104230d40f49f700d4ab6bbd84", size = 202956, upload-time = "2026-03-15T18:51:25.239Z" },
- { url = "https://files.pythonhosted.org/packages/75/13/f3550a3ac25b70f87ac98c40d3199a8503676c2f1620efbf8d42095cfc40/charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5f8ddd609f9e1af8c7bd6e2aca279c931aefecd148a14402d4e368f3171769fd", size = 201923, upload-time = "2026-03-15T18:51:26.682Z" },
- { url = "https://files.pythonhosted.org/packages/1b/db/c5c643b912740b45e8eec21de1bbab8e7fc085944d37e1e709d3dcd9d72f/charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:80d0a5615143c0b3225e5e3ef22c8d5d51f3f72ce0ea6fb84c943546c7b25b6c", size = 195366, upload-time = "2026-03-15T18:51:28.129Z" },
- { url = "https://files.pythonhosted.org/packages/5a/67/3b1c62744f9b2448443e0eb160d8b001c849ec3fef591e012eda6484787c/charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:92734d4d8d187a354a556626c221cd1a892a4e0802ccb2af432a1d85ec012194", size = 219752, upload-time = "2026-03-15T18:51:29.556Z" },
- { url = "https://files.pythonhosted.org/packages/f6/98/32ffbaf7f0366ffb0445930b87d103f6b406bc2c271563644bde8a2b1093/charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:613f19aa6e082cf96e17e3ffd89383343d0d589abda756b7764cf78361fd41dc", size = 203296, upload-time = "2026-03-15T18:51:30.921Z" },
- { url = "https://files.pythonhosted.org/packages/41/12/5d308c1bbe60cabb0c5ef511574a647067e2a1f631bc8634fcafaccd8293/charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:2b1a63e8224e401cafe7739f77efd3f9e7f5f2026bda4aead8e59afab537784f", size = 215956, upload-time = "2026-03-15T18:51:32.399Z" },
- { url = "https://files.pythonhosted.org/packages/53/e9/5f85f6c5e20669dbe56b165c67b0260547dea97dba7e187938833d791687/charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6cceb5473417d28edd20c6c984ab6fee6c6267d38d906823ebfe20b03d607dc2", size = 208652, upload-time = "2026-03-15T18:51:34.214Z" },
- { url = "https://files.pythonhosted.org/packages/f1/11/897052ea6af56df3eef3ca94edafee410ca699ca0c7b87960ad19932c55e/charset_normalizer-3.4.6-cp313-cp313-win32.whl", hash = "sha256:d7de2637729c67d67cf87614b566626057e95c303bc0a55ffe391f5205e7003d", size = 143940, upload-time = "2026-03-15T18:51:36.15Z" },
- { url = "https://files.pythonhosted.org/packages/a1/5c/724b6b363603e419829f561c854b87ed7c7e31231a7908708ac086cdf3e2/charset_normalizer-3.4.6-cp313-cp313-win_amd64.whl", hash = "sha256:572d7c822caf521f0525ba1bce1a622a0b85cf47ffbdae6c9c19e3b5ac3c4389", size = 154101, upload-time = "2026-03-15T18:51:37.876Z" },
- { url = "https://files.pythonhosted.org/packages/01/a5/7abf15b4c0968e47020f9ca0935fb3274deb87cb288cd187cad92e8cdffd/charset_normalizer-3.4.6-cp313-cp313-win_arm64.whl", hash = "sha256:a4474d924a47185a06411e0064b803c68be044be2d60e50e8bddcc2649957c1f", size = 143109, upload-time = "2026-03-15T18:51:39.565Z" },
- { url = "https://files.pythonhosted.org/packages/25/6f/ffe1e1259f384594063ea1869bfb6be5cdb8bc81020fc36c3636bc8302a1/charset_normalizer-3.4.6-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:9cc6e6d9e571d2f863fa77700701dae73ed5f78881efc8b3f9a4398772ff53e8", size = 294458, upload-time = "2026-03-15T18:51:41.134Z" },
- { url = "https://files.pythonhosted.org/packages/56/60/09bb6c13a8c1016c2ed5c6a6488e4ffef506461aa5161662bd7636936fb1/charset_normalizer-3.4.6-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ef5960d965e67165d75b7c7ffc60a83ec5abfc5c11b764ec13ea54fbef8b4421", size = 199277, upload-time = "2026-03-15T18:51:42.953Z" },
- { url = "https://files.pythonhosted.org/packages/00/50/dcfbb72a5138bbefdc3332e8d81a23494bf67998b4b100703fd15fa52d81/charset_normalizer-3.4.6-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:b3694e3f87f8ac7ce279d4355645b3c878d24d1424581b46282f24b92f5a4ae2", size = 218758, upload-time = "2026-03-15T18:51:44.339Z" },
- { url = "https://files.pythonhosted.org/packages/03/b3/d79a9a191bb75f5aa81f3aaaa387ef29ce7cb7a9e5074ba8ea095cc073c2/charset_normalizer-3.4.6-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5d11595abf8dd942a77883a39d81433739b287b6aa71620f15164f8096221b30", size = 215299, upload-time = "2026-03-15T18:51:45.871Z" },
- { url = "https://files.pythonhosted.org/packages/76/7e/bc8911719f7084f72fd545f647601ea3532363927f807d296a8c88a62c0d/charset_normalizer-3.4.6-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7bda6eebafd42133efdca535b04ccb338ab29467b3f7bf79569883676fc628db", size = 206811, upload-time = "2026-03-15T18:51:47.308Z" },
- { url = "https://files.pythonhosted.org/packages/e2/40/c430b969d41dda0c465aa36cc7c2c068afb67177bef50905ac371b28ccc7/charset_normalizer-3.4.6-cp314-cp314-manylinux_2_31_armv7l.whl", hash = "sha256:bbc8c8650c6e51041ad1be191742b8b421d05bbd3410f43fa2a00c8db87678e8", size = 193706, upload-time = "2026-03-15T18:51:48.849Z" },
- { url = "https://files.pythonhosted.org/packages/48/15/e35e0590af254f7df984de1323640ef375df5761f615b6225ba8deb9799a/charset_normalizer-3.4.6-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:22c6f0c2fbc31e76c3b8a86fba1a56eda6166e238c29cdd3d14befdb4a4e4815", size = 202706, upload-time = "2026-03-15T18:51:50.257Z" },
- { url = "https://files.pythonhosted.org/packages/5e/bd/f736f7b9cc5e93a18b794a50346bb16fbfd6b37f99e8f306f7951d27c17c/charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7edbed096e4a4798710ed6bc75dcaa2a21b68b6c356553ac4823c3658d53743a", size = 202497, upload-time = "2026-03-15T18:51:52.012Z" },
- { url = "https://files.pythonhosted.org/packages/9d/ba/2cc9e3e7dfdf7760a6ed8da7446d22536f3d0ce114ac63dee2a5a3599e62/charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:7f9019c9cb613f084481bd6a100b12e1547cf2efe362d873c2e31e4035a6fa43", size = 193511, upload-time = "2026-03-15T18:51:53.723Z" },
- { url = "https://files.pythonhosted.org/packages/9e/cb/5be49b5f776e5613be07298c80e1b02a2d900f7a7de807230595c85a8b2e/charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:58c948d0d086229efc484fe2f30c2d382c86720f55cd9bc33591774348ad44e0", size = 220133, upload-time = "2026-03-15T18:51:55.333Z" },
- { url = "https://files.pythonhosted.org/packages/83/43/99f1b5dad345accb322c80c7821071554f791a95ee50c1c90041c157ae99/charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:419a9d91bd238052642a51938af8ac05da5b3343becde08d5cdeab9046df9ee1", size = 203035, upload-time = "2026-03-15T18:51:56.736Z" },
- { url = "https://files.pythonhosted.org/packages/87/9a/62c2cb6a531483b55dddff1a68b3d891a8b498f3ca555fbcf2978e804d9d/charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:5273b9f0b5835ff0350c0828faea623c68bfa65b792720c453e22b25cc72930f", size = 216321, upload-time = "2026-03-15T18:51:58.17Z" },
- { url = "https://files.pythonhosted.org/packages/6e/79/94a010ff81e3aec7c293eb82c28f930918e517bc144c9906a060844462eb/charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:0e901eb1049fdb80f5bd11ed5ea1e498ec423102f7a9b9e4645d5b8204ff2815", size = 208973, upload-time = "2026-03-15T18:51:59.998Z" },
- { url = "https://files.pythonhosted.org/packages/2a/57/4ecff6d4ec8585342f0c71bc03efaa99cb7468f7c91a57b105bcd561cea8/charset_normalizer-3.4.6-cp314-cp314-win32.whl", hash = "sha256:b4ff1d35e8c5bd078be89349b6f3a845128e685e751b6ea1169cf2160b344c4d", size = 144610, upload-time = "2026-03-15T18:52:02.213Z" },
- { url = "https://files.pythonhosted.org/packages/80/94/8434a02d9d7f168c25767c64671fead8d599744a05d6a6c877144c754246/charset_normalizer-3.4.6-cp314-cp314-win_amd64.whl", hash = "sha256:74119174722c4349af9708993118581686f343adc1c8c9c007d59be90d077f3f", size = 154962, upload-time = "2026-03-15T18:52:03.658Z" },
- { url = "https://files.pythonhosted.org/packages/46/4c/48f2cdbfd923026503dfd67ccea45c94fd8fe988d9056b468579c66ed62b/charset_normalizer-3.4.6-cp314-cp314-win_arm64.whl", hash = "sha256:e5bcc1a1ae744e0bb59641171ae53743760130600da8db48cbb6e4918e186e4e", size = 143595, upload-time = "2026-03-15T18:52:05.123Z" },
- { url = "https://files.pythonhosted.org/packages/31/93/8878be7569f87b14f1d52032946131bcb6ebbd8af3e20446bc04053dc3f1/charset_normalizer-3.4.6-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:ad8faf8df23f0378c6d527d8b0b15ea4a2e23c89376877c598c4870d1b2c7866", size = 314828, upload-time = "2026-03-15T18:52:06.831Z" },
- { url = "https://files.pythonhosted.org/packages/06/b6/fae511ca98aac69ecc35cde828b0a3d146325dd03d99655ad38fc2cc3293/charset_normalizer-3.4.6-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f5ea69428fa1b49573eef0cc44a1d43bebd45ad0c611eb7d7eac760c7ae771bc", size = 208138, upload-time = "2026-03-15T18:52:08.239Z" },
- { url = "https://files.pythonhosted.org/packages/54/57/64caf6e1bf07274a1e0b7c160a55ee9e8c9ec32c46846ce59b9c333f7008/charset_normalizer-3.4.6-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:06a7e86163334edfc5d20fe104db92fcd666e5a5df0977cb5680a506fe26cc8e", size = 224679, upload-time = "2026-03-15T18:52:10.043Z" },
- { url = "https://files.pythonhosted.org/packages/aa/cb/9ff5a25b9273ef160861b41f6937f86fae18b0792fe0a8e75e06acb08f1d/charset_normalizer-3.4.6-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:e1f6e2f00a6b8edb562826e4632e26d063ac10307e80f7461f7de3ad8ef3f077", size = 223475, upload-time = "2026-03-15T18:52:11.854Z" },
- { url = "https://files.pythonhosted.org/packages/fc/97/440635fc093b8d7347502a377031f9605a1039c958f3cd18dcacffb37743/charset_normalizer-3.4.6-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:95b52c68d64c1878818687a473a10547b3292e82b6f6fe483808fb1468e2f52f", size = 215230, upload-time = "2026-03-15T18:52:13.325Z" },
- { url = "https://files.pythonhosted.org/packages/cd/24/afff630feb571a13f07c8539fbb502d2ab494019492aaffc78ef41f1d1d0/charset_normalizer-3.4.6-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:7504e9b7dc05f99a9bbb4525c67a2c155073b44d720470a148b34166a69c054e", size = 199045, upload-time = "2026-03-15T18:52:14.752Z" },
- { url = "https://files.pythonhosted.org/packages/e5/17/d1399ecdaf7e0498c327433e7eefdd862b41236a7e484355b8e0e5ebd64b/charset_normalizer-3.4.6-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:172985e4ff804a7ad08eebec0a1640ece87ba5041d565fff23c8f99c1f389484", size = 211658, upload-time = "2026-03-15T18:52:16.278Z" },
- { url = "https://files.pythonhosted.org/packages/b5/38/16baa0affb957b3d880e5ac2144caf3f9d7de7bc4a91842e447fbb5e8b67/charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:4be9f4830ba8741527693848403e2c457c16e499100963ec711b1c6f2049b7c7", size = 210769, upload-time = "2026-03-15T18:52:17.782Z" },
- { url = "https://files.pythonhosted.org/packages/05/34/c531bc6ac4c21da9ddfddb3107be2287188b3ea4b53b70fc58f2a77ac8d8/charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:79090741d842f564b1b2827c0b82d846405b744d31e84f18d7a7b41c20e473ff", size = 201328, upload-time = "2026-03-15T18:52:19.553Z" },
- { url = "https://files.pythonhosted.org/packages/fa/73/a5a1e9ca5f234519c1953608a03fe109c306b97fdfb25f09182babad51a7/charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:87725cfb1a4f1f8c2fc9890ae2f42094120f4b44db9360be5d99a4c6b0e03a9e", size = 225302, upload-time = "2026-03-15T18:52:21.043Z" },
- { url = "https://files.pythonhosted.org/packages/ba/f6/cd782923d112d296294dea4bcc7af5a7ae0f86ab79f8fefbda5526b6cfc0/charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:fcce033e4021347d80ed9c66dcf1e7b1546319834b74445f561d2e2221de5659", size = 211127, upload-time = "2026-03-15T18:52:22.491Z" },
- { url = "https://files.pythonhosted.org/packages/0e/c5/0b6898950627af7d6103a449b22320372c24c6feda91aa24e201a478d161/charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:ca0276464d148c72defa8bb4390cce01b4a0e425f3b50d1435aa6d7a18107602", size = 222840, upload-time = "2026-03-15T18:52:24.113Z" },
- { url = "https://files.pythonhosted.org/packages/7d/25/c4bba773bef442cbdc06111d40daa3de5050a676fa26e85090fc54dd12f0/charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:197c1a244a274bb016dd8b79204850144ef77fe81c5b797dc389327adb552407", size = 216890, upload-time = "2026-03-15T18:52:25.541Z" },
- { url = "https://files.pythonhosted.org/packages/35/1a/05dacadb0978da72ee287b0143097db12f2e7e8d3ffc4647da07a383b0b7/charset_normalizer-3.4.6-cp314-cp314t-win32.whl", hash = "sha256:2a24157fa36980478dd1770b585c0f30d19e18f4fb0c47c13aa568f871718579", size = 155379, upload-time = "2026-03-15T18:52:27.05Z" },
- { url = "https://files.pythonhosted.org/packages/5d/7a/d269d834cb3a76291651256f3b9a5945e81d0a49ab9f4a498964e83c0416/charset_normalizer-3.4.6-cp314-cp314t-win_amd64.whl", hash = "sha256:cd5e2801c89992ed8c0a3f0293ae83c159a60d9a5d685005383ef4caca77f2c4", size = 169043, upload-time = "2026-03-15T18:52:28.502Z" },
- { url = "https://files.pythonhosted.org/packages/23/06/28b29fba521a37a8932c6a84192175c34d49f84a6d4773fa63d05f9aff22/charset_normalizer-3.4.6-cp314-cp314t-win_arm64.whl", hash = "sha256:47955475ac79cc504ef2704b192364e51d0d473ad452caedd0002605f780101c", size = 148523, upload-time = "2026-03-15T18:52:29.956Z" },
- { url = "https://files.pythonhosted.org/packages/2a/68/687187c7e26cb24ccbd88e5069f5ef00eba804d36dde11d99aad0838ab45/charset_normalizer-3.4.6-py3-none-any.whl", hash = "sha256:947cf925bc916d90adba35a64c82aace04fa39b46b52d4630ece166655905a69", size = 61455, upload-time = "2026-03-15T18:53:23.833Z" },
-]
-
-[[package]]
-name = "click"
-version = "8.3.1"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "colorama", marker = "sys_platform == 'win32' or (extra == 'group-18-llama-stack-client-pydantic-v1' and extra == 'group-18-llama-stack-client-pydantic-v2')" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/3d/fa/656b739db8587d7b5dfa22e22ed02566950fbfbcdc20311993483657a5c0/click-8.3.1.tar.gz", hash = "sha256:12ff4785d337a1bb490bb7e9c2b1ee5da3112e94a8622f26a6c77f5d2fc6842a", size = 295065, upload-time = "2025-11-15T20:45:42.706Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/98/78/01c019cdb5d6498122777c1a43056ebb3ebfeef2076d9d026bfe15583b2b/click-8.3.1-py3-none-any.whl", hash = "sha256:981153a64e25f12d547d3426c367a4857371575ee7ad18df2a6183ab0545b2a6", size = 108274, upload-time = "2025-11-15T20:45:41.139Z" },
-]
-
[[package]]
name = "colorama"
version = "0.4.6"
@@ -305,15 +251,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/bb/8d/dbff05239043271dbeace563a7686212a3dd517864a35623fe4d4a64ca19/dirty_equals-0.11-py3-none-any.whl", hash = "sha256:b1d7093273fc2f9be12f443a8ead954ef6daaf6746fd42ef3a5616433ee85286", size = 28051, upload-time = "2025-11-17T01:51:22.849Z" },
]

-[[package]]
-name = "distlib"
-version = "0.4.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/96/8e/709914eb2b5749865801041647dc7f4e6d00b549cfe88b65ca192995f07c/distlib-0.4.0.tar.gz", hash = "sha256:feec40075be03a04501a973d81f633735b4b69f98b05450592310c0f401a4e0d", size = 614605, upload-time = "2025-07-17T16:52:00.465Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/33/6b/e0547afaf41bf2c42e52430072fa5658766e3d65bd4b03a563d1b6336f57/distlib-0.4.0-py2.py3-none-any.whl", hash = "sha256:9659f7d87e46584a30b5780e43ac7a2143098441670ff0a49d5f9034c54a6c16", size = 469047, upload-time = "2025-07-17T16:51:58.613Z" },
-]
-
[[package]]
name = "distro"
version = "1.9.0"
@@ -324,33 +261,24 @@ wheels = [
]

[[package]]
-name = "execnet"
-version = "2.1.2"
+name = "exceptiongroup"
+version = "1.3.1"
source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/bf/89/780e11f9588d9e7128a3f87788354c7946a9cbb1401ad38a48c4db9a4f07/execnet-2.1.2.tar.gz", hash = "sha256:63d83bfdd9a23e35b9c6a3261412324f964c2ec8dcd8d3c6916ee9373e0befcd", size = 166622, upload-time = "2025-11-12T09:56:37.75Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/ab/84/02fc1827e8cdded4aa65baef11296a9bbe595c474f0d6d758af082d849fd/execnet-2.1.2-py3-none-any.whl", hash = "sha256:67fba928dd5a544b783f6056f449e5e3931a5c378b128bc18501f7ea79e296ec", size = 40708, upload-time = "2025-11-12T09:56:36.333Z" },
+dependencies = [
+ { name = "typing-extensions", marker = "python_full_version < '3.13' or (extra == 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2')" },
]
-
-[[package]]
-name = "filelock"
-version = "3.25.2"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/94/b8/00651a0f559862f3bb7d6f7477b192afe3f583cc5e26403b44e59a55ab34/filelock-3.25.2.tar.gz", hash = "sha256:b64ece2b38f4ca29dd3e810287aa8c48182bbecd1ae6e9ae126c9b35f1382694", size = 40480, upload-time = "2026-03-11T20:45:38.487Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/50/79/66800aadf48771f6b62f7eb014e352e5d06856655206165d775e675a02c9/exceptiongroup-1.3.1.tar.gz", hash = "sha256:8b412432c6055b0b7d14c310000ae93352ed6754f70fa8f7c34141f91c4e3219", size = 30371, upload-time = "2025-11-21T23:01:54.787Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/a4/a5/842ae8f0c08b61d6484b52f99a03510a3a72d23141942d216ebe81fefbce/filelock-3.25.2-py3-none-any.whl", hash = "sha256:ca8afb0da15f229774c9ad1b455ed96e85a81373065fb10446672f64444ddf70", size = 26759, upload-time = "2026-03-11T20:45:37.437Z" },
+ { url = "https://files.pythonhosted.org/packages/8a/0e/97c33bf5009bdbac74fd2beace167cab3f978feb69cc36f1ef79360d6c4e/exceptiongroup-1.3.1-py3-none-any.whl", hash = "sha256:a7a39a3bd276781e98394987d3a5701d0c4edffb633bb7a5144577f82c773598", size = 16740, upload-time = "2025-11-21T23:01:53.443Z" },
]

[[package]]
-name = "fire"
-version = "0.7.1"
+name = "execnet"
+version = "2.1.2"
source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "termcolor" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/c0/00/f8d10588d2019d6d6452653def1ee807353b21983db48550318424b5ff18/fire-0.7.1.tar.gz", hash = "sha256:3b208f05c736de98fb343310d090dcc4d8c78b2a89ea4f32b837c586270a9cbf", size = 88720, upload-time = "2025-08-16T20:20:24.175Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/bf/89/780e11f9588d9e7128a3f87788354c7946a9cbb1401ad38a48c4db9a4f07/execnet-2.1.2.tar.gz", hash = "sha256:63d83bfdd9a23e35b9c6a3261412324f964c2ec8dcd8d3c6916ee9373e0befcd", size = 166622, upload-time = "2025-11-12T09:56:37.75Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/e5/4c/93d0f85318da65923e4b91c1c2ff03d8a458cbefebe3bc612a6693c7906d/fire-0.7.1-py3-none-any.whl", hash = "sha256:e43fd8a5033a9001e7e2973bab96070694b9f12f2e0ecf96d4683971b5ab1882", size = 115945, upload-time = "2025-08-16T20:20:22.87Z" },
+ { url = "https://files.pythonhosted.org/packages/ab/84/02fc1827e8cdded4aa65baef11296a9bbe595c474f0d6d758af082d849fd/execnet-2.1.2-py3-none-any.whl", hash = "sha256:67fba928dd5a544b783f6056f449e5e3931a5c378b128bc18501f7ea79e296ec", size = 40708, upload-time = "2025-11-12T09:56:36.333Z" },
]

[[package]]
@@ -359,6 +287,38 @@ version = "1.8.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/2d/f5/c831fac6cc817d26fd54c7eaccd04ef7e0288806943f7cc5bbf69f3ac1f0/frozenlist-1.8.0.tar.gz", hash = "sha256:3ede829ed8d842f6cd48fc7081d7a41001a56f1f38603f9d49bf3020d59a31ad", size = 45875, upload-time = "2025-10-06T05:38:17.865Z" }
wheels = [
+ { url = "https://files.pythonhosted.org/packages/83/4a/557715d5047da48d54e659203b9335be7bfaafda2c3f627b7c47e0b3aaf3/frozenlist-1.8.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:b37f6d31b3dcea7deb5e9696e529a6aa4a898adc33db82da12e4c60a7c4d2011", size = 86230, upload-time = "2025-10-06T05:35:23.699Z" },
+ { url = "https://files.pythonhosted.org/packages/a2/fb/c85f9fed3ea8fe8740e5b46a59cc141c23b842eca617da8876cfce5f760e/frozenlist-1.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ef2b7b394f208233e471abc541cc6991f907ffd47dc72584acee3147899d6565", size = 49621, upload-time = "2025-10-06T05:35:25.341Z" },
+ { url = "https://files.pythonhosted.org/packages/63/70/26ca3f06aace16f2352796b08704338d74b6d1a24ca38f2771afbb7ed915/frozenlist-1.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a88f062f072d1589b7b46e951698950e7da00442fc1cacbe17e19e025dc327ad", size = 49889, upload-time = "2025-10-06T05:35:26.797Z" },
+ { url = "https://files.pythonhosted.org/packages/5d/ed/c7895fd2fde7f3ee70d248175f9b6cdf792fb741ab92dc59cd9ef3bd241b/frozenlist-1.8.0-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:f57fb59d9f385710aa7060e89410aeb5058b99e62f4d16b08b91986b9a2140c2", size = 219464, upload-time = "2025-10-06T05:35:28.254Z" },
+ { url = "https://files.pythonhosted.org/packages/6b/83/4d587dccbfca74cb8b810472392ad62bfa100bf8108c7223eb4c4fa2f7b3/frozenlist-1.8.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:799345ab092bee59f01a915620b5d014698547afd011e691a208637312db9186", size = 221649, upload-time = "2025-10-06T05:35:29.454Z" },
+ { url = "https://files.pythonhosted.org/packages/6a/c6/fd3b9cd046ec5fff9dab66831083bc2077006a874a2d3d9247dea93ddf7e/frozenlist-1.8.0-cp310-cp310-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:c23c3ff005322a6e16f71bf8692fcf4d5a304aaafe1e262c98c6d4adc7be863e", size = 219188, upload-time = "2025-10-06T05:35:30.951Z" },
+ { url = "https://files.pythonhosted.org/packages/ce/80/6693f55eb2e085fc8afb28cf611448fb5b90e98e068fa1d1b8d8e66e5c7d/frozenlist-1.8.0-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:8a76ea0f0b9dfa06f254ee06053d93a600865b3274358ca48a352ce4f0798450", size = 231748, upload-time = "2025-10-06T05:35:32.101Z" },
+ { url = "https://files.pythonhosted.org/packages/97/d6/e9459f7c5183854abd989ba384fe0cc1a0fb795a83c033f0571ec5933ca4/frozenlist-1.8.0-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:c7366fe1418a6133d5aa824ee53d406550110984de7637d65a178010f759c6ef", size = 236351, upload-time = "2025-10-06T05:35:33.834Z" },
+ { url = "https://files.pythonhosted.org/packages/97/92/24e97474b65c0262e9ecd076e826bfd1d3074adcc165a256e42e7b8a7249/frozenlist-1.8.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:13d23a45c4cebade99340c4165bd90eeb4a56c6d8a9d8aa49568cac19a6d0dc4", size = 218767, upload-time = "2025-10-06T05:35:35.205Z" },
+ { url = "https://files.pythonhosted.org/packages/ee/bf/dc394a097508f15abff383c5108cb8ad880d1f64a725ed3b90d5c2fbf0bb/frozenlist-1.8.0-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:e4a3408834f65da56c83528fb52ce7911484f0d1eaf7b761fc66001db1646eff", size = 235887, upload-time = "2025-10-06T05:35:36.354Z" },
+ { url = "https://files.pythonhosted.org/packages/40/90/25b201b9c015dbc999a5baf475a257010471a1fa8c200c843fd4abbee725/frozenlist-1.8.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:42145cd2748ca39f32801dad54aeea10039da6f86e303659db90db1c4b614c8c", size = 228785, upload-time = "2025-10-06T05:35:37.949Z" },
+ { url = "https://files.pythonhosted.org/packages/84/f4/b5bc148df03082f05d2dd30c089e269acdbe251ac9a9cf4e727b2dbb8a3d/frozenlist-1.8.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:e2de870d16a7a53901e41b64ffdf26f2fbb8917b3e6ebf398098d72c5b20bd7f", size = 230312, upload-time = "2025-10-06T05:35:39.178Z" },
+ { url = "https://files.pythonhosted.org/packages/db/4b/87e95b5d15097c302430e647136b7d7ab2398a702390cf4c8601975709e7/frozenlist-1.8.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:20e63c9493d33ee48536600d1a5c95eefc870cd71e7ab037763d1fbb89cc51e7", size = 217650, upload-time = "2025-10-06T05:35:40.377Z" },
+ { url = "https://files.pythonhosted.org/packages/e5/70/78a0315d1fea97120591a83e0acd644da638c872f142fd72a6cebee825f3/frozenlist-1.8.0-cp310-cp310-win32.whl", hash = "sha256:adbeebaebae3526afc3c96fad434367cafbfd1b25d72369a9e5858453b1bb71a", size = 39659, upload-time = "2025-10-06T05:35:41.863Z" },
+ { url = "https://files.pythonhosted.org/packages/66/aa/3f04523fb189a00e147e60c5b2205126118f216b0aa908035c45336e27e4/frozenlist-1.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:667c3777ca571e5dbeb76f331562ff98b957431df140b54c85fd4d52eea8d8f6", size = 43837, upload-time = "2025-10-06T05:35:43.205Z" },
+ { url = "https://files.pythonhosted.org/packages/39/75/1135feecdd7c336938bd55b4dc3b0dfc46d85b9be12ef2628574b28de776/frozenlist-1.8.0-cp310-cp310-win_arm64.whl", hash = "sha256:80f85f0a7cc86e7a54c46d99c9e1318ff01f4687c172ede30fd52d19d1da1c8e", size = 39989, upload-time = "2025-10-06T05:35:44.596Z" },
+ { url = "https://files.pythonhosted.org/packages/bc/03/077f869d540370db12165c0aa51640a873fb661d8b315d1d4d67b284d7ac/frozenlist-1.8.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:09474e9831bc2b2199fad6da3c14c7b0fbdd377cce9d3d77131be28906cb7d84", size = 86912, upload-time = "2025-10-06T05:35:45.98Z" },
+ { url = "https://files.pythonhosted.org/packages/df/b5/7610b6bd13e4ae77b96ba85abea1c8cb249683217ef09ac9e0ae93f25a91/frozenlist-1.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:17c883ab0ab67200b5f964d2b9ed6b00971917d5d8a92df149dc2c9779208ee9", size = 50046, upload-time = "2025-10-06T05:35:47.009Z" },
+ { url = "https://files.pythonhosted.org/packages/6e/ef/0e8f1fe32f8a53dd26bdd1f9347efe0778b0fddf62789ea683f4cc7d787d/frozenlist-1.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fa47e444b8ba08fffd1c18e8cdb9a75db1b6a27f17507522834ad13ed5922b93", size = 50119, upload-time = "2025-10-06T05:35:48.38Z" },
+ { url = "https://files.pythonhosted.org/packages/11/b1/71a477adc7c36e5fb628245dfbdea2166feae310757dea848d02bd0689fd/frozenlist-1.8.0-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:2552f44204b744fba866e573be4c1f9048d6a324dfe14475103fd51613eb1d1f", size = 231067, upload-time = "2025-10-06T05:35:49.97Z" },
+ { url = "https://files.pythonhosted.org/packages/45/7e/afe40eca3a2dc19b9904c0f5d7edfe82b5304cb831391edec0ac04af94c2/frozenlist-1.8.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:957e7c38f250991e48a9a73e6423db1bb9dd14e722a10f6b8bb8e16a0f55f695", size = 233160, upload-time = "2025-10-06T05:35:51.729Z" },
+ { url = "https://files.pythonhosted.org/packages/a6/aa/7416eac95603ce428679d273255ffc7c998d4132cfae200103f164b108aa/frozenlist-1.8.0-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:8585e3bb2cdea02fc88ffa245069c36555557ad3609e83be0ec71f54fd4abb52", size = 228544, upload-time = "2025-10-06T05:35:53.246Z" },
+ { url = "https://files.pythonhosted.org/packages/8b/3d/2a2d1f683d55ac7e3875e4263d28410063e738384d3adc294f5ff3d7105e/frozenlist-1.8.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:edee74874ce20a373d62dc28b0b18b93f645633c2943fd90ee9d898550770581", size = 243797, upload-time = "2025-10-06T05:35:54.497Z" },
+ { url = "https://files.pythonhosted.org/packages/78/1e/2d5565b589e580c296d3bb54da08d206e797d941a83a6fdea42af23be79c/frozenlist-1.8.0-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:c9a63152fe95756b85f31186bddf42e4c02c6321207fd6601a1c89ebac4fe567", size = 247923, upload-time = "2025-10-06T05:35:55.861Z" },
+ { url = "https://files.pythonhosted.org/packages/aa/c3/65872fcf1d326a7f101ad4d86285c403c87be7d832b7470b77f6d2ed5ddc/frozenlist-1.8.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:b6db2185db9be0a04fecf2f241c70b63b1a242e2805be291855078f2b404dd6b", size = 230886, upload-time = "2025-10-06T05:35:57.399Z" },
+ { url = "https://files.pythonhosted.org/packages/a0/76/ac9ced601d62f6956f03cc794f9e04c81719509f85255abf96e2510f4265/frozenlist-1.8.0-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:f4be2e3d8bc8aabd566f8d5b8ba7ecc09249d74ba3c9ed52e54dc23a293f0b92", size = 245731, upload-time = "2025-10-06T05:35:58.563Z" },
+ { url = "https://files.pythonhosted.org/packages/b9/49/ecccb5f2598daf0b4a1415497eba4c33c1e8ce07495eb07d2860c731b8d5/frozenlist-1.8.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:c8d1634419f39ea6f5c427ea2f90ca85126b54b50837f31497f3bf38266e853d", size = 241544, upload-time = "2025-10-06T05:35:59.719Z" },
+ { url = "https://files.pythonhosted.org/packages/53/4b/ddf24113323c0bbcc54cb38c8b8916f1da7165e07b8e24a717b4a12cbf10/frozenlist-1.8.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:1a7fa382a4a223773ed64242dbe1c9c326ec09457e6b8428efb4118c685c3dfd", size = 241806, upload-time = "2025-10-06T05:36:00.959Z" },
+ { url = "https://files.pythonhosted.org/packages/a7/fb/9b9a084d73c67175484ba2789a59f8eebebd0827d186a8102005ce41e1ba/frozenlist-1.8.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:11847b53d722050808926e785df837353bd4d75f1d494377e59b23594d834967", size = 229382, upload-time = "2025-10-06T05:36:02.22Z" },
+ { url = "https://files.pythonhosted.org/packages/95/a3/c8fb25aac55bf5e12dae5c5aa6a98f85d436c1dc658f21c3ac73f9fa95e5/frozenlist-1.8.0-cp311-cp311-win32.whl", hash = "sha256:27c6e8077956cf73eadd514be8fb04d77fc946a7fe9f7fe167648b0b9085cc25", size = 39647, upload-time = "2025-10-06T05:36:03.409Z" },
+ { url = "https://files.pythonhosted.org/packages/0a/f5/603d0d6a02cfd4c8f2a095a54672b3cf967ad688a60fb9faf04fc4887f65/frozenlist-1.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:ac913f8403b36a2c8610bbfd25b8013488533e71e62b4b4adce9c86c8cea905b", size = 44064, upload-time = "2025-10-06T05:36:04.368Z" },
+ { url = "https://files.pythonhosted.org/packages/5d/16/c2c9ab44e181f043a86f9a8f84d5124b62dbcb3a02c0977ec72b9ac1d3e0/frozenlist-1.8.0-cp311-cp311-win_arm64.whl", hash = "sha256:d4d3214a0f8394edfa3e303136d0575eece0745ff2b47bd2cb2e66dd92d4351a", size = 39937, upload-time = "2025-10-06T05:36:05.669Z" },
{ url = "https://files.pythonhosted.org/packages/69/29/948b9aa87e75820a38650af445d2ef2b6b8a6fab1a23b6bb9e4ef0be2d59/frozenlist-1.8.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:78f7b9e5d6f2fdb88cdde9440dc147259b62b9d3b019924def9f6478be254ac1", size = 87782, upload-time = "2025-10-06T05:36:06.649Z" },
{ url = "https://files.pythonhosted.org/packages/64/80/4f6e318ee2a7c0750ed724fa33a4bdf1eacdc5a39a7a24e818a773cd91af/frozenlist-1.8.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:229bf37d2e4acdaf808fd3f06e854a4a7a3661e871b10dc1f8f1896a3b05f18b", size = 50594, upload-time = "2025-10-06T05:36:07.69Z" },
{ url = "https://files.pythonhosted.org/packages/2b/94/5c8a2b50a496b11dd519f4a24cb5496cf125681dd99e94c604ccdea9419a/frozenlist-1.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f833670942247a14eafbb675458b4e61c82e002a148f49e68257b79296e865c4", size = 50448, upload-time = "2025-10-06T05:36:08.78Z" },
@@ -439,6 +399,22 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/af/d3/76bd4ed4317e7119c2b7f57c3f6934aba26d277acc6309f873341640e21f/frozenlist-1.8.0-cp314-cp314t-win32.whl", hash = "sha256:342c97bf697ac5480c0a7ec73cd700ecfa5a8a40ac923bd035484616efecc2df", size = 44676, upload-time = "2025-10-06T05:37:52.222Z" },
{ url = "https://files.pythonhosted.org/packages/89/76/c615883b7b521ead2944bb3480398cbb07e12b7b4e4d073d3752eb721558/frozenlist-1.8.0-cp314-cp314t-win_amd64.whl", hash = "sha256:06be8f67f39c8b1dc671f5d83aaefd3358ae5cdcf8314552c57e7ed3e6475bdd", size = 49451, upload-time = "2025-10-06T05:37:53.425Z" },
{ url = "https://files.pythonhosted.org/packages/e0/a3/5982da14e113d07b325230f95060e2169f5311b1017ea8af2a29b374c289/frozenlist-1.8.0-cp314-cp314t-win_arm64.whl", hash = "sha256:102e6314ca4da683dca92e3b1355490fed5f313b768500084fbe6371fddfdb79", size = 42507, upload-time = "2025-10-06T05:37:54.513Z" },
+ { url = "https://files.pythonhosted.org/packages/c2/59/ae5cdac87a00962122ea37bb346d41b66aec05f9ce328fa2b9e216f8967b/frozenlist-1.8.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:d8b7138e5cd0647e4523d6685b0eac5d4be9a184ae9634492f25c6eb38c12a47", size = 86967, upload-time = "2025-10-06T05:37:55.607Z" },
+ { url = "https://files.pythonhosted.org/packages/8a/10/17059b2db5a032fd9323c41c39e9d1f5f9d0c8f04d1e4e3e788573086e61/frozenlist-1.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a6483e309ca809f1efd154b4d37dc6d9f61037d6c6a81c2dc7a15cb22c8c5dca", size = 49984, upload-time = "2025-10-06T05:37:57.049Z" },
+ { url = "https://files.pythonhosted.org/packages/4b/de/ad9d82ca8e5fa8f0c636e64606553c79e2b859ad253030b62a21fe9986f5/frozenlist-1.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1b9290cf81e95e93fdf90548ce9d3c1211cf574b8e3f4b3b7cb0537cf2227068", size = 50240, upload-time = "2025-10-06T05:37:58.145Z" },
+ { url = "https://files.pythonhosted.org/packages/4e/45/3dfb7767c2a67d123650122b62ce13c731b6c745bc14424eea67678b508c/frozenlist-1.8.0-cp39-cp39-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:59a6a5876ca59d1b63af8cd5e7ffffb024c3dc1e9cf9301b21a2e76286505c95", size = 219472, upload-time = "2025-10-06T05:37:59.239Z" },
+ { url = "https://files.pythonhosted.org/packages/0b/bf/5bf23d913a741b960d5c1dac7c1985d8a2a1d015772b2d18ea168b08e7ff/frozenlist-1.8.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6dc4126390929823e2d2d9dc79ab4046ed74680360fc5f38b585c12c66cdf459", size = 221531, upload-time = "2025-10-06T05:38:00.521Z" },
+ { url = "https://files.pythonhosted.org/packages/d0/03/27ec393f3b55860859f4b74cdc8c2a4af3dbf3533305e8eacf48a4fd9a54/frozenlist-1.8.0-cp39-cp39-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:332db6b2563333c5671fecacd085141b5800cb866be16d5e3eb15a2086476675", size = 219211, upload-time = "2025-10-06T05:38:01.842Z" },
+ { url = "https://files.pythonhosted.org/packages/3a/ad/0fd00c404fa73fe9b169429e9a972d5ed807973c40ab6b3cf9365a33d360/frozenlist-1.8.0-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:9ff15928d62a0b80bb875655c39bf517938c7d589554cbd2669be42d97c2cb61", size = 231775, upload-time = "2025-10-06T05:38:03.384Z" },
+ { url = "https://files.pythonhosted.org/packages/8a/c3/86962566154cb4d2995358bc8331bfc4ea19d07db1a96f64935a1607f2b6/frozenlist-1.8.0-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:7bf6cdf8e07c8151fba6fe85735441240ec7f619f935a5205953d58009aef8c6", size = 236631, upload-time = "2025-10-06T05:38:04.609Z" },
+ { url = "https://files.pythonhosted.org/packages/ea/9e/6ffad161dbd83782d2c66dc4d378a9103b31770cb1e67febf43aea42d202/frozenlist-1.8.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:48e6d3f4ec5c7273dfe83ff27c91083c6c9065af655dc2684d2c200c94308bb5", size = 218632, upload-time = "2025-10-06T05:38:05.917Z" },
+ { url = "https://files.pythonhosted.org/packages/58/b2/4677eee46e0a97f9b30735e6ad0bf6aba3e497986066eb68807ac85cf60f/frozenlist-1.8.0-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:1a7607e17ad33361677adcd1443edf6f5da0ce5e5377b798fba20fae194825f3", size = 235967, upload-time = "2025-10-06T05:38:07.614Z" },
+ { url = "https://files.pythonhosted.org/packages/05/f3/86e75f8639c5a93745ca7addbbc9de6af56aebb930d233512b17e46f6493/frozenlist-1.8.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:5a3a935c3a4e89c733303a2d5a7c257ea44af3a56c8202df486b7f5de40f37e1", size = 228799, upload-time = "2025-10-06T05:38:08.845Z" },
+ { url = "https://files.pythonhosted.org/packages/30/00/39aad3a7f0d98f5eb1d99a3c311215674ed87061aecee7851974b335c050/frozenlist-1.8.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:940d4a017dbfed9daf46a3b086e1d2167e7012ee297fef9e1c545c4d022f5178", size = 230566, upload-time = "2025-10-06T05:38:10.52Z" },
+ { url = "https://files.pythonhosted.org/packages/0d/4d/aa144cac44568d137846ddc4d5210fb5d9719eb1d7ec6fa2728a54b5b94a/frozenlist-1.8.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:b9be22a69a014bc47e78072d0ecae716f5eb56c15238acca0f43d6eb8e4a5bda", size = 217715, upload-time = "2025-10-06T05:38:11.832Z" },
+ { url = "https://files.pythonhosted.org/packages/64/4c/8f665921667509d25a0dd72540513bc86b356c95541686f6442a3283019f/frozenlist-1.8.0-cp39-cp39-win32.whl", hash = "sha256:1aa77cb5697069af47472e39612976ed05343ff2e84a3dcf15437b232cbfd087", size = 39933, upload-time = "2025-10-06T05:38:13.061Z" },
+ { url = "https://files.pythonhosted.org/packages/79/bd/bcc926f87027fad5e59926ff12d136e1082a115025d33c032d1cd69ab377/frozenlist-1.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:7398c222d1d405e796970320036b1b563892b65809d9e5261487bb2c7f7b5c6a", size = 44121, upload-time = "2025-10-06T05:38:14.572Z" },
+ { url = "https://files.pythonhosted.org/packages/4c/07/9c2e4eb7584af4b705237b971b89a4155a8e57599c4483a131a39256a9a0/frozenlist-1.8.0-cp39-cp39-win_arm64.whl", hash = "sha256:b4f3b365f31c6cd4af24545ca0a244a53688cad8834e32f56831c4923b50a103", size = 40312, upload-time = "2025-10-06T05:38:15.699Z" },
{ url = "https://files.pythonhosted.org/packages/9a/9a/e35b4a917281c0b8419d4207f4334c8e8c5dbf4f3f5f9ada73958d937dcc/frozenlist-1.8.0-py3-none-any.whl", hash = "sha256:0c18a16eab41e82c295618a77502e17b195883241c563b00f0aa5106fc4eaa0d", size = 13409, upload-time = "2025-10-06T05:38:16.721Z" },
]

@@ -492,15 +468,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/16/8d/85c9701e9af72ca132a1783e2a54364a90c6da832304416a30fc11196ab2/httpx_aiohttp-0.1.12-py3-none-any.whl", hash = "sha256:5b0eac39a7f360fa7867a60bcb46bb1024eada9c01cbfecdb54dc1edb3fb7141", size = 6367, upload-time = "2025-12-12T10:12:14.018Z" },
]

-[[package]]
-name = "identify"
-version = "2.6.18"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/46/c4/7fb4db12296cdb11893d61c92048fe617ee853f8523b9b296ac03b43757e/identify-2.6.18.tar.gz", hash = "sha256:873ac56a5e3fd63e7438a7ecbc4d91aca692eb3fefa4534db2b7913f3fc352fd", size = 99580, upload-time = "2026-03-15T18:39:50.319Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/46/33/92ef41c6fad0233e41d3d84ba8e8ad18d1780f1e5d99b3c683e6d7f98b63/identify-2.6.18-py2.py3-none-any.whl", hash = "sha256:8db9d3c8ea9079db92cafb0ebf97abdc09d52e97f4dcf773a2e694048b7cd737", size = 99394, upload-time = "2026-03-15T18:39:48.915Z" },
-]
-
[[package]]
name = "idna"
version = "3.11"
@@ -524,116 +491,58 @@ wheels = [
[[package]]
name = "iniconfig"
-version = "2.3.0"
+version = "2.1.0"
source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/72/34/14ca021ce8e5dfedc35312d08ba8bf51fdd999c576889fc2c24cb97f4f10/iniconfig-2.3.0.tar.gz", hash = "sha256:c76315c77db068650d49c5b56314774a7804df16fee4402c1f19d6d15d8c4730", size = 20503, upload-time = "2025-10-18T21:55:43.219Z" }
+resolution-markers = [
+ "python_full_version < '3.10'",
+]
+sdist = { url = "https://files.pythonhosted.org/packages/f2/97/ebf4da567aa6827c909642694d71c9fcf53e5b504f2d96afea02718862f3/iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7", size = 4793, upload-time = "2025-03-19T20:09:59.721Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12", size = 7484, upload-time = "2025-10-18T21:55:41.639Z" },
+ { url = "https://files.pythonhosted.org/packages/2c/e1/e6716421ea10d38022b952c159d5161ca1193197fb744506875fbb87ea7b/iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760", size = 6050, upload-time = "2025-03-19T20:10:01.071Z" },
]

[[package]]
-name = "llama-stack-client"
-version = "0.6.1a1"
-source = { editable = "." }
-dependencies = [
- { name = "anyio" },
- { name = "click" },
- { name = "distro" },
- { name = "fire" },
- { name = "httpx" },
- { name = "pandas" },
- { name = "prompt-toolkit" },
- { name = "pyaml" },
- { name = "pydantic", version = "1.10.26", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'group-18-llama-stack-client-pydantic-v1'" },
- { name = "pydantic", version = "2.12.5", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'group-18-llama-stack-client-pydantic-v2' or extra != 'group-18-llama-stack-client-pydantic-v1'" },
- { name = "requests" },
- { name = "rich" },
- { name = "sniffio" },
- { name = "termcolor" },
- { name = "tqdm" },
- { name = "typing-extensions" },
-]
-
-[package.optional-dependencies]
-aiohttp = [
- { name = "aiohttp" },
- { name = "httpx-aiohttp" },
-]
-
-[package.dev-dependencies]
-dev = [
- { name = "black" },
- { name = "dirty-equals" },
- { name = "importlib-metadata" },
- { name = "mypy" },
- { name = "pre-commit" },
- { name = "pyright" },
- { name = "pytest" },
- { name = "pytest-asyncio" },
- { name = "pytest-xdist" },
- { name = "respx" },
- { name = "rich" },
- { name = "ruff" },
- { name = "time-machine" },
-]
-pydantic-v1 = [
- { name = "pydantic", version = "1.10.26", source = { registry = "https://pypi.org/simple" } },
+name = "iniconfig"
+version = "2.3.0"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "python_full_version >= '3.14' and extra != 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2'",
+ "python_full_version >= '3.10' and python_full_version < '3.14' and extra != 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2'",
+ "python_full_version >= '3.10' and extra == 'group-10-ogx-client-pydantic-v1' and extra != 'group-10-ogx-client-pydantic-v2'",
+ "python_full_version >= '3.10' and extra != 'group-10-ogx-client-pydantic-v1' and extra != 'group-10-ogx-client-pydantic-v2'",
]
-pydantic-v2 = [
- { name = "pydantic", version = "2.12.5", source = { registry = "https://pypi.org/simple" } },
+sdist = { url = "https://files.pythonhosted.org/packages/72/34/14ca021ce8e5dfedc35312d08ba8bf51fdd999c576889fc2c24cb97f4f10/iniconfig-2.3.0.tar.gz", hash = "sha256:c76315c77db068650d49c5b56314774a7804df16fee4402c1f19d6d15d8c4730", size = 20503, upload-time = "2025-10-18T21:55:43.219Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12", size = 7484, upload-time = "2025-10-18T21:55:41.639Z" },
]

-[package.metadata]
-requires-dist = [
- { name = "aiohttp", marker = "extra == 'aiohttp'" },
- { name = "anyio", specifier = ">=3.5.0,<5" },
- { name = "click" },
- { name = "distro", specifier = ">=1.7.0,<2" },
- { name = "fire" },
- { name = "httpx", specifier = ">=0.23.0,<1" },
- { name = "httpx-aiohttp", marker = "extra == 'aiohttp'", specifier = ">=0.1.9" },
- { name = "pandas" },
- { name = "prompt-toolkit" },
- { name = "pyaml" },
- { name = "pydantic", specifier = ">=1.9.0,<3" },
- { name = "requests" },
- { name = "rich" },
- { name = "sniffio" },
- { name = "termcolor" },
- { name = "tqdm" },
- { name = "typing-extensions", specifier = ">=4.7,<5" },
- { name = "typing-extensions", specifier = ">=4.14,<5" },
+[[package]]
+name = "markdown-it-py"
+version = "3.0.0"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "python_full_version < '3.10'",
]
-provides-extras = ["aiohttp"]
-
-[package.metadata.requires-dev]
-dev = [
- { name = "black" },
- { name = "dirty-equals", specifier = ">=0.6.0" },
- { name = "importlib-metadata", specifier = ">=6.7.0" },
- { name = "mypy", specifier = "==1.17" },
- { name = "pre-commit" },
- { name = "pyright", specifier = "==1.1.399" },
- { name = "pytest", specifier = ">=7.1.1" },
- { name = "pytest-asyncio" },
- { name = "pytest-xdist", specifier = ">=3.6.1" },
- { name = "respx" },
- { name = "rich", specifier = ">=13.7.1" },
- { name = "ruff" },
- { name = "time-machine" },
+dependencies = [
+ { name = "mdurl", marker = "python_full_version < '3.10' or (extra == 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2')" },
]
-pydantic-v1 = [{ name = "pydantic", specifier = ">=1.9.0,<2" }]
-pydantic-v2 = [
- { name = "pydantic", marker = "python_full_version < '3.14'", specifier = "~=2.0" },
- { name = "pydantic", marker = "python_full_version >= '3.14'", specifier = "~=2.12" },
+sdist = { url = "https://files.pythonhosted.org/packages/38/71/3b932df36c1a044d397a1f92d1cf91ee0a503d91e470cbd670aa66b07ed0/markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb", size = 74596, upload-time = "2023-06-03T06:41:14.443Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/42/d7/1ec15b46af6af88f19b8e5ffea08fa375d433c998b8a7639e76935c14f1f/markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1", size = 87528, upload-time = "2023-06-03T06:41:11.019Z" },
]

[[package]]
name = "markdown-it-py"
version = "4.0.0"
source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "python_full_version >= '3.14' and extra != 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2'",
+ "python_full_version >= '3.10' and python_full_version < '3.14' and extra != 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2'",
+ "python_full_version >= '3.10' and extra == 'group-10-ogx-client-pydantic-v1' and extra != 'group-10-ogx-client-pydantic-v2'",
+ "python_full_version >= '3.10' and extra != 'group-10-ogx-client-pydantic-v1' and extra != 'group-10-ogx-client-pydantic-v2'",
+]
dependencies = [
- { name = "mdurl" },
+ { name = "mdurl", marker = "python_full_version >= '3.10' or (extra == 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2')" },
]
sdist = { url = "https://files.pythonhosted.org/packages/5b/f5/4ec618ed16cc4f8fb3b701563655a69816155e79e24a17b651541804721d/markdown_it_py-4.0.0.tar.gz", hash = "sha256:cb0a2b4aa34f932c007117b194e945bd74e0ec24133ceb5bac59009cda1cb9f3", size = 73070, upload-time = "2025-08-11T12:57:52.854Z" }
wheels = [
@@ -653,8 +562,47 @@ wheels = [
name = "multidict"
version = "6.7.0"
source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "typing-extensions", marker = "python_full_version < '3.11' or (extra == 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2')" },
+]
sdist = { url = "https://files.pythonhosted.org/packages/80/1e/5492c365f222f907de1039b91f922b93fa4f764c713ee858d235495d8f50/multidict-6.7.0.tar.gz", hash = "sha256:c6e99d9a65ca282e578dfea819cfa9c0a62b2499d8677392e09feaf305e9e6f5", size = 101834, upload-time = "2025-10-06T14:52:30.657Z" }
wheels = [
+ { url = "https://files.pythonhosted.org/packages/a9/63/7bdd4adc330abcca54c85728db2327130e49e52e8c3ce685cec44e0f2e9f/multidict-6.7.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:9f474ad5acda359c8758c8accc22032c6abe6dc87a8be2440d097785e27a9349", size = 77153, upload-time = "2025-10-06T14:48:26.409Z" },
+ { url = "https://files.pythonhosted.org/packages/3f/bb/b6c35ff175ed1a3142222b78455ee31be71a8396ed3ab5280fbe3ebe4e85/multidict-6.7.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4b7a9db5a870f780220e931d0002bbfd88fb53aceb6293251e2c839415c1b20e", size = 44993, upload-time = "2025-10-06T14:48:28.4Z" },
+ { url = "https://files.pythonhosted.org/packages/e0/1f/064c77877c5fa6df6d346e68075c0f6998547afe952d6471b4c5f6a7345d/multidict-6.7.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:03ca744319864e92721195fa28c7a3b2bc7b686246b35e4078c1e4d0eb5466d3", size = 44607, upload-time = "2025-10-06T14:48:29.581Z" },
+ { url = "https://files.pythonhosted.org/packages/04/7a/bf6aa92065dd47f287690000b3d7d332edfccb2277634cadf6a810463c6a/multidict-6.7.0-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:f0e77e3c0008bc9316e662624535b88d360c3a5d3f81e15cf12c139a75250046", size = 241847, upload-time = "2025-10-06T14:48:32.107Z" },
+ { url = "https://files.pythonhosted.org/packages/94/39/297a8de920f76eda343e4ce05f3b489f0ab3f9504f2576dfb37b7c08ca08/multidict-6.7.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:08325c9e5367aa379a3496aa9a022fe8837ff22e00b94db256d3a1378c76ab32", size = 242616, upload-time = "2025-10-06T14:48:34.054Z" },
+ { url = "https://files.pythonhosted.org/packages/39/3a/d0eee2898cfd9d654aea6cb8c4addc2f9756e9a7e09391cfe55541f917f7/multidict-6.7.0-cp310-cp310-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e2862408c99f84aa571ab462d25236ef9cb12a602ea959ba9c9009a54902fc73", size = 222333, upload-time = "2025-10-06T14:48:35.9Z" },
+ { url = "https://files.pythonhosted.org/packages/05/48/3b328851193c7a4240815b71eea165b49248867bbb6153a0aee227a0bb47/multidict-6.7.0-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:4d72a9a2d885f5c208b0cb91ff2ed43636bb7e345ec839ff64708e04f69a13cc", size = 253239, upload-time = "2025-10-06T14:48:37.302Z" },
+ { url = "https://files.pythonhosted.org/packages/b1/ca/0706a98c8d126a89245413225ca4a3fefc8435014de309cf8b30acb68841/multidict-6.7.0-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:478cc36476687bac1514d651cbbaa94b86b0732fb6855c60c673794c7dd2da62", size = 251618, upload-time = "2025-10-06T14:48:38.963Z" },
+ { url = "https://files.pythonhosted.org/packages/5e/4f/9c7992f245554d8b173f6f0a048ad24b3e645d883f096857ec2c0822b8bd/multidict-6.7.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6843b28b0364dc605f21481c90fadb5f60d9123b442eb8a726bb74feef588a84", size = 241655, upload-time = "2025-10-06T14:48:40.312Z" },
+ { url = "https://files.pythonhosted.org/packages/31/79/26a85991ae67efd1c0b1fc2e0c275b8a6aceeb155a68861f63f87a798f16/multidict-6.7.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:23bfeee5316266e5ee2d625df2d2c602b829435fc3a235c2ba2131495706e4a0", size = 239245, upload-time = "2025-10-06T14:48:41.848Z" },
+ { url = "https://files.pythonhosted.org/packages/14/1e/75fa96394478930b79d0302eaf9a6c69f34005a1a5251ac8b9c336486ec9/multidict-6.7.0-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:680878b9f3d45c31e1f730eef731f9b0bc1da456155688c6745ee84eb818e90e", size = 233523, upload-time = "2025-10-06T14:48:43.749Z" },
+ { url = "https://files.pythonhosted.org/packages/b2/5e/085544cb9f9c4ad2b5d97467c15f856df8d9bac410cffd5c43991a5d878b/multidict-6.7.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:eb866162ef2f45063acc7a53a88ef6fe8bf121d45c30ea3c9cd87ce7e191a8d4", size = 243129, upload-time = "2025-10-06T14:48:45.225Z" },
+ { url = "https://files.pythonhosted.org/packages/b9/c3/e9d9e2f20c9474e7a8fcef28f863c5cbd29bb5adce6b70cebe8bdad0039d/multidict-6.7.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:df0e3bf7993bdbeca5ac25aa859cf40d39019e015c9c91809ba7093967f7a648", size = 248999, upload-time = "2025-10-06T14:48:46.703Z" },
+ { url = "https://files.pythonhosted.org/packages/b5/3f/df171b6efa3239ae33b97b887e42671cd1d94d460614bfb2c30ffdab3b95/multidict-6.7.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:661709cdcd919a2ece2234f9bae7174e5220c80b034585d7d8a755632d3e2111", size = 243711, upload-time = "2025-10-06T14:48:48.146Z" },
+ { url = "https://files.pythonhosted.org/packages/3c/2f/9b5564888c4e14b9af64c54acf149263721a283aaf4aa0ae89b091d5d8c1/multidict-6.7.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:096f52730c3fb8ed419db2d44391932b63891b2c5ed14850a7e215c0ba9ade36", size = 237504, upload-time = "2025-10-06T14:48:49.447Z" },
+ { url = "https://files.pythonhosted.org/packages/6c/3a/0bd6ca0f7d96d790542d591c8c3354c1e1b6bfd2024d4d92dc3d87485ec7/multidict-6.7.0-cp310-cp310-win32.whl", hash = "sha256:afa8a2978ec65d2336305550535c9c4ff50ee527914328c8677b3973ade52b85", size = 41422, upload-time = "2025-10-06T14:48:50.789Z" },
+ { url = "https://files.pythonhosted.org/packages/00/35/f6a637ea2c75f0d3b7c7d41b1189189acff0d9deeb8b8f35536bb30f5e33/multidict-6.7.0-cp310-cp310-win_amd64.whl", hash = "sha256:b15b3afff74f707b9275d5ba6a91ae8f6429c3ffb29bbfd216b0b375a56f13d7", size = 46050, upload-time = "2025-10-06T14:48:51.938Z" },
+ { url = "https://files.pythonhosted.org/packages/e7/b8/f7bf8329b39893d02d9d95cf610c75885d12fc0f402b1c894e1c8e01c916/multidict-6.7.0-cp310-cp310-win_arm64.whl", hash = "sha256:4b73189894398d59131a66ff157837b1fafea9974be486d036bb3d32331fdbf0", size = 43153, upload-time = "2025-10-06T14:48:53.146Z" },
+ { url = "https://files.pythonhosted.org/packages/34/9e/5c727587644d67b2ed479041e4b1c58e30afc011e3d45d25bbe35781217c/multidict-6.7.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:4d409aa42a94c0b3fa617708ef5276dfe81012ba6753a0370fcc9d0195d0a1fc", size = 76604, upload-time = "2025-10-06T14:48:54.277Z" },
+ { url = "https://files.pythonhosted.org/packages/17/e4/67b5c27bd17c085a5ea8f1ec05b8a3e5cba0ca734bfcad5560fb129e70ca/multidict-6.7.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:14c9e076eede3b54c636f8ce1c9c252b5f057c62131211f0ceeec273810c9721", size = 44715, upload-time = "2025-10-06T14:48:55.445Z" },
+ { url = "https://files.pythonhosted.org/packages/4d/e1/866a5d77be6ea435711bef2a4291eed11032679b6b28b56b4776ab06ba3e/multidict-6.7.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4c09703000a9d0fa3c3404b27041e574cc7f4df4c6563873246d0e11812a94b6", size = 44332, upload-time = "2025-10-06T14:48:56.706Z" },
+ { url = "https://files.pythonhosted.org/packages/31/61/0c2d50241ada71ff61a79518db85ada85fdabfcf395d5968dae1cbda04e5/multidict-6.7.0-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:a265acbb7bb33a3a2d626afbe756371dce0279e7b17f4f4eda406459c2b5ff1c", size = 245212, upload-time = "2025-10-06T14:48:58.042Z" },
+ { url = "https://files.pythonhosted.org/packages/ac/e0/919666a4e4b57fff1b57f279be1c9316e6cdc5de8a8b525d76f6598fefc7/multidict-6.7.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:51cb455de290ae462593e5b1cb1118c5c22ea7f0d3620d9940bf695cea5a4bd7", size = 246671, upload-time = "2025-10-06T14:49:00.004Z" },
+ { url = "https://files.pythonhosted.org/packages/a1/cc/d027d9c5a520f3321b65adea289b965e7bcbd2c34402663f482648c716ce/multidict-6.7.0-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:db99677b4457c7a5c5a949353e125ba72d62b35f74e26da141530fbb012218a7", size = 225491, upload-time = "2025-10-06T14:49:01.393Z" },
+ { url = "https://files.pythonhosted.org/packages/75/c4/bbd633980ce6155a28ff04e6a6492dd3335858394d7bb752d8b108708558/multidict-6.7.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f470f68adc395e0183b92a2f4689264d1ea4b40504a24d9882c27375e6662bb9", size = 257322, upload-time = "2025-10-06T14:49:02.745Z" },
+ { url = "https://files.pythonhosted.org/packages/4c/6d/d622322d344f1f053eae47e033b0b3f965af01212de21b10bcf91be991fb/multidict-6.7.0-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0db4956f82723cc1c270de9c6e799b4c341d327762ec78ef82bb962f79cc07d8", size = 254694, upload-time = "2025-10-06T14:49:04.15Z" },
+ { url = "https://files.pythonhosted.org/packages/a8/9f/78f8761c2705d4c6d7516faed63c0ebdac569f6db1bef95e0d5218fdc146/multidict-6.7.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3e56d780c238f9e1ae66a22d2adf8d16f485381878250db8d496623cd38b22bd", size = 246715, upload-time = "2025-10-06T14:49:05.967Z" },
+ { url = "https://files.pythonhosted.org/packages/78/59/950818e04f91b9c2b95aab3d923d9eabd01689d0dcd889563988e9ea0fd8/multidict-6.7.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9d14baca2ee12c1a64740d4531356ba50b82543017f3ad6de0deb943c5979abb", size = 243189, upload-time = "2025-10-06T14:49:07.37Z" },
+ { url = "https://files.pythonhosted.org/packages/7a/3d/77c79e1934cad2ee74991840f8a0110966d9599b3af95964c0cd79bb905b/multidict-6.7.0-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:295a92a76188917c7f99cda95858c822f9e4aae5824246bba9b6b44004ddd0a6", size = 237845, upload-time = "2025-10-06T14:49:08.759Z" },
+ { url = "https://files.pythonhosted.org/packages/63/1b/834ce32a0a97a3b70f86437f685f880136677ac00d8bce0027e9fd9c2db7/multidict-6.7.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:39f1719f57adbb767ef592a50ae5ebb794220d1188f9ca93de471336401c34d2", size = 246374, upload-time = "2025-10-06T14:49:10.574Z" },
+ { url = "https://files.pythonhosted.org/packages/23/ef/43d1c3ba205b5dec93dc97f3fba179dfa47910fc73aaaea4f7ceb41cec2a/multidict-6.7.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:0a13fb8e748dfc94749f622de065dd5c1def7e0d2216dba72b1d8069a389c6ff", size = 253345, upload-time = "2025-10-06T14:49:12.331Z" },
+ { url = "https://files.pythonhosted.org/packages/6b/03/eaf95bcc2d19ead522001f6a650ef32811aa9e3624ff0ad37c445c7a588c/multidict-6.7.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:e3aa16de190d29a0ea1b48253c57d99a68492c8dd8948638073ab9e74dc9410b", size = 246940, upload-time = "2025-10-06T14:49:13.821Z" },
+ { url = "https://files.pythonhosted.org/packages/e8/df/ec8a5fd66ea6cd6f525b1fcbb23511b033c3e9bc42b81384834ffa484a62/multidict-6.7.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a048ce45dcdaaf1defb76b2e684f997fb5abf74437b6cb7b22ddad934a964e34", size = 242229, upload-time = "2025-10-06T14:49:15.603Z" },
+ { url = "https://files.pythonhosted.org/packages/8a/a2/59b405d59fd39ec86d1142630e9049243015a5f5291ba49cadf3c090c541/multidict-6.7.0-cp311-cp311-win32.whl", hash = "sha256:a90af66facec4cebe4181b9e62a68be65e45ac9b52b67de9eec118701856e7ff", size = 41308, upload-time = "2025-10-06T14:49:16.871Z" },
+ { url = "https://files.pythonhosted.org/packages/32/0f/13228f26f8b882c34da36efa776c3b7348455ec383bab4a66390e42963ae/multidict-6.7.0-cp311-cp311-win_amd64.whl", hash = "sha256:95b5ffa4349df2887518bb839409bcf22caa72d82beec453216802f475b23c81", size = 46037, upload-time = "2025-10-06T14:49:18.457Z" },
+ { url = "https://files.pythonhosted.org/packages/84/1f/68588e31b000535a3207fd3c909ebeec4fb36b52c442107499c18a896a2a/multidict-6.7.0-cp311-cp311-win_arm64.whl", hash = "sha256:329aa225b085b6f004a4955271a7ba9f1087e39dcb7e65f6284a988264a63912", size = 43023, upload-time = "2025-10-06T14:49:19.648Z" },
{ url = "https://files.pythonhosted.org/packages/c2/9e/9f61ac18d9c8b475889f32ccfa91c9f59363480613fc807b6e3023d6f60b/multidict-6.7.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:8a3862568a36d26e650a19bb5cbbba14b71789032aebc0423f8cc5f150730184", size = 76877, upload-time = "2025-10-06T14:49:20.884Z" },
{ url = "https://files.pythonhosted.org/packages/38/6f/614f09a04e6184f8824268fce4bc925e9849edfa654ddd59f0b64508c595/multidict-6.7.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:960c60b5849b9b4f9dcc9bea6e3626143c252c74113df2c1540aebce70209b45", size = 45467, upload-time = "2025-10-06T14:49:22.054Z" },
{ url = "https://files.pythonhosted.org/packages/b3/93/c4f67a436dd026f2e780c433277fff72be79152894d9fc36f44569cab1a6/multidict-6.7.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2049be98fb57a31b4ccf870bf377af2504d4ae35646a19037ec271e4c07998aa", size = 43834, upload-time = "2025-10-06T14:49:23.566Z" },
@@ -745,6 +693,24 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/39/ca/c05f144128ea232ae2178b008d5011d4e2cea86e4ee8c85c2631b1b94802/multidict-6.7.0-cp314-cp314t-win32.whl", hash = "sha256:b2d7f80c4e1fd010b07cb26820aae86b7e73b681ee4889684fb8d2d4537aab13", size = 48023, upload-time = "2025-10-06T14:51:51.883Z" },
{ url = "https://files.pythonhosted.org/packages/ba/8f/0a60e501584145588be1af5cc829265701ba3c35a64aec8e07cbb71d39bb/multidict-6.7.0-cp314-cp314t-win_amd64.whl", hash = "sha256:09929cab6fcb68122776d575e03c6cc64ee0b8fca48d17e135474b042ce515cd", size = 53507, upload-time = "2025-10-06T14:51:53.672Z" },
{ url = "https://files.pythonhosted.org/packages/7f/ae/3148b988a9c6239903e786eac19c889fab607c31d6efa7fb2147e5680f23/multidict-6.7.0-cp314-cp314t-win_arm64.whl", hash = "sha256:cc41db090ed742f32bd2d2c721861725e6109681eddf835d0a82bd3a5c382827", size = 44804, upload-time = "2025-10-06T14:51:55.415Z" },
+ { url = "https://files.pythonhosted.org/packages/90/d7/4cf84257902265c4250769ac49f4eaab81c182ee9aff8bf59d2714dbb174/multidict-6.7.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:363eb68a0a59bd2303216d2346e6c441ba10d36d1f9969fcb6f1ba700de7bb5c", size = 77073, upload-time = "2025-10-06T14:51:57.386Z" },
+ { url = "https://files.pythonhosted.org/packages/6d/51/194e999630a656e76c2965a1590d12faa5cd528170f2abaa04423e09fe8d/multidict-6.7.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d874eb056410ca05fed180b6642e680373688efafc7f077b2a2f61811e873a40", size = 44928, upload-time = "2025-10-06T14:51:58.791Z" },
+ { url = "https://files.pythonhosted.org/packages/e5/6b/2a195373c33068c9158e0941d0b46cfcc9c1d894ca2eb137d1128081dff0/multidict-6.7.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8b55d5497b51afdfde55925e04a022f1de14d4f4f25cdfd4f5d9b0aa96166851", size = 44581, upload-time = "2025-10-06T14:52:00.174Z" },
+ { url = "https://files.pythonhosted.org/packages/69/7b/7f4f2e644b6978bf011a5fd9a5ebb7c21de3f38523b1f7897d36a1ac1311/multidict-6.7.0-cp39-cp39-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:f8e5c0031b90ca9ce555e2e8fd5c3b02a25f14989cbc310701823832c99eb687", size = 239901, upload-time = "2025-10-06T14:52:02.416Z" },
+ { url = "https://files.pythonhosted.org/packages/3c/b5/952c72786710a031aa204a9adf7db66d7f97a2c6573889d58b9e60fe6702/multidict-6.7.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9cf41880c991716f3c7cec48e2f19ae4045fc9db5fc9cff27347ada24d710bb5", size = 240534, upload-time = "2025-10-06T14:52:04.105Z" },
+ { url = "https://files.pythonhosted.org/packages/f3/ef/109fe1f2471e4c458c74242c7e4a833f2d9fc8a6813cd7ee345b0bad18f9/multidict-6.7.0-cp39-cp39-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:8cfc12a8630a29d601f48d47787bd7eb730e475e83edb5d6c5084317463373eb", size = 219545, upload-time = "2025-10-06T14:52:06.208Z" },
+ { url = "https://files.pythonhosted.org/packages/42/bd/327d91288114967f9fe90dc53de70aa3fec1b9073e46aa32c4828f771a87/multidict-6.7.0-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:3996b50c3237c4aec17459217c1e7bbdead9a22a0fcd3c365564fbd16439dde6", size = 251187, upload-time = "2025-10-06T14:52:08.049Z" },
+ { url = "https://files.pythonhosted.org/packages/f4/13/a8b078ebbaceb7819fd28cd004413c33b98f1b70d542a62e6a00b74fb09f/multidict-6.7.0-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:7f5170993a0dd3ab871c74f45c0a21a4e2c37a2f2b01b5f722a2ad9c6650469e", size = 249379, upload-time = "2025-10-06T14:52:09.831Z" },
+ { url = "https://files.pythonhosted.org/packages/e3/6d/ab12e1246be4d65d1f55de1e6f6aaa9b8120eddcfdd1d290439c7833d5ce/multidict-6.7.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ec81878ddf0e98817def1e77d4f50dae5ef5b0e4fe796fae3bd674304172416e", size = 239241, upload-time = "2025-10-06T14:52:11.561Z" },
+ { url = "https://files.pythonhosted.org/packages/bb/d7/079a93625208c173b8fa756396814397c0fd9fee61ef87b75a748820b86e/multidict-6.7.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:9281bf5b34f59afbc6b1e477a372e9526b66ca446f4bf62592839c195a718b32", size = 237418, upload-time = "2025-10-06T14:52:13.671Z" },
+ { url = "https://files.pythonhosted.org/packages/c9/29/03777c2212274aa9440918d604dc9d6af0e6b4558c611c32c3dcf1a13870/multidict-6.7.0-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:68af405971779d8b37198726f2b6fe3955db846fee42db7a4286fc542203934c", size = 232987, upload-time = "2025-10-06T14:52:15.708Z" },
+ { url = "https://files.pythonhosted.org/packages/d9/00/11188b68d85a84e8050ee34724d6ded19ad03975caebe0c8dcb2829b37bf/multidict-6.7.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3ba3ef510467abb0667421a286dc906e30eb08569365f5cdb131d7aff7c2dd84", size = 240985, upload-time = "2025-10-06T14:52:17.317Z" },
+ { url = "https://files.pythonhosted.org/packages/df/0c/12eef6aeda21859c6cdf7d75bd5516d83be3efe3d8cc45fd1a3037f5b9dc/multidict-6.7.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:b61189b29081a20c7e4e0b49b44d5d44bb0dc92be3c6d06a11cc043f81bf9329", size = 246855, upload-time = "2025-10-06T14:52:19.096Z" },
+ { url = "https://files.pythonhosted.org/packages/69/f6/076120fd8bb3975f09228e288e08bff6b9f1bfd5166397c7ba284f622ab2/multidict-6.7.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:fb287618b9c7aa3bf8d825f02d9201b2f13078a5ed3b293c8f4d953917d84d5e", size = 241804, upload-time = "2025-10-06T14:52:21.166Z" },
+ { url = "https://files.pythonhosted.org/packages/5f/51/41bb950c81437b88a93e6ddfca1d8763569ae861e638442838c4375f7497/multidict-6.7.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:521f33e377ff64b96c4c556b81c55d0cfffb96a11c194fd0c3f1e56f3d8dd5a4", size = 235321, upload-time = "2025-10-06T14:52:23.208Z" },
+ { url = "https://files.pythonhosted.org/packages/5a/cf/5bbd31f055199d56c1f6b04bbadad3ccb24e6d5d4db75db774fc6d6674b8/multidict-6.7.0-cp39-cp39-win32.whl", hash = "sha256:ce8fdc2dca699f8dbf055a61d73eaa10482569ad20ee3c36ef9641f69afa8c91", size = 41435, upload-time = "2025-10-06T14:52:24.735Z" },
+ { url = "https://files.pythonhosted.org/packages/af/01/547ffe9c2faec91c26965c152f3fea6cff068b6037401f61d310cc861ff4/multidict-6.7.0-cp39-cp39-win_amd64.whl", hash = "sha256:7e73299c99939f089dd9b2120a04a516b95cdf8c1cd2b18c53ebf0de80b1f18f", size = 46193, upload-time = "2025-10-06T14:52:26.101Z" },
+ { url = "https://files.pythonhosted.org/packages/27/77/cfa5461d1d2651d6fc24216c92b4a21d4e385a41c46e0d9f3b070675167b/multidict-6.7.0-cp39-cp39-win_arm64.whl", hash = "sha256:6bdce131e14b04fd34a809b6380dbfd826065c3e2fe8a50dbae659fa0c390546", size = 43118, upload-time = "2025-10-06T14:52:27.876Z" },
{ url = "https://files.pythonhosted.org/packages/b7/da/7d22601b625e241d4f23ef1ebff8acfc60da633c9e7e7922e24d10f592b3/multidict-6.7.0-py3-none-any.whl", hash = "sha256:394fc5c42a333c9ffc3e421a4c85e08580d990e08b99f6bf35b4132114c5dcb3", size = 12317, upload-time = "2025-10-06T14:52:29.272Z" },
]
@@ -755,10 +721,23 @@ source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "mypy-extensions" },
{ name = "pathspec" },
+ { name = "tomli", marker = "python_full_version < '3.11' or (extra == 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2')" },
{ name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/1e/e3/034322d5a779685218ed69286c32faa505247f1f096251ef66c8fd203b08/mypy-1.17.0.tar.gz", hash = "sha256:e5d7ccc08ba089c06e2f5629c660388ef1fee708444f1dee0b9203fa031dee03", size = 3352114, upload-time = "2025-07-14T20:34:30.181Z" }
wheels = [
+ { url = "https://files.pythonhosted.org/packages/6a/31/e762baa3b73905c856d45ab77b4af850e8159dffffd86a52879539a08c6b/mypy-1.17.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f8e08de6138043108b3b18f09d3f817a4783912e48828ab397ecf183135d84d6", size = 10998313, upload-time = "2025-07-14T20:33:24.519Z" },
+ { url = "https://files.pythonhosted.org/packages/1c/c1/25b2f0d46fb7e0b5e2bee61ec3a47fe13eff9e3c2f2234f144858bbe6485/mypy-1.17.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ce4a17920ec144647d448fc43725b5873548b1aae6c603225626747ededf582d", size = 10128922, upload-time = "2025-07-14T20:34:06.414Z" },
+ { url = "https://files.pythonhosted.org/packages/02/78/6d646603a57aa8a2886df1b8881fe777ea60f28098790c1089230cd9c61d/mypy-1.17.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6ff25d151cc057fdddb1cb1881ef36e9c41fa2a5e78d8dd71bee6e4dcd2bc05b", size = 11913524, upload-time = "2025-07-14T20:33:19.109Z" },
+ { url = "https://files.pythonhosted.org/packages/4f/19/dae6c55e87ee426fb76980f7e78484450cad1c01c55a1dc4e91c930bea01/mypy-1.17.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:93468cf29aa9a132bceb103bd8475f78cacde2b1b9a94fd978d50d4bdf616c9a", size = 12650527, upload-time = "2025-07-14T20:32:44.095Z" },
+ { url = "https://files.pythonhosted.org/packages/86/e1/f916845a235235a6c1e4d4d065a3930113767001d491b8b2e1b61ca56647/mypy-1.17.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:98189382b310f16343151f65dd7e6867386d3e35f7878c45cfa11383d175d91f", size = 12897284, upload-time = "2025-07-14T20:33:38.168Z" },
+ { url = "https://files.pythonhosted.org/packages/ae/dc/414760708a4ea1b096bd214d26a24e30ac5e917ef293bc33cdb6fe22d2da/mypy-1.17.0-cp310-cp310-win_amd64.whl", hash = "sha256:c004135a300ab06a045c1c0d8e3f10215e71d7b4f5bb9a42ab80236364429937", size = 9506493, upload-time = "2025-07-14T20:34:01.093Z" },
+ { url = "https://files.pythonhosted.org/packages/d4/24/82efb502b0b0f661c49aa21cfe3e1999ddf64bf5500fc03b5a1536a39d39/mypy-1.17.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:9d4fe5c72fd262d9c2c91c1117d16aac555e05f5beb2bae6a755274c6eec42be", size = 10914150, upload-time = "2025-07-14T20:31:51.985Z" },
+ { url = "https://files.pythonhosted.org/packages/03/96/8ef9a6ff8cedadff4400e2254689ca1dc4b420b92c55255b44573de10c54/mypy-1.17.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d96b196e5c16f41b4f7736840e8455958e832871990c7ba26bf58175e357ed61", size = 10039845, upload-time = "2025-07-14T20:32:30.527Z" },
+ { url = "https://files.pythonhosted.org/packages/df/32/7ce359a56be779d38021d07941cfbb099b41411d72d827230a36203dbb81/mypy-1.17.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:73a0ff2dd10337ceb521c080d4147755ee302dcde6e1a913babd59473904615f", size = 11837246, upload-time = "2025-07-14T20:32:01.28Z" },
+ { url = "https://files.pythonhosted.org/packages/82/16/b775047054de4d8dbd668df9137707e54b07fe18c7923839cd1e524bf756/mypy-1.17.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:24cfcc1179c4447854e9e406d3af0f77736d631ec87d31c6281ecd5025df625d", size = 12571106, upload-time = "2025-07-14T20:34:26.942Z" },
+ { url = "https://files.pythonhosted.org/packages/a1/cf/fa33eaf29a606102c8d9ffa45a386a04c2203d9ad18bf4eef3e20c43ebc8/mypy-1.17.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3c56f180ff6430e6373db7a1d569317675b0a451caf5fef6ce4ab365f5f2f6c3", size = 12759960, upload-time = "2025-07-14T20:33:42.882Z" },
+ { url = "https://files.pythonhosted.org/packages/94/75/3f5a29209f27e739ca57e6350bc6b783a38c7621bdf9cac3ab8a08665801/mypy-1.17.0-cp311-cp311-win_amd64.whl", hash = "sha256:eafaf8b9252734400f9b77df98b4eee3d2eecab16104680d51341c75702cad70", size = 9503888, upload-time = "2025-07-14T20:32:34.392Z" },
{ url = "https://files.pythonhosted.org/packages/12/e9/e6824ed620bbf51d3bf4d6cbbe4953e83eaf31a448d1b3cfb3620ccb641c/mypy-1.17.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f986f1cab8dbec39ba6e0eaa42d4d3ac6686516a5d3dccd64be095db05ebc6bb", size = 11086395, upload-time = "2025-07-14T20:34:11.452Z" },
{ url = "https://files.pythonhosted.org/packages/ba/51/a4afd1ae279707953be175d303f04a5a7bd7e28dc62463ad29c1c857927e/mypy-1.17.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:51e455a54d199dd6e931cd7ea987d061c2afbaf0960f7f66deef47c90d1b304d", size = 10120052, upload-time = "2025-07-14T20:33:09.897Z" },
{ url = "https://files.pythonhosted.org/packages/8a/71/19adfeac926ba8205f1d1466d0d360d07b46486bf64360c54cb5a2bd86a8/mypy-1.17.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3204d773bab5ff4ebbd1f8efa11b498027cd57017c003ae970f310e5b96be8d8", size = 11861806, upload-time = "2025-07-14T20:32:16.028Z" },
@@ -771,6 +750,12 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/4c/66/85607ab5137d65e4f54d9797b77d5a038ef34f714929cf8ad30b03f628df/mypy-1.17.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:037bc0f0b124ce46bfde955c647f3e395c6174476a968c0f22c95a8d2f589bba", size = 12731358, upload-time = "2025-07-14T20:32:25.579Z" },
{ url = "https://files.pythonhosted.org/packages/73/d0/341dbbfb35ce53d01f8f2969facbb66486cee9804048bf6c01b048127501/mypy-1.17.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:c38876106cb6132259683632b287238858bd58de267d80defb6f418e9ee50658", size = 12917480, upload-time = "2025-07-14T20:34:21.868Z" },
{ url = "https://files.pythonhosted.org/packages/64/63/70c8b7dbfc520089ac48d01367a97e8acd734f65bd07813081f508a8c94c/mypy-1.17.0-cp313-cp313-win_amd64.whl", hash = "sha256:d30ba01c0f151998f367506fab31c2ac4527e6a7b2690107c7a7f9e3cb419a9c", size = 9589666, upload-time = "2025-07-14T20:34:16.841Z" },
+ { url = "https://files.pythonhosted.org/packages/9f/a0/6263dd11941231f688f0a8f2faf90ceac1dc243d148d314a089d2fe25108/mypy-1.17.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:63e751f1b5ab51d6f3d219fe3a2fe4523eaa387d854ad06906c63883fde5b1ab", size = 10988185, upload-time = "2025-07-14T20:33:04.797Z" },
+ { url = "https://files.pythonhosted.org/packages/02/13/b8f16d6b0dc80277129559c8e7dbc9011241a0da8f60d031edb0e6e9ac8f/mypy-1.17.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f7fb09d05e0f1c329a36dcd30e27564a3555717cde87301fae4fb542402ddfad", size = 10120169, upload-time = "2025-07-14T20:32:38.84Z" },
+ { url = "https://files.pythonhosted.org/packages/14/ef/978ba79df0d65af680e20d43121363cf643eb79b04bf3880d01fc8afeb6f/mypy-1.17.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b72c34ce05ac3a1361ae2ebb50757fb6e3624032d91488d93544e9f82db0ed6c", size = 11918121, upload-time = "2025-07-14T20:33:52.328Z" },
+ { url = "https://files.pythonhosted.org/packages/f4/10/55ef70b104151a0d8280474f05268ff0a2a79be8d788d5e647257d121309/mypy-1.17.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:434ad499ad8dde8b2f6391ddfa982f41cb07ccda8e3c67781b1bfd4e5f9450a8", size = 12648821, upload-time = "2025-07-14T20:32:59.631Z" },
+ { url = "https://files.pythonhosted.org/packages/26/8c/7781fcd2e1eef48fbedd3a422c21fe300a8e03ed5be2eb4bd10246a77f4e/mypy-1.17.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:f105f61a5eff52e137fd73bee32958b2add9d9f0a856f17314018646af838e97", size = 12896955, upload-time = "2025-07-14T20:32:49.543Z" },
+ { url = "https://files.pythonhosted.org/packages/78/13/03ac759dabe86e98ca7b6681f114f90ee03f3ff8365a57049d311bd4a4e3/mypy-1.17.0-cp39-cp39-win_amd64.whl", hash = "sha256:ba06254a5a22729853209550d80f94e28690d5530c661f9416a68ac097b13fc4", size = 9512957, upload-time = "2025-07-14T20:33:28.619Z" },
{ url = "https://files.pythonhosted.org/packages/e3/fc/ee058cc4316f219078464555873e99d170bde1d9569abd833300dbeb484a/mypy-1.17.0-py3-none-any.whl", hash = "sha256:15d9d0018237ab058e5de3d8fce61b6fa72cc59cc78fd91f1b474bce12abf496", size = 2283195, upload-time = "2025-07-14T20:31:54.753Z" },
]
@@ -793,64 +778,80 @@ wheels = [
]

[[package]]
-name = "numpy"
-version = "2.4.3"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/10/8b/c265f4823726ab832de836cdd184d0986dcf94480f81e8739692a7ac7af2/numpy-2.4.3.tar.gz", hash = "sha256:483a201202b73495f00dbc83796c6ae63137a9bdade074f7648b3e32613412dd", size = 20727743, upload-time = "2026-03-09T07:58:53.426Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/a9/ed/6388632536f9788cea23a3a1b629f25b43eaacd7d7377e5d6bc7b9deb69b/numpy-2.4.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:61b0cbabbb6126c8df63b9a3a0c4b1f44ebca5e12ff6997b80fcf267fb3150ef", size = 16669628, upload-time = "2026-03-09T07:56:24.252Z" },
- { url = "https://files.pythonhosted.org/packages/74/1b/ee2abfc68e1ce728b2958b6ba831d65c62e1b13ce3017c13943f8f9b5b2e/numpy-2.4.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7395e69ff32526710748f92cd8c9849b361830968ea3e24a676f272653e8983e", size = 14696872, upload-time = "2026-03-09T07:56:26.991Z" },
- { url = "https://files.pythonhosted.org/packages/ba/d1/780400e915ff5638166f11ca9dc2c5815189f3d7cf6f8759a1685e586413/numpy-2.4.3-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:abdce0f71dcb4a00e4e77f3faf05e4616ceccfe72ccaa07f47ee79cda3b7b0f4", size = 5203489, upload-time = "2026-03-09T07:56:29.414Z" },
- { url = "https://files.pythonhosted.org/packages/0b/bb/baffa907e9da4cc34a6e556d6d90e032f6d7a75ea47968ea92b4858826c4/numpy-2.4.3-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:48da3a4ee1336454b07497ff7ec83903efa5505792c4e6d9bf83d99dc07a1e18", size = 6550814, upload-time = "2026-03-09T07:56:32.225Z" },
- { url = "https://files.pythonhosted.org/packages/7b/12/8c9f0c6c95f76aeb20fc4a699c33e9f827fa0d0f857747c73bb7b17af945/numpy-2.4.3-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:32e3bef222ad6b052280311d1d60db8e259e4947052c3ae7dd6817451fc8a4c5", size = 15666601, upload-time = "2026-03-09T07:56:34.461Z" },
- { url = "https://files.pythonhosted.org/packages/bd/79/cc665495e4d57d0aa6fbcc0aa57aa82671dfc78fbf95fe733ed86d98f52a/numpy-2.4.3-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e7dd01a46700b1967487141a66ac1a3cf0dd8ebf1f08db37d46389401512ca97", size = 16621358, upload-time = "2026-03-09T07:56:36.852Z" },
- { url = "https://files.pythonhosted.org/packages/a8/40/b4ecb7224af1065c3539f5ecfff879d090de09608ad1008f02c05c770cb3/numpy-2.4.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:76f0f283506c28b12bba319c0fab98217e9f9b54e6160e9c79e9f7348ba32e9c", size = 17016135, upload-time = "2026-03-09T07:56:39.337Z" },
- { url = "https://files.pythonhosted.org/packages/f7/b1/6a88e888052eed951afed7a142dcdf3b149a030ca59b4c71eef085858e43/numpy-2.4.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:737f630a337364665aba3b5a77e56a68cc42d350edd010c345d65a3efa3addcc", size = 18345816, upload-time = "2026-03-09T07:56:42.31Z" },
- { url = "https://files.pythonhosted.org/packages/f3/8f/103a60c5f8c3d7fc678c19cd7b2476110da689ccb80bc18050efbaeae183/numpy-2.4.3-cp312-cp312-win32.whl", hash = "sha256:26952e18d82a1dbbc2f008d402021baa8d6fc8e84347a2072a25e08b46d698b9", size = 5960132, upload-time = "2026-03-09T07:56:44.851Z" },
- { url = "https://files.pythonhosted.org/packages/d7/7c/f5ee1bf6ed888494978046a809df2882aad35d414b622893322df7286879/numpy-2.4.3-cp312-cp312-win_amd64.whl", hash = "sha256:65f3c2455188f09678355f5cae1f959a06b778bc66d535da07bf2ef20cd319d5", size = 12316144, upload-time = "2026-03-09T07:56:47.057Z" },
- { url = "https://files.pythonhosted.org/packages/71/46/8d1cb3f7a00f2fb6394140e7e6623696e54c6318a9d9691bb4904672cf42/numpy-2.4.3-cp312-cp312-win_arm64.whl", hash = "sha256:2abad5c7fef172b3377502bde47892439bae394a71bc329f31df0fd829b41a9e", size = 10220364, upload-time = "2026-03-09T07:56:49.849Z" },
- { url = "https://files.pythonhosted.org/packages/b6/d0/1fe47a98ce0df229238b77611340aff92d52691bcbc10583303181abf7fc/numpy-2.4.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b346845443716c8e542d54112966383b448f4a3ba5c66409771b8c0889485dd3", size = 16665297, upload-time = "2026-03-09T07:56:52.296Z" },
- { url = "https://files.pythonhosted.org/packages/27/d9/4e7c3f0e68dfa91f21c6fb6cf839bc829ec920688b1ce7ec722b1a6202fb/numpy-2.4.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:2629289168f4897a3c4e23dc98d6f1731f0fc0fe52fb9db19f974041e4cc12b9", size = 14691853, upload-time = "2026-03-09T07:56:54.992Z" },
- { url = "https://files.pythonhosted.org/packages/3a/66/bd096b13a87549683812b53ab211e6d413497f84e794fb3c39191948da97/numpy-2.4.3-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:bb2e3cf95854233799013779216c57e153c1ee67a0bf92138acca0e429aefaee", size = 5198435, upload-time = "2026-03-09T07:56:57.184Z" },
- { url = "https://files.pythonhosted.org/packages/a2/2f/687722910b5a5601de2135c891108f51dfc873d8e43c8ed9f4ebb440b4a2/numpy-2.4.3-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:7f3408ff897f8ab07a07fbe2823d7aee6ff644c097cc1f90382511fe982f647f", size = 6546347, upload-time = "2026-03-09T07:56:59.531Z" },
- { url = "https://files.pythonhosted.org/packages/bf/ec/7971c4e98d86c564750393fab8d7d83d0a9432a9d78bb8a163a6dc59967a/numpy-2.4.3-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:decb0eb8a53c3b009b0962378065589685d66b23467ef5dac16cbe818afde27f", size = 15664626, upload-time = "2026-03-09T07:57:01.385Z" },
- { url = "https://files.pythonhosted.org/packages/7e/eb/7daecbea84ec935b7fc732e18f532073064a3816f0932a40a17f3349185f/numpy-2.4.3-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d5f51900414fc9204a0e0da158ba2ac52b75656e7dce7e77fb9f84bfa343b4cc", size = 16608916, upload-time = "2026-03-09T07:57:04.008Z" },
- { url = "https://files.pythonhosted.org/packages/df/58/2a2b4a817ffd7472dca4421d9f0776898b364154e30c95f42195041dc03b/numpy-2.4.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6bd06731541f89cdc01b261ba2c9e037f1543df7472517836b78dfb15bd6e476", size = 17015824, upload-time = "2026-03-09T07:57:06.347Z" },
- { url = "https://files.pythonhosted.org/packages/4a/ca/627a828d44e78a418c55f82dd4caea8ea4a8ef24e5144d9e71016e52fb40/numpy-2.4.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:22654fe6be0e5206f553a9250762c653d3698e46686eee53b399ab90da59bd92", size = 18334581, upload-time = "2026-03-09T07:57:09.114Z" },
- { url = "https://files.pythonhosted.org/packages/cd/c0/76f93962fc79955fcba30a429b62304332345f22d4daec1cb33653425643/numpy-2.4.3-cp313-cp313-win32.whl", hash = "sha256:d71e379452a2f670ccb689ec801b1218cd3983e253105d6e83780967e899d687", size = 5958618, upload-time = "2026-03-09T07:57:11.432Z" },
- { url = "https://files.pythonhosted.org/packages/b1/3c/88af0040119209b9b5cb59485fa48b76f372c73068dbf9254784b975ac53/numpy-2.4.3-cp313-cp313-win_amd64.whl", hash = "sha256:0a60e17a14d640f49146cb38e3f105f571318db7826d9b6fef7e4dce758faecd", size = 12312824, upload-time = "2026-03-09T07:57:13.586Z" },
- { url = "https://files.pythonhosted.org/packages/58/ce/3d07743aced3d173f877c3ef6a454c2174ba42b584ab0b7e6d99374f51ed/numpy-2.4.3-cp313-cp313-win_arm64.whl", hash = "sha256:c9619741e9da2059cd9c3f206110b97583c7152c1dc9f8aafd4beb450ac1c89d", size = 10221218, upload-time = "2026-03-09T07:57:16.183Z" },
- { url = "https://files.pythonhosted.org/packages/62/09/d96b02a91d09e9d97862f4fc8bfebf5400f567d8eb1fe4b0cc4795679c15/numpy-2.4.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:7aa4e54f6469300ebca1d9eb80acd5253cdfa36f2c03d79a35883687da430875", size = 14819570, upload-time = "2026-03-09T07:57:18.564Z" },
- { url = "https://files.pythonhosted.org/packages/b5/ca/0b1aba3905fdfa3373d523b2b15b19029f4f3031c87f4066bd9d20ef6c6b/numpy-2.4.3-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:d1b90d840b25874cf5cd20c219af10bac3667db3876d9a495609273ebe679070", size = 5326113, upload-time = "2026-03-09T07:57:21.052Z" },
- { url = "https://files.pythonhosted.org/packages/c0/63/406e0fd32fcaeb94180fd6a4c41e55736d676c54346b7efbce548b94a914/numpy-2.4.3-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:a749547700de0a20a6718293396ec237bb38218049cfce788e08fcb716e8cf73", size = 6646370, upload-time = "2026-03-09T07:57:22.804Z" },
- { url = "https://files.pythonhosted.org/packages/b6/d0/10f7dc157d4b37af92720a196be6f54f889e90dcd30dce9dc657ed92c257/numpy-2.4.3-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:94f3c4a151a2e529adf49c1d54f0f57ff8f9b233ee4d44af623a81553ab86368", size = 15723499, upload-time = "2026-03-09T07:57:24.693Z" },
- { url = "https://files.pythonhosted.org/packages/66/f1/d1c2bf1161396629701bc284d958dc1efa3a5a542aab83cf11ee6eb4cba5/numpy-2.4.3-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:22c31dc07025123aedf7f2db9e91783df13f1776dc52c6b22c620870dc0fab22", size = 16657164, upload-time = "2026-03-09T07:57:27.676Z" },
- { url = "https://files.pythonhosted.org/packages/1a/be/cca19230b740af199ac47331a21c71e7a3d0ba59661350483c1600d28c37/numpy-2.4.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:148d59127ac95979d6f07e4d460f934ebdd6eed641db9c0db6c73026f2b2101a", size = 17081544, upload-time = "2026-03-09T07:57:30.664Z" },
- { url = "https://files.pythonhosted.org/packages/b9/c5/9602b0cbb703a0936fb40f8a95407e8171935b15846de2f0776e08af04c7/numpy-2.4.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:a97cbf7e905c435865c2d939af3d93f99d18eaaa3cabe4256f4304fb51604349", size = 18380290, upload-time = "2026-03-09T07:57:33.763Z" },
- { url = "https://files.pythonhosted.org/packages/ed/81/9f24708953cd30be9ee36ec4778f4b112b45165812f2ada4cc5ea1c1f254/numpy-2.4.3-cp313-cp313t-win32.whl", hash = "sha256:be3b8487d725a77acccc9924f65fd8bce9af7fac8c9820df1049424a2115af6c", size = 6082814, upload-time = "2026-03-09T07:57:36.491Z" },
- { url = "https://files.pythonhosted.org/packages/e2/9e/52f6eaa13e1a799f0ab79066c17f7016a4a8ae0c1aefa58c82b4dab690b4/numpy-2.4.3-cp313-cp313t-win_amd64.whl", hash = "sha256:1ec84fd7c8e652b0f4aaaf2e6e9cc8eaa9b1b80a537e06b2e3a2fb176eedcb26", size = 12452673, upload-time = "2026-03-09T07:57:38.281Z" },
- { url = "https://files.pythonhosted.org/packages/c4/04/b8cece6ead0b30c9fbd99bb835ad7ea0112ac5f39f069788c5558e3b1ab2/numpy-2.4.3-cp313-cp313t-win_arm64.whl", hash = "sha256:120df8c0a81ebbf5b9020c91439fccd85f5e018a927a39f624845be194a2be02", size = 10290907, upload-time = "2026-03-09T07:57:40.747Z" },
- { url = "https://files.pythonhosted.org/packages/70/ae/3936f79adebf8caf81bd7a599b90a561334a658be4dcc7b6329ebf4ee8de/numpy-2.4.3-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:5884ce5c7acfae1e4e1b6fde43797d10aa506074d25b531b4f54bde33c0c31d4", size = 16664563, upload-time = "2026-03-09T07:57:43.817Z" },
- { url = "https://files.pythonhosted.org/packages/9b/62/760f2b55866b496bb1fa7da2a6db076bef908110e568b02fcfc1422e2a3a/numpy-2.4.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:297837823f5bc572c5f9379b0c9f3a3365f08492cbdc33bcc3af174372ebb168", size = 14702161, upload-time = "2026-03-09T07:57:46.169Z" },
- { url = "https://files.pythonhosted.org/packages/32/af/a7a39464e2c0a21526fb4fb76e346fb172ebc92f6d1c7a07c2c139cc17b1/numpy-2.4.3-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:a111698b4a3f8dcbe54c64a7708f049355abd603e619013c346553c1fd4ca90b", size = 5208738, upload-time = "2026-03-09T07:57:48.506Z" },
- { url = "https://files.pythonhosted.org/packages/29/8c/2a0cf86a59558fa078d83805589c2de490f29ed4fb336c14313a161d358a/numpy-2.4.3-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:4bd4741a6a676770e0e97fe9ab2e51de01183df3dcbcec591d26d331a40de950", size = 6543618, upload-time = "2026-03-09T07:57:50.591Z" },
- { url = "https://files.pythonhosted.org/packages/aa/b8/612ce010c0728b1c363fa4ea3aa4c22fe1c5da1de008486f8c2f5cb92fae/numpy-2.4.3-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:54f29b877279d51e210e0c80709ee14ccbbad647810e8f3d375561c45ef613dd", size = 15680676, upload-time = "2026-03-09T07:57:52.34Z" },
- { url = "https://files.pythonhosted.org/packages/a9/7e/4f120ecc54ba26ddf3dc348eeb9eb063f421de65c05fc961941798feea18/numpy-2.4.3-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:679f2a834bae9020f81534671c56fd0cc76dd7e5182f57131478e23d0dc59e24", size = 16613492, upload-time = "2026-03-09T07:57:54.91Z" },
- { url = "https://files.pythonhosted.org/packages/2c/86/1b6020db73be330c4b45d5c6ee4295d59cfeef0e3ea323959d053e5a6909/numpy-2.4.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:d84f0f881cb2225c2dfd7f78a10a5645d487a496c6668d6cc39f0f114164f3d0", size = 17031789, upload-time = "2026-03-09T07:57:57.641Z" },
- { url = "https://files.pythonhosted.org/packages/07/3a/3b90463bf41ebc21d1b7e06079f03070334374208c0f9a1f05e4ae8455e7/numpy-2.4.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d213c7e6e8d211888cc359bab7199670a00f5b82c0978b9d1c75baf1eddbeac0", size = 18339941, upload-time = "2026-03-09T07:58:00.577Z" },
- { url = "https://files.pythonhosted.org/packages/a8/74/6d736c4cd962259fd8bae9be27363eb4883a2f9069763747347544c2a487/numpy-2.4.3-cp314-cp314-win32.whl", hash = "sha256:52077feedeff7c76ed7c9f1a0428558e50825347b7545bbb8523da2cd55c547a", size = 6007503, upload-time = "2026-03-09T07:58:03.331Z" },
- { url = "https://files.pythonhosted.org/packages/48/39/c56ef87af669364356bb011922ef0734fc49dad51964568634c72a009488/numpy-2.4.3-cp314-cp314-win_amd64.whl", hash = "sha256:0448e7f9caefb34b4b7dd2b77f21e8906e5d6f0365ad525f9f4f530b13df2afc", size = 12444915, upload-time = "2026-03-09T07:58:06.353Z" },
- { url = "https://files.pythonhosted.org/packages/9d/1f/ab8528e38d295fd349310807496fabb7cf9fe2e1f70b97bc20a483ea9d4a/numpy-2.4.3-cp314-cp314-win_arm64.whl", hash = "sha256:b44fd60341c4d9783039598efadd03617fa28d041fc37d22b62d08f2027fa0e7", size = 10494875, upload-time = "2026-03-09T07:58:08.734Z" },
- { url = "https://files.pythonhosted.org/packages/e6/ef/b7c35e4d5ef141b836658ab21a66d1a573e15b335b1d111d31f26c8ef80f/numpy-2.4.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:0a195f4216be9305a73c0e91c9b026a35f2161237cf1c6de9b681637772ea657", size = 14822225, upload-time = "2026-03-09T07:58:11.034Z" },
- { url = "https://files.pythonhosted.org/packages/cd/8d/7730fa9278cf6648639946cc816e7cc89f0d891602584697923375f801ed/numpy-2.4.3-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:cd32fbacb9fd1bf041bf8e89e4576b6f00b895f06d00914820ae06a616bdfef7", size = 5328769, upload-time = "2026-03-09T07:58:13.67Z" },
- { url = "https://files.pythonhosted.org/packages/47/01/d2a137317c958b074d338807c1b6a383406cdf8b8e53b075d804cc3d211d/numpy-2.4.3-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:2e03c05abaee1f672e9d67bc858f300b5ccba1c21397211e8d77d98350972093", size = 6649461, upload-time = "2026-03-09T07:58:15.912Z" },
- { url = "https://files.pythonhosted.org/packages/5c/34/812ce12bc0f00272a4b0ec0d713cd237cb390666eb6206323d1cc9cedbb2/numpy-2.4.3-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7d1ce23cce91fcea443320a9d0ece9b9305d4368875bab09538f7a5b4131938a", size = 15725809, upload-time = "2026-03-09T07:58:17.787Z" },
- { url = "https://files.pythonhosted.org/packages/25/c0/2aed473a4823e905e765fee3dc2cbf504bd3e68ccb1150fbdabd5c39f527/numpy-2.4.3-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c59020932feb24ed49ffd03704fbab89f22aa9c0d4b180ff45542fe8918f5611", size = 16655242, upload-time = "2026-03-09T07:58:20.476Z" },
- { url = "https://files.pythonhosted.org/packages/f2/c8/7e052b2fc87aa0e86de23f20e2c42bd261c624748aa8efd2c78f7bb8d8c6/numpy-2.4.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:9684823a78a6cd6ad7511fc5e25b07947d1d5b5e2812c93fe99d7d4195130720", size = 17080660, upload-time = "2026-03-09T07:58:23.067Z" },
- { url = "https://files.pythonhosted.org/packages/f3/3d/0876746044db2adcb11549f214d104f2e1be00f07a67edbb4e2812094847/numpy-2.4.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:0200b25c687033316fb39f0ff4e3e690e8957a2c3c8d22499891ec58c37a3eb5", size = 18380384, upload-time = "2026-03-09T07:58:25.839Z" },
- { url = "https://files.pythonhosted.org/packages/07/12/8160bea39da3335737b10308df4f484235fd297f556745f13092aa039d3b/numpy-2.4.3-cp314-cp314t-win32.whl", hash = "sha256:5e10da9e93247e554bb1d22f8edc51847ddd7dde52d85ce31024c1b4312bfba0", size = 6154547, upload-time = "2026-03-09T07:58:28.289Z" },
- { url = "https://files.pythonhosted.org/packages/42/f3/76534f61f80d74cc9cdf2e570d3d4eeb92c2280a27c39b0aaf471eda7b48/numpy-2.4.3-cp314-cp314t-win_amd64.whl", hash = "sha256:45f003dbdffb997a03da2d1d0cb41fbd24a87507fb41605c0420a3db5bd4667b", size = 12633645, upload-time = "2026-03-09T07:58:30.384Z" },
- { url = "https://files.pythonhosted.org/packages/1f/b6/7c0d4334c15983cec7f92a69e8ce9b1e6f31857e5ee3a413ac424e6bd63d/numpy-2.4.3-cp314-cp314t-win_arm64.whl", hash = "sha256:4d382735cecd7bcf090172489a525cd7d4087bc331f7df9f60ddc9a296cf208e", size = 10565454, upload-time = "2026-03-09T07:58:33.031Z" },
+name = "ogx-client"
+version = "0.7.0a2"
+source = { editable = "." }
+dependencies = [
+ { name = "anyio" },
+ { name = "distro" },
+ { name = "httpx" },
+ { name = "pydantic", version = "1.10.26", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'group-10-ogx-client-pydantic-v1'" },
+ { name = "pydantic", version = "2.12.5", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'group-10-ogx-client-pydantic-v2' or extra != 'group-10-ogx-client-pydantic-v1'" },
+ { name = "sniffio" },
+ { name = "typing-extensions" },
+]
+
+[package.optional-dependencies]
+aiohttp = [
+ { name = "aiohttp" },
+ { name = "httpx-aiohttp" },
+]
+
+[package.dev-dependencies]
+dev = [
+ { name = "dirty-equals" },
+ { name = "importlib-metadata" },
+ { name = "mypy" },
+ { name = "pyright" },
+ { name = "pytest", version = "8.4.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10' or (extra == 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2')" },
+ { name = "pytest", version = "9.0.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10' or (extra == 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2')" },
+ { name = "pytest-asyncio", version = "1.2.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10' or (extra == 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2')" },
+ { name = "pytest-asyncio", version = "1.3.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10' or (extra == 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2')" },
+ { name = "pytest-xdist" },
+ { name = "respx" },
+ { name = "rich" },
+ { name = "ruff" },
+ { name = "time-machine", version = "2.19.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10' or (extra == 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2')" },
+ { name = "time-machine", version = "3.2.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10' or (extra == 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2')" },
+]
+pydantic-v1 = [
+ { name = "pydantic", version = "1.10.26", source = { registry = "https://pypi.org/simple" } },
+]
+pydantic-v2 = [
+ { name = "pydantic", version = "2.12.5", source = { registry = "https://pypi.org/simple" } },
+]
+
+[package.metadata]
+requires-dist = [
+ { name = "aiohttp", marker = "extra == 'aiohttp'" },
+ { name = "anyio", specifier = ">=3.5.0,<5" },
+ { name = "distro", specifier = ">=1.7.0,<2" },
+ { name = "httpx", specifier = ">=0.23.0,<1" },
+ { name = "httpx-aiohttp", marker = "extra == 'aiohttp'", specifier = ">=0.1.9" },
+ { name = "pydantic", specifier = ">=1.9.0,<3" },
+ { name = "sniffio" },
+ { name = "typing-extensions", specifier = ">=4.14,<5" },
+]
+provides-extras = ["aiohttp"]
+
+[package.metadata.requires-dev]
+dev = [
+ { name = "dirty-equals", specifier = ">=0.6.0" },
+ { name = "importlib-metadata", specifier = ">=6.7.0" },
+ { name = "mypy", specifier = "==1.17" },
+ { name = "pyright", specifier = "==1.1.399" },
+ { name = "pytest" },
+ { name = "pytest-asyncio" },
+ { name = "pytest-xdist", specifier = ">=3.6.1" },
+ { name = "respx" },
+ { name = "rich", specifier = ">=13.7.1" },
+ { name = "ruff" },
+ { name = "time-machine" },
+]
+pydantic-v1 = [{ name = "pydantic", specifier = ">=1.9.0,<2" }]
+pydantic-v2 = [
+ { name = "pydantic", marker = "python_full_version < '3.14'", specifier = "~=2.0" },
+ { name = "pydantic", marker = "python_full_version >= '3.14'", specifier = "~=2.12" },
]

[[package]]
@@ -862,58 +863,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" },
]

-[[package]]
-name = "pandas"
-version = "3.0.1"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "numpy" },
- { name = "python-dateutil" },
- { name = "tzdata", marker = "sys_platform == 'emscripten' or sys_platform == 'win32' or (extra == 'group-18-llama-stack-client-pydantic-v1' and extra == 'group-18-llama-stack-client-pydantic-v2')" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/2e/0c/b28ed414f080ee0ad153f848586d61d1878f91689950f037f976ce15f6c8/pandas-3.0.1.tar.gz", hash = "sha256:4186a699674af418f655dbd420ed87f50d56b4cd6603784279d9eef6627823c8", size = 4641901, upload-time = "2026-02-17T22:20:16.434Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/37/51/b467209c08dae2c624873d7491ea47d2b47336e5403309d433ea79c38571/pandas-3.0.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:476f84f8c20c9f5bc47252b66b4bb25e1a9fc2fa98cead96744d8116cb85771d", size = 10344357, upload-time = "2026-02-17T22:18:38.262Z" },
- { url = "https://files.pythonhosted.org/packages/7c/f1/e2567ffc8951ab371db2e40b2fe068e36b81d8cf3260f06ae508700e5504/pandas-3.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0ab749dfba921edf641d4036c4c21c0b3ea70fea478165cb98a998fb2a261955", size = 9884543, upload-time = "2026-02-17T22:18:41.476Z" },
- { url = "https://files.pythonhosted.org/packages/d7/39/327802e0b6d693182403c144edacbc27eb82907b57062f23ef5a4c4a5ea7/pandas-3.0.1-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b8e36891080b87823aff3640c78649b91b8ff6eea3c0d70aeabd72ea43ab069b", size = 10396030, upload-time = "2026-02-17T22:18:43.822Z" },
- { url = "https://files.pythonhosted.org/packages/3d/fe/89d77e424365280b79d99b3e1e7d606f5165af2f2ecfaf0c6d24c799d607/pandas-3.0.1-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:532527a701281b9dd371e2f582ed9094f4c12dd9ffb82c0c54ee28d8ac9520c4", size = 10876435, upload-time = "2026-02-17T22:18:45.954Z" },
- { url = "https://files.pythonhosted.org/packages/b5/a6/2a75320849dd154a793f69c951db759aedb8d1dd3939eeacda9bdcfa1629/pandas-3.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:356e5c055ed9b0da1580d465657bc7d00635af4fd47f30afb23025352ba764d1", size = 11405133, upload-time = "2026-02-17T22:18:48.533Z" },
- { url = "https://files.pythonhosted.org/packages/58/53/1d68fafb2e02d7881df66aa53be4cd748d25cbe311f3b3c85c93ea5d30ca/pandas-3.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:9d810036895f9ad6345b8f2a338dd6998a74e8483847403582cab67745bff821", size = 11932065, upload-time = "2026-02-17T22:18:50.837Z" },
- { url = "https://files.pythonhosted.org/packages/75/08/67cc404b3a966b6df27b38370ddd96b3b023030b572283d035181854aac5/pandas-3.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:536232a5fe26dd989bd633e7a0c450705fdc86a207fec7254a55e9a22950fe43", size = 9741627, upload-time = "2026-02-17T22:18:53.905Z" },
- { url = "https://files.pythonhosted.org/packages/86/4f/caf9952948fb00d23795f09b893d11f1cacb384e666854d87249530f7cbe/pandas-3.0.1-cp312-cp312-win_arm64.whl", hash = "sha256:0f463ebfd8de7f326d38037c7363c6dacb857c5881ab8961fb387804d6daf2f7", size = 9052483, upload-time = "2026-02-17T22:18:57.31Z" },
- { url = "https://files.pythonhosted.org/packages/0b/48/aad6ec4f8d007534c091e9a7172b3ec1b1ee6d99a9cbb936b5eab6c6cf58/pandas-3.0.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5272627187b5d9c20e55d27caf5f2cd23e286aba25cadf73c8590e432e2b7262", size = 10317509, upload-time = "2026-02-17T22:18:59.498Z" },
- { url = "https://files.pythonhosted.org/packages/a8/14/5990826f779f79148ae9d3a2c39593dc04d61d5d90541e71b5749f35af95/pandas-3.0.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:661e0f665932af88c7877f31da0dc743fe9c8f2524bdffe23d24fdcb67ef9d56", size = 9860561, upload-time = "2026-02-17T22:19:02.265Z" },
- { url = "https://files.pythonhosted.org/packages/fa/80/f01ff54664b6d70fed71475543d108a9b7c888e923ad210795bef04ffb7d/pandas-3.0.1-cp313-cp313-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:75e6e292ff898679e47a2199172593d9f6107fd2dd3617c22c2946e97d5df46e", size = 10365506, upload-time = "2026-02-17T22:19:05.017Z" },
- { url = "https://files.pythonhosted.org/packages/f2/85/ab6d04733a7d6ff32bfc8382bf1b07078228f5d6ebec5266b91bfc5c4ff7/pandas-3.0.1-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1ff8cf1d2896e34343197685f432450ec99a85ba8d90cce2030c5eee2ef98791", size = 10873196, upload-time = "2026-02-17T22:19:07.204Z" },
- { url = "https://files.pythonhosted.org/packages/48/a9/9301c83d0b47c23ac5deab91c6b39fd98d5b5db4d93b25df8d381451828f/pandas-3.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:eca8b4510f6763f3d37359c2105df03a7a221a508f30e396a51d0713d462e68a", size = 11370859, upload-time = "2026-02-17T22:19:09.436Z" },
- { url = "https://files.pythonhosted.org/packages/59/fe/0c1fc5bd2d29c7db2ab372330063ad555fb83e08422829c785f5ec2176ca/pandas-3.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:06aff2ad6f0b94a17822cf8b83bbb563b090ed82ff4fe7712db2ce57cd50d9b8", size = 11924584, upload-time = "2026-02-17T22:19:11.562Z" },
- { url = "https://files.pythonhosted.org/packages/d6/7d/216a1588b65a7aa5f4535570418a599d943c85afb1d95b0876fc00aa1468/pandas-3.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:9fea306c783e28884c29057a1d9baa11a349bbf99538ec1da44c8476563d1b25", size = 9742769, upload-time = "2026-02-17T22:19:13.926Z" },
- { url = "https://files.pythonhosted.org/packages/c4/cb/810a22a6af9a4e97c8ab1c946b47f3489c5bca5adc483ce0ffc84c9cc768/pandas-3.0.1-cp313-cp313-win_arm64.whl", hash = "sha256:a8d37a43c52917427e897cb2e429f67a449327394396a81034a4449b99afda59", size = 9043855, upload-time = "2026-02-17T22:19:16.09Z" },
- { url = "https://files.pythonhosted.org/packages/92/fa/423c89086cca1f039cf1253c3ff5b90f157b5b3757314aa635f6bf3e30aa/pandas-3.0.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:d54855f04f8246ed7b6fc96b05d4871591143c46c0b6f4af874764ed0d2d6f06", size = 10752673, upload-time = "2026-02-17T22:19:18.304Z" },
- { url = "https://files.pythonhosted.org/packages/22/23/b5a08ec1f40020397f0faba72f1e2c11f7596a6169c7b3e800abff0e433f/pandas-3.0.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:4e1b677accee34a09e0dc2ce5624e4a58a1870ffe56fc021e9caf7f23cd7668f", size = 10404967, upload-time = "2026-02-17T22:19:20.726Z" },
- { url = "https://files.pythonhosted.org/packages/5c/81/94841f1bb4afdc2b52a99daa895ac2c61600bb72e26525ecc9543d453ebc/pandas-3.0.1-cp313-cp313t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a9cabbdcd03f1b6cd254d6dda8ae09b0252524be1592594c00b7895916cb1324", size = 10320575, upload-time = "2026-02-17T22:19:24.919Z" },
- { url = "https://files.pythonhosted.org/packages/0a/8b/2ae37d66a5342a83adadfd0cb0b4bf9c3c7925424dd5f40d15d6cfaa35ee/pandas-3.0.1-cp313-cp313t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5ae2ab1f166668b41e770650101e7090824fd34d17915dd9cd479f5c5e0065e9", size = 10710921, upload-time = "2026-02-17T22:19:27.181Z" },
- { url = "https://files.pythonhosted.org/packages/a2/61/772b2e2757855e232b7ccf7cb8079a5711becb3a97f291c953def15a833f/pandas-3.0.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6bf0603c2e30e2cafac32807b06435f28741135cb8697eae8b28c7d492fc7d76", size = 11334191, upload-time = "2026-02-17T22:19:29.411Z" },
- { url = "https://files.pythonhosted.org/packages/1b/08/b16c6df3ef555d8495d1d265a7963b65be166785d28f06a350913a4fac78/pandas-3.0.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:6c426422973973cae1f4a23e51d4ae85974f44871b24844e4f7de752dd877098", size = 11782256, upload-time = "2026-02-17T22:19:32.34Z" },
- { url = "https://files.pythonhosted.org/packages/55/80/178af0594890dee17e239fca96d3d8670ba0f5ff59b7d0439850924a9c09/pandas-3.0.1-cp313-cp313t-win_amd64.whl", hash = "sha256:b03f91ae8c10a85c1613102c7bef5229b5379f343030a3ccefeca8a33414cf35", size = 10485047, upload-time = "2026-02-17T22:19:34.605Z" },
- { url = "https://files.pythonhosted.org/packages/bb/8b/4bb774a998b97e6c2fd62a9e6cfdaae133b636fd1c468f92afb4ae9a447a/pandas-3.0.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:99d0f92ed92d3083d140bf6b97774f9f13863924cf3f52a70711f4e7588f9d0a", size = 10322465, upload-time = "2026-02-17T22:19:36.803Z" },
- { url = "https://files.pythonhosted.org/packages/72/3a/5b39b51c64159f470f1ca3b1c2a87da290657ca022f7cd11442606f607d1/pandas-3.0.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:3b66857e983208654294bb6477b8a63dee26b37bdd0eb34d010556e91261784f", size = 9910632, upload-time = "2026-02-17T22:19:39.001Z" },
- { url = "https://files.pythonhosted.org/packages/4e/f7/b449ffb3f68c11da12fc06fbf6d2fa3a41c41e17d0284d23a79e1c13a7e4/pandas-3.0.1-cp314-cp314-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:56cf59638bf24dc9bdf2154c81e248b3289f9a09a6d04e63608c159022352749", size = 10440535, upload-time = "2026-02-17T22:19:41.157Z" },
- { url = "https://files.pythonhosted.org/packages/55/77/6ea82043db22cb0f2bbfe7198da3544000ddaadb12d26be36e19b03a2dc5/pandas-3.0.1-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c1a9f55e0f46951874b863d1f3906dcb57df2d9be5c5847ba4dfb55b2c815249", size = 10893940, upload-time = "2026-02-17T22:19:43.493Z" },
- { url = "https://files.pythonhosted.org/packages/03/30/f1b502a72468c89412c1b882a08f6eed8a4ee9dc033f35f65d0663df6081/pandas-3.0.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:1849f0bba9c8a2fb0f691d492b834cc8dadf617e29015c66e989448d58d011ee", size = 11442711, upload-time = "2026-02-17T22:19:46.074Z" },
- { url = "https://files.pythonhosted.org/packages/0d/f0/ebb6ddd8fc049e98cabac5c2924d14d1dda26a20adb70d41ea2e428d3ec4/pandas-3.0.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:c3d288439e11b5325b02ae6e9cc83e6805a62c40c5a6220bea9beb899c073b1c", size = 11963918, upload-time = "2026-02-17T22:19:48.838Z" },
- { url = "https://files.pythonhosted.org/packages/09/f8/8ce132104074f977f907442790eaae24e27bce3b3b454e82faa3237ff098/pandas-3.0.1-cp314-cp314-win_amd64.whl", hash = "sha256:93325b0fe372d192965f4cca88d97667f49557398bbf94abdda3bf1b591dbe66", size = 9862099, upload-time = "2026-02-17T22:19:51.081Z" },
- { url = "https://files.pythonhosted.org/packages/e6/b7/6af9aac41ef2456b768ef0ae60acf8abcebb450a52043d030a65b4b7c9bd/pandas-3.0.1-cp314-cp314-win_arm64.whl", hash = "sha256:97ca08674e3287c7148f4858b01136f8bdfe7202ad25ad04fec602dd1d29d132", size = 9185333, upload-time = "2026-02-17T22:19:53.266Z" },
- { url = "https://files.pythonhosted.org/packages/66/fc/848bb6710bc6061cb0c5badd65b92ff75c81302e0e31e496d00029fe4953/pandas-3.0.1-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:58eeb1b2e0fb322befcf2bbc9ba0af41e616abadb3d3414a6bc7167f6cbfce32", size = 10772664, upload-time = "2026-02-17T22:19:55.806Z" },
- { url = "https://files.pythonhosted.org/packages/69/5c/866a9bbd0f79263b4b0db6ec1a341be13a1473323f05c122388e0f15b21d/pandas-3.0.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:cd9af1276b5ca9e298bd79a26bda32fa9cc87ed095b2a9a60978d2ca058eaf87", size = 10421286, upload-time = "2026-02-17T22:19:58.091Z" },
- { url = "https://files.pythonhosted.org/packages/51/a4/2058fb84fb1cfbfb2d4a6d485e1940bb4ad5716e539d779852494479c580/pandas-3.0.1-cp314-cp314t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:94f87a04984d6b63788327cd9f79dda62b7f9043909d2440ceccf709249ca988", size = 10342050, upload-time = "2026-02-17T22:20:01.376Z" },
- { url = "https://files.pythonhosted.org/packages/22/1b/674e89996cc4be74db3c4eb09240c4bb549865c9c3f5d9b086ff8fcfbf00/pandas-3.0.1-cp314-cp314t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:85fe4c4df62e1e20f9db6ebfb88c844b092c22cd5324bdcf94bfa2fc1b391221", size = 10740055, upload-time = "2026-02-17T22:20:04.328Z" },
- { url = "https://files.pythonhosted.org/packages/d0/f8/e954b750764298c22fa4614376531fe63c521ef517e7059a51f062b87dca/pandas-3.0.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:331ca75a2f8672c365ae25c0b29e46f5ac0c6551fdace8eec4cd65e4fac271ff", size = 11357632, upload-time = "2026-02-17T22:20:06.647Z" },
- { url = "https://files.pythonhosted.org/packages/6d/02/c6e04b694ffd68568297abd03588b6d30295265176a5c01b7459d3bc35a3/pandas-3.0.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:15860b1fdb1973fffade772fdb931ccf9b2f400a3f5665aef94a00445d7d8dd5", size = 11810974, upload-time = "2026-02-17T22:20:08.946Z" },
- { url = "https://files.pythonhosted.org/packages/89/41/d7dfb63d2407f12055215070c42fc6ac41b66e90a2946cdc5e759058398b/pandas-3.0.1-cp314-cp314t-win_amd64.whl", hash = "sha256:44f1364411d5670efa692b146c748f4ed013df91ee91e9bec5677fb1fd58b937", size = 10884622, upload-time = "2026-02-17T22:20:11.711Z" },
- { url = "https://files.pythonhosted.org/packages/68/b0/34937815889fa982613775e4b97fddd13250f11012d769949c5465af2150/pandas-3.0.1-cp314-cp314t-win_arm64.whl", hash = "sha256:108dd1790337a494aa80e38def654ca3f0968cf4f362c85f44c15e471667102d", size = 9452085, upload-time = "2026-02-17T22:20:14.331Z" },
-]
-
[[package]]
name = "pathspec"
version = "1.0.3"
@@ -923,15 +872,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/32/2b/121e912bd60eebd623f873fd090de0e84f322972ab25a7f9044c056804ed/pathspec-1.0.3-py3-none-any.whl", hash = "sha256:e80767021c1cc524aa3fb14bedda9c34406591343cc42797b386ce7b9354fb6c", size = 55021, upload-time = "2026-01-09T15:46:44.652Z" },
]

-[[package]]
-name = "platformdirs"
-version = "4.9.4"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/19/56/8d4c30c8a1d07013911a8fdbd8f89440ef9f08d07a1b50ab8ca8be5a20f9/platformdirs-4.9.4.tar.gz", hash = "sha256:1ec356301b7dc906d83f371c8f487070e99d3ccf9e501686456394622a01a934", size = 28737, upload-time = "2026-03-05T18:34:13.271Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/63/d7/97f7e3a6abb67d8080dd406fd4df842c2be0efaf712d1c899c32a075027c/platformdirs-4.9.4-py3-none-any.whl", hash = "sha256:68a9a4619a666ea6439f2ff250c12a853cd1cbd5158d258bd824a7df6be2f868", size = 21216, upload-time = "2026-03-05T18:34:12.172Z" },
-]
-
[[package]]
name = "pluggy"
version = "1.6.0"
@@ -941,40 +881,42 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" },
]

-[[package]]
-name = "pre-commit"
-version = "4.5.1"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "cfgv" },
- { name = "identify" },
- { name = "nodeenv" },
- { name = "pyyaml" },
- { name = "virtualenv" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/40/f1/6d86a29246dfd2e9b6237f0b5823717f60cad94d47ddc26afa916d21f525/pre_commit-4.5.1.tar.gz", hash = "sha256:eb545fcff725875197837263e977ea257a402056661f09dae08e4b149b030a61", size = 198232, upload-time = "2025-12-16T21:14:33.552Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/5d/19/fd3ef348460c80af7bb4669ea7926651d1f95c23ff2df18b9d24bab4f3fa/pre_commit-4.5.1-py2.py3-none-any.whl", hash = "sha256:3b3afd891e97337708c1674210f8eba659b52a38ea5f822ff142d10786221f77", size = 226437, upload-time = "2025-12-16T21:14:32.409Z" },
-]
-
-[[package]]
-name = "prompt-toolkit"
-version = "3.0.52"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "wcwidth" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/a1/96/06e01a7b38dce6fe1db213e061a4602dd6032a8a97ef6c1a862537732421/prompt_toolkit-3.0.52.tar.gz", hash = "sha256:28cde192929c8e7321de85de1ddbe736f1375148b02f2e17edd840042b1be855", size = 434198, upload-time = "2025-08-27T15:24:02.057Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/84/03/0d3ce49e2505ae70cf43bc5bb3033955d2fc9f932163e84dc0779cc47f48/prompt_toolkit-3.0.52-py3-none-any.whl", hash = "sha256:9aac639a3bbd33284347de5ad8d68ecc044b91a762dc39b7c21095fcd6a19955", size = 391431, upload-time = "2025-08-27T15:23:59.498Z" },
-]
-
[[package]]
name = "propcache"
version = "0.4.1"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/9e/da/e9fc233cf63743258bff22b3dfa7ea5baef7b5bc324af47a0ad89b8ffc6f/propcache-0.4.1.tar.gz", hash = "sha256:f48107a8c637e80362555f37ecf49abe20370e557cc4ab374f04ec4423c97c3d", size = 46442, upload-time = "2025-10-08T19:49:02.291Z" }
wheels = [
+ { url = "https://files.pythonhosted.org/packages/3c/0e/934b541323035566a9af292dba85a195f7b78179114f2c6ebb24551118a9/propcache-0.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7c2d1fa3201efaf55d730400d945b5b3ab6e672e100ba0f9a409d950ab25d7db", size = 79534, upload-time = "2025-10-08T19:46:02.083Z" },
+ { url = "https://files.pythonhosted.org/packages/a1/6b/db0d03d96726d995dc7171286c6ba9d8d14251f37433890f88368951a44e/propcache-0.4.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1eb2994229cc8ce7fe9b3db88f5465f5fd8651672840b2e426b88cdb1a30aac8", size = 45526, upload-time = "2025-10-08T19:46:03.884Z" },
+ { url = "https://files.pythonhosted.org/packages/e4/c3/82728404aea669e1600f304f2609cde9e665c18df5a11cdd57ed73c1dceb/propcache-0.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:66c1f011f45a3b33d7bcb22daed4b29c0c9e2224758b6be00686731e1b46f925", size = 47263, upload-time = "2025-10-08T19:46:05.405Z" },
+ { url = "https://files.pythonhosted.org/packages/df/1b/39313ddad2bf9187a1432654c38249bab4562ef535ef07f5eb6eb04d0b1b/propcache-0.4.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9a52009f2adffe195d0b605c25ec929d26b36ef986ba85244891dee3b294df21", size = 201012, upload-time = "2025-10-08T19:46:07.165Z" },
+ { url = "https://files.pythonhosted.org/packages/5b/01/f1d0b57d136f294a142acf97f4ed58c8e5b974c21e543000968357115011/propcache-0.4.1-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:5d4e2366a9c7b837555cf02fb9be2e3167d333aff716332ef1b7c3a142ec40c5", size = 209491, upload-time = "2025-10-08T19:46:08.909Z" },
+ { url = "https://files.pythonhosted.org/packages/a1/c8/038d909c61c5bb039070b3fb02ad5cccdb1dde0d714792e251cdb17c9c05/propcache-0.4.1-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:9d2b6caef873b4f09e26ea7e33d65f42b944837563a47a94719cc3544319a0db", size = 215319, upload-time = "2025-10-08T19:46:10.7Z" },
+ { url = "https://files.pythonhosted.org/packages/08/57/8c87e93142b2c1fa2408e45695205a7ba05fb5db458c0bf5c06ba0e09ea6/propcache-0.4.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2b16ec437a8c8a965ecf95739448dd938b5c7f56e67ea009f4300d8df05f32b7", size = 196856, upload-time = "2025-10-08T19:46:12.003Z" },
+ { url = "https://files.pythonhosted.org/packages/42/df/5615fec76aa561987a534759b3686008a288e73107faa49a8ae5795a9f7a/propcache-0.4.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:296f4c8ed03ca7476813fe666c9ea97869a8d7aec972618671b33a38a5182ef4", size = 193241, upload-time = "2025-10-08T19:46:13.495Z" },
+ { url = "https://files.pythonhosted.org/packages/d5/21/62949eb3a7a54afe8327011c90aca7e03547787a88fb8bd9726806482fea/propcache-0.4.1-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:1f0978529a418ebd1f49dad413a2b68af33f85d5c5ca5c6ca2a3bed375a7ac60", size = 190552, upload-time = "2025-10-08T19:46:14.938Z" },
+ { url = "https://files.pythonhosted.org/packages/30/ee/ab4d727dd70806e5b4de96a798ae7ac6e4d42516f030ee60522474b6b332/propcache-0.4.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:fd138803047fb4c062b1c1dd95462f5209456bfab55c734458f15d11da288f8f", size = 200113, upload-time = "2025-10-08T19:46:16.695Z" },
+ { url = "https://files.pythonhosted.org/packages/8a/0b/38b46208e6711b016aa8966a3ac793eee0d05c7159d8342aa27fc0bc365e/propcache-0.4.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:8c9b3cbe4584636d72ff556d9036e0c9317fa27b3ac1f0f558e7e84d1c9c5900", size = 200778, upload-time = "2025-10-08T19:46:18.023Z" },
+ { url = "https://files.pythonhosted.org/packages/cf/81/5abec54355ed344476bee711e9f04815d4b00a311ab0535599204eecc257/propcache-0.4.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f93243fdc5657247533273ac4f86ae106cc6445a0efacb9a1bfe982fcfefd90c", size = 193047, upload-time = "2025-10-08T19:46:19.449Z" },
+ { url = "https://files.pythonhosted.org/packages/ec/b6/1f237c04e32063cb034acd5f6ef34ef3a394f75502e72703545631ab1ef6/propcache-0.4.1-cp310-cp310-win32.whl", hash = "sha256:a0ee98db9c5f80785b266eb805016e36058ac72c51a064040f2bc43b61101cdb", size = 38093, upload-time = "2025-10-08T19:46:20.643Z" },
+ { url = "https://files.pythonhosted.org/packages/a6/67/354aac4e0603a15f76439caf0427781bcd6797f370377f75a642133bc954/propcache-0.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:1cdb7988c4e5ac7f6d175a28a9aa0c94cb6f2ebe52756a3c0cda98d2809a9e37", size = 41638, upload-time = "2025-10-08T19:46:21.935Z" },
+ { url = "https://files.pythonhosted.org/packages/e0/e1/74e55b9fd1a4c209ff1a9a824bf6c8b3d1fc5a1ac3eabe23462637466785/propcache-0.4.1-cp310-cp310-win_arm64.whl", hash = "sha256:d82ad62b19645419fe79dd63b3f9253e15b30e955c0170e5cebc350c1844e581", size = 38229, upload-time = "2025-10-08T19:46:23.368Z" },
+ { url = "https://files.pythonhosted.org/packages/8c/d4/4e2c9aaf7ac2242b9358f98dccd8f90f2605402f5afeff6c578682c2c491/propcache-0.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:60a8fda9644b7dfd5dece8c61d8a85e271cb958075bfc4e01083c148b61a7caf", size = 80208, upload-time = "2025-10-08T19:46:24.597Z" },
+ { url = "https://files.pythonhosted.org/packages/c2/21/d7b68e911f9c8e18e4ae43bdbc1e1e9bbd971f8866eb81608947b6f585ff/propcache-0.4.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c30b53e7e6bda1d547cabb47c825f3843a0a1a42b0496087bb58d8fedf9f41b5", size = 45777, upload-time = "2025-10-08T19:46:25.733Z" },
+ { url = "https://files.pythonhosted.org/packages/d3/1d/11605e99ac8ea9435651ee71ab4cb4bf03f0949586246476a25aadfec54a/propcache-0.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6918ecbd897443087a3b7cd978d56546a812517dcaaca51b49526720571fa93e", size = 47647, upload-time = "2025-10-08T19:46:27.304Z" },
+ { url = "https://files.pythonhosted.org/packages/58/1a/3c62c127a8466c9c843bccb503d40a273e5cc69838805f322e2826509e0d/propcache-0.4.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3d902a36df4e5989763425a8ab9e98cd8ad5c52c823b34ee7ef307fd50582566", size = 214929, upload-time = "2025-10-08T19:46:28.62Z" },
+ { url = "https://files.pythonhosted.org/packages/56/b9/8fa98f850960b367c4b8fe0592e7fc341daa7a9462e925228f10a60cf74f/propcache-0.4.1-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a9695397f85973bb40427dedddf70d8dc4a44b22f1650dd4af9eedf443d45165", size = 221778, upload-time = "2025-10-08T19:46:30.358Z" },
+ { url = "https://files.pythonhosted.org/packages/46/a6/0ab4f660eb59649d14b3d3d65c439421cf2f87fe5dd68591cbe3c1e78a89/propcache-0.4.1-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2bb07ffd7eaad486576430c89f9b215f9e4be68c4866a96e97db9e97fead85dc", size = 228144, upload-time = "2025-10-08T19:46:32.607Z" },
+ { url = "https://files.pythonhosted.org/packages/52/6a/57f43e054fb3d3a56ac9fc532bc684fc6169a26c75c353e65425b3e56eef/propcache-0.4.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fd6f30fdcf9ae2a70abd34da54f18da086160e4d7d9251f81f3da0ff84fc5a48", size = 210030, upload-time = "2025-10-08T19:46:33.969Z" },
+ { url = "https://files.pythonhosted.org/packages/40/e2/27e6feebb5f6b8408fa29f5efbb765cd54c153ac77314d27e457a3e993b7/propcache-0.4.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:fc38cba02d1acba4e2869eef1a57a43dfbd3d49a59bf90dda7444ec2be6a5570", size = 208252, upload-time = "2025-10-08T19:46:35.309Z" },
+ { url = "https://files.pythonhosted.org/packages/9e/f8/91c27b22ccda1dbc7967f921c42825564fa5336a01ecd72eb78a9f4f53c2/propcache-0.4.1-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:67fad6162281e80e882fb3ec355398cf72864a54069d060321f6cd0ade95fe85", size = 202064, upload-time = "2025-10-08T19:46:36.993Z" },
+ { url = "https://files.pythonhosted.org/packages/f2/26/7f00bd6bd1adba5aafe5f4a66390f243acab58eab24ff1a08bebb2ef9d40/propcache-0.4.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f10207adf04d08bec185bae14d9606a1444715bc99180f9331c9c02093e1959e", size = 212429, upload-time = "2025-10-08T19:46:38.398Z" },
+ { url = "https://files.pythonhosted.org/packages/84/89/fd108ba7815c1117ddca79c228f3f8a15fc82a73bca8b142eb5de13b2785/propcache-0.4.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:e9b0d8d0845bbc4cfcdcbcdbf5086886bc8157aa963c31c777ceff7846c77757", size = 216727, upload-time = "2025-10-08T19:46:39.732Z" },
+ { url = "https://files.pythonhosted.org/packages/79/37/3ec3f7e3173e73f1d600495d8b545b53802cbf35506e5732dd8578db3724/propcache-0.4.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:981333cb2f4c1896a12f4ab92a9cc8f09ea664e9b7dbdc4eff74627af3a11c0f", size = 205097, upload-time = "2025-10-08T19:46:41.025Z" },
+ { url = "https://files.pythonhosted.org/packages/61/b0/b2631c19793f869d35f47d5a3a56fb19e9160d3c119f15ac7344fc3ccae7/propcache-0.4.1-cp311-cp311-win32.whl", hash = "sha256:f1d2f90aeec838a52f1c1a32fe9a619fefd5e411721a9117fbf82aea638fe8a1", size = 38084, upload-time = "2025-10-08T19:46:42.693Z" },
+ { url = "https://files.pythonhosted.org/packages/f4/78/6cce448e2098e9f3bfc91bb877f06aa24b6ccace872e39c53b2f707c4648/propcache-0.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:364426a62660f3f699949ac8c621aad6977be7126c5807ce48c0aeb8e7333ea6", size = 41637, upload-time = "2025-10-08T19:46:43.778Z" },
+ { url = "https://files.pythonhosted.org/packages/9c/e9/754f180cccd7f51a39913782c74717c581b9cc8177ad0e949f4d51812383/propcache-0.4.1-cp311-cp311-win_arm64.whl", hash = "sha256:e53f3a38d3510c11953f3e6a33f205c6d1b001129f972805ca9b42fc308bc239", size = 38064, upload-time = "2025-10-08T19:46:44.872Z" },
{ url = "https://files.pythonhosted.org/packages/a2/0f/f17b1b2b221d5ca28b4b876e8bb046ac40466513960646bda8e1853cdfa2/propcache-0.4.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:e153e9cd40cc8945138822807139367f256f89c6810c2634a4f6902b52d3b4e2", size = 80061, upload-time = "2025-10-08T19:46:46.075Z" },
{ url = "https://files.pythonhosted.org/packages/76/47/8ccf75935f51448ba9a16a71b783eb7ef6b9ee60f5d14c7f8a8a79fbeed7/propcache-0.4.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:cd547953428f7abb73c5ad82cbb32109566204260d98e41e5dfdc682eb7f8403", size = 46037, upload-time = "2025-10-08T19:46:47.23Z" },
{ url = "https://files.pythonhosted.org/packages/0a/b6/5c9a0e42df4d00bfb4a3cbbe5cf9f54260300c88a0e9af1f47ca5ce17ac0/propcache-0.4.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f048da1b4f243fc44f205dfd320933a951b8d89e0afd4c7cacc762a8b9165207", size = 47324, upload-time = "2025-10-08T19:46:48.384Z" },
@@ -1050,38 +992,47 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/08/02/87b25304249a35c0915d236575bc3574a323f60b47939a2262b77632a3ee/propcache-0.4.1-cp314-cp314t-win32.whl", hash = "sha256:05674a162469f31358c30bcaa8883cb7829fa3110bf9c0991fe27d7896c42d85", size = 42546, upload-time = "2025-10-08T19:48:32.872Z" },
{ url = "https://files.pythonhosted.org/packages/cb/ef/3c6ecf8b317aa982f309835e8f96987466123c6e596646d4e6a1dfcd080f/propcache-0.4.1-cp314-cp314t-win_amd64.whl", hash = "sha256:990f6b3e2a27d683cb7602ed6c86f15ee6b43b1194736f9baaeb93d0016633b1", size = 46259, upload-time = "2025-10-08T19:48:34.226Z" },
{ url = "https://files.pythonhosted.org/packages/c4/2d/346e946d4951f37eca1e4f55be0f0174c52cd70720f84029b02f296f4a38/propcache-0.4.1-cp314-cp314t-win_arm64.whl", hash = "sha256:ecef2343af4cc68e05131e45024ba34f6095821988a9d0a02aa7c73fcc448aa9", size = 40428, upload-time = "2025-10-08T19:48:35.441Z" },
+ { url = "https://files.pythonhosted.org/packages/9b/01/0ebaec9003f5d619a7475165961f8e3083cf8644d704b60395df3601632d/propcache-0.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:3d233076ccf9e450c8b3bc6720af226b898ef5d051a2d145f7d765e6e9f9bcff", size = 80277, upload-time = "2025-10-08T19:48:36.647Z" },
+ { url = "https://files.pythonhosted.org/packages/34/58/04af97ac586b4ef6b9026c3fd36ee7798b737a832f5d3440a4280dcebd3a/propcache-0.4.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:357f5bb5c377a82e105e44bd3d52ba22b616f7b9773714bff93573988ef0a5fb", size = 45865, upload-time = "2025-10-08T19:48:37.859Z" },
+ { url = "https://files.pythonhosted.org/packages/7c/19/b65d98ae21384518b291d9939e24a8aeac4fdb5101b732576f8f7540e834/propcache-0.4.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cbc3b6dfc728105b2a57c06791eb07a94229202ea75c59db644d7d496b698cac", size = 47636, upload-time = "2025-10-08T19:48:39.038Z" },
+ { url = "https://files.pythonhosted.org/packages/b3/0f/317048c6d91c356c7154dca5af019e6effeb7ee15fa6a6db327cc19e12b4/propcache-0.4.1-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:182b51b421f0501952d938dc0b0eb45246a5b5153c50d42b495ad5fb7517c888", size = 201126, upload-time = "2025-10-08T19:48:40.774Z" },
+ { url = "https://files.pythonhosted.org/packages/71/69/0b2a7a5a6ee83292b4b997dbd80549d8ce7d40b6397c1646c0d9495f5a85/propcache-0.4.1-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:4b536b39c5199b96fc6245eb5fb796c497381d3942f169e44e8e392b29c9ebcc", size = 209837, upload-time = "2025-10-08T19:48:42.167Z" },
+ { url = "https://files.pythonhosted.org/packages/a5/92/c699ac495a6698df6e497fc2de27af4b6ace10d8e76528357ce153722e45/propcache-0.4.1-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:db65d2af507bbfbdcedb254a11149f894169d90488dd3e7190f7cdcb2d6cd57a", size = 215578, upload-time = "2025-10-08T19:48:43.56Z" },
+ { url = "https://files.pythonhosted.org/packages/b3/ee/14de81c5eb02c0ee4f500b4e39c4e1bd0677c06e72379e6ab18923c773fc/propcache-0.4.1-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fd2dbc472da1f772a4dae4fa24be938a6c544671a912e30529984dd80400cd88", size = 197187, upload-time = "2025-10-08T19:48:45.309Z" },
+ { url = "https://files.pythonhosted.org/packages/1d/94/48dce9aaa6d8dd5a0859bad75158ec522546d4ac23f8e2f05fac469477dd/propcache-0.4.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:daede9cd44e0f8bdd9e6cc9a607fc81feb80fae7a5fc6cecaff0e0bb32e42d00", size = 193478, upload-time = "2025-10-08T19:48:47.743Z" },
+ { url = "https://files.pythonhosted.org/packages/60/b5/0516b563e801e1ace212afde869a0596a0d7115eec0b12d296d75633fb29/propcache-0.4.1-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:71b749281b816793678ae7f3d0d84bd36e694953822eaad408d682efc5ca18e0", size = 190650, upload-time = "2025-10-08T19:48:49.373Z" },
+ { url = "https://files.pythonhosted.org/packages/24/89/e0f7d4a5978cd56f8cd67735f74052f257dc471ec901694e430f0d1572fe/propcache-0.4.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:0002004213ee1f36cfb3f9a42b5066100c44276b9b72b4e1504cddd3d692e86e", size = 200251, upload-time = "2025-10-08T19:48:51.4Z" },
+ { url = "https://files.pythonhosted.org/packages/06/7d/a1fac863d473876ed4406c914f2e14aa82d2f10dd207c9e16fc383cc5a24/propcache-0.4.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:fe49d0a85038f36ba9e3ffafa1103e61170b28e95b16622e11be0a0ea07c6781", size = 200919, upload-time = "2025-10-08T19:48:53.227Z" },
+ { url = "https://files.pythonhosted.org/packages/c3/4e/f86a256ff24944cf5743e4e6c6994e3526f6acfcfb55e21694c2424f758c/propcache-0.4.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:99d43339c83aaf4d32bda60928231848eee470c6bda8d02599cc4cebe872d183", size = 193211, upload-time = "2025-10-08T19:48:55.027Z" },
+ { url = "https://files.pythonhosted.org/packages/6e/3f/3fbad5f4356b068f1b047d300a6ff2c66614d7030f078cd50be3fec04228/propcache-0.4.1-cp39-cp39-win32.whl", hash = "sha256:a129e76735bc792794d5177069691c3217898b9f5cee2b2661471e52ffe13f19", size = 38314, upload-time = "2025-10-08T19:48:56.792Z" },
+ { url = "https://files.pythonhosted.org/packages/a4/45/d78d136c3a3d215677abb886785aae744da2c3005bcb99e58640c56529b1/propcache-0.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:948dab269721ae9a87fd16c514a0a2c2a1bdb23a9a61b969b0f9d9ee2968546f", size = 41912, upload-time = "2025-10-08T19:48:57.995Z" },
+ { url = "https://files.pythonhosted.org/packages/fc/2a/b0632941f25139f4e58450b307242951f7c2717a5704977c6d5323a800af/propcache-0.4.1-cp39-cp39-win_arm64.whl", hash = "sha256:5fd37c406dd6dc85aa743e214cef35dc54bbdd1419baac4f6ae5e5b1a2976938", size = 38450, upload-time = "2025-10-08T19:48:59.349Z" },
{ url = "https://files.pythonhosted.org/packages/5b/5a/bc7b4a4ef808fa59a816c17b20c4bef6884daebbdf627ff2a161da67da19/propcache-0.4.1-py3-none-any.whl", hash = "sha256:af2a6052aeb6cf17d3e46ee169099044fd8224cbaf75c76a2ef596e8163e2237", size = 13305, upload-time = "2025-10-08T19:49:00.792Z" },
]

-[[package]]
-name = "pyaml"
-version = "26.2.1"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "pyyaml" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/38/fb/2b9590512a9d7763620d87171c7531d5295678ce96e57393614b91da8998/pyaml-26.2.1.tar.gz", hash = "sha256:489dd82997235d4cfcf76a6287fce2f075487d77a6567c271e8d790583690c68", size = 30653, upload-time = "2026-02-06T13:49:30.769Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/5d/f3/1f8651f23101e6fae41d0d504414c9722b0140bf0fc6acf87ac52e18aa41/pyaml-26.2.1-py3-none-any.whl", hash = "sha256:6261c2f0a2f33245286c794ad6ec234be33a73d2b05427079fd343e2812a87cf", size = 27211, upload-time = "2026-02-06T13:49:29.652Z" },
-]
-
[[package]]
name = "pydantic"
version = "1.10.26"
source = { registry = "https://pypi.org/simple" }
resolution-markers = [
- "python_full_version >= '3.14' and sys_platform == 'win32'",
- "python_full_version >= '3.14' and sys_platform == 'emscripten'",
- "python_full_version >= '3.14' and sys_platform != 'emscripten' and sys_platform != 'win32'",
- "python_full_version < '3.14' and sys_platform == 'win32'",
- "python_full_version < '3.14' and sys_platform == 'emscripten'",
- "python_full_version < '3.14' and sys_platform != 'emscripten' and sys_platform != 'win32'",
+ "python_full_version >= '3.10'",
+ "python_full_version < '3.10'",
]
dependencies = [
- { name = "typing-extensions", marker = "extra == 'group-18-llama-stack-client-pydantic-v1'" },
+ { name = "typing-extensions", marker = "extra == 'group-10-ogx-client-pydantic-v1'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/7b/da/fd89f987a376c807cd81ea0eff4589aade783bbb702637b4734ef2c743a2/pydantic-1.10.26.tar.gz", hash = "sha256:8c6aa39b494c5af092e690127c283d84f363ac36017106a9e66cb33a22ac412e", size = 357906, upload-time = "2025-12-18T15:47:46.557Z" }
wheels = [
+ { url = "https://files.pythonhosted.org/packages/71/08/2587a6d4314e7539eec84acd062cb7b037638edb57a0335d20e4c5b8878c/pydantic-1.10.26-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f7ae36fa0ecef8d39884120f212e16c06bb096a38f523421278e2f39c1784546", size = 2444588, upload-time = "2025-12-18T15:46:28.882Z" },
+ { url = "https://files.pythonhosted.org/packages/47/e6/10df5f08c105bcbb4adbee7d1108ff4b347702b110fed058f6a03f1c6b73/pydantic-1.10.26-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d95a76cf503f0f72ed7812a91de948440b2bf564269975738a4751e4fadeb572", size = 2255972, upload-time = "2025-12-18T15:46:31.72Z" },
+ { url = "https://files.pythonhosted.org/packages/ba/7d/fdb961e7adc2c31f394feba6f560ef2c74c446f0285e2c2eb87d2b7206c7/pydantic-1.10.26-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:a943ce8e00ad708ed06a1d9df5b4fd28f5635a003b82a4908ece6f24c0b18464", size = 2857175, upload-time = "2025-12-18T15:46:34Z" },
+ { url = "https://files.pythonhosted.org/packages/8f/6c/f21e27dda475d4c562bd01b5874284dd3180f336c1e669413b743ca8b278/pydantic-1.10.26-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:465ad8edb29b15c10b779b16431fe8e77c380098badf6db367b7a1d3e572cf53", size = 2947001, upload-time = "2025-12-18T15:46:35.922Z" },
+ { url = "https://files.pythonhosted.org/packages/6d/f6/27ea206232cbb6ec24dc4e4e8888a9a734f96a1eaf13504be4b30ef26aa7/pydantic-1.10.26-cp310-cp310-win_amd64.whl", hash = "sha256:80e6be6272839c8a7641d26ad569ab77772809dd78f91d0068dc0fc97f071945", size = 2066217, upload-time = "2025-12-18T15:46:37.614Z" },
+ { url = "https://files.pythonhosted.org/packages/1d/c1/d521e64c8130e1ad9d22c270bed3fabcc0940c9539b076b639c88fd32a8d/pydantic-1.10.26-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:116233e53889bcc536f617e38c1b8337d7fa9c280f0fd7a4045947515a785637", size = 2428347, upload-time = "2025-12-18T15:46:39.41Z" },
+ { url = "https://files.pythonhosted.org/packages/2c/08/f4b804a00c16e3ea994cb640a7c25c579b4f1fa674cde6a19fa0dfb0ae4f/pydantic-1.10.26-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c3cfdd361addb6eb64ccd26ac356ad6514cee06a61ab26b27e16b5ed53108f77", size = 2212605, upload-time = "2025-12-18T15:46:41.006Z" },
+ { url = "https://files.pythonhosted.org/packages/5d/78/0df4b9efef29bbc5e39f247fcba99060d15946b4463d82a5589cf7923d71/pydantic-1.10.26-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0e4451951a9a93bf9a90576f3e25240b47ee49ab5236adccb8eff6ac943adf0f", size = 2753560, upload-time = "2025-12-18T15:46:43.215Z" },
+ { url = "https://files.pythonhosted.org/packages/68/66/6ab6c1d3a116d05d2508fce64f96e35242938fac07544d611e11d0d363a0/pydantic-1.10.26-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9858ed44c6bea5f29ffe95308db9e62060791c877766c67dd5f55d072c8612b5", size = 2859235, upload-time = "2025-12-18T15:46:45.112Z" },
+ { url = "https://files.pythonhosted.org/packages/61/4e/f1676bb0fcdf6ed2ce4670d7d1fc1d6c3a06d84497644acfbe02649503f1/pydantic-1.10.26-cp311-cp311-win_amd64.whl", hash = "sha256:ac1089f723e2106ebde434377d31239e00870a7563245072968e5af5cc4d33df", size = 2066646, upload-time = "2025-12-18T15:46:46.816Z" },
{ url = "https://files.pythonhosted.org/packages/02/6c/cd97a5a776c4515e6ee2ae81c2f2c5be51376dda6c31f965d7746ce0019f/pydantic-1.10.26-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:468d5b9cacfcaadc76ed0a4645354ab6f263ec01a63fb6d05630ea1df6ae453f", size = 2433795, upload-time = "2025-12-18T15:46:49.321Z" },
{ url = "https://files.pythonhosted.org/packages/47/12/de20affa30dcef728fcf9cc98e13ff4438c7a630de8d2f90eb38eba0891c/pydantic-1.10.26-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2c1b0b914be31671000ca25cf7ea17fcaaa68cfeadf6924529c5c5aa24b7ab1f", size = 2227387, upload-time = "2025-12-18T15:46:50.877Z" },
{ url = "https://files.pythonhosted.org/packages/7b/1d/9d65dcc5b8c17ba590f1f9f486e9306346831902318b7ee93f63516f4003/pydantic-1.10.26-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:15b13b9f8ba8867095769e1156e0d7fbafa1f65b898dd40fd1c02e34430973cb", size = 2629594, upload-time = "2025-12-18T15:46:53.42Z" },
@@ -1097,6 +1048,11 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/78/c1/3a4d069593283ca4dd0006039ba33644e21e432cddc09da706ac50441610/pydantic-1.10.26-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:cc2e3fe7bc4993626ef6b6fa855defafa1d6f8996aa1caef2deb83c5ac4d043a", size = 2620047, upload-time = "2025-12-18T15:47:17.089Z" },
{ url = "https://files.pythonhosted.org/packages/e0/0e/340c3d29197d99c15ab04093d43bb9c9d0fd17c2a34b80cb9d36ed732b09/pydantic-1.10.26-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:36d9e46b588aaeb1dcd2409fa4c467fe0b331f3cc9f227b03a7a00643704e962", size = 2747625, upload-time = "2025-12-18T15:47:19.21Z" },
{ url = "https://files.pythonhosted.org/packages/1e/58/f12ab3727339b172c830b32151919456b67787cdfe8808b2568b322fb15c/pydantic-1.10.26-cp314-cp314-win_amd64.whl", hash = "sha256:81ce3c8616d12a7be31b4aadfd3434f78f6b44b75adbfaec2fe1ad4f7f999b8c", size = 1976436, upload-time = "2025-12-18T15:47:21.384Z" },
+ { url = "https://files.pythonhosted.org/packages/e1/8a/3a5a6267d5f03617b5c0f1985aa9fdfbafd33a50ef6dadd866a15ed4d123/pydantic-1.10.26-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:502b9d30d18a2dfaf81b7302f6ba0e5853474b1c96212449eb4db912cb604b7d", size = 2457039, upload-time = "2025-12-18T15:47:34.584Z" },
+ { url = "https://files.pythonhosted.org/packages/f3/fa/343ac0db26918a033ac6256c036d72c3b6eb1196b7de622e2e8a94b19079/pydantic-1.10.26-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0d8f6087bf697dec3bf7ffcd7fe8362674f16519f3151789f33cbe8f1d19fc15", size = 2266441, upload-time = "2025-12-18T15:47:36.807Z" },
+ { url = "https://files.pythonhosted.org/packages/fc/36/1ab48136578608dba2f2a62e452f3db2083b474d4e49be5749c6ae0c123c/pydantic-1.10.26-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:dd40a99c358419910c85e6f5d22f9c56684c25b5e7abc40879b3b4a52f34ae90", size = 2869383, upload-time = "2025-12-18T15:47:38.883Z" },
+ { url = "https://files.pythonhosted.org/packages/a2/25/41dbf1bffc31eb242cece8080561a4133eaeb513372dec36a84477a3fb71/pydantic-1.10.26-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:ce3293b86ca9f4125df02ff0a70be91bc7946522467cbd98e7f1493f340616ba", size = 2963582, upload-time = "2025-12-18T15:47:40.854Z" },
+ { url = "https://files.pythonhosted.org/packages/61/2f/f072ae160a300c85eb9f059915101fd33dacf12d8df08c2b804acb3b95d1/pydantic-1.10.26-cp39-cp39-win_amd64.whl", hash = "sha256:1a4e3062b71ab1d5df339ba12c48f9ed5817c5de6cb92a961dd5c64bb32e7b96", size = 2075530, upload-time = "2025-12-18T15:47:43.181Z" },
{ url = "https://files.pythonhosted.org/packages/1f/98/556e82f00b98486def0b8af85da95e69d2be7e367cf2431408e108bc3095/pydantic-1.10.26-py3-none-any.whl", hash = "sha256:c43ad70dc3ce7787543d563792426a16fd7895e14be4b194b5665e36459dd917", size = 166975, upload-time = "2025-12-18T15:47:44.927Z" },
]
@@ -1105,18 +1061,17 @@ name = "pydantic"
version = "2.12.5"
source = { registry = "https://pypi.org/simple" }
resolution-markers = [
- "python_full_version >= '3.14' and sys_platform == 'win32'",
- "python_full_version >= '3.14' and sys_platform == 'emscripten'",
- "python_full_version >= '3.14' and sys_platform != 'emscripten' and sys_platform != 'win32'",
- "python_full_version < '3.14' and sys_platform == 'win32'",
- "python_full_version < '3.14' and sys_platform == 'emscripten'",
- "python_full_version < '3.14' and sys_platform != 'emscripten' and sys_platform != 'win32'",
+ "python_full_version >= '3.14' and extra != 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2'",
+ "python_full_version >= '3.10' and python_full_version < '3.14' and extra != 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2'",
+ "python_full_version < '3.10' and extra != 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2'",
+ "python_full_version >= '3.10' and extra != 'group-10-ogx-client-pydantic-v1' and extra != 'group-10-ogx-client-pydantic-v2'",
+ "python_full_version < '3.10' and extra != 'group-10-ogx-client-pydantic-v1' and extra != 'group-10-ogx-client-pydantic-v2'",
]
dependencies = [
- { name = "annotated-types" },
- { name = "pydantic-core" },
- { name = "typing-extensions" },
- { name = "typing-inspection" },
+ { name = "annotated-types", marker = "extra == 'group-10-ogx-client-pydantic-v2' or extra != 'group-10-ogx-client-pydantic-v1'" },
+ { name = "pydantic-core", marker = "extra == 'group-10-ogx-client-pydantic-v2' or extra != 'group-10-ogx-client-pydantic-v1'" },
+ { name = "typing-extensions", marker = "extra == 'group-10-ogx-client-pydantic-v2' or extra != 'group-10-ogx-client-pydantic-v1'" },
+ { name = "typing-inspection", marker = "extra == 'group-10-ogx-client-pydantic-v2' or extra != 'group-10-ogx-client-pydantic-v1'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/69/44/36f1a6e523abc58ae5f928898e4aca2e0ea509b5aa6f6f392a5d882be928/pydantic-2.12.5.tar.gz", hash = "sha256:4d351024c75c0f085a9febbb665ce8c0c6ec5d30e903bdb6394b7ede26aebb49", size = 821591, upload-time = "2025-11-26T15:11:46.471Z" }
wheels = [
@@ -1128,10 +1083,37 @@ name = "pydantic-core"
version = "2.41.5"
source = { registry = "https://pypi.org/simple" }
dependencies = [
- { name = "typing-extensions" },
+ { name = "typing-extensions", marker = "extra == 'group-10-ogx-client-pydantic-v2' or extra != 'group-10-ogx-client-pydantic-v1'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/71/70/23b021c950c2addd24ec408e9ab05d59b035b39d97cdc1130e1bce647bb6/pydantic_core-2.41.5.tar.gz", hash = "sha256:08daa51ea16ad373ffd5e7606252cc32f07bc72b28284b6bc9c6df804816476e", size = 460952, upload-time = "2025-11-04T13:43:49.098Z" }
wheels = [
+ { url = "https://files.pythonhosted.org/packages/c6/90/32c9941e728d564b411d574d8ee0cf09b12ec978cb22b294995bae5549a5/pydantic_core-2.41.5-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:77b63866ca88d804225eaa4af3e664c5faf3568cea95360d21f4725ab6e07146", size = 2107298, upload-time = "2025-11-04T13:39:04.116Z" },
+ { url = "https://files.pythonhosted.org/packages/fb/a8/61c96a77fe28993d9a6fb0f4127e05430a267b235a124545d79fea46dd65/pydantic_core-2.41.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:dfa8a0c812ac681395907e71e1274819dec685fec28273a28905df579ef137e2", size = 1901475, upload-time = "2025-11-04T13:39:06.055Z" },
+ { url = "https://files.pythonhosted.org/packages/5d/b6/338abf60225acc18cdc08b4faef592d0310923d19a87fba1faf05af5346e/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5921a4d3ca3aee735d9fd163808f5e8dd6c6972101e4adbda9a4667908849b97", size = 1918815, upload-time = "2025-11-04T13:39:10.41Z" },
+ { url = "https://files.pythonhosted.org/packages/d1/1c/2ed0433e682983d8e8cba9c8d8ef274d4791ec6a6f24c58935b90e780e0a/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e25c479382d26a2a41b7ebea1043564a937db462816ea07afa8a44c0866d52f9", size = 2065567, upload-time = "2025-11-04T13:39:12.244Z" },
+ { url = "https://files.pythonhosted.org/packages/b3/24/cf84974ee7d6eae06b9e63289b7b8f6549d416b5c199ca2d7ce13bbcf619/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f547144f2966e1e16ae626d8ce72b4cfa0caedc7fa28052001c94fb2fcaa1c52", size = 2230442, upload-time = "2025-11-04T13:39:13.962Z" },
+ { url = "https://files.pythonhosted.org/packages/fd/21/4e287865504b3edc0136c89c9c09431be326168b1eb7841911cbc877a995/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f52298fbd394f9ed112d56f3d11aabd0d5bd27beb3084cc3d8ad069483b8941", size = 2350956, upload-time = "2025-11-04T13:39:15.889Z" },
+ { url = "https://files.pythonhosted.org/packages/a8/76/7727ef2ffa4b62fcab916686a68a0426b9b790139720e1934e8ba797e238/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:100baa204bb412b74fe285fb0f3a385256dad1d1879f0a5cb1499ed2e83d132a", size = 2068253, upload-time = "2025-11-04T13:39:17.403Z" },
+ { url = "https://files.pythonhosted.org/packages/d5/8c/a4abfc79604bcb4c748e18975c44f94f756f08fb04218d5cb87eb0d3a63e/pydantic_core-2.41.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:05a2c8852530ad2812cb7914dc61a1125dc4e06252ee98e5638a12da6cc6fb6c", size = 2177050, upload-time = "2025-11-04T13:39:19.351Z" },
+ { url = "https://files.pythonhosted.org/packages/67/b1/de2e9a9a79b480f9cb0b6e8b6ba4c50b18d4e89852426364c66aa82bb7b3/pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:29452c56df2ed968d18d7e21f4ab0ac55e71dc59524872f6fc57dcf4a3249ed2", size = 2147178, upload-time = "2025-11-04T13:39:21Z" },
+ { url = "https://files.pythonhosted.org/packages/16/c1/dfb33f837a47b20417500efaa0378adc6635b3c79e8369ff7a03c494b4ac/pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:d5160812ea7a8a2ffbe233d8da666880cad0cbaf5d4de74ae15c313213d62556", size = 2341833, upload-time = "2025-11-04T13:39:22.606Z" },
+ { url = "https://files.pythonhosted.org/packages/47/36/00f398642a0f4b815a9a558c4f1dca1b4020a7d49562807d7bc9ff279a6c/pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:df3959765b553b9440adfd3c795617c352154e497a4eaf3752555cfb5da8fc49", size = 2321156, upload-time = "2025-11-04T13:39:25.843Z" },
+ { url = "https://files.pythonhosted.org/packages/7e/70/cad3acd89fde2010807354d978725ae111ddf6d0ea46d1ea1775b5c1bd0c/pydantic_core-2.41.5-cp310-cp310-win32.whl", hash = "sha256:1f8d33a7f4d5a7889e60dc39856d76d09333d8a6ed0f5f1190635cbec70ec4ba", size = 1989378, upload-time = "2025-11-04T13:39:27.92Z" },
+ { url = "https://files.pythonhosted.org/packages/76/92/d338652464c6c367e5608e4488201702cd1cbb0f33f7b6a85a60fe5f3720/pydantic_core-2.41.5-cp310-cp310-win_amd64.whl", hash = "sha256:62de39db01b8d593e45871af2af9e497295db8d73b085f6bfd0b18c83c70a8f9", size = 2013622, upload-time = "2025-11-04T13:39:29.848Z" },
+ { url = "https://files.pythonhosted.org/packages/e8/72/74a989dd9f2084b3d9530b0915fdda64ac48831c30dbf7c72a41a5232db8/pydantic_core-2.41.5-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:a3a52f6156e73e7ccb0f8cced536adccb7042be67cb45f9562e12b319c119da6", size = 2105873, upload-time = "2025-11-04T13:39:31.373Z" },
+ { url = "https://files.pythonhosted.org/packages/12/44/37e403fd9455708b3b942949e1d7febc02167662bf1a7da5b78ee1ea2842/pydantic_core-2.41.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7f3bf998340c6d4b0c9a2f02d6a400e51f123b59565d74dc60d252ce888c260b", size = 1899826, upload-time = "2025-11-04T13:39:32.897Z" },
+ { url = "https://files.pythonhosted.org/packages/33/7f/1d5cab3ccf44c1935a359d51a8a2a9e1a654b744b5e7f80d41b88d501eec/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:378bec5c66998815d224c9ca994f1e14c0c21cb95d2f52b6021cc0b2a58f2a5a", size = 1917869, upload-time = "2025-11-04T13:39:34.469Z" },
+ { url = "https://files.pythonhosted.org/packages/6e/6a/30d94a9674a7fe4f4744052ed6c5e083424510be1e93da5bc47569d11810/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e7b576130c69225432866fe2f4a469a85a54ade141d96fd396dffcf607b558f8", size = 2063890, upload-time = "2025-11-04T13:39:36.053Z" },
+ { url = "https://files.pythonhosted.org/packages/50/be/76e5d46203fcb2750e542f32e6c371ffa9b8ad17364cf94bb0818dbfb50c/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6cb58b9c66f7e4179a2d5e0f849c48eff5c1fca560994d6eb6543abf955a149e", size = 2229740, upload-time = "2025-11-04T13:39:37.753Z" },
+ { url = "https://files.pythonhosted.org/packages/d3/ee/fed784df0144793489f87db310a6bbf8118d7b630ed07aa180d6067e653a/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:88942d3a3dff3afc8288c21e565e476fc278902ae4d6d134f1eeda118cc830b1", size = 2350021, upload-time = "2025-11-04T13:39:40.94Z" },
+ { url = "https://files.pythonhosted.org/packages/c8/be/8fed28dd0a180dca19e72c233cbf58efa36df055e5b9d90d64fd1740b828/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f31d95a179f8d64d90f6831d71fa93290893a33148d890ba15de25642c5d075b", size = 2066378, upload-time = "2025-11-04T13:39:42.523Z" },
+ { url = "https://files.pythonhosted.org/packages/b0/3b/698cf8ae1d536a010e05121b4958b1257f0b5522085e335360e53a6b1c8b/pydantic_core-2.41.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c1df3d34aced70add6f867a8cf413e299177e0c22660cc767218373d0779487b", size = 2175761, upload-time = "2025-11-04T13:39:44.553Z" },
+ { url = "https://files.pythonhosted.org/packages/b8/ba/15d537423939553116dea94ce02f9c31be0fa9d0b806d427e0308ec17145/pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4009935984bd36bd2c774e13f9a09563ce8de4abaa7226f5108262fa3e637284", size = 2146303, upload-time = "2025-11-04T13:39:46.238Z" },
+ { url = "https://files.pythonhosted.org/packages/58/7f/0de669bf37d206723795f9c90c82966726a2ab06c336deba4735b55af431/pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:34a64bc3441dc1213096a20fe27e8e128bd3ff89921706e83c0b1ac971276594", size = 2340355, upload-time = "2025-11-04T13:39:48.002Z" },
+ { url = "https://files.pythonhosted.org/packages/e5/de/e7482c435b83d7e3c3ee5ee4451f6e8973cff0eb6007d2872ce6383f6398/pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c9e19dd6e28fdcaa5a1de679aec4141f691023916427ef9bae8584f9c2fb3b0e", size = 2319875, upload-time = "2025-11-04T13:39:49.705Z" },
+ { url = "https://files.pythonhosted.org/packages/fe/e6/8c9e81bb6dd7560e33b9053351c29f30c8194b72f2d6932888581f503482/pydantic_core-2.41.5-cp311-cp311-win32.whl", hash = "sha256:2c010c6ded393148374c0f6f0bf89d206bf3217f201faa0635dcd56bd1520f6b", size = 1987549, upload-time = "2025-11-04T13:39:51.842Z" },
+ { url = "https://files.pythonhosted.org/packages/11/66/f14d1d978ea94d1bc21fc98fcf570f9542fe55bfcc40269d4e1a21c19bf7/pydantic_core-2.41.5-cp311-cp311-win_amd64.whl", hash = "sha256:76ee27c6e9c7f16f47db7a94157112a2f3a00e958bc626e2f4ee8bec5c328fbe", size = 2011305, upload-time = "2025-11-04T13:39:53.485Z" },
+ { url = "https://files.pythonhosted.org/packages/56/d8/0e271434e8efd03186c5386671328154ee349ff0354d83c74f5caaf096ed/pydantic_core-2.41.5-cp311-cp311-win_arm64.whl", hash = "sha256:4bc36bbc0b7584de96561184ad7f012478987882ebf9f9c389b23f432ea3d90f", size = 1972902, upload-time = "2025-11-04T13:39:56.488Z" },
{ url = "https://files.pythonhosted.org/packages/5f/5d/5f6c63eebb5afee93bcaae4ce9a898f3373ca23df3ccaef086d0233a35a7/pydantic_core-2.41.5-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f41a7489d32336dbf2199c8c0a215390a751c5b014c2c1c5366e817202e9cdf7", size = 2110990, upload-time = "2025-11-04T13:39:58.079Z" },
{ url = "https://files.pythonhosted.org/packages/aa/32/9c2e8ccb57c01111e0fd091f236c7b371c1bccea0fa85247ac55b1e2b6b6/pydantic_core-2.41.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:070259a8818988b9a84a449a2a7337c7f430a22acc0859c6b110aa7212a6d9c0", size = 1896003, upload-time = "2025-11-04T13:39:59.956Z" },
{ url = "https://files.pythonhosted.org/packages/68/b8/a01b53cb0e59139fbc9e4fda3e9724ede8de279097179be4ff31f1abb65a/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e96cea19e34778f8d59fe40775a7a574d95816eb150850a85a7a4c8f4b94ac69", size = 1919200, upload-time = "2025-11-04T13:40:02.241Z" },
@@ -1188,10 +1170,43 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/5c/96/5fb7d8c3c17bc8c62fdb031c47d77a1af698f1d7a406b0f79aaa1338f9ad/pydantic_core-2.41.5-cp314-cp314t-win32.whl", hash = "sha256:b4ececa40ac28afa90871c2cc2b9ffd2ff0bf749380fbdf57d165fd23da353aa", size = 1988906, upload-time = "2025-11-04T13:41:56.606Z" },
{ url = "https://files.pythonhosted.org/packages/22/ed/182129d83032702912c2e2d8bbe33c036f342cc735737064668585dac28f/pydantic_core-2.41.5-cp314-cp314t-win_amd64.whl", hash = "sha256:80aa89cad80b32a912a65332f64a4450ed00966111b6615ca6816153d3585a8c", size = 1981607, upload-time = "2025-11-04T13:41:58.889Z" },
{ url = "https://files.pythonhosted.org/packages/9f/ed/068e41660b832bb0b1aa5b58011dea2a3fe0ba7861ff38c4d4904c1c1a99/pydantic_core-2.41.5-cp314-cp314t-win_arm64.whl", hash = "sha256:35b44f37a3199f771c3eaa53051bc8a70cd7b54f333531c59e29fd4db5d15008", size = 1974769, upload-time = "2025-11-04T13:42:01.186Z" },
+ { url = "https://files.pythonhosted.org/packages/54/db/160dffb57ed9a3705c4cbcbff0ac03bdae45f1ca7d58ab74645550df3fbd/pydantic_core-2.41.5-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:8bfeaf8735be79f225f3fefab7f941c712aaca36f1128c9d7e2352ee1aa87bdf", size = 2107999, upload-time = "2025-11-04T13:42:03.885Z" },
+ { url = "https://files.pythonhosted.org/packages/a3/7d/88e7de946f60d9263cc84819f32513520b85c0f8322f9b8f6e4afc938383/pydantic_core-2.41.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:346285d28e4c8017da95144c7f3acd42740d637ff41946af5ce6e5e420502dd5", size = 1929745, upload-time = "2025-11-04T13:42:06.075Z" },
+ { url = "https://files.pythonhosted.org/packages/d5/c2/aef51e5b283780e85e99ff19db0f05842d2d4a8a8cd15e63b0280029b08f/pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a75dafbf87d6276ddc5b2bf6fae5254e3d0876b626eb24969a574fff9149ee5d", size = 1920220, upload-time = "2025-11-04T13:42:08.457Z" },
+ { url = "https://files.pythonhosted.org/packages/c7/97/492ab10f9ac8695cd76b2fdb24e9e61f394051df71594e9bcc891c9f586e/pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7b93a4d08587e2b7e7882de461e82b6ed76d9026ce91ca7915e740ecc7855f60", size = 2067296, upload-time = "2025-11-04T13:42:10.817Z" },
+ { url = "https://files.pythonhosted.org/packages/ec/23/984149650e5269c59a2a4c41d234a9570adc68ab29981825cfaf4cfad8f4/pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e8465ab91a4bd96d36dde3263f06caa6a8a6019e4113f24dc753d79a8b3a3f82", size = 2231548, upload-time = "2025-11-04T13:42:13.843Z" },
+ { url = "https://files.pythonhosted.org/packages/71/0c/85bcbb885b9732c28bec67a222dbed5ed2d77baee1f8bba2002e8cd00c5c/pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:299e0a22e7ae2b85c1a57f104538b2656e8ab1873511fd718a1c1c6f149b77b5", size = 2362571, upload-time = "2025-11-04T13:42:16.208Z" },
+ { url = "https://files.pythonhosted.org/packages/c0/4a/412d2048be12c334003e9b823a3fa3d038e46cc2d64dd8aab50b31b65499/pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:707625ef0983fcfb461acfaf14de2067c5942c6bb0f3b4c99158bed6fedd3cf3", size = 2068175, upload-time = "2025-11-04T13:42:18.911Z" },
+ { url = "https://files.pythonhosted.org/packages/73/f4/c58b6a776b502d0a5540ad02e232514285513572060f0d78f7832ca3c98b/pydantic_core-2.41.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f41eb9797986d6ebac5e8edff36d5cef9de40def462311b3eb3eeded1431e425", size = 2177203, upload-time = "2025-11-04T13:42:22.578Z" },
+ { url = "https://files.pythonhosted.org/packages/ed/ae/f06ea4c7e7a9eead3d165e7623cd2ea0cb788e277e4f935af63fc98fa4e6/pydantic_core-2.41.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0384e2e1021894b1ff5a786dbf94771e2986ebe2869533874d7e43bc79c6f504", size = 2148191, upload-time = "2025-11-04T13:42:24.89Z" },
+ { url = "https://files.pythonhosted.org/packages/c1/57/25a11dcdc656bf5f8b05902c3c2934ac3ea296257cc4a3f79a6319e61856/pydantic_core-2.41.5-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:f0cd744688278965817fd0839c4a4116add48d23890d468bc436f78beb28abf5", size = 2343907, upload-time = "2025-11-04T13:42:27.683Z" },
+ { url = "https://files.pythonhosted.org/packages/96/82/e33d5f4933d7a03327c0c43c65d575e5919d4974ffc026bc917a5f7b9f61/pydantic_core-2.41.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:753e230374206729bf0a807954bcc6c150d3743928a73faffee51ac6557a03c3", size = 2322174, upload-time = "2025-11-04T13:42:30.776Z" },
+ { url = "https://files.pythonhosted.org/packages/81/45/4091be67ce9f469e81656f880f3506f6a5624121ec5eb3eab37d7581897d/pydantic_core-2.41.5-cp39-cp39-win32.whl", hash = "sha256:873e0d5b4fb9b89ef7c2d2a963ea7d02879d9da0da8d9d4933dee8ee86a8b460", size = 1990353, upload-time = "2025-11-04T13:42:33.111Z" },
+ { url = "https://files.pythonhosted.org/packages/44/8a/a98aede18db6e9cd5d66bcacd8a409fcf8134204cdede2e7de35c5a2c5ef/pydantic_core-2.41.5-cp39-cp39-win_amd64.whl", hash = "sha256:e4f4a984405e91527a0d62649ee21138f8e3d0ef103be488c1dc11a80d7f184b", size = 2015698, upload-time = "2025-11-04T13:42:35.484Z" },
+ { url = "https://files.pythonhosted.org/packages/11/72/90fda5ee3b97e51c494938a4a44c3a35a9c96c19bba12372fb9c634d6f57/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_10_12_x86_64.whl", hash = "sha256:b96d5f26b05d03cc60f11a7761a5ded1741da411e7fe0909e27a5e6a0cb7b034", size = 2115441, upload-time = "2025-11-04T13:42:39.557Z" },
+ { url = "https://files.pythonhosted.org/packages/1f/53/8942f884fa33f50794f119012dc6a1a02ac43a56407adaac20463df8e98f/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_11_0_arm64.whl", hash = "sha256:634e8609e89ceecea15e2d61bc9ac3718caaaa71963717bf3c8f38bfde64242c", size = 1930291, upload-time = "2025-11-04T13:42:42.169Z" },
+ { url = "https://files.pythonhosted.org/packages/79/c8/ecb9ed9cd942bce09fc888ee960b52654fbdbede4ba6c2d6e0d3b1d8b49c/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:93e8740d7503eb008aa2df04d3b9735f845d43ae845e6dcd2be0b55a2da43cd2", size = 1948632, upload-time = "2025-11-04T13:42:44.564Z" },
+ { url = "https://files.pythonhosted.org/packages/2e/1b/687711069de7efa6af934e74f601e2a4307365e8fdc404703afc453eab26/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f15489ba13d61f670dcc96772e733aad1a6f9c429cc27574c6cdaed82d0146ad", size = 2138905, upload-time = "2025-11-04T13:42:47.156Z" },
{ url = "https://files.pythonhosted.org/packages/09/32/59b0c7e63e277fa7911c2fc70ccfb45ce4b98991e7ef37110663437005af/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:7da7087d756b19037bc2c06edc6c170eeef3c3bafcb8f532ff17d64dc427adfd", size = 2110495, upload-time = "2025-11-04T13:42:49.689Z" },
{ url = "https://files.pythonhosted.org/packages/aa/81/05e400037eaf55ad400bcd318c05bb345b57e708887f07ddb2d20e3f0e98/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:aabf5777b5c8ca26f7824cb4a120a740c9588ed58df9b2d196ce92fba42ff8dc", size = 1915388, upload-time = "2025-11-04T13:42:52.215Z" },
{ url = "https://files.pythonhosted.org/packages/6e/0d/e3549b2399f71d56476b77dbf3cf8937cec5cd70536bdc0e374a421d0599/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c007fe8a43d43b3969e8469004e9845944f1a80e6acd47c150856bb87f230c56", size = 1942879, upload-time = "2025-11-04T13:42:56.483Z" },
{ url = "https://files.pythonhosted.org/packages/f7/07/34573da085946b6a313d7c42f82f16e8920bfd730665de2d11c0c37a74b5/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76d0819de158cd855d1cbb8fcafdf6f5cf1eb8e470abe056d5d161106e38062b", size = 2139017, upload-time = "2025-11-04T13:42:59.471Z" },
+ { url = "https://files.pythonhosted.org/packages/e6/b0/1a2aa41e3b5a4ba11420aba2d091b2d17959c8d1519ece3627c371951e73/pydantic_core-2.41.5-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b5819cd790dbf0c5eb9f82c73c16b39a65dd6dd4d1439dcdea7816ec9adddab8", size = 2103351, upload-time = "2025-11-04T13:43:02.058Z" },
+ { url = "https://files.pythonhosted.org/packages/a4/ee/31b1f0020baaf6d091c87900ae05c6aeae101fa4e188e1613c80e4f1ea31/pydantic_core-2.41.5-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:5a4e67afbc95fa5c34cf27d9089bca7fcab4e51e57278d710320a70b956d1b9a", size = 1925363, upload-time = "2025-11-04T13:43:05.159Z" },
+ { url = "https://files.pythonhosted.org/packages/e1/89/ab8e86208467e467a80deaca4e434adac37b10a9d134cd2f99b28a01e483/pydantic_core-2.41.5-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ece5c59f0ce7d001e017643d8d24da587ea1f74f6993467d85ae8a5ef9d4f42b", size = 2135615, upload-time = "2025-11-04T13:43:08.116Z" },
+ { url = "https://files.pythonhosted.org/packages/99/0a/99a53d06dd0348b2008f2f30884b34719c323f16c3be4e6cc1203b74a91d/pydantic_core-2.41.5-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:16f80f7abe3351f8ea6858914ddc8c77e02578544a0ebc15b4c2e1a0e813b0b2", size = 2175369, upload-time = "2025-11-04T13:43:12.49Z" },
+ { url = "https://files.pythonhosted.org/packages/6d/94/30ca3b73c6d485b9bb0bc66e611cff4a7138ff9736b7e66bcf0852151636/pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:33cb885e759a705b426baada1fe68cbb0a2e68e34c5d0d0289a364cf01709093", size = 2144218, upload-time = "2025-11-04T13:43:15.431Z" },
+ { url = "https://files.pythonhosted.org/packages/87/57/31b4f8e12680b739a91f472b5671294236b82586889ef764b5fbc6669238/pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:c8d8b4eb992936023be7dee581270af5c6e0697a8559895f527f5b7105ecd36a", size = 2329951, upload-time = "2025-11-04T13:43:18.062Z" },
+ { url = "https://files.pythonhosted.org/packages/7d/73/3c2c8edef77b8f7310e6fb012dbc4b8551386ed575b9eb6fb2506e28a7eb/pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:242a206cd0318f95cd21bdacff3fcc3aab23e79bba5cac3db5a841c9ef9c6963", size = 2318428, upload-time = "2025-11-04T13:43:20.679Z" },
+ { url = "https://files.pythonhosted.org/packages/2f/02/8559b1f26ee0d502c74f9cca5c0d2fd97e967e083e006bbbb4e97f3a043a/pydantic_core-2.41.5-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:d3a978c4f57a597908b7e697229d996d77a6d3c94901e9edee593adada95ce1a", size = 2147009, upload-time = "2025-11-04T13:43:23.286Z" },
+ { url = "https://files.pythonhosted.org/packages/5f/9b/1b3f0e9f9305839d7e84912f9e8bfbd191ed1b1ef48083609f0dabde978c/pydantic_core-2.41.5-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b2379fa7ed44ddecb5bfe4e48577d752db9fc10be00a6b7446e9663ba143de26", size = 2101980, upload-time = "2025-11-04T13:43:25.97Z" },
+ { url = "https://files.pythonhosted.org/packages/a4/ed/d71fefcb4263df0da6a85b5d8a7508360f2f2e9b3bf5814be9c8bccdccc1/pydantic_core-2.41.5-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:266fb4cbf5e3cbd0b53669a6d1b039c45e3ce651fd5442eff4d07c2cc8d66808", size = 1923865, upload-time = "2025-11-04T13:43:28.763Z" },
+ { url = "https://files.pythonhosted.org/packages/ce/3a/626b38db460d675f873e4444b4bb030453bbe7b4ba55df821d026a0493c4/pydantic_core-2.41.5-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58133647260ea01e4d0500089a8c4f07bd7aa6ce109682b1426394988d8aaacc", size = 2134256, upload-time = "2025-11-04T13:43:31.71Z" },
+ { url = "https://files.pythonhosted.org/packages/83/d9/8412d7f06f616bbc053d30cb4e5f76786af3221462ad5eee1f202021eb4e/pydantic_core-2.41.5-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:287dad91cfb551c363dc62899a80e9e14da1f0e2b6ebde82c806612ca2a13ef1", size = 2174762, upload-time = "2025-11-04T13:43:34.744Z" },
+ { url = "https://files.pythonhosted.org/packages/55/4c/162d906b8e3ba3a99354e20faa1b49a85206c47de97a639510a0e673f5da/pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:03b77d184b9eb40240ae9fd676ca364ce1085f203e1b1256f8ab9984dca80a84", size = 2143141, upload-time = "2025-11-04T13:43:37.701Z" },
+ { url = "https://files.pythonhosted.org/packages/1f/f2/f11dd73284122713f5f89fc940f370d035fa8e1e078d446b3313955157fe/pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:a668ce24de96165bb239160b3d854943128f4334822900534f2fe947930e5770", size = 2330317, upload-time = "2025-11-04T13:43:40.406Z" },
+ { url = "https://files.pythonhosted.org/packages/88/9d/b06ca6acfe4abb296110fb1273a4d848a0bfb2ff65f3ee92127b3244e16b/pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f14f8f046c14563f8eb3f45f499cc658ab8d10072961e07225e507adb700e93f", size = 2316992, upload-time = "2025-11-04T13:43:43.602Z" },
+ { url = "https://files.pythonhosted.org/packages/36/c7/cfc8e811f061c841d7990b0201912c3556bfeb99cdcb7ed24adc8d6f8704/pydantic_core-2.41.5-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:56121965f7a4dc965bff783d70b907ddf3d57f6eba29b6d2e5dabfaf07799c51", size = 2145302, upload-time = "2025-11-04T13:43:46.64Z" },
]

[[package]]
@@ -1216,29 +1231,82 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/2f/b5/380380c9e7a534cb1783c70c3e8ac6d1193c599650a55838d0557586796e/pyright-1.1.399-py3-none-any.whl", hash = "sha256:55f9a875ddf23c9698f24208c764465ffdfd38be6265f7faf9a176e1dc549f3b", size = 5592584, upload-time = "2025-04-10T04:40:23.502Z" },
]

+[[package]]
+name = "pytest"
+version = "8.4.2"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "python_full_version < '3.10'",
+]
+dependencies = [
+ { name = "colorama", marker = "(python_full_version < '3.10' and sys_platform == 'win32') or (python_full_version >= '3.10' and extra == 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2') or (sys_platform != 'win32' and extra == 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2')" },
+ { name = "exceptiongroup", marker = "python_full_version < '3.10' or (extra == 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2')" },
+ { name = "iniconfig", version = "2.1.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10' or (extra == 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2')" },
+ { name = "packaging", marker = "python_full_version < '3.10' or (extra == 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2')" },
+ { name = "pluggy", marker = "python_full_version < '3.10' or (extra == 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2')" },
+ { name = "pygments", marker = "python_full_version < '3.10' or (extra == 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2')" },
+ { name = "tomli", marker = "python_full_version < '3.10' or (extra == 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2')" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/a3/5c/00a0e072241553e1a7496d638deababa67c5058571567b92a7eaa258397c/pytest-8.4.2.tar.gz", hash = "sha256:86c0d0b93306b961d58d62a4db4879f27fe25513d4b969df351abdddb3c30e01", size = 1519618, upload-time = "2025-09-04T14:34:22.711Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/a8/a4/20da314d277121d6534b3a980b29035dcd51e6744bd79075a6ce8fa4eb8d/pytest-8.4.2-py3-none-any.whl", hash = "sha256:872f880de3fc3a5bdc88a11b39c9710c3497a547cfa9320bc3c5e62fbf272e79", size = 365750, upload-time = "2025-09-04T14:34:20.226Z" },
+]
+
[[package]]
name = "pytest"
version = "9.0.2"
source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "python_full_version >= '3.14' and extra != 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2'",
+ "python_full_version >= '3.10' and python_full_version < '3.14' and extra != 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2'",
+ "python_full_version >= '3.10' and extra == 'group-10-ogx-client-pydantic-v1' and extra != 'group-10-ogx-client-pydantic-v2'",
+ "python_full_version >= '3.10' and extra != 'group-10-ogx-client-pydantic-v1' and extra != 'group-10-ogx-client-pydantic-v2'",
+]
dependencies = [
- { name = "colorama", marker = "sys_platform == 'win32' or (extra == 'group-18-llama-stack-client-pydantic-v1' and extra == 'group-18-llama-stack-client-pydantic-v2')" },
- { name = "iniconfig" },
- { name = "packaging" },
- { name = "pluggy" },
- { name = "pygments" },
+ { name = "colorama", marker = "(python_full_version >= '3.10' and sys_platform == 'win32') or (python_full_version < '3.10' and extra == 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2') or (sys_platform != 'win32' and extra == 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2')" },
+ { name = "exceptiongroup", marker = "python_full_version == '3.10.*' or (extra == 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2')" },
+ { name = "iniconfig", version = "2.3.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10' or (extra == 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2')" },
+ { name = "packaging", marker = "python_full_version >= '3.10' or (extra == 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2')" },
+ { name = "pluggy", marker = "python_full_version >= '3.10' or (extra == 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2')" },
+ { name = "pygments", marker = "python_full_version >= '3.10' or (extra == 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2')" },
+ { name = "tomli", marker = "python_full_version == '3.10.*' or (extra == 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2')" },
]
sdist = { url = "https://files.pythonhosted.org/packages/d1/db/7ef3487e0fb0049ddb5ce41d3a49c235bf9ad299b6a25d5780a89f19230f/pytest-9.0.2.tar.gz", hash = "sha256:75186651a92bd89611d1d9fc20f0b4345fd827c41ccd5c299a868a05d70edf11", size = 1568901, upload-time = "2025-12-06T21:30:51.014Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/3b/ab/b3226f0bd7cdcf710fbede2b3548584366da3b19b5021e74f5bde2a8fa3f/pytest-9.0.2-py3-none-any.whl", hash = "sha256:711ffd45bf766d5264d487b917733b453d917afd2b0ad65223959f59089f875b", size = 374801, upload-time = "2025-12-06T21:30:49.154Z" },
]

+[[package]]
+name = "pytest-asyncio"
+version = "1.2.0"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "python_full_version < '3.10'",
+]
+dependencies = [
+ { name = "backports-asyncio-runner", marker = "python_full_version < '3.10' or (extra == 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2')" },
+ { name = "pytest", version = "8.4.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10' or (extra == 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2')" },
+ { name = "typing-extensions", marker = "python_full_version < '3.10' or (extra == 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2')" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/42/86/9e3c5f48f7b7b638b216e4b9e645f54d199d7abbbab7a64a13b4e12ba10f/pytest_asyncio-1.2.0.tar.gz", hash = "sha256:c609a64a2a8768462d0c99811ddb8bd2583c33fd33cf7f21af1c142e824ffb57", size = 50119, upload-time = "2025-09-12T07:33:53.816Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/04/93/2fa34714b7a4ae72f2f8dad66ba17dd9a2c793220719e736dda28b7aec27/pytest_asyncio-1.2.0-py3-none-any.whl", hash = "sha256:8e17ae5e46d8e7efe51ab6494dd2010f4ca8dae51652aa3c8d55acf50bfb2e99", size = 15095, upload-time = "2025-09-12T07:33:52.639Z" },
+]
+
[[package]]
name = "pytest-asyncio"
version = "1.3.0"
source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "python_full_version >= '3.14' and extra != 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2'",
+ "python_full_version >= '3.10' and python_full_version < '3.14' and extra != 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2'",
+ "python_full_version >= '3.10' and extra == 'group-10-ogx-client-pydantic-v1' and extra != 'group-10-ogx-client-pydantic-v2'",
+ "python_full_version >= '3.10' and extra != 'group-10-ogx-client-pydantic-v1' and extra != 'group-10-ogx-client-pydantic-v2'",
+]
dependencies = [
- { name = "pytest" },
- { name = "typing-extensions", marker = "python_full_version < '3.13' or (extra == 'group-18-llama-stack-client-pydantic-v1' and extra == 'group-18-llama-stack-client-pydantic-v2')" },
+ { name = "backports-asyncio-runner", marker = "python_full_version == '3.10.*' or (extra == 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2')" },
+ { name = "pytest", version = "9.0.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10' or (extra == 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2')" },
+ { name = "typing-extensions", marker = "(python_full_version >= '3.10' and python_full_version < '3.13') or (python_full_version < '3.10' and extra == 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2') or (python_full_version >= '3.13' and extra == 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2')" },
]
sdist = { url = "https://files.pythonhosted.org/packages/90/2c/8af215c0f776415f3590cac4f9086ccefd6fd463befeae41cd4d3f193e5a/pytest_asyncio-1.3.0.tar.gz", hash = "sha256:d7f52f36d231b80ee124cd216ffb19369aa168fc10095013c6b014a34d3ee9e5", size = 50087, upload-time = "2025-11-10T16:07:47.256Z" }
wheels = [
@@ -1251,7 +1319,8 @@ version = "3.8.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "execnet" },
- { name = "pytest" },
+ { name = "pytest", version = "8.4.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10' or (extra == 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2')" },
+ { name = "pytest", version = "9.0.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10' or (extra == 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2')" },
]
sdist = { url = "https://files.pythonhosted.org/packages/78/b4/439b179d1ff526791eb921115fca8e44e596a13efeda518b9d845a619450/pytest_xdist-3.8.0.tar.gz", hash = "sha256:7e578125ec9bc6050861aa93f2d59f1d8d085595d6551c2c90b6f4fad8d3a9f1", size = 88069, upload-time = "2025-07-01T13:30:59.346Z" }
wheels = [
@@ -1263,116 +1332,13 @@ name = "python-dateutil"
version = "2.9.0.post0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
- { name = "six" },
+ { name = "six", marker = "python_full_version < '3.10' or (extra == 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2')" },
]
sdist = { url = "https://files.pythonhosted.org/packages/66/c0/0c8b6ad9f17a802ee498c46e004a0eb49bc148f2fd230864601a86dcf6db/python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", size = 342432, upload-time = "2024-03-01T18:36:20.211Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892, upload-time = "2024-03-01T18:36:18.57Z" },
]

-[[package]]
-name = "python-discovery"
-version = "1.2.1"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "filelock" },
- { name = "platformdirs" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/b9/88/815e53084c5079a59df912825a279f41dd2e0df82281770eadc732f5352c/python_discovery-1.2.1.tar.gz", hash = "sha256:180c4d114bff1c32462537eac5d6a332b768242b76b69c0259c7d14b1b680c9e", size = 58457, upload-time = "2026-03-26T22:30:44.496Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/67/0f/019d3949a40280f6193b62bc010177d4ce702d0fce424322286488569cd3/python_discovery-1.2.1-py3-none-any.whl", hash = "sha256:b6a957b24c1cd79252484d3566d1b49527581d46e789aaf43181005e56201502", size = 31674, upload-time = "2026-03-26T22:30:43.396Z" },
-]
-
-[[package]]
-name = "pytokens"
-version = "0.4.1"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/b6/34/b4e015b99031667a7b960f888889c5bd34ef585c85e1cb56a594b92836ac/pytokens-0.4.1.tar.gz", hash = "sha256:292052fe80923aae2260c073f822ceba21f3872ced9a68bb7953b348e561179a", size = 23015, upload-time = "2026-01-30T01:03:45.924Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/41/5d/e44573011401fb82e9d51e97f1290ceb377800fb4eed650b96f4753b499c/pytokens-0.4.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:140709331e846b728475786df8aeb27d24f48cbcf7bcd449f8de75cae7a45083", size = 160663, upload-time = "2026-01-30T01:03:06.473Z" },
- { url = "https://files.pythonhosted.org/packages/f0/e6/5bbc3019f8e6f21d09c41f8b8654536117e5e211a85d89212d59cbdab381/pytokens-0.4.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6d6c4268598f762bc8e91f5dbf2ab2f61f7b95bdc07953b602db879b3c8c18e1", size = 255626, upload-time = "2026-01-30T01:03:08.177Z" },
- { url = "https://files.pythonhosted.org/packages/bf/3c/2d5297d82286f6f3d92770289fd439956b201c0a4fc7e72efb9b2293758e/pytokens-0.4.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:24afde1f53d95348b5a0eb19488661147285ca4dd7ed752bbc3e1c6242a304d1", size = 269779, upload-time = "2026-01-30T01:03:09.756Z" },
- { url = "https://files.pythonhosted.org/packages/20/01/7436e9ad693cebda0551203e0bf28f7669976c60ad07d6402098208476de/pytokens-0.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:5ad948d085ed6c16413eb5fec6b3e02fa00dc29a2534f088d3302c47eb59adf9", size = 268076, upload-time = "2026-01-30T01:03:10.957Z" },
- { url = "https://files.pythonhosted.org/packages/2e/df/533c82a3c752ba13ae7ef238b7f8cdd272cf1475f03c63ac6cf3fcfb00b6/pytokens-0.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:3f901fe783e06e48e8cbdc82d631fca8f118333798193e026a50ce1b3757ea68", size = 103552, upload-time = "2026-01-30T01:03:12.066Z" },
- { url = "https://files.pythonhosted.org/packages/cb/dc/08b1a080372afda3cceb4f3c0a7ba2bde9d6a5241f1edb02a22a019ee147/pytokens-0.4.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:8bdb9d0ce90cbf99c525e75a2fa415144fd570a1ba987380190e8b786bc6ef9b", size = 160720, upload-time = "2026-01-30T01:03:13.843Z" },
- { url = "https://files.pythonhosted.org/packages/64/0c/41ea22205da480837a700e395507e6a24425151dfb7ead73343d6e2d7ffe/pytokens-0.4.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5502408cab1cb18e128570f8d598981c68a50d0cbd7c61312a90507cd3a1276f", size = 254204, upload-time = "2026-01-30T01:03:14.886Z" },
- { url = "https://files.pythonhosted.org/packages/e0/d2/afe5c7f8607018beb99971489dbb846508f1b8f351fcefc225fcf4b2adc0/pytokens-0.4.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:29d1d8fb1030af4d231789959f21821ab6325e463f0503a61d204343c9b355d1", size = 268423, upload-time = "2026-01-30T01:03:15.936Z" },
- { url = "https://files.pythonhosted.org/packages/68/d4/00ffdbd370410c04e9591da9220a68dc1693ef7499173eb3e30d06e05ed1/pytokens-0.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:970b08dd6b86058b6dc07efe9e98414f5102974716232d10f32ff39701e841c4", size = 266859, upload-time = "2026-01-30T01:03:17.458Z" },
- { url = "https://files.pythonhosted.org/packages/a7/c9/c3161313b4ca0c601eeefabd3d3b576edaa9afdefd32da97210700e47652/pytokens-0.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:9bd7d7f544d362576be74f9d5901a22f317efc20046efe2034dced238cbbfe78", size = 103520, upload-time = "2026-01-30T01:03:18.652Z" },
- { url = "https://files.pythonhosted.org/packages/8f/a7/b470f672e6fc5fee0a01d9e75005a0e617e162381974213a945fcd274843/pytokens-0.4.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:4a14d5f5fc78ce85e426aa159489e2d5961acf0e47575e08f35584009178e321", size = 160821, upload-time = "2026-01-30T01:03:19.684Z" },
- { url = "https://files.pythonhosted.org/packages/80/98/e83a36fe8d170c911f864bfded690d2542bfcfacb9c649d11a9e6eb9dc41/pytokens-0.4.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:97f50fd18543be72da51dd505e2ed20d2228c74e0464e4262e4899797803d7fa", size = 254263, upload-time = "2026-01-30T01:03:20.834Z" },
- { url = "https://files.pythonhosted.org/packages/0f/95/70d7041273890f9f97a24234c00b746e8da86df462620194cef1d411ddeb/pytokens-0.4.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dc74c035f9bfca0255c1af77ddd2d6ae8419012805453e4b0e7513e17904545d", size = 268071, upload-time = "2026-01-30T01:03:21.888Z" },
- { url = "https://files.pythonhosted.org/packages/da/79/76e6d09ae19c99404656d7db9c35dfd20f2086f3eb6ecb496b5b31163bad/pytokens-0.4.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:f66a6bbe741bd431f6d741e617e0f39ec7257ca1f89089593479347cc4d13324", size = 271716, upload-time = "2026-01-30T01:03:23.633Z" },
- { url = "https://files.pythonhosted.org/packages/79/37/482e55fa1602e0a7ff012661d8c946bafdc05e480ea5a32f4f7e336d4aa9/pytokens-0.4.1-cp314-cp314-win_amd64.whl", hash = "sha256:b35d7e5ad269804f6697727702da3c517bb8a5228afa450ab0fa787732055fc9", size = 104539, upload-time = "2026-01-30T01:03:24.788Z" },
- { url = "https://files.pythonhosted.org/packages/30/e8/20e7db907c23f3d63b0be3b8a4fd1927f6da2395f5bcc7f72242bb963dfe/pytokens-0.4.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:8fcb9ba3709ff77e77f1c7022ff11d13553f3c30299a9fe246a166903e9091eb", size = 168474, upload-time = "2026-01-30T01:03:26.428Z" },
- { url = "https://files.pythonhosted.org/packages/d6/81/88a95ee9fafdd8f5f3452107748fd04c24930d500b9aba9738f3ade642cc/pytokens-0.4.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:79fc6b8699564e1f9b521582c35435f1bd32dd06822322ec44afdeba666d8cb3", size = 290473, upload-time = "2026-01-30T01:03:27.415Z" },
- { url = "https://files.pythonhosted.org/packages/cf/35/3aa899645e29b6375b4aed9f8d21df219e7c958c4c186b465e42ee0a06bf/pytokens-0.4.1-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d31b97b3de0f61571a124a00ffe9a81fb9939146c122c11060725bd5aea79975", size = 303485, upload-time = "2026-01-30T01:03:28.558Z" },
- { url = "https://files.pythonhosted.org/packages/52/a0/07907b6ff512674d9b201859f7d212298c44933633c946703a20c25e9d81/pytokens-0.4.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:967cf6e3fd4adf7de8fc73cd3043754ae79c36475c1c11d514fc72cf5490094a", size = 306698, upload-time = "2026-01-30T01:03:29.653Z" },
- { url = "https://files.pythonhosted.org/packages/39/2a/cbbf9250020a4a8dd53ba83a46c097b69e5eb49dd14e708f496f548c6612/pytokens-0.4.1-cp314-cp314t-win_amd64.whl", hash = "sha256:584c80c24b078eec1e227079d56dc22ff755e0ba8654d8383b2c549107528918", size = 116287, upload-time = "2026-01-30T01:03:30.912Z" },
- { url = "https://files.pythonhosted.org/packages/c6/78/397db326746f0a342855b81216ae1f0a32965deccfd7c830a2dbc66d2483/pytokens-0.4.1-py3-none-any.whl", hash = "sha256:26cef14744a8385f35d0e095dc8b3a7583f6c953c2e3d269c7f82484bf5ad2de", size = 13729, upload-time = "2026-01-30T01:03:45.029Z" },
-]
-
-[[package]]
-name = "pyyaml"
-version = "6.0.3"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/05/8e/961c0007c59b8dd7729d542c61a4d537767a59645b82a0b521206e1e25c2/pyyaml-6.0.3.tar.gz", hash = "sha256:d76623373421df22fb4cf8817020cbb7ef15c725b9d5e45f17e189bfc384190f", size = 130960, upload-time = "2025-09-25T21:33:16.546Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/d1/33/422b98d2195232ca1826284a76852ad5a86fe23e31b009c9886b2d0fb8b2/pyyaml-6.0.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7f047e29dcae44602496db43be01ad42fc6f1cc0d8cd6c83d342306c32270196", size = 182063, upload-time = "2025-09-25T21:32:11.445Z" },
- { url = "https://files.pythonhosted.org/packages/89/a0/6cf41a19a1f2f3feab0e9c0b74134aa2ce6849093d5517a0c550fe37a648/pyyaml-6.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:fc09d0aa354569bc501d4e787133afc08552722d3ab34836a80547331bb5d4a0", size = 173973, upload-time = "2025-09-25T21:32:12.492Z" },
- { url = "https://files.pythonhosted.org/packages/ed/23/7a778b6bd0b9a8039df8b1b1d80e2e2ad78aa04171592c8a5c43a56a6af4/pyyaml-6.0.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9149cad251584d5fb4981be1ecde53a1ca46c891a79788c0df828d2f166bda28", size = 775116, upload-time = "2025-09-25T21:32:13.652Z" },
- { url = "https://files.pythonhosted.org/packages/65/30/d7353c338e12baef4ecc1b09e877c1970bd3382789c159b4f89d6a70dc09/pyyaml-6.0.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5fdec68f91a0c6739b380c83b951e2c72ac0197ace422360e6d5a959d8d97b2c", size = 844011, upload-time = "2025-09-25T21:32:15.21Z" },
- { url = "https://files.pythonhosted.org/packages/8b/9d/b3589d3877982d4f2329302ef98a8026e7f4443c765c46cfecc8858c6b4b/pyyaml-6.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ba1cc08a7ccde2d2ec775841541641e4548226580ab850948cbfda66a1befcdc", size = 807870, upload-time = "2025-09-25T21:32:16.431Z" },
- { url = "https://files.pythonhosted.org/packages/05/c0/b3be26a015601b822b97d9149ff8cb5ead58c66f981e04fedf4e762f4bd4/pyyaml-6.0.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8dc52c23056b9ddd46818a57b78404882310fb473d63f17b07d5c40421e47f8e", size = 761089, upload-time = "2025-09-25T21:32:17.56Z" },
- { url = "https://files.pythonhosted.org/packages/be/8e/98435a21d1d4b46590d5459a22d88128103f8da4c2d4cb8f14f2a96504e1/pyyaml-6.0.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:41715c910c881bc081f1e8872880d3c650acf13dfa8214bad49ed4cede7c34ea", size = 790181, upload-time = "2025-09-25T21:32:18.834Z" },
- { url = "https://files.pythonhosted.org/packages/74/93/7baea19427dcfbe1e5a372d81473250b379f04b1bd3c4c5ff825e2327202/pyyaml-6.0.3-cp312-cp312-win32.whl", hash = "sha256:96b533f0e99f6579b3d4d4995707cf36df9100d67e0c8303a0c55b27b5f99bc5", size = 137658, upload-time = "2025-09-25T21:32:20.209Z" },
- { url = "https://files.pythonhosted.org/packages/86/bf/899e81e4cce32febab4fb42bb97dcdf66bc135272882d1987881a4b519e9/pyyaml-6.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:5fcd34e47f6e0b794d17de1b4ff496c00986e1c83f7ab2fb8fcfe9616ff7477b", size = 154003, upload-time = "2025-09-25T21:32:21.167Z" },
- { url = "https://files.pythonhosted.org/packages/1a/08/67bd04656199bbb51dbed1439b7f27601dfb576fb864099c7ef0c3e55531/pyyaml-6.0.3-cp312-cp312-win_arm64.whl", hash = "sha256:64386e5e707d03a7e172c0701abfb7e10f0fb753ee1d773128192742712a98fd", size = 140344, upload-time = "2025-09-25T21:32:22.617Z" },
- { url = "https://files.pythonhosted.org/packages/d1/11/0fd08f8192109f7169db964b5707a2f1e8b745d4e239b784a5a1dd80d1db/pyyaml-6.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8da9669d359f02c0b91ccc01cac4a67f16afec0dac22c2ad09f46bee0697eba8", size = 181669, upload-time = "2025-09-25T21:32:23.673Z" },
- { url = "https://files.pythonhosted.org/packages/b1/16/95309993f1d3748cd644e02e38b75d50cbc0d9561d21f390a76242ce073f/pyyaml-6.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:2283a07e2c21a2aa78d9c4442724ec1eb15f5e42a723b99cb3d822d48f5f7ad1", size = 173252, upload-time = "2025-09-25T21:32:25.149Z" },
- { url = "https://files.pythonhosted.org/packages/50/31/b20f376d3f810b9b2371e72ef5adb33879b25edb7a6d072cb7ca0c486398/pyyaml-6.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ee2922902c45ae8ccada2c5b501ab86c36525b883eff4255313a253a3160861c", size = 767081, upload-time = "2025-09-25T21:32:26.575Z" },
- { url = "https://files.pythonhosted.org/packages/49/1e/a55ca81e949270d5d4432fbbd19dfea5321eda7c41a849d443dc92fd1ff7/pyyaml-6.0.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a33284e20b78bd4a18c8c2282d549d10bc8408a2a7ff57653c0cf0b9be0afce5", size = 841159, upload-time = "2025-09-25T21:32:27.727Z" },
- { url = "https://files.pythonhosted.org/packages/74/27/e5b8f34d02d9995b80abcef563ea1f8b56d20134d8f4e5e81733b1feceb2/pyyaml-6.0.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0f29edc409a6392443abf94b9cf89ce99889a1dd5376d94316ae5145dfedd5d6", size = 801626, upload-time = "2025-09-25T21:32:28.878Z" },
- { url = "https://files.pythonhosted.org/packages/f9/11/ba845c23988798f40e52ba45f34849aa8a1f2d4af4b798588010792ebad6/pyyaml-6.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f7057c9a337546edc7973c0d3ba84ddcdf0daa14533c2065749c9075001090e6", size = 753613, upload-time = "2025-09-25T21:32:30.178Z" },
- { url = "https://files.pythonhosted.org/packages/3d/e0/7966e1a7bfc0a45bf0a7fb6b98ea03fc9b8d84fa7f2229e9659680b69ee3/pyyaml-6.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:eda16858a3cab07b80edaf74336ece1f986ba330fdb8ee0d6c0d68fe82bc96be", size = 794115, upload-time = "2025-09-25T21:32:31.353Z" },
- { url = "https://files.pythonhosted.org/packages/de/94/980b50a6531b3019e45ddeada0626d45fa85cbe22300844a7983285bed3b/pyyaml-6.0.3-cp313-cp313-win32.whl", hash = "sha256:d0eae10f8159e8fdad514efdc92d74fd8d682c933a6dd088030f3834bc8e6b26", size = 137427, upload-time = "2025-09-25T21:32:32.58Z" },
- { url = "https://files.pythonhosted.org/packages/97/c9/39d5b874e8b28845e4ec2202b5da735d0199dbe5b8fb85f91398814a9a46/pyyaml-6.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:79005a0d97d5ddabfeeea4cf676af11e647e41d81c9a7722a193022accdb6b7c", size = 154090, upload-time = "2025-09-25T21:32:33.659Z" },
- { url = "https://files.pythonhosted.org/packages/73/e8/2bdf3ca2090f68bb3d75b44da7bbc71843b19c9f2b9cb9b0f4ab7a5a4329/pyyaml-6.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:5498cd1645aa724a7c71c8f378eb29ebe23da2fc0d7a08071d89469bf1d2defb", size = 140246, upload-time = "2025-09-25T21:32:34.663Z" },
- { url = "https://files.pythonhosted.org/packages/9d/8c/f4bd7f6465179953d3ac9bc44ac1a8a3e6122cf8ada906b4f96c60172d43/pyyaml-6.0.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:8d1fab6bb153a416f9aeb4b8763bc0f22a5586065f86f7664fc23339fc1c1fac", size = 181814, upload-time = "2025-09-25T21:32:35.712Z" },
- { url = "https://files.pythonhosted.org/packages/bd/9c/4d95bb87eb2063d20db7b60faa3840c1b18025517ae857371c4dd55a6b3a/pyyaml-6.0.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:34d5fcd24b8445fadc33f9cf348c1047101756fd760b4dacb5c3e99755703310", size = 173809, upload-time = "2025-09-25T21:32:36.789Z" },
- { url = "https://files.pythonhosted.org/packages/92/b5/47e807c2623074914e29dabd16cbbdd4bf5e9b2db9f8090fa64411fc5382/pyyaml-6.0.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:501a031947e3a9025ed4405a168e6ef5ae3126c59f90ce0cd6f2bfc477be31b7", size = 766454, upload-time = "2025-09-25T21:32:37.966Z" },
- { url = "https://files.pythonhosted.org/packages/02/9e/e5e9b168be58564121efb3de6859c452fccde0ab093d8438905899a3a483/pyyaml-6.0.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b3bc83488de33889877a0f2543ade9f70c67d66d9ebb4ac959502e12de895788", size = 836355, upload-time = "2025-09-25T21:32:39.178Z" },
- { url = "https://files.pythonhosted.org/packages/88/f9/16491d7ed2a919954993e48aa941b200f38040928474c9e85ea9e64222c3/pyyaml-6.0.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c458b6d084f9b935061bc36216e8a69a7e293a2f1e68bf956dcd9e6cbcd143f5", size = 794175, upload-time = "2025-09-25T21:32:40.865Z" },
- { url = "https://files.pythonhosted.org/packages/dd/3f/5989debef34dc6397317802b527dbbafb2b4760878a53d4166579111411e/pyyaml-6.0.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7c6610def4f163542a622a73fb39f534f8c101d690126992300bf3207eab9764", size = 755228, upload-time = "2025-09-25T21:32:42.084Z" },
- { url = "https://files.pythonhosted.org/packages/d7/ce/af88a49043cd2e265be63d083fc75b27b6ed062f5f9fd6cdc223ad62f03e/pyyaml-6.0.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:5190d403f121660ce8d1d2c1bb2ef1bd05b5f68533fc5c2ea899bd15f4399b35", size = 789194, upload-time = "2025-09-25T21:32:43.362Z" },
- { url = "https://files.pythonhosted.org/packages/23/20/bb6982b26a40bb43951265ba29d4c246ef0ff59c9fdcdf0ed04e0687de4d/pyyaml-6.0.3-cp314-cp314-win_amd64.whl", hash = "sha256:4a2e8cebe2ff6ab7d1050ecd59c25d4c8bd7e6f400f5f82b96557ac0abafd0ac", size = 156429, upload-time = "2025-09-25T21:32:57.844Z" },
- { url = "https://files.pythonhosted.org/packages/f4/f4/a4541072bb9422c8a883ab55255f918fa378ecf083f5b85e87fc2b4eda1b/pyyaml-6.0.3-cp314-cp314-win_arm64.whl", hash = "sha256:93dda82c9c22deb0a405ea4dc5f2d0cda384168e466364dec6255b293923b2f3", size = 143912, upload-time = "2025-09-25T21:32:59.247Z" },
- { url = "https://files.pythonhosted.org/packages/7c/f9/07dd09ae774e4616edf6cda684ee78f97777bdd15847253637a6f052a62f/pyyaml-6.0.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:02893d100e99e03eda1c8fd5c441d8c60103fd175728e23e431db1b589cf5ab3", size = 189108, upload-time = "2025-09-25T21:32:44.377Z" },
- { url = "https://files.pythonhosted.org/packages/4e/78/8d08c9fb7ce09ad8c38ad533c1191cf27f7ae1effe5bb9400a46d9437fcf/pyyaml-6.0.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c1ff362665ae507275af2853520967820d9124984e0f7466736aea23d8611fba", size = 183641, upload-time = "2025-09-25T21:32:45.407Z" },
- { url = "https://files.pythonhosted.org/packages/7b/5b/3babb19104a46945cf816d047db2788bcaf8c94527a805610b0289a01c6b/pyyaml-6.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6adc77889b628398debc7b65c073bcb99c4a0237b248cacaf3fe8a557563ef6c", size = 831901, upload-time = "2025-09-25T21:32:48.83Z" },
- { url = "https://files.pythonhosted.org/packages/8b/cc/dff0684d8dc44da4d22a13f35f073d558c268780ce3c6ba1b87055bb0b87/pyyaml-6.0.3-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a80cb027f6b349846a3bf6d73b5e95e782175e52f22108cfa17876aaeff93702", size = 861132, upload-time = "2025-09-25T21:32:50.149Z" },
- { url = "https://files.pythonhosted.org/packages/b1/5e/f77dc6b9036943e285ba76b49e118d9ea929885becb0a29ba8a7c75e29fe/pyyaml-6.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:00c4bdeba853cc34e7dd471f16b4114f4162dc03e6b7afcc2128711f0eca823c", size = 839261, upload-time = "2025-09-25T21:32:51.808Z" },
- { url = "https://files.pythonhosted.org/packages/ce/88/a9db1376aa2a228197c58b37302f284b5617f56a5d959fd1763fb1675ce6/pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:66e1674c3ef6f541c35191caae2d429b967b99e02040f5ba928632d9a7f0f065", size = 805272, upload-time = "2025-09-25T21:32:52.941Z" },
- { url = "https://files.pythonhosted.org/packages/da/92/1446574745d74df0c92e6aa4a7b0b3130706a4142b2d1a5869f2eaa423c6/pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:16249ee61e95f858e83976573de0f5b2893b3677ba71c9dd36b9cf8be9ac6d65", size = 829923, upload-time = "2025-09-25T21:32:54.537Z" },
- { url = "https://files.pythonhosted.org/packages/f0/7a/1c7270340330e575b92f397352af856a8c06f230aa3e76f86b39d01b416a/pyyaml-6.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:4ad1906908f2f5ae4e5a8ddfce73c320c2a1429ec52eafd27138b7f1cbe341c9", size = 174062, upload-time = "2025-09-25T21:32:55.767Z" },
- { url = "https://files.pythonhosted.org/packages/f1/12/de94a39c2ef588c7e6455cfbe7343d3b2dc9d6b6b2f40c4c6565744c873d/pyyaml-6.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:ebc55a14a21cb14062aa4162f906cd962b28e2e9ea38f9b4391244cd8de4ae0b", size = 149341, upload-time = "2025-09-25T21:32:56.828Z" },
-]
-
-[[package]]
-name = "requests"
-version = "2.33.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "certifi" },
- { name = "charset-normalizer" },
- { name = "idna" },
- { name = "urllib3" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/34/64/8860370b167a9721e8956ae116825caff829224fbca0ca6e7bf8ddef8430/requests-2.33.0.tar.gz", hash = "sha256:c7ebc5e8b0f21837386ad0e1c8fe8b829fa5f544d8df3b2253bff14ef29d7652", size = 134232, upload-time = "2026-03-25T15:10:41.586Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/56/5d/c814546c2333ceea4ba42262d8c4d55763003e767fa169adc693bd524478/requests-2.33.0-py3-none-any.whl", hash = "sha256:3324635456fa185245e24865e810cecec7b4caf933d7eb133dcde67d48cee69b", size = 65017, upload-time = "2026-03-25T15:10:40.382Z" },
-]
-
[[package]]
name = "respx"
version = "0.22.0"
@@ -1390,7 +1356,8 @@ name = "rich"
version = "14.2.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
- { name = "markdown-it-py" },
+ { name = "markdown-it-py", version = "3.0.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10' or (extra == 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2')" },
+ { name = "markdown-it-py", version = "4.0.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10' or (extra == 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2')" },
{ name = "pygments" },
]
sdist = { url = "https://files.pythonhosted.org/packages/fb/d2/8920e102050a0de7bfabeb4c4614a49248cf8d5d7a8d01885fbb24dc767a/rich-14.2.0.tar.gz", hash = "sha256:73ff50c7c0c1c77c8243079283f4edb376f0f6442433aecb8ce7e6d0b92d1fe4", size = 219990, upload-time = "2025-10-09T14:16:53.064Z" }
@@ -1443,20 +1410,141 @@ wheels = [
]

[[package]]
-name = "termcolor"
-version = "3.3.0"
+name = "time-machine"
+version = "2.19.0"
source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/46/79/cf31d7a93a8fdc6aa0fbb665be84426a8c5a557d9240b6239e9e11e35fc5/termcolor-3.3.0.tar.gz", hash = "sha256:348871ca648ec6a9a983a13ab626c0acce02f515b9e1983332b17af7979521c5", size = 14434, upload-time = "2025-12-29T12:55:21.882Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/33/d1/8bb87d21e9aeb323cc03034f5eaf2c8f69841e40e4853c2627edf8111ed3/termcolor-3.3.0-py3-none-any.whl", hash = "sha256:cf642efadaf0a8ebbbf4bc7a31cec2f9b5f21a9f726f4ccbb08192c9c26f43a5", size = 7734, upload-time = "2025-12-29T12:55:20.718Z" },
+resolution-markers = [
+ "python_full_version < '3.10'",
+]
+dependencies = [
+ { name = "python-dateutil", marker = "python_full_version < '3.10' or (extra == 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2')" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/f8/a4/1b5fdd165f61b67f445fac2a7feb0c655118edef429cd09ff5a8067f7f1d/time_machine-2.19.0.tar.gz", hash = "sha256:7c5065a8b3f2bbb449422c66ef71d114d3f909c276a6469642ecfffb6a0fcd29", size = 14576, upload-time = "2025-08-19T17:22:08.402Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/9d/8f/19125611ebbcb3a14da14cd982b9eb4573e2733db60c9f1fbf6a39534f40/time_machine-2.19.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:b5169018ef47206997b46086ce01881cd3a4666fd2998c9d76a87858ca3e49e9", size = 19659, upload-time = "2025-08-19T17:20:30.062Z" },
+ { url = "https://files.pythonhosted.org/packages/74/da/9b0a928321e7822a3ff96dbd1eae089883848e30e9e1b149b85fb96ba56b/time_machine-2.19.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:85bb7ed440fccf6f6d0c8f7d68d849e7c3d1f771d5e0b2cdf871fa6561da569f", size = 15157, upload-time = "2025-08-19T17:20:31.931Z" },
+ { url = "https://files.pythonhosted.org/packages/36/ff/d7e943422038f5f2161fe2c2d791e64a45be691ef946020b20f3a6efc4d4/time_machine-2.19.0-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:a3b12028af1cdc09ccd595be2168b7b26f206c1e190090b048598fbe278beb8e", size = 32860, upload-time = "2025-08-19T17:20:33.241Z" },
+ { url = "https://files.pythonhosted.org/packages/fc/80/2b0f1070ed9808ee7da7a6da62a4a0b776957cb4d861578348f86446e778/time_machine-2.19.0-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:c261f073086cf081d1443cbf7684148c662659d3d139d06b772bfe3fe7cc71a6", size = 34510, upload-time = "2025-08-19T17:20:34.221Z" },
+ { url = "https://files.pythonhosted.org/packages/ef/b4/48038691c8d89924b36c83335a73adeeb68c884f5a1da08a5b17b8a956f3/time_machine-2.19.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:011954d951230a9f1079f22b39ed1a3a9abb50ee297dfb8c557c46351659d94d", size = 36204, upload-time = "2025-08-19T17:20:35.163Z" },
+ { url = "https://files.pythonhosted.org/packages/37/2e/60e8adb541df195e83cb74b720b2cfb1f22ed99c5a7f8abf2a9ab3442cb5/time_machine-2.19.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b0f83308b29c7872006803f2e77318874eb84d0654f2afe0e48e3822e7a2e39b", size = 34936, upload-time = "2025-08-19T17:20:36.61Z" },
+ { url = "https://files.pythonhosted.org/packages/5e/72/e8cee59c6cd99dd3b25b8001a0253e779a286aa8f44d5b40777cbd66210b/time_machine-2.19.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:39733ef844e2984620ec9382a42d00cccc4757d75a5dd572be8c2572e86e50b9", size = 32932, upload-time = "2025-08-19T17:20:37.901Z" },
+ { url = "https://files.pythonhosted.org/packages/2c/eb/83f300d93c1504965d944e03679f1c943a923bce2d0fdfadef0e2e22cc13/time_machine-2.19.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f8db99f6334432e9ffbf00c215caf2ae9773f17cec08304d77e9e90febc3507b", size = 34010, upload-time = "2025-08-19T17:20:39.202Z" },
+ { url = "https://files.pythonhosted.org/packages/e1/77/f35f2500e04daac5033a22fbfd17e68467822b8406ee77966bf222ccaa26/time_machine-2.19.0-cp310-cp310-win32.whl", hash = "sha256:72bf66cd19e27ffd26516b9cbe676d50c2e0b026153289765dfe0cf406708128", size = 17121, upload-time = "2025-08-19T17:20:40.108Z" },
+ { url = "https://files.pythonhosted.org/packages/db/df/32d3e0404be1760a64a44caab2af34b07e952bfe00a23134fea9ddba3e8a/time_machine-2.19.0-cp310-cp310-win_amd64.whl", hash = "sha256:46f1c945934ce3d6b4f388b8e581fce7f87ec891ea90d7128e19520e434f96f0", size = 17957, upload-time = "2025-08-19T17:20:41.079Z" },
+ { url = "https://files.pythonhosted.org/packages/66/df/598a71a1afb4b509a4587273b76590b16d9110a3e9106f01eedc68d02bb2/time_machine-2.19.0-cp310-cp310-win_arm64.whl", hash = "sha256:fb4897c7a5120a4fd03f0670f332d83b7e55645886cd8864a71944c4c2e5b35b", size = 16821, upload-time = "2025-08-19T17:20:41.967Z" },
+ { url = "https://files.pythonhosted.org/packages/1d/ed/4815ebcc9b6c14273f692b9be38a9b09eae52a7e532407cc61a51912b121/time_machine-2.19.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5ee91664880434d98e41585c3446dac7180ec408c786347451ddfca110d19296", size = 19342, upload-time = "2025-08-19T17:20:43.207Z" },
+ { url = "https://files.pythonhosted.org/packages/ee/08/154cce8b11b60d8238b0b751b8901d369999f4e8f7c3a5f917caa5d95b0b/time_machine-2.19.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ed3732b83a893d1c7b8cabde762968b4dc5680ee0d305b3ecca9bb516f4e3862", size = 14978, upload-time = "2025-08-19T17:20:44.134Z" },
+ { url = "https://files.pythonhosted.org/packages/c7/b7/b689d8c8eeca7af375cfcd64973e49e83aa817cc00f80f98548d42c0eb50/time_machine-2.19.0-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:6ba0303e9cc9f7f947e344f501e26bedfb68fab521e3c2729d370f4f332d2d55", size = 30964, upload-time = "2025-08-19T17:20:45.366Z" },
+ { url = "https://files.pythonhosted.org/packages/80/91/38bf9c79674e95ce32e23c267055f281dff651eec77ed32a677db3dc011a/time_machine-2.19.0-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:2851825b524a988ee459c37c1c26bdfaa7eff78194efb2b562ea497a6f375b0a", size = 32606, upload-time = "2025-08-19T17:20:46.693Z" },
+ { url = "https://files.pythonhosted.org/packages/19/4a/e9222d85d4de68975a5e799f539a9d32f3a134a9101fca0a61fa6aa33d68/time_machine-2.19.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:68d32b09ecfd7fef59255c091e8e7c24dd117f882c4880b5c7ab8c5c32a98f89", size = 34405, upload-time = "2025-08-19T17:20:48.032Z" },
+ { url = "https://files.pythonhosted.org/packages/14/e2/09480d608d42d6876f9ff74593cfc9197a7eb2c31381a74fb2b145575b65/time_machine-2.19.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:60c46ab527bf2fa144b530f639cc9e12803524c9e1f111dc8c8f493bb6586eeb", size = 33181, upload-time = "2025-08-19T17:20:48.937Z" },
+ { url = "https://files.pythonhosted.org/packages/84/64/f9359e000fad32d9066305c48abc527241d608bcdf77c19d67d66e268455/time_machine-2.19.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:56f26ab9f0201c453d18fe76bb7d1cf05fe58c1b9d9cb0c7d243d05132e01292", size = 31036, upload-time = "2025-08-19T17:20:50.276Z" },
+ { url = "https://files.pythonhosted.org/packages/71/0d/fab2aacec71e3e482bd7fce0589381f9414a4a97f8766bddad04ad047b7b/time_machine-2.19.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6c806cf3c1185baa1d807b7f51bed0db7a6506832c961d5d1b4c94c775749bc0", size = 32145, upload-time = "2025-08-19T17:20:51.449Z" },
+ { url = "https://files.pythonhosted.org/packages/44/fb/faeba2405fb27553f7b28db441a500e2064ffdb2dcba001ee315fdd2c121/time_machine-2.19.0-cp311-cp311-win32.whl", hash = "sha256:b30039dfd89855c12138095bee39c540b4633cbc3684580d684ef67a99a91587", size = 17004, upload-time = "2025-08-19T17:20:52.38Z" },
+ { url = "https://files.pythonhosted.org/packages/2f/84/87e483d660ca669426192969280366635c845c3154a9fe750be546ed3afc/time_machine-2.19.0-cp311-cp311-win_amd64.whl", hash = "sha256:13ed8b34430f1de79905877f5600adffa626793ab4546a70a99fb72c6a3350d8", size = 17822, upload-time = "2025-08-19T17:20:53.348Z" },
+ { url = "https://files.pythonhosted.org/packages/41/f4/ebf7bbf5047854a528adaf54a5e8780bc5f7f0104c298ab44566a3053bf8/time_machine-2.19.0-cp311-cp311-win_arm64.whl", hash = "sha256:cc29a50a0257d8750b08056b66d7225daab47606832dea1a69e8b017323bf511", size = 16680, upload-time = "2025-08-19T17:20:54.26Z" },
+ { url = "https://files.pythonhosted.org/packages/9b/aa/7e00614d339e4d687f6e96e312a1566022528427d237ec639df66c4547bc/time_machine-2.19.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:c85cf437dc3c07429456d8d6670ac90ecbd8241dcd0fbf03e8db2800576f91ff", size = 19308, upload-time = "2025-08-19T17:20:55.25Z" },
+ { url = "https://files.pythonhosted.org/packages/ab/3c/bde3c757394f5bca2fbc1528d4117960a26c38f9b160bf471b38d2378d8f/time_machine-2.19.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d9238897e8ef54acdf59f5dff16f59ca0720e7c02d820c56b4397c11db5d3eb9", size = 15019, upload-time = "2025-08-19T17:20:56.204Z" },
+ { url = "https://files.pythonhosted.org/packages/c8/e0/8ca916dd918018352d377f1f5226ee071cfbeb7dbbde2b03d14a411ac2b1/time_machine-2.19.0-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:e312c7d5d6bfffb96c6a7b39ff29e3046de100d7efaa3c01552654cfbd08f14c", size = 33079, upload-time = "2025-08-19T17:20:57.166Z" },
+ { url = "https://files.pythonhosted.org/packages/48/69/184a0209f02dd0cb5e01e8d13cd4c97a5f389c4e3d09b95160dd676ad1e7/time_machine-2.19.0-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:714c40b2c90d1c57cc403382d5a9cf16e504cb525bfe9650095317da3c3d62b5", size = 34925, upload-time = "2025-08-19T17:20:58.117Z" },
+ { url = "https://files.pythonhosted.org/packages/43/42/4bbf4309e8e57cea1086eb99052d97ff6ddecc1ab6a3b07aa4512f8bf963/time_machine-2.19.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2eaa1c675d500dc3ccae19e9fb1feff84458a68c132bbea47a80cc3dd2df7072", size = 36384, upload-time = "2025-08-19T17:20:59.108Z" },
+ { url = "https://files.pythonhosted.org/packages/b1/af/9f510dc1719157348c1a2e87423aed406589070b54b503cb237d9bf3a4fe/time_machine-2.19.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e77a414e9597988af53b2b2e67242c9d2f409769df0d264b6d06fda8ca3360d4", size = 34881, upload-time = "2025-08-19T17:21:00.116Z" },
+ { url = "https://files.pythonhosted.org/packages/ca/28/61764a635c70cc76c76ba582dfdc1a84834cddaeb96789023af5214426b2/time_machine-2.19.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:cd93996970e11c382b04d4937c3cd0b0167adeef14725ece35aae88d8a01733c", size = 32931, upload-time = "2025-08-19T17:21:01.095Z" },
+ { url = "https://files.pythonhosted.org/packages/b6/e0/f028d93b266e6ade8aca5851f76ebbc605b2905cdc29981a2943b43e1a6c/time_machine-2.19.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:8e20a6d8d6e23174bd7e931e134d9610b136db460b249d07e84ecdad029ec352", size = 34241, upload-time = "2025-08-19T17:21:02.052Z" },
+ { url = "https://files.pythonhosted.org/packages/7d/a6/36d1950ed1d3f613158024cf1dcc73db1d9ef0b9117cf51ef2e37dc06499/time_machine-2.19.0-cp312-cp312-win32.whl", hash = "sha256:95afc9bc65228b27be80c2756799c20b8eb97c4ef382a9b762b6d7888bc84099", size = 17021, upload-time = "2025-08-19T17:21:03.374Z" },
+ { url = "https://files.pythonhosted.org/packages/b1/0d/e2dce93355abda3cac69e77fe96566757e98b8fe7fdcbddce89c9ced3f5f/time_machine-2.19.0-cp312-cp312-win_amd64.whl", hash = "sha256:e84909af950e2448f4e2562ea5759c946248c99ab380d2b47d79b62bd76fa236", size = 17857, upload-time = "2025-08-19T17:21:04.331Z" },
+ { url = "https://files.pythonhosted.org/packages/eb/28/50ae6fb83b7feeeca7a461c0dc156cf7ef5e6ef594a600d06634fde6a2cb/time_machine-2.19.0-cp312-cp312-win_arm64.whl", hash = "sha256:0390a1ea9fa7e9d772a39b7c61b34fdcca80eb9ffac339cc0441c6c714c81470", size = 16677, upload-time = "2025-08-19T17:21:05.39Z" },
+ { url = "https://files.pythonhosted.org/packages/a9/b8/24ebce67aa531bae2cbe164bb3f4abc6467dc31f3aead35e77f5a075ea3e/time_machine-2.19.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:5e172866753e6041d3b29f3037dc47c20525176a494a71bbd0998dfdc4f11f2f", size = 19373, upload-time = "2025-08-19T17:21:06.701Z" },
+ { url = "https://files.pythonhosted.org/packages/53/a5/c9a5240fd2f845d3ff9fa26f8c8eaa29f7239af9d65007e61d212250f15b/time_machine-2.19.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f70f68379bd6f542ae6775cce9a4fa3dcc20bf7959c42eaef871c14469e18097", size = 15056, upload-time = "2025-08-19T17:21:07.667Z" },
+ { url = "https://files.pythonhosted.org/packages/b9/92/66cce5d2fb2a5e68459aca85fd18a7e2d216f725988940cd83f96630f2f1/time_machine-2.19.0-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:e69e0b0f694728a00e72891ef8dd00c7542952cb1c87237db594b6b27d504a96", size = 33172, upload-time = "2025-08-19T17:21:08.619Z" },
+ { url = "https://files.pythonhosted.org/packages/ae/20/b499e9ab4364cd466016c33dcdf4f56629ca4c20b865bd4196d229f31d92/time_machine-2.19.0-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:3ae0a8b869574301ec5637e32c270c7384cca5cd6e230f07af9d29271a7fa293", size = 35042, upload-time = "2025-08-19T17:21:09.622Z" },
+ { url = "https://files.pythonhosted.org/packages/41/32/b252d3d32791eb16c07d553c820dbc33d9c7fa771de3d1c602190bded2b7/time_machine-2.19.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:554e4317de90e2f7605ff80d153c8bb56b38c0d0c0279feb17e799521e987b8c", size = 36535, upload-time = "2025-08-19T17:21:10.571Z" },
+ { url = "https://files.pythonhosted.org/packages/98/cf/4d0470062b9742e1b040ab81bad04d1a5d1de09806507bb6188989cfa1a7/time_machine-2.19.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6567a5ec5538ed550539ac29be11b3cb36af1f9894e2a72940cba0292cc7c3c9", size = 34945, upload-time = "2025-08-19T17:21:11.538Z" },
+ { url = "https://files.pythonhosted.org/packages/24/71/2f741b29d98b1c18f6777a32236497c3d3264b6077e431cea4695684c8a1/time_machine-2.19.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:82e9ffe8dfff07b0d810a2ad015a82cd78c6a237f6c7cf185fa7f747a3256f8a", size = 33014, upload-time = "2025-08-19T17:21:12.858Z" },
+ { url = "https://files.pythonhosted.org/packages/e8/83/ca8dba6106562843fd99f672e5aaf95badbc10f4f13f7cfe8d8640a7019d/time_machine-2.19.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:7e1c4e578cdd69b3531d8dd3fbcb92a0cd879dadb912ee37af99c3a9e3c0d285", size = 34350, upload-time = "2025-08-19T17:21:13.923Z" },
+ { url = "https://files.pythonhosted.org/packages/21/7f/34fe540450e18d0a993240100e4b86e8d03d831b92af8bb6ddb2662dc6fc/time_machine-2.19.0-cp313-cp313-win32.whl", hash = "sha256:72dbd4cbc3d96dec9dd281ddfbb513982102776b63e4e039f83afb244802a9e5", size = 17047, upload-time = "2025-08-19T17:21:14.874Z" },
+ { url = "https://files.pythonhosted.org/packages/bf/5d/c8be73df82c7ebe7cd133279670e89b8b110af3ce1412c551caa9d08e625/time_machine-2.19.0-cp313-cp313-win_amd64.whl", hash = "sha256:e17e3e089ac95f9a145ce07ff615e3c85674f7de36f2d92aaf588493a23ffb4b", size = 17868, upload-time = "2025-08-19T17:21:15.819Z" },
+ { url = "https://files.pythonhosted.org/packages/92/13/2dfd3b8fb285308f61cd7aa9bfa96f46ddf916e3549a0f0afd094c556599/time_machine-2.19.0-cp313-cp313-win_arm64.whl", hash = "sha256:149072aff8e3690e14f4916103d898ea0d5d9c95531b6aa0995251c299533f7b", size = 16710, upload-time = "2025-08-19T17:21:16.748Z" },
+ { url = "https://files.pythonhosted.org/packages/05/c1/deebb361727d2c5790f9d4d874be1b19afd41f4375581df465e6718b46a2/time_machine-2.19.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:f3589fee1ed0ab6ee424a55b0ea1ec694c4ba64cc26895bcd7d99f3d1bc6a28a", size = 20053, upload-time = "2025-08-19T17:21:17.704Z" },
+ { url = "https://files.pythonhosted.org/packages/45/e8/fe3376951e6118d8ec1d1f94066a169b791424fe4a26c7dfc069b153ee08/time_machine-2.19.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:7887e85275c4975fe54df03dcdd5f38bd36be973adc68a8c77e17441c3b443d6", size = 15423, upload-time = "2025-08-19T17:21:18.668Z" },
+ { url = "https://files.pythonhosted.org/packages/9c/c7/f88d95cd1a87c650cf3749b4d64afdaf580297aa18ad7f4b44ec9d252dfc/time_machine-2.19.0-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:ce0be294c209928563fcce1c587963e60ec803436cf1e181acd5bc1e425d554b", size = 39630, upload-time = "2025-08-19T17:21:19.645Z" },
+ { url = "https://files.pythonhosted.org/packages/cc/5d/65a5c48a65357e56ec6f032972e4abd1c02d4fca4b0717a3aaefd19014d4/time_machine-2.19.0-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:a62fd1ab380012c86f4c042010418ed45eb31604f4bf4453e17c9fa60bc56a29", size = 41242, upload-time = "2025-08-19T17:21:20.979Z" },
+ { url = "https://files.pythonhosted.org/packages/f6/f9/fe5209e1615fde0a8cad6c4e857157b150333ed1fe31a7632b08cfe0ebdd/time_machine-2.19.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b25ec853a4530a5800731257f93206b12cbdee85ede964ebf8011b66086a7914", size = 44278, upload-time = "2025-08-19T17:21:21.984Z" },
+ { url = "https://files.pythonhosted.org/packages/4a/3a/a5e5fe9c5d614cde0a9387ff35e8dfd12c5ef6384e4c1a21b04e6e0b905d/time_machine-2.19.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:a430e4d0e0556f021a9c78e9b9f68e5e8910bdace4aa34ed4d1a73e239ed9384", size = 42321, upload-time = "2025-08-19T17:21:23.755Z" },
+ { url = "https://files.pythonhosted.org/packages/a1/c5/56eca774e9162bc1ce59111d2bd69140dc8908c9478c92ec7bd15d547600/time_machine-2.19.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:2415b7495ec4364c8067071e964fbadfe746dd4cdb43983f2f0bd6ebed13315c", size = 39270, upload-time = "2025-08-19T17:21:26.009Z" },
+ { url = "https://files.pythonhosted.org/packages/9b/69/5dd0c420667578169a12acc8c8fd7452e8cfb181e41c9b4ac7e88fa36686/time_machine-2.19.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:dbfc6b90c10f288594e1bf89a728a98cc0030791fd73541bbdc6b090aff83143", size = 40193, upload-time = "2025-08-19T17:21:27.054Z" },
+ { url = "https://files.pythonhosted.org/packages/75/a7/de974d421bd55c9355583427c2a38fb0237bb5fd6614af492ba89dacb2f9/time_machine-2.19.0-cp313-cp313t-win32.whl", hash = "sha256:16f5d81f650c0a4d117ab08036dc30b5f8b262e11a4a0becc458e7f1c011b228", size = 17542, upload-time = "2025-08-19T17:21:28.674Z" },
+ { url = "https://files.pythonhosted.org/packages/76/0a/aa0d05becd5d06ae8d3f16d657dc8cc9400c8d79aef80299de196467ff12/time_machine-2.19.0-cp313-cp313t-win_amd64.whl", hash = "sha256:645699616ec14e147094f601e6ab9553ff6cea37fad9c42720a6d7ed04bcd5dc", size = 18703, upload-time = "2025-08-19T17:21:29.663Z" },
+ { url = "https://files.pythonhosted.org/packages/1f/c0/f785a4c7c73aa176510f7c48b84b49c26be84af0d534deb222e0327f750e/time_machine-2.19.0-cp313-cp313t-win_arm64.whl", hash = "sha256:b32daa965d13237536ea3afaa5ad61ade2b2d9314bc3a20196a0d2e1d7b57c6a", size = 17020, upload-time = "2025-08-19T17:21:30.653Z" },
+ { url = "https://files.pythonhosted.org/packages/ed/97/c5fb51def06c0b2b6735332ad118ab35b4d9b85368792e5b638e99b1b686/time_machine-2.19.0-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:31cb43c8fd2d961f31bed0ff4e0026964d2b35e5de9e0fabbfecf756906d3612", size = 19360, upload-time = "2025-08-19T17:21:31.94Z" },
+ { url = "https://files.pythonhosted.org/packages/2d/4e/2d795f7d6b7f5205ffe737a05bb1cf19d8038233b797062b2ef412b8512b/time_machine-2.19.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:bdf481a75afc6bff3e520db594501975b652f7def21cd1de6aa971d35ba644e6", size = 15033, upload-time = "2025-08-19T17:21:32.934Z" },
+ { url = "https://files.pythonhosted.org/packages/dd/32/9bad501e360b4e758c58fae616ca5f8c7ad974b343f2463a15b2bf77a366/time_machine-2.19.0-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:00bee4bb950ac6a08d62af78e4da0cf2b4fc2abf0de2320d0431bf610db06e7c", size = 33379, upload-time = "2025-08-19T17:21:33.925Z" },
+ { url = "https://files.pythonhosted.org/packages/a3/45/eda0ca4d793dfd162478d6163759b1c6ce7f6e61daa7fd7d62b31f21f87f/time_machine-2.19.0-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:9f02199490906582302ce09edd32394fb393271674c75d7aa76c7a3245f16003", size = 35123, upload-time = "2025-08-19T17:21:34.945Z" },
+ { url = "https://files.pythonhosted.org/packages/f0/5a/97e16325442ae5731fcaac794f0a1ef9980eff8a5491e58201d7eb814a34/time_machine-2.19.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e35726c7ba625f844c13b1fc0d4f81f394eefaee1d3a094a9093251521f2ef15", size = 36588, upload-time = "2025-08-19T17:21:35.975Z" },
+ { url = "https://files.pythonhosted.org/packages/e8/9d/bf0b2ccc930cc4a316f26f1c78d3f313cd0fa13bb7480369b730a8f129db/time_machine-2.19.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:304315023999cd401ff02698870932b893369e1cfeb2248d09f6490507a92e97", size = 35013, upload-time = "2025-08-19T17:21:37.017Z" },
+ { url = "https://files.pythonhosted.org/packages/f0/5a/39ac6a3078174f9715d88364871348b249631f12e76de1b862433b3f8862/time_machine-2.19.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:9765d4f003f263ea8bfd90d2d15447ca4b3dfa181922cf6cf808923b02ac180a", size = 33303, upload-time = "2025-08-19T17:21:38.352Z" },
+ { url = "https://files.pythonhosted.org/packages/b3/ac/d8646baf9f95f2e792a6d7a7b35e92fca253c4a992afff801beafae0e5c2/time_machine-2.19.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:7837ef3fd5911eb9b480909bb93d922737b6bdecea99dfcedb0a03807de9b2d3", size = 34440, upload-time = "2025-08-19T17:21:39.382Z" },
+ { url = "https://files.pythonhosted.org/packages/ce/8b/8b6568c5ae966d80ead03ab537be3c6acf2af06fb501c2d466a3162c6295/time_machine-2.19.0-cp314-cp314-win32.whl", hash = "sha256:4bb5bd43b1bdfac3007b920b51d8e761f024ed465cfeec63ac4296922a4ec428", size = 17162, upload-time = "2025-08-19T17:21:40.381Z" },
+ { url = "https://files.pythonhosted.org/packages/46/a5/211c1ab4566eba5308b2dc001b6349e3a032e3f6afa67ca2f27ea6b27af5/time_machine-2.19.0-cp314-cp314-win_amd64.whl", hash = "sha256:f583bbd0aa8ab4a7c45a684bf636d9e042d466e30bcbae1d13e7541e2cbe7207", size = 18040, upload-time = "2025-08-19T17:21:41.363Z" },
+ { url = "https://files.pythonhosted.org/packages/b8/fc/4c2fb705f6371cb83824da45a8b967514a922fc092a0ef53979334d97a70/time_machine-2.19.0-cp314-cp314-win_arm64.whl", hash = "sha256:f379c6f8a6575a8284592179cf528ce89373f060301323edcc44f1fa1d37be12", size = 16752, upload-time = "2025-08-19T17:21:42.336Z" },
+ { url = "https://files.pythonhosted.org/packages/79/ab/6437d18f31c666b5116c97572a282ac2590a82a0a9867746a6647eaf4613/time_machine-2.19.0-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:a3b8981f9c663b0906b05ab4d0ca211fae4b63b47c6ec26de5374fe56c836162", size = 20057, upload-time = "2025-08-19T17:21:43.35Z" },
+ { url = "https://files.pythonhosted.org/packages/6c/a2/e03639ec2ba7200328bbcad8a2b2b1d5fccca9cceb9481b164a1cabdcb33/time_machine-2.19.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:8e9c6363893e7f52c226afbebb23e825259222d100e67dfd24c8a6d35f1a1907", size = 15430, upload-time = "2025-08-19T17:21:44.725Z" },
+ { url = "https://files.pythonhosted.org/packages/5d/ff/39e63a48e840f3e36ce24846ee51dd99c6dba635659b1750a2993771e88e/time_machine-2.19.0-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:206fcd6c9a6f00cac83db446ad1effc530a8cec244d2780af62db3a2d0a9871b", size = 39622, upload-time = "2025-08-19T17:21:45.821Z" },
+ { url = "https://files.pythonhosted.org/packages/9a/2e/ee5ac79c4954768705801e54817c7d58e07e25a0bb227e775f501f3e2122/time_machine-2.19.0-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:bf33016a1403c123373ffaeff25e26e69d63bf2c63b6163932efed94160db7ef", size = 41235, upload-time = "2025-08-19T17:21:46.783Z" },
+ { url = "https://files.pythonhosted.org/packages/3a/3e/9af5f39525e779185c77285b8bbae15340eeeaa0afb33d458bc8b47d459b/time_machine-2.19.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9247c4bb9bbd3ff584ef4efbdec8efd9f37aa08bcfc4728bde1e489c2cb445bd", size = 44276, upload-time = "2025-08-19T17:21:47.759Z" },
+ { url = "https://files.pythonhosted.org/packages/59/fe/572c7443cc27140bbeae3947279bbd4a120f9e8622253a20637f260b7813/time_machine-2.19.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:77f9bb0b86758d1f2d9352642c874946ad5815df53ef4ca22eb9d532179fe50d", size = 42330, upload-time = "2025-08-19T17:21:48.881Z" },
+ { url = "https://files.pythonhosted.org/packages/cf/24/1a81c2e08ee7dae13ec8ceed27a29afa980c3d63852e42f1e023bf0faa03/time_machine-2.19.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:0b529e262df3b9c449f427385f4d98250828c879168c2e00eec844439f40b370", size = 39281, upload-time = "2025-08-19T17:21:49.907Z" },
+ { url = "https://files.pythonhosted.org/packages/d2/60/6f0d6e5108978ca1a2a4ffb4d1c7e176d9199bb109fd44efe2680c60b52a/time_machine-2.19.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:9199246e31cdc810e5d89cb71d09144c4d745960fdb0824da4994d152aca3303", size = 40201, upload-time = "2025-08-19T17:21:50.953Z" },
+ { url = "https://files.pythonhosted.org/packages/73/b9/3ea4951e8293b0643feb98c0b9a176fa822154f1810835db3f282968ab10/time_machine-2.19.0-cp314-cp314t-win32.whl", hash = "sha256:0fe81bae55b7aefc2c2a34eb552aa82e6c61a86b3353a3c70df79b9698cb02ca", size = 17743, upload-time = "2025-08-19T17:21:51.948Z" },
+ { url = "https://files.pythonhosted.org/packages/e4/8b/cd802884ca8a98e2b6cdc2397d57dd12ff8a7d1481e06fc3fad3d4e7e5ff/time_machine-2.19.0-cp314-cp314t-win_amd64.whl", hash = "sha256:7253791b8d7e7399fbeed7a8193cb01bc004242864306288797056badbdaf80b", size = 18956, upload-time = "2025-08-19T17:21:52.997Z" },
+ { url = "https://files.pythonhosted.org/packages/c6/49/cabb1593896082fd55e34768029b8b0ca23c9be8b2dc127e0fc14796d33e/time_machine-2.19.0-cp314-cp314t-win_arm64.whl", hash = "sha256:536bd1ac31ab06a1522e7bf287602188f502dc19d122b1502c4f60b1e8efac79", size = 17068, upload-time = "2025-08-19T17:21:54.064Z" },
+ { url = "https://files.pythonhosted.org/packages/d6/05/0608376c3167afe6cf7cdfd2b05c142ea4c42616eee9ba06d1799965806a/time_machine-2.19.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:d8bb00b30ec9fe56d01e9812df1ffe39f331437cef9bfaebcc81c83f7f8f8ee2", size = 19659, upload-time = "2025-08-19T17:21:55.426Z" },
+ { url = "https://files.pythonhosted.org/packages/11/c4/72eb8c7b36830cf36c51d7bc2f1ac313d68881c3a58040fb6b42c4523d20/time_machine-2.19.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d821c60efc08a97cc11e5482798e6fd5eba5c0f22a02db246b50895dbdc0de41", size = 15153, upload-time = "2025-08-19T17:21:56.505Z" },
+ { url = "https://files.pythonhosted.org/packages/89/1a/0782e1f5c8ab8809ebd992709e1bb69d67600191baa023af7a5d32023a3c/time_machine-2.19.0-cp39-cp39-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:fb051aec7b3b6e96a200d911c225901e6133ff3da11e470e24111a53bbc13637", size = 32555, upload-time = "2025-08-19T17:21:57.74Z" },
+ { url = "https://files.pythonhosted.org/packages/94/b0/8ef58e2f6321851d5900ca3d18044938832c2ed42a2ac7570ca6aa29768a/time_machine-2.19.0-cp39-cp39-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:fe59909d95a2ef5e01ce3354fdea3908404c2932c2069f00f66dff6f27e9363e", size = 34185, upload-time = "2025-08-19T17:21:59.361Z" },
+ { url = "https://files.pythonhosted.org/packages/82/74/ce0c9867f788c1fb22c417ec1aae47a24117e53d51f6ff97d7c6ca5392f6/time_machine-2.19.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:29e84b8682645b16eb6f9e8ec11c35324ad091841a11cf4fc3fc7f6119094c89", size = 35917, upload-time = "2025-08-19T17:22:00.421Z" },
+ { url = "https://files.pythonhosted.org/packages/d2/70/6f97a8f552dbaa66feb10170b5726dab74bc531673d1ed9d6f271547e54c/time_machine-2.19.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:4a11f1c0e0d06023dc01614c964e256138913551d3ae6dca5148f79081156336", size = 34584, upload-time = "2025-08-19T17:22:01.447Z" },
+ { url = "https://files.pythonhosted.org/packages/48/c8/cf139088ce537c15d7f03cf56ec317d3a5cfb520e30aa711ea0248d0ae8a/time_machine-2.19.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:57a235a6307c54df50e69f1906e2f199e47da91bde4b886ee05aff57fe4b6bf6", size = 32608, upload-time = "2025-08-19T17:22:02.548Z" },
+ { url = "https://files.pythonhosted.org/packages/b1/17/0ec41ef7a30c6753fb226a28b74162b264b35724905ced4098f2f5076ded/time_machine-2.19.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:426aba552f7af9604adad9ef570c859af7c1081d878db78089fac159cd911b0a", size = 33686, upload-time = "2025-08-19T17:22:03.606Z" },
+ { url = "https://files.pythonhosted.org/packages/b0/19/586f15159083ec84f178d494c60758c46603b00c9641b04deb63f1950128/time_machine-2.19.0-cp39-cp39-win32.whl", hash = "sha256:67772c7197a3a712d1b970ed545c6e98db73524bd90e245fd3c8fa7ad7630768", size = 17133, upload-time = "2025-08-19T17:22:04.989Z" },
+ { url = "https://files.pythonhosted.org/packages/6a/c2/bfe4b906a9fe0bf2d011534314212ed752d6b8f392c9c82f6ac63dccc5ab/time_machine-2.19.0-cp39-cp39-win_amd64.whl", hash = "sha256:011d7859089263204dc5fdf83dce7388f986fe833c9381d6106b4edfda2ebd3e", size = 17972, upload-time = "2025-08-19T17:22:06.026Z" },
+ { url = "https://files.pythonhosted.org/packages/5d/73/182343eba05aa5787732aaa68f3b3feb5e40ddf86b928ae941be45646393/time_machine-2.19.0-cp39-cp39-win_arm64.whl", hash = "sha256:e1af66550fa4685434f00002808a525f176f1f92746646c0019bb86fbff48b27", size = 16820, upload-time = "2025-08-19T17:22:07.227Z" },
]

[[package]]
name = "time-machine"
version = "3.2.0"
source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "python_full_version >= '3.14' and extra != 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2'",
+ "python_full_version >= '3.10' and python_full_version < '3.14' and extra != 'group-10-ogx-client-pydantic-v1' and extra == 'group-10-ogx-client-pydantic-v2'",
+ "python_full_version >= '3.10' and extra == 'group-10-ogx-client-pydantic-v1' and extra != 'group-10-ogx-client-pydantic-v2'",
+ "python_full_version >= '3.10' and extra != 'group-10-ogx-client-pydantic-v1' and extra != 'group-10-ogx-client-pydantic-v2'",
+]
sdist = { url = "https://files.pythonhosted.org/packages/02/fc/37b02f6094dbb1f851145330460532176ed2f1dc70511a35828166c41e52/time_machine-3.2.0.tar.gz", hash = "sha256:a4ddd1cea17b8950e462d1805a42b20c81eb9aafc8f66b392dd5ce997e037d79", size = 14804, upload-time = "2025-12-17T23:33:02.599Z" }
wheels = [
+ { url = "https://files.pythonhosted.org/packages/9c/31/6bf41cb4a326230518d9b76c910dfc11d4fc23444d1cbfdf2d7652bd99f4/time_machine-3.2.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:68142c070e78b62215d8029ec7394905083a4f9aacb0a2a11514ce70b5951b13", size = 19447, upload-time = "2025-12-17T23:31:30.181Z" },
+ { url = "https://files.pythonhosted.org/packages/fa/14/d71ce771712e1cbfa15d8c24452225109262b16cb6caaf967e9f60662b67/time_machine-3.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:161bbd0648802ffdfcb4bb297ecb26b3009684a47d3a4dedb90bc549df4fa2ad", size = 15432, upload-time = "2025-12-17T23:31:31.381Z" },
+ { url = "https://files.pythonhosted.org/packages/8b/d6/dcb43a11f8029561996fad58ff9d3dc5e6d7f32b74f0745a2965d7e4b4f3/time_machine-3.2.0-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:1359ba8c258be695ba69253bc84db882fd616fe69b426cc6056536da2c7bf68e", size = 32956, upload-time = "2025-12-17T23:31:32.469Z" },
+ { url = "https://files.pythonhosted.org/packages/77/da/d802cd3c335c414f9b11b479f7459aa72df5de6485c799966cfdf8856d53/time_machine-3.2.0-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:c85b169998ca2c24a78fb214586ec11c4cad56d9c38f55ad8326235cb481c884", size = 34556, upload-time = "2025-12-17T23:31:33.946Z" },
+ { url = "https://files.pythonhosted.org/packages/85/ee/51ad553514ab0b940c7c82c6e1519dd10fd06ac07b32039a1d153ef09c88/time_machine-3.2.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:65b9367cb8a10505bc8f67da0da514ba20fa816fc47e11f434f7c60350322b4c", size = 36101, upload-time = "2025-12-17T23:31:35.462Z" },
+ { url = "https://files.pythonhosted.org/packages/11/39/938b111b5bb85a2b07502d0f9d8a704fc75bd760d62e76bce23c89ed16c9/time_machine-3.2.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:9faca6a0f1973d7df3233c951fc2a11ff0c54df74087d8aaf41ae3deb19d0893", size = 34905, upload-time = "2025-12-17T23:31:36.543Z" },
+ { url = "https://files.pythonhosted.org/packages/dd/50/0951f73b23e76455de0b4a3a58ac5a24bd8d10489624b1c5e03f10c6fc0b/time_machine-3.2.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:213b1ada7f385d467e598999b642eda4a8e89ae10ad5dc4f5d8f672cbf604261", size = 33012, upload-time = "2025-12-17T23:31:37.967Z" },
+ { url = "https://files.pythonhosted.org/packages/4f/95/5304912d3dcecc4e14ed222dbe0396352efdf8497534abc3c9edd67a7528/time_machine-3.2.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:160b6afd94c39855af04d39c58e4cf602406abd6d79427ab80e830ea71789cfb", size = 34104, upload-time = "2025-12-17T23:31:39.449Z" },
+ { url = "https://files.pythonhosted.org/packages/d4/1c/af56518652ec7adac4ced193b7a42c4ff354fef28a412b3b5ffa5763aead/time_machine-3.2.0-cp310-cp310-win32.whl", hash = "sha256:c15d9ac257c78c124d112e4fc91fa9f3dcb004bdda913c19f0e7368d713cf080", size = 17468, upload-time = "2025-12-17T23:31:40.432Z" },
+ { url = "https://files.pythonhosted.org/packages/48/15/0213f00ca3cf6fe1c9fdbd7fd467e801052fc85534f30c0e4684bd474190/time_machine-3.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:3bf0f428487f93b8fe9d27aa01eccc817885da3290b467341b4a4a795e1d1891", size = 18313, upload-time = "2025-12-17T23:31:41.617Z" },
+ { url = "https://files.pythonhosted.org/packages/77/e4/811f96aa7a634b2b264d9a476f3400e710744dda503b4ad87a5c76db32c9/time_machine-3.2.0-cp310-cp310-win_arm64.whl", hash = "sha256:347f6be2129fcd35b1c94b9387fcb2cbe7949b1e649228c5f22949a811b78976", size = 17037, upload-time = "2025-12-17T23:31:42.924Z" },
+ { url = "https://files.pythonhosted.org/packages/f5/e1/03aae5fbaa53859f665094af696338fc7cae733d926a024af69982712350/time_machine-3.2.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:c188a9dda9fcf975022f1b325b466651b96a4dfc223c523ed7ed8d979f9bf3e8", size = 19143, upload-time = "2025-12-17T23:31:44.258Z" },
+ { url = "https://files.pythonhosted.org/packages/75/8f/98cb17bebb52b22ff4ec26984dd44280f9c71353c3bae0640a470e6683e5/time_machine-3.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:17245f1cc2dd13f9d63a174be59bb2684a9e5e0a112ab707e37be92068cd655f", size = 15273, upload-time = "2025-12-17T23:31:45.246Z" },
+ { url = "https://files.pythonhosted.org/packages/dd/2f/ca11e4a7897234bb9331fcc5f4ed4714481ba4012370cc79a0ae8c42ea0a/time_machine-3.2.0-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:d9bd1de1996e76efd36ae15970206c5089fb3728356794455bd5cd8d392b5537", size = 31049, upload-time = "2025-12-17T23:31:46.613Z" },
+ { url = "https://files.pythonhosted.org/packages/cf/ad/d17d83a59943094e6b6c6a3743caaf6811b12203c3e07a30cc7bcc2ab7ee/time_machine-3.2.0-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:98493cd50e8b7f941eab69b9e18e697ad69db1a0ec1959f78f3d7b0387107e5c", size = 32632, upload-time = "2025-12-17T23:31:47.72Z" },
+ { url = "https://files.pythonhosted.org/packages/71/50/d60576d047a0dfb5638cdfb335e9c3deb6e8528544fa0b3966a8480f72b7/time_machine-3.2.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:31f2a33d595d9f91eb9bc7f157f0dc5721f5789f4c4a9e8b852cdedb2a7d9b16", size = 34289, upload-time = "2025-12-17T23:31:48.913Z" },
+ { url = "https://files.pythonhosted.org/packages/fa/fe/4afa602dbdebddde6d0ea4a7fe849e49b9bb85dc3fb415725a87ccb4b471/time_machine-3.2.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9f78ac4213c10fbc44283edd1a29cfb7d3382484f4361783ddc057292aaa1889", size = 33175, upload-time = "2025-12-17T23:31:50.611Z" },
+ { url = "https://files.pythonhosted.org/packages/0d/87/c152e23977c1d7d7c94eb3ed3ea45cc55971796205125c6fdff40db2c60f/time_machine-3.2.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:c1326b09e947b360926d529a96d1d9e126ce120359b63b506ecdc6ee20755c23", size = 31170, upload-time = "2025-12-17T23:31:51.645Z" },
+ { url = "https://files.pythonhosted.org/packages/80/af/54acf51d0f3ade3b51eab73df6192937c9a938753ef5456dff65eb8630be/time_machine-3.2.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9f2949f03d15264cc15c38918a2cda8966001f0f4ebe190cbfd9c56d91aed8ac", size = 32292, upload-time = "2025-12-17T23:31:52.803Z" },
+ { url = "https://files.pythonhosted.org/packages/cc/bc/3745963f36e75661a807196428639327a366f4332f35f1f775c074d4062f/time_machine-3.2.0-cp311-cp311-win32.whl", hash = "sha256:6dfe48e0499e6e16751476b9799e67be7514e6ef04cdf39571ef95a279645831", size = 17349, upload-time = "2025-12-17T23:31:54.19Z" },
+ { url = "https://files.pythonhosted.org/packages/82/a2/057469232a99d1f5a0160ae7c5bae7b095c9168b333dd598fcbcfbc1c87b/time_machine-3.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:809bdf267a29189c304154873620fe0bcc0c9513295fa46b19e21658231c4915", size = 18191, upload-time = "2025-12-17T23:31:55.472Z" },
+ { url = "https://files.pythonhosted.org/packages/79/d8/bf9c8de57262ee7130d92a6ed49ed6a6e40a36317e46979428d373630c12/time_machine-3.2.0-cp311-cp311-win_arm64.whl", hash = "sha256:a3f4c17fa90f54902a3f8692c75caf67be87edc3429eeb71cb4595da58198f8e", size = 16905, upload-time = "2025-12-17T23:31:56.658Z" },
{ url = "https://files.pythonhosted.org/packages/71/8b/080c8eedcd67921a52ba5bd0e075362062509ab63c86fc1a0442fad241a6/time_machine-3.2.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:cc4bee5b0214d7dc4ebc91f4a4c600f1a598e9b5606ac751f42cb6f6740b1dbb", size = 19255, upload-time = "2025-12-17T23:31:58.057Z" },
{ url = "https://files.pythonhosted.org/packages/66/17/0e5291e9eb705bf8a5a1305f826e979af307bbeb79def4ddbf4b3f9a81e0/time_machine-3.2.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:3ca036304b4460ae2fdc1b52dd8b1fa7cf1464daa427fc49567413c09aa839c1", size = 15360, upload-time = "2025-12-17T23:31:59.048Z" },
{ url = "https://files.pythonhosted.org/packages/8b/e8/9ab87b71d2e2b62463b9b058b7ae7ac09fb57f8fcd88729dec169d304340/time_machine-3.2.0-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:5442735b41d7a2abc2f04579b4ca6047ed4698a8338a4fec92c7c9423e7938cb", size = 33029, upload-time = "2025-12-17T23:32:00.413Z" },
@@ -1515,15 +1603,57 @@ wheels = [
]

[[package]]
-name = "tqdm"
-version = "4.67.3"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "colorama", marker = "sys_platform == 'win32' or (extra == 'group-18-llama-stack-client-pydantic-v1' and extra == 'group-18-llama-stack-client-pydantic-v2')" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/09/a9/6ba95a270c6f1fbcd8dac228323f2777d886cb206987444e4bce66338dd4/tqdm-4.67.3.tar.gz", hash = "sha256:7d825f03f89244ef73f1d4ce193cb1774a8179fd96f31d7e1dcde62092b960bb", size = 169598, upload-time = "2026-02-03T17:35:53.048Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/16/e1/3079a9ff9b8e11b846c6ac5c8b5bfb7ff225eee721825310c91b3b50304f/tqdm-4.67.3-py3-none-any.whl", hash = "sha256:ee1e4c0e59148062281c49d80b25b67771a127c85fc9676d3be5f243206826bf", size = 78374, upload-time = "2026-02-03T17:35:50.982Z" },
+name = "tomli"
+version = "2.4.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/82/30/31573e9457673ab10aa432461bee537ce6cef177667deca369efb79df071/tomli-2.4.0.tar.gz", hash = "sha256:aa89c3f6c277dd275d8e243ad24f3b5e701491a860d5121f2cdd399fbb31fc9c", size = 17477, upload-time = "2026-01-11T11:22:38.165Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/3c/d9/3dc2289e1f3b32eb19b9785b6a006b28ee99acb37d1d47f78d4c10e28bf8/tomli-2.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b5ef256a3fd497d4973c11bf142e9ed78b150d36f5773f1ca6088c230ffc5867", size = 153663, upload-time = "2026-01-11T11:21:45.27Z" },
+ { url = "https://files.pythonhosted.org/packages/51/32/ef9f6845e6b9ca392cd3f64f9ec185cc6f09f0a2df3db08cbe8809d1d435/tomli-2.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5572e41282d5268eb09a697c89a7bee84fae66511f87533a6f88bd2f7b652da9", size = 148469, upload-time = "2026-01-11T11:21:46.873Z" },
+ { url = "https://files.pythonhosted.org/packages/d6/c2/506e44cce89a8b1b1e047d64bd495c22c9f71f21e05f380f1a950dd9c217/tomli-2.4.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:551e321c6ba03b55676970b47cb1b73f14a0a4dce6a3e1a9458fd6d921d72e95", size = 236039, upload-time = "2026-01-11T11:21:48.503Z" },
+ { url = "https://files.pythonhosted.org/packages/b3/40/e1b65986dbc861b7e986e8ec394598187fa8aee85b1650b01dd925ca0be8/tomli-2.4.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5e3f639a7a8f10069d0e15408c0b96a2a828cfdec6fca05296ebcdcc28ca7c76", size = 243007, upload-time = "2026-01-11T11:21:49.456Z" },
+ { url = "https://files.pythonhosted.org/packages/9c/6f/6e39ce66b58a5b7ae572a0f4352ff40c71e8573633deda43f6a379d56b3e/tomli-2.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1b168f2731796b045128c45982d3a4874057626da0e2ef1fdd722848b741361d", size = 240875, upload-time = "2026-01-11T11:21:50.755Z" },
+ { url = "https://files.pythonhosted.org/packages/aa/ad/cb089cb190487caa80204d503c7fd0f4d443f90b95cf4ef5cf5aa0f439b0/tomli-2.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:133e93646ec4300d651839d382d63edff11d8978be23da4cc106f5a18b7d0576", size = 246271, upload-time = "2026-01-11T11:21:51.81Z" },
+ { url = "https://files.pythonhosted.org/packages/0b/63/69125220e47fd7a3a27fd0de0c6398c89432fec41bc739823bcc66506af6/tomli-2.4.0-cp311-cp311-win32.whl", hash = "sha256:b6c78bdf37764092d369722d9946cb65b8767bfa4110f902a1b2542d8d173c8a", size = 96770, upload-time = "2026-01-11T11:21:52.647Z" },
+ { url = "https://files.pythonhosted.org/packages/1e/0d/a22bb6c83f83386b0008425a6cd1fa1c14b5f3dd4bad05e98cf3dbbf4a64/tomli-2.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:d3d1654e11d724760cdb37a3d7691f0be9db5fbdaef59c9f532aabf87006dbaa", size = 107626, upload-time = "2026-01-11T11:21:53.459Z" },
+ { url = "https://files.pythonhosted.org/packages/2f/6d/77be674a3485e75cacbf2ddba2b146911477bd887dda9d8c9dfb2f15e871/tomli-2.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:cae9c19ed12d4e8f3ebf46d1a75090e4c0dc16271c5bce1c833ac168f08fb614", size = 94842, upload-time = "2026-01-11T11:21:54.831Z" },
+ { url = "https://files.pythonhosted.org/packages/3c/43/7389a1869f2f26dba52404e1ef13b4784b6b37dac93bac53457e3ff24ca3/tomli-2.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:920b1de295e72887bafa3ad9f7a792f811847d57ea6b1215154030cf131f16b1", size = 154894, upload-time = "2026-01-11T11:21:56.07Z" },
+ { url = "https://files.pythonhosted.org/packages/e9/05/2f9bf110b5294132b2edf13fe6ca6ae456204f3d749f623307cbb7a946f2/tomli-2.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7d6d9a4aee98fac3eab4952ad1d73aee87359452d1c086b5ceb43ed02ddb16b8", size = 149053, upload-time = "2026-01-11T11:21:57.467Z" },
+ { url = "https://files.pythonhosted.org/packages/e8/41/1eda3ca1abc6f6154a8db4d714a4d35c4ad90adc0bcf700657291593fbf3/tomli-2.4.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:36b9d05b51e65b254ea6c2585b59d2c4cb91c8a3d91d0ed0f17591a29aaea54a", size = 243481, upload-time = "2026-01-11T11:21:58.661Z" },
+ { url = "https://files.pythonhosted.org/packages/d2/6d/02ff5ab6c8868b41e7d4b987ce2b5f6a51d3335a70aa144edd999e055a01/tomli-2.4.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1c8a885b370751837c029ef9bc014f27d80840e48bac415f3412e6593bbc18c1", size = 251720, upload-time = "2026-01-11T11:22:00.178Z" },
+ { url = "https://files.pythonhosted.org/packages/7b/57/0405c59a909c45d5b6f146107c6d997825aa87568b042042f7a9c0afed34/tomli-2.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8768715ffc41f0008abe25d808c20c3d990f42b6e2e58305d5da280ae7d1fa3b", size = 247014, upload-time = "2026-01-11T11:22:01.238Z" },
+ { url = "https://files.pythonhosted.org/packages/2c/0e/2e37568edd944b4165735687cbaf2fe3648129e440c26d02223672ee0630/tomli-2.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7b438885858efd5be02a9a133caf5812b8776ee0c969fea02c45e8e3f296ba51", size = 251820, upload-time = "2026-01-11T11:22:02.727Z" },
+ { url = "https://files.pythonhosted.org/packages/5a/1c/ee3b707fdac82aeeb92d1a113f803cf6d0f37bdca0849cb489553e1f417a/tomli-2.4.0-cp312-cp312-win32.whl", hash = "sha256:0408e3de5ec77cc7f81960c362543cbbd91ef883e3138e81b729fc3eea5b9729", size = 97712, upload-time = "2026-01-11T11:22:03.777Z" },
+ { url = "https://files.pythonhosted.org/packages/69/13/c07a9177d0b3bab7913299b9278845fc6eaaca14a02667c6be0b0a2270c8/tomli-2.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:685306e2cc7da35be4ee914fd34ab801a6acacb061b6a7abca922aaf9ad368da", size = 108296, upload-time = "2026-01-11T11:22:04.86Z" },
+ { url = "https://files.pythonhosted.org/packages/18/27/e267a60bbeeee343bcc279bb9e8fbed0cbe224bc7b2a3dc2975f22809a09/tomli-2.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:5aa48d7c2356055feef06a43611fc401a07337d5b006be13a30f6c58f869e3c3", size = 94553, upload-time = "2026-01-11T11:22:05.854Z" },
+ { url = "https://files.pythonhosted.org/packages/34/91/7f65f9809f2936e1f4ce6268ae1903074563603b2a2bd969ebbda802744f/tomli-2.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:84d081fbc252d1b6a982e1870660e7330fb8f90f676f6e78b052ad4e64714bf0", size = 154915, upload-time = "2026-01-11T11:22:06.703Z" },
+ { url = "https://files.pythonhosted.org/packages/20/aa/64dd73a5a849c2e8f216b755599c511badde80e91e9bc2271baa7b2cdbb1/tomli-2.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9a08144fa4cba33db5255f9b74f0b89888622109bd2776148f2597447f92a94e", size = 149038, upload-time = "2026-01-11T11:22:07.56Z" },
+ { url = "https://files.pythonhosted.org/packages/9e/8a/6d38870bd3d52c8d1505ce054469a73f73a0fe62c0eaf5dddf61447e32fa/tomli-2.4.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c73add4bb52a206fd0c0723432db123c0c75c280cbd67174dd9d2db228ebb1b4", size = 242245, upload-time = "2026-01-11T11:22:08.344Z" },
+ { url = "https://files.pythonhosted.org/packages/59/bb/8002fadefb64ab2669e5b977df3f5e444febea60e717e755b38bb7c41029/tomli-2.4.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1fb2945cbe303b1419e2706e711b7113da57b7db31ee378d08712d678a34e51e", size = 250335, upload-time = "2026-01-11T11:22:09.951Z" },
+ { url = "https://files.pythonhosted.org/packages/a5/3d/4cdb6f791682b2ea916af2de96121b3cb1284d7c203d97d92d6003e91c8d/tomli-2.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bbb1b10aa643d973366dc2cb1ad94f99c1726a02343d43cbc011edbfac579e7c", size = 245962, upload-time = "2026-01-11T11:22:11.27Z" },
+ { url = "https://files.pythonhosted.org/packages/f2/4a/5f25789f9a460bd858ba9756ff52d0830d825b458e13f754952dd15fb7bb/tomli-2.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4cbcb367d44a1f0c2be408758b43e1ffb5308abe0ea222897d6bfc8e8281ef2f", size = 250396, upload-time = "2026-01-11T11:22:12.325Z" },
+ { url = "https://files.pythonhosted.org/packages/aa/2f/b73a36fea58dfa08e8b3a268750e6853a6aac2a349241a905ebd86f3047a/tomli-2.4.0-cp313-cp313-win32.whl", hash = "sha256:7d49c66a7d5e56ac959cb6fc583aff0651094ec071ba9ad43df785abc2320d86", size = 97530, upload-time = "2026-01-11T11:22:13.865Z" },
+ { url = "https://files.pythonhosted.org/packages/3b/af/ca18c134b5d75de7e8dc551c5234eaba2e8e951f6b30139599b53de9c187/tomli-2.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:3cf226acb51d8f1c394c1b310e0e0e61fecdd7adcb78d01e294ac297dd2e7f87", size = 108227, upload-time = "2026-01-11T11:22:15.224Z" },
+ { url = "https://files.pythonhosted.org/packages/22/c3/b386b832f209fee8073c8138ec50f27b4460db2fdae9ffe022df89a57f9b/tomli-2.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:d20b797a5c1ad80c516e41bc1fb0443ddb5006e9aaa7bda2d71978346aeb9132", size = 94748, upload-time = "2026-01-11T11:22:16.009Z" },
+ { url = "https://files.pythonhosted.org/packages/f3/c4/84047a97eb1004418bc10bdbcfebda209fca6338002eba2dc27cc6d13563/tomli-2.4.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:26ab906a1eb794cd4e103691daa23d95c6919cc2fa9160000ac02370cc9dd3f6", size = 154725, upload-time = "2026-01-11T11:22:17.269Z" },
+ { url = "https://files.pythonhosted.org/packages/a8/5d/d39038e646060b9d76274078cddf146ced86dc2b9e8bbf737ad5983609a0/tomli-2.4.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:20cedb4ee43278bc4f2fee6cb50daec836959aadaf948db5172e776dd3d993fc", size = 148901, upload-time = "2026-01-11T11:22:18.287Z" },
+ { url = "https://files.pythonhosted.org/packages/73/e5/383be1724cb30f4ce44983d249645684a48c435e1cd4f8b5cded8a816d3c/tomli-2.4.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:39b0b5d1b6dd03684b3fb276407ebed7090bbec989fa55838c98560c01113b66", size = 243375, upload-time = "2026-01-11T11:22:19.154Z" },
+ { url = "https://files.pythonhosted.org/packages/31/f0/bea80c17971c8d16d3cc109dc3585b0f2ce1036b5f4a8a183789023574f2/tomli-2.4.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a26d7ff68dfdb9f87a016ecfd1e1c2bacbe3108f4e0f8bcd2228ef9a766c787d", size = 250639, upload-time = "2026-01-11T11:22:20.168Z" },
+ { url = "https://files.pythonhosted.org/packages/2c/8f/2853c36abbb7608e3f945d8a74e32ed3a74ee3a1f468f1ffc7d1cb3abba6/tomli-2.4.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:20ffd184fb1df76a66e34bd1b36b4a4641bd2b82954befa32fe8163e79f1a702", size = 246897, upload-time = "2026-01-11T11:22:21.544Z" },
+ { url = "https://files.pythonhosted.org/packages/49/f0/6c05e3196ed5337b9fe7ea003e95fd3819a840b7a0f2bf5a408ef1dad8ed/tomli-2.4.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:75c2f8bbddf170e8effc98f5e9084a8751f8174ea6ccf4fca5398436e0320bc8", size = 254697, upload-time = "2026-01-11T11:22:23.058Z" },
+ { url = "https://files.pythonhosted.org/packages/f3/f5/2922ef29c9f2951883525def7429967fc4d8208494e5ab524234f06b688b/tomli-2.4.0-cp314-cp314-win32.whl", hash = "sha256:31d556d079d72db7c584c0627ff3a24c5d3fb4f730221d3444f3efb1b2514776", size = 98567, upload-time = "2026-01-11T11:22:24.033Z" },
+ { url = "https://files.pythonhosted.org/packages/7b/31/22b52e2e06dd2a5fdbc3ee73226d763b184ff21fc24e20316a44ccc4d96b/tomli-2.4.0-cp314-cp314-win_amd64.whl", hash = "sha256:43e685b9b2341681907759cf3a04e14d7104b3580f808cfde1dfdb60ada85475", size = 108556, upload-time = "2026-01-11T11:22:25.378Z" },
+ { url = "https://files.pythonhosted.org/packages/48/3d/5058dff3255a3d01b705413f64f4306a141a8fd7a251e5a495e3f192a998/tomli-2.4.0-cp314-cp314-win_arm64.whl", hash = "sha256:3d895d56bd3f82ddd6faaff993c275efc2ff38e52322ea264122d72729dca2b2", size = 96014, upload-time = "2026-01-11T11:22:26.138Z" },
+ { url = "https://files.pythonhosted.org/packages/b8/4e/75dab8586e268424202d3a1997ef6014919c941b50642a1682df43204c22/tomli-2.4.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:5b5807f3999fb66776dbce568cc9a828544244a8eb84b84b9bafc080c99597b9", size = 163339, upload-time = "2026-01-11T11:22:27.143Z" },
+ { url = "https://files.pythonhosted.org/packages/06/e3/b904d9ab1016829a776d97f163f183a48be6a4deb87304d1e0116a349519/tomli-2.4.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c084ad935abe686bd9c898e62a02a19abfc9760b5a79bc29644463eaf2840cb0", size = 159490, upload-time = "2026-01-11T11:22:28.399Z" },
+ { url = "https://files.pythonhosted.org/packages/e3/5a/fc3622c8b1ad823e8ea98a35e3c632ee316d48f66f80f9708ceb4f2a0322/tomli-2.4.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0f2e3955efea4d1cfbcb87bc321e00dc08d2bcb737fd1d5e398af111d86db5df", size = 269398, upload-time = "2026-01-11T11:22:29.345Z" },
+ { url = "https://files.pythonhosted.org/packages/fd/33/62bd6152c8bdd4c305ad9faca48f51d3acb2df1f8791b1477d46ff86e7f8/tomli-2.4.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0e0fe8a0b8312acf3a88077a0802565cb09ee34107813bba1c7cd591fa6cfc8d", size = 276515, upload-time = "2026-01-11T11:22:30.327Z" },
+ { url = "https://files.pythonhosted.org/packages/4b/ff/ae53619499f5235ee4211e62a8d7982ba9e439a0fb4f2f351a93d67c1dd2/tomli-2.4.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:413540dce94673591859c4c6f794dfeaa845e98bf35d72ed59636f869ef9f86f", size = 273806, upload-time = "2026-01-11T11:22:32.56Z" },
+ { url = "https://files.pythonhosted.org/packages/47/71/cbca7787fa68d4d0a9f7072821980b39fbb1b6faeb5f5cf02f4a5559fa28/tomli-2.4.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:0dc56fef0e2c1c470aeac5b6ca8cc7b640bb93e92d9803ddaf9ea03e198f5b0b", size = 281340, upload-time = "2026-01-11T11:22:33.505Z" },
+ { url = "https://files.pythonhosted.org/packages/f5/00/d595c120963ad42474cf6ee7771ad0d0e8a49d0f01e29576ee9195d9ecdf/tomli-2.4.0-cp314-cp314t-win32.whl", hash = "sha256:d878f2a6707cc9d53a1be1414bbb419e629c3d6e67f69230217bb663e76b5087", size = 108106, upload-time = "2026-01-11T11:22:34.451Z" },
+ { url = "https://files.pythonhosted.org/packages/de/69/9aa0c6a505c2f80e519b43764f8b4ba93b5a0bbd2d9a9de6e2b24271b9a5/tomli-2.4.0-cp314-cp314t-win_amd64.whl", hash = "sha256:2add28aacc7425117ff6364fe9e06a183bb0251b03f986df0e78e974047571fd", size = 120504, upload-time = "2026-01-11T11:22:35.764Z" },
+ { url = "https://files.pythonhosted.org/packages/b3/9f/f1668c281c58cfae01482f7114a4b88d345e4c140386241a1a24dcc9e7bc/tomli-2.4.0-cp314-cp314t-win_arm64.whl", hash = "sha256:2b1e3b80e1d5e52e40e9b924ec43d81570f0e7d09d11081b797bc4692765a3d4", size = 99561, upload-time = "2026-01-11T11:22:36.624Z" },
+ { url = "https://files.pythonhosted.org/packages/23/d1/136eb2cb77520a31e1f64cbae9d33ec6df0d78bdf4160398e86eec8a8754/tomli-2.4.0-py3-none-any.whl", hash = "sha256:1f776e7d669ebceb01dee46484485f43a4048746235e683bcdffacdf1fb4785a", size = 14477, upload-time = "2026-01-11T11:22:37.446Z" },
]

[[package]]
@@ -1540,55 +1670,13 @@ name = "typing-inspection"
version = "0.4.2"
source = { registry = "https://pypi.org/simple" }
dependencies = [
- { name = "typing-extensions" },
+ { name = "typing-extensions", marker = "extra == 'group-10-ogx-client-pydantic-v2' or extra != 'group-10-ogx-client-pydantic-v1'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/55/e3/70399cb7dd41c10ac53367ae42139cf4b1ca5f36bb3dc6c9d33acdb43655/typing_inspection-0.4.2.tar.gz", hash = "sha256:ba561c48a67c5958007083d386c3295464928b01faa735ab8547c5692e87f464", size = 75949, upload-time = "2025-10-01T02:14:41.687Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/dc/9b/47798a6c91d8bdb567fe2698fe81e0c6b7cb7ef4d13da4114b41d239f65d/typing_inspection-0.4.2-py3-none-any.whl", hash = "sha256:4ed1cacbdc298c220f1bd249ed5287caa16f34d44ef4e9c3d0cbad5b521545e7", size = 14611, upload-time = "2025-10-01T02:14:40.154Z" },
]

-[[package]]
-name = "tzdata"
-version = "2025.3"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/5e/a7/c202b344c5ca7daf398f3b8a477eeb205cf3b6f32e7ec3a6bac0629ca975/tzdata-2025.3.tar.gz", hash = "sha256:de39c2ca5dc7b0344f2eba86f49d614019d29f060fc4ebc8a417896a620b56a7", size = 196772, upload-time = "2025-12-13T17:45:35.667Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/c7/b0/003792df09decd6849a5e39c28b513c06e84436a54440380862b5aeff25d/tzdata-2025.3-py2.py3-none-any.whl", hash = "sha256:06a47e5700f3081aab02b2e513160914ff0694bce9947d6b76ebd6bf57cfc5d1", size = 348521, upload-time = "2025-12-13T17:45:33.889Z" },
-]
-
-[[package]]
-name = "urllib3"
-version = "2.6.3"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/c7/24/5f1b3bdffd70275f6661c76461e25f024d5a38a46f04aaca912426a2b1d3/urllib3-2.6.3.tar.gz", hash = "sha256:1b62b6884944a57dbe321509ab94fd4d3b307075e0c2eae991ac71ee15ad38ed", size = 435556, upload-time = "2026-01-07T16:24:43.925Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/39/08/aaaad47bc4e9dc8c725e68f9d04865dbcb2052843ff09c97b08904852d84/urllib3-2.6.3-py3-none-any.whl", hash = "sha256:bf272323e553dfb2e87d9bfd225ca7b0f467b919d7bbd355436d3fd37cb0acd4", size = 131584, upload-time = "2026-01-07T16:24:42.685Z" },
-]
-
-[[package]]
-name = "virtualenv"
-version = "21.2.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "distlib" },
- { name = "filelock" },
- { name = "platformdirs" },
- { name = "python-discovery" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/aa/92/58199fe10049f9703c2666e809c4f686c54ef0a68b0f6afccf518c0b1eb9/virtualenv-21.2.0.tar.gz", hash = "sha256:1720dc3a62ef5b443092e3f499228599045d7fea4c79199770499df8becf9098", size = 5840618, upload-time = "2026-03-09T17:24:38.013Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/c6/59/7d02447a55b2e55755011a647479041bc92a82e143f96a8195cb33bd0a1c/virtualenv-21.2.0-py3-none-any.whl", hash = "sha256:1bd755b504931164a5a496d217c014d098426cddc79363ad66ac78125f9d908f", size = 5825084, upload-time = "2026-03-09T17:24:35.378Z" },
-]
-
-[[package]]
-name = "wcwidth"
-version = "0.6.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/35/a2/8e3becb46433538a38726c948d3399905a4c7cabd0df578ede5dc51f0ec2/wcwidth-0.6.0.tar.gz", hash = "sha256:cdc4e4262d6ef9a1a57e018384cbeb1208d8abbc64176027e2c2455c81313159", size = 159684, upload-time = "2026-02-06T19:19:40.919Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/68/5a/199c59e0a824a3db2b89c5d2dade7ab5f9624dbf6448dc291b46d5ec94d3/wcwidth-0.6.0-py3-none-any.whl", hash = "sha256:1a3a1e510b553315f8e146c54764f4fb6264ffad731b3d78088cdb1478ffbdad", size = 94189, upload-time = "2026-02-06T19:19:39.646Z" },
-]
-
[[package]]
name = "yarl"
version = "1.22.0"
@@ -1600,6 +1688,38 @@ dependencies = [
]
sdist = { url = "https://files.pythonhosted.org/packages/57/63/0c6ebca57330cd313f6102b16dd57ffaf3ec4c83403dcb45dbd15c6f3ea1/yarl-1.22.0.tar.gz", hash = "sha256:bebf8557577d4401ba8bd9ff33906f1376c877aa78d1fe216ad01b4d6745af71", size = 187169, upload-time = "2025-10-06T14:12:55.963Z" }
wheels = [
+ { url = "https://files.pythonhosted.org/packages/d1/43/a2204825342f37c337f5edb6637040fa14e365b2fcc2346960201d457579/yarl-1.22.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:c7bd6683587567e5a49ee6e336e0612bec8329be1b7d4c8af5687dcdeb67ee1e", size = 140517, upload-time = "2025-10-06T14:08:42.494Z" },
+ { url = "https://files.pythonhosted.org/packages/44/6f/674f3e6f02266428c56f704cd2501c22f78e8b2eeb23f153117cc86fb28a/yarl-1.22.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5cdac20da754f3a723cceea5b3448e1a2074866406adeb4ef35b469d089adb8f", size = 93495, upload-time = "2025-10-06T14:08:46.2Z" },
+ { url = "https://files.pythonhosted.org/packages/b8/12/5b274d8a0f30c07b91b2f02cba69152600b47830fcfb465c108880fcee9c/yarl-1.22.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:07a524d84df0c10f41e3ee918846e1974aba4ec017f990dc735aad487a0bdfdf", size = 94400, upload-time = "2025-10-06T14:08:47.855Z" },
+ { url = "https://files.pythonhosted.org/packages/e2/7f/df1b6949b1fa1aa9ff6de6e2631876ad4b73c4437822026e85d8acb56bb1/yarl-1.22.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e1b329cb8146d7b736677a2440e422eadd775d1806a81db2d4cded80a48efc1a", size = 347545, upload-time = "2025-10-06T14:08:49.683Z" },
+ { url = "https://files.pythonhosted.org/packages/84/09/f92ed93bd6cd77872ab6c3462df45ca45cd058d8f1d0c9b4f54c1704429f/yarl-1.22.0-cp310-cp310-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:75976c6945d85dbb9ee6308cd7ff7b1fb9409380c82d6119bd778d8fcfe2931c", size = 319598, upload-time = "2025-10-06T14:08:51.215Z" },
+ { url = "https://files.pythonhosted.org/packages/c3/97/ac3f3feae7d522cf7ccec3d340bb0b2b61c56cb9767923df62a135092c6b/yarl-1.22.0-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:80ddf7a5f8c86cb3eb4bc9028b07bbbf1f08a96c5c0bc1244be5e8fefcb94147", size = 363893, upload-time = "2025-10-06T14:08:53.144Z" },
+ { url = "https://files.pythonhosted.org/packages/06/49/f3219097403b9c84a4d079b1d7bda62dd9b86d0d6e4428c02d46ab2c77fc/yarl-1.22.0-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d332fc2e3c94dad927f2112395772a4e4fedbcf8f80efc21ed7cdfae4d574fdb", size = 371240, upload-time = "2025-10-06T14:08:55.036Z" },
+ { url = "https://files.pythonhosted.org/packages/35/9f/06b765d45c0e44e8ecf0fe15c9eacbbde342bb5b7561c46944f107bfb6c3/yarl-1.22.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0cf71bf877efeac18b38d3930594c0948c82b64547c1cf420ba48722fe5509f6", size = 346965, upload-time = "2025-10-06T14:08:56.722Z" },
+ { url = "https://files.pythonhosted.org/packages/c5/69/599e7cea8d0fcb1694323b0db0dda317fa3162f7b90166faddecf532166f/yarl-1.22.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:663e1cadaddae26be034a6ab6072449a8426ddb03d500f43daf952b74553bba0", size = 342026, upload-time = "2025-10-06T14:08:58.563Z" },
+ { url = "https://files.pythonhosted.org/packages/95/6f/9dfd12c8bc90fea9eab39832ee32ea48f8e53d1256252a77b710c065c89f/yarl-1.22.0-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:6dcbb0829c671f305be48a7227918cfcd11276c2d637a8033a99a02b67bf9eda", size = 335637, upload-time = "2025-10-06T14:09:00.506Z" },
+ { url = "https://files.pythonhosted.org/packages/57/2e/34c5b4eb9b07e16e873db5b182c71e5f06f9b5af388cdaa97736d79dd9a6/yarl-1.22.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:f0d97c18dfd9a9af4490631905a3f131a8e4c9e80a39353919e2cfed8f00aedc", size = 359082, upload-time = "2025-10-06T14:09:01.936Z" },
+ { url = "https://files.pythonhosted.org/packages/31/71/fa7e10fb772d273aa1f096ecb8ab8594117822f683bab7d2c5a89914c92a/yarl-1.22.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:437840083abe022c978470b942ff832c3940b2ad3734d424b7eaffcd07f76737", size = 357811, upload-time = "2025-10-06T14:09:03.445Z" },
+ { url = "https://files.pythonhosted.org/packages/26/da/11374c04e8e1184a6a03cf9c8f5688d3e5cec83ed6f31ad3481b3207f709/yarl-1.22.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:a899cbd98dce6f5d8de1aad31cb712ec0a530abc0a86bd6edaa47c1090138467", size = 351223, upload-time = "2025-10-06T14:09:05.401Z" },
+ { url = "https://files.pythonhosted.org/packages/82/8f/e2d01f161b0c034a30410e375e191a5d27608c1f8693bab1a08b089ca096/yarl-1.22.0-cp310-cp310-win32.whl", hash = "sha256:595697f68bd1f0c1c159fcb97b661fc9c3f5db46498043555d04805430e79bea", size = 82118, upload-time = "2025-10-06T14:09:11.148Z" },
+ { url = "https://files.pythonhosted.org/packages/62/46/94c76196642dbeae634c7a61ba3da88cd77bed875bf6e4a8bed037505aa6/yarl-1.22.0-cp310-cp310-win_amd64.whl", hash = "sha256:cb95a9b1adaa48e41815a55ae740cfda005758104049a640a398120bf02515ca", size = 86852, upload-time = "2025-10-06T14:09:12.958Z" },
+ { url = "https://files.pythonhosted.org/packages/af/af/7df4f179d3b1a6dcb9a4bd2ffbc67642746fcafdb62580e66876ce83fff4/yarl-1.22.0-cp310-cp310-win_arm64.whl", hash = "sha256:b85b982afde6df99ecc996990d4ad7ccbdbb70e2a4ba4de0aecde5922ba98a0b", size = 82012, upload-time = "2025-10-06T14:09:14.664Z" },
+ { url = "https://files.pythonhosted.org/packages/4d/27/5ab13fc84c76a0250afd3d26d5936349a35be56ce5785447d6c423b26d92/yarl-1.22.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:1ab72135b1f2db3fed3997d7e7dc1b80573c67138023852b6efb336a5eae6511", size = 141607, upload-time = "2025-10-06T14:09:16.298Z" },
+ { url = "https://files.pythonhosted.org/packages/6a/a1/d065d51d02dc02ce81501d476b9ed2229d9a990818332242a882d5d60340/yarl-1.22.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:669930400e375570189492dc8d8341301578e8493aec04aebc20d4717f899dd6", size = 94027, upload-time = "2025-10-06T14:09:17.786Z" },
+ { url = "https://files.pythonhosted.org/packages/c1/da/8da9f6a53f67b5106ffe902c6fa0164e10398d4e150d85838b82f424072a/yarl-1.22.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:792a2af6d58177ef7c19cbf0097aba92ca1b9cb3ffdd9c7470e156c8f9b5e028", size = 94963, upload-time = "2025-10-06T14:09:19.662Z" },
+ { url = "https://files.pythonhosted.org/packages/68/fe/2c1f674960c376e29cb0bec1249b117d11738db92a6ccc4a530b972648db/yarl-1.22.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3ea66b1c11c9150f1372f69afb6b8116f2dd7286f38e14ea71a44eee9ec51b9d", size = 368406, upload-time = "2025-10-06T14:09:21.402Z" },
+ { url = "https://files.pythonhosted.org/packages/95/26/812a540e1c3c6418fec60e9bbd38e871eaba9545e94fa5eff8f4a8e28e1e/yarl-1.22.0-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3e2daa88dc91870215961e96a039ec73e4937da13cf77ce17f9cad0c18df3503", size = 336581, upload-time = "2025-10-06T14:09:22.98Z" },
+ { url = "https://files.pythonhosted.org/packages/0b/f5/5777b19e26fdf98563985e481f8be3d8a39f8734147a6ebf459d0dab5a6b/yarl-1.22.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:ba440ae430c00eee41509353628600212112cd5018d5def7e9b05ea7ac34eb65", size = 388924, upload-time = "2025-10-06T14:09:24.655Z" },
+ { url = "https://files.pythonhosted.org/packages/86/08/24bd2477bd59c0bbd994fe1d93b126e0472e4e3df5a96a277b0a55309e89/yarl-1.22.0-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:e6438cc8f23a9c1478633d216b16104a586b9761db62bfacb6425bac0a36679e", size = 392890, upload-time = "2025-10-06T14:09:26.617Z" },
+ { url = "https://files.pythonhosted.org/packages/46/00/71b90ed48e895667ecfb1eaab27c1523ee2fa217433ed77a73b13205ca4b/yarl-1.22.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4c52a6e78aef5cf47a98ef8e934755abf53953379b7d53e68b15ff4420e6683d", size = 365819, upload-time = "2025-10-06T14:09:28.544Z" },
+ { url = "https://files.pythonhosted.org/packages/30/2d/f715501cae832651d3282387c6a9236cd26bd00d0ff1e404b3dc52447884/yarl-1.22.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:3b06bcadaac49c70f4c88af4ffcfbe3dc155aab3163e75777818092478bcbbe7", size = 363601, upload-time = "2025-10-06T14:09:30.568Z" },
+ { url = "https://files.pythonhosted.org/packages/f8/f9/a678c992d78e394e7126ee0b0e4e71bd2775e4334d00a9278c06a6cce96a/yarl-1.22.0-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:6944b2dc72c4d7f7052683487e3677456050ff77fcf5e6204e98caf785ad1967", size = 358072, upload-time = "2025-10-06T14:09:32.528Z" },
+ { url = "https://files.pythonhosted.org/packages/2c/d1/b49454411a60edb6fefdcad4f8e6dbba7d8019e3a508a1c5836cba6d0781/yarl-1.22.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:d5372ca1df0f91a86b047d1277c2aaf1edb32d78bbcefffc81b40ffd18f027ed", size = 385311, upload-time = "2025-10-06T14:09:34.634Z" },
+ { url = "https://files.pythonhosted.org/packages/87/e5/40d7a94debb8448c7771a916d1861d6609dddf7958dc381117e7ba36d9e8/yarl-1.22.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:51af598701f5299012b8416486b40fceef8c26fc87dc6d7d1f6fc30609ea0aa6", size = 381094, upload-time = "2025-10-06T14:09:36.268Z" },
+ { url = "https://files.pythonhosted.org/packages/35/d8/611cc282502381ad855448643e1ad0538957fc82ae83dfe7762c14069e14/yarl-1.22.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b266bd01fedeffeeac01a79ae181719ff848a5a13ce10075adbefc8f1daee70e", size = 370944, upload-time = "2025-10-06T14:09:37.872Z" },
+ { url = "https://files.pythonhosted.org/packages/2d/df/fadd00fb1c90e1a5a8bd731fa3d3de2e165e5a3666a095b04e31b04d9cb6/yarl-1.22.0-cp311-cp311-win32.whl", hash = "sha256:a9b1ba5610a4e20f655258d5a1fdc7ebe3d837bb0e45b581398b99eb98b1f5ca", size = 81804, upload-time = "2025-10-06T14:09:39.359Z" },
+ { url = "https://files.pythonhosted.org/packages/b5/f7/149bb6f45f267cb5c074ac40c01c6b3ea6d8a620d34b337f6321928a1b4d/yarl-1.22.0-cp311-cp311-win_amd64.whl", hash = "sha256:078278b9b0b11568937d9509b589ee83ef98ed6d561dfe2020e24a9fd08eaa2b", size = 86858, upload-time = "2025-10-06T14:09:41.068Z" },
+ { url = "https://files.pythonhosted.org/packages/2b/13/88b78b93ad3f2f0b78e13bfaaa24d11cbc746e93fe76d8c06bf139615646/yarl-1.22.0-cp311-cp311-win_arm64.whl", hash = "sha256:b6a6f620cfe13ccec221fa312139135166e47ae169f8253f72a0abc0dae94376", size = 81637, upload-time = "2025-10-06T14:09:42.712Z" },
{ url = "https://files.pythonhosted.org/packages/75/ff/46736024fee3429b80a165a732e38e5d5a238721e634ab41b040d49f8738/yarl-1.22.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:e340382d1afa5d32b892b3ff062436d592ec3d692aeea3bef3a5cfe11bbf8c6f", size = 142000, upload-time = "2025-10-06T14:09:44.631Z" },
{ url = "https://files.pythonhosted.org/packages/5a/9a/b312ed670df903145598914770eb12de1bac44599549b3360acc96878df8/yarl-1.22.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f1e09112a2c31ffe8d80be1b0988fa6a18c5d5cad92a9ffbb1c04c91bfe52ad2", size = 94338, upload-time = "2025-10-06T14:09:46.372Z" },
{ url = "https://files.pythonhosted.org/packages/ba/f5/0601483296f09c3c65e303d60c070a5c19fcdbc72daa061e96170785bc7d/yarl-1.22.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:939fe60db294c786f6b7c2d2e121576628468f65453d86b0fe36cb52f987bd74", size = 94909, upload-time = "2025-10-06T14:09:48.648Z" },
@@ -1680,6 +1800,22 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/35/18/55e6011f7c044dc80b98893060773cefcfdbf60dfefb8cb2f58b9bacbd83/yarl-1.22.0-cp314-cp314t-win32.whl", hash = "sha256:8009b3173bcd637be650922ac455946197d858b3630b6d8787aa9e5c4564533e", size = 89056, upload-time = "2025-10-06T14:12:13.317Z" },
{ url = "https://files.pythonhosted.org/packages/f9/86/0f0dccb6e59a9e7f122c5afd43568b1d31b8ab7dda5f1b01fb5c7025c9a9/yarl-1.22.0-cp314-cp314t-win_amd64.whl", hash = "sha256:9fb17ea16e972c63d25d4a97f016d235c78dd2344820eb35bc034bc32012ee27", size = 96292, upload-time = "2025-10-06T14:12:15.398Z" },
{ url = "https://files.pythonhosted.org/packages/48/b7/503c98092fb3b344a179579f55814b613c1fbb1c23b3ec14a7b008a66a6e/yarl-1.22.0-cp314-cp314t-win_arm64.whl", hash = "sha256:9f6d73c1436b934e3f01df1e1b21ff765cd1d28c77dfb9ace207f746d4610ee1", size = 85171, upload-time = "2025-10-06T14:12:16.935Z" },
+ { url = "https://files.pythonhosted.org/packages/94/fd/6480106702a79bcceda5fd9c63cb19a04a6506bd5ce7fd8d9b63742f0021/yarl-1.22.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:3aa27acb6de7a23785d81557577491f6c38a5209a254d1191519d07d8fe51748", size = 141301, upload-time = "2025-10-06T14:12:19.01Z" },
+ { url = "https://files.pythonhosted.org/packages/42/e1/6d95d21b17a93e793e4ec420a925fe1f6a9342338ca7a563ed21129c0990/yarl-1.22.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:af74f05666a5e531289cb1cc9c883d1de2088b8e5b4de48004e5ca8a830ac859", size = 93864, upload-time = "2025-10-06T14:12:21.05Z" },
+ { url = "https://files.pythonhosted.org/packages/32/58/b8055273c203968e89808413ea4c984988b6649baabf10f4522e67c22d2f/yarl-1.22.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:62441e55958977b8167b2709c164c91a6363e25da322d87ae6dd9c6019ceecf9", size = 94706, upload-time = "2025-10-06T14:12:23.287Z" },
+ { url = "https://files.pythonhosted.org/packages/18/91/d7bfbc28a88c2895ecd0da6a874def0c147de78afc52c773c28e1aa233a3/yarl-1.22.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b580e71cac3f8113d3135888770903eaf2f507e9421e5697d6ee6d8cd1c7f054", size = 347100, upload-time = "2025-10-06T14:12:28.527Z" },
+ { url = "https://files.pythonhosted.org/packages/bd/e8/37a1e7b99721c0564b1fc7b0a4d1f595ef6fb8060d82ca61775b644185f7/yarl-1.22.0-cp39-cp39-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e81fda2fb4a07eda1a2252b216aa0df23ebcd4d584894e9612e80999a78fd95b", size = 318902, upload-time = "2025-10-06T14:12:30.528Z" },
+ { url = "https://files.pythonhosted.org/packages/1c/ef/34724449d7ef2db4f22df644f2dac0b8a275d20f585e526937b3ae47b02d/yarl-1.22.0-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:99b6fc1d55782461b78221e95fc357b47ad98b041e8e20f47c1411d0aacddc60", size = 363302, upload-time = "2025-10-06T14:12:32.295Z" },
+ { url = "https://files.pythonhosted.org/packages/8a/04/88a39a5dad39889f192cce8d66cc4c58dbeca983e83f9b6bf23822a7ed91/yarl-1.22.0-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:088e4e08f033db4be2ccd1f34cf29fe994772fb54cfe004bbf54db320af56890", size = 370816, upload-time = "2025-10-06T14:12:34.01Z" },
+ { url = "https://files.pythonhosted.org/packages/6b/1f/5e895e547129413f56c76be2c3ce4b96c797d2d0ff3e16a817d9269b12e6/yarl-1.22.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2e4e1f6f0b4da23e61188676e3ed027ef0baa833a2e633c29ff8530800edccba", size = 346465, upload-time = "2025-10-06T14:12:35.977Z" },
+ { url = "https://files.pythonhosted.org/packages/11/13/a750e9fd6f9cc9ed3a52a70fe58ffe505322f0efe0d48e1fd9ffe53281f5/yarl-1.22.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:84fc3ec96fce86ce5aa305eb4aa9358279d1aa644b71fab7b8ed33fe3ba1a7ca", size = 341506, upload-time = "2025-10-06T14:12:37.788Z" },
+ { url = "https://files.pythonhosted.org/packages/3c/67/bb6024de76e7186611ebe626aec5b71a2d2ecf9453e795f2dbd80614784c/yarl-1.22.0-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:5dbeefd6ca588b33576a01b0ad58aa934bc1b41ef89dee505bf2932b22ddffba", size = 335030, upload-time = "2025-10-06T14:12:39.775Z" },
+ { url = "https://files.pythonhosted.org/packages/a2/be/50b38447fd94a7992996a62b8b463d0579323fcfc08c61bdba949eef8a5d/yarl-1.22.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:14291620375b1060613f4aab9ebf21850058b6b1b438f386cc814813d901c60b", size = 358560, upload-time = "2025-10-06T14:12:41.547Z" },
+ { url = "https://files.pythonhosted.org/packages/e2/89/c020b6f547578c4e3dbb6335bf918f26e2f34ad0d1e515d72fd33ac0c635/yarl-1.22.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:a4fcfc8eb2c34148c118dfa02e6427ca278bfd0f3df7c5f99e33d2c0e81eae3e", size = 357290, upload-time = "2025-10-06T14:12:43.861Z" },
+ { url = "https://files.pythonhosted.org/packages/8c/52/c49a619ee35a402fa3a7019a4fa8d26878fec0d1243f6968bbf516789578/yarl-1.22.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:029866bde8d7b0878b9c160e72305bbf0a7342bcd20b9999381704ae03308dc8", size = 350700, upload-time = "2025-10-06T14:12:46.868Z" },
+ { url = "https://files.pythonhosted.org/packages/ab/c9/f5042d87777bf6968435f04a2bbb15466b2f142e6e47fa4f34d1a3f32f0c/yarl-1.22.0-cp39-cp39-win32.whl", hash = "sha256:4dcc74149ccc8bba31ce1944acee24813e93cfdee2acda3c172df844948ddf7b", size = 82323, upload-time = "2025-10-06T14:12:48.633Z" },
+ { url = "https://files.pythonhosted.org/packages/fd/58/d00f7cad9eba20c4eefac2682f34661d1d1b3a942fc0092eb60e78cfb733/yarl-1.22.0-cp39-cp39-win_amd64.whl", hash = "sha256:10619d9fdee46d20edc49d3479e2f8269d0779f1b031e6f7c2aa1c76be04b7ed", size = 87145, upload-time = "2025-10-06T14:12:50.241Z" },
+ { url = "https://files.pythonhosted.org/packages/c2/a3/70904f365080780d38b919edd42d224b8c4ce224a86950d2eaa2a24366ad/yarl-1.22.0-cp39-cp39-win_arm64.whl", hash = "sha256:dd7afd3f8b0bfb4e0d9fc3c31bfe8a4ec7debe124cfd90619305def3c8ca8cd2", size = 82173, upload-time = "2025-10-06T14:12:51.869Z" },
{ url = "https://files.pythonhosted.org/packages/73/ae/b48f95715333080afb75a4504487cbe142cae1268afc482d06692d605ae6/yarl-1.22.0-py3-none-any.whl", hash = "sha256:1380560bdba02b6b6c90de54133c81c9f2a453dee9912fe58c1dcced1edb7cff", size = 46814, upload-time = "2025-10-06T14:12:53.872Z" },
]