diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 8cccfef..3c4a087 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -61,14 +61,18 @@ jobs:
run: rye build
- name: Get GitHub OIDC Token
- if: github.repository == 'stainless-sdks/parallel-sdk-python'
+ if: |-
+ github.repository == 'stainless-sdks/parallel-sdk-python' &&
+ !startsWith(github.ref, 'refs/heads/stl/')
id: github-oidc
uses: actions/github-script@v8
with:
script: core.setOutput('github_token', await core.getIDToken());
- name: Upload tarball
- if: github.repository == 'stainless-sdks/parallel-sdk-python'
+ if: |-
+ github.repository == 'stainless-sdks/parallel-sdk-python' &&
+ !startsWith(github.ref, 'refs/heads/stl/')
env:
URL: https://pkg.stainless.com/s
AUTH: ${{ steps.github-oidc.outputs.github_token }}
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 3e2bf49..980ea05 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "0.4.1"
+ ".": "0.4.2"
}
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml
index 7272afb..d30019d 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 22
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/parallel-web%2Fparallel-sdk-31ff2f5e2e00191b8f376bd9648d8305d224493bf40dd9f0e73e6668f2558504.yml
-openapi_spec_hash: de646907ddb63402755855b7e78ccbac
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/parallel-web%2Fparallel-sdk-970b780e86490322cc3c7e2b57f140ca6766a3d9f6e0d3402837ebaf7c2183fc.yml
+openapi_spec_hash: 34f784ce2dec796048e6780924bae08f
config_hash: a398d153133d8884bed4e5256a0ae818
diff --git a/CHANGELOG.md b/CHANGELOG.md
index f1df0ee..a52aa9d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,29 @@
# Changelog
+## 0.4.2 (2026-03-09)
+
+Full Changelog: [v0.4.1...v0.4.2](https://github.com/parallel-web/parallel-sdk-python/compare/v0.4.1...v0.4.2)
+
+### Features
+
+* **api:** add betas back in for search ([23493c6](https://github.com/parallel-web/parallel-sdk-python/commit/23493c6ae666649f7ac2af185bb6caf49b9fefee))
+* **api:** sync openapi spec ([e00288b](https://github.com/parallel-web/parallel-sdk-python/commit/e00288bd8ed2a250c9a0d7935a52fd40b9d1bec5))
+* **client:** add custom JSON encoder for extended type support ([b2c8bf9](https://github.com/parallel-web/parallel-sdk-python/commit/b2c8bf9b8246e2e8f1d53a7c8e238dd19b727a77))
+
+
+### Chores
+
+* **ci:** skip uploading artifacts on stainless-internal branches ([bb389c0](https://github.com/parallel-web/parallel-sdk-python/commit/bb389c0078e00e615c1aa650674c006c6e906c81))
+* format all `api.md` files ([b74b93b](https://github.com/parallel-web/parallel-sdk-python/commit/b74b93bf04d678cc283b8f312a3a4c5bb314c468))
+* **internal:** add request options to SSE classes ([00dbc30](https://github.com/parallel-web/parallel-sdk-python/commit/00dbc3027e59adda51eb623d6a724501f70a7720))
+* **internal:** bump dependencies ([f49c841](https://github.com/parallel-web/parallel-sdk-python/commit/f49c841670d88f8fc38e0a17f19242f7570a9aad))
+* **internal:** codegen related update ([1b7c8ff](https://github.com/parallel-web/parallel-sdk-python/commit/1b7c8ff1969c65a422d6bcfcdc01c5b0c477b45b))
+* **internal:** fix lint error on Python 3.14 ([cb3f364](https://github.com/parallel-web/parallel-sdk-python/commit/cb3f3645bc67b98235a732f357d8a24fdf164032))
+* **internal:** make `test_proxy_environment_variables` more resilient ([d3ba149](https://github.com/parallel-web/parallel-sdk-python/commit/d3ba149917deab7ee28a3f38bd7b1f3f4bd2b9c6))
+* **internal:** make `test_proxy_environment_variables` more resilient to env ([1e1d858](https://github.com/parallel-web/parallel-sdk-python/commit/1e1d858e7c21785744d94ceef33e655dcf75eacc))
+* **test:** do not count install time for mock server timeout ([9766097](https://github.com/parallel-web/parallel-sdk-python/commit/9766097052cc86f0081cfe38b25d7dbf90232438))
+* update mock server docs ([028965c](https://github.com/parallel-web/parallel-sdk-python/commit/028965c0b2868051617b266493466f9fc7816705))
+
## 0.4.1 (2026-01-28)
Full Changelog: [v0.4.0...v0.4.1](https://github.com/parallel-web/parallel-sdk-python/compare/v0.4.0...v0.4.1)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 6bde9f8..3276e79 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -88,8 +88,7 @@ $ pip install ./path-to-wheel-file.whl
Most tests require you to [set up a mock server](https://github.com/stoplightio/prism) against the OpenAPI spec to run the tests.
```sh
-# you will need npm installed
-$ npx prism mock path/to/your/openapi.yml
+$ ./scripts/mock
```
```sh
diff --git a/README.md b/README.md
index beeb420..431a0ab 100644
--- a/README.md
+++ b/README.md
@@ -98,6 +98,7 @@ All beta parameters are accessible via the `client.beta` namespace in the SDK.
Yes, all methods support a timeout. For more information, see [Timeouts](#timeouts).
+
**Can I specify retries via the SDK?**
Yes, errors can be retried via the SDK — the default retry count is 2. The maximum number
@@ -292,7 +293,7 @@ response = client.task_run.with_raw_response.create(
print(response.headers.get('X-My-Header'))
task_run = response.parse()
-print(task_run.run_id)
+print(task_run.interaction_id)
```
These methods return an [`APIResponse`](https://github.com/parallel-web/parallel-sdk-python/tree/main/src/parallel/_response.py) object.
diff --git a/api.md b/api.md
index e00a252..66e3651 100644
--- a/api.md
+++ b/api.md
@@ -35,102 +35,4 @@ Convenience methods:
- client.task_run.execute(input, processor, output: OutputSchema) -> TaskRunResult
- client.task_run.execute(input, processor, output: Type[OutputT]) -> ParsedTaskRunResult[OutputT]
-# Beta
-
-Types:
-
-```python
-from parallel.types.beta import (
- ExcerptSettings,
- ExtractError,
- ExtractResponse,
- ExtractResult,
- FetchPolicy,
- SearchResult,
- UsageItem,
- WebSearchResult,
-)
-```
-
-Methods:
-
-- client.beta.extract(\*\*params) -> ExtractResponse
-- client.beta.search(\*\*params) -> SearchResult
-
-## TaskRun
-
-Types:
-
-```python
-from parallel.types.beta import (
- BetaRunInput,
- BetaTaskRunResult,
- ErrorEvent,
- McpServer,
- McpToolCall,
- ParallelBeta,
- TaskRunEvent,
- Webhook,
- TaskRunEventsResponse,
-)
-```
-
-Methods:
-
-- client.beta.task_run.create(\*\*params) -> TaskRun
-- client.beta.task_run.events(run_id) -> TaskRunEventsResponse
-- client.beta.task_run.result(run_id, \*\*params) -> BetaTaskRunResult
-
-## TaskGroup
-
-Types:
-
-```python
-from parallel.types.beta import (
- TaskGroup,
- TaskGroupRunResponse,
- TaskGroupStatus,
- TaskGroupEventsResponse,
- TaskGroupGetRunsResponse,
-)
-```
-
-Methods:
-
-- client.beta.task_group.create(\*\*params) -> TaskGroup
-- client.beta.task_group.retrieve(task_group_id) -> TaskGroup
-- client.beta.task_group.add_runs(task_group_id, \*\*params) -> TaskGroupRunResponse
-- client.beta.task_group.events(task_group_id, \*\*params) -> TaskGroupEventsResponse
-- client.beta.task_group.get_runs(task_group_id, \*\*params) -> TaskGroupGetRunsResponse
-
-## FindAll
-
-Types:
-
-```python
-from parallel.types.beta import (
- FindAllCandidateMatchStatusEvent,
- FindAllEnrichInput,
- FindAllExtendInput,
- FindAllRun,
- FindAllRunInput,
- FindAllRunResult,
- FindAllRunStatusEvent,
- FindAllSchema,
- FindAllSchemaUpdatedEvent,
- IngestInput,
- FindAllEventsResponse,
-)
-```
-
-Methods:
-
-- client.beta.findall.create(\*\*params) -> FindAllRun
-- client.beta.findall.retrieve(findall_id) -> FindAllRun
-- client.beta.findall.cancel(findall_id) -> object
-- client.beta.findall.enrich(findall_id, \*\*params) -> FindAllSchema
-- client.beta.findall.events(findall_id, \*\*params) -> FindAllEventsResponse
-- client.beta.findall.extend(findall_id, \*\*params) -> FindAllSchema
-- client.beta.findall.ingest(\*\*params) -> FindAllSchema
-- client.beta.findall.result(findall_id) -> FindAllRunResult
-- client.beta.findall.schema(findall_id) -> FindAllSchema
+# [Beta](src/parallel/resources/beta/api.md)
diff --git a/pyproject.toml b/pyproject.toml
index c1f6b0c..12646c4 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "parallel-web"
-version = "0.4.1"
+version = "0.4.2"
description = "The official Python library for the Parallel API"
dynamic = ["readme"]
license = "MIT"
@@ -70,7 +70,7 @@ format = { chain = [
# run formatting again to fix any inconsistencies when imports are stripped
"format:ruff",
]}
-"format:docs" = "python scripts/utils/ruffen-docs.py README.md api.md"
+"format:docs" = "bash -c 'python scripts/utils/ruffen-docs.py README.md $(find . -type f -name api.md)'"
"format:ruff" = "ruff format"
"lint" = { chain = [
diff --git a/requirements-dev.lock b/requirements-dev.lock
index 6b24ce1..c32a9d9 100644
--- a/requirements-dev.lock
+++ b/requirements-dev.lock
@@ -12,14 +12,14 @@
-e file:.
aiohappyeyeballs==2.6.1
# via aiohttp
-aiohttp==3.13.2
+aiohttp==3.13.3
# via httpx-aiohttp
# via parallel-web
aiosignal==1.4.0
# via aiohttp
annotated-types==0.7.0
# via pydantic
-anyio==4.12.0
+anyio==4.12.1
# via httpx
# via parallel-web
argcomplete==3.6.3
@@ -31,7 +31,7 @@ attrs==25.4.0
# via nox
backports-asyncio-runner==1.2.0
# via pytest-asyncio
-certifi==2025.11.12
+certifi==2026.1.4
# via httpcore
# via httpx
colorama==0.4.6
@@ -64,7 +64,7 @@ httpx==0.28.1
# via httpx-aiohttp
# via parallel-web
# via respx
-httpx-aiohttp==0.1.9
+httpx-aiohttp==0.1.12
# via parallel-web
humanize==4.13.0
# via nox
@@ -72,7 +72,7 @@ idna==3.11
# via anyio
# via httpx
# via yarl
-importlib-metadata==8.7.0
+importlib-metadata==8.7.1
iniconfig==2.1.0
# via pytest
markdown-it-py==3.0.0
@@ -85,14 +85,14 @@ multidict==6.7.0
mypy==1.17.0
mypy-extensions==1.1.0
# via mypy
-nodeenv==1.9.1
+nodeenv==1.10.0
# via pyright
nox==2025.11.12
packaging==25.0
# via dependency-groups
# via nox
# via pytest
-pathspec==0.12.1
+pathspec==1.0.3
# via mypy
platformdirs==4.4.0
# via virtualenv
@@ -118,13 +118,13 @@ python-dateutil==2.9.0.post0
# via time-machine
respx==0.22.0
rich==14.2.0
-ruff==0.14.7
+ruff==0.14.13
six==1.17.0
# via python-dateutil
sniffio==1.3.1
# via parallel-web
time-machine==2.19.0
-tomli==2.3.0
+tomli==2.4.0
# via dependency-groups
# via mypy
# via nox
@@ -144,7 +144,7 @@ typing-extensions==4.15.0
# via virtualenv
typing-inspection==0.4.2
# via pydantic
-virtualenv==20.35.4
+virtualenv==20.36.1
# via nox
yarl==1.22.0
# via aiohttp
diff --git a/requirements.lock b/requirements.lock
index 12e9e32..ff321d5 100644
--- a/requirements.lock
+++ b/requirements.lock
@@ -12,21 +12,21 @@
-e file:.
aiohappyeyeballs==2.6.1
# via aiohttp
-aiohttp==3.13.2
+aiohttp==3.13.3
# via httpx-aiohttp
# via parallel-web
aiosignal==1.4.0
# via aiohttp
annotated-types==0.7.0
# via pydantic
-anyio==4.12.0
+anyio==4.12.1
# via httpx
# via parallel-web
async-timeout==5.0.1
# via aiohttp
attrs==25.4.0
# via aiohttp
-certifi==2025.11.12
+certifi==2026.1.4
# via httpcore
# via httpx
distro==1.9.0
@@ -43,7 +43,7 @@ httpcore==1.0.9
httpx==0.28.1
# via httpx-aiohttp
# via parallel-web
-httpx-aiohttp==0.1.9
+httpx-aiohttp==0.1.12
# via parallel-web
idna==3.11
# via anyio
diff --git a/scripts/mock b/scripts/mock
index 0b28f6e..bcf3b39 100755
--- a/scripts/mock
+++ b/scripts/mock
@@ -21,11 +21,22 @@ echo "==> Starting mock server with URL ${URL}"
# Run prism mock on the given spec
if [ "$1" == "--daemon" ]; then
+ # Pre-install the package so the download doesn't eat into the startup timeout
+ npm exec --package=@stainless-api/prism-cli@5.15.0 -- prism --version
+
npm exec --package=@stainless-api/prism-cli@5.15.0 -- prism mock "$URL" &> .prism.log &
- # Wait for server to come online
+ # Wait for server to come online (max 30s)
echo -n "Waiting for server"
+ attempts=0
while ! grep -q "✖ fatal\|Prism is listening" ".prism.log" ; do
+ attempts=$((attempts + 1))
+ if [ "$attempts" -ge 300 ]; then
+ echo
+ echo "Timed out waiting for Prism server to start"
+ cat .prism.log
+ exit 1
+ fi
echo -n "."
sleep 0.1
done
diff --git a/src/parallel/_base_client.py b/src/parallel/_base_client.py
index 3f29df7..5128667 100644
--- a/src/parallel/_base_client.py
+++ b/src/parallel/_base_client.py
@@ -86,6 +86,7 @@
APIConnectionError,
APIResponseValidationError,
)
+from ._utils._json import openapi_dumps
log: logging.Logger = logging.getLogger(__name__)
@@ -554,8 +555,10 @@ def _build_request(
kwargs["content"] = options.content
elif isinstance(json_data, bytes):
kwargs["content"] = json_data
- else:
- kwargs["json"] = json_data if is_given(json_data) else None
+ elif not files:
+ # Don't set content when JSON is sent as multipart/form-data,
+ # since httpx's content param overrides other body arguments
+ kwargs["content"] = openapi_dumps(json_data) if is_given(json_data) and json_data is not None else None
kwargs["files"] = files
else:
headers.pop("Content-Type", None)
diff --git a/src/parallel/_client.py b/src/parallel/_client.py
index 0e076cb..cf3f898 100644
--- a/src/parallel/_client.py
+++ b/src/parallel/_client.py
@@ -104,6 +104,11 @@ def __init__(
@cached_property
def task_run(self) -> TaskRunResource:
+ """The Task API executes web research and extraction tasks.
+
+    Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade off between cost, latency, and quality. Each processor supports calibrated confidences.
+ - Output metadata: citations, excerpts, reasoning, and confidence per field
+ """
from .resources.task_run import TaskRunResource
return TaskRunResource(self)
@@ -284,6 +289,11 @@ def __init__(
@cached_property
def task_run(self) -> AsyncTaskRunResource:
+ """The Task API executes web research and extraction tasks.
+
+    Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade off between cost, latency, and quality. Each processor supports calibrated confidences.
+ - Output metadata: citations, excerpts, reasoning, and confidence per field
+ """
from .resources.task_run import AsyncTaskRunResource
return AsyncTaskRunResource(self)
@@ -415,6 +425,11 @@ def __init__(self, client: Parallel) -> None:
@cached_property
def task_run(self) -> task_run.TaskRunResourceWithRawResponse:
+ """The Task API executes web research and extraction tasks.
+
+    Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade off between cost, latency, and quality. Each processor supports calibrated confidences.
+ - Output metadata: citations, excerpts, reasoning, and confidence per field
+ """
from .resources.task_run import TaskRunResourceWithRawResponse
return TaskRunResourceWithRawResponse(self._client.task_run)
@@ -434,6 +449,11 @@ def __init__(self, client: AsyncParallel) -> None:
@cached_property
def task_run(self) -> task_run.AsyncTaskRunResourceWithRawResponse:
+ """The Task API executes web research and extraction tasks.
+
+    Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade off between cost, latency, and quality. Each processor supports calibrated confidences.
+ - Output metadata: citations, excerpts, reasoning, and confidence per field
+ """
from .resources.task_run import AsyncTaskRunResourceWithRawResponse
return AsyncTaskRunResourceWithRawResponse(self._client.task_run)
@@ -453,6 +473,11 @@ def __init__(self, client: Parallel) -> None:
@cached_property
def task_run(self) -> task_run.TaskRunResourceWithStreamingResponse:
+ """The Task API executes web research and extraction tasks.
+
+    Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade off between cost, latency, and quality. Each processor supports calibrated confidences.
+ - Output metadata: citations, excerpts, reasoning, and confidence per field
+ """
from .resources.task_run import TaskRunResourceWithStreamingResponse
return TaskRunResourceWithStreamingResponse(self._client.task_run)
@@ -472,6 +497,11 @@ def __init__(self, client: AsyncParallel) -> None:
@cached_property
def task_run(self) -> task_run.AsyncTaskRunResourceWithStreamingResponse:
+ """The Task API executes web research and extraction tasks.
+
+    Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade off between cost, latency, and quality. Each processor supports calibrated confidences.
+ - Output metadata: citations, excerpts, reasoning, and confidence per field
+ """
from .resources.task_run import AsyncTaskRunResourceWithStreamingResponse
return AsyncTaskRunResourceWithStreamingResponse(self._client.task_run)
diff --git a/src/parallel/_compat.py b/src/parallel/_compat.py
index 73a1f3e..020ffeb 100644
--- a/src/parallel/_compat.py
+++ b/src/parallel/_compat.py
@@ -139,6 +139,7 @@ def model_dump(
exclude_defaults: bool = False,
warnings: bool = True,
mode: Literal["json", "python"] = "python",
+ by_alias: bool | None = None,
) -> dict[str, Any]:
if (not PYDANTIC_V1) or hasattr(model, "model_dump"):
return model.model_dump(
@@ -148,13 +149,12 @@ def model_dump(
exclude_defaults=exclude_defaults,
# warnings are not supported in Pydantic v1
warnings=True if PYDANTIC_V1 else warnings,
+ by_alias=by_alias,
)
return cast(
"dict[str, Any]",
model.dict( # pyright: ignore[reportDeprecated, reportUnnecessaryCast]
- exclude=exclude,
- exclude_unset=exclude_unset,
- exclude_defaults=exclude_defaults,
+ exclude=exclude, exclude_unset=exclude_unset, exclude_defaults=exclude_defaults, by_alias=bool(by_alias)
),
)
diff --git a/src/parallel/_response.py b/src/parallel/_response.py
index e24d45a..cf9f5f3 100644
--- a/src/parallel/_response.py
+++ b/src/parallel/_response.py
@@ -152,6 +152,7 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T:
),
response=self.http_response,
client=cast(Any, self._client),
+ options=self._options,
),
)
@@ -162,6 +163,7 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T:
cast_to=extract_stream_chunk_type(self._stream_cls),
response=self.http_response,
client=cast(Any, self._client),
+ options=self._options,
),
)
@@ -175,6 +177,7 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T:
cast_to=cast_to,
response=self.http_response,
client=cast(Any, self._client),
+ options=self._options,
),
)
diff --git a/src/parallel/_streaming.py b/src/parallel/_streaming.py
index e65969e..8550546 100644
--- a/src/parallel/_streaming.py
+++ b/src/parallel/_streaming.py
@@ -4,7 +4,7 @@
import json
import inspect
from types import TracebackType
-from typing import TYPE_CHECKING, Any, Generic, TypeVar, Iterator, AsyncIterator, cast
+from typing import TYPE_CHECKING, Any, Generic, TypeVar, Iterator, Optional, AsyncIterator, cast
from typing_extensions import Self, Protocol, TypeGuard, override, get_origin, runtime_checkable
import httpx
@@ -13,6 +13,7 @@
if TYPE_CHECKING:
from ._client import Parallel, AsyncParallel
+ from ._models import FinalRequestOptions
_T = TypeVar("_T")
@@ -22,7 +23,7 @@ class Stream(Generic[_T]):
"""Provides the core interface to iterate over a synchronous stream response."""
response: httpx.Response
-
+ _options: Optional[FinalRequestOptions] = None
_decoder: SSEBytesDecoder
def __init__(
@@ -31,10 +32,12 @@ def __init__(
cast_to: type[_T],
response: httpx.Response,
client: Parallel,
+ options: Optional[FinalRequestOptions] = None,
) -> None:
self.response = response
self._cast_to = cast_to
self._client = client
+ self._options = options
self._decoder = client._make_sse_decoder()
self._iterator = self.__stream__()
@@ -85,7 +88,7 @@ class AsyncStream(Generic[_T]):
"""Provides the core interface to iterate over an asynchronous stream response."""
response: httpx.Response
-
+ _options: Optional[FinalRequestOptions] = None
_decoder: SSEDecoder | SSEBytesDecoder
def __init__(
@@ -94,10 +97,12 @@ def __init__(
cast_to: type[_T],
response: httpx.Response,
client: AsyncParallel,
+ options: Optional[FinalRequestOptions] = None,
) -> None:
self.response = response
self._cast_to = cast_to
self._client = client
+ self._options = options
self._decoder = client._make_sse_decoder()
self._iterator = self.__stream__()
diff --git a/src/parallel/_utils/_compat.py b/src/parallel/_utils/_compat.py
index dd70323..2c70b29 100644
--- a/src/parallel/_utils/_compat.py
+++ b/src/parallel/_utils/_compat.py
@@ -26,7 +26,7 @@ def is_union(tp: Optional[Type[Any]]) -> bool:
else:
import types
- return tp is Union or tp is types.UnionType
+ return tp is Union or tp is types.UnionType # type: ignore[comparison-overlap]
def is_typeddict(tp: Type[Any]) -> bool:
diff --git a/src/parallel/_utils/_json.py b/src/parallel/_utils/_json.py
new file mode 100644
index 0000000..6058421
--- /dev/null
+++ b/src/parallel/_utils/_json.py
@@ -0,0 +1,35 @@
+import json
+from typing import Any
+from datetime import datetime
+from typing_extensions import override
+
+import pydantic
+
+from .._compat import model_dump
+
+
+def openapi_dumps(obj: Any) -> bytes:
+ """
+ Serialize an object to UTF-8 encoded JSON bytes.
+
+ Extends the standard json.dumps with support for additional types
+ commonly used in the SDK, such as `datetime`, `pydantic.BaseModel`, etc.
+ """
+ return json.dumps(
+ obj,
+ cls=_CustomEncoder,
+ # Uses the same defaults as httpx's JSON serialization
+ ensure_ascii=False,
+ separators=(",", ":"),
+ allow_nan=False,
+ ).encode()
+
+
+class _CustomEncoder(json.JSONEncoder):
+ @override
+ def default(self, o: Any) -> Any:
+ if isinstance(o, datetime):
+ return o.isoformat()
+ if isinstance(o, pydantic.BaseModel):
+ return model_dump(o, exclude_unset=True, mode="json", by_alias=True)
+ return super().default(o)
diff --git a/src/parallel/_version.py b/src/parallel/_version.py
index 528ebb8..1b80dfe 100644
--- a/src/parallel/_version.py
+++ b/src/parallel/_version.py
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
__title__ = "parallel"
-__version__ = "0.4.1" # x-release-please-version
+__version__ = "0.4.2" # x-release-please-version
diff --git a/src/parallel/resources/beta/api.md b/src/parallel/resources/beta/api.md
new file mode 100644
index 0000000..0b56fe7
--- /dev/null
+++ b/src/parallel/resources/beta/api.md
@@ -0,0 +1,99 @@
+# Beta
+
+Types:
+
+```python
+from parallel.types.beta import (
+ ExcerptSettings,
+ ExtractError,
+ ExtractResponse,
+ ExtractResult,
+ FetchPolicy,
+ SearchResult,
+ UsageItem,
+ WebSearchResult,
+)
+```
+
+Methods:
+
+- client.beta.extract(\*\*params) -> ExtractResponse
+- client.beta.search(\*\*params) -> SearchResult
+
+## TaskRun
+
+Types:
+
+```python
+from parallel.types.beta import (
+ BetaRunInput,
+ BetaTaskRunResult,
+ ErrorEvent,
+ McpServer,
+ McpToolCall,
+ ParallelBeta,
+ TaskRunEvent,
+ Webhook,
+ TaskRunEventsResponse,
+)
+```
+
+Methods:
+
+- client.beta.task_run.create(\*\*params) -> TaskRun
+- client.beta.task_run.events(run_id) -> TaskRunEventsResponse
+- client.beta.task_run.result(run_id, \*\*params) -> BetaTaskRunResult
+
+## TaskGroup
+
+Types:
+
+```python
+from parallel.types.beta import (
+ TaskGroup,
+ TaskGroupRunResponse,
+ TaskGroupStatus,
+ TaskGroupEventsResponse,
+ TaskGroupGetRunsResponse,
+)
+```
+
+Methods:
+
+- client.beta.task_group.create(\*\*params) -> TaskGroup
+- client.beta.task_group.retrieve(task_group_id) -> TaskGroup
+- client.beta.task_group.add_runs(task_group_id, \*\*params) -> TaskGroupRunResponse
+- client.beta.task_group.events(task_group_id, \*\*params) -> TaskGroupEventsResponse
+- client.beta.task_group.get_runs(task_group_id, \*\*params) -> TaskGroupGetRunsResponse
+
+## FindAll
+
+Types:
+
+```python
+from parallel.types.beta import (
+ FindAllCandidateMatchStatusEvent,
+ FindAllEnrichInput,
+ FindAllExtendInput,
+ FindAllRun,
+ FindAllRunInput,
+ FindAllRunResult,
+ FindAllRunStatusEvent,
+ FindAllSchema,
+ FindAllSchemaUpdatedEvent,
+ IngestInput,
+ FindAllEventsResponse,
+)
+```
+
+Methods:
+
+- client.beta.findall.create(\*\*params) -> FindAllRun
+- client.beta.findall.retrieve(findall_id) -> FindAllRun
+- client.beta.findall.cancel(findall_id) -> object
+- client.beta.findall.enrich(findall_id, \*\*params) -> FindAllSchema
+- client.beta.findall.events(findall_id, \*\*params) -> FindAllEventsResponse
+- client.beta.findall.extend(findall_id, \*\*params) -> FindAllSchema
+- client.beta.findall.ingest(\*\*params) -> FindAllSchema
+- client.beta.findall.result(findall_id) -> FindAllRunResult
+- client.beta.findall.schema(findall_id) -> FindAllSchema
diff --git a/src/parallel/resources/beta/beta.py b/src/parallel/resources/beta/beta.py
index 402ec56..c2f4368 100644
--- a/src/parallel/resources/beta/beta.py
+++ b/src/parallel/resources/beta/beta.py
@@ -57,14 +57,31 @@
class BetaResource(SyncAPIResource):
@cached_property
def task_run(self) -> TaskRunResource:
+ """The Task API executes web research and extraction tasks.
+
+    Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade off between cost, latency, and quality. Each processor supports calibrated confidences.
+ - Output metadata: citations, excerpts, reasoning, and confidence per field
+ """
return TaskRunResource(self._client)
@cached_property
def task_group(self) -> TaskGroupResource:
+ """
+ The Task Group API is currently in beta and enables batch execution of many independent Task runs with group-level monitoring and failure handling.
+ - Submit hundreds or thousands of Tasks as a single group
+ - Observe group progress and receive results as they complete
+ - Real-time updates via Server-Sent Events (SSE)
+ - Add tasks to an existing group while it is running
+ - Group-level retry and error aggregation
+ Status: beta and subject to change.
+ """
return TaskGroupResource(self._client)
@cached_property
def findall(self) -> FindAllResource:
+ """
+ The FindAll API discovers and evaluates entities that match complex criteria from natural language objectives. Submit a high-level goal and the service automatically generates structured match conditions, discovers relevant candidates, and evaluates each against the criteria. Returns comprehensive results with detailed reasoning, citations, and confidence scores for each match decision. Streaming events and webhooks are supported.
+ """
return FindAllResource(self._client)
@cached_property
@@ -170,7 +187,7 @@ def search(
fetch_policy: Optional[FetchPolicyParam] | Omit = omit,
max_chars_per_result: Optional[int] | Omit = omit,
max_results: Optional[int] | Omit = omit,
- mode: Optional[Literal["one-shot", "agentic"]] | Omit = omit,
+ mode: Optional[Literal["one-shot", "agentic", "fast"]] | Omit = omit,
objective: Optional[str] | Omit = omit,
processor: Optional[Literal["base", "pro"]] | Omit = omit,
search_queries: Optional[SequenceNotStr[str]] | Omit = omit,
@@ -186,9 +203,6 @@ def search(
"""
Searches the web.
- To access this endpoint, pass the `parallel-beta` header with the value
- `search-extract-2025-10-10`.
-
Args:
excerpts: Optional settings to configure excerpt generation.
@@ -196,13 +210,16 @@ def search(
max_chars_per_result: DEPRECATED: Use `excerpts.max_chars_per_result` instead.
- max_results: Upper bound on the number of results to return. May be limited by the processor.
- Defaults to 10 if not provided.
+ max_results: Upper bound on the number of results to return. Defaults to 10 if not provided.
- mode: Presets default values for parameters for different use cases. `one-shot`
- returns more comprehensive results and longer excerpts to answer questions from
- a single response, while `agentic` returns more concise, token-efficient results
- for use in an agentic loop.
+ mode: Presets default values for parameters for different use cases.
+
+ - `one-shot` returns more comprehensive results and longer excerpts to answer
+ questions from a single response
+ - `agentic` returns more concise, token-efficient results for use in an agentic
+ loop
+ - `fast` trades some quality for lower latency, with best results when used with
+ concise and high-quality objective and keyword queries
objective: Natural-language description of what the web search is trying to find. May
include guidance about preferred sources or freshness. At least one of objective
@@ -265,14 +282,31 @@ def search(
class AsyncBetaResource(AsyncAPIResource):
@cached_property
def task_run(self) -> AsyncTaskRunResource:
+ """The Task API executes web research and extraction tasks.
+
+    Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade off between cost, latency, and quality. Each processor supports calibrated confidences.
+ - Output metadata: citations, excerpts, reasoning, and confidence per field
+ """
return AsyncTaskRunResource(self._client)
@cached_property
def task_group(self) -> AsyncTaskGroupResource:
+ """
+ The Task Group API is currently in beta and enables batch execution of many independent Task runs with group-level monitoring and failure handling.
+ - Submit hundreds or thousands of Tasks as a single group
+ - Observe group progress and receive results as they complete
+ - Real-time updates via Server-Sent Events (SSE)
+ - Add tasks to an existing group while it is running
+ - Group-level retry and error aggregation
+ Status: beta and subject to change.
+ """
return AsyncTaskGroupResource(self._client)
@cached_property
def findall(self) -> AsyncFindAllResource:
+ """
+ The FindAll API discovers and evaluates entities that match complex criteria from natural language objectives. Submit a high-level goal and the service automatically generates structured match conditions, discovers relevant candidates, and evaluates each against the criteria. Returns comprehensive results with detailed reasoning, citations, and confidence scores for each match decision. Streaming events and webhooks are supported.
+ """
return AsyncFindAllResource(self._client)
@cached_property
@@ -378,7 +412,7 @@ async def search(
fetch_policy: Optional[FetchPolicyParam] | Omit = omit,
max_chars_per_result: Optional[int] | Omit = omit,
max_results: Optional[int] | Omit = omit,
- mode: Optional[Literal["one-shot", "agentic"]] | Omit = omit,
+ mode: Optional[Literal["one-shot", "agentic", "fast"]] | Omit = omit,
objective: Optional[str] | Omit = omit,
processor: Optional[Literal["base", "pro"]] | Omit = omit,
search_queries: Optional[SequenceNotStr[str]] | Omit = omit,
@@ -394,9 +428,6 @@ async def search(
"""
Searches the web.
- To access this endpoint, pass the `parallel-beta` header with the value
- `search-extract-2025-10-10`.
-
Args:
excerpts: Optional settings to configure excerpt generation.
@@ -404,13 +435,16 @@ async def search(
max_chars_per_result: DEPRECATED: Use `excerpts.max_chars_per_result` instead.
- max_results: Upper bound on the number of results to return. May be limited by the processor.
- Defaults to 10 if not provided.
+ max_results: Upper bound on the number of results to return. Defaults to 10 if not provided.
+
+ mode: Presets default values for parameters for different use cases.
- mode: Presets default values for parameters for different use cases. `one-shot`
- returns more comprehensive results and longer excerpts to answer questions from
- a single response, while `agentic` returns more concise, token-efficient results
- for use in an agentic loop.
+ - `one-shot` returns more comprehensive results and longer excerpts to answer
+ questions from a single response
+ - `agentic` returns more concise, token-efficient results for use in an agentic
+ loop
+ - `fast` trades some quality for lower latency, with best results when used with
+ concise and high-quality objective and keyword queries
objective: Natural-language description of what the web search is trying to find. May
include guidance about preferred sources or freshness. At least one of objective
@@ -483,14 +517,31 @@ def __init__(self, beta: BetaResource) -> None:
@cached_property
def task_run(self) -> TaskRunResourceWithRawResponse:
+ """The Task API executes web research and extraction tasks.
+
+ Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
+ - Output metadata: citations, excerpts, reasoning, and confidence per field
+ """
return TaskRunResourceWithRawResponse(self._beta.task_run)
@cached_property
def task_group(self) -> TaskGroupResourceWithRawResponse:
+ """
+ The Task Group API is currently in beta and enables batch execution of many independent Task runs with group-level monitoring and failure handling.
+ - Submit hundreds or thousands of Tasks as a single group
+ - Observe group progress and receive results as they complete
+ - Real-time updates via Server-Sent Events (SSE)
+ - Add tasks to an existing group while it is running
+ - Group-level retry and error aggregation
+ Status: beta and subject to change.
+ """
return TaskGroupResourceWithRawResponse(self._beta.task_group)
@cached_property
def findall(self) -> FindAllResourceWithRawResponse:
+ """
+ The FindAll API discovers and evaluates entities that match complex criteria from natural language objectives. Submit a high-level goal and the service automatically generates structured match conditions, discovers relevant candidates, and evaluates each against the criteria. Returns comprehensive results with detailed reasoning, citations, and confidence scores for each match decision. Streaming events and webhooks are supported.
+ """
return FindAllResourceWithRawResponse(self._beta.findall)
@@ -507,14 +558,31 @@ def __init__(self, beta: AsyncBetaResource) -> None:
@cached_property
def task_run(self) -> AsyncTaskRunResourceWithRawResponse:
+ """The Task API executes web research and extraction tasks.
+
+ Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
+ - Output metadata: citations, excerpts, reasoning, and confidence per field
+ """
return AsyncTaskRunResourceWithRawResponse(self._beta.task_run)
@cached_property
def task_group(self) -> AsyncTaskGroupResourceWithRawResponse:
+ """
+ The Task Group API is currently in beta and enables batch execution of many independent Task runs with group-level monitoring and failure handling.
+ - Submit hundreds or thousands of Tasks as a single group
+ - Observe group progress and receive results as they complete
+ - Real-time updates via Server-Sent Events (SSE)
+ - Add tasks to an existing group while it is running
+ - Group-level retry and error aggregation
+ Status: beta and subject to change.
+ """
return AsyncTaskGroupResourceWithRawResponse(self._beta.task_group)
@cached_property
def findall(self) -> AsyncFindAllResourceWithRawResponse:
+ """
+ The FindAll API discovers and evaluates entities that match complex criteria from natural language objectives. Submit a high-level goal and the service automatically generates structured match conditions, discovers relevant candidates, and evaluates each against the criteria. Returns comprehensive results with detailed reasoning, citations, and confidence scores for each match decision. Streaming events and webhooks are supported.
+ """
return AsyncFindAllResourceWithRawResponse(self._beta.findall)
@@ -531,14 +599,31 @@ def __init__(self, beta: BetaResource) -> None:
@cached_property
def task_run(self) -> TaskRunResourceWithStreamingResponse:
+ """The Task API executes web research and extraction tasks.
+
+ Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
+ - Output metadata: citations, excerpts, reasoning, and confidence per field
+ """
return TaskRunResourceWithStreamingResponse(self._beta.task_run)
@cached_property
def task_group(self) -> TaskGroupResourceWithStreamingResponse:
+ """
+ The Task Group API is currently in beta and enables batch execution of many independent Task runs with group-level monitoring and failure handling.
+ - Submit hundreds or thousands of Tasks as a single group
+ - Observe group progress and receive results as they complete
+ - Real-time updates via Server-Sent Events (SSE)
+ - Add tasks to an existing group while it is running
+ - Group-level retry and error aggregation
+ Status: beta and subject to change.
+ """
return TaskGroupResourceWithStreamingResponse(self._beta.task_group)
@cached_property
def findall(self) -> FindAllResourceWithStreamingResponse:
+ """
+ The FindAll API discovers and evaluates entities that match complex criteria from natural language objectives. Submit a high-level goal and the service automatically generates structured match conditions, discovers relevant candidates, and evaluates each against the criteria. Returns comprehensive results with detailed reasoning, citations, and confidence scores for each match decision. Streaming events and webhooks are supported.
+ """
return FindAllResourceWithStreamingResponse(self._beta.findall)
@@ -555,12 +640,29 @@ def __init__(self, beta: AsyncBetaResource) -> None:
@cached_property
def task_run(self) -> AsyncTaskRunResourceWithStreamingResponse:
+ """The Task API executes web research and extraction tasks.
+
+ Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
+ - Output metadata: citations, excerpts, reasoning, and confidence per field
+ """
return AsyncTaskRunResourceWithStreamingResponse(self._beta.task_run)
@cached_property
def task_group(self) -> AsyncTaskGroupResourceWithStreamingResponse:
+ """
+ The Task Group API is currently in beta and enables batch execution of many independent Task runs with group-level monitoring and failure handling.
+ - Submit hundreds or thousands of Tasks as a single group
+ - Observe group progress and receive results as they complete
+ - Real-time updates via Server-Sent Events (SSE)
+ - Add tasks to an existing group while it is running
+ - Group-level retry and error aggregation
+ Status: beta and subject to change.
+ """
return AsyncTaskGroupResourceWithStreamingResponse(self._beta.task_group)
@cached_property
def findall(self) -> AsyncFindAllResourceWithStreamingResponse:
+ """
+ The FindAll API discovers and evaluates entities that match complex criteria from natural language objectives. Submit a high-level goal and the service automatically generates structured match conditions, discovers relevant candidates, and evaluates each against the criteria. Returns comprehensive results with detailed reasoning, citations, and confidence scores for each match decision. Streaming events and webhooks are supported.
+ """
return AsyncFindAllResourceWithStreamingResponse(self._beta.findall)
diff --git a/src/parallel/resources/beta/findall.py b/src/parallel/resources/beta/findall.py
index b2cf437..ef5ab71 100644
--- a/src/parallel/resources/beta/findall.py
+++ b/src/parallel/resources/beta/findall.py
@@ -53,6 +53,10 @@
class FindAllResource(SyncAPIResource):
+ """
+ The FindAll API discovers and evaluates entities that match complex criteria from natural language objectives. Submit a high-level goal and the service automatically generates structured match conditions, discovers relevant candidates, and evaluates each against the criteria. Returns comprehensive results with detailed reasoning, citations, and confidence scores for each match decision. Streaming events and webhooks are supported.
+ """
+
@cached_property
def with_raw_response(self) -> FindAllResourceWithRawResponse:
"""
@@ -594,6 +598,10 @@ def schema(
class AsyncFindAllResource(AsyncAPIResource):
+ """
+ The FindAll API discovers and evaluates entities that match complex criteria from natural language objectives. Submit a high-level goal and the service automatically generates structured match conditions, discovers relevant candidates, and evaluates each against the criteria. Returns comprehensive results with detailed reasoning, citations, and confidence scores for each match decision. Streaming events and webhooks are supported.
+ """
+
@cached_property
def with_raw_response(self) -> AsyncFindAllResourceWithRawResponse:
"""
diff --git a/src/parallel/resources/beta/task_group.py b/src/parallel/resources/beta/task_group.py
index 23ebee6..b15eab7 100644
--- a/src/parallel/resources/beta/task_group.py
+++ b/src/parallel/resources/beta/task_group.py
@@ -38,6 +38,16 @@
class TaskGroupResource(SyncAPIResource):
+ """
+ The Task Group API is currently in beta and enables batch execution of many independent Task runs with group-level monitoring and failure handling.
+ - Submit hundreds or thousands of Tasks as a single group
+ - Observe group progress and receive results as they complete
+ - Real-time updates via Server-Sent Events (SSE)
+ - Add tasks to an existing group while it is running
+ - Group-level retry and error aggregation
+ Status: beta and subject to change.
+ """
+
@cached_property
def with_raw_response(self) -> TaskGroupResourceWithRawResponse:
"""
@@ -144,7 +154,8 @@ def add_runs(
Initiates multiple task runs within a TaskGroup.
Args:
- inputs: List of task runs to execute.
+ inputs: List of task runs to execute. Up to 1,000 runs can be specified per request. If
+ you'd like to add more runs, split them across multiple TaskGroup POST requests.
default_task_spec: Specification for a task.
@@ -314,6 +325,16 @@ def get_runs(
class AsyncTaskGroupResource(AsyncAPIResource):
+ """
+ The Task Group API is currently in beta and enables batch execution of many independent Task runs with group-level monitoring and failure handling.
+ - Submit hundreds or thousands of Tasks as a single group
+ - Observe group progress and receive results as they complete
+ - Real-time updates via Server-Sent Events (SSE)
+ - Add tasks to an existing group while it is running
+ - Group-level retry and error aggregation
+ Status: beta and subject to change.
+ """
+
@cached_property
def with_raw_response(self) -> AsyncTaskGroupResourceWithRawResponse:
"""
@@ -420,7 +441,8 @@ async def add_runs(
Initiates multiple task runs within a TaskGroup.
Args:
- inputs: List of task runs to execute.
+ inputs: List of task runs to execute. Up to 1,000 runs can be specified per request. If
+ you'd like to add more runs, split them across multiple TaskGroup POST requests.
default_task_spec: Specification for a task.
diff --git a/src/parallel/resources/beta/task_run.py b/src/parallel/resources/beta/task_run.py
index d823a3e..3fb567c 100644
--- a/src/parallel/resources/beta/task_run.py
+++ b/src/parallel/resources/beta/task_run.py
@@ -33,6 +33,12 @@
class TaskRunResource(SyncAPIResource):
+ """The Task API executes web research and extraction tasks.
+
+ Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
+ - Output metadata: citations, excerpts, reasoning, and confidence per field
+ """
+
@cached_property
def with_raw_response(self) -> TaskRunResourceWithRawResponse:
"""
@@ -60,6 +66,7 @@ def create(
enable_events: Optional[bool] | Omit = omit,
mcp_servers: Optional[Iterable[McpServerParam]] | Omit = omit,
metadata: Optional[Dict[str, Union[str, float, bool]]] | Omit = omit,
+ previous_interaction_id: Optional[str] | Omit = omit,
source_policy: Optional[SourcePolicy] | Omit = omit,
task_spec: Optional[TaskSpecParam] | Omit = omit,
webhook: Optional[WebhookParam] | Omit = omit,
@@ -99,6 +106,8 @@ def create(
metadata: User-provided metadata stored with the run. Keys and values must be strings with
a maximum length of 16 and 512 characters respectively.
+ previous_interaction_id: Interaction ID to use as context for this request.
+
source_policy: Source policy for web search results.
This policy governs which sources are allowed/disallowed in results.
@@ -142,6 +151,7 @@ def create(
"enable_events": enable_events,
"mcp_servers": mcp_servers,
"metadata": metadata,
+ "previous_interaction_id": previous_interaction_id,
"source_policy": source_policy,
"task_spec": task_spec,
"webhook": webhook,
@@ -251,6 +261,12 @@ def result(
class AsyncTaskRunResource(AsyncAPIResource):
+ """The Task API executes web research and extraction tasks.
+
+ Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
+ - Output metadata: citations, excerpts, reasoning, and confidence per field
+ """
+
@cached_property
def with_raw_response(self) -> AsyncTaskRunResourceWithRawResponse:
"""
@@ -278,6 +294,7 @@ async def create(
enable_events: Optional[bool] | Omit = omit,
mcp_servers: Optional[Iterable[McpServerParam]] | Omit = omit,
metadata: Optional[Dict[str, Union[str, float, bool]]] | Omit = omit,
+ previous_interaction_id: Optional[str] | Omit = omit,
source_policy: Optional[SourcePolicy] | Omit = omit,
task_spec: Optional[TaskSpecParam] | Omit = omit,
webhook: Optional[WebhookParam] | Omit = omit,
@@ -317,6 +334,8 @@ async def create(
metadata: User-provided metadata stored with the run. Keys and values must be strings with
a maximum length of 16 and 512 characters respectively.
+ previous_interaction_id: Interaction ID to use as context for this request.
+
source_policy: Source policy for web search results.
This policy governs which sources are allowed/disallowed in results.
@@ -360,6 +379,7 @@ async def create(
"enable_events": enable_events,
"mcp_servers": mcp_servers,
"metadata": metadata,
+ "previous_interaction_id": previous_interaction_id,
"source_policy": source_policy,
"task_spec": task_spec,
"webhook": webhook,
diff --git a/src/parallel/resources/task_run.py b/src/parallel/resources/task_run.py
index 06af545..eb0df13 100644
--- a/src/parallel/resources/task_run.py
+++ b/src/parallel/resources/task_run.py
@@ -37,6 +37,12 @@
class TaskRunResource(SyncAPIResource):
+ """The Task API executes web research and extraction tasks.
+
+ Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
+ - Output metadata: citations, excerpts, reasoning, and confidence per field
+ """
+
@cached_property
def with_raw_response(self) -> TaskRunResourceWithRawResponse:
"""
@@ -62,6 +68,7 @@ def create(
input: Union[str, Dict[str, object]],
processor: str,
metadata: Optional[Dict[str, Union[str, float, bool]]] | Omit = omit,
+ previous_interaction_id: Optional[str] | Omit = omit,
source_policy: Optional[SourcePolicy] | Omit = omit,
task_spec: Optional[TaskSpecParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -86,6 +93,8 @@ def create(
metadata: User-provided metadata stored with the run. Keys and values must be strings with
a maximum length of 16 and 512 characters respectively.
+ previous_interaction_id: Interaction ID to use as context for this request.
+
source_policy: Source policy for web search results.
This policy governs which sources are allowed/disallowed in results.
@@ -112,6 +121,7 @@ def create(
"input": input,
"processor": processor,
"metadata": metadata,
+ "previous_interaction_id": previous_interaction_id,
"source_policy": source_policy,
"task_spec": task_spec,
},
@@ -326,6 +336,12 @@ def execute(
class AsyncTaskRunResource(AsyncAPIResource):
+ """The Task API executes web research and extraction tasks.
+
+ Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
+ - Output metadata: citations, excerpts, reasoning, and confidence per field
+ """
+
@cached_property
def with_raw_response(self) -> AsyncTaskRunResourceWithRawResponse:
"""
@@ -351,6 +367,7 @@ async def create(
input: Union[str, Dict[str, object]],
processor: str,
metadata: Optional[Dict[str, Union[str, float, bool]]] | Omit = omit,
+ previous_interaction_id: Optional[str] | Omit = omit,
source_policy: Optional[SourcePolicy] | Omit = omit,
task_spec: Optional[TaskSpecParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -375,6 +392,8 @@ async def create(
metadata: User-provided metadata stored with the run. Keys and values must be strings with
a maximum length of 16 and 512 characters respectively.
+ previous_interaction_id: Interaction ID to use as context for this request.
+
source_policy: Source policy for web search results.
This policy governs which sources are allowed/disallowed in results.
@@ -401,6 +420,7 @@ async def create(
"input": input,
"processor": processor,
"metadata": metadata,
+ "previous_interaction_id": previous_interaction_id,
"source_policy": source_policy,
"task_spec": task_spec,
},
diff --git a/src/parallel/types/beta/beta_run_input.py b/src/parallel/types/beta/beta_run_input.py
index 690ac46..66f63e9 100644
--- a/src/parallel/types/beta/beta_run_input.py
+++ b/src/parallel/types/beta/beta_run_input.py
@@ -46,6 +46,9 @@ class BetaRunInput(BaseModel):
respectively.
"""
+ previous_interaction_id: Optional[str] = None
+ """Interaction ID to use as context for this request."""
+
source_policy: Optional[SourcePolicy] = None
"""Source policy for web search results.
diff --git a/src/parallel/types/beta/beta_run_input_param.py b/src/parallel/types/beta/beta_run_input_param.py
index e2578c5..0112bc1 100644
--- a/src/parallel/types/beta/beta_run_input_param.py
+++ b/src/parallel/types/beta/beta_run_input_param.py
@@ -48,6 +48,9 @@ class BetaRunInputParam(TypedDict, total=False):
respectively.
"""
+ previous_interaction_id: Optional[str]
+ """Interaction ID to use as context for this request."""
+
source_policy: Optional[SourcePolicy]
"""Source policy for web search results.
diff --git a/src/parallel/types/beta/beta_search_params.py b/src/parallel/types/beta/beta_search_params.py
index 58a8874..4a0776f 100644
--- a/src/parallel/types/beta/beta_search_params.py
+++ b/src/parallel/types/beta/beta_search_params.py
@@ -26,17 +26,17 @@ class BetaSearchParams(TypedDict, total=False):
"""DEPRECATED: Use `excerpts.max_chars_per_result` instead."""
max_results: Optional[int]
- """Upper bound on the number of results to return.
+ """Upper bound on the number of results to return. Defaults to 10 if not provided."""
- May be limited by the processor. Defaults to 10 if not provided.
- """
-
- mode: Optional[Literal["one-shot", "agentic"]]
+ mode: Optional[Literal["one-shot", "agentic", "fast"]]
"""Presets default values for parameters for different use cases.
- `one-shot` returns more comprehensive results and longer excerpts to answer
- questions from a single response, while `agentic` returns more concise,
- token-efficient results for use in an agentic loop.
+ - `one-shot` returns more comprehensive results and longer excerpts to answer
+ questions from a single response
+ - `agentic` returns more concise, token-efficient results for use in an agentic
+ loop
+ - `fast` trades some quality for lower latency, with best results when used with
+ concise and high-quality objective and keyword queries
"""
objective: Optional[str]
diff --git a/src/parallel/types/beta/excerpt_settings_param.py b/src/parallel/types/beta/excerpt_settings_param.py
index 43ceb31..2835f57 100644
--- a/src/parallel/types/beta/excerpt_settings_param.py
+++ b/src/parallel/types/beta/excerpt_settings_param.py
@@ -15,13 +15,13 @@ class ExcerptSettingsParam(TypedDict, total=False):
"""Optional upper bound on the total number of characters to include per url.
Excerpts may contain fewer characters than this limit to maximize relevance and
- token efficiency, but will never contain fewer than 1000 characters per result.
+ token efficiency. Values below 1000 will be automatically set to 1000.
"""
max_chars_total: Optional[int]
"""
Optional upper bound on the total number of characters to include across all
urls. Results may contain fewer characters than this limit to maximize relevance
- and token efficiency, but will never contain fewer than 1000 characters per
- result.This overall limit applies in addition to max_chars_per_result.
+ and token efficiency. Values below 1000 will be automatically set to 1000. This
+ overall limit applies in addition to max_chars_per_result.
"""
diff --git a/src/parallel/types/beta/findall_run.py b/src/parallel/types/beta/findall_run.py
index 9cbe080..ad55025 100644
--- a/src/parallel/types/beta/findall_run.py
+++ b/src/parallel/types/beta/findall_run.py
@@ -32,7 +32,13 @@ class Status(BaseModel):
termination_reason: Optional[
Literal[
- "low_match_rate", "match_limit_met", "candidates_exhausted", "user_cancelled", "error_occurred", "timeout"
+ "low_match_rate",
+ "match_limit_met",
+ "candidates_exhausted",
+ "user_cancelled",
+ "error_occurred",
+ "timeout",
+ "insufficient_funds",
]
] = None
"""Reason for termination when FindAll run is in terminal status."""
diff --git a/src/parallel/types/beta/task_group_add_runs_params.py b/src/parallel/types/beta/task_group_add_runs_params.py
index 68523f9..5732934 100644
--- a/src/parallel/types/beta/task_group_add_runs_params.py
+++ b/src/parallel/types/beta/task_group_add_runs_params.py
@@ -15,7 +15,11 @@
class TaskGroupAddRunsParams(TypedDict, total=False):
inputs: Required[Iterable[BetaRunInputParam]]
- """List of task runs to execute."""
+ """List of task runs to execute.
+
+ Up to 1,000 runs can be specified per request. If you'd like to add more runs,
+ split them across multiple TaskGroup POST requests.
+ """
default_task_spec: Optional[TaskSpecParam]
"""Specification for a task.
diff --git a/src/parallel/types/beta/task_run_create_params.py b/src/parallel/types/beta/task_run_create_params.py
index 2842939..f7290c9 100644
--- a/src/parallel/types/beta/task_run_create_params.py
+++ b/src/parallel/types/beta/task_run_create_params.py
@@ -48,6 +48,9 @@ class TaskRunCreateParams(TypedDict, total=False):
respectively.
"""
+ previous_interaction_id: Optional[str]
+ """Interaction ID to use as context for this request."""
+
source_policy: Optional[SourcePolicy]
"""Source policy for web search results.
diff --git a/src/parallel/types/task_run.py b/src/parallel/types/task_run.py
index fceb400..0bdb0b1 100644
--- a/src/parallel/types/task_run.py
+++ b/src/parallel/types/task_run.py
@@ -21,6 +21,13 @@ class TaskRun(BaseModel):
created_at: Optional[str] = None
"""Timestamp of the creation of the task, as an RFC 3339 string."""
+ interaction_id: str
+ """Identifier for this interaction.
+
+ Pass this value as `previous_interaction_id` to reuse context for a future
+ request.
+ """
+
is_active: bool
"""Whether the run is currently active, i.e.
diff --git a/src/parallel/types/task_run_create_params.py b/src/parallel/types/task_run_create_params.py
index 6c81803..5f1c572 100644
--- a/src/parallel/types/task_run_create_params.py
+++ b/src/parallel/types/task_run_create_params.py
@@ -25,6 +25,9 @@ class TaskRunCreateParams(TypedDict, total=False):
respectively.
"""
+ previous_interaction_id: Optional[str]
+ """Interaction ID to use as context for this request."""
+
source_policy: Optional[SourcePolicy]
"""Source policy for web search results.
diff --git a/tests/api_resources/beta/test_findall.py b/tests/api_resources/beta/test_findall.py
index b4bb063..6ee829d 100644
--- a/tests/api_resources/beta/test_findall.py
+++ b/tests/api_resources/beta/test_findall.py
@@ -296,7 +296,7 @@ def test_path_params_enrich(self, client: Parallel) -> None:
},
)
- @pytest.mark.skip(reason="Prism doesn't support text/event-stream responses")
+ @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_method_events(self, client: Parallel) -> None:
findall_stream = client.beta.findall.events(
@@ -304,7 +304,7 @@ def test_method_events(self, client: Parallel) -> None:
)
findall_stream.response.close()
- @pytest.mark.skip(reason="Prism doesn't support text/event-stream responses")
+ @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_method_events_with_all_params(self, client: Parallel) -> None:
findall_stream = client.beta.findall.events(
@@ -315,7 +315,7 @@ def test_method_events_with_all_params(self, client: Parallel) -> None:
)
findall_stream.response.close()
- @pytest.mark.skip(reason="Prism doesn't support text/event-stream responses")
+ @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_raw_response_events(self, client: Parallel) -> None:
response = client.beta.findall.with_raw_response.events(
@@ -326,7 +326,7 @@ def test_raw_response_events(self, client: Parallel) -> None:
stream = response.parse()
stream.close()
- @pytest.mark.skip(reason="Prism doesn't support text/event-stream responses")
+ @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_streaming_response_events(self, client: Parallel) -> None:
with client.beta.findall.with_streaming_response.events(
@@ -340,7 +340,7 @@ def test_streaming_response_events(self, client: Parallel) -> None:
assert cast(Any, response.is_closed) is True
- @pytest.mark.skip(reason="Prism doesn't support text/event-stream responses")
+ @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_path_params_events(self, client: Parallel) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `findall_id` but received ''"):
@@ -811,7 +811,7 @@ async def test_path_params_enrich(self, async_client: AsyncParallel) -> None:
},
)
- @pytest.mark.skip(reason="Prism doesn't support text/event-stream responses")
+ @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_method_events(self, async_client: AsyncParallel) -> None:
findall_stream = await async_client.beta.findall.events(
@@ -819,7 +819,7 @@ async def test_method_events(self, async_client: AsyncParallel) -> None:
)
await findall_stream.response.aclose()
- @pytest.mark.skip(reason="Prism doesn't support text/event-stream responses")
+ @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_method_events_with_all_params(self, async_client: AsyncParallel) -> None:
findall_stream = await async_client.beta.findall.events(
@@ -830,7 +830,7 @@ async def test_method_events_with_all_params(self, async_client: AsyncParallel)
)
await findall_stream.response.aclose()
- @pytest.mark.skip(reason="Prism doesn't support text/event-stream responses")
+ @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_raw_response_events(self, async_client: AsyncParallel) -> None:
response = await async_client.beta.findall.with_raw_response.events(
@@ -841,7 +841,7 @@ async def test_raw_response_events(self, async_client: AsyncParallel) -> None:
stream = await response.parse()
await stream.close()
- @pytest.mark.skip(reason="Prism doesn't support text/event-stream responses")
+ @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_streaming_response_events(self, async_client: AsyncParallel) -> None:
async with async_client.beta.findall.with_streaming_response.events(
@@ -855,7 +855,7 @@ async def test_streaming_response_events(self, async_client: AsyncParallel) -> N
assert cast(Any, response.is_closed) is True
- @pytest.mark.skip(reason="Prism doesn't support text/event-stream responses")
+ @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_path_params_events(self, async_client: AsyncParallel) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `findall_id` but received ''"):
diff --git a/tests/api_resources/beta/test_task_group.py b/tests/api_resources/beta/test_task_group.py
index 321e47e..cc200ce 100644
--- a/tests/api_resources/beta/test_task_group.py
+++ b/tests/api_resources/beta/test_task_group.py
@@ -123,6 +123,7 @@ def test_method_add_runs_with_all_params(self, client: Parallel) -> None:
}
],
"metadata": {"foo": "string"},
+ "previous_interaction_id": "previous_interaction_id",
"source_policy": {
"after_date": parse_date("2024-01-01"),
"exclude_domains": ["reddit.com", "x.com", ".ai"],
@@ -211,7 +212,7 @@ def test_path_params_add_runs(self, client: Parallel) -> None:
],
)
- @pytest.mark.skip(reason="Prism doesn't support text/event-stream responses")
+ @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_method_events(self, client: Parallel) -> None:
task_group_stream = client.beta.task_group.events(
@@ -219,7 +220,7 @@ def test_method_events(self, client: Parallel) -> None:
)
task_group_stream.response.close()
- @pytest.mark.skip(reason="Prism doesn't support text/event-stream responses")
+ @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_method_events_with_all_params(self, client: Parallel) -> None:
task_group_stream = client.beta.task_group.events(
@@ -229,7 +230,7 @@ def test_method_events_with_all_params(self, client: Parallel) -> None:
)
task_group_stream.response.close()
- @pytest.mark.skip(reason="Prism doesn't support text/event-stream responses")
+ @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_raw_response_events(self, client: Parallel) -> None:
response = client.beta.task_group.with_raw_response.events(
@@ -240,7 +241,7 @@ def test_raw_response_events(self, client: Parallel) -> None:
stream = response.parse()
stream.close()
- @pytest.mark.skip(reason="Prism doesn't support text/event-stream responses")
+ @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_streaming_response_events(self, client: Parallel) -> None:
with client.beta.task_group.with_streaming_response.events(
@@ -254,7 +255,7 @@ def test_streaming_response_events(self, client: Parallel) -> None:
assert cast(Any, response.is_closed) is True
- @pytest.mark.skip(reason="Prism doesn't support text/event-stream responses")
+ @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_path_params_events(self, client: Parallel) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `task_group_id` but received ''"):
@@ -262,7 +263,7 @@ def test_path_params_events(self, client: Parallel) -> None:
task_group_id="",
)
- @pytest.mark.skip(reason="Prism doesn't support text/event-stream responses")
+ @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_method_get_runs(self, client: Parallel) -> None:
task_group_stream = client.beta.task_group.get_runs(
@@ -270,7 +271,7 @@ def test_method_get_runs(self, client: Parallel) -> None:
)
task_group_stream.response.close()
- @pytest.mark.skip(reason="Prism doesn't support text/event-stream responses")
+ @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_method_get_runs_with_all_params(self, client: Parallel) -> None:
task_group_stream = client.beta.task_group.get_runs(
@@ -282,7 +283,7 @@ def test_method_get_runs_with_all_params(self, client: Parallel) -> None:
)
task_group_stream.response.close()
- @pytest.mark.skip(reason="Prism doesn't support text/event-stream responses")
+ @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_raw_response_get_runs(self, client: Parallel) -> None:
response = client.beta.task_group.with_raw_response.get_runs(
@@ -293,7 +294,7 @@ def test_raw_response_get_runs(self, client: Parallel) -> None:
stream = response.parse()
stream.close()
- @pytest.mark.skip(reason="Prism doesn't support text/event-stream responses")
+ @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_streaming_response_get_runs(self, client: Parallel) -> None:
with client.beta.task_group.with_streaming_response.get_runs(
@@ -307,7 +308,7 @@ def test_streaming_response_get_runs(self, client: Parallel) -> None:
assert cast(Any, response.is_closed) is True
- @pytest.mark.skip(reason="Prism doesn't support text/event-stream responses")
+ @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_path_params_get_runs(self, client: Parallel) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `task_group_id` but received ''"):
@@ -423,6 +424,7 @@ async def test_method_add_runs_with_all_params(self, async_client: AsyncParallel
}
],
"metadata": {"foo": "string"},
+ "previous_interaction_id": "previous_interaction_id",
"source_policy": {
"after_date": parse_date("2024-01-01"),
"exclude_domains": ["reddit.com", "x.com", ".ai"],
@@ -511,7 +513,7 @@ async def test_path_params_add_runs(self, async_client: AsyncParallel) -> None:
],
)
- @pytest.mark.skip(reason="Prism doesn't support text/event-stream responses")
+ @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_method_events(self, async_client: AsyncParallel) -> None:
task_group_stream = await async_client.beta.task_group.events(
@@ -519,7 +521,7 @@ async def test_method_events(self, async_client: AsyncParallel) -> None:
)
await task_group_stream.response.aclose()
- @pytest.mark.skip(reason="Prism doesn't support text/event-stream responses")
+ @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_method_events_with_all_params(self, async_client: AsyncParallel) -> None:
task_group_stream = await async_client.beta.task_group.events(
@@ -529,7 +531,7 @@ async def test_method_events_with_all_params(self, async_client: AsyncParallel)
)
await task_group_stream.response.aclose()
- @pytest.mark.skip(reason="Prism doesn't support text/event-stream responses")
+ @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_raw_response_events(self, async_client: AsyncParallel) -> None:
response = await async_client.beta.task_group.with_raw_response.events(
@@ -540,7 +542,7 @@ async def test_raw_response_events(self, async_client: AsyncParallel) -> None:
stream = await response.parse()
await stream.close()
- @pytest.mark.skip(reason="Prism doesn't support text/event-stream responses")
+ @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_streaming_response_events(self, async_client: AsyncParallel) -> None:
async with async_client.beta.task_group.with_streaming_response.events(
@@ -554,7 +556,7 @@ async def test_streaming_response_events(self, async_client: AsyncParallel) -> N
assert cast(Any, response.is_closed) is True
- @pytest.mark.skip(reason="Prism doesn't support text/event-stream responses")
+ @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_path_params_events(self, async_client: AsyncParallel) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `task_group_id` but received ''"):
@@ -562,7 +564,7 @@ async def test_path_params_events(self, async_client: AsyncParallel) -> None:
task_group_id="",
)
- @pytest.mark.skip(reason="Prism doesn't support text/event-stream responses")
+ @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_method_get_runs(self, async_client: AsyncParallel) -> None:
task_group_stream = await async_client.beta.task_group.get_runs(
@@ -570,7 +572,7 @@ async def test_method_get_runs(self, async_client: AsyncParallel) -> None:
)
await task_group_stream.response.aclose()
- @pytest.mark.skip(reason="Prism doesn't support text/event-stream responses")
+ @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_method_get_runs_with_all_params(self, async_client: AsyncParallel) -> None:
task_group_stream = await async_client.beta.task_group.get_runs(
@@ -582,7 +584,7 @@ async def test_method_get_runs_with_all_params(self, async_client: AsyncParallel
)
await task_group_stream.response.aclose()
- @pytest.mark.skip(reason="Prism doesn't support text/event-stream responses")
+ @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_raw_response_get_runs(self, async_client: AsyncParallel) -> None:
response = await async_client.beta.task_group.with_raw_response.get_runs(
@@ -593,7 +595,7 @@ async def test_raw_response_get_runs(self, async_client: AsyncParallel) -> None:
stream = await response.parse()
await stream.close()
- @pytest.mark.skip(reason="Prism doesn't support text/event-stream responses")
+ @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_streaming_response_get_runs(self, async_client: AsyncParallel) -> None:
async with async_client.beta.task_group.with_streaming_response.get_runs(
@@ -607,7 +609,7 @@ async def test_streaming_response_get_runs(self, async_client: AsyncParallel) ->
assert cast(Any, response.is_closed) is True
- @pytest.mark.skip(reason="Prism doesn't support text/event-stream responses")
+ @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_path_params_get_runs(self, async_client: AsyncParallel) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `task_group_id` but received ''"):
diff --git a/tests/api_resources/beta/test_task_run.py b/tests/api_resources/beta/test_task_run.py
index 6728cbd..794846d 100644
--- a/tests/api_resources/beta/test_task_run.py
+++ b/tests/api_resources/beta/test_task_run.py
@@ -43,6 +43,7 @@ def test_method_create_with_all_params(self, client: Parallel) -> None:
}
],
metadata={"foo": "string"},
+ previous_interaction_id="previous_interaction_id",
source_policy={
"after_date": parse_date("2024-01-01"),
"exclude_domains": ["reddit.com", "x.com", ".ai"],
@@ -94,7 +95,7 @@ def test_streaming_response_create(self, client: Parallel) -> None:
assert cast(Any, response.is_closed) is True
- @pytest.mark.skip(reason="Prism doesn't support text/event-stream responses")
+ @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_method_events(self, client: Parallel) -> None:
task_run_stream = client.beta.task_run.events(
@@ -102,7 +103,7 @@ def test_method_events(self, client: Parallel) -> None:
)
task_run_stream.response.close()
- @pytest.mark.skip(reason="Prism doesn't support text/event-stream responses")
+ @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_raw_response_events(self, client: Parallel) -> None:
response = client.beta.task_run.with_raw_response.events(
@@ -113,7 +114,7 @@ def test_raw_response_events(self, client: Parallel) -> None:
stream = response.parse()
stream.close()
- @pytest.mark.skip(reason="Prism doesn't support text/event-stream responses")
+ @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_streaming_response_events(self, client: Parallel) -> None:
with client.beta.task_run.with_streaming_response.events(
@@ -127,7 +128,7 @@ def test_streaming_response_events(self, client: Parallel) -> None:
assert cast(Any, response.is_closed) is True
- @pytest.mark.skip(reason="Prism doesn't support text/event-stream responses")
+ @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
def test_path_params_events(self, client: Parallel) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
@@ -212,6 +213,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncParallel)
}
],
metadata={"foo": "string"},
+ previous_interaction_id="previous_interaction_id",
source_policy={
"after_date": parse_date("2024-01-01"),
"exclude_domains": ["reddit.com", "x.com", ".ai"],
@@ -263,7 +265,7 @@ async def test_streaming_response_create(self, async_client: AsyncParallel) -> N
assert cast(Any, response.is_closed) is True
- @pytest.mark.skip(reason="Prism doesn't support text/event-stream responses")
+ @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_method_events(self, async_client: AsyncParallel) -> None:
task_run_stream = await async_client.beta.task_run.events(
@@ -271,7 +273,7 @@ async def test_method_events(self, async_client: AsyncParallel) -> None:
)
await task_run_stream.response.aclose()
- @pytest.mark.skip(reason="Prism doesn't support text/event-stream responses")
+ @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_raw_response_events(self, async_client: AsyncParallel) -> None:
response = await async_client.beta.task_run.with_raw_response.events(
@@ -282,7 +284,7 @@ async def test_raw_response_events(self, async_client: AsyncParallel) -> None:
stream = await response.parse()
await stream.close()
- @pytest.mark.skip(reason="Prism doesn't support text/event-stream responses")
+ @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_streaming_response_events(self, async_client: AsyncParallel) -> None:
async with async_client.beta.task_run.with_streaming_response.events(
@@ -296,7 +298,7 @@ async def test_streaming_response_events(self, async_client: AsyncParallel) -> N
assert cast(Any, response.is_closed) is True
- @pytest.mark.skip(reason="Prism doesn't support text/event-stream responses")
+ @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses")
@parametrize
async def test_path_params_events(self, async_client: AsyncParallel) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
diff --git a/tests/api_resources/test_task_run.py b/tests/api_resources/test_task_run.py
index 197c31b..68e7db1 100644
--- a/tests/api_resources/test_task_run.py
+++ b/tests/api_resources/test_task_run.py
@@ -32,6 +32,7 @@ def test_method_create_with_all_params(self, client: Parallel) -> None:
input="What was the GDP of France in 2023?",
processor="base",
metadata={"foo": "string"},
+ previous_interaction_id="previous_interaction_id",
source_policy={
"after_date": parse_date("2024-01-01"),
"exclude_domains": ["reddit.com", "x.com", ".ai"],
@@ -182,6 +183,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncParallel)
input="What was the GDP of France in 2023?",
processor="base",
metadata={"foo": "string"},
+ previous_interaction_id="previous_interaction_id",
source_policy={
"after_date": parse_date("2024-01-01"),
"exclude_domains": ["reddit.com", "x.com", ".ai"],
diff --git a/tests/test_client.py b/tests/test_client.py
index feec74d..c2b772f 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -961,6 +961,14 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
def test_proxy_environment_variables(self, monkeypatch: pytest.MonkeyPatch) -> None:
# Test that the proxy environment variables are set correctly
monkeypatch.setenv("HTTPS_PROXY", "https://example.org")
+ # Delete in case our environment has any proxy env vars set
+ monkeypatch.delenv("HTTP_PROXY", raising=False)
+ monkeypatch.delenv("ALL_PROXY", raising=False)
+ monkeypatch.delenv("NO_PROXY", raising=False)
+ monkeypatch.delenv("http_proxy", raising=False)
+ monkeypatch.delenv("https_proxy", raising=False)
+ monkeypatch.delenv("all_proxy", raising=False)
+ monkeypatch.delenv("no_proxy", raising=False)
client = DefaultHttpxClient()
@@ -1881,6 +1889,14 @@ async def test_get_platform(self) -> None:
async def test_proxy_environment_variables(self, monkeypatch: pytest.MonkeyPatch) -> None:
# Test that the proxy environment variables are set correctly
monkeypatch.setenv("HTTPS_PROXY", "https://example.org")
+ # Delete in case our environment has any proxy env vars set
+ monkeypatch.delenv("HTTP_PROXY", raising=False)
+ monkeypatch.delenv("ALL_PROXY", raising=False)
+ monkeypatch.delenv("NO_PROXY", raising=False)
+ monkeypatch.delenv("http_proxy", raising=False)
+ monkeypatch.delenv("https_proxy", raising=False)
+ monkeypatch.delenv("all_proxy", raising=False)
+ monkeypatch.delenv("no_proxy", raising=False)
client = DefaultAsyncHttpxClient()
diff --git a/tests/test_utils/test_json.py b/tests/test_utils/test_json.py
new file mode 100644
index 0000000..d6c0dfd
--- /dev/null
+++ b/tests/test_utils/test_json.py
@@ -0,0 +1,126 @@
+from __future__ import annotations
+
+import datetime
+from typing import Union
+
+import pydantic
+
+from parallel import _compat
+from parallel._utils._json import openapi_dumps
+
+
+class TestOpenapiDumps:
+ def test_basic(self) -> None:
+ data = {"key": "value", "number": 42}
+ json_bytes = openapi_dumps(data)
+ assert json_bytes == b'{"key":"value","number":42}'
+
+ def test_datetime_serialization(self) -> None:
+ dt = datetime.datetime(2023, 1, 1, 12, 0, 0)
+ data = {"datetime": dt}
+ json_bytes = openapi_dumps(data)
+ assert json_bytes == b'{"datetime":"2023-01-01T12:00:00"}'
+
+ def test_pydantic_model_serialization(self) -> None:
+ class User(pydantic.BaseModel):
+ first_name: str
+ last_name: str
+ age: int
+
+ model_instance = User(first_name="John", last_name="Kramer", age=83)
+ data = {"model": model_instance}
+ json_bytes = openapi_dumps(data)
+ assert json_bytes == b'{"model":{"first_name":"John","last_name":"Kramer","age":83}}'
+
+ def test_pydantic_model_with_default_values(self) -> None:
+ class User(pydantic.BaseModel):
+ name: str
+ role: str = "user"
+ active: bool = True
+ score: int = 0
+
+ model_instance = User(name="Alice")
+ data = {"model": model_instance}
+ json_bytes = openapi_dumps(data)
+ assert json_bytes == b'{"model":{"name":"Alice"}}'
+
+ def test_pydantic_model_with_default_values_overridden(self) -> None:
+ class User(pydantic.BaseModel):
+ name: str
+ role: str = "user"
+ active: bool = True
+
+ model_instance = User(name="Bob", role="admin", active=False)
+ data = {"model": model_instance}
+ json_bytes = openapi_dumps(data)
+ assert json_bytes == b'{"model":{"name":"Bob","role":"admin","active":false}}'
+
+ def test_pydantic_model_with_alias(self) -> None:
+ class User(pydantic.BaseModel):
+ first_name: str = pydantic.Field(alias="firstName")
+ last_name: str = pydantic.Field(alias="lastName")
+
+ model_instance = User(firstName="John", lastName="Doe")
+ data = {"model": model_instance}
+ json_bytes = openapi_dumps(data)
+ assert json_bytes == b'{"model":{"firstName":"John","lastName":"Doe"}}'
+
+ def test_pydantic_model_with_alias_and_default(self) -> None:
+ class User(pydantic.BaseModel):
+ user_name: str = pydantic.Field(alias="userName")
+ user_role: str = pydantic.Field(default="member", alias="userRole")
+ is_active: bool = pydantic.Field(default=True, alias="isActive")
+
+ model_instance = User(userName="charlie")
+ data = {"model": model_instance}
+ json_bytes = openapi_dumps(data)
+ assert json_bytes == b'{"model":{"userName":"charlie"}}'
+
+ model_with_overrides = User(userName="diana", userRole="admin", isActive=False)
+ data = {"model": model_with_overrides}
+ json_bytes = openapi_dumps(data)
+ assert json_bytes == b'{"model":{"userName":"diana","userRole":"admin","isActive":false}}'
+
+ def test_pydantic_model_with_nested_models_and_defaults(self) -> None:
+ class Address(pydantic.BaseModel):
+ street: str
+ city: str = "Unknown"
+
+ class User(pydantic.BaseModel):
+ name: str
+ address: Address
+ verified: bool = False
+
+ if _compat.PYDANTIC_V1:
+ # to handle forward references in Pydantic v1
+ User.update_forward_refs(**locals()) # type: ignore[reportDeprecated]
+
+ address = Address(street="123 Main St")
+ user = User(name="Diana", address=address)
+ data = {"user": user}
+ json_bytes = openapi_dumps(data)
+ assert json_bytes == b'{"user":{"name":"Diana","address":{"street":"123 Main St"}}}'
+
+ address_with_city = Address(street="456 Oak Ave", city="Boston")
+ user_verified = User(name="Eve", address=address_with_city, verified=True)
+ data = {"user": user_verified}
+ json_bytes = openapi_dumps(data)
+ assert (
+ json_bytes == b'{"user":{"name":"Eve","address":{"street":"456 Oak Ave","city":"Boston"},"verified":true}}'
+ )
+
+ def test_pydantic_model_with_optional_fields(self) -> None:
+ class User(pydantic.BaseModel):
+ name: str
+ email: Union[str, None]
+ phone: Union[str, None]
+
+ model_with_none = User(name="Eve", email=None, phone=None)
+ data = {"model": model_with_none}
+ json_bytes = openapi_dumps(data)
+ assert json_bytes == b'{"model":{"name":"Eve","email":null,"phone":null}}'
+
+ model_with_values = User(name="Frank", email="frank@example.com", phone=None)
+ data = {"model": model_with_values}
+ json_bytes = openapi_dumps(data)
+ assert json_bytes == b'{"model":{"name":"Frank","email":"frank@example.com","phone":null}}'