Skip to content

Failed to validate chunk with tool call in stream mode #31

@khshanovskyi

Description

@khshanovskyi

Name and Version

=0.2.0rc0

What steps will reproduce the bug?

Local environment.

Context:
I'm trying to create an agent with AsyncDialClient. It worked fine with regular calls to the LLM, but it fails when the LLM response contains a tool call.

Code:

from typing import Any

from aidial_client import AsyncDial

from aidial_sdk.chat_completion import Message, Role, Choice
from task.tools.base import BaseTool
from task.utils import unpack_messages


class LLMAgent:
    """Agent that forwards chat-completion requests to a DIAL deployment.

    Streams the model's reply into the provided ``Choice`` and keeps a
    name -> tool lookup table for the tools it advertises to the model.
    """

    def __init__(
            self,
            endpoint: str,
            api_key: str,
            api_version: str,
            system_prompt: str,
            tools: list[BaseTool]
    ):
        self.endpoint = endpoint
        self.api_key = api_key
        self.api_version = api_version
        self.system_prompt = system_prompt
        self.tools = tools
        # Name -> tool lookup for dispatching tool calls returned by the model.
        self._tools_dict: dict[str, BaseTool] = {
            tool.name: tool
            for tool in tools
        }

    async def handle_request(
            self, messages: list[Message], choice: Choice, deployment_name: str, **kwargs
    ) -> None:
        """Stream one chat completion into *choice*.

        Args:
            messages: Conversation history from the incoming request.
            choice: SDK choice object that streamed content is appended to.
            deployment_name: Name of the DIAL deployment to call.
            **kwargs: Extra parameters forwarded to the completion call.
        """
        client: AsyncDial = AsyncDial(
            base_url=self.endpoint,
            api_key=self.api_key,
            api_version=self.api_version,
        )
        # TODO(review): this tool schema is hard-coded and duplicates
        # ``self.tools`` — derive it from the BaseTool instances so the
        # advertised tools stay in sync with ``self._tools_dict``.
        chunks = await client.chat.completions.create(
            stream=True,
            messages=self._prepare_messages(messages),
            tools=[
                {
                    "type": "function",
                    "function": {
                        "name": "web_search",
                        "description": "Performs WEB search.",
                        "parameters": {
                            "type": "object",
                            "properties": {
                                "request": {
                                    "type": "string",
                                    "description": "The search query or question to search for on the web"
                                }
                            },
                            "required": ["request"]
                        }
                    }
                }
            ],
            deployment_name=deployment_name,
            **kwargs
        )

        async for chunk in chunks:
            # Some chunks may arrive with an empty ``choices`` list; skip them.
            if chunk.choices:
                delta = chunk.choices[0].delta
                if delta and delta.content:
                    choice.append_content(delta.content)
                # NOTE(review): tool-call deltas (``delta.tool_calls``) are
                # currently dropped. To act on them, accumulate the streamed
                # fragments by ``index`` (id / function.name / function.arguments
                # arrive across multiple chunks) and dispatch via
                # ``self._tools_dict`` after the stream completes — confirm the
                # BaseTool invocation contract before wiring this up.

    def _prepare_messages(self, messages: list[Message]) -> list[dict[str, Any]]:
        """Return *messages* unpacked to plain dicts with the system prompt prepended."""
        unpacked_messages = unpack_messages(messages)
        unpacked_messages.insert(
            0,
            {
                "role": Role.SYSTEM.value,
                "content": self.system_prompt,
            }
        )
        return unpacked_messages

What is the expected behavior?

Chunk with tool call will be parsed and printed like:

INFO: Uvicorn running on http://0.0.0.0:5030 (Press CTRL+C to quit)
role=<Role.USER: 'user'> content='what can you do?' custom_content=None custom_fields=None name=None tool_calls=None tool_call_id=None function_call=None refusal=None
role=<Role.ASSISTANT: 'assistant'> content='I can assist you with a wide range of tasks, including:\n\n1. Information Retrieval:\n - Perform searches on the web for up-to-date information.\n - Extract answers or summarize content using attached files.\n\n2. File Analysis:\n - Search for specific information within attached texts.\n - Help summarize or answer questions about the attached documents.\n\n3. Task Automation:\n - Provide structured information and assist with decision-making by researching various topics.\n\nHow can I assist you today?' custom_content=None custom_fields=None name=None tool_calls=None tool_call_id=None function_call=None refusal=None
role=<Role.USER: 'user'> content='what is the weather in Kyiv?' custom_content=None custom_fields=None name=None tool_calls=None tool_call_id=None function_call=None refusal=None
INFO: 127.0.0.1:55412 - "POST /openai/deployments/super-agent/chat/completions?api-version=2025-01-01-preview HTTP/1.1" 200 OK
ChoiceDelta(content=None, function_call=None, refusal=None, role='assistant', tool_calls=[ChoiceDeltaToolCall(index=0, id='call_giAQRJYhG7UEMKwTU5dkuOKq', function=ChoiceDeltaToolCallFunction(arguments='', name='web_search'), type='function')])
ChoiceDelta(content=None, function_call=None, refusal=None, role=None, tool_calls=[ChoiceDeltaToolCall(index=0, id=None, function=ChoiceDeltaToolCallFunction(arguments='{"', name=None), type=None)])
ChoiceDelta(content=None, function_call=None, refusal=None, role=None, tool_calls=[ChoiceDeltaToolCall(index=0, id=None, function=ChoiceDeltaToolCallFunction(arguments='request', name=None), type=None)])
ChoiceDelta(content=None, function_call=None, refusal=None, role=None, tool_calls=[ChoiceDeltaToolCall(index=0, id=None, function=ChoiceDeltaToolCallFunction(arguments='":"', name=None), type=None)])
ChoiceDelta(content=None, function_call=None, refusal=None, role=None, tool_calls=[ChoiceDeltaToolCall(index=0, id=None, function=ChoiceDeltaToolCallFunction(arguments='current', name=None), type=None)])
ChoiceDelta(content=None, function_call=None, refusal=None, role=None, tool_calls=[ChoiceDeltaToolCall(index=0, id=None, function=ChoiceDeltaToolCallFunction(arguments=' weather', name=None), type=None)])
ChoiceDelta(content=None, function_call=None, refusal=None, role=None, tool_calls=[ChoiceDeltaToolCall(index=0, id=None, function=ChoiceDeltaToolCallFunction(arguments=' in', name=None), type=None)])
ChoiceDelta(content=None, function_call=None, refusal=None, role=None, tool_calls=[ChoiceDeltaToolCall(index=0, id=None, function=ChoiceDeltaToolCallFunction(arguments=' Kyiv', name=None), type=None)])
ChoiceDelta(content=None, function_call=None, refusal=None, role=None, tool_calls=[ChoiceDeltaToolCall(index=0, id=None, function=ChoiceDeltaToolCallFunction(arguments='"}', name=None), type=None)])
ChoiceDelta(content=None, function_call=None, refusal=None, role=None, tool_calls=None)

What do you see instead?

INFO: Uvicorn running on http://0.0.0.0:5030 (Press CTRL+C to quit)
role=<Role.USER: 'user'> content='what can you do?' custom_content=None custom_fields=None name=None tool_calls=None tool_call_id=None function_call=None refusal=None
role=<Role.ASSISTANT: 'assistant'> content='I can assist you with a wide range of tasks, including:\n\n1. Information Retrieval:\n - Perform searches on the web for up-to-date information.\n - Extract answers or summarize content using attached files.\n\n2. File Analysis:\n - Search for specific information within attached texts.\n - Help summarize or answer questions about the attached documents.\n\n3. Task Automation:\n - Provide structured information and assist with decision-making by researching various topics.\n\nHow can I assist you today?' custom_content=None custom_fields=None name=None tool_calls=None tool_call_id=None function_call=None refusal=None
role=<Role.USER: 'user'> content='what is the weather in Kyiv?' custom_content=None custom_fields=None name=None tool_calls=None tool_call_id=None function_call=None refusal=None
INFO: 127.0.0.1:54816 - "POST /openai/deployments/super-agent/chat/completions?api-version=2025-01-01-preview HTTP/1.1" 200 OK
ChatCompletionMessage(role='assistant', content=None, custom_content=None, function_call=None, tool_calls=[ChatCompletionMessageToolCall(id='call_TE1MbrQSDbzq5BNPpsGOpTQy', function=FunctionCall(arguments='', name='web_search'), type='function', index=0)], refusal=None)
ERROR: | 2025-07-11 17:43:51 | aidial_sdk | 22563 | Error during processing the request
Traceback (most recent call last):
File "/Users/pavlokhshanovskyi/PycharmProjects/dialx-ai-mentorship/ai-dial-agent/.venv/lib/python3.11/site-packages/aidial_sdk/chat_completion/response.py", line 77, in _run_producer
await producer(self.request, self)
File "/Users/pavlokhshanovskyi/PycharmProjects/dialx-ai-mentorship/ai-dial-agent/task/app.py", line 26, in chat_completion
await LLMAgent(
File "/Users/pavlokhshanovskyi/PycharmProjects/dialx-ai-mentorship/ai-dial-agent/task/llm_agent.py", line 65, in handle_request
async for chunk in chunks:
File "/Users/pavlokhshanovskyi/PycharmProjects/dialx-ai-mentorship/ai-dial-agent/.venv/lib/python3.11/site-packages/aidial_client/_utils/_openai.py", line 53, in convert_openai_async_stream
yield ChatCompletionChunk(**chunk.model_dump())
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/pavlokhshanovskyi/PycharmProjects/dialx-ai-mentorship/ai-dial-agent/.venv/lib/python3.11/site-packages/pydantic/main.py", line 253, in init
validated_self = self.pydantic_validator.validate_python(data, self_instance=self)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
pydantic_core._pydantic_core.ValidationError: 5 validation errors for ChatCompletionChunk
choices.0.delta.ChatCompletionMessage.role
Input should be 'assistant' [type=literal_error, input_value=None, input_type=NoneType]
For further information visit https://errors.pydantic.dev/2.11/v/literal_error
choices.0.delta.ChatCompletionMessage.tool_calls.0.id
Input should be a valid string [type=string_type, input_value=None, input_type=NoneType]
For further information visit https://errors.pydantic.dev/2.11/v/string_type
choices.0.delta.ChatCompletionMessage.tool_calls.0.function.name
Input should be a valid string [type=string_type, input_value=None, input_type=NoneType]
For further information visit https://errors.pydantic.dev/2.11/v/string_type
choices.0.delta.ChatCompletionMessage.tool_calls.0.type
Input should be 'function' [type=literal_error, input_value=None, input_type=NoneType]
For further information visit https://errors.pydantic.dev/2.11/v/literal_error
choices.0.delta.ChunkEmptyDelta.tool_calls
Input should be None [type=literal_error, input_value=[{'index': 0, 'id': None,...': None}, 'type': None}], input_type=list]
For further information visit https://errors.pydantic.dev/2.11/v/literal_error

Additional information

No response

Metadata

Metadata

Assignees

No one assigned

    Labels

    bug — Something isn't working

    Type

    No type
    No fields configured for issues without a type.

    Projects

    No projects

    Milestone

    No milestone

    Relationships

    None yet

    Development

    No branches or pull requests

    Issue actions