12 changes: 6 additions & 6 deletions astrbot/core/agent/runners/tool_loop_agent_runner.py
@@ -183,10 +183,10 @@ async def _complete_with_assistant_response(self, llm_resp: LLMResponse) -> None
         self.stats.end_time = time.time()
 
         parts = []
-        if llm_resp.reasoning_content or llm_resp.reasoning_signature:
+        if llm_resp.reasoning_content is not None or llm_resp.reasoning_signature:
             parts.append(
                 ThinkPart(
-                    think=llm_resp.reasoning_content,
+                    think=llm_resp.reasoning_content or "",
                     encrypted=llm_resp.reasoning_signature,
                 )
             )
@@ -876,10 +876,10 @@ async def step(self):

         # Add the result to the context
         parts = []
-        if llm_resp.reasoning_content or llm_resp.reasoning_signature:
+        if llm_resp.reasoning_content is not None or llm_resp.reasoning_signature:
             parts.append(
                 ThinkPart(
-                    think=llm_resp.reasoning_content,
+                    think=llm_resp.reasoning_content or "",
                     encrypted=llm_resp.reasoning_signature,
                 )
             )
@@ -1361,10 +1361,10 @@ async def _finalize_aborted_step(
         self.stats.end_time = time.time()
 
         parts = []
-        if llm_resp.reasoning_content or llm_resp.reasoning_signature:
+        if llm_resp.reasoning_content is not None or llm_resp.reasoning_signature:
             parts.append(
                 ThinkPart(
-                    think=llm_resp.reasoning_content,
+                    think=llm_resp.reasoning_content or "",
                     encrypted=llm_resp.reasoning_signature,
                 )
             )
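All three hunks in this file make the same fix: `is not None` preserves a ThinkPart when the provider sent an explicitly empty reasoning string, and the `or ""` keeps `think` a plain string when only a signature is present. A minimal sketch of the behavior change, using hypothetical stand-ins for `LLMResponse` and `ThinkPart` (the real classes carry more fields):

    from dataclasses import dataclass

    @dataclass
    class FakeResponse:  # hypothetical stand-in for LLMResponse
        reasoning_content: str | None = None
        reasoning_signature: str | None = None

    def build_parts(resp: FakeResponse) -> list[dict]:
        parts = []
        # "is not None" keeps a think part even when the provider sent an
        # empty reasoning string; plain truthiness used to drop that case.
        if resp.reasoning_content is not None or resp.reasoning_signature:
            parts.append(
                {"think": resp.reasoning_content or "",  # never None after the fix
                 "encrypted": resp.reasoning_signature}
            )
        return parts

    print(build_parts(FakeResponse()))                      # []
    print(build_parts(FakeResponse(reasoning_content="")))  # [{'think': '', 'encrypted': None}]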
4 changes: 1 addition & 3 deletions astrbot/core/provider/entities.py
@@ -353,7 +353,7 @@ class LLMResponse:
"""Tool call IDs."""
tools_call_extra_content: dict[str, dict[str, Any]] = field(default_factory=dict)
"""Tool call extra content. tool_call_id -> extra_content dict"""
reasoning_content: str = ""
reasoning_content: str | None = None
"""The reasoning content extracted from the LLM, if any."""
reasoning_signature: str | None = None
"""The signature of the reasoning content, if any."""
@@ -404,8 +404,6 @@ def __init__(
             raw_completion (ChatCompletion, optional): The raw response, in OpenAI format. Defaults to None.
 
         """
-        if reasoning_content is None:
-            reasoning_content = ""
         if tools_call_args is None:
             tools_call_args = []
         if tools_call_name is None:
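With the default flipped to `None` and the `__init__` coercion removed, `reasoning_content` is effectively tri-state. A simplified sketch of the new semantics (not the real `LLMResponse`):

    from dataclasses import dataclass

    @dataclass
    class Response:  # hypothetical, trimmed to the fields touched by this diff
        reasoning_content: str | None = None   # None = provider sent no reasoning
        reasoning_signature: str | None = None

    r = Response()
    assert r.reasoning_content is None   # absent, now distinguishable from empty
    r.reasoning_content = ""             # present-but-empty is preserved; the old
                                         # __init__ collapsed None to "" and lost this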
2 changes: 1 addition & 1 deletion astrbot/core/provider/sources/anthropic_source.py
@@ -39,7 +39,7 @@ def _ensure_usable_response(
         stop_reason: str | None = None,
     ) -> None:
         has_text_output = bool((llm_response.completion_text or "").strip())
-        has_reasoning_output = bool(llm_response.reasoning_content.strip())
+        has_reasoning_output = bool((llm_response.reasoning_content or "").strip())
         has_tool_output = bool(llm_response.tools_call_args)
         if has_text_output or has_reasoning_output or has_tool_output:
             return
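Since `reasoning_content` can now be `None`, calling `.strip()` on it directly would raise; the `(x or "")` coercion keeps the emptiness check safe. In isolation (the same guard is applied in gemini_source.py below):

    reasoning_content: str | None = None
    has_reasoning_output = bool((reasoning_content or "").strip())
    print(has_reasoning_output)  # False; reasoning_content.strip() would raise AttributeError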
2 changes: 1 addition & 1 deletion astrbot/core/provider/sources/gemini_source.py
@@ -462,7 +462,7 @@ def _ensure_usable_response(
         finish_reason: str | None = None,
     ) -> None:
         has_text_output = bool((llm_response.completion_text or "").strip())
-        has_reasoning_output = bool(llm_response.reasoning_content.strip())
+        has_reasoning_output = bool((llm_response.reasoning_content or "").strip())
         has_tool_output = bool(llm_response.tools_call_args)
         if has_text_output or has_reasoning_output or has_tool_output:
             return
57 changes: 34 additions & 23 deletions astrbot/core/provider/sources/openai_source.py
@@ -671,9 +671,9 @@ async def _query_stream(
             reasoning = self._extract_reasoning_content(chunk)
             _y = False
             llm_response.id = chunk.id
-            llm_response.reasoning_content = ""
+            llm_response.reasoning_content = None
             llm_response.completion_text = ""
-            if reasoning:
+            if reasoning is not None:
                 llm_response.reasoning_content = reasoning
                 _y = True
             if delta and delta.content:
@@ -701,22 +701,28 @@
     def _extract_reasoning_content(
         self,
         completion: ChatCompletion | ChatCompletionChunk,
-    ) -> str:
+    ) -> str | None:
         """Extract reasoning content from OpenAI ChatCompletion if available."""
-        reasoning_text = ""
+
+        def _get_reasoning_attr(obj: Any) -> str | None:
+            fields_set = getattr(obj, "model_fields_set", None)
+            if isinstance(fields_set, set) and self.reasoning_key in fields_set:
+                attr = getattr(obj, self.reasoning_key, "")
+                return "" if attr is None else str(attr)
+            attr = getattr(obj, self.reasoning_key, None)
+            return None if attr is None else str(attr)
+
         if not completion.choices:
-            return reasoning_text
+            return None
         if isinstance(completion, ChatCompletion):
             choice = completion.choices[0]
-            reasoning_attr = getattr(choice.message, self.reasoning_key, None)
-            if reasoning_attr:
-                reasoning_text = str(reasoning_attr)
+            reasoning_attr = _get_reasoning_attr(choice.message)
         elif isinstance(completion, ChatCompletionChunk):
             delta = completion.choices[0].delta
-            reasoning_attr = getattr(delta, self.reasoning_key, None)
-            if reasoning_attr:
-                reasoning_text = str(reasoning_attr)
-        return reasoning_text
+            reasoning_attr = _get_reasoning_attr(delta)
+        else:
+            return None
+        return reasoning_attr
 
     def _extract_usage(self, usage: CompletionUsage | dict) -> TokenUsage:
         ptd = getattr(usage, "prompt_tokens_details", None)
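The new `_get_reasoning_attr` helper leans on pydantic v2's `model_fields_set`, which records which fields were actually present in the parsed payload, so the code can tell "field omitted" apart from "field sent as null". A standalone sketch of that distinction with a made-up `Delta` model (the OpenAI SDK's response objects are pydantic models, but this one is not the SDK's):

    from pydantic import BaseModel

    class Delta(BaseModel):  # hypothetical; not the SDK's real delta type
        reasoning_content: str | None = None

    omitted = Delta()                         # field absent from the payload
    explicit = Delta(reasoning_content=None)  # field present with a null value

    print("reasoning_content" in omitted.model_fields_set)   # False -> helper yields None
    print("reasoning_content" in explicit.model_fields_set)  # True  -> helper yields ""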
@@ -859,7 +865,9 @@ async def _parse_openai_completion(

         # parse the reasoning content if any
         # the priority is higher than the <think> tag extraction
-        llm_response.reasoning_content = self._extract_reasoning_content(completion)
+        reasoning_content = self._extract_reasoning_content(completion)
+        if reasoning_content is not None:
+            llm_response.reasoning_content = reasoning_content
 
         # parse tool calls if any
         if choice.message.tool_calls and tools is not None:
@@ -906,7 +914,7 @@ async def _parse_openai_completion(
"API 返回的 completion 由于内容安全过滤被拒绝(非 AstrBot)。",
)
has_text_output = bool((llm_response.completion_text or "").strip())
has_reasoning_output = bool(llm_response.reasoning_content.strip())
has_reasoning_output = bool((llm_response.reasoning_content or "").strip())
if (
not has_text_output
and not has_reasoning_output
@@ -987,31 +995,34 @@ def _finally_convert_payload(self, payloads: dict) -> None:
             model in deepseek_reasoning_models
             or "api.deepseek.com" in self.client.base_url.host
         )
+
         for message in payloads.get("messages", []):
             if message.get("role") == "assistant" and isinstance(
                 message.get("content"), list
             ):
                 reasoning_content = ""
+                reasoning_content_present = False
                 new_content = []  # not including think part
                 for part in message["content"]:
                     if part.get("type") == "think":
+                        reasoning_content_present = True
                         reasoning_content += str(part.get("think"))
                     else:
                         new_content.append(part)
                 # Some providers (Grok, etc.) reject empty content lists.
                 # When all parts were think blocks, fall back to None.
                 message["content"] = new_content or None
-                if is_deepseek_v4_reasoning and not reasoning_content:
-                    logger.info(
-                        "Deepseek v4 model requires non-empty reasoning content, but got empty. Setting to 'none' to satisfy the requirement."
-                    )
-                    # Deepseek models require the field on assistant
-                    # history messages, even when the reasoning content is empty.
-                    message["reasoning_content"] = "none"
-                elif reasoning_content:
+                if reasoning_content_present:
                     message["reasoning_content"] = reasoning_content
 
+            if (
+                message.get("role") == "assistant"
+                and is_deepseek_v4_reasoning
+                and "reasoning_content" not in message
+            ):
+                # DeepSeek v4 reasoning models require the field on assistant
+                # history messages, even when the reasoning content is empty.
+                message["reasoning_content"] = ""
+
             # Gemini's function_response requires a google.protobuf.Struct (i.e. a JSON
             # object); plain text triggers 400 Invalid argument, so it must be wrapped in JSON.
             if is_gemini and message.get("role") == "tool":