diff --git a/pyproject.toml b/pyproject.toml
index 46a7e8bd6..e989536d4 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -51,6 +51,17 @@ langgraph = [
     "langgraph>=0.6.2",
     "langchain-openai>=0.3.27",
 ]
+tinker = [
+    "fastapi>=0.128.0",
+    "huggingface_hub",
+    "numpy",
+    "pillow",
+    "pydantic>=2.12.5",
+    "tinker>=0.8.1",
+    "torch>=2.8.0",
+    "transformers>=4.55.2,<=4.57.3",
+    "uvicorn>=0.35.0",
+]
 
 [project.scripts]
 art = "art.cli:app"
@@ -115,7 +126,6 @@ unused-ignore-comment = "ignore"
 allowed-unresolved-imports = [
     # tinker deps
     "tinker.**",
-    "tinker_cookbook.**",
     # backend deps
     "accelerate.**",
     "awscli.**",
@@ -166,12 +176,6 @@ dev = [
     "pyarrow>=15.0.0",
     "prek>=0.2.29",
 ]
-tinker = [
-    "fastapi>=0.128.0",
-    "tinker>=0.8.1",
-    "tinker-cookbook>=0.1.0",
-    "uvicorn>=0.35.0",
-]
 
 [tool.uv.sources]
 panza = { git = "https://github.com/corbt/panza.git" }
diff --git a/src/art/__init__.py b/src/art/__init__.py
index d07d20274..01ccb2318 100644
--- a/src/art/__init__.py
+++ b/src/art/__init__.py
@@ -57,13 +57,6 @@ def __init__(self, **kwargs):
 from .local import LocalBackend
 from .model import Model, TrainableModel
 from .serverless import ServerlessBackend
-
-try:
-    from .tinker import TinkerBackend
-    from .tinker_native import TinkerNativeBackend
-except ModuleNotFoundError:
-    TinkerBackend = None  # type: ignore[assignment]
-    TinkerNativeBackend = None  # type: ignore[assignment]
 from .trajectories import Trajectory, TrajectoryGroup
 from .types import (
     LocalTrainResult,
@@ -102,5 +95,3 @@ def __init__(self, **kwargs):
     "capture_yielded_trajectory",
     "yield_trajectory",
 ]
-if TinkerBackend is not None:
-    __all__.extend(["TinkerBackend", "TinkerNativeBackend"])
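With the conditional re-export removed from art/__init__.py, call sites import the backend from its submodule, as the pipeline diffs below do. A minimal sketch of the updated usage (model and project names are placeholders, not values from this PR):

    import art
    from art.tinker_native import TinkerNativeBackend  # ModuleNotFoundError without the tinker extra

    backend = TinkerNativeBackend()
    model = art.TrainableModel(
        name="example-model",
        project="example-project",
        base_model="Qwen/Qwen3-4B-Instruct-2507",
    )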
diff --git a/src/art/pipeline_trainer/binary_prefix_tool_pipeline.py b/src/art/pipeline_trainer/binary_prefix_tool_pipeline.py
index 70b413b66..e646aed73 100644
--- a/src/art/pipeline_trainer/binary_prefix_tool_pipeline.py
+++ b/src/art/pipeline_trainer/binary_prefix_tool_pipeline.py
@@ -7,6 +7,7 @@
 from pathlib import Path
 import re
 from typing import Any, cast
+import uuid
 
 from dotenv import load_dotenv
 from openai.types.chat.chat_completion_tool_choice_option_param import (
@@ -16,6 +17,7 @@
 import polars as pl
 
 import art
+from art.tinker_native import TinkerNativeBackend
 from . import PipelineTrainer, make_group_rollout_fn
@@ -178,6 +180,8 @@ async def main() -> None:
         "BASE_MODEL", "Qwen/Qwen3-4B-Instruct-2507"
     )  # Qwen/Qwen3-30B-A3B-Instruct-2507
     model_name = os.environ.get("MODEL_NAME", "pipeline-binary-prefix-tool")
+    run_suffix = os.environ.get("RUN_SUFFIX") or uuid.uuid4().hex[:8]
+    model_name = f"{model_name}-{run_suffix}"
     project = os.environ.get("PROJECT", "binary-prefix-tool-pipeline")
     art_path = os.environ.get("ART_PATH")
@@ -213,7 +217,7 @@ async def main() -> None:
             }
         }
 
-    backend = art.TinkerNativeBackend(path=art_path)
+    backend = TinkerNativeBackend(path=art_path)
     model = art.TrainableModel(
         name=model_name,
         project=project,
@@ -239,6 +243,7 @@ async def do_rollout(scenario: Scenario, temp: float) -> art.Trajectory:
         )
         choice = response.choices[0]
         raw_guess, source = extract_guess(choice)
+        sampled_content = choice.message.content or ""
         guess = raw_guess or ""
         valid_guess = is_valid_guess(guess)
         prefix_len = shared_prefix_len(guess, SECRET_BITS) if valid_guess else 0
@@ -258,6 +263,7 @@ async def do_rollout(scenario: Scenario, temp: float) -> art.Trajectory:
             messages_and_choices=[*messages, choice],
             tools=TOOLS,
             reward=reward,
+            logs=[f"sampled_content:\n{sampled_content}"],
             metrics=metrics,
         )
diff --git a/src/art/pipeline_trainer/yes_no_maybe_pipeline.py b/src/art/pipeline_trainer/yes_no_maybe_pipeline.py
index 63e3323dd..3909bc0d3 100644
--- a/src/art/pipeline_trainer/yes_no_maybe_pipeline.py
+++ b/src/art/pipeline_trainer/yes_no_maybe_pipeline.py
@@ -12,6 +12,7 @@
 from dotenv import load_dotenv
 
 import art
+from art.tinker_native import TinkerNativeBackend
 from . import PipelineTrainer
@@ -106,7 +107,7 @@ async def main() -> None:
     model_name = f"{MODEL_NAME}-{datetime.now().strftime('%Y%m%d-%H%M%S')}"
 
     print("Initializing TinkerNativeBackend")
-    backend = art.TinkerNativeBackend()
+    backend = TinkerNativeBackend()
 
     print(f"Initializing TrainableModel: {model_name}")
     model = art.TrainableModel(name=model_name, project=PROJECT, base_model=BASE_MODEL)
diff --git a/src/art/tinker/cookbook_v/__init__.py b/src/art/tinker/cookbook_v/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/src/art/tinker/cookbook_v/hyperparam_utils.py b/src/art/tinker/cookbook_v/hyperparam_utils.py
new file mode 100644
index 000000000..94c6bd648
--- /dev/null
+++ b/src/art/tinker/cookbook_v/hyperparam_utils.py
@@ -0,0 +1,192 @@
+"""
+Utilities for guessing good hyperparameters for fine-tuning.
+"""
+
+import json
+import math
+import struct
+from typing import Dict, Tuple
+
+import huggingface_hub
+import numpy as np
+from transformers import AutoConfig
+
+from .utils.misc_utils import not_none
+
+
+def _list_param_shapes_from_safetensors_remote(
+    repo_id: str,
+    revision: str = "main",
+    token: str | None = None,
+) -> Dict[str, Tuple[int, ...]]:
+    """
+    Returns {param_name: shape_tuple} by reading ONLY the safetensors header(s)
+    over HTTP (ranged requests). No full file download.
+    """
+    fs = huggingface_hub.HfFileSystem(token=token)
+    info = huggingface_hub.model_info(repo_id, revision=revision, token=token)
+
+    # find all .safetensors files (handles sharded checkpoints)
+    st_files = [
+        s.rfilename
+        for s in not_none(info.siblings)
+        if s.rfilename.endswith(".safetensors")
+    ]
+    if not st_files:
+        raise FileNotFoundError("No .safetensors files found in this repo.")
+
+    shapes: Dict[str, Tuple[int, ...]] = {}
+
+    for fname in st_files:
+        # Open remote file via fsspec; this performs HTTP range reads under the hood
+        path = f"{repo_id}@{revision}/{fname}"  # HfFileSystem path format
+        with fs.open(path, "rb") as f:
+            # safetensors spec:
+            # [0:8] = little-endian u64 header_len
+            # [8:8+header_len] = UTF-8 JSON header
+            header_len_bytes = f.read(8)
+            assert isinstance(header_len_bytes, bytes)
+            if len(header_len_bytes) < 8:
+                raise IOError(f"File too small or not safetensors: {fname}")
+            (header_len,) = struct.unpack("<Q", header_len_bytes)
+            header = json.loads(f.read(header_len))
+
+            # header maps: name -> { "dtype": "...", "shape": [...], "data_offsets": [start, end] }
+            for name, meta in header.items():
+                if name == "__metadata__":  # optional global metadata block
+                    continue
+                shapes[name] = tuple(meta["shape"])
+
+    return shapes
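A usage sketch of the header-only trick above (the repo id is illustrative; the returned shapes depend entirely on the checkpoint):

    shapes = _list_param_shapes_from_safetensors_remote("Qwen/Qwen3-4B-Instruct-2507")
    # Only the JSON headers are fetched, so this is cheap even for sharded checkpoints.
    matrices = {name: shape for name, shape in shapes.items() if len(shape) == 2}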
+ """ + fs = huggingface_hub.HfFileSystem(token=token) + info = huggingface_hub.model_info(repo_id, revision=revision, token=token) + + # find all .safetensors files (handles sharded checkpoints) + st_files = [ + s.rfilename + for s in not_none(info.siblings) + if s.rfilename.endswith(".safetensors") + ] + if not st_files: + raise FileNotFoundError("No .safetensors files found in this repo.") + + shapes: Dict[str, Tuple[int, ...]] = {} + + for fname in st_files: + # Open remote file via fsspec; this performs HTTP range reads under the hood + path = f"{repo_id}@{revision}/{fname}" # HfFileSystem path format + with fs.open(path, "rb") as f: + # safetensors spec: + # [0:8] = little-endian u64 header_len + # [8:8+header_len] = UTF-8 JSON header + header_len_bytes = f.read(8) + assert isinstance(header_len_bytes, bytes) + if len(header_len_bytes) < 8: + raise IOError(f"File too small or not safetensors: {fname}") + (header_len,) = struct.unpack(" { "dtype": "...", "shape": [...], "data_offsets": [start, end] } + for name, meta in header.items(): + if name == "__metadata__": # optional global metadata block + continue + shapes[name] = tuple(meta["shape"]) + + return shapes + + +def get_lora_lr_over_full_finetune_lr(model_name: str, lora_alpha: int = 32) -> float: + """ + Return the factor that you should scale the full fine-tuning learning rate by to get the equivalent LoRA learning rate. + Previously we had a more complicated formula, but the factor of 10 was more accurate empirically. + See Lora Without Regret (https://thinkingmachines.ai/blog/lora/) for more details. + """ + return 10.0 + + +def _get_hidden_size(model_name: str) -> int: + if "meta-llama/Llama-3" in model_name: + # Bypass HF_TOKEN requirement for Llama-3 models + return { + "meta-llama/Llama-3.2-1B": 2048, + "meta-llama/Llama-3.2-1B-Instruct": 2048, + "meta-llama/Llama-3.2-3B": 3072, + "meta-llama/Llama-3.2-3B-Instruct": 3072, + "meta-llama/Llama-3.1-8B": 4096, + "meta-llama/Llama-3.1-8B-Instruct": 4096, + "meta-llama/Llama-3.1-70B": 8192, + "meta-llama/Llama-3.3-70B-Instruct": 8192, + }[model_name] + + if model_name in ( + "deepseek-ai/DeepSeek-V3.1", + "deepseek-ai/DeepSeek-V3.1-Base", + "moonshotai/Kimi-K2-Thinking", + ): + return 7168 + + config = AutoConfig.from_pretrained(model_name) + return config.hidden_size + + +def get_lora_param_count( + model_name: str, + lora_rank: int = 32, + detailed: bool = False, + include_experts: bool = True, + shared_expert_outer_loras: bool = True, +) -> int | dict[str, int]: + """ + Get the number of parameters in the LoRA adapter. 
+ """ + + dim_sum = 0 + dim_sum_experts = 0 + ignore = ["gate", "embed_tokens", "q_b_proj", "kv_b_proj"] + if not include_experts: + ignore.append("experts") + + for name, shape in _list_param_shapes_from_safetensors_remote(model_name).items(): + if ( + len(shape) == 2 + and name.endswith(".weight") + and not any([v in name.split(".") for v in ignore]) + ): + parts = name.split(".") + if "experts" not in parts or not shared_expert_outer_loras: + dim_sum += shape[0] + shape[1] + else: + # For expert shared outer_loras, we only count the outer dims once, since they are shared across experts + expert_idx = int(parts[parts.index("experts") + 1]) + weight_name = parts[parts.index("experts") + 2] + assert weight_name in ["gate_proj", "down_proj", "up_proj"], ( + f"Unexpected expert weight name: {weight_name}" + ) + intermediate_dim = shape[1] if weight_name == "down_proj" else shape[0] + outer_dim = shape[0] if weight_name == "down_proj" else shape[1] + + dim_sum_experts += intermediate_dim + if expert_idx == 0: + dim_sum_experts += outer_dim + + non_expert_params = lora_rank * dim_sum + expert_params = lora_rank * dim_sum_experts + + return ( + (expert_params + non_expert_params) + if not detailed + else { + "expert_params": expert_params, + "non_expert_params": non_expert_params, + "total_params": expert_params + non_expert_params, + } + ) + + +def get_lr(model_name: str, is_lora: bool = True) -> float: + base_lr = 5e-05 + lora_multiplier = 10.0 + + lr = base_lr * lora_multiplier if is_lora else base_lr + if "llama" in model_name.lower(): + exponent_model = 0.781 + elif "qwen" in model_name.lower(): + exponent_model = 0.0775 + else: + raise ValueError(f"Unknown model: {model_name}") + # TODO: sweep to determine LR multipliers for other models + lr = lr * (2000 / _get_hidden_size(model_name)) ** exponent_model + return lr + + +def get_full_finetune_param_count(model_name: str) -> float: + count = 0 + for name, shape in _list_param_shapes_from_safetensors_remote(model_name).items(): + count += np.prod(shape) + return float(count) + + +def get_full_finetune_lr_multiplier(model_name: str): + return 1.0 / math.sqrt(get_full_finetune_param_count(model_name)) + + +def get_lora_lr_multiplier(model_name: str): + """ + Get a model-specific mutliplier for the LR, when training with LoRA. + Given two models A and B, and learning rate LR_A that's known to be optimal for A, + we can guess an optimal learning rate for B as + LR_B = LR_A * get_lora_lr_multiplier(B) / get_lora_lr_multiplier(A) + """ + return get_full_finetune_lr_multiplier( + model_name + ) * get_lora_lr_over_full_finetune_lr(model_name) diff --git a/src/art/tinker/cookbook_v/image_processing_utils.py b/src/art/tinker/cookbook_v/image_processing_utils.py new file mode 100644 index 000000000..1d4051765 --- /dev/null +++ b/src/art/tinker/cookbook_v/image_processing_utils.py @@ -0,0 +1,55 @@ +""" +Utilities for working with image processors. Create new types to avoid needing to import AutoImageProcessor and BaseImageProcessor. + + +Avoid importing AutoImageProcessor and BaseImageProcessor until runtime, because they're slow imports. 
+""" + +from __future__ import annotations + +from functools import cache +from typing import TYPE_CHECKING, Any, TypeAlias + +from PIL import Image + +if TYPE_CHECKING: + # this import takes a few seconds, so avoid it on the module import when possible + from transformers.image_processing_utils import BaseImageProcessor + + ImageProcessor: TypeAlias = BaseImageProcessor +else: + # make it importable from other files as a type in runtime + ImageProcessor: TypeAlias = Any + + +@cache +def get_image_processor(model_name: str) -> ImageProcessor: + model_name = model_name.split(":")[0] + + from transformers.models.auto.image_processing_auto import AutoImageProcessor + + processor = AutoImageProcessor.from_pretrained(model_name, use_fast=True) + assert processor.is_fast, f"Could not load fast image processor for {model_name}" + return processor + + +def resize_image(image: Image.Image, max_size: int) -> Image.Image: + """ + Resize an image so that its longest side is at most max_size pixels. + + Preserves aspect ratio and uses LANCZOS resampling for quality. + Returns the original image if it's already smaller than max_size. + """ + + width, height = image.size + if max(width, height) <= max_size: + return image + + if width > height: + new_width = max_size + new_height = int(height * max_size / width) + else: + new_height = max_size + new_width = int(width * max_size / height) + + return image.resize((new_width, new_height), Image.Resampling.LANCZOS) diff --git a/src/art/tinker/cookbook_v/renderers/__init__.py b/src/art/tinker/cookbook_v/renderers/__init__.py new file mode 100644 index 000000000..0e03e8a34 --- /dev/null +++ b/src/art/tinker/cookbook_v/renderers/__init__.py @@ -0,0 +1,155 @@ +""" +Renderers for converting message lists into training and sampling prompts. + +Use viz_sft_dataset to visualize the output of different renderers. E.g., + python -m tinker_cookbook.supervised.viz_sft_dataset dataset_path=Tulu3Builder renderer_name=role_colon +""" + +from ..image_processing_utils import ImageProcessor +from ..tokenizer_utils import Tokenizer + +# Types and utilities used by external code +from .base import ( + # Content part types + ContentPart, + ImagePart, + Message, + # Renderer base + RenderContext, + Renderer, + Role, + TextPart, + ThinkingPart, + ToolCall, + ToolSpec, + TrainOnWhat, + # Utility functions + ensure_text, + format_content_as_string, + get_text_content, + parse_content_blocks, +) + +# Renderer classes used directly by tests +from .deepseek_v3 import DeepSeekV3ThinkingRenderer +from .gpt_oss import GptOssRenderer +from .qwen3 import Qwen3Renderer + + +def get_renderer( + name: str, tokenizer: Tokenizer, image_processor: ImageProcessor | None = None +) -> Renderer: + """Factory function to create renderers by name. + + Args: + name: Renderer name. 
diff --git a/src/art/tinker/cookbook_v/renderers/__init__.py b/src/art/tinker/cookbook_v/renderers/__init__.py
new file mode 100644
index 000000000..0e03e8a34
--- /dev/null
+++ b/src/art/tinker/cookbook_v/renderers/__init__.py
@@ -0,0 +1,155 @@
+"""
+Renderers for converting message lists into training and sampling prompts.
+
+Use viz_sft_dataset to visualize the output of different renderers. E.g.,
+    python -m tinker_cookbook.supervised.viz_sft_dataset dataset_path=Tulu3Builder renderer_name=role_colon
+"""
+
+from ..image_processing_utils import ImageProcessor
+from ..tokenizer_utils import Tokenizer
+
+# Types and utilities used by external code
+from .base import (
+    # Content part types
+    ContentPart,
+    ImagePart,
+    Message,
+    # Renderer base
+    RenderContext,
+    Renderer,
+    Role,
+    TextPart,
+    ThinkingPart,
+    ToolCall,
+    ToolSpec,
+    TrainOnWhat,
+    # Utility functions
+    ensure_text,
+    format_content_as_string,
+    get_text_content,
+    parse_content_blocks,
+)
+
+# Renderer classes used directly by tests
+from .deepseek_v3 import DeepSeekV3ThinkingRenderer
+from .gpt_oss import GptOssRenderer
+from .qwen3 import Qwen3Renderer
+
+
+def get_renderer(
+    name: str, tokenizer: Tokenizer, image_processor: ImageProcessor | None = None
+) -> Renderer:
+    """Factory function to create renderers by name.
+
+    Args:
+        name: Renderer name. Supported values:
+            - "role_colon": Simple role:content format
+            - "llama3": Llama 3 chat format
+            - "qwen3": Qwen3 with thinking enabled
+            - "qwen3_vl": Qwen3 vision-language with thinking
+            - "qwen3_vl_instruct": Qwen3 vision-language instruct (no thinking)
+            - "qwen3_disable_thinking": Qwen3 with thinking disabled
+            - "qwen3_instruct": Qwen3 instruct 2507 (no thinking)
+            - "deepseekv3": DeepSeek V3 (defaults to non-thinking mode)
+            - "deepseekv3_disable_thinking": DeepSeek V3 non-thinking (alias)
+            - "deepseekv3_thinking": DeepSeek V3 thinking mode
+            - "kimi_k2": Kimi K2 Thinking format
+            - "gpt_oss_no_sysprompt": GPT-OSS without system prompt
+            - "gpt_oss_low_reasoning": GPT-OSS with low reasoning
+            - "gpt_oss_medium_reasoning": GPT-OSS with medium reasoning
+            - "gpt_oss_high_reasoning": GPT-OSS with high reasoning
+        tokenizer: The tokenizer to use.
+        image_processor: Required for VL renderers.
+
+    Returns:
+        A Renderer instance.
+
+    Raises:
+        ValueError: If the renderer name is unknown.
+        AssertionError: If a VL renderer is requested without an image_processor.
+    """
+    # Import renderer classes lazily to avoid circular imports and keep exports minimal
+    from .deepseek_v3 import DeepSeekV3DisableThinkingRenderer
+    from .gpt_oss import GptOssRenderer
+    from .kimi_k2 import KimiK2Renderer
+    from .llama3 import Llama3Renderer
+    from .qwen3 import (
+        Qwen3DisableThinkingRenderer,
+        Qwen3InstructRenderer,
+        Qwen3VLInstructRenderer,
+        Qwen3VLRenderer,
+    )
+    from .role_colon import RoleColonRenderer
+
+    if name == "role_colon":
+        return RoleColonRenderer(tokenizer)
+    elif name == "llama3":
+        return Llama3Renderer(tokenizer)
+    elif name == "qwen3":
+        return Qwen3Renderer(tokenizer)
+    elif name == "qwen3_vl":
+        assert image_processor is not None, (
+            "qwen3_vl renderer requires an image_processor"
+        )
+        return Qwen3VLRenderer(tokenizer, image_processor)
+    elif name == "qwen3_vl_instruct":
+        assert image_processor is not None, (
+            "qwen3_vl_instruct renderer requires an image_processor"
+        )
+        return Qwen3VLInstructRenderer(tokenizer, image_processor)
+    elif name == "qwen3_disable_thinking":
+        return Qwen3DisableThinkingRenderer(tokenizer)
+    elif name == "qwen3_instruct":
+        return Qwen3InstructRenderer(tokenizer)
+    elif name == "deepseekv3":
+        # Default to non-thinking mode (matches HF template default behavior)
+        return DeepSeekV3DisableThinkingRenderer(tokenizer)
+    elif name == "deepseekv3_disable_thinking":
+        # Alias for backward compatibility
+        return DeepSeekV3DisableThinkingRenderer(tokenizer)
+    elif name == "deepseekv3_thinking":
+        return DeepSeekV3ThinkingRenderer(tokenizer)
+    elif name == "kimi_k2":
+        return KimiK2Renderer(tokenizer)
+    elif name == "gpt_oss_no_sysprompt":
+        return GptOssRenderer(tokenizer, use_system_prompt=False)
+    elif name == "gpt_oss_low_reasoning":
+        return GptOssRenderer(tokenizer, use_system_prompt=True, reasoning_effort="low")
+    elif name == "gpt_oss_medium_reasoning":
+        return GptOssRenderer(
+            tokenizer, use_system_prompt=True, reasoning_effort="medium"
+        )
+    elif name == "gpt_oss_high_reasoning":
+        return GptOssRenderer(
+            tokenizer, use_system_prompt=True, reasoning_effort="high"
+        )
+    else:
+        raise ValueError(f"Unknown renderer: {name}")
+
+
+__all__ = [
+    # Types
+    "ContentPart",
+    "ImagePart",
+    "Message",
+    "Role",
+    "TextPart",
+    "ThinkingPart",
+    "ToolCall",
+    "ToolSpec",
+    # Renderer base
+    "RenderContext",
+    "Renderer",
+    "TrainOnWhat",
+    # Utility functions
+    "ensure_text",
+    "format_content_as_string",
+    "get_text_content",
+    "parse_content_blocks",
+    # Factory
+    "get_renderer",
+    # Renderer classes (used by tests)
+    "DeepSeekV3ThinkingRenderer",
+    "GptOssRenderer",
+    "Qwen3Renderer",
+]
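Typical factory usage, assuming a Hugging Face tokenizer satisfies this module's Tokenizer protocol (the model name is illustrative):

    from transformers import AutoTokenizer

    from art.tinker.cookbook_v.renderers import get_renderer

    tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-4B-Instruct-2507")
    renderer = get_renderer("qwen3_instruct", tokenizer)
    stop_sequences = renderer.get_stop_sequences()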
"parse_content_blocks", + # Factory + "get_renderer", + # Renderer classes (used by tests) + "DeepSeekV3ThinkingRenderer", + "GptOssRenderer", + "Qwen3Renderer", +] diff --git a/src/art/tinker/cookbook_v/renderers/base.py b/src/art/tinker/cookbook_v/renderers/base.py new file mode 100644 index 000000000..b46874e95 --- /dev/null +++ b/src/art/tinker/cookbook_v/renderers/base.py @@ -0,0 +1,1016 @@ +""" +Base types, utilities, and abstract Renderer class for message rendering. + +Use viz_sft_dataset to visualize the output of different renderers. E.g., + python -m tinker_cookbook.supervised.viz_sft_dataset dataset_path=Tulu3Builder renderer_name=role_colon +""" + +from abc import ABC, abstractmethod +from dataclasses import dataclass +from enum import StrEnum +import io +import json +import logging +import re +from typing import Literal, NotRequired, Optional, Protocol, TypedDict +import urllib.request + +from PIL import Image +import pydantic +import tinker +import torch + +from ..tokenizer_utils import Tokenizer + +logger = logging.getLogger(__name__) + +# Tool types are based on kosong (https://github.com/MoonshotAI/kosong). + + +class StrictBase(pydantic.BaseModel): + """ + Pydantic base class that's immutable and doesn't silently ignore extra fields. + """ + + model_config = pydantic.ConfigDict(frozen=True, extra="forbid") + + def __str__(self) -> str: + return repr(self) + + +class ToolCall(StrictBase): + """ + Structured tool invocation following OpenAI/kosong format. + + This represents a request to invoke a tool/function. The structure follows + the OpenAI function calling format for compatibility with various LLM APIs. + + Example: + tool_call = ToolCall( + function=ToolCall.FunctionBody( + name="search", + arguments='{"query_list": ["python async", "pydantic validation"]}' + ), + id="call_abc123" + ) + """ + + class FunctionBody(pydantic.BaseModel): + """ + Tool call function body containing the tool name and arguments. + + The arguments field must be a valid JSON string that will be parsed + by the tool implementation. + """ + + name: str + """The name of the tool to be called.""" + arguments: str + """Arguments of the tool call in JSON string format.""" + + type: Literal["function"] = "function" + """Tool call type, must be 'function' for compatibility.""" + + id: str | None = None + """Optional unique identifier for tracking this specific tool call.""" + + function: FunctionBody + """The function body containing tool name and arguments.""" + + +class UnparsedToolCall(StrictBase): + """ + Represents a tool call that failed to parse from model output. + + When a model generates text that looks like a tool call but cannot be + parsed (e.g., invalid JSON), this class captures the raw text and error + for debugging and optional re-rendering. + + Example: + unparsed = UnparsedToolCall( + raw_text='{"name": "search", invalid json}', + error="Invalid JSON: Expecting property name" + ) + """ + + raw_text: str + """The original text from the model that failed to parse.""" + + error: str + """Description of what went wrong during parsing.""" + + +class TextPart(TypedDict): + """A chunk of text content in a message, usually meant to be visible to the user + (unlike ThinkingPart, which is internal reasoning).""" + + type: Literal["text"] + text: str + + +class ImagePart(TypedDict): + """ + A chunk of image content in a message. 
+ """ + + type: Literal["image"] + image: str | Image.Image + + +class ThinkingPart(TypedDict): + """Model's internal reasoning (chain-of-thought) as a content part.""" + + type: Literal["thinking"] + thinking: str # The thinking/reasoning content + + +class ToolCallPart(TypedDict): + """Tool/function call as a content part, preserving position in content list.""" + + type: Literal["tool_call"] + tool_call: ToolCall # The parsed tool call object + + +class UnparsedToolCallPart(TypedDict): + """Tool call that failed to parse, preserving raw text for debugging.""" + + type: Literal["unparsed_tool_call"] + raw_text: str # Raw text of the tool call block including tags + error: str # Description of what went wrong during parsing + + +# Container for a part of a multimodal message content +ContentPart = TextPart | ImagePart | ThinkingPart | ToolCallPart | UnparsedToolCallPart + + +# NOTE: we use a broad type definition for the role to be flexible +# Common roles are "user", "assistant", "system", "tool" +Role = str + +# Content is a string or a list of parts +Content = str | list[ContentPart] + + +class Message(TypedDict): + """ + Container for a single turn in a multi-turn conversation. + + Args: + + role: Role + String that denotes the source of the message, typically system, user, assistant, and tool. + content: Content + Content of the message, can be a string, or a list of ContentPart. + When content is a list, it can contain TextPart, ImagePart, and ThinkingPart elements. + ThinkingPart represents the model's internal reasoning (chain-of-thought). + tool_calls: NotRequired[list[ToolCall]] + Optional sequence of successfully parsed tool calls generated by the model. + unparsed_tool_calls: NotRequired[list[UnparsedToolCall]] + Optional sequence of tool calls that failed to parse (e.g., invalid JSON). + The raw text is preserved for debugging or re-rendering. + trainable: NotRequired[bool] + Optional indicator whether this message should contribute to the training loss. + tool_call_id: NotRequired[str] + For tool result messages (role="tool"): ID correlating this result to a specific + tool call. Used by renderers whose wire format references calls by ID (e.g., Kimi K2 + renders "## Return of {tool_call_id}"). The value should match ToolCall.id from the + assistant's tool_calls. Not all formats use IDs - GptOss/Harmony does not. + name: NotRequired[str] + For tool result messages (role="tool"): The function name that was called. + Required by GptOss (renders "<|start|>functions.{name}..."), optional for others. + When constructing tool results, include both name and tool_call_id when available + since different renderers require different fields. + + """ + + role: Role + content: Content + + tool_calls: NotRequired[list[ToolCall]] + unparsed_tool_calls: NotRequired[list["UnparsedToolCall"]] + trainable: NotRequired[bool] + tool_call_id: NotRequired[str] + name: NotRequired[str] + + +@dataclass +class RenderContext: + """ + Context passed to render_message for rendering a single message. + + This allows renderers to access information about the message's position + in the conversation without changing the render_message signature for + each new piece of context needed. 
+ """ + + idx: int + """Index of the message in the conversation (0-based).""" + + is_last: bool + """Whether this is the last message in the conversation.""" + + prev_message: Message | None = None + """The previous message in the conversation, if any.""" + + +class ToolSpec(TypedDict): + """ + Tool specification following the OpenAI function calling format. + + This represents a tool that can be called by the model, including its name, + description, and parameter schema. + + Example: + tool_spec: ToolSpec = { + "name": "get_weather", + "description": "Get the current weather for a location", + "parameters": { + "type": "object", + "properties": { + "location": {"type": "string", "description": "City name"}, + }, + "required": ["location"], + }, + } + """ + + name: str + """The name of the tool.""" + description: str + """A description of what the tool does.""" + parameters: dict + """JSON Schema object describing the tool's parameters.""" + + +def ensure_text(content: Content) -> str: + """ + Assert that content is text-only and return it as a string. + + Raises ValueError if content contains images or multiple parts. + Use this to validate that message content is text-only before + processing it in code paths that don't support multimodal content. + """ + if isinstance(content, str): + return content + if len(content) == 1 and content[0]["type"] == "text": + return content[0]["text"] + raise ValueError( + f"Expected text content, got multimodal content with {len(content)} parts" + ) + + +def ensure_list(content: Content) -> list[ContentPart]: + """Normalize content to list form. Wraps string content in a TextPart.""" + if isinstance(content, str): + return [TextPart(type="text", text=content)] + return content + + +def remove_thinking(parts: list[ContentPart]) -> list[ContentPart]: + """Filter out ThinkingPart elements from a content part list.""" + return [p for p in parts if p["type"] != "thinking"] + + +def get_text_content(message: Message) -> str: + """Extract text content from message, stripping thinking parts. + + Use this after parse_response when you only need the text output, + ignoring any thinking/reasoning content. + """ + content = message["content"] + if isinstance(content, str): + return content + return "".join(p["text"] for p in content if p["type"] == "text") + + +def format_content_as_string(content: Content, separator: str = "\n") -> str: + """Format message content as a string, preserving all part types. + + Unlike get_text_content which only extracts text parts, this formats + all content parts (thinking, text, tool_call, etc.) as a readable string. + + This is useful for compatibility with APIs that expect string content + (e.g., OpenAI Chat Completions API), but we don't recommend it if you + need to ensure correctness - prefer working with structured content directly + and using build_generation_prompt to convert to tokens. + + Args: + content: Message content (string or list of ContentPart). + separator: String to join parts with. Default is newline. + + Returns: + Formatted string representation of all content parts. 
+ """ + if isinstance(content, str): + return content + + parts = [] + for p in content: + if p["type"] == "thinking": + parts.append(f"{p['thinking']}") + elif p["type"] == "text": + parts.append(p["text"]) + elif p["type"] == "tool_call": + tc = p["tool_call"] + parts.append( + f"{tc.function.name}({tc.function.arguments})" + ) + elif p["type"] == "unparsed_tool_call": + parts.append(f"{p['raw_text']}") + else: + raise ValueError(f"Unknown content part type: {p['type']}") + return separator.join(parts) + + +def _parse_tool_call_json( + tool_call_str: str, raw_text: str +) -> ToolCall | UnparsedToolCall: + """Parse tool call JSON. Returns UnparsedToolCall on failure.""" + try: + tool_call = json.loads(tool_call_str.strip()) + except json.JSONDecodeError as e: + return UnparsedToolCall(raw_text=raw_text, error=f"Invalid JSON: {e}") + + if not isinstance(tool_call, dict): + return UnparsedToolCall( + raw_text=raw_text, error="Tool call is not a JSON object" + ) + + name = tool_call.get("name") + arguments = tool_call.get("arguments") + tool_id = tool_call.get("id") + + if not isinstance(name, str): + return UnparsedToolCall( + raw_text=raw_text, error="Missing or invalid 'name' field" + ) + if not isinstance(arguments, dict): + return UnparsedToolCall( + raw_text=raw_text, error="Missing or invalid 'arguments' field" + ) + + if tool_id is not None and not isinstance(tool_id, str): + tool_id = None + + # TODO: arguments is already a dict from json.loads above, but ToolCall.FunctionBody.arguments + # expects a JSON string. This round-trip (loads then dumps) is wasteful. Consider changing + # FunctionBody.arguments to accept dict directly, or parse tool calls more lazily. + # We may want to revisit the decision to store arguments as unparsed JSON string. + return ToolCall( + function=ToolCall.FunctionBody(name=name, arguments=json.dumps(arguments)), + id=tool_id, + ) + + +def parse_content_blocks(content: str) -> list[ContentPart] | None: + """ + Parse a string with ... and ... tags. + + Handles interleaved thinking, tool call, and text blocks, returning parts + in order. Empty parts are omitted. Failed tool call parses are included as + UnparsedToolCallPart to preserve ordering. + + Whitespace is preserved exactly - roundtrip (parse then render) is identity. + + Args: + content: String potentially containing and/or blocks. + + Returns: + List of ContentPart (ThinkingPart, TextPart, ToolCallPart, UnparsedToolCallPart) + in order. Returns None if no special tags are found - caller should use + the original string for backward compatibility. + + Example: + >>> parse_content_blocks("step 1answer{...}more") + [ + ThinkingPart(type="thinking", thinking="step 1"), + TextPart(type="text", text="answer"), + ToolCallPart(type="tool_call", tool_call=ToolCall(...)), + TextPart(type="text", text="more"), + ] + """ + if "" not in content and "" not in content: + return None # No special blocks, caller should use original string + + parts: list[ContentPart] = [] + pos = 0 + + # Pattern to find both ... and ... 
+
+
+def parse_think_blocks(content: str) -> list[ContentPart] | None:
+    """
+    Parse a string with only <think>...</think> tags into ThinkingPart/TextPart list.
+
+    This is a simpler version of parse_content_blocks for renderers that use
+    non-standard tool call formats (like DeepSeek's <|tool▁calls▁begin|>).
+
+    Whitespace is preserved exactly - roundtrip (parse then render) is identity.
+
+    Args:
+        content: String potentially containing <think>...</think> blocks.
+
+    Returns:
+        List of ThinkingPart and TextPart in order. None if no <think> tags found.
+    """
+    if "<think>" not in content:
+        return None
+
+    parts: list[ContentPart] = []
+    pos = 0
+    pattern = re.compile(r"<think>(.*?)</think>", re.DOTALL)
+
+    for match in pattern.finditer(content):
+        text_before = content[pos : match.start()]
+        if text_before:  # Skip only truly empty strings
+            parts.append(TextPart(type="text", text=text_before))
+
+        thinking = match.group(1)
+        if thinking:  # Skip empty thinking blocks
+            parts.append(ThinkingPart(type="thinking", thinking=thinking))
+
+        pos = match.end()
+
+    remaining = content[pos:]
+    if remaining:  # Skip only truly empty strings
+        parts.append(TextPart(type="text", text=remaining))
+
+    return parts
+
+
+def _tool_call_payload(tool_call: ToolCall) -> dict[str, object]:
+    """Minimal JSON payload for embedding in <tool_call> blocks."""
+    # Convert from nested structure to flat format for compatibility
+    return {
+        "name": tool_call.function.name,
+        "arguments": json.loads(tool_call.function.arguments),
+    }
+
+
+@dataclass(frozen=True)
+class RenderedMessage:
+    """
+    Container for parts of a rendered message, structured for loss masking.
+
+    A rendered message is split into header and output to control which tokens receive
+    training loss. In the simplest case (where the full conversation is formed by
+    concatenation), building a supervised example from messages [m_0, ..., m_{n-1}]
+    produces:
+
+        tokens = BOS + header_0 + output_0 + header_1 + output_1 + ... + header_{n-1} + output_{n-1}
+
+    However, some renderers modify this structure. For example, Qwen3Renderer strips
+    thinking blocks from historical assistant messages. Such renderers must override
+    build_supervised_example to match their build_generation_prompt behavior.
+
+    Attributes:
+        output: What the model generates for this turn: the message text/images plus
+            end-of-turn tokens. This is the trainable portion.
+            Examples: " Hello world\\n\\n" (RoleColon), "Hello world<|eot_id|>" (Llama3).
+        header: Role identifier and delimiters that introduce the turn. This is what the
+            model sees but does not generate.
+            Examples: "User:" (RoleColon), "<|start_header_id|>user<|end_header_id|>\\n\\n" (Llama3).
+            Typically receives zero training weight.
+        stop_overlap: Edge case field for formats where the stop sequence spans message
+            boundaries. Most renderers (Llama3, Qwen3, DeepSeek, etc.) don't use this;
+            their stop tokens are included in output.
+
+            Only RoleColonRenderer uses this. Its stop sequence is "\\n\\nUser:", where "\\n\\n"
+            ends the output but "User:" would duplicate the next message's header. To avoid
+            duplication, "User:" is stored here and only appended for the last message in
+            supervised training. The name "stop_overlap" reflects that these tokens are the
+            overlap between the stop sequence and the next message's header.
+    """
+
+    output: list[tinker.ModelInputChunk]
+    """What the model generates for this turn."""
+
+    header: tinker.EncodedTextChunk | None = None
+    """Role identifier and delimiters that introduce the turn."""
+
+    stop_overlap: tinker.EncodedTextChunk | None = None
+    """Tokens that overlap between stop sequence and next message's header."""
+
+
+class TrainOnWhat(StrEnum):
+    LAST_ASSISTANT_MESSAGE = "last_assistant_message"
+    ALL_ASSISTANT_MESSAGES = "all_assistant_messages"
+    ALL_MESSAGES = "all_messages"
+    ALL_TOKENS = "all_tokens"
+    ALL_USER_AND_SYSTEM_MESSAGES = "all_user_and_system_messages"
+    CUSTOMIZED = "customized"
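A sketch of how these modes affect the returned per-token weights (renderer and message as in the earlier examples; every token receives exactly one weight):

    model_input, weights = renderer.build_supervised_example(
        [message], train_on_what=TrainOnWhat.LAST_ASSISTANT_MESSAGE
    )
    # weights is 1.0 over the last assistant output (including its end-of-turn
    # tokens) and 0.0 over BOS and role headers.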
+
+
+class Renderer(ABC):
+    """
+    Abstract base class for rendering message lists into training and sampling prompts.
+
+    Subclasses must implement:
+    - get_stop_sequences(): Return stop tokens/strings for sampling
+    - render_message(): Break a message into header/output/stop_overlap components
+    - parse_response(): Convert sampled tokens back into a Message
+
+    The default build_generation_prompt and build_supervised_example implementations
+    assume simple concatenation of rendered messages. Override these if your renderer
+    modifies the conversation structure (e.g., stripping thinking blocks from history).
+    """
+
+    tokenizer: Tokenizer
+
+    def __init__(self, tokenizer: Tokenizer):
+        self.tokenizer = tokenizer
+
+    @property
+    def has_extension_property(self) -> bool:
+        """Whether this renderer satisfies the sequence extension property.
+
+        A renderer has the extension property if, for any multi-turn conversation,
+        calling build_generation_prompt at each successive assistant turn produces
+        token sequences where each is a prefix of the next. This enables:
+        - Merging multiple timesteps into a single training datum
+        - KV-cache reuse during sampling
+        - O(T) compute scaling instead of O(T^2) for T-turn trajectories
+
+        Renderers that strip thinking blocks from history (like Qwen3Renderer with
+        strip_thinking_from_history=True) do NOT have this property because
+        timestep 1's full sequence is not a prefix of the observation at timestep 2.
+
+        See docs/rl/sequence-extension.mdx for details.
+        """
+        return False
+
+    @property
+    def _bos_tokens(self) -> list[int]:
+        return []
+
+    @abstractmethod
+    def get_stop_sequences(self) -> list[str] | list[int]:
+        """Return the stop sequences used when sampling from this renderer."""
+        ...
+
+    @abstractmethod
+    def render_message(self, message: Message, ctx: RenderContext) -> RenderedMessage:
+        """
+        Render a single message into its header/output/stop_overlap components.
+
+        This method breaks down a message into parts for loss masking. See RenderedMessage
+        for detailed semantics of each component.
+
+        Args:
+            message: The message to render.
+            ctx: Context about the message's position in the conversation, including:
+                - idx: The index of this message (0-based)
+                - is_last: Whether this is the last message
+                - prev_message: The previous message, if any
+
+        Returns:
+            RenderedMessage with header, output, and optionally stop_overlap.
+        """
+        ...
+
+    @abstractmethod
+    def parse_response(self, response: list[int]) -> tuple[Message, bool]:
+        """
+        Parse sampled tokens back into a Message.
+
+        Args:
+            response: Token IDs returned from sampling.
+
+        Returns:
+            A tuple of (message, success). If success is False, the response could not
+            be parsed (e.g., missing stop token), but a best-effort message is still returned.
+        """
+        ...
+
+    def to_openai_message(self, message: Message) -> dict:
+        """
+        Convert a Message to OpenAI chat completions API format.
+
+        The returned object can be passed into the transformers library's
+        apply_chat_template function, which is useful for testing purposes.
+
+        It's also useful for querying models that are being served through
+        OpenAI-compatible APIs (OpenRouter, vLLM, etc.).
+
+        The base implementation handles:
+        - Basic role/content conversion
+        - tool_calls conversion from ToolCall objects to OpenAI dict format
+        - tool_call_id and name for tool response messages
+
+        Subclasses should override this to handle model-specific features like
+        reasoning_content for thinking models.
+
+        Args:
+            message: The Message to convert.
+
+        Returns:
+            A dict in OpenAI API message format.
+        """
+        result: dict = {"role": message["role"]}
+
+        # Handle content
+        content = message["content"]
+        if isinstance(content, str):
+            result["content"] = content
+        else:
+            # Structured content with ThinkingPart/TextPart/etc.
+            # Base implementation: concatenate text parts, render thinking as <think> tags
+            # TODO: Add proper support for ImagePart by converting to OpenAI-style content parts
+            # (list of {"type": "image_url", "image_url": {...}} dicts)
+            parts = []
+            for p in content:
+                if p["type"] == "text":
+                    parts.append(p["text"])
+                elif p["type"] == "thinking":
+                    parts.append(f"<think>{p['thinking']}</think>")
+                elif p["type"] == "image":
+                    raise NotImplementedError(
+                        "to_openai_message does not support ImagePart content. "
+                        "Images would be silently dropped, leading to incorrect HF template "
+ ) + # Skip tool_call and unparsed_tool_call parts - handled via tool_calls field + result["content"] = "".join(parts) + + # Handle tool_calls (convert ToolCall objects to OpenAI format) + if "tool_calls" in message and message["tool_calls"]: + result["tool_calls"] = [ + { + "type": "function", + "id": tc.id, + "function": { + "name": tc.function.name, + "arguments": tc.function.arguments, + }, + } + for tc in message["tool_calls"] + ] + + # Handle tool response fields + if message["role"] == "tool": + if "tool_call_id" in message: + result["tool_call_id"] = message["tool_call_id"] + if "name" in message: + result["name"] = message["name"] + + return result + + def create_conversation_prefix_with_tools( + self, tools: list[ToolSpec], system_prompt: str = "" + ) -> list[Message]: + """Create message(s) with tool specifications to prepend to conversations. + + Returns one or more messages to prepend to the conversation. This is the + standard way to add tools - the returned messages should be placed at the + start of your message list before user/assistant messages. + + Args: + tools: List of tool specifications. + system_prompt: The system prompt content. + + Returns: + List of messages to prepend to the conversation. + + Raises: + NotImplementedError: If the renderer doesn't support tool calling. + """ + raise NotImplementedError + + def _get_generation_suffix(self, role: Role, ctx: RenderContext) -> list[int]: + """Return tokens to append to the prompt for generation. + + This is called by build_generation_prompt to add the role header that + precedes the model's response. The default implementation renders an + empty message and extracts its header tokens. + + Args: + role: The role to generate (usually "assistant") + ctx: Context for the generation suffix. Note that ctx.is_last is True + because we're rendering the header for the final (to-be-generated) message. + + Returns: + List of token IDs for the role header. Examples in string form: + - Llama3: "<|start_header_id|>assistant<|end_header_id|>\n\n" + - Qwen3: "<|im_start|>assistant\n" + - DeepSeek: "<|Assistant|>" (single special token) + """ + # Default: render an empty message and use its header tokens + rendered = self.render_message(Message(role=role, content=""), ctx) + if rendered.header: + return list(rendered.header.tokens) + return [] + + def build_generation_prompt( + self, + messages: list[Message], + role: Role = "assistant", + prefill: str | None = None, + ) -> tinker.ModelInput: + """ + Generates tokens for sampling from the model. + + Args: + messages: a list of messages to render. + role: the role of the partial message to be completed. + prefill: an optional string to prefill in the model's generation. 
+ """ + + chunks: list[tinker.types.ModelInputChunk] = [] + if self._bos_tokens: + chunks.append(tinker.types.EncodedTextChunk(tokens=self._bos_tokens)) + for idx, message in enumerate(messages): + ctx = RenderContext( + idx=idx, + is_last=(idx == len(messages) - 1), + prev_message=messages[idx - 1] if idx > 0 else None, + ) + rendered_message = self.render_message(message, ctx) + header_chunk = rendered_message.header + output_chunks = rendered_message.output + if header_chunk: + chunks.append(header_chunk) + chunks.extend([x for x in output_chunks if x]) + + suffix_ctx = RenderContext( + idx=len(messages), + is_last=True, + prev_message=messages[-1] if messages else None, + ) + suffix_tokens = self._get_generation_suffix(role, suffix_ctx) + if suffix_tokens: + chunks.append(tinker.types.EncodedTextChunk(tokens=suffix_tokens)) + + if prefill: + chunks.append( + tinker.types.EncodedTextChunk( + tokens=self.tokenizer.encode(prefill, add_special_tokens=False) + ) + ) + return tinker.ModelInput(chunks=chunks) + + def build_supervised_example( + self, + messages: list[Message], + train_on_what: TrainOnWhat = TrainOnWhat.LAST_ASSISTANT_MESSAGE, + ) -> tuple[tinker.ModelInput, torch.Tensor]: + """ + Build tokens and per-token weights for supervised fine-tuning. + + This default implementation concatenates rendered messages in order. Override + this method if your build_generation_prompt does anything that breaks the simple + concatenation assumption—for example, if it strips thinking blocks from history + (like Qwen3Renderer), injects default system prompts (like KimiK2Renderer), or + otherwise modifies the token sequence. + + The supervised example tokens should match what build_generation_prompt would + produce for the same conversation prefix, so the model trains on the same + distribution it sees at inference time. + + Args: + messages: A list of messages to render. + train_on_what: Controls which tokens receive non-zero training weight: + - LAST_ASSISTANT_MESSAGE: Only the last assistant message + - ALL_ASSISTANT_MESSAGES: All assistant messages + - ALL_MESSAGES: All messages (but not headers) + - ALL_TOKENS: Everything including headers + - ALL_USER_AND_SYSTEM_MESSAGES: User and system messages only + - CUSTOMIZED: Use the 'trainable' field on each message + + Returns: + A tuple of (model_input, weights) where weights is a 1D tensor with the + same length as the total number of tokens. + """ + # Warn if training on multiple assistant messages with a renderer that doesn't + # satisfy the extension property. In that case, each assistant message sees a + # different context prefix, so they should be trained as separate examples. + # NOTE: This warning only covers ALL_ASSISTANT_MESSAGES. Other modes that train + # multiple assistant messages (e.g., ALL_MESSAGES, ALL_TOKENS, CUSTOMIZED) should + # be used with caution when has_extension_property=False. + if ( + train_on_what == TrainOnWhat.ALL_ASSISTANT_MESSAGES + and not self.has_extension_property + ): + logger.warning( + "WARNING: Using train_on_what=ALL_ASSISTANT_MESSAGES with a renderer that " + "does not satisfy the extension property (has_extension_property=False). " + "This means earlier assistant messages in the conversation see a different " + "token prefix than what build_generation_prompt would produce at that turn. " + "You should instead create separate conversations for each assistant message " + "and call build_supervised_example with train_on_what=LAST_ASSISTANT_MESSAGE " + "for each one. 
+
+    def build_supervised_example(
+        self,
+        messages: list[Message],
+        train_on_what: TrainOnWhat = TrainOnWhat.LAST_ASSISTANT_MESSAGE,
+    ) -> tuple[tinker.ModelInput, torch.Tensor]:
+        """
+        Build tokens and per-token weights for supervised fine-tuning.
+
+        This default implementation concatenates rendered messages in order. Override
+        this method if your build_generation_prompt does anything that breaks the simple
+        concatenation assumption; for example, if it strips thinking blocks from history
+        (like Qwen3Renderer), injects default system prompts (like KimiK2Renderer), or
+        otherwise modifies the token sequence.
+
+        The supervised example tokens should match what build_generation_prompt would
+        produce for the same conversation prefix, so the model trains on the same
+        distribution it sees at inference time.
+
+        Args:
+            messages: A list of messages to render.
+            train_on_what: Controls which tokens receive non-zero training weight:
+                - LAST_ASSISTANT_MESSAGE: Only the last assistant message
+                - ALL_ASSISTANT_MESSAGES: All assistant messages
+                - ALL_MESSAGES: All messages (but not headers)
+                - ALL_TOKENS: Everything including headers
+                - ALL_USER_AND_SYSTEM_MESSAGES: User and system messages only
+                - CUSTOMIZED: Use the 'trainable' field on each message
+
+        Returns:
+            A tuple of (model_input, weights) where weights is a 1D tensor with the
+            same length as the total number of tokens.
+        """
+        # Warn if training on multiple assistant messages with a renderer that doesn't
+        # satisfy the extension property. In that case, each assistant message sees a
+        # different context prefix, so they should be trained as separate examples.
+        # NOTE: This warning only covers ALL_ASSISTANT_MESSAGES. Other modes that train
+        # multiple assistant messages (e.g., ALL_MESSAGES, ALL_TOKENS, CUSTOMIZED) should
+        # be used with caution when has_extension_property=False.
+        if (
+            train_on_what == TrainOnWhat.ALL_ASSISTANT_MESSAGES
+            and not self.has_extension_property
+        ):
+            logger.warning(
+                "WARNING: Using train_on_what=ALL_ASSISTANT_MESSAGES with a renderer that "
+                "does not satisfy the extension property (has_extension_property=False). "
+                "This means earlier assistant messages in the conversation see a different "
+                "token prefix than what build_generation_prompt would produce at that turn. "
+                "You should instead create separate conversations for each assistant message "
+                "and call build_supervised_example with train_on_what=LAST_ASSISTANT_MESSAGE "
+                "for each one. See docs/rl/sequence-extension.mdx for details."
+            )
+
+        model_input_chunks_weights: list[
+            tuple[tinker.types.ModelInputChunk, float]
+        ] = []
+        if self._bos_tokens:
+            model_input_chunks_weights.append(
+                (tinker.types.EncodedTextChunk(tokens=self._bos_tokens), 0.0)
+            )
+
+        for idx, message in enumerate(messages):
+            if train_on_what == TrainOnWhat.CUSTOMIZED:
+                assert "trainable" in message, (
+                    "When using CUSTOMIZED train_on_what, each message must have a "
+                    "trainable field: True if loss is applied on this message, False otherwise"
+                )
+            else:
+                assert "trainable" not in message, (
+                    "When using non-CUSTOMIZED train_on_what, each message must not have a "
+                    "trainable field. Either change train_on_what to CUSTOMIZED or remove "
+                    "the trainable field from the message"
+                )
+
+            is_last_message = idx == len(messages) - 1
+            is_assistant = message["role"] == "assistant"
+            is_user_or_system = message["role"] in ["user", "system"]
+
+            # only apply weight to header if train_on_what is ALL_TOKENS
+            ctx = RenderContext(
+                idx=idx,
+                is_last=is_last_message,
+                prev_message=messages[idx - 1] if idx > 0 else None,
+            )
+            rendered_message = self.render_message(message, ctx)
+            header_part = rendered_message.header
+            output_parts = rendered_message.output
+            stop_overlap_part = rendered_message.stop_overlap
+
+            header_weight = int(train_on_what == TrainOnWhat.ALL_TOKENS)
+            if header_part:
+                model_input_chunks_weights += [(header_part, header_weight)]
+
+            match train_on_what:
+                case TrainOnWhat.LAST_ASSISTANT_MESSAGE:
+                    output_has_weight = is_last_message and is_assistant
+                case TrainOnWhat.ALL_ASSISTANT_MESSAGES:
+                    output_has_weight = is_assistant
+                case TrainOnWhat.ALL_MESSAGES:
+                    output_has_weight = True
+                case TrainOnWhat.ALL_TOKENS:
+                    output_has_weight = True
+                case TrainOnWhat.ALL_USER_AND_SYSTEM_MESSAGES:
+                    output_has_weight = is_user_or_system
+                case TrainOnWhat.CUSTOMIZED:
+                    output_has_weight = message.get("trainable", False)
+                case _:
+                    raise ValueError(f"Unknown train_on_what: {train_on_what}")
+
+            model_input_chunks_weights += [
+                (output_part, int(output_has_weight))
+                for output_part in output_parts
+                if output_part
+            ]
+
+            # stop_overlap completes the stop sequence for formats like RoleColon (e.g., "User:")
+            # Only included for the last message.
+            if is_last_message and stop_overlap_part:
+                model_input_chunks_weights += [
+                    (stop_overlap_part, int(output_has_weight))
+                ]
+
+        weights_data = [
+            w for chunk, w in model_input_chunks_weights for _ in range(chunk.length)
+        ]
+        weights_tensor = torch.tensor(weights_data)
+
+        model_input_chunks = [chunk for chunk, _ in model_input_chunks_weights]
+        return tinker.ModelInput(chunks=model_input_chunks), weights_tensor
+
+
+def tokens_weights_from_strings_weights(
+    strings_weights: list[tuple[str, float]],
+    tokenizer: Tokenizer,
+) -> tuple[torch.Tensor, torch.Tensor]:
+    strings, weights = zip(*strings_weights, strict=True)
+    token_chunks = [
+        tokenizer.encode(s, add_special_tokens=i == 0) for i, s in enumerate(strings)
+    ]
+    weights = torch.cat(
+        [
+            torch.full((len(chunk),), w)
+            for chunk, w in zip(token_chunks, weights, strict=True)
+        ]
+    )
+    tokens = torch.cat([torch.tensor(chunk) for chunk in token_chunks])
+    assert tokens.dtype == torch.int64
+    return tokens, weights
+
+
+def parse_response_for_stop_token(
+    response: list[int], tokenizer: Tokenizer, stop_token: int
+) -> tuple[Message, bool]:
+    """Parse response for a single stop token.
+
+    We expect a properly rendered response to have exactly one stop token; but it may
+    have zero if e.g. the model ran out of tokens when sampling, which will incur a
+    format error. If there are > 1, there is likely a bug in the sampler and we should
+    error.
+    """
+    emt_count = response.count(stop_token)
+    if emt_count == 0:
+        str_response = tokenizer.decode(response)
+        logger.debug(f"Response is not a valid assistant response: {str_response}")
+        return Message(role="assistant", content=str_response), False
+    elif emt_count == 1:
+        str_response = tokenizer.decode(response[: response.index(stop_token)])
+        return Message(role="assistant", content=str_response), True
+    else:
+        raise ValueError(
+            f"When parsing response, expected at most one stop token, but found {emt_count}. "
+            "You probably are using the wrong stop tokens when sampling"
+        )
+
+
+# ============================================================================
+# Image processing utilities (used by VL renderers)
+# ============================================================================
+
+
+class ImageProcessorProtocol(Protocol):
+    merge_size: int
+    patch_size: int
+
+    def get_number_of_image_patches(
+        self, height: int, width: int, images_kwargs: Optional[dict] = None
+    ) -> int:
+        raise NotImplementedError()
+
+
+def image_to_chunk(
+    image_or_str: Image.Image | str, image_processor: ImageProcessorProtocol
+) -> tinker.types.ImageChunk:
+    """
+    Convert a PIL Image to a tinker.types.ImageChunk for QwenVL
+    """
+
+    # load an image from a data URI or a URL
+    if isinstance(image_or_str, str):
+        with urllib.request.urlopen(image_or_str) as response:
+            pil_image = Image.open(io.BytesIO(response.read()))
+
+    # Otherwise the image is a PIL image and can be loaded directly
+    elif isinstance(image_or_str, Image.Image):
+        pil_image = image_or_str
+
+    # Validate the provided data is actually a valid image type
+    else:
+        raise ValueError(
+            "The provided image must be a PIL.Image.Image, URL, or data URI."
+        )
+
+    # Convert to RGB if needed (JPEG doesn't support RGBA/LA/P modes)
+    if pil_image.mode in ("RGBA", "LA", "P"):
+        pil_image = pil_image.convert("RGB")
+
+    img_byte_arr = io.BytesIO()
+    pil_image.save(img_byte_arr, format="JPEG")
+    image_data = img_byte_arr.getvalue()
+
+    width, height = pil_image.size
+    num_image_tokens = (
+        image_processor.get_number_of_image_patches(height, width, images_kwargs={})
+        // image_processor.merge_size**2
+    )
+
+    return tinker.types.ImageChunk(
+        data=image_data,
+        format="jpeg",
+        expected_tokens=num_image_tokens,
+    )
diff --git a/src/art/tinker/cookbook_v/renderers/deepseek_v3.py b/src/art/tinker/cookbook_v/renderers/deepseek_v3.py
new file mode 100644
index 000000000..a16b4843f
--- /dev/null
+++ b/src/art/tinker/cookbook_v/renderers/deepseek_v3.py
@@ -0,0 +1,527 @@
+"""
+DeepSeek V3 family renderers.
+
+Includes:
+- DeepSeekV3ThinkingRenderer: V3 models in thinking mode
+- DeepSeekV3DisableThinkingRenderer: V3 models with thinking disabled
+"""
+
+import json
+import re
+
+import tinker
+
+from ..tokenizer_utils import Tokenizer
+from .base import (
+    Message,
+    RenderContext,
+    RenderedMessage,
+    Renderer,
+    ToolCall,
+    ToolSpec,
+    UnparsedToolCall,
+    ensure_text,
+    parse_response_for_stop_token,
+    parse_think_blocks,
+)
+
+
+class _DeepSeekV3BaseRenderer(Renderer):
+    """
+    Base renderer for DeepSeek V3 models with common rendering logic.
+
+    This is a private base class. Use DeepSeekV3ThinkingRenderer or
+    DeepSeekV3DisableThinkingRenderer instead.
+
+    System messages at position 0 are rendered without role tokens (matching HF template).
+    System messages at later positions require system_role_as_user=True to convert them
+    to the user role.
+
+    The default strip_thinking_from_history=True matches HF behavior where thinking
+    traces are removed from historical assistant messages in multi-turn conversations.
+    Use strip_thinking_from_history=False for multi-turn RL to get the extension property.
+    """
+
+    def __init__(
+        self,
+        tokenizer: Tokenizer,
+        system_role_as_user: bool = False,
+        strip_thinking_from_history: bool = True,
+    ):
+        super().__init__(tokenizer)
+        self.system_role_as_user = system_role_as_user
+        self.strip_thinking_from_history = strip_thinking_from_history
+
+    @property
+    def has_extension_property(self) -> bool:
+        """Extension property depends on the strip_thinking_from_history setting.
+
+        When strip_thinking_from_history=False, thinking traces are preserved in
+        history, so each successive observation is a prefix extension of the previous.
+
+        When strip_thinking_from_history=True (default), thinking traces are stripped
+        from historical messages, breaking the extension property.
+        """
+        return not self.strip_thinking_from_history
+
+    def render_message(self, message: Message, ctx: RenderContext) -> RenderedMessage:
+        """Render a single message to tokens.
+
+        Args:
+            message: The message to render.
+            ctx: Context about the message's position, including:
+                - idx: The index of this message (0-based)
+                - is_last: Whether this is the last message (affects thinking stripping)
+                - prev_message: The previous message, used to detect post-tool formatting
+        """
+        # Check if this assistant message follows a tool response
+        follows_tool = (
+            ctx.prev_message is not None and ctx.prev_message["role"] == "tool"
+        )
+
+        content = message["content"]
+
+        if message["role"] == "system":
+            # HF template collects all system messages at the start without role tokens
+            # We only support this for idx=0; later system messages need system_role_as_user=True
+            content_str = ensure_text(content)
+            if ctx.idx == 0:
+                header_tokens: list[int] = []
+                output_str = content_str
+            elif self.system_role_as_user:
+                # Convert later system messages to user role
+                role_token = self._get_special_token("User")
+                header_tokens = [role_token]
+                output_str = content_str
+            else:
+                raise ValueError(
+                    "DeepSeek only supports system message at start. "
+                    "Use system_role_as_user=True to convert later system messages to user role."
+                )
+        elif message["role"] == "user":
+            role_token = self._get_special_token("User")
+            header_tokens = [role_token]
+            output_str = ensure_text(content)
+        elif message["role"] == "assistant":
+            has_tool_calls = "tool_calls" in message and message["tool_calls"]
+
+            # Determine if we should strip thinking content from this message
+            should_strip_thinking = (
+                self.strip_thinking_from_history
+                and not has_tool_calls
+                and not ctx.is_last
+            )
+
+            if isinstance(content, list):
+                # Structured content - handle with list operations
+                parts = content
+                # Render parts in order, preserving interleaved thinking/text structure.
+                # No separator needed - whitespace is preserved in TextPart for roundtrip identity.
+                rendered_parts = []
+                for p in parts:
+                    if p["type"] == "thinking":
+                        if should_strip_thinking:
+                            # Skip thinking content entirely when stripping
+                            # (header gets added separately to match HF format)
+                            pass
+                        else:
+                            rendered_parts.append(f"<think>{p['thinking']}</think>")
+                    elif p["type"] == "text":
+                        rendered_parts.append(p["text"])
+                    # ToolCallPart handled via message's tool_calls field
+                output_content = "".join(rendered_parts)
+            else:
+                # String content - pass through as-is.
+                # Stripping only works with structured content (ThinkingPart).
+                output_content = content
+
+            if follows_tool:
+                # Post-tool assistant: no role token, content flows directly after tool output
+                header_tokens = []
+                output_str = output_content
+            else:
+                # Normal assistant message
+                role_token = self._get_special_token("Assistant")
+                header_tokens = [role_token]
+                output_str = output_content
+        elif message["role"] == "tool":
+            # Tool responses use special tool output tokens to match HF template
+            header_tokens = self.tokenizer.encode(
+                "<|tool▁output▁begin|>", add_special_tokens=False
+            )
+            output_str = ensure_text(content) + "<|tool▁output▁end|>"
+        else:
+            raise ValueError(f"Unsupported role: {message['role']}")
+
+        # Handle tool calls in assistant messages
+        # HF format: <|tool▁calls▁begin|><|tool▁call▁begin|>name<|tool▁sep|>args<|tool▁call▁end|><|tool▁calls▁end|>
+        if "tool_calls" in message and message["tool_calls"]:
+            output_str += "<|tool▁calls▁begin|>"
+            for tool_call in message["tool_calls"]:
+                func_name = tool_call.function.name
+                args = tool_call.function.arguments
+                output_str += f"<|tool▁call▁begin|>{func_name}<|tool▁sep|>{args}<|tool▁call▁end|>"
+            output_str += "<|tool▁calls▁end|>"
+
+        output_tokens = self.tokenizer.encode(output_str, add_special_tokens=False)
+
+        # Add end_of_sentence only for assistant messages with content
+        # (not for empty generation prompt messages)
+        if message["role"] == "assistant" and message["content"]:
+            output_tokens.append(self._end_message_token)
+
+        output: list[tinker.ModelInputChunk] = [
+            tinker.types.EncodedTextChunk(tokens=output_tokens)
+        ]
+        # Only include header if non-empty; tinker rejects empty token chunks with
+        # "Chunk N has empty tokens list". This happens for system messages at idx=0.
+        if header_tokens:
+            return RenderedMessage(
+                header=tinker.types.EncodedTextChunk(tokens=header_tokens),
+                output=output,
+            )
+        else:
+            return RenderedMessage(output=output)
+
+    def _get_special_token(self, name: str) -> int:
+        sep = chr(65372)
+        s = f"<{sep}{name}{sep}>"
+        res = self.tokenizer.encode(s, add_special_tokens=False)
+        assert len(res) == 1, f"Expected single token for {s}, got {res}"
+        return res[0]
+
+    @property
+    def _bos_tokens(self) -> list[int]:
+        return [self._get_special_token("begin▁of▁sentence")]
+
+    @property
+    def _end_message_token(self) -> int:
+        return self._get_special_token("end▁of▁sentence")
+
+    def get_stop_sequences(self) -> list[int]:
+        return [self._end_message_token]
+
+        References:
+        - DeepSeek V3.1 Model Card: https://huggingface.co/deepseek-ai/DeepSeek-V3.1
+        - Chat Template: https://huggingface.co/deepseek-ai/DeepSeek-V3.1/blob/main/assets/chat_template.jinja
+        """
+        tool_calls: list[ToolCall] = []
+        unparsed_tool_calls: list[UnparsedToolCall] = []
+
+        calls_match = re.search(
+            r"<\|tool▁calls▁begin\|>(.*?)<\|tool▁calls▁end\|>", content, re.DOTALL
+        )
+        if not calls_match:
+            return tool_calls, unparsed_tool_calls
+
+        for match in re.finditer(
+            r"<\|tool▁call▁begin\|>(\w+)<\|tool▁sep\|>(.*?)<\|tool▁call▁end\|>",
+            calls_match.group(1),
+            re.DOTALL,
+        ):
+            raw_text = match.group(0)
+            func_name, args_str = match.group(1), match.group(2).strip()
+
+            try:
+                json.loads(args_str)
+                tool_calls.append(
+                    ToolCall(
+                        function=ToolCall.FunctionBody(
+                            name=func_name, arguments=args_str
+                        )
+                    )
+                )
+            except json.JSONDecodeError as e:
+                unparsed_tool_calls.append(
+                    UnparsedToolCall(raw_text=raw_text, error=f"Invalid JSON: {e}")
+                )
+
+        return tool_calls, unparsed_tool_calls
+
+    def parse_response(self, response: list[int]) -> tuple[Message, bool]:
+        assistant_message, parse_success = parse_response_for_stop_token(
+            response, self.tokenizer, self._end_message_token
+        )
+        if not parse_success:
+            return assistant_message, False
+
+        assert isinstance(assistant_message["content"], str)
+        content = assistant_message["content"]
+
+        # Parse DeepSeek-specific tool calls
+        tool_calls, unparsed_tool_calls = self._parse_deepseek_tool_calls(content)
+        if tool_calls:
+            assistant_message["tool_calls"] = tool_calls
+        if unparsed_tool_calls:
+            assistant_message["unparsed_tool_calls"] = unparsed_tool_calls
+
+        # Strip tool calls section from content (both parsed and unparsed)
+        if tool_calls or unparsed_tool_calls:
+            content = re.sub(
+                r"\s*<\|tool▁calls▁begin\|>.*?<\|tool▁calls▁end\|>",
+                "",
+                content,
+                flags=re.DOTALL,
+            )
+            content = content.strip()
+
+        # Parse <think>...</think> blocks into ThinkingPart/TextPart list
+        parts = parse_think_blocks(content)
+        if parts is not None:
+            assistant_message["content"] = parts
+        else:
+            assistant_message["content"] = content
+
+        return assistant_message, True
+
+    def to_openai_message(self, message: Message) -> dict:
+        """Convert a Message to OpenAI API format with reasoning_content for thinking.
+
+        DeepSeek's API uses reasoning_content for thinking models, similar to OpenAI's o1.
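+
+        Example (illustrative sketch):
+
+            msg = Message(role="assistant", content=[
+                ThinkingPart(type="thinking", thinking="Check the forecast."),
+                TextPart(type="text", text="It is sunny."),
+            ])
+            renderer.to_openai_message(msg)
+            # -> {"role": "assistant", "content": "It is sunny.",
+            #     "reasoning_content": "Check the forecast."}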
+ """ + result: dict = {"role": message["role"]} + + content = message["content"] + if isinstance(content, str): + result["content"] = content + else: + # Extract thinking into reasoning_content, keep text in content + thinking_parts = [] + text_parts = [] + for p in content: + if p["type"] == "thinking": + thinking_parts.append(p["thinking"]) + elif p["type"] == "text": + text_parts.append(p["text"]) + + result["content"] = "".join(text_parts) + if thinking_parts: + result["reasoning_content"] = "".join(thinking_parts) + + # Handle tool_calls + if "tool_calls" in message and message["tool_calls"]: + result["tool_calls"] = [ + { + "type": "function", + "id": tc.id, + "function": { + "name": tc.function.name, + "arguments": tc.function.arguments, + }, + } + for tc in message["tool_calls"] + ] + + # Handle tool response fields + if message["role"] == "tool": + if "tool_call_id" in message: + result["tool_call_id"] = message["tool_call_id"] + if "name" in message: + result["name"] = message["name"] + + return result + + def create_conversation_prefix_with_tools( + self, tools: list[ToolSpec], system_prompt: str = "" + ) -> list[Message]: + """Create system message with DeepSeek V3.1 tool specifications. + + DeepSeek V3.1 tool calling requires tools to be described in the system message + using a specific format with ### headers and inline JSON parameters. + + Note: Tool calling is supported in non-thinking mode only. + + References: + - DeepSeek V3.1 Model Card (ToolCall section): https://huggingface.co/deepseek-ai/DeepSeek-V3.1 + - DeepSeek V3.1 Chat Template: https://huggingface.co/deepseek-ai/DeepSeek-V3.1/blob/main/assets/chat_template.jinja + - DeepSeek API Tool Calls Guide: https://api-docs.deepseek.com/guides/tool_calls + """ + tools_text = "" + if tools: + # Format each tool with ### header, description, and parameters + tool_blocks = [] + for tool in tools: + tool_block = f"""### {tool["name"]} +Description: {tool["description"]} + +Parameters: {json.dumps(tool["parameters"])}""" + tool_blocks.append(tool_block) + + tools_text = f""" + +## Tools +You have access to the following tools: + +{chr(10).join(tool_blocks)} + +IMPORTANT: ALWAYS adhere to this exact format for tool use: +<|tool▁calls▁begin|><|tool▁call▁begin|>tool_call_name<|tool▁sep|>tool_call_arguments<|tool▁call▁end|><|tool▁calls▁end|> + +Where: +- `tool_call_name` must be an exact match to one of the available tools +- `tool_call_arguments` must be valid JSON that strictly follows the tool's Parameters Schema +- For multiple tool calls, chain them directly without separators or spaces""" + + return [Message(role="system", content=system_prompt + tools_text)] + + +class DeepSeekV3ThinkingRenderer(_DeepSeekV3BaseRenderer): + """ + Renderer for DeepSeek V3 models in THINKING mode. + + Format: + <|begin_of_sentence|><|User|>question<|Assistant|>reasoninganswer<|end_of_sentence|> + + For non-thinking mode, use DeepSeekV3DisableThinkingRenderer instead. + + Generation prompts include prefill to trigger thinking mode. + Think tags in message content come from ThinkPart rendering. + + When strip_thinking_from_history=True (default), historical assistant messages + get added to header and thinking content stripped, matching HF behavior. + """ + + def render_message(self, message: Message, ctx: RenderContext) -> RenderedMessage: + """Render message, adding to header when stripping thinking from history. 
+
+        HF's thinking=True template uses </think> at the start of historical assistant
+        messages to signal "we're past the thinking phase, here's the answer".
+        """
+        rendered = super().render_message(message, ctx)
+
+        # Add </think> to header for historical assistant messages when stripping thinking.
+        # This matches the base class's should_strip_thinking logic - only historical messages
+        # (not the last one) get </think> added. The last message is the supervised target and
+        # should preserve its format (including any ThinkingPart).
+        follows_tool = (
+            ctx.prev_message is not None and ctx.prev_message["role"] == "tool"
+        )
+        should_add_think_close = (
+            message["role"] == "assistant"
+            and not follows_tool
+            and self.strip_thinking_from_history
+            and not ctx.is_last
+        )
+
+        if should_add_think_close:
+            think_close_tokens = self.tokenizer.encode(
+                "</think>", add_special_tokens=False
+            )
+            old_header_tokens = list(rendered.header.tokens) if rendered.header else []
+            new_header = tinker.EncodedTextChunk(
+                tokens=old_header_tokens + think_close_tokens
+            )
+            rendered = RenderedMessage(header=new_header, output=rendered.output)
+
+        return rendered
+
+    def build_generation_prompt(
+        self,
+        messages: list[Message],
+        role: str = "assistant",
+        prefill: str | None = None,
+    ) -> tinker.ModelInput:
+        """Build generation prompt with <think> prefill to trigger thinking mode.
+
+        Does NOT add <think> when the previous message is a tool response,
+        as tool-use conversations stay in non-thinking mode (matching HF behavior).
+        """
+        # Don't add <think> prefill after tool responses - tool use is non-thinking mode
+        if messages and messages[-1]["role"] == "tool":
+            return super().build_generation_prompt(messages, role, prefill)
+
+        # Add <think> prefill to trigger thinking, combined with any user-provided prefill
+        think_prefill = "<think>" + (prefill or "")
+        return super().build_generation_prompt(messages, role, think_prefill)
+
+    def parse_response(self, response: list[int]) -> tuple[Message, bool]:
+        """Parse response, prepending <think> since we prefill with it.
+
+        When sampling with build_generation_prompt, the <think> tag is part of the
+        prefill and not included in the sampled tokens. The response will be
+        "reasoning</think>answer" so we prepend <think> if necessary.
+        """
+        think_prefix_token: int = self.tokenizer.convert_tokens_to_ids("<think>")  # type: ignore[assignment]
+        think_suffix_token: int = self.tokenizer.convert_tokens_to_ids("</think>")  # type: ignore[assignment]
+
+        # Only prepend <think> if the response doesn't already start with it and contains </think>
+        starts_with_think = len(response) > 0 and response[0] == think_prefix_token
+        if not starts_with_think and think_suffix_token in response:
+            response = [think_prefix_token] + response
+
+        return super().parse_response(response)
+
+
+class DeepSeekV3DisableThinkingRenderer(_DeepSeekV3BaseRenderer):
+    """
+    Renderer for DeepSeek V3 models in NON-THINKING mode.
+
+    Format:
+    <|begin_of_sentence|><|User|>question<|Assistant|></think>answer<|end_of_sentence|>
+
+    The </think> prefix signals to the model to skip reasoning and respond directly.
+    Any <think>...</think> blocks in the content are stripped.
+
+    For thinking mode, use DeepSeekV3ThinkingRenderer instead.
+    """
+
+    @property
+    def has_extension_property(self) -> bool:
+        """Non-thinking mode always satisfies extension - no thinking to strip from history."""
+        return True
+
+    def render_message(self, message: Message, ctx: RenderContext) -> RenderedMessage:
+        """Render message in non-thinking mode.
+
+        For assistant messages (not following tool):
+        - Strip any ThinkingPart from structured content
+        - Add </think> to header to signal non-thinking mode
+        """
+        # Check if this assistant message follows a tool response
+        follows_tool = (
+            ctx.prev_message is not None and ctx.prev_message["role"] == "tool"
+        )
+
+        if message["role"] == "assistant" and not follows_tool:
+            content = message["content"]
+
+            # Strip thinking from content
+            if isinstance(content, list):
+                # Remove ThinkingPart, keep only text
+                text_content = "".join(
+                    p["text"] for p in content if p["type"] == "text"
+                )
+            else:
+                # Strip <think>...</think> blocks from string content
+                text_content = re.sub(
+                    r"<think>.*?</think>", "", content, flags=re.DOTALL
+                )
+
+            message = message.copy()
+            message["content"] = text_content
+
+        # Call parent to get base rendering
+        rendered = super().render_message(message, ctx)
+
+        # Add </think> to header for assistant messages (not following tool)
+        # This goes in header (weight=0) so observation matches generation prompt.
+        if message["role"] == "assistant" and not follows_tool:
+            think_close_tokens = self.tokenizer.encode(
+                "</think>", add_special_tokens=False
+            )
+            old_header_tokens = list(rendered.header.tokens) if rendered.header else []
+            new_header = tinker.EncodedTextChunk(
+                tokens=old_header_tokens + think_close_tokens
+            )
+            rendered = RenderedMessage(header=new_header, output=rendered.output)
+
+        return rendered
diff --git a/src/art/tinker/cookbook_v/renderers/gpt_oss.py b/src/art/tinker/cookbook_v/renderers/gpt_oss.py
new file mode 100644
index 000000000..786a0ee0c
--- /dev/null
+++ b/src/art/tinker/cookbook_v/renderers/gpt_oss.py
@@ -0,0 +1,667 @@
+"""GptOssRenderer - OpenAI's open source model format (Harmony)."""
+
+from datetime import datetime
+import json
+import re
+import warnings
+
+import tinker
+import torch
+
+from ..tokenizer_utils import Tokenizer
+from .base import (
+    ContentPart,
+    Message,
+    RenderContext,
+    RenderedMessage,
+    Renderer,
+    Role,
+    TextPart,
+    ThinkingPart,
+    ToolCall,
+    ToolSpec,
+    TrainOnWhat,
+    UnparsedToolCall,
+    ensure_list,
+    ensure_text,
+)
+
+# =============================================================================
+# TypeScript formatting utilities (stateless, used for Harmony tool definitions)
+# =============================================================================
+
+
+def _json_type_to_typescript(schema: dict) -> str:
+    """Convert a single JSON schema type to TypeScript."""
+    if "oneOf" in schema:
+        return " | ".join(_json_type_to_typescript(s) for s in schema["oneOf"])
+    if "anyOf" in schema:
+        return " | ".join(_json_type_to_typescript(s) for s in schema["anyOf"])
+
+    json_type = schema.get("type", "any")
+
+    if isinstance(json_type, list):
+        return " | ".join(_json_type_to_typescript({"type": t}) for t in json_type)
+
+    if json_type == "string":
+        if "enum" in schema:
+            return " | ".join(json.dumps(v) for v in schema["enum"])
+        base_type = "string"
+    elif json_type == "number" or json_type == "integer":
+        base_type = "number"
+    elif json_type == "boolean":
+        base_type = "boolean"
+    elif json_type == "array":
+        items_type = _json_type_to_typescript(schema.get("items", {}))
+        base_type = f"{items_type}[]"
+    elif json_type == "object":
+        base_type = _json_schema_to_typescript(schema)
+    else:
+        base_type = "any"
+
+    if schema.get("nullable"):
+        return f"{base_type} | null"
+    return base_type
+
+
+def _json_schema_to_typescript(schema: dict) -> str:
+    """Convert JSON schema to an inline TypeScript-ish type string."""
+    if schema.get("type") != "object":
+        return "any"
+
+    properties = schema.get("properties", {})
+    required = set(schema.get("required", []))
+
+    type_parts = []
+    for prop_name, prop_schema in properties.items():
+        prop_type = _json_type_to_typescript(prop_schema)
+        optional = "" if prop_name in required else "?"
+        type_parts.append(f"{prop_name}{optional}: {prop_type}")
+
+    return "{ " + ", ".join(type_parts) + " }"
+
+
+def _schema_comments(schema: dict) -> list[str]:
+    """Extract comments from schema (title, description, examples)."""
+    comments: list[str] = []
+    title = schema.get("title")
+    if title:
+        comments.append(str(title))
+        comments.append("")
+    description = schema.get("description")
+    if description:
+        comments.append(str(description))
+    examples = schema.get("examples")
+    if examples:
+        comments.append("Examples:")
+        for example in examples:
+            comments.append(f"- {json.dumps(example)}")
+    return comments
+
+
+def _format_parameters_block(schema: dict) -> str:
+    """Format function parameters as a TypeScript-style block."""
+    if schema.get("type") != "object" or not schema.get("properties"):
+        return "()"
+
+    lines = []
+    header = "(_:"
+    schema_description = schema.get("description")
+    if schema_description:
+        header += f" // {schema_description}"
+    lines.append(header)
+    lines.append("{")
+
+    properties = schema.get("properties", {})
+    required = set(schema.get("required", []))
+    for prop_name, prop_schema in properties.items():
+        for comment in _schema_comments(prop_schema):
+            lines.append(f"// {comment}")
+        prop_type = _json_type_to_typescript(prop_schema)
+        optional = "" if prop_name in required else "?"
+        default_comment = ""
+        if "default" in prop_schema:
+            default_comment = f" // default: {json.dumps(prop_schema['default'])}"
+        lines.append(f"{prop_name}{optional}: {prop_type},{default_comment}")
+
+    lines.append("})")
+    return "\n".join(lines)
+
+
+def _format_tool_definition(tool: ToolSpec) -> str:
+    """Format a single tool as a Harmony TypeScript-style definition."""
+    lines = []
+    if tool.get("description"):
+        lines.append(f"// {tool['description']}")
+
+    params = tool.get("parameters") or {}
+    params_block = _format_parameters_block(params)
+    lines.append(f"type {tool['name']} = {params_block} => any;")
+    return "\n".join(lines)
+
+
+class GptOssRenderer(Renderer):
+    """
+    Renderer for OpenAI's open source models using the Harmony format.
+
+    Wire format: <|start|>role<|channel|>channel<|message|>content<|end|>
+    No newlines between messages. Last assistant message ends with <|return|>;
+    historical assistant messages end with <|end|>.
+
+    Harmony Channels
+    ----------------
+    Each assistant message specifies a "channel" that controls how the content is
+    interpreted and displayed. An assistant turn can have multiple channel segments
+    (rendered as separate <|start|>assistant... blocks):
+
+    - analysis: Chain-of-thought reasoning (hidden from end users, like <think> blocks)
+    - commentary: Tool calls to developer-defined functions, or user-visible "preambles"
+      before tool calls. Uses `to=functions.name` to route to specific tools.
+    - final: The user-facing response text
+
+    A typical assistant turn with thinking + tool call + final answer would render as:
+    <|start|>assistant<|channel|>analysis<|message|>{thinking}<|end|>
+    <|start|>assistant to=functions.get_weather<|channel|>commentary <|constrain|>json<|message|>{args}<|call|>
+    ... (tool result) ...
+    <|start|>assistant<|channel|>final<|message|>{answer}<|return|>
+
+    Tool Calling
+    ------------
+    - Tool definitions: Go in developer message with TypeScript-style syntax
+    - Tool calls: <|start|>assistant to=functions.name<|channel|>commentary <|constrain|>json<|message|>{args}<|call|>
+    - Tool results: <|start|>functions.name to=assistant<|channel|>commentary<|message|>{result}<|end|>
+
+    Reference: https://raw.githubusercontent.com/openai/openai-cookbook/main/articles/openai-harmony.md
+    """
+
+    # System prompt content (without rendering tokens). Tool channel instructions are NOT
+    # included here; they are only added when tools are defined in the developer message.
+    system_prompt_content = (
+        "You are ChatGPT, a large language model trained by OpenAI.\n"
+        "Knowledge cutoff: 2024-06\n"
+        "Current date: {current_date}\n\n"
+        "Reasoning: {reasoning_effort}\n\n"
+        "# Valid channels: analysis, commentary, final. Channel must be included for every message."
+    )
+    use_system_prompt: bool = False
+    reasoning_effort: str | None = None
+    current_date: str | None = (
+        None  # If use_system_prompt=True, will use the current date if this is None. Set this to a fixed date for deterministic system prompt.
+    )
+
+    def __init__(
+        self,
+        tokenizer: Tokenizer,
+        use_system_prompt: bool = False,
+        reasoning_effort: str | None = None,
+        current_date: str | None = None,
+    ):
+        super().__init__(tokenizer)
+        self.use_system_prompt = use_system_prompt
+        self.reasoning_effort = reasoning_effort
+        self.current_date = current_date
+        assert use_system_prompt == (reasoning_effort is not None), (
+            "Reasoning effort must be set iff using system prompt"
+        )
+
+    # Internal role for OpenAI's system prompt (bypasses system->developer mapping)
+    _INTERNAL_SYSTEM_ROLE = "_gptoss_internal_system"
+
+    def render_message(self, message: Message, ctx: RenderContext) -> RenderedMessage:
+        role = message["role"]
+
+        # Handle tool result messages (role="tool")
+        if role == "tool":
+            return self._render_tool_result_message(message, ctx)
+
+        # Internal system role renders as actual "system" without transformation
+        if role == self._INTERNAL_SYSTEM_ROLE:
+            role = "system"
+        # User-provided "system" messages map to "developer" (per HF template)
+        elif role == "system":
+            role = "developer"
+
+        header_str = f"<|start|>{role}"
+        output_str = ""
+        tool_calls: list[ToolCall] = []
+
+        if message["role"] == "assistant":
+            # Assistant channels. See https://cookbook.openai.com/articles/openai-harmony
+            # Extract text and thinking from content list
+            parts = ensure_list(message["content"])
+            text_content = "".join(p["text"] for p in parts if p["type"] == "text")
+            thinking_content = "".join(
+                p["thinking"] for p in parts if p["type"] == "thinking"
+            )
+            tool_calls = message.get("tool_calls") or []
+
+            # Analysis channel (CoT) - only if there's thinking content
+            if thinking_content:
+                output_str += f"<|channel|>analysis<|message|>{thinking_content}<|end|><|start|>assistant"
+
+            # Handle tool calls (goes in commentary channel)
+            if tool_calls:
+                # If there's text content with tool calls, render as commentary preamble first
+                if text_content:
+                    output_str += f"<|channel|>commentary<|message|>{text_content}<|end|><|start|>assistant"
+                output_str += self._render_tool_calls(tool_calls)
+            else:
+                # Final channel (Response Content)
+                output_str += f"<|channel|>final<|message|>{text_content}"
+        elif message["role"] == "system":
+            # User-provided system messages get "# Instructions" wrapper (rendered as developer)
+            output_str += (
+                f"<|message|># Instructions\n\n{ensure_text(message['content'])}\n\n"
+            )
+        else:
+            # user, developer, internal system, and other roles: plain content
+            output_str += f"<|message|>{ensure_text(message['content'])}"
+
+        # End token logic:
+        # - Tool calls: each tool call already includes <|call|> via _render_tool_calls, no end token needed
+        # - Assistant (no tool calls): <|return|> if last message, <|end|> otherwise
+        # - All other roles: <|end|>
+        if message["role"] == "assistant":
+            if not tool_calls:
+                if ctx.is_last:
+                    output_str += "<|return|>"
+                else:
+                    output_str += "<|end|>"
+            # Note: tool_calls case needs no end token here - _render_tool_calls adds <|call|>
+        else:
+            output_str += "<|end|>"
+
+        header = tinker.types.EncodedTextChunk(
+            tokens=self.tokenizer.encode(header_str, add_special_tokens=False)
+        )
+        output: list[tinker.ModelInputChunk] = [
+            tinker.types.EncodedTextChunk(
+                tokens=self.tokenizer.encode(output_str, add_special_tokens=False)
+            )
+        ]
+        return RenderedMessage(header=header, output=output)
+
+    def _render_tool_calls(self, tool_calls: list[ToolCall]) -> str:
+        """Render tool calls in Harmony commentary channel format.
+
+        Each tool call becomes a separate commentary message:
+        to=functions.name<|channel|>commentary <|constrain|>json<|message|>{args}
+
+        Multiple tool calls are separated by <|call|><|start|>assistant.
+        """
+        result_parts = []
+        for i, tc in enumerate(tool_calls):
+            # Format: to=functions.name<|channel|>commentary <|constrain|>json<|message|>{args}
+            result_parts.append(
+                f" to=functions.{tc.function.name}<|channel|>commentary <|constrain|>json<|message|>"
+                f"{tc.function.arguments}<|call|>"
+            )
+            # If not the last tool call, close message and start new assistant message
+            if i < len(tool_calls) - 1:
+                result_parts.append("<|start|>assistant")
+        return "".join(result_parts)
+
+    def _render_tool_result_message(
+        self, message: Message, ctx: RenderContext
+    ) -> RenderedMessage:
+        """Render a tool result message.
+
+        Format: <|start|>functions.name to=assistant<|channel|>commentary<|message|>{result}<|end|>
+
+        IMPORTANT: The tool name MUST be provided in the message's "name" field.
+        The renderer is stateless and cannot track tool_call_id -> name mappings.
+        When constructing tool result messages, always include the "name" field:
+
+            {"role": "tool", "name": "get_weather", "content": "72 degrees", "tool_call_id": "..."}
+
+        If "name" is missing, this will produce "functions.unknown" which is incorrect.
+        """
+        # Get the tool name from the "name" field
+        tool_name = message.get("name", "")
+        if not tool_name:
+            warnings.warn(
+                "Tool message missing 'name' field. GptOssRenderer requires the 'name' field "
+                "to render tool results correctly. Add 'name' to your tool messages: "
+                "{'role': 'tool', 'name': 'function_name', 'content': '...', 'tool_call_id': '...'}",
+                UserWarning,
+                stacklevel=3,
+            )
+            tool_name = "unknown"
+
+        # Ensure qualified with "functions." prefix
+        if not tool_name.startswith("functions."):
+            tool_name = f"functions.{tool_name}"
+
+        # Build the header with tool name as role and to=assistant
+        header_str = f"<|start|>{tool_name} to=assistant"
+
+        # Tool results go in commentary channel
+        content = ensure_text(message["content"])
+        output_str = f"<|channel|>commentary<|message|>{content}<|end|>"
+
+        header = tinker.types.EncodedTextChunk(
+            tokens=self.tokenizer.encode(header_str, add_special_tokens=False)
+        )
+        output: list[tinker.ModelInputChunk] = [
+            tinker.types.EncodedTextChunk(
+                tokens=self.tokenizer.encode(output_str, add_special_tokens=False)
+            )
+        ]
+        return RenderedMessage(header=header, output=output)
+
+    def _get_system_message(self) -> Message | None:
+        """Return system message if configured, else None.
+
+        Uses internal role to render as actual 'system' (not mapped to 'developer').
+        """
+        if not self.use_system_prompt:
+            return None
+        current_date = (
+            self.current_date
+            if self.current_date is not None
+            else datetime.now().strftime("%Y-%m-%d")
+        )
+        content = self.system_prompt_content.format(
+            current_date=current_date,
+            reasoning_effort=self.reasoning_effort,
+        )
+        return Message(role=self._INTERNAL_SYSTEM_ROLE, content=content)
+
+    @property
+    def _bos_tokens(self) -> list[int]:
+        # GptOss has no BOS token. System prompt is prepended as a message.
+        return []
+
+    def _warn_if_user_system_message(self, messages: list[Message]) -> None:
+        """Warn if user provides system message when use_system_prompt=True."""
+        if self.use_system_prompt and messages and messages[0]["role"] == "system":
+            warnings.warn(
+                "use_system_prompt=True but messages already start with a system message. "
+                "The built-in system prompt will be prepended, resulting in two system messages. "
" + "Either set use_system_prompt=False or remove the system message from your messages.", + UserWarning, + stacklevel=3, + ) + + def build_generation_prompt( + self, + messages: list[Message], + role: Role = "assistant", + prefill: str | None = None, + ) -> tinker.ModelInput: + """Build generation prompt, prepending system message if configured.""" + self._warn_if_user_system_message(messages) + system_msg = self._get_system_message() + if system_msg: + messages = [system_msg] + list(messages) + return super().build_generation_prompt(messages, role, prefill) + + def build_supervised_example( + self, + messages: list[Message], + train_on_what: TrainOnWhat = TrainOnWhat.LAST_ASSISTANT_MESSAGE, + ) -> tuple[tinker.ModelInput, torch.Tensor]: + """Build supervised example, prepending system message if configured.""" + self._warn_if_user_system_message(messages) + system_msg = self._get_system_message() + if system_msg: + messages = [system_msg] + list(messages) + return super().build_supervised_example(messages, train_on_what) + + @property + def _return_token(self) -> int: + res = self.tokenizer.encode("<|return|>", add_special_tokens=False) + assert len(res) == 1, f"Expected single token for <|return|>, got {len(res)}" + return res[0] + + @property + def _call_token(self) -> int: + res = self.tokenizer.encode("<|call|>", add_special_tokens=False) + assert len(res) == 1, f"Expected single token for <|call|>, got {len(res)}" + return res[0] + + def get_stop_sequences(self) -> list[int]: + # Both <|return|> and <|call|> are stop tokens + # <|return|> for normal completion, <|call|> for tool calls + return [self._return_token, self._call_token] + + def parse_response(self, response: list[int]) -> tuple[Message, bool]: + call_count = response.count(self._call_token) + return_count = response.count(self._return_token) + if call_count == 0 and return_count == 0: + str_response = self.tokenizer.decode(response) + return Message(role="assistant", content=str_response), False + if call_count > 1: + raise ValueError( + f"When parsing response, expected at most 1 <|call|> token, but got {call_count}. " + "You probably are using the wrong stop tokens when sampling" + ) + if return_count > 1: + raise ValueError( + f"When parsing response, expected at most 1 <|return|> token, but got {return_count}. " + "You probably are using the wrong stop tokens when sampling" + ) + + stop_idx = response.index(self._return_token) if return_count else None + if call_count: + call_idx = response.index(self._call_token) + if stop_idx is None or call_idx < stop_idx: + stop_idx = call_idx + + assert stop_idx is not None + str_response = self.tokenizer.decode(response[:stop_idx]) + parts, tool_calls, unparsed = self._parse_harmony_output(str_response) + content: list[ContentPart] | str = parts if parts else str_response + + message: Message = {"role": "assistant", "content": content} + if tool_calls: + message["tool_calls"] = tool_calls + if unparsed: + message["unparsed_tool_calls"] = unparsed + + return message, True + + def to_openai_message(self, message: Message) -> dict: + """Convert a Message to OpenAI API format with reasoning_content for thinking. + + GptOss uses the analysis channel for thinking, which maps to reasoning_content + in OpenAI's API format. 
+ """ + result: dict = {"role": message["role"]} + + content = message["content"] + if isinstance(content, str): + result["content"] = content + else: + # Extract thinking into reasoning_content, keep text in content + thinking_parts = [] + text_parts = [] + for p in content: + if p["type"] == "thinking": + thinking_parts.append(p["thinking"]) + elif p["type"] == "text": + text_parts.append(p["text"]) + + result["content"] = "".join(text_parts) + if thinking_parts: + result["reasoning_content"] = "".join(thinking_parts) + + # Handle tool_calls + if "tool_calls" in message and message["tool_calls"]: + result["tool_calls"] = [ + { + "type": "function", + "id": tc.id, + "function": { + "name": tc.function.name, + "arguments": tc.function.arguments, + }, + } + for tc in message["tool_calls"] + ] + + # Handle tool response fields + if message["role"] == "tool": + if "tool_call_id" in message: + result["tool_call_id"] = message["tool_call_id"] + if "name" in message: + result["name"] = message["name"] + + return result + + def _parse_harmony_output( + self, content: str + ) -> tuple[list[ContentPart], list[ToolCall], list[UnparsedToolCall]]: + messages = self._parse_harmony_messages(content) + parts: list[ContentPart] = [] + tool_calls: list[ToolCall] = [] + unparsed: list[UnparsedToolCall] = [] + + for msg in messages: + msg_content = msg["content"] or "" + msg_raw_text = msg["raw_text"] or "" + if not msg_content.strip(): + continue + + recipient = msg["recipient"] + if recipient and recipient.startswith("functions."): + tool_name = recipient.split("functions.", 1)[1] + try: + json.loads(msg_content) + tool_calls.append( + ToolCall( + function=ToolCall.FunctionBody( + name=tool_name, arguments=msg_content.strip() + ), + id=None, # Harmony format doesn't include tool call IDs + ) + ) + except json.JSONDecodeError as e: + unparsed.append( + UnparsedToolCall( + raw_text=msg_raw_text, error=f"Invalid JSON: {e}" + ) + ) + continue + + channel = msg["channel"] + if channel == "analysis": + parts.append(ThinkingPart(type="thinking", thinking=msg_content)) + elif channel == "final": + parts.append(TextPart(type="text", text=msg_content)) + elif channel == "commentary": + parts.append(TextPart(type="text", text=msg_content)) + + return parts, tool_calls, unparsed + + def _parse_harmony_messages(self, content: str) -> list[dict[str, str | None]]: + """Parse Harmony format content into a list of message dicts. + + Uses manual string parsing (find/rfind) rather than regex. This approach + is intentional: it will continue to work if we move away from using + stringified tokens, which would be preferable for robustness. 
+ """ + messages: list[dict[str, str | None]] = [] + idx = 0 + message_token = "<|message|>" + end_tokens = ("<|end|>", "<|call|>", "<|return|>") + + while True: + message_idx = content.find(message_token, idx) + if message_idx == -1: + break + + header_start = content.rfind("<|start|>", idx, message_idx) + if header_start == -1: + header_start = idx + header = content[header_start:message_idx] + + content_start = message_idx + len(message_token) + end_idx = len(content) + end_token = "" + for token in end_tokens: + token_idx = content.find(token, content_start) + if token_idx != -1 and token_idx < end_idx: + end_idx = token_idx + end_token = token + + body = content[content_start:end_idx] + + channel = None + channel_match = re.search(r"<\|channel\|>([^<\s]+)", header) + if channel_match: + channel = channel_match.group(1) + + recipient = None + recipient_match = re.search(r"to=([^\s<]+)", header) + if recipient_match: + recipient = recipient_match.group(1) + + content_type = None + content_type_match = re.search(r"<\|constrain\|>\s*([^\s<]+)", header) + if content_type_match: + content_type = content_type_match.group(1) + + messages.append( + { + "channel": channel, + "recipient": recipient, + "content_type": content_type, + "content": body, + "raw_text": content[header_start : end_idx + len(end_token)] + if end_token + else content[header_start:], + } + ) + + idx = end_idx + len(end_token) + + return messages + + def create_conversation_prefix_with_tools( + self, tools: list[ToolSpec], system_prompt: str = "" + ) -> list[Message]: + """Create conversation prefix with tools in Harmony format. + + Returns a list of messages to prepend to conversations: + 1. If tools present: A system message with tool routing instruction + 2. A developer message with user instructions and tool definitions + + Tools are defined using TypeScript-ish syntax in a `functions` namespace, + following the OpenAI Harmony spec. + + Note: When using this with tools, you typically don't need use_system_prompt=True + since this method provides the necessary system setup for tool routing. 
+
+        Reference: https://raw.githubusercontent.com/openai/openai-cookbook/main/articles/openai-harmony.md
+        """
+        messages: list[Message] = []
+
+        # Tool routing instruction goes in system message (per Harmony spec)
+        if tools:
+            messages.append(
+                Message(
+                    role=self._INTERNAL_SYSTEM_ROLE,
+                    content="Calls to these tools must go to the commentary channel: 'functions'.",
+                )
+            )
+
+        # User instructions and tool definitions go in developer message
+        content_parts: list[str] = []
+        if system_prompt:
+            content_parts.append(f"# Instructions\n\n{system_prompt}")
+
+        if tools:
+            tool_defs = [_format_tool_definition(tool) for tool in tools]
+            tools_text = "\n\n".join(tool_defs)
+            content_parts.append(
+                "# Tools\n\n## functions\n\nnamespace functions {\n\n"
+                f"{tools_text}\n\n"
+                "} // namespace functions"
+            )
+
+        if content_parts:
+            content = "\n\n".join(content_parts)
+            messages.append(Message(role="developer", content=content))
+
+        return messages
diff --git a/src/art/tinker/cookbook_v/renderers/kimi_k2.py b/src/art/tinker/cookbook_v/renderers/kimi_k2.py
new file mode 100644
index 000000000..dd087a2eb
--- /dev/null
+++ b/src/art/tinker/cookbook_v/renderers/kimi_k2.py
@@ -0,0 +1,447 @@
+"""Renderer for Moonshot AI's Kimi K2 models."""
+
+import json
+import re
+import warnings
+
+import tinker
+import torch
+
+from .base import (
+    Message,
+    RenderContext,
+    RenderedMessage,
+    Renderer,
+    Role,
+    ToolCall,
+    ToolSpec,
+    TrainOnWhat,
+    UnparsedToolCall,
+    ensure_list,
+    ensure_text,
+    parse_response_for_stop_token,
+    parse_think_blocks,
+)
+
+_TOOL_CALLS_SECTION_RE = re.compile(
+    r"<\|tool_calls_section_begin\|>(.*?)<\|tool_calls_section_end\|>"
+    r"|<\|tool_call_section_begin\|>(.*?)<\|tool_call_section_end\|>",
+    re.DOTALL,
+)
+_TOOL_CALL_RE = re.compile(
+    r"<\|tool_call_begin\|>\s*([^<]+:\d+)\s*<\|tool_call_argument_begin\|>\s*(.*?)\s*<\|tool_call_end\|>",
+    re.DOTALL,
+)
+
+
+def _split_tool_calls_section(content: str) -> tuple[str, str | None]:
+    match = _TOOL_CALLS_SECTION_RE.search(content)
+    if not match:
+        return content, None
+    tool_section = match.group(1) if match.group(1) is not None else match.group(2)
+    return content[: match.start()], tool_section
+
+
+def _extract_tool_name(tool_id: str) -> str:
+    if not tool_id:
+        return ""
+    name_part = tool_id.split(":", 1)[0]
+    if "." in name_part:
+        _, name_part = name_part.split(".", 1)
+    return name_part
+
+
+def _parse_tool_calls_section(
+    tool_section: str,
+) -> tuple[list[ToolCall], list[UnparsedToolCall]]:
+    tool_calls: list[ToolCall] = []
+    unparsed_tool_calls: list[UnparsedToolCall] = []
+
+    for match in _TOOL_CALL_RE.finditer(tool_section):
+        raw_text = match.group(0)
+        tool_id = match.group(1).strip()
+        args_str = match.group(2).strip()
+        func_name = _extract_tool_name(tool_id)
+
+        try:
+            json.loads(args_str)
+            tool_calls.append(
+                ToolCall(
+                    function=ToolCall.FunctionBody(name=func_name, arguments=args_str),
+                    id=tool_id if tool_id else None,
+                )
+            )
+        except json.JSONDecodeError as e:
+            unparsed_tool_calls.append(
+                UnparsedToolCall(raw_text=raw_text, error=f"Invalid JSON: {e}")
+            )
+
+    return tool_calls, unparsed_tool_calls
+
+
+class KimiK2Renderer(Renderer):
+    """
+    Format for moonshotai/Kimi-K2-Thinking:
+    <|im_system|>system<|im_middle|>You are Kimi, an AI assistant created by Moonshot AI.<|im_end|>
+    <|im_user|>user<|im_middle|>What can you help me with?<|im_end|>
+    <|im_assistant|>assistant<|im_middle|><think>reasoning</think>I can help you with...<|im_end|>
+
+    Historical assistant messages use empty <think></think> blocks, while the final assistant
+    response preserves reasoning_content in the <think> block.
+
+    Note: Per the HuggingFace chat template, the default system message is automatically
+    prepended if no system message is provided. This ensures train-eval consistency when
+    using HF's apply_chat_template for inference.
+    """
+
+    DEFAULT_SYSTEM_PROMPT = "You are Kimi, an AI assistant created by Moonshot AI."
+
+    def _ensure_system_message(self, messages: list[Message]) -> list[Message]:
+        """Ensure a default system message is present if none exists.
+
+        This matches the HuggingFace chat template behavior where a default system
+        message is automatically added when none is provided.
+
+        The default system message is inserted at the appropriate position:
+        - If messages is empty: adds default system message
+        - If starting with tool_declare: inserts default system after tool_declare (if no system message follows)
+        - Otherwise: prepends default system message before first message (if first message isn't system)
+        """
+        if not messages:
+            default_system = Message(role="system", content=self.DEFAULT_SYSTEM_PROMPT)
+            return [default_system]
+
+        # Accept both system and tool_declare as valid starting messages
+        first_role = messages[0]["role"]
+        if first_role == "tool_declare":
+            # Check if a system message already exists after tool_declare
+            if len(messages) >= 2 and messages[1]["role"] == "system":
+                return messages
+            # No system message, insert default after tool_declare
+            default_system = Message(role="system", content=self.DEFAULT_SYSTEM_PROMPT)
+            return [messages[0], default_system] + list(messages[1:])
+        elif first_role != "system":
+            default_system = Message(role="system", content=self.DEFAULT_SYSTEM_PROMPT)
+            return [default_system] + list(messages)
+
+        return messages
+
+    def render_message(self, message: Message, ctx: RenderContext) -> RenderedMessage:
+        """
+        Render a message. For assistant messages, ctx.is_last controls whether thinking is preserved
+        (True) or stripped to empty <think></think> (False).
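+
+        For example (illustrative): content holding a thinking part "hm" and a text
+        part "Hi" renders to "<think>hm</think>Hi<|im_end|>" when ctx.is_last is True,
+        and to "<think></think>Hi<|im_end|>" otherwise.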
+ """ + role = message["role"] + + # Build role token based on role type + if role == "user": + header_str = f"<|im_user|>{role}<|im_middle|>" + elif role == "assistant": + header_str = f"<|im_assistant|>{role}<|im_middle|>" + elif role == "system": + header_str = f"<|im_system|>{role}<|im_middle|>" + elif role == "tool_declare": + # Tool declaration uses system token but with "tool_declare" as display name + header_str = f"<|im_system|>{role}<|im_middle|>" + elif role == "tool": + # HF template uses message.name if present, otherwise role + role_name = message.get("name") + if not role_name: + warnings.warn( + "Tool message missing 'name' field. Using 'tool' as fallback. " + "Consider setting 'name' to match the tool function name for better context.", + UserWarning, + stacklevel=3, + ) + role_name = role + header_str = f"<|im_system|>{role_name}<|im_middle|>" + + # Tool responses have special formatting - need tool_call_id to correlate with the call + tool_call_id = message.get("tool_call_id", "") + if not tool_call_id: + warnings.warn( + "Tool message missing 'tool_call_id' field. KimiK2Renderer requires 'tool_call_id' " + "to render tool results correctly. The value should match ToolCall.id from the " + "assistant's tool_calls.", + UserWarning, + stacklevel=3, + ) + header_str += f"## Return of {tool_call_id}\n" + else: + # Unknown roles default to system-style formatting + header_str = f"<|im_system|>{role}<|im_middle|>" + + # Build output content + output_str = "" + if role == "assistant": + # Extract thinking and text from content list + parts = ensure_list(message["content"]) + thinking_content = "".join( + p["thinking"] for p in parts if p["type"] == "thinking" + ) + text_content = "".join(p["text"] for p in parts if p["type"] == "text") + + # For the last assistant message (is_last=True), preserve thinking; otherwise use empty think block + if ctx.is_last and thinking_content: + output_str = f"{thinking_content}" + else: + output_str = "" + output_str += text_content + + # Handle tool calls + if "tool_calls" in message and message["tool_calls"]: + output_str += "<|tool_calls_section_begin|>" + for idx, tool_call in enumerate(message["tool_calls"]): + tool_id = tool_call.id + if not tool_id: + tool_id = f"functions.{tool_call.function.name}:{idx}" + args = tool_call.function.arguments + output_str += f"<|tool_call_begin|>{tool_id}<|tool_call_argument_begin|>{args}<|tool_call_end|>" + output_str += "<|tool_calls_section_end|>" + else: + output_str = ensure_text(message["content"]) + + output_str += "<|im_end|>" + + header = tinker.types.EncodedTextChunk(tokens=self.tokenizer.encode(header_str)) + output: list[tinker.ModelInputChunk] = [ + tinker.types.EncodedTextChunk(tokens=self.tokenizer.encode(output_str)) + ] + return RenderedMessage(header=header, output=output) + + def build_generation_prompt( + self, + messages: list[Message], + role: Role = "assistant", + prefill: str | None = None, + ) -> tinker.ModelInput: + messages = self._ensure_system_message(messages) + chunks: list[tinker.types.ModelInputChunk] = [] + + for idx, message in enumerate(messages): + # For generation prompt, no message is "last assistant" since we're generating new response + ctx = RenderContext( + idx=idx, + is_last=False, + prev_message=messages[idx - 1] if idx > 0 else None, + ) + rendered_message = self.render_message(message, ctx) + header_chunk = rendered_message.header + output_chunks = rendered_message.output + if header_chunk: + chunks.append(header_chunk) + chunks.extend([x for x in 
+
+        # Add generation prompt for new assistant message
+        gen_prompt = f"<|im_assistant|>{role}<|im_middle|>"
+        chunks.append(
+            tinker.types.EncodedTextChunk(tokens=self.tokenizer.encode(gen_prompt))
+        )
+        if prefill:
+            chunks.append(
+                tinker.types.EncodedTextChunk(tokens=self.tokenizer.encode(prefill))
+            )
+        return tinker.ModelInput(chunks=chunks)
+
+    def build_supervised_example(
+        self,
+        messages: list[Message],
+        train_on_what: TrainOnWhat = TrainOnWhat.LAST_ASSISTANT_MESSAGE,
+    ) -> tuple[tinker.ModelInput, torch.Tensor]:
+        """
+        Override to properly handle thinking preservation for the last assistant message.
+        Also ensures default system message is prepended if none is present.
+        """
+        messages = self._ensure_system_message(messages)
+
+        # Find last non-tool-call assistant message index
+        last_assistant_idx = -1
+        for idx in range(len(messages) - 1, -1, -1):
+            if (
+                messages[idx]["role"] == "assistant"
+                and "tool_calls" not in messages[idx]
+            ):
+                last_assistant_idx = idx
+                break
+
+        model_input_chunks_weights: list[
+            tuple[tinker.types.ModelInputChunk, float]
+        ] = []
+
+        for idx, message in enumerate(messages):
+            if train_on_what == TrainOnWhat.CUSTOMIZED:
+                assert "trainable" in message, (
+                    "When using CUSTOMIZED train_on_what, each message must have a trainable field"
+                )
+            else:
+                assert "trainable" not in message, (
+                    "When using non-CUSTOMIZED train_on_what, each message must not have a trainable field"
+                )
+
+            is_last_message = idx == len(messages) - 1
+            is_assistant = message["role"] == "assistant"
+            is_user_or_system = message["role"] in ["user", "system"]
+
+            # For Kimi K2, preserve thinking only for the suffix after the last non-tool-call assistant.
+            is_last_assistant = (
+                is_assistant and last_assistant_idx != -1 and idx >= last_assistant_idx
+            )
+            ctx = RenderContext(
+                idx=idx,
+                is_last=is_last_assistant,
+                prev_message=messages[idx - 1] if idx > 0 else None,
+            )
+            rendered_message = self.render_message(message, ctx)
+
+            header_part = rendered_message.header
+            output_parts = rendered_message.output
+
+            header_weight = int(train_on_what == TrainOnWhat.ALL_TOKENS)
+            if header_part:
+                model_input_chunks_weights += [(header_part, header_weight)]
+
+            match train_on_what:
+                case TrainOnWhat.LAST_ASSISTANT_MESSAGE:
+                    output_has_weight = is_last_message and is_assistant
+                case TrainOnWhat.ALL_ASSISTANT_MESSAGES:
+                    output_has_weight = is_assistant
+                case TrainOnWhat.ALL_MESSAGES:
+                    output_has_weight = True
+                case TrainOnWhat.ALL_TOKENS:
+                    output_has_weight = True
+                case TrainOnWhat.ALL_USER_AND_SYSTEM_MESSAGES:
+                    output_has_weight = is_user_or_system
+                case TrainOnWhat.CUSTOMIZED:
+                    output_has_weight = message.get("trainable", False)
+                case _:
+                    raise ValueError(f"Unknown train_on_what: {train_on_what}")
+
+            model_input_chunks_weights += [
+                (output_part, int(output_has_weight))
+                for output_part in output_parts
+                if output_part
+            ]
+
+        weights_data = [
+            w for chunk, w in model_input_chunks_weights for _ in range(chunk.length)
+        ]
+        weights_tensor = torch.tensor(weights_data)
+
+        model_input_chunks = [chunk for chunk, _ in model_input_chunks_weights]
+        return tinker.ModelInput(chunks=model_input_chunks), weights_tensor
+
+    @property
+    def _end_message_token(self) -> int:
+        tokens = self.tokenizer.encode("<|im_end|>")
+        assert len(tokens) == 1, (
+            f"Expected single token for <|im_end|>, got {len(tokens)}"
+        )
+        return tokens[0]
+
+    def get_stop_sequences(self) -> list[int]:
+        return [self._end_message_token]
+
+    def parse_response(self, response: list[int]) -> tuple[Message, bool]:
+        assistant_message, parse_success = parse_response_for_stop_token(
+            response, self.tokenizer, self._end_message_token
+        )
+        if not parse_success:
+            return assistant_message, False
+
+        content = assistant_message["content"]
+        assert isinstance(content, str)
+
+        # Handle tool calls if present
+        text_content, tool_section = _split_tool_calls_section(content)
+        if tool_section is not None:
+            tool_calls, unparsed_tool_calls = _parse_tool_calls_section(tool_section)
+            if tool_calls:
+                assistant_message["tool_calls"] = tool_calls
+            if unparsed_tool_calls:
+                assistant_message["unparsed_tool_calls"] = unparsed_tool_calls
+
+        content_parts = parse_think_blocks(text_content)
+        assistant_message["content"] = (
+            content_parts if content_parts is not None else text_content
+        )
+
+        return assistant_message, True
+
+    def to_openai_message(self, message: Message) -> dict:
+        """Convert a Message to OpenAI API format with reasoning_content for thinking.
+
+        Kimi K2's HF template explicitly expects reasoning_content as a separate field.
+        """
+        result: dict = {"role": message["role"]}
+
+        content = message["content"]
+        if isinstance(content, str):
+            result["content"] = content
+        else:
+            # Extract thinking into reasoning_content, keep text in content
+            thinking_parts = []
+            text_parts = []
+            for p in content:
+                if p["type"] == "thinking":
+                    thinking_parts.append(p["thinking"])
+                elif p["type"] == "text":
+                    text_parts.append(p["text"])
+
+            result["content"] = "".join(text_parts)
+            if thinking_parts:
+                result["reasoning_content"] = "".join(thinking_parts)
+
+        # Handle tool_calls
+        if "tool_calls" in message and message["tool_calls"]:
+            result["tool_calls"] = [
+                {
+                    "type": "function",
+                    "id": tc.id,
+                    "function": {
+                        "name": tc.function.name,
+                        "arguments": tc.function.arguments,
+                    },
+                }
+                for tc in message["tool_calls"]
+            ]
+
+        # Handle tool response fields
+        if message["role"] == "tool":
+            if "tool_call_id" in message:
+                result["tool_call_id"] = message["tool_call_id"]
+            if "name" in message:
+                result["name"] = message["name"]
+
+        return result
+
+    def create_conversation_prefix_with_tools(
+        self, tools: list[ToolSpec], system_prompt: str = ""
+    ) -> list[Message]:
+        """Create system messages with Kimi K2 tool specifications.
+
+        Per the HuggingFace chat template, Kimi K2 places the tool_declare message
+        BEFORE the regular system message. The tool_declare payload expects the
+        OpenAI-style tool schema ({"type":"function","function":{...}}).
+        If no system_prompt is provided, uses the default system prompt to match
+        HuggingFace chat template behavior.
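+
+        Example (illustrative, with a hypothetical get_weather tool):
+
+            self.create_conversation_prefix_with_tools(
+                [{"name": "get_weather", "description": "Look up weather",
+                  "parameters": {"type": "object", "properties": {}}}]
+            )
+            # -> [Message(role="tool_declare", content='[{"function": {...}, "type": "function"}]'),
+            #     Message(role="system", content=DEFAULT_SYSTEM_PROMPT)]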
+
+        Reference: https://huggingface.co/moonshotai/Kimi-K2-Thinking/blob/main/chat_template.jinja
+        """
+        messages: list[Message] = []
+
+        # Tool declaration message comes first (per HF chat template)
+        if tools:
+            tools_payload = [{"type": "function", "function": tool} for tool in tools]
+            # Use sort_keys=True since Kimi K2 sorts keys alphabetically with its own custom apply_chat_template function
+            tools_json = json.dumps(
+                tools_payload, separators=(",", ":"), sort_keys=True
+            )
+            messages.append(Message(role="tool_declare", content=tools_json))
+
+        # Regular system message second (use default if none provided)
+        actual_system_prompt = (
+            system_prompt if system_prompt else self.DEFAULT_SYSTEM_PROMPT
+        )
+        messages.append(Message(role="system", content=actual_system_prompt))
+
+        return messages
diff --git a/src/art/tinker/cookbook_v/renderers/llama3.py b/src/art/tinker/cookbook_v/renderers/llama3.py
new file mode 100644
index 000000000..0e6f76198
--- /dev/null
+++ b/src/art/tinker/cookbook_v/renderers/llama3.py
@@ -0,0 +1,72 @@
+"""Renderer for Llama 3 chat format."""
+
+import tinker
+
+from .base import (
+    Message,
+    RenderContext,
+    RenderedMessage,
+    Renderer,
+    ensure_text,
+    parse_response_for_stop_token,
+)
+
+
+class Llama3Renderer(Renderer):
+    """Renderer for Llama 3 Instruct models.
+
+    Format::
+
+        <|begin_of_text|><|start_header_id|>system<|end_header_id|>
+
+        You are a helpful AI assistant<|eot_id|><|start_header_id|>user<|end_header_id|>
+
+        What can you help me with?<|eot_id|><|start_header_id|>assistant<|end_header_id|>
+
+    Note: We intentionally differ from HF's stock Llama template:
+
+    - HF prepends "Cutting Knowledge Date..." to system messages; we don't
+      (add manually if needed)
+
+    Tool calling is NOT supported for Llama 3. The Llama 3 tool calling format
+    uses bare JSON without delimiters, making it impossible to reliably distinguish
+    tool calls from regular JSON content in model responses. Use a different model
+    or develop your own renderer if you need tool calling.
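+
+    Example usage (illustrative; assumes a compatible tokenizer)::
+
+        renderer = Llama3Renderer(tokenizer)
+        prompt = renderer.build_generation_prompt(
+            [Message(role="user", content="Hi")]
+        )
+        # The prompt tokens decode to:
+        # <|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\nHi<|eot_id|>
+        # <|start_header_id|>assistant<|end_header_id|>\n\n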
+ """ + + @property + def has_extension_property(self) -> bool: + """Llama3 satisfies the extension property - no content is stripped from history.""" + return True + + def render_message(self, message: Message, ctx: RenderContext) -> RenderedMessage: + role = message["role"] + header_str = f"<|start_header_id|>{role}<|end_header_id|>\n\n" + output_str = ensure_text(message["content"]) + "<|eot_id|>" + + header = tinker.types.EncodedTextChunk( + tokens=self.tokenizer.encode(header_str, add_special_tokens=False) + ) + output: list[tinker.ModelInputChunk] = [ + tinker.types.EncodedTextChunk( + tokens=self.tokenizer.encode(output_str, add_special_tokens=False) + ) + ] + return RenderedMessage(header=header, output=output) + + @property + def _bos_tokens(self) -> list[int]: + return self.tokenizer.encode("<|begin_of_text|>", add_special_tokens=False) + + @property + def _end_message_token(self) -> int: + (token,) = self.tokenizer.encode("<|eot_id|>", add_special_tokens=False) + return token + + def get_stop_sequences(self) -> list[int]: + return [self._end_message_token] + + def parse_response(self, response: list[int]) -> tuple[Message, bool]: + return parse_response_for_stop_token( + response, self.tokenizer, self._end_message_token + ) diff --git a/src/art/tinker/cookbook_v/renderers/qwen3.py b/src/art/tinker/cookbook_v/renderers/qwen3.py new file mode 100644 index 000000000..0e9479046 --- /dev/null +++ b/src/art/tinker/cookbook_v/renderers/qwen3.py @@ -0,0 +1,552 @@ +""" +Qwen3 family renderers - text and vision-language models. + +Includes: +- Qwen3Renderer: Base Qwen3 with thinking enabled +- Qwen3DisableThinkingRenderer: Qwen3 with thinking disabled +- Qwen3InstructRenderer: Qwen3 instruct 2507 models (no tag) +- Qwen3VLRenderer: Vision-language Qwen3 with thinking +- Qwen3VLInstructRenderer: Vision-language instruct models +""" + +import json +from typing import cast + +import tinker + +from ..image_processing_utils import ImageProcessor +from ..tokenizer_utils import Tokenizer +from .base import ( + ImagePart, + ImageProcessorProtocol, + Message, + RenderContext, + RenderedMessage, + Renderer, + TextPart, + ToolSpec, + UnparsedToolCall, + _tool_call_payload, + image_to_chunk, + parse_content_blocks, + parse_response_for_stop_token, + remove_thinking, +) + + +def _merge_consecutive_text_parts( + chunks: list[ImagePart | TextPart], +) -> list[ImagePart | TextPart]: + """Merge consecutive TextParts into single parts. + + This ensures text is tokenized as a single string, matching HuggingFace's + apply_chat_template behavior which tokenizes the full rendered string at once. + Without merging, tokenization boundaries between chunks can produce different + token sequences (though they decode to identical strings). + """ + if not chunks: + return chunks + + merged: list[ImagePart | TextPart] = [chunks[0]] + for chunk in chunks[1:]: + if chunk["type"] == "text" and merged[-1]["type"] == "text": + merged[-1] = TextPart(type="text", text=merged[-1]["text"] + chunk["text"]) + else: + merged.append(chunk) + return merged + + +class Qwen3Renderer(Renderer): + """ + Renderer for Qwen3 models with thinking enabled. + + This renderer is designed to match HuggingFace's Qwen3 chat template behavior + (with enable_thinking=True, which is the default). This ensures compatibility + with the OpenAI-compatible /chat/completions endpoint, which uses HF templates. 
+
+    Reference: https://huggingface.co/Qwen/Qwen3-8B/blob/main/tokenizer_config.json
+
+    Format:
+    <|im_start|>system
+    You are Qwen, created by Alibaba Cloud.<|im_end|>
+    <|im_start|>user
+    What can you help me with?<|im_end|>
+    <|im_start|>assistant
+    <think>
+    [reasoning content]
+    </think>
+    I can help you with...<|im_end|>
+
+    The default strip_thinking_from_history=True matches HF behavior where thinking
+    blocks are stripped from historical assistant messages in multi-turn conversations.
+    Use strip_thinking_from_history=False for multi-turn RL to get the extension property.
+    """
+
+    def __init__(self, tokenizer: Tokenizer, strip_thinking_from_history: bool = True):
+        """
+        Args:
+            tokenizer: The tokenizer to use for encoding.
+            strip_thinking_from_history: When True (default), strips <think>...</think> blocks
+                from assistant messages in multi-turn history. This matches HuggingFace's
+                Qwen3 chat template behavior. Set to False to preserve thinking in history
+                (useful for multi-turn RL where you need the extension property).
+
+                Note: When strip_thinking_from_history=True, this renderer produces identical
+                tokens to HuggingFace's apply_chat_template with enable_thinking=True.
+
+                See /rl/sequence-extension in the docs for details on how strip_thinking_from_history
+                affects multi-turn RL compute efficiency.
+        """
+        super().__init__(tokenizer)
+        self.strip_thinking_from_history = strip_thinking_from_history
+
+    @property
+    def has_extension_property(self) -> bool:
+        """Extension property depends on strip_thinking_from_history setting.
+
+        When strip_thinking_from_history=False, thinking blocks are preserved in
+        history, so each successive observation is a prefix extension of the previous.
+
+        When strip_thinking_from_history=True (default), thinking blocks are stripped
+        from historical messages, breaking the extension property.
+        """
+        return not self.strip_thinking_from_history
+
+    def _get_qwen_role_for_message(self, message: Message) -> str:
+        """Get the role to use for rendering a message in Qwen format.
+
+        Per HuggingFace Qwen3 chat template, tool messages are rendered with role "user".
+        """
+        role = message["role"]
+        if role == "tool":
+            return "user"
+        return role
+
+    def _wrap_qwen_tool_response(self, content: str) -> str:
+        """Wrap tool response content in Qwen's <tool_response> tags."""
+        return f"<tool_response>\n{content}\n</tool_response>"
+
+    def render_message(self, message: Message, ctx: RenderContext) -> RenderedMessage:
+        maybe_newline = "\n" if ctx.idx > 0 else ""
+
+        role = self._get_qwen_role_for_message(message)
+        header_str = f"{maybe_newline}<|im_start|>{role}\n"
+
+        content = message["content"]
+
+        if isinstance(content, list):
+            # Structured content - handle with list operations
+            parts = content
+            if (
+                self.strip_thinking_from_history
+                and message["role"] == "assistant"
+                and not ctx.is_last
+            ):
+                # Remove thinking parts for historical messages
+                parts = remove_thinking(parts)
+            # Render parts in order, preserving interleaved thinking/text structure.
+            # No separator needed - whitespace is preserved in TextPart for roundtrip identity.
+            rendered_parts = []
+            for p in parts:
+                if p["type"] == "thinking":
+                    rendered_parts.append(f"<think>{p['thinking']}</think>")
+                elif p["type"] == "text":
+                    rendered_parts.append(p["text"])
+                # ToolCallPart handled via message's tool_calls field
+            output_content = "".join(rendered_parts)
+        else:
+            # String content - pass through as-is.
+            # Note: strip_thinking_from_history only works with list-based content.
+            # For stripping to work on historical messages, use structured content
+            # with ThinkingPart separated from text (as returned by parse_response).
+            output_content = content
+
+        # Handle tool response wrapping
+        if message["role"] == "tool":
+            output_content = self._wrap_qwen_tool_response(output_content)
+
+        # Handle tool_calls field
+        if "tool_calls" in message:
+            # Add leading newline to match HF template behavior
+            output_content += "\n" + "\n".join(
+                [
+                    f"<tool_call>\n{json.dumps(_tool_call_payload(tool_call))}\n</tool_call>"
+                    for tool_call in message["tool_calls"]
+                ]
+            )
+        output_content += "<|im_end|>"
+        header = tinker.types.EncodedTextChunk(
+            tokens=self.tokenizer.encode(header_str, add_special_tokens=False)
+        )
+        output: list[tinker.ModelInputChunk] = [
+            tinker.types.EncodedTextChunk(
+                tokens=self.tokenizer.encode(output_content, add_special_tokens=False)
+            )
+        ]
+        return RenderedMessage(header=header, output=output)
+
+    @property
+    def _end_message_token(self) -> int:
+        tokens = self.tokenizer.encode("<|im_end|>", add_special_tokens=False)
+        assert len(tokens) == 1, (
+            f"Expected single token for <|im_end|>, got {len(tokens)}"
+        )
+        return tokens[0]
+
+    def get_stop_sequences(self) -> list[int]:
+        return [self._end_message_token]
+
+    def parse_response(self, response: list[int]) -> tuple[Message, bool]:
+        assistant_message, parse_success = parse_response_for_stop_token(
+            response, self.tokenizer, self._end_message_token
+        )
+        if not parse_success:
+            return assistant_message, False
+
+        # Parse <think>...</think> and <tool_call>...</tool_call> blocks together
+        # to preserve ordering. Tool calls use Qwen's <tool_call> format:
+        # - https://qwen.readthedocs.io/en/latest/getting_started/concepts.html#tool-calling
+        # - https://github.com/QwenLM/Qwen-Agent/blob/main/qwen_agent/llm/fncall_prompts/nous_fncall_prompt.py#L279-L282
+        assert isinstance(assistant_message["content"], str)
+        content = assistant_message["content"]
+
+        # Parse all blocks in one pass, preserving order
+        parts = parse_content_blocks(content)
+
+        if parts is not None:
+            assistant_message["content"] = parts
+
+            # Also populate tool_calls and unparsed_tool_calls fields for backward compatibility
+            # TODO: Consider moving away from TypedDicts for part types - current approach
+            # relies on runtime type checking (p["type"] == "tool_call") without static guarantees.
+            tool_calls = [p["tool_call"] for p in parts if p["type"] == "tool_call"]
+            if tool_calls:
+                assistant_message["tool_calls"] = tool_calls
+
+            unparsed = [
+                UnparsedToolCall(raw_text=p["raw_text"], error=p["error"])
+                for p in parts
+                if p["type"] == "unparsed_tool_call"
+            ]
+            if unparsed:
+                assistant_message["unparsed_tool_calls"] = unparsed
+        else:
+            # No special blocks found - keep as string for backward compatibility
+            assistant_message["content"] = content
+
+        return assistant_message, True
+
+    def to_openai_message(self, message: Message) -> dict:
+        """Convert a Message to OpenAI API format with reasoning_content for thinking.
+
+        Qwen3's HF template accepts either:
+        - message['reasoning_content'] as a separate field
+        - <think>...</think> embedded in content
+
+        We use reasoning_content for cleaner separation.
+ """ + result: dict = {"role": message["role"]} + + content = message["content"] + if isinstance(content, str): + result["content"] = content + else: + # Extract thinking into reasoning_content, keep text in content + thinking_parts = [] + text_parts = [] + for p in content: + if p["type"] == "thinking": + thinking_parts.append(p["thinking"]) + elif p["type"] == "text": + text_parts.append(p["text"]) + # Skip tool_call/unparsed_tool_call - handled via tool_calls field + + result["content"] = "".join(text_parts) + if thinking_parts: + result["reasoning_content"] = "".join(thinking_parts) + + # Handle tool_calls + if "tool_calls" in message and message["tool_calls"]: + result["tool_calls"] = [ + { + "type": "function", + "id": tc.id, + "function": { + "name": tc.function.name, + "arguments": tc.function.arguments, + }, + } + for tc in message["tool_calls"] + ] + + # Handle tool response fields + if message["role"] == "tool": + if "tool_call_id" in message: + result["tool_call_id"] = message["tool_call_id"] + if "name" in message: + result["name"] = message["name"] + + return result + + def create_conversation_prefix_with_tools( + self, tools: list[ToolSpec], system_prompt: str = "" + ) -> list[Message]: + """Create system message with Qwen3 tool specifications. + + Qwen3 uses XML `` tags containing JSON tool definitions in OpenAI format, + appended to the system message content. + + References: + - https://qwen.readthedocs.io/en/latest/getting_started/concepts.html#tool-calling + - https://huggingface.co/Qwen/Qwen3-8B/blob/main/tokenizer_config.json + """ + tools_text = "" + if tools: + # Each tool is wrapped in {"type": "function", "function": {...}} per OpenAI format + # Use separators=(", ", ": ") to match HF's tojson filter output + tool_lines = "\n".join( + json.dumps( + {"type": "function", "function": tool}, separators=(", ", ": ") + ) + for tool in tools + ) + tools_text = f"""# Tools + +You may call one or more functions to assist with the user query. + +You are provided with function signatures within XML tags: + +{tool_lines} + + +For each function call, return a json object with function name and arguments within XML tags: + +{{"name": , "arguments": }} +""" + + # Add separator between system prompt and tools if system prompt exists + if system_prompt: + content = system_prompt + "\n\n" + tools_text + else: + content = tools_text + + return [Message(role="system", content=content)] + + +class Qwen3DisableThinkingRenderer(Qwen3Renderer): + """ + Renderer for Qwen3 hybrid models with thinking disabled. + + This renderer matches HuggingFace's Qwen3 chat template behavior with + enable_thinking=False (or thinking=False for apply_chat_template). It adds + empty \\n\\n\\n\\n blocks to assistant messages, signaling to + the model that it should respond directly without extended reasoning. + + Use this renderer when you want to train or sample from Qwen3 models in + "non-thinking" mode while maintaining compatibility with the OpenAI endpoint. + """ + + def render_message(self, message: Message, ctx: RenderContext) -> RenderedMessage: + # Get the base rendered message + rendered = super().render_message(message, ctx) + + # Add empty thinking block to header for last assistant message + # This goes in header (weight=0) so observation matches generation prompt. 
+ if message["role"] == "assistant" and ctx.is_last: + content = message.get("content", "") + if isinstance(content, str): + has_think = "" in content + else: + has_think = any(p["type"] == "thinking" for p in content) + + if not has_think: + empty_think_tokens = self.tokenizer.encode( + "\n\n\n\n", add_special_tokens=False + ) + old_header_tokens = ( + list(rendered.header.tokens) if rendered.header else [] + ) + new_header = tinker.EncodedTextChunk( + tokens=old_header_tokens + empty_think_tokens + ) + rendered = RenderedMessage( + header=new_header, + output=rendered.output, + stop_overlap=rendered.stop_overlap, + ) + + return rendered + + +class Qwen3InstructRenderer(Qwen3Renderer): + """ + Renderer for Qwen3 instruct 2507 models. Unlike the earlier Qwen3 models, these models do not + use the tag at all. + + Inherits from Qwen3Renderer. ThinkingPart in content is still handled (rendered as + ...) in case the conversation includes thinking. + """ + + @property + def has_extension_property(self) -> bool: + """Qwen3 Instruct always satisfies extension - no thinking to strip from history.""" + # NOTE: If callers include ThinkingPart in history, Qwen3Renderer may still strip it + # when strip_thinking_from_history=True, so extension can break. + # This is a rare case that'll only occur if we prompt the instruct model + # with a conversation from a different model. + return True + + +class Qwen3VLRenderer(Qwen3Renderer): + """ + Vision-language renderer for Qwen3-VL models with thinking support. + + Format like this: + <|im_start|>system + You are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|> + <|im_start|>user + What can you help me with?<|im_end|> + <|im_start|>assistant + + + + I can help you with...<|im_end|> + + The default strip_thinking_from_history=True matches the non-VL Qwen3Renderer behavior. + """ + + image_processor: ImageProcessor + + def __init__( + self, + tokenizer: Tokenizer, + image_processor: ImageProcessor, + strip_thinking_from_history: bool = True, + merge_text_chunks: bool = True, + ): + self.tokenizer = tokenizer + self.image_processor = image_processor + self.strip_thinking_from_history = strip_thinking_from_history + self.merge_text_chunks = merge_text_chunks + + def _preprocess_message_parts( + self, message: Message, *, strip_thinking: bool = False + ) -> list[ImagePart | TextPart]: + """Convert message content to list form for VL rendering. + + Converts ThinkingPart to ... text (or strips if strip_thinking=True). + Wraps images with vision tokens. ToolCallPart is not supported in VL content list + (use message's tool_calls field instead). + """ + content = message["content"] + if isinstance(content, str): + base_parts: list[ImagePart | TextPart] = [ + TextPart(type="text", text=content) + ] + else: + # Convert structured content to ImagePart/TextPart list + base_parts: list[ImagePart | TextPart] = [] + for p in content: + if p["type"] == "text": + base_parts.append(cast(TextPart, p)) + elif p["type"] == "image": + base_parts.append(cast(ImagePart, p)) + elif p["type"] == "thinking": + if not strip_thinking: + # Render thinking as ... 
+ base_parts.append( + TextPart( + type="text", text=f"<think>{p['thinking']}</think>" + ) + ) + # else: strip thinking by not appending + # ToolCallPart and UnparsedToolCallPart are handled via message's tool_calls field + + # Wrap images with vision tokens + chunks: list[ImagePart | TextPart] = [] + for content_chunk in base_parts: + if content_chunk["type"] == "image": + chunks.append(TextPart(type="text", text="<|vision_start|>")) + + chunks.append(content_chunk) + + if content_chunk["type"] == "image": + chunks.append(TextPart(type="text", text="<|vision_end|>")) + + return chunks + + def _wrap_qwen_tool_response_chunks( + self, chunks: list[ImagePart | TextPart] + ) -> list[ImagePart | TextPart]: + """Wrap content chunks in Qwen's <tool_response> tags for multimodal messages.""" + return ( + [TextPart(type="text", text="<tool_response>\n")] + + chunks + + [TextPart(type="text", text="\n</tool_response>")] + ) + + def render_message(self, message: Message, ctx: RenderContext) -> RenderedMessage: + maybe_newline = "\n" if ctx.idx > 0 else "" + + role = self._get_qwen_role_for_message(message) + header_str = f"{maybe_newline}<|im_start|>{role}\n" + + # Strip thinking from history for non-last assistant messages (matching non-VL behavior) + strip_thinking = ( + self.strip_thinking_from_history + and message["role"] == "assistant" + and not ctx.is_last + ) + output_chunks = self._preprocess_message_parts( + message, strip_thinking=strip_thinking + ) + + # Handle tool response wrapping + if message["role"] == "tool": + output_chunks = self._wrap_qwen_tool_response_chunks(output_chunks) + + if "tool_calls" in message: + # Add leading newline to match HF template behavior + output_chunks += [ + TextPart( + type="text", + text="\n" + + "\n".join( + [ + f"<tool_call>\n{json.dumps(_tool_call_payload(tool_call))}\n</tool_call>" + for tool_call in message["tool_calls"] + ] + ), + ) + ] + output_chunks += [TextPart(type="text", text="<|im_end|>")] + + if self.merge_text_chunks: + output_chunks = _merge_consecutive_text_parts(output_chunks) + + output_chunks_encoded: list[tinker.ModelInputChunk] = [ + image_to_chunk( + image_or_str=x["image"], + image_processor=cast(ImageProcessorProtocol, self.image_processor), + ) + if x["type"] == "image" + else tinker.EncodedTextChunk( + tokens=self.tokenizer.encode(x["text"], add_special_tokens=False) + ) + for x in output_chunks + ] + + header = tinker.types.EncodedTextChunk( + tokens=self.tokenizer.encode(header_str, add_special_tokens=False) + ) + return RenderedMessage(header=header, output=output_chunks_encoded) + + +class Qwen3VLInstructRenderer(Qwen3VLRenderer): + """ + Renderer for Qwen3-VL Instruct models. + + Unlike the Qwen3-VL Thinking models, the Qwen3-VL Instruct models do not use the <think> tag. + """ + + pass
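For orientation, a minimal round-trip sketch of the vendored Qwen3 renderer. The class and method names come from this diff; the model name and sampled string are illustrative only, and the exact shape of the parsed Message is an assumption based on the code above:

    from art.tinker.cookbook_v import renderers, tokenizer_utils

    tokenizer = tokenizer_utils.get_tokenizer("Qwen/Qwen3-8B")
    renderer = renderers.Qwen3Renderer(tokenizer, strip_thinking_from_history=False)

    # parse_response() consumes sampled token ids and recovers structure:
    # <think>...</think> becomes a ThinkingPart, <tool_call> blocks populate
    # message["tool_calls"], and everything else stays as text.
    tokens = tokenizer.encode(
        "<think>\nshort reasoning\n</think>\n\nHello!<|im_end|>",
        add_special_tokens=False,
    )
    message, ok = renderer.parse_response(tokens)

    # to_openai_message() lifts thinking into reasoning_content for the
    # OpenAI-compatible endpoint, leaving only user-visible text in content.
    openai_message = renderer.to_openai_message(message)
    print(ok, openai_message.get("reasoning_content"), openai_message.get("content"))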
+ """ + + @property + def has_extension_property(self) -> bool: + """RoleColon satisfies the extension property - no content is stripped from history.""" + return True + + def render_message(self, message: Message, ctx: RenderContext) -> RenderedMessage: + header_str = message["role"].capitalize() + ":" + output_str = " " + ensure_text(message["content"]) + "\n\n" + # stop_overlap completes the stop sequence "\n\nUser:" for assistant messages. + # For non-assistant messages, we use a placeholder that's never actually concatenated. + stop_overlap_str = "User:" if message["role"] == "assistant" else "" + header = tinker.types.EncodedTextChunk( + tokens=self.tokenizer.encode(header_str, add_special_tokens=False) + ) + output: list[tinker.ModelInputChunk] = [ + tinker.types.EncodedTextChunk( + tokens=self.tokenizer.encode(output_str, add_special_tokens=False) + ) + ] + stop_overlap = tinker.types.EncodedTextChunk( + tokens=self.tokenizer.encode(stop_overlap_str, add_special_tokens=False) + ) + return RenderedMessage(header=header, output=output, stop_overlap=stop_overlap) + + def get_stop_sequences(self) -> list[str]: + return ["\n\nUser:"] + + def parse_response(self, response: list[int]) -> tuple[Message, bool]: + import logging + + logger = logging.getLogger(__name__) + + str_response = self.tokenizer.decode(response) + splitted = str_response.split("\n\nUser:") + if len(splitted) == 1: + logger.debug(f"Response is not a valid assistant response: {str_response}") + return Message(role="assistant", content=str_response.strip()), False + elif len(splitted) == 2: + before, _after = splitted + return Message(role="assistant", content=before.strip()), True + else: + raise ValueError( + f"When parsing response, expected to split into 1 or 2 pieces using stop tokens, but got {len(splitted)}. " + "You probably are using the wrong stop tokens when sampling" + ) + + @property + def _bos_tokens(self) -> list[int]: + bos_token_str = self.tokenizer.bos_token + if bos_token_str is None: + return [] + assert isinstance(bos_token_str, str) + return self.tokenizer.encode(bos_token_str, add_special_tokens=False) + + def create_conversation_prefix_with_tools( + self, tools: list[ToolSpec], system_prompt: str = "" + ) -> list[Message]: + raise NotImplementedError("RoleColonRenderer does not support tool calling") diff --git a/src/art/tinker/cookbook_v/tokenizer_utils.py b/src/art/tinker/cookbook_v/tokenizer_utils.py new file mode 100644 index 000000000..53d91fef2 --- /dev/null +++ b/src/art/tinker/cookbook_v/tokenizer_utils.py @@ -0,0 +1,38 @@ +""" +Utilities for working with tokenizers. Create new types to avoid needing to import AutoTokenizer and PreTrainedTokenizer. + + +Avoid importing AutoTokenizer and PreTrainedTokenizer until runtime, because they're slow imports. 
+""" + +from __future__ import annotations + +from functools import cache +from typing import TYPE_CHECKING, Any, TypeAlias + +if TYPE_CHECKING: + # this import takes a few seconds, so avoid it on the module import when possible + from transformers.tokenization_utils import PreTrainedTokenizer + + Tokenizer: TypeAlias = PreTrainedTokenizer +else: + # make it importable from other files as a type in runtime + Tokenizer: TypeAlias = Any + + +@cache +def get_tokenizer(model_name: str) -> Tokenizer: + from transformers.models.auto.tokenization_auto import AutoTokenizer + + model_name = model_name.split(":")[0] + + # Avoid gating of Llama 3 models: + if model_name.startswith("meta-llama/Llama-3"): + model_name = "thinkingmachineslabinc/meta-llama-3-instruct-tokenizer" + + kwargs: dict[str, Any] = {} + if model_name == "moonshotai/Kimi-K2-Thinking": + kwargs["trust_remote_code"] = True + kwargs["revision"] = "612681931a8c906ddb349f8ad0f582cb552189cd" + + return AutoTokenizer.from_pretrained(model_name, use_fast=True, **kwargs) diff --git a/src/art/tinker/cookbook_v/utils/__init__.py b/src/art/tinker/cookbook_v/utils/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/art/tinker/cookbook_v/utils/misc_utils.py b/src/art/tinker/cookbook_v/utils/misc_utils.py new file mode 100644 index 000000000..8e8120f64 --- /dev/null +++ b/src/art/tinker/cookbook_v/utils/misc_utils.py @@ -0,0 +1,96 @@ +""" +Small utilities requiring only basic python libraries. +""" + +from contextlib import contextmanager +import importlib +import logging +import time +from typing import Any, Sequence, TypeVar, cast + +import numpy as np + +logger = logging.getLogger(__name__) + +T = TypeVar("T") + + +@contextmanager +def timed(key: str, metrics: dict[str, Any]): + logger.info(f"Starting {key}") + tstart = time.time() + yield + logger.info(f"{key} took {time.time() - tstart:.2f} seconds") + metrics[f"time/{key}"] = time.time() - tstart + + +safezip = cast(type[zip], lambda *args, **kwargs: zip(*args, **kwargs, strict=True)) + + +def dict_mean(list_of_dicts: list[dict[str, float | int]]) -> dict[str, float]: + key2values = {} + for d in list_of_dicts: + for k, v in d.items(): + key2values.setdefault(k, []).append(v) + return {k: float(np.mean(values)) for k, values in key2values.items()} + + +def all_same(xs: list[Any]) -> bool: + return all(x == xs[0] for x in xs) + + +def lookup_func(path_to_func: str, default_module: str | None = None): + """ + path.to.module:func_name or func_name (assumes default_module) + """ + colon_count = path_to_func.count(":") + if colon_count == 0 and default_module is not None: + module_name = default_module + func_name = path_to_func + elif colon_count == 1: + module_name, func_name = path_to_func.rsplit(":", 1) + else: + raise ValueError(f"Invalid path: {path_to_func}") + module = importlib.import_module(module_name) + return getattr(module, func_name) + + +def split_list(lst: Sequence[T], num_splits: int) -> list[list[T]]: + """ + Split a sequence into a list of lists, where the sizes are as equal as possible, + and the long and short lists are as uniformly distributed as possible. 
diff --git a/src/art/tinker/cookbook_v/utils/__init__.py b/src/art/tinker/cookbook_v/utils/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/art/tinker/cookbook_v/utils/misc_utils.py b/src/art/tinker/cookbook_v/utils/misc_utils.py new file mode 100644 index 000000000..8e8120f64 --- /dev/null +++ b/src/art/tinker/cookbook_v/utils/misc_utils.py @@ -0,0 +1,96 @@ +""" +Small utilities requiring only basic Python libraries. +""" + +from contextlib import contextmanager +import importlib +import logging +import time +from typing import Any, Sequence, TypeVar, cast + +import numpy as np + +logger = logging.getLogger(__name__) + +T = TypeVar("T") + + +@contextmanager +def timed(key: str, metrics: dict[str, Any]): + logger.info(f"Starting {key}") + tstart = time.time() + yield + logger.info(f"{key} took {time.time() - tstart:.2f} seconds") + metrics[f"time/{key}"] = time.time() - tstart + + +safezip = cast(type[zip], lambda *args, **kwargs: zip(*args, **kwargs, strict=True)) + + +def dict_mean(list_of_dicts: list[dict[str, float | int]]) -> dict[str, float]: + key2values = {} + for d in list_of_dicts: + for k, v in d.items(): + key2values.setdefault(k, []).append(v) + return {k: float(np.mean(values)) for k, values in key2values.items()} + + +def all_same(xs: list[Any]) -> bool: + return all(x == xs[0] for x in xs) + + +def lookup_func(path_to_func: str, default_module: str | None = None): + """ + path.to.module:func_name or func_name (assumes default_module) + """ + colon_count = path_to_func.count(":") + if colon_count == 0 and default_module is not None: + module_name = default_module + func_name = path_to_func + elif colon_count == 1: + module_name, func_name = path_to_func.rsplit(":", 1) + else: + raise ValueError(f"Invalid path: {path_to_func}") + module = importlib.import_module(module_name) + return getattr(module, func_name) + + +def split_list(lst: Sequence[T], num_splits: int) -> list[list[T]]: + """ + Split a sequence into a list of lists, where the sizes are as equal as possible, + and the long and short lists are as uniformly distributed as possible. + + Args: + lst: The sequence to split + num_splits: Number of sublists to create + + Returns: + A list of sublists with sizes differing by at most 1 + + Raises: + ValueError: If num_splits > len(lst) or num_splits <= 0 + + Examples: + >>> split_list([1, 2, 3, 4, 5], 2) + [[1, 2], [3, 4, 5]] + >>> split_list([1, 2, 3, 4, 5], 3) + [[1], [2, 3], [4, 5]] + """ + if num_splits <= 0: + raise ValueError(f"num_splits must be positive, got {num_splits}") + if num_splits > len(lst): + raise ValueError( + f"Cannot split list of length {len(lst)} into {num_splits} parts" + ) + + edges = np.linspace(0, len(lst), num_splits + 1).astype(int) + return [list(lst[edges[i] : edges[i + 1]]) for i in range(num_splits)] + + +def concat_lists(list_of_lists: list[list[Any]]) -> list[Any]: + return [item for sublist in list_of_lists for item in sublist] + + +def not_none(x: T | None) -> T: + assert x is not None, f"{x=} must not be None" + return x
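The split_list edges are computed with a truncating astype(int), which puts the shorter sublists first; a few concrete checks of these helpers, matching the docstring examples above:

    from art.tinker.cookbook_v.utils.misc_utils import dict_mean, safezip, split_list

    assert split_list([1, 2, 3, 4, 5], 3) == [[1], [2, 3], [4, 5]]
    assert dict_mean([{"loss": 1.0}, {"loss": 3.0}]) == {"loss": 2.0}

    # safezip is zip(strict=True): a length mismatch raises instead of
    # silently truncating.
    try:
        list(safezip([1, 2], [1, 2, 3]))
    except ValueError:
        pass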
diff --git a/src/art/tinker/server.py b/src/art/tinker/server.py index 6be096ad4..3a8fddff8 100644 --- a/src/art/tinker/server.py +++ b/src/art/tinker/server.py @@ -1,7 +1,6 @@ import asyncio from dataclasses import dataclass, field import os -import re import socket import time import uuid @@ -18,20 +17,9 @@ from openai.types.chat.completion_create_params import CompletionCreateParams from openai.types.completion_usage import CompletionUsage import tinker -from tinker_cookbook import renderers import uvicorn -# Patch Tinker's Qwen3InstructRenderer which mistakenly expects "args" instead of "arguments" in tool calls. -_parse_tool_call = renderers.Qwen3InstructRenderer._parse_tool_call - - -def _patched_parse_tool_call( - self, tool_call_str: str -) -> list[renderers.ToolCall] | None: - return _parse_tool_call(self, tool_call_str.replace('"arguments": ', '"args": ')) - - -renderers.Qwen3InstructRenderer._parse_tool_call = _patched_parse_tool_call # ty:ignore[invalid-assignment] +from art.tinker.cookbook_v import renderers @dataclass @@ -135,37 +123,30 @@ async def chat_completions( "Tokens and logprobs must have the same length" ) message, _ = renderer.parse_response(sequence.tokens) + openai_message = renderer.to_openai_message(message) + tool_calls = ( + [ + ChatCompletionMessageFunctionToolCall( + type="function", + id=tool_call.get("id") or "", + function=Function( + name=tool_call["function"]["name"], + arguments=tool_call["function"]["arguments"], + ), + ) + for tool_call in openai_message.get("tool_calls", []) + ] + if openai_message.get("tool_calls") + else None + ) choices.append( Choice( finish_reason=sequence.stop_reason, index=i, message=ChatCompletionMessage( - # the qwen renderer does not strip tool calls - # from the content, so we remove them here. - content=( - re.sub( - r"(?:\n?<tool_call>.*?</tool_call>)+\s*$", - "", - message["content"], - flags=re.DOTALL, - ) - if message["content"] - else message["content"] - ) - or None, + content=openai_message.get("content") or None, role="assistant", - tool_calls=[ - ChatCompletionMessageFunctionToolCall( - type="function", - id=tool_call.id or "", - function=Function( - name=tool_call.function.name, - arguments=tool_call.function.arguments, - ), - ) - for tool_call in message.get("tool_calls", []) - ] - or None, + tool_calls=tool_calls, ), logprobs=ChoiceLogprobs( content=[ diff --git a/src/art/tinker/service.py b/src/art/tinker/service.py index 0bd90a0c8..702bdce77 100644 --- a/src/art/tinker/service.py +++ b/src/art/tinker/service.py @@ -10,10 +10,11 @@ import tinker from tinker.lib.public_interfaces.rest_client import RestClient as TinkerRestClient -from tinker_cookbook import renderers, tokenizer_utils import torch import yaml +from art.tinker.cookbook_v import renderers, tokenizer_utils + from .. import dev, types from ..loss import loss_fn, shift_tensor from ..preprocessing.inputs import TrainInputs, create_train_inputs diff --git a/src/art/tinker_native/backend.py b/src/art/tinker_native/backend.py index 291621b6c..7cb568c28 100644 --- a/src/art/tinker_native/backend.py +++ b/src/art/tinker_native/backend.py @@ -23,9 +23,10 @@ from openai.types.chat.completion_create_params import CompletionCreateParams from openai.types.completion_usage import CompletionUsage import tinker -from tinker_cookbook import renderers, tokenizer_utils import uvicorn +from art.tinker.cookbook_v import renderers, tokenizer_utils + from .. import dev from ..backend import Backend from ..model import Model, TrainableModel @@ -369,25 +370,26 @@ async def chat_completions(body: CompletionCreateParams) -> ChatCompletion: parsed_message = parse_completion_to_openai_message( list(sequence.tokens), state.renderer ) + content = parsed_message.get("content") tool_calls: list[ChatCompletionMessageToolCallUnion] | None = None if parsed_message.get("tool_calls"): tool_calls = [ ChatCompletionMessageFunctionToolCall( type="function", - id=tool_call["id"], + id=tool_call.get("id") or f"call_{idx}", function=Function( name=tool_call["function"]["name"], arguments=tool_call["function"]["arguments"], ), ) - for tool_call in parsed_message["tool_calls"] + for idx, tool_call in enumerate(parsed_message["tool_calls"]) ] choices.append( Choice( finish_reason=sequence.stop_reason, index=i, message=ChatCompletionMessage( - content=parsed_message.get("content", ""), + content=content or None, role="assistant", tool_calls=tool_calls, ),
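The server and backend changes above replace the regex stripping with renderer.to_openai_message(), so chat_completions() now consumes a plain dict of roughly this shape for an assistant turn that thought and then called a tool (an illustrative value based on the renderer code in this diff, not captured output):

    openai_message = {
        "role": "assistant",
        "reasoning_content": "I should check the weather first.",
        "content": "",
        "tool_calls": [
            {
                "type": "function",
                "id": "call_0",  # hypothetical id; the renderer passes through tc.id
                "function": {
                    "name": "get_weather",
                    "arguments": '{"city": "Paris"}',
                },
            }
        ],
    }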
- """ - tools_text = "" - if tools: - # Each tool is wrapped in {"type": "function", "function": {...}} per OpenAI format - tool_lines = "\n".join( - json.dumps({"type": "function", "function": tool}, separators=(", ", ": ")) - for tool in tools - ) - tools_text = f"""# Tools - -You may call one or more functions to assist with the user query. - -You are provided with function signatures within XML tags: - -{tool_lines} - - -For each function call, return a json object with function name and arguments within XML tags: - -{{"name": , "arguments": }} -""" - - # Add separator between system prompt and tools if system prompt exists - if system_prompt: - content = system_prompt + "\n\n" + tools_text - else: - content = tools_text - - return [{"role": "system", "content": content}] - - def create_conversation_prefix_with_tools( renderer: Any, tools: list[dict[str, Any]], system_prompt: str = "" ) -> list[dict[str, Any]]: - """Create conversation prefix with tools, using renderer method or fallback.""" - if hasattr(renderer, "create_conversation_prefix_with_tools"): - return renderer.create_conversation_prefix_with_tools(tools, system_prompt) - return _create_conversation_prefix_with_tools_fallback(tools, system_prompt) + """Create conversation prefix with tools using the renderer implementation.""" + return renderer.create_conversation_prefix_with_tools(tools, system_prompt) def compute_advantages( @@ -146,75 +105,12 @@ def convert_openai_messages_to_renderer_format( return converted -def _extract_gpt_oss_tool_calls(content: str) -> tuple[str, list[dict[str, Any]]]: - tool_calls = [] - cleaned_content = content - - pattern = r"(\{[^}]*\})(?:<\|call\|>)?" - - matches = list(re.finditer(pattern, content)) - for i, match in enumerate(matches): - func_name = match.group(1) - args_json = match.group(2) - - tool_calls.append( - { - "id": f"call_{i}", - "type": "function", - "function": { - "name": func_name, - "arguments": args_json, - }, - } - ) - - cleaned_content = cleaned_content.replace(match.group(0), "").strip() - - return cleaned_content, tool_calls - - def parse_completion_to_openai_message( completion_tokens: list[int], renderer: Any, ) -> dict[str, Any]: message, _ = renderer.parse_response(completion_tokens) - - result: dict[str, Any] = {"role": "assistant"} - - content = message.get("content", "") - if isinstance(content, str): - result["content"] = content - else: - text_parts = [] - for part in content: - if part["type"] == "text": - text_parts.append(part["text"]) - elif part["type"] == "thinking": - text_parts.append(part["thinking"]) - result["content"] = "".join(text_parts) - - if "tool_calls" in message and message["tool_calls"]: - result["tool_calls"] = [ - { - "id": tool_call.id or f"call_{i}", - "type": "function", - "function": { - "name": tool_call.function.name, - "arguments": tool_call.function.arguments, - }, - } - for i, tool_call in enumerate(message["tool_calls"]) - ] - else: - if result.get("content") and " bool: diff --git a/tests/integration/test_multi_checkpoint_training.py b/tests/integration/test_multi_checkpoint_training.py index 5d74da0e4..8c07ca00c 100644 --- a/tests/integration/test_multi_checkpoint_training.py +++ b/tests/integration/test_multi_checkpoint_training.py @@ -23,6 +23,7 @@ import art from art.local import LocalBackend +from art.tinker import TinkerBackend from art.types import LocalTrainResult, ServerlessTrainResult, TrainResult # Use a small model for fast testing @@ -116,7 +117,7 @@ async def test_tinker_backend(): """Test multi-checkpoint 
inference with TinkerBackend.""" model_name = f"test-multi-ckpt-tinker-{uuid.uuid4().hex[:8]}" with tempfile.TemporaryDirectory() as tmpdir: - backend = art.TinkerBackend(path=tmpdir) + backend = TinkerBackend(path=tmpdir) model = art.TrainableModel( name=model_name, project="integration-tests", diff --git a/tests/integration/test_tinker_native_backend.py b/tests/integration/test_tinker_native_backend.py index bbaa72729..5812ea1fb 100644 --- a/tests/integration/test_tinker_native_backend.py +++ b/tests/integration/test_tinker_native_backend.py @@ -8,6 +8,7 @@ import pytest import art +from art.tinker_native import TinkerNativeBackend DEFAULT_BASE_MODEL = "Qwen/Qwen3-30B-A3B-Instruct-2507" @@ -57,8 +58,8 @@ async def simple_rollout( async def test_tinker_native_backend(): model_name = f"test-tinker-native-{uuid.uuid4().hex[:8]}" with tempfile.TemporaryDirectory() as tmpdir: - backend = art.TinkerNativeBackend(path=tmpdir) # type: ignore[attr-defined] - model = art.TrainableModel( # type: ignore[attr-defined] + backend = TinkerNativeBackend(path=tmpdir) + model = art.TrainableModel( name=model_name, project="integration-tests", base_model=get_base_model(), diff --git a/uv.lock b/uv.lock index fcc458ce5..02787a11c 100644 --- a/uv.lock +++ b/uv.lock @@ -42,42 +42,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/f8/bb/be8146c196ad6e4dec78385d91e92591f8a433576c4e04c342a636fcd811/accelerate-1.7.0-py3-none-any.whl", hash = "sha256:cf57165cca28769c6cf2650812371c81b18e05743dfa3c748524b1bb4f2b272f", size = 362095, upload-time = "2025-05-15T10:00:49.914Z" }, ] -[[package]] -name = "aioboto3" -version = "15.5.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "aiobotocore", extra = ["boto3"] }, - { name = "aiofiles" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/a2/01/92e9ab00f36e2899315f49eefcd5b4685fbb19016c7f19a9edf06da80bb0/aioboto3-15.5.0.tar.gz", hash = "sha256:ea8d8787d315594842fbfcf2c4dce3bac2ad61be275bc8584b2ce9a3402a6979", size = 255069, upload-time = "2025-10-30T13:37:16.122Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/e5/3e/e8f5b665bca646d43b916763c901e00a07e40f7746c9128bdc912a089424/aioboto3-15.5.0-py3-none-any.whl", hash = "sha256:cc880c4d6a8481dd7e05da89f41c384dbd841454fc1998ae25ca9c39201437a6", size = 35913, upload-time = "2025-10-30T13:37:14.549Z" }, -] - -[[package]] -name = "aiobotocore" -version = "2.25.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "aiohttp" }, - { name = "aioitertools" }, - { name = "botocore" }, - { name = "jmespath" }, - { name = "multidict" }, - { name = "python-dateutil" }, - { name = "wrapt" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/62/94/2e4ec48cf1abb89971cb2612d86f979a6240520f0a659b53a43116d344dc/aiobotocore-2.25.1.tar.gz", hash = "sha256:ea9be739bfd7ece8864f072ec99bb9ed5c7e78ebb2b0b15f29781fbe02daedbc", size = 120560, upload-time = "2025-10-28T22:33:21.787Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/95/2a/d275ec4ce5cd0096665043995a7d76f5d0524853c76a3d04656de49f8808/aiobotocore-2.25.1-py3-none-any.whl", hash = "sha256:eb6daebe3cbef5b39a0bb2a97cffbe9c7cb46b2fcc399ad141f369f3c2134b1f", size = 86039, upload-time = "2025-10-28T22:33:19.949Z" }, -] - -[package.optional-dependencies] -boto3 = [ - { name = "boto3" }, -] - [[package]] name = "aiodns" version = "3.6.1" @@ -230,15 +194,6 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/1a/99/84ba7273339d0f3dfa57901b846489d2e5c2cd731470167757f1935fffbd/aiohttp_retry-2.9.1-py3-none-any.whl", hash = "sha256:66d2759d1921838256a05a3f80ad7e724936f083e35be5abb5e16eed6be6dc54", size = 9981, upload-time = "2024-11-06T10:44:52.917Z" }, ] -[[package]] -name = "aioitertools" -version = "0.13.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/fd/3c/53c4a17a05fb9ea2313ee1777ff53f5e001aefd5cc85aa2f4c2d982e1e38/aioitertools-0.13.0.tar.gz", hash = "sha256:620bd241acc0bbb9ec819f1ab215866871b4bbd1f73836a55f799200ee86950c", size = 19322, upload-time = "2025-11-06T22:17:07.609Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/10/a1/510b0a7fadc6f43a6ce50152e69dbd86415240835868bb0bd9b5b88b1e06/aioitertools-0.13.0-py3-none-any.whl", hash = "sha256:0be0292b856f08dfac90e31f4739432f4cb6d7520ab9eb73e143f4f2fa5259be", size = 24182, upload-time = "2025-11-06T22:17:06.502Z" }, -] - [[package]] name = "aiosignal" version = "1.4.0" @@ -312,15 +267,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/5d/77/073e8ac488f335aec7001952825275582fb8f433737e90f24eeef9d878f6/anthropic-0.71.0-py3-none-any.whl", hash = "sha256:85c5015fcdbdc728390f11b17642a65a4365d03b12b799b18b6cc57e71fdb327", size = 355035, upload-time = "2025-10-16T15:54:38.238Z" }, ] -[[package]] -name = "antlr4-python3-runtime" -version = "4.13.2" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/33/5f/2cdf6f7aca3b20d3f316e9f505292e1f256a32089bd702034c29ebde6242/antlr4_python3_runtime-4.13.2.tar.gz", hash = "sha256:909b647e1d2fc2b70180ac586df3933e38919c85f98ccc656a96cd3f25ef3916", size = 117467, upload-time = "2024-08-03T19:00:12.757Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/89/03/a851e84fcbb85214dc637b6378121ef9a0dd61b4c65264675d8a5c9b1ae7/antlr4_python3_runtime-4.13.2-py3-none-any.whl", hash = "sha256:fe3835eb8d33daece0e799090eda89719dbccee7aa39ef94eed3818cafa5a7e8", size = 144462, upload-time = "2024-08-03T19:00:11.134Z" }, -] - [[package]] name = "anyio" version = "4.12.0" @@ -621,19 +567,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/46/81/d8c22cd7e5e1c6a7d48e41a1d1d46c92f17dae70a54d9814f746e6027dec/bcrypt-4.0.1-cp36-abi3-win_amd64.whl", hash = "sha256:8a68f4341daf7522fe8d73874de8906f3a339048ba406be6ddc1b3ccb16fc0d9", size = 152930, upload-time = "2022-10-09T15:36:34.635Z" }, ] -[[package]] -name = "beautifulsoup4" -version = "4.14.3" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "soupsieve" }, - { name = "typing-extensions" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/c3/b0/1c6a16426d389813b48d95e26898aff79abbde42ad353958ad95cc8c9b21/beautifulsoup4-4.14.3.tar.gz", hash = "sha256:6292b1c5186d356bba669ef9f7f051757099565ad9ada5dd630bd9de5fa7fb86", size = 627737, upload-time = "2025-11-30T15:08:26.084Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/1a/39/47f9197bdd44df24d67ac8893641e16f386c984a0619ef2ee4c51fbbc019/beautifulsoup4-4.14.3-py3-none-any.whl", hash = "sha256:0918bfe44902e6ad8d57732ba310582e98da931428d231a5ecb9e7c703a735bb", size = 107721, upload-time = "2025-11-30T15:08:24.087Z" }, -] - [[package]] name = "bitsandbytes" version = "0.49.0" @@ -770,21 +703,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/b0/5c/dbd00727a3dd165d7e0e8af40e630cd7e45d77b525a3218afaff8a87358e/blake3-1.0.8-cp314-cp314t-win_amd64.whl", hash = 
"sha256:421b99cdf1ff2d1bf703bc56c454f4b286fce68454dd8711abbcb5a0df90c19a", size = 215133, upload-time = "2025-10-14T06:47:16.069Z" }, ] -[[package]] -name = "blobfile" -version = "3.1.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "filelock" }, - { name = "lxml" }, - { name = "pycryptodomex" }, - { name = "urllib3" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/f0/6d/2e7567da75ddbb24fe979f52284b708da349d67a41042635af36071a5a6b/blobfile-3.1.0.tar.gz", hash = "sha256:d45b6b1fa3b0920732314c23ddbdb4f494ca12f787c2b6eb6bba6faa51382671", size = 77229, upload-time = "2025-09-06T00:36:15.583Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/77/a7/51af11120d75af2828f8eede0b13a4caff650d708ac50e62d000aefe1ffb/blobfile-3.1.0-py3-none-any.whl", hash = "sha256:2b4c5e766ebb7dfa20e4990cf6ec3d2106bdc91d632fb9377f170a234c5a5c6a", size = 75741, upload-time = "2025-09-06T00:36:14.11Z" }, -] - [[package]] name = "boto3" version = "1.40.61" @@ -1109,24 +1027,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/0a/4c/925909008ed5a988ccbb72dcc897407e5d6d3bd72410d69e051fc0c14647/charset_normalizer-3.4.4-py3-none-any.whl", hash = "sha256:7a32c560861a02ff789ad905a2fe94e3f840803362c84fecf1851cb4cf3dc37f", size = 53402, upload-time = "2025-10-14T04:42:31.76Z" }, ] -[[package]] -name = "chess" -version = "1.11.2" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/93/09/7d04d7581ae3bb8b598017941781bceb7959dd1b13e3ebf7b6a2cd843bc9/chess-1.11.2.tar.gz", hash = "sha256:a8b43e5678fdb3000695bdaa573117ad683761e5ca38e591c4826eba6d25bb39", size = 6131385, upload-time = "2025-02-25T19:10:27.328Z" } - -[[package]] -name = "chz" -version = "0.4.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "typing-extensions" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/3a/6c/09c8ca50c40e18be211f25ad6dcdb81f8110ba2d611cd0375f5fb65fb762/chz-0.4.0.tar.gz", hash = "sha256:5380039e6970a1056c2140288aafa41a33f26d5e4c685117be80f7e260c8d679", size = 82473, upload-time = "2025-11-24T00:55:10.634Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/3c/eb/77789ad6f1807328a61c205881580546af597f60334f1f96fd4f3bb6e929/chz-0.4.0-py3-none-any.whl", hash = "sha256:5db5ffe42f6be38f1c37e1b18f0d5559572ee8a8dc941116e67f1bd5396e2a9b", size = 56277, upload-time = "2025-11-24T00:55:09.381Z" }, -] - [[package]] name = "cint" version = "1.0.0" @@ -2086,15 +1986,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/c7/4e/ce75a57ff3aebf6fc1f4e9d508b8e5810618a33d900ad6c19eb30b290b97/fonttools-4.61.1-py3-none-any.whl", hash = "sha256:17d2bf5d541add43822bcf0c43d7d847b160c9bb01d15d5007d84e2217aaa371", size = 1148996, upload-time = "2025-12-12T17:31:21.03Z" }, ] -[[package]] -name = "frozendict" -version = "2.4.7" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/90/b2/2a3d1374b7780999d3184e171e25439a8358c47b481f68be883c14086b4c/frozendict-2.4.7.tar.gz", hash = "sha256:e478fb2a1391a56c8a6e10cc97c4a9002b410ecd1ac28c18d780661762e271bd", size = 317082, upload-time = "2025-11-11T22:40:14.251Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/38/74/f94141b38a51a553efef7f510fc213894161ae49b88bffd037f8d2a7cb2f/frozendict-2.4.7-py3-none-any.whl", hash = "sha256:972af65924ea25cf5b4d9326d549e69a9a4918d8a76a9d3a7cd174d98b237550", size = 16264, upload-time = "2025-11-11T22:40:12.836Z" }, -] - 
[[package]] name = "frozenlist" version = "1.8.0" @@ -2919,53 +2810,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/ce/ff/3b59672c47c6284e8005b42e84ceba13864aa0f39f067c973d1af02f5d91/InquirerPy-0.3.4-py3-none-any.whl", hash = "sha256:c65fdfbac1fa00e3ee4fb10679f4d3ed7a012abf4833910e63c295827fe2a7d4", size = 67677, upload-time = "2022-06-27T23:11:17.723Z" }, ] -[[package]] -name = "inspect-ai" -version = "0.3.163" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "aioboto3" }, - { name = "aiohttp" }, - { name = "anyio" }, - { name = "beautifulsoup4" }, - { name = "boto3" }, - { name = "click" }, - { name = "debugpy" }, - { name = "docstring-parser" }, - { name = "frozendict" }, - { name = "fsspec" }, - { name = "httpx" }, - { name = "ijson" }, - { name = "jsonlines" }, - { name = "jsonpatch" }, - { name = "jsonpath-ng" }, - { name = "jsonref" }, - { name = "jsonschema" }, - { name = "mmh3" }, - { name = "nest-asyncio2" }, - { name = "numpy" }, - { name = "platformdirs" }, - { name = "psutil" }, - { name = "pydantic" }, - { name = "python-dotenv" }, - { name = "pyyaml" }, - { name = "rich" }, - { name = "s3fs" }, - { name = "semver" }, - { name = "shortuuid" }, - { name = "sniffio" }, - { name = "tenacity" }, - { name = "textual" }, - { name = "tiktoken" }, - { name = "typing-extensions" }, - { name = "universal-pathlib" }, - { name = "zipp" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/d8/1f/ceaff3a92c03196cc2503a1ec8cc865ca4695a7e25a20f3c9fb9892664da/inspect_ai-0.3.163.tar.gz", hash = "sha256:4a3b131a1d48430bf6d64ab9842fababf1ce66d64aa126f96ab09f399c4f9f61", size = 43358268, upload-time = "2026-01-21T20:36:44.792Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/5e/a0/bc25e3c895ff462f8b901784813a4241bdef8ed6aed66837f757a5e36747/inspect_ai-0.3.163-py3-none-any.whl", hash = "sha256:c09fd251d184a77f7a69fdd75695c457ed1c328fee4dafeabd9232f7309c6741", size = 34559953, upload-time = "2026-01-21T20:36:36.141Z" }, -] - [[package]] name = "interegular" version = "0.3.3" @@ -3251,27 +3095,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/31/b4/b9b800c45527aadd64d5b442f9b932b00648617eb5d63d2c7a6587b7cafc/jmespath-1.0.1-py3-none-any.whl", hash = "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980", size = 20256, upload-time = "2022-06-17T18:00:10.251Z" }, ] -[[package]] -name = "joblib" -version = "1.5.3" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/41/f2/d34e8b3a08a9cc79a50b2208a93dce981fe615b64d5a4d4abee421d898df/joblib-1.5.3.tar.gz", hash = "sha256:8561a3269e6801106863fd0d6d84bb737be9e7631e33aaed3fb9ce5953688da3", size = 331603, upload-time = "2025-12-15T08:41:46.427Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/7b/91/984aca2ec129e2757d1e4e3c81c3fcda9d0f85b74670a094cc443d9ee949/joblib-1.5.3-py3-none-any.whl", hash = "sha256:5fc3c5039fc5ca8c0276333a188bbd59d6b7ab37fe6632daa76bc7f9ec18e713", size = 309071, upload-time = "2025-12-15T08:41:44.973Z" }, -] - -[[package]] -name = "jsonlines" -version = "4.0.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "attrs" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/35/87/bcda8e46c88d0e34cad2f09ee2d0c7f5957bccdb9791b0b934ec84d84be4/jsonlines-4.0.0.tar.gz", hash = "sha256:0c6d2c09117550c089995247f605ae4cf77dd1533041d366351f6f298822ea74", size = 11359, upload-time = "2023-09-01T12:34:44.187Z" } -wheels = [ - { url = 
"https://files.pythonhosted.org/packages/f8/62/d9ba6323b9202dd2fe166beab8a86d29465c41a0288cbe229fac60c1ab8d/jsonlines-4.0.0-py3-none-any.whl", hash = "sha256:185b334ff2ca5a91362993f42e83588a360cf95ce4b71a73548502bda52a7c55", size = 8701, upload-time = "2023-09-01T12:34:42.563Z" }, -] - [[package]] name = "jsonpatch" version = "1.33" @@ -3284,18 +3107,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/73/07/02e16ed01e04a374e644b575638ec7987ae846d25ad97bcc9945a3ee4b0e/jsonpatch-1.33-py2.py3-none-any.whl", hash = "sha256:0ae28c0cd062bbd8b8ecc26d7d164fbbea9652a1a3693f3b956c1eae5145dade", size = 12898, upload-time = "2023-06-16T21:01:28.466Z" }, ] -[[package]] -name = "jsonpath-ng" -version = "1.7.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "ply" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/6d/86/08646239a313f895186ff0a4573452038eed8c86f54380b3ebac34d32fb2/jsonpath-ng-1.7.0.tar.gz", hash = "sha256:f6f5f7fd4e5ff79c785f1573b394043b39849fb2bb47bcead935d12b00beab3c", size = 37838, upload-time = "2024-10-11T15:41:42.404Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/35/5a/73ecb3d82f8615f32ccdadeb9356726d6cae3a4bbc840b437ceb95708063/jsonpath_ng-1.7.0-py3-none-any.whl", hash = "sha256:f3d7f9e848cba1b6da28c55b1c26ff915dc9e0b1ba7e752a53d6da8d5cbd00b6", size = 30105, upload-time = "2024-11-20T17:58:30.418Z" }, -] - [[package]] name = "jsonpointer" version = "3.0.0" @@ -3305,15 +3116,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/71/92/5e77f98553e9e75130c78900d000368476aed74276eb8ae8796f65f00918/jsonpointer-3.0.0-py2.py3-none-any.whl", hash = "sha256:13e088adc14fca8b6aa8177c044e12701e6ad4b28ff10e65f2267a90109c9942", size = 7595, upload-time = "2024-06-10T19:24:40.698Z" }, ] -[[package]] -name = "jsonref" -version = "1.1.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/aa/0d/c1f3277e90ccdb50d33ed5ba1ec5b3f0a242ed8c1b1a85d3afeb68464dca/jsonref-1.1.0.tar.gz", hash = "sha256:32fe8e1d85af0fdefbebce950af85590b22b60f9e95443176adbde4e1ecea552", size = 8814, upload-time = "2023-01-16T16:10:04.455Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/0c/ec/e1db9922bceb168197a558a2b8c03a7963f1afe93517ddd3cf99f202f996/jsonref-1.1.0-py3-none-any.whl", hash = "sha256:590dc7773df6c21cbf948b5dac07a72a251db28b0238ceecce0a2abfa8ec30a9", size = 9425, upload-time = "2023-01-16T16:10:02.255Z" }, -] - [[package]] name = "jsonschema" version = "4.25.1" @@ -3634,31 +3436,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/2d/00/d90b10b962b4277f5e64a78b6609968859ff86889f5b898c1a778c06ec00/lark-1.2.2-py3-none-any.whl", hash = "sha256:c2276486b02f0f1b90be155f2c8ba4a8e194d42775786db622faccd652d8e80c", size = 111036, upload-time = "2024-08-13T19:48:58.603Z" }, ] -[[package]] -name = "latex2sympy2-extended" -version = "1.11.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "antlr4-python3-runtime" }, - { name = "sympy" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/30/75/456da2da05f6380ea96e6ea804ab2c03e41fc3ed80052307fe8efe6ea20e/latex2sympy2_extended-1.11.0.tar.gz", hash = "sha256:9695657c81b50abba2636638638618db59f4663ed2a4a12d62cef74a40e28fec", size = 207023, upload-time = "2026-01-10T01:43:21.319Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/e9/61/f75cd1fa54d8434276126034aed54dd120747de9a8fa013cdd79545ccbeb/latex2sympy2_extended-1.11.0-py3-none-any.whl", hash = 
"sha256:aebb77d52ce269e25028e4bea89ddb14d242ba36bcf7b636496fb5fd9728d234", size = 209050, upload-time = "2026-01-10T01:43:19.458Z" }, -] - -[[package]] -name = "linkify-it-py" -version = "2.0.3" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "uc-micro-py" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/2a/ae/bb56c6828e4797ba5a4821eec7c43b8bf40f69cda4d4f5f8c8a2810ec96a/linkify-it-py-2.0.3.tar.gz", hash = "sha256:68cda27e162e9215c17d786649d1da0021a451bdc436ef9e0fa0ba5234b9b048", size = 27946, upload-time = "2024-02-04T14:48:04.179Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/04/1e/b832de447dee8b582cac175871d2f6c3d5077cc56d5575cadba1fd1cccfa/linkify_it_py-2.0.3-py3-none-any.whl", hash = "sha256:6bcbc417b0ac14323382aef5c5192c0075bf8a9d6b41820a2b66371eac6b6d79", size = 19820, upload-time = "2024-02-04T14:48:02.496Z" }, -] - [[package]] name = "litellm" version = "1.80.12" @@ -3747,108 +3524,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/0c/29/0348de65b8cc732daa3e33e67806420b2ae89bdce2b04af740289c5c6c8c/loguru-0.7.3-py3-none-any.whl", hash = "sha256:31a33c10c8e1e10422bfd431aeb5d351c7cf7fa671e3c4df004162264b28220c", size = 61595, upload-time = "2024-12-06T11:20:54.538Z" }, ] -[[package]] -name = "lxml" -version = "6.0.2" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/aa/88/262177de60548e5a2bfc46ad28232c9e9cbde697bd94132aeb80364675cb/lxml-6.0.2.tar.gz", hash = "sha256:cd79f3367bd74b317dda655dc8fcfa304d9eb6e4fb06b7168c5cf27f96e0cd62", size = 4073426, upload-time = "2025-09-22T04:04:59.287Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/77/d5/becbe1e2569b474a23f0c672ead8a29ac50b2dc1d5b9de184831bda8d14c/lxml-6.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:13e35cbc684aadf05d8711a5d1b5857c92e5e580efa9a0d2be197199c8def607", size = 8634365, upload-time = "2025-09-22T04:00:45.672Z" }, - { url = "https://files.pythonhosted.org/packages/28/66/1ced58f12e804644426b85d0bb8a4478ca77bc1761455da310505f1a3526/lxml-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3b1675e096e17c6fe9c0e8c81434f5736c0739ff9ac6123c87c2d452f48fc938", size = 4650793, upload-time = "2025-09-22T04:00:47.783Z" }, - { url = "https://files.pythonhosted.org/packages/11/84/549098ffea39dfd167e3f174b4ce983d0eed61f9d8d25b7bf2a57c3247fc/lxml-6.0.2-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8ac6e5811ae2870953390452e3476694196f98d447573234592d30488147404d", size = 4944362, upload-time = "2025-09-22T04:00:49.845Z" }, - { url = "https://files.pythonhosted.org/packages/ac/bd/f207f16abf9749d2037453d56b643a7471d8fde855a231a12d1e095c4f01/lxml-6.0.2-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5aa0fc67ae19d7a64c3fe725dc9a1bb11f80e01f78289d05c6f62545affec438", size = 5083152, upload-time = "2025-09-22T04:00:51.709Z" }, - { url = "https://files.pythonhosted.org/packages/15/ae/bd813e87d8941d52ad5b65071b1affb48da01c4ed3c9c99e40abb266fbff/lxml-6.0.2-cp311-cp311-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:de496365750cc472b4e7902a485d3f152ecf57bd3ba03ddd5578ed8ceb4c5964", size = 5023539, upload-time = "2025-09-22T04:00:53.593Z" }, - { url = "https://files.pythonhosted.org/packages/02/cd/9bfef16bd1d874fbe0cb51afb00329540f30a3283beb9f0780adbb7eec03/lxml-6.0.2-cp311-cp311-manylinux_2_26_i686.manylinux_2_28_i686.whl", hash = 
"sha256:200069a593c5e40b8f6fc0d84d86d970ba43138c3e68619ffa234bc9bb806a4d", size = 5344853, upload-time = "2025-09-22T04:00:55.524Z" }, - { url = "https://files.pythonhosted.org/packages/b8/89/ea8f91594bc5dbb879734d35a6f2b0ad50605d7fb419de2b63d4211765cc/lxml-6.0.2-cp311-cp311-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7d2de809c2ee3b888b59f995625385f74629707c9355e0ff856445cdcae682b7", size = 5225133, upload-time = "2025-09-22T04:00:57.269Z" }, - { url = "https://files.pythonhosted.org/packages/b9/37/9c735274f5dbec726b2db99b98a43950395ba3d4a1043083dba2ad814170/lxml-6.0.2-cp311-cp311-manylinux_2_31_armv7l.whl", hash = "sha256:b2c3da8d93cf5db60e8858c17684c47d01fee6405e554fb55018dd85fc23b178", size = 4677944, upload-time = "2025-09-22T04:00:59.052Z" }, - { url = "https://files.pythonhosted.org/packages/20/28/7dfe1ba3475d8bfca3878365075abe002e05d40dfaaeb7ec01b4c587d533/lxml-6.0.2-cp311-cp311-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:442de7530296ef5e188373a1ea5789a46ce90c4847e597856570439621d9c553", size = 5284535, upload-time = "2025-09-22T04:01:01.335Z" }, - { url = "https://files.pythonhosted.org/packages/e7/cf/5f14bc0de763498fc29510e3532bf2b4b3a1c1d5d0dff2e900c16ba021ef/lxml-6.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2593c77efde7bfea7f6389f1ab249b15ed4aa5bc5cb5131faa3b843c429fbedb", size = 5067343, upload-time = "2025-09-22T04:01:03.13Z" }, - { url = "https://files.pythonhosted.org/packages/1c/b0/bb8275ab5472f32b28cfbbcc6db7c9d092482d3439ca279d8d6fa02f7025/lxml-6.0.2-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:3e3cb08855967a20f553ff32d147e14329b3ae70ced6edc2f282b94afbc74b2a", size = 4725419, upload-time = "2025-09-22T04:01:05.013Z" }, - { url = "https://files.pythonhosted.org/packages/25/4c/7c222753bc72edca3b99dbadba1b064209bc8ed4ad448af990e60dcce462/lxml-6.0.2-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:2ed6c667fcbb8c19c6791bbf40b7268ef8ddf5a96940ba9404b9f9a304832f6c", size = 5275008, upload-time = "2025-09-22T04:01:07.327Z" }, - { url = "https://files.pythonhosted.org/packages/6c/8c/478a0dc6b6ed661451379447cdbec77c05741a75736d97e5b2b729687828/lxml-6.0.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b8f18914faec94132e5b91e69d76a5c1d7b0c73e2489ea8929c4aaa10b76bbf7", size = 5248906, upload-time = "2025-09-22T04:01:09.452Z" }, - { url = "https://files.pythonhosted.org/packages/2d/d9/5be3a6ab2784cdf9accb0703b65e1b64fcdd9311c9f007630c7db0cfcce1/lxml-6.0.2-cp311-cp311-win32.whl", hash = "sha256:6605c604e6daa9e0d7f0a2137bdc47a2e93b59c60a65466353e37f8272f47c46", size = 3610357, upload-time = "2025-09-22T04:01:11.102Z" }, - { url = "https://files.pythonhosted.org/packages/e2/7d/ca6fb13349b473d5732fb0ee3eec8f6c80fc0688e76b7d79c1008481bf1f/lxml-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e5867f2651016a3afd8dd2c8238baa66f1e2802f44bc17e236f547ace6647078", size = 4036583, upload-time = "2025-09-22T04:01:12.766Z" }, - { url = "https://files.pythonhosted.org/packages/ab/a2/51363b5ecd3eab46563645f3a2c3836a2fc67d01a1b87c5017040f39f567/lxml-6.0.2-cp311-cp311-win_arm64.whl", hash = "sha256:4197fb2534ee05fd3e7afaab5d8bfd6c2e186f65ea7f9cd6a82809c887bd1285", size = 3680591, upload-time = "2025-09-22T04:01:14.874Z" }, - { url = "https://files.pythonhosted.org/packages/f3/c8/8ff2bc6b920c84355146cd1ab7d181bc543b89241cfb1ebee824a7c81457/lxml-6.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:a59f5448ba2ceccd06995c95ea59a7674a10de0810f2ce90c9006f3cbc044456", size = 8661887, upload-time = 
"2025-09-22T04:01:17.265Z" }, - { url = "https://files.pythonhosted.org/packages/37/6f/9aae1008083bb501ef63284220ce81638332f9ccbfa53765b2b7502203cf/lxml-6.0.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:e8113639f3296706fbac34a30813929e29247718e88173ad849f57ca59754924", size = 4667818, upload-time = "2025-09-22T04:01:19.688Z" }, - { url = "https://files.pythonhosted.org/packages/f1/ca/31fb37f99f37f1536c133476674c10b577e409c0a624384147653e38baf2/lxml-6.0.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:a8bef9b9825fa8bc816a6e641bb67219489229ebc648be422af695f6e7a4fa7f", size = 4950807, upload-time = "2025-09-22T04:01:21.487Z" }, - { url = "https://files.pythonhosted.org/packages/da/87/f6cb9442e4bada8aab5ae7e1046264f62fdbeaa6e3f6211b93f4c0dd97f1/lxml-6.0.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:65ea18d710fd14e0186c2f973dc60bb52039a275f82d3c44a0e42b43440ea534", size = 5109179, upload-time = "2025-09-22T04:01:23.32Z" }, - { url = "https://files.pythonhosted.org/packages/c8/20/a7760713e65888db79bbae4f6146a6ae5c04e4a204a3c48896c408cd6ed2/lxml-6.0.2-cp312-cp312-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c371aa98126a0d4c739ca93ceffa0fd7a5d732e3ac66a46e74339acd4d334564", size = 5023044, upload-time = "2025-09-22T04:01:25.118Z" }, - { url = "https://files.pythonhosted.org/packages/a2/b0/7e64e0460fcb36471899f75831509098f3fd7cd02a3833ac517433cb4f8f/lxml-6.0.2-cp312-cp312-manylinux_2_26_i686.manylinux_2_28_i686.whl", hash = "sha256:700efd30c0fa1a3581d80a748157397559396090a51d306ea59a70020223d16f", size = 5359685, upload-time = "2025-09-22T04:01:27.398Z" }, - { url = "https://files.pythonhosted.org/packages/b9/e1/e5df362e9ca4e2f48ed6411bd4b3a0ae737cc842e96877f5bf9428055ab4/lxml-6.0.2-cp312-cp312-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c33e66d44fe60e72397b487ee92e01da0d09ba2d66df8eae42d77b6d06e5eba0", size = 5654127, upload-time = "2025-09-22T04:01:29.629Z" }, - { url = "https://files.pythonhosted.org/packages/c6/d1/232b3309a02d60f11e71857778bfcd4acbdb86c07db8260caf7d008b08f8/lxml-6.0.2-cp312-cp312-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:90a345bbeaf9d0587a3aaffb7006aa39ccb6ff0e96a57286c0cb2fd1520ea192", size = 5253958, upload-time = "2025-09-22T04:01:31.535Z" }, - { url = "https://files.pythonhosted.org/packages/35/35/d955a070994725c4f7d80583a96cab9c107c57a125b20bb5f708fe941011/lxml-6.0.2-cp312-cp312-manylinux_2_31_armv7l.whl", hash = "sha256:064fdadaf7a21af3ed1dcaa106b854077fbeada827c18f72aec9346847cd65d0", size = 4711541, upload-time = "2025-09-22T04:01:33.801Z" }, - { url = "https://files.pythonhosted.org/packages/1e/be/667d17363b38a78c4bd63cfd4b4632029fd68d2c2dc81f25ce9eb5224dd5/lxml-6.0.2-cp312-cp312-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:fbc74f42c3525ac4ffa4b89cbdd00057b6196bcefe8bce794abd42d33a018092", size = 5267426, upload-time = "2025-09-22T04:01:35.639Z" }, - { url = "https://files.pythonhosted.org/packages/ea/47/62c70aa4a1c26569bc958c9ca86af2bb4e1f614e8c04fb2989833874f7ae/lxml-6.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6ddff43f702905a4e32bc24f3f2e2edfe0f8fde3277d481bffb709a4cced7a1f", size = 5064917, upload-time = "2025-09-22T04:01:37.448Z" }, - { url = "https://files.pythonhosted.org/packages/bd/55/6ceddaca353ebd0f1908ef712c597f8570cc9c58130dbb89903198e441fd/lxml-6.0.2-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:6da5185951d72e6f5352166e3da7b0dc27aa70bd1090b0eb3f7f7212b53f1bb8", size = 
4788795, upload-time = "2025-09-22T04:01:39.165Z" }, - { url = "https://files.pythonhosted.org/packages/cf/e8/fd63e15da5e3fd4c2146f8bbb3c14e94ab850589beab88e547b2dbce22e1/lxml-6.0.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:57a86e1ebb4020a38d295c04fc79603c7899e0df71588043eb218722dabc087f", size = 5676759, upload-time = "2025-09-22T04:01:41.506Z" }, - { url = "https://files.pythonhosted.org/packages/76/47/b3ec58dc5c374697f5ba37412cd2728f427d056315d124dd4b61da381877/lxml-6.0.2-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:2047d8234fe735ab77802ce5f2297e410ff40f5238aec569ad7c8e163d7b19a6", size = 5255666, upload-time = "2025-09-22T04:01:43.363Z" }, - { url = "https://files.pythonhosted.org/packages/19/93/03ba725df4c3d72afd9596eef4a37a837ce8e4806010569bedfcd2cb68fd/lxml-6.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:6f91fd2b2ea15a6800c8e24418c0775a1694eefc011392da73bc6cef2623b322", size = 5277989, upload-time = "2025-09-22T04:01:45.215Z" }, - { url = "https://files.pythonhosted.org/packages/c6/80/c06de80bfce881d0ad738576f243911fccf992687ae09fd80b734712b39c/lxml-6.0.2-cp312-cp312-win32.whl", hash = "sha256:3ae2ce7d6fedfb3414a2b6c5e20b249c4c607f72cb8d2bb7cc9c6ec7c6f4e849", size = 3611456, upload-time = "2025-09-22T04:01:48.243Z" }, - { url = "https://files.pythonhosted.org/packages/f7/d7/0cdfb6c3e30893463fb3d1e52bc5f5f99684a03c29a0b6b605cfae879cd5/lxml-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:72c87e5ee4e58a8354fb9c7c84cbf95a1c8236c127a5d1b7683f04bed8361e1f", size = 4011793, upload-time = "2025-09-22T04:01:50.042Z" }, - { url = "https://files.pythonhosted.org/packages/ea/7b/93c73c67db235931527301ed3785f849c78991e2e34f3fd9a6663ffda4c5/lxml-6.0.2-cp312-cp312-win_arm64.whl", hash = "sha256:61cb10eeb95570153e0c0e554f58df92ecf5109f75eacad4a95baa709e26c3d6", size = 3672836, upload-time = "2025-09-22T04:01:52.145Z" }, - { url = "https://files.pythonhosted.org/packages/53/fd/4e8f0540608977aea078bf6d79f128e0e2c2bba8af1acf775c30baa70460/lxml-6.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:9b33d21594afab46f37ae58dfadd06636f154923c4e8a4d754b0127554eb2e77", size = 8648494, upload-time = "2025-09-22T04:01:54.242Z" }, - { url = "https://files.pythonhosted.org/packages/5d/f4/2a94a3d3dfd6c6b433501b8d470a1960a20ecce93245cf2db1706adf6c19/lxml-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:6c8963287d7a4c5c9a432ff487c52e9c5618667179c18a204bdedb27310f022f", size = 4661146, upload-time = "2025-09-22T04:01:56.282Z" }, - { url = "https://files.pythonhosted.org/packages/25/2e/4efa677fa6b322013035d38016f6ae859d06cac67437ca7dc708a6af7028/lxml-6.0.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:1941354d92699fb5ffe6ed7b32f9649e43c2feb4b97205f75866f7d21aa91452", size = 4946932, upload-time = "2025-09-22T04:01:58.989Z" }, - { url = "https://files.pythonhosted.org/packages/ce/0f/526e78a6d38d109fdbaa5049c62e1d32fdd70c75fb61c4eadf3045d3d124/lxml-6.0.2-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:bb2f6ca0ae2d983ded09357b84af659c954722bbf04dea98030064996d156048", size = 5100060, upload-time = "2025-09-22T04:02:00.812Z" }, - { url = "https://files.pythonhosted.org/packages/81/76/99de58d81fa702cc0ea7edae4f4640416c2062813a00ff24bd70ac1d9c9b/lxml-6.0.2-cp313-cp313-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:eb2a12d704f180a902d7fa778c6d71f36ceb7b0d317f34cdc76a5d05aa1dd1df", size = 5019000, upload-time = "2025-09-22T04:02:02.671Z" }, - { url = 
"https://files.pythonhosted.org/packages/b5/35/9e57d25482bc9a9882cb0037fdb9cc18f4b79d85df94fa9d2a89562f1d25/lxml-6.0.2-cp313-cp313-manylinux_2_26_i686.manylinux_2_28_i686.whl", hash = "sha256:6ec0e3f745021bfed19c456647f0298d60a24c9ff86d9d051f52b509663feeb1", size = 5348496, upload-time = "2025-09-22T04:02:04.904Z" }, - { url = "https://files.pythonhosted.org/packages/a6/8e/cb99bd0b83ccc3e8f0f528e9aa1f7a9965dfec08c617070c5db8d63a87ce/lxml-6.0.2-cp313-cp313-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:846ae9a12d54e368933b9759052d6206a9e8b250291109c48e350c1f1f49d916", size = 5643779, upload-time = "2025-09-22T04:02:06.689Z" }, - { url = "https://files.pythonhosted.org/packages/d0/34/9e591954939276bb679b73773836c6684c22e56d05980e31d52a9a8deb18/lxml-6.0.2-cp313-cp313-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ef9266d2aa545d7374938fb5c484531ef5a2ec7f2d573e62f8ce722c735685fd", size = 5244072, upload-time = "2025-09-22T04:02:08.587Z" }, - { url = "https://files.pythonhosted.org/packages/8d/27/b29ff065f9aaca443ee377aff699714fcbffb371b4fce5ac4ca759e436d5/lxml-6.0.2-cp313-cp313-manylinux_2_31_armv7l.whl", hash = "sha256:4077b7c79f31755df33b795dc12119cb557a0106bfdab0d2c2d97bd3cf3dffa6", size = 4718675, upload-time = "2025-09-22T04:02:10.783Z" }, - { url = "https://files.pythonhosted.org/packages/2b/9f/f756f9c2cd27caa1a6ef8c32ae47aadea697f5c2c6d07b0dae133c244fbe/lxml-6.0.2-cp313-cp313-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:a7c5d5e5f1081955358533be077166ee97ed2571d6a66bdba6ec2f609a715d1a", size = 5255171, upload-time = "2025-09-22T04:02:12.631Z" }, - { url = "https://files.pythonhosted.org/packages/61/46/bb85ea42d2cb1bd8395484fd72f38e3389611aa496ac7772da9205bbda0e/lxml-6.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:8f8d0cbd0674ee89863a523e6994ac25fd5be9c8486acfc3e5ccea679bad2679", size = 5057175, upload-time = "2025-09-22T04:02:14.718Z" }, - { url = "https://files.pythonhosted.org/packages/95/0c/443fc476dcc8e41577f0af70458c50fe299a97bb6b7505bb1ae09aa7f9ac/lxml-6.0.2-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:2cbcbf6d6e924c28f04a43f3b6f6e272312a090f269eff68a2982e13e5d57659", size = 4785688, upload-time = "2025-09-22T04:02:16.957Z" }, - { url = "https://files.pythonhosted.org/packages/48/78/6ef0b359d45bb9697bc5a626e1992fa5d27aa3f8004b137b2314793b50a0/lxml-6.0.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:dfb874cfa53340009af6bdd7e54ebc0d21012a60a4e65d927c2e477112e63484", size = 5660655, upload-time = "2025-09-22T04:02:18.815Z" }, - { url = "https://files.pythonhosted.org/packages/ff/ea/e1d33808f386bc1339d08c0dcada6e4712d4ed8e93fcad5f057070b7988a/lxml-6.0.2-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:fb8dae0b6b8b7f9e96c26fdd8121522ce5de9bb5538010870bd538683d30e9a2", size = 5247695, upload-time = "2025-09-22T04:02:20.593Z" }, - { url = "https://files.pythonhosted.org/packages/4f/47/eba75dfd8183673725255247a603b4ad606f4ae657b60c6c145b381697da/lxml-6.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:358d9adae670b63e95bc59747c72f4dc97c9ec58881d4627fe0120da0f90d314", size = 5269841, upload-time = "2025-09-22T04:02:22.489Z" }, - { url = "https://files.pythonhosted.org/packages/76/04/5c5e2b8577bc936e219becb2e98cdb1aca14a4921a12995b9d0c523502ae/lxml-6.0.2-cp313-cp313-win32.whl", hash = "sha256:e8cd2415f372e7e5a789d743d133ae474290a90b9023197fd78f32e2dc6873e2", size = 3610700, upload-time = "2025-09-22T04:02:24.465Z" }, - { url = 
"https://files.pythonhosted.org/packages/fe/0a/4643ccc6bb8b143e9f9640aa54e38255f9d3b45feb2cbe7ae2ca47e8782e/lxml-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:b30d46379644fbfc3ab81f8f82ae4de55179414651f110a1514f0b1f8f6cb2d7", size = 4010347, upload-time = "2025-09-22T04:02:26.286Z" }, - { url = "https://files.pythonhosted.org/packages/31/ef/dcf1d29c3f530577f61e5fe2f1bd72929acf779953668a8a47a479ae6f26/lxml-6.0.2-cp313-cp313-win_arm64.whl", hash = "sha256:13dcecc9946dca97b11b7c40d29fba63b55ab4170d3c0cf8c0c164343b9bfdcf", size = 3671248, upload-time = "2025-09-22T04:02:27.918Z" }, - { url = "https://files.pythonhosted.org/packages/03/15/d4a377b385ab693ce97b472fe0c77c2b16ec79590e688b3ccc71fba19884/lxml-6.0.2-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:b0c732aa23de8f8aec23f4b580d1e52905ef468afb4abeafd3fec77042abb6fe", size = 8659801, upload-time = "2025-09-22T04:02:30.113Z" }, - { url = "https://files.pythonhosted.org/packages/c8/e8/c128e37589463668794d503afaeb003987373c5f94d667124ffd8078bbd9/lxml-6.0.2-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:4468e3b83e10e0317a89a33d28f7aeba1caa4d1a6fd457d115dd4ffe90c5931d", size = 4659403, upload-time = "2025-09-22T04:02:32.119Z" }, - { url = "https://files.pythonhosted.org/packages/00/ce/74903904339decdf7da7847bb5741fc98a5451b42fc419a86c0c13d26fe2/lxml-6.0.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:abd44571493973bad4598a3be7e1d807ed45aa2adaf7ab92ab7c62609569b17d", size = 4966974, upload-time = "2025-09-22T04:02:34.155Z" }, - { url = "https://files.pythonhosted.org/packages/1f/d3/131dec79ce61c5567fecf82515bd9bc36395df42501b50f7f7f3bd065df0/lxml-6.0.2-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:370cd78d5855cfbffd57c422851f7d3864e6ae72d0da615fca4dad8c45d375a5", size = 5102953, upload-time = "2025-09-22T04:02:36.054Z" }, - { url = "https://files.pythonhosted.org/packages/3a/ea/a43ba9bb750d4ffdd885f2cd333572f5bb900cd2408b67fdda07e85978a0/lxml-6.0.2-cp314-cp314-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:901e3b4219fa04ef766885fb40fa516a71662a4c61b80c94d25336b4934b71c0", size = 5055054, upload-time = "2025-09-22T04:02:38.154Z" }, - { url = "https://files.pythonhosted.org/packages/60/23/6885b451636ae286c34628f70a7ed1fcc759f8d9ad382d132e1c8d3d9bfd/lxml-6.0.2-cp314-cp314-manylinux_2_26_i686.manylinux_2_28_i686.whl", hash = "sha256:a4bf42d2e4cf52c28cc1812d62426b9503cdb0c87a6de81442626aa7d69707ba", size = 5352421, upload-time = "2025-09-22T04:02:40.413Z" }, - { url = "https://files.pythonhosted.org/packages/48/5b/fc2ddfc94ddbe3eebb8e9af6e3fd65e2feba4967f6a4e9683875c394c2d8/lxml-6.0.2-cp314-cp314-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:b2c7fdaa4d7c3d886a42534adec7cfac73860b89b4e5298752f60aa5984641a0", size = 5673684, upload-time = "2025-09-22T04:02:42.288Z" }, - { url = "https://files.pythonhosted.org/packages/29/9c/47293c58cc91769130fbf85531280e8cc7868f7fbb6d92f4670071b9cb3e/lxml-6.0.2-cp314-cp314-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:98a5e1660dc7de2200b00d53fa00bcd3c35a3608c305d45a7bbcaf29fa16e83d", size = 5252463, upload-time = "2025-09-22T04:02:44.165Z" }, - { url = "https://files.pythonhosted.org/packages/9b/da/ba6eceb830c762b48e711ded880d7e3e89fc6c7323e587c36540b6b23c6b/lxml-6.0.2-cp314-cp314-manylinux_2_31_armv7l.whl", hash = "sha256:dc051506c30b609238d79eda75ee9cab3e520570ec8219844a72a46020901e37", size = 4698437, upload-time = "2025-09-22T04:02:46.524Z" }, - { url = 
"https://files.pythonhosted.org/packages/a5/24/7be3f82cb7990b89118d944b619e53c656c97dc89c28cfb143fdb7cd6f4d/lxml-6.0.2-cp314-cp314-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:8799481bbdd212470d17513a54d568f44416db01250f49449647b5ab5b5dccb9", size = 5269890, upload-time = "2025-09-22T04:02:48.812Z" }, - { url = "https://files.pythonhosted.org/packages/1b/bd/dcfb9ea1e16c665efd7538fc5d5c34071276ce9220e234217682e7d2c4a5/lxml-6.0.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:9261bb77c2dab42f3ecd9103951aeca2c40277701eb7e912c545c1b16e0e4917", size = 5097185, upload-time = "2025-09-22T04:02:50.746Z" }, - { url = "https://files.pythonhosted.org/packages/21/04/a60b0ff9314736316f28316b694bccbbabe100f8483ad83852d77fc7468e/lxml-6.0.2-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:65ac4a01aba353cfa6d5725b95d7aed6356ddc0a3cd734de00124d285b04b64f", size = 4745895, upload-time = "2025-09-22T04:02:52.968Z" }, - { url = "https://files.pythonhosted.org/packages/d6/bd/7d54bd1846e5a310d9c715921c5faa71cf5c0853372adf78aee70c8d7aa2/lxml-6.0.2-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:b22a07cbb82fea98f8a2fd814f3d1811ff9ed76d0fc6abc84eb21527596e7cc8", size = 5695246, upload-time = "2025-09-22T04:02:54.798Z" }, - { url = "https://files.pythonhosted.org/packages/fd/32/5643d6ab947bc371da21323acb2a6e603cedbe71cb4c99c8254289ab6f4e/lxml-6.0.2-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:d759cdd7f3e055d6bc8d9bec3ad905227b2e4c785dc16c372eb5b5e83123f48a", size = 5260797, upload-time = "2025-09-22T04:02:57.058Z" }, - { url = "https://files.pythonhosted.org/packages/33/da/34c1ec4cff1eea7d0b4cd44af8411806ed943141804ac9c5d565302afb78/lxml-6.0.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:945da35a48d193d27c188037a05fec5492937f66fb1958c24fc761fb9d40d43c", size = 5277404, upload-time = "2025-09-22T04:02:58.966Z" }, - { url = "https://files.pythonhosted.org/packages/82/57/4eca3e31e54dc89e2c3507e1cd411074a17565fa5ffc437c4ae0a00d439e/lxml-6.0.2-cp314-cp314-win32.whl", hash = "sha256:be3aaa60da67e6153eb15715cc2e19091af5dc75faef8b8a585aea372507384b", size = 3670072, upload-time = "2025-09-22T04:03:38.05Z" }, - { url = "https://files.pythonhosted.org/packages/e3/e0/c96cf13eccd20c9421ba910304dae0f619724dcf1702864fd59dd386404d/lxml-6.0.2-cp314-cp314-win_amd64.whl", hash = "sha256:fa25afbadead523f7001caf0c2382afd272c315a033a7b06336da2637d92d6ed", size = 4080617, upload-time = "2025-09-22T04:03:39.835Z" }, - { url = "https://files.pythonhosted.org/packages/d5/5d/b3f03e22b3d38d6f188ef044900a9b29b2fe0aebb94625ce9fe244011d34/lxml-6.0.2-cp314-cp314-win_arm64.whl", hash = "sha256:063eccf89df5b24e361b123e257e437f9e9878f425ee9aae3144c77faf6da6d8", size = 3754930, upload-time = "2025-09-22T04:03:41.565Z" }, - { url = "https://files.pythonhosted.org/packages/5e/5c/42c2c4c03554580708fc738d13414801f340c04c3eff90d8d2d227145275/lxml-6.0.2-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:6162a86d86893d63084faaf4ff937b3daea233e3682fb4474db07395794fa80d", size = 8910380, upload-time = "2025-09-22T04:03:01.645Z" }, - { url = "https://files.pythonhosted.org/packages/bf/4f/12df843e3e10d18d468a7557058f8d3733e8b6e12401f30b1ef29360740f/lxml-6.0.2-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:414aaa94e974e23a3e92e7ca5b97d10c0cf37b6481f50911032c69eeb3991bba", size = 4775632, upload-time = "2025-09-22T04:03:03.814Z" }, - { url = 
"https://files.pythonhosted.org/packages/e4/0c/9dc31e6c2d0d418483cbcb469d1f5a582a1cd00a1f4081953d44051f3c50/lxml-6.0.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:48461bd21625458dd01e14e2c38dd0aea69addc3c4f960c30d9f59d7f93be601", size = 4975171, upload-time = "2025-09-22T04:03:05.651Z" }, - { url = "https://files.pythonhosted.org/packages/e7/2b/9b870c6ca24c841bdd887504808f0417aa9d8d564114689266f19ddf29c8/lxml-6.0.2-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:25fcc59afc57d527cfc78a58f40ab4c9b8fd096a9a3f964d2781ffb6eb33f4ed", size = 5110109, upload-time = "2025-09-22T04:03:07.452Z" }, - { url = "https://files.pythonhosted.org/packages/bf/0c/4f5f2a4dd319a178912751564471355d9019e220c20d7db3fb8307ed8582/lxml-6.0.2-cp314-cp314t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5179c60288204e6ddde3f774a93350177e08876eaf3ab78aa3a3649d43eb7d37", size = 5041061, upload-time = "2025-09-22T04:03:09.297Z" }, - { url = "https://files.pythonhosted.org/packages/12/64/554eed290365267671fe001a20d72d14f468ae4e6acef1e179b039436967/lxml-6.0.2-cp314-cp314t-manylinux_2_26_i686.manylinux_2_28_i686.whl", hash = "sha256:967aab75434de148ec80597b75062d8123cadf2943fb4281f385141e18b21338", size = 5306233, upload-time = "2025-09-22T04:03:11.651Z" }, - { url = "https://files.pythonhosted.org/packages/7a/31/1d748aa275e71802ad9722df32a7a35034246b42c0ecdd8235412c3396ef/lxml-6.0.2-cp314-cp314t-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:d100fcc8930d697c6561156c6810ab4a508fb264c8b6779e6e61e2ed5e7558f9", size = 5604739, upload-time = "2025-09-22T04:03:13.592Z" }, - { url = "https://files.pythonhosted.org/packages/8f/41/2c11916bcac09ed561adccacceaedd2bf0e0b25b297ea92aab99fd03d0fa/lxml-6.0.2-cp314-cp314t-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ca59e7e13e5981175b8b3e4ab84d7da57993eeff53c07764dcebda0d0e64ecd", size = 5225119, upload-time = "2025-09-22T04:03:15.408Z" }, - { url = "https://files.pythonhosted.org/packages/99/05/4e5c2873d8f17aa018e6afde417c80cc5d0c33be4854cce3ef5670c49367/lxml-6.0.2-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:957448ac63a42e2e49531b9d6c0fa449a1970dbc32467aaad46f11545be9af1d", size = 4633665, upload-time = "2025-09-22T04:03:17.262Z" }, - { url = "https://files.pythonhosted.org/packages/0f/c9/dcc2da1bebd6275cdc723b515f93edf548b82f36a5458cca3578bc899332/lxml-6.0.2-cp314-cp314t-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:b7fc49c37f1786284b12af63152fe1d0990722497e2d5817acfe7a877522f9a9", size = 5234997, upload-time = "2025-09-22T04:03:19.14Z" }, - { url = "https://files.pythonhosted.org/packages/9c/e2/5172e4e7468afca64a37b81dba152fc5d90e30f9c83c7c3213d6a02a5ce4/lxml-6.0.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:e19e0643cc936a22e837f79d01a550678da8377d7d801a14487c10c34ee49c7e", size = 5090957, upload-time = "2025-09-22T04:03:21.436Z" }, - { url = "https://files.pythonhosted.org/packages/a5/b3/15461fd3e5cd4ddcb7938b87fc20b14ab113b92312fc97afe65cd7c85de1/lxml-6.0.2-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:1db01e5cf14345628e0cbe71067204db658e2fb8e51e7f33631f5f4735fefd8d", size = 4764372, upload-time = "2025-09-22T04:03:23.27Z" }, - { url = "https://files.pythonhosted.org/packages/05/33/f310b987c8bf9e61c4dd8e8035c416bd3230098f5e3cfa69fc4232de7059/lxml-6.0.2-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:875c6b5ab39ad5291588aed6925fac99d0097af0dd62f33c7b43736043d4a2ec", size = 5634653, upload-time = 
"2025-09-22T04:03:25.767Z" }, - { url = "https://files.pythonhosted.org/packages/70/ff/51c80e75e0bc9382158133bdcf4e339b5886c6ee2418b5199b3f1a61ed6d/lxml-6.0.2-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:cdcbed9ad19da81c480dfd6dd161886db6096083c9938ead313d94b30aadf272", size = 5233795, upload-time = "2025-09-22T04:03:27.62Z" }, - { url = "https://files.pythonhosted.org/packages/56/4d/4856e897df0d588789dd844dbed9d91782c4ef0b327f96ce53c807e13128/lxml-6.0.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:80dadc234ebc532e09be1975ff538d154a7fa61ea5031c03d25178855544728f", size = 5257023, upload-time = "2025-09-22T04:03:30.056Z" }, - { url = "https://files.pythonhosted.org/packages/0f/85/86766dfebfa87bea0ab78e9ff7a4b4b45225df4b4d3b8cc3c03c5cd68464/lxml-6.0.2-cp314-cp314t-win32.whl", hash = "sha256:da08e7bb297b04e893d91087df19638dc7a6bb858a954b0cc2b9f5053c922312", size = 3911420, upload-time = "2025-09-22T04:03:32.198Z" }, - { url = "https://files.pythonhosted.org/packages/fe/1a/b248b355834c8e32614650b8008c69ffeb0ceb149c793961dd8c0b991bb3/lxml-6.0.2-cp314-cp314t-win_amd64.whl", hash = "sha256:252a22982dca42f6155125ac76d3432e548a7625d56f5a273ee78a5057216eca", size = 4406837, upload-time = "2025-09-22T04:03:34.027Z" }, - { url = "https://files.pythonhosted.org/packages/92/aa/df863bcc39c5e0946263454aba394de8a9084dbaff8ad143846b0d844739/lxml-6.0.2-cp314-cp314t-win_arm64.whl", hash = "sha256:bb4c1847b303835d89d785a18801a883436cdfd5dc3d62947f9c49e24f0f5a2c", size = 3822205, upload-time = "2025-09-22T04:03:36.249Z" }, - { url = "https://files.pythonhosted.org/packages/0b/11/29d08bc103a62c0eba8016e7ed5aeebbf1e4312e83b0b1648dd203b0e87d/lxml-6.0.2-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:1c06035eafa8404b5cf475bb37a9f6088b0aca288d4ccc9d69389750d5543700", size = 3949829, upload-time = "2025-09-22T04:04:45.608Z" }, - { url = "https://files.pythonhosted.org/packages/12/b3/52ab9a3b31e5ab8238da241baa19eec44d2ab426532441ee607165aebb52/lxml-6.0.2-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c7d13103045de1bdd6fe5d61802565f1a3537d70cd3abf596aa0af62761921ee", size = 4226277, upload-time = "2025-09-22T04:04:47.754Z" }, - { url = "https://files.pythonhosted.org/packages/a0/33/1eaf780c1baad88224611df13b1c2a9dfa460b526cacfe769103ff50d845/lxml-6.0.2-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0a3c150a95fbe5ac91de323aa756219ef9cf7fde5a3f00e2281e30f33fa5fa4f", size = 4330433, upload-time = "2025-09-22T04:04:49.907Z" }, - { url = "https://files.pythonhosted.org/packages/7a/c1/27428a2ff348e994ab4f8777d3a0ad510b6b92d37718e5887d2da99952a2/lxml-6.0.2-pp311-pypy311_pp73-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:60fa43be34f78bebb27812ed90f1925ec99560b0fa1decdb7d12b84d857d31e9", size = 4272119, upload-time = "2025-09-22T04:04:51.801Z" }, - { url = "https://files.pythonhosted.org/packages/f0/d0/3020fa12bcec4ab62f97aab026d57c2f0cfd480a558758d9ca233bb6a79d/lxml-6.0.2-pp311-pypy311_pp73-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:21c73b476d3cfe836be731225ec3421fa2f048d84f6df6a8e70433dff1376d5a", size = 4417314, upload-time = "2025-09-22T04:04:55.024Z" }, - { url = "https://files.pythonhosted.org/packages/6c/77/d7f491cbc05303ac6801651aabeb262d43f319288c1ea96c66b1d2692ff3/lxml-6.0.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:27220da5be049e936c3aca06f174e8827ca6445a4353a1995584311487fc4e3e", size = 3518768, upload-time = "2025-09-22T04:04:57.097Z" }, -] - [[package]] name 
= "mako" version = "1.3.10" @@ -3873,11 +3548,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/94/54/e7d793b573f298e1c9013b8c4dade17d481164aa517d1d7148619c2cedbf/markdown_it_py-4.0.0-py3-none-any.whl", hash = "sha256:87327c59b172c5011896038353a81343b6754500a08cd7a4973bb48c6d578147", size = 87321, upload-time = "2025-08-11T12:57:51.923Z" }, ] -[package.optional-dependencies] -linkify = [ - { name = "linkify-it-py" }, -] - [[package]] name = "markupsafe" version = "3.0.3" @@ -3952,18 +3622,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/70/bc/6f1c2f612465f5fa89b95bead1f44dcb607670fd42891d8fdcd5d039f4f4/markupsafe-3.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:32001d6a8fc98c8cb5c947787c5d08b0a50663d139f1305bac5885d98d9b40fa", size = 14146, upload-time = "2025-09-27T18:37:28.327Z" }, ] -[[package]] -name = "math-verify" -version = "0.9.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "latex2sympy2-extended" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/4f/12/b8d13b581e110ac2f724a2351a8361a70fa36d057eb945d6379e8747c256/math_verify-0.9.0.tar.gz", hash = "sha256:45ac6c61344ba056b9e99a660a4bc8d044ed408f730aed68c60435aa5eec4645", size = 60329, upload-time = "2026-01-10T01:48:33.056Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/62/76/6b4969bccc842b6567f7e6ee015684b9428a9b7fcbdf479e73716f43597f/math_verify-0.9.0-py3-none-any.whl", hash = "sha256:3703e7c4885354027fa84409d762a596a2906d1fd4deb78361876bd905a76194", size = 29967, upload-time = "2026-01-10T01:48:31.674Z" }, -] - [[package]] name = "matplotlib" version = "3.10.8" @@ -4065,18 +3723,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/e2/fc/6dc7659c2ae5ddf280477011f4213a74f806862856b796ef08f028e664bf/mcp-1.25.0-py3-none-any.whl", hash = "sha256:b37c38144a666add0862614cc79ec276e97d72aa8ca26d622818d4e278b9721a", size = 233076, upload-time = "2025-12-19T10:19:55.416Z" }, ] -[[package]] -name = "mdit-py-plugins" -version = "0.5.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "markdown-it-py" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/b2/fd/a756d36c0bfba5f6e39a1cdbdbfdd448dc02692467d83816dff4592a1ebc/mdit_py_plugins-0.5.0.tar.gz", hash = "sha256:f4918cb50119f50446560513a8e311d574ff6aaed72606ddae6d35716fe809c6", size = 44655, upload-time = "2025-08-11T07:25:49.083Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/fb/86/dd6e5db36df29e76c7a7699123569a4a18c1623ce68d826ed96c62643cae/mdit_py_plugins-0.5.0-py3-none-any.whl", hash = "sha256:07a08422fc1936a5d26d146759e9155ea466e842f5ab2f7d2266dd084c8dab1f", size = 57205, upload-time = "2025-08-11T07:25:47.597Z" }, -] - [[package]] name = "mdurl" version = "0.1.2" @@ -4160,102 +3806,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/67/b3/73cc2f584ac612a476096d35a61eed75ee7ed8b4e320b0c36cf60a14d4eb/mlx_metal-0.30.1-py3-none-macosx_26_0_arm64.whl", hash = "sha256:e0b151a0053ac00b4226710bfb6dbf54b87283fb01e10fb3877f9ea969f680aa", size = 44981160, upload-time = "2025-12-18T00:15:47.518Z" }, ] -[[package]] -name = "mmh3" -version = "5.2.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/a7/af/f28c2c2f51f31abb4725f9a64bc7863d5f491f6539bd26aee2a1d21a649e/mmh3-5.2.0.tar.gz", hash = "sha256:1efc8fec8478e9243a78bb993422cf79f8ff85cb4cf6b79647480a31e0d950a8", size = 33582, upload-time = "2025-07-29T07:43:48.49Z" } -wheels = [ - { url = 
"https://files.pythonhosted.org/packages/f7/87/399567b3796e134352e11a8b973cd470c06b2ecfad5468fe580833be442b/mmh3-5.2.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:7901c893e704ee3c65f92d39b951f8f34ccf8e8566768c58103fb10e55afb8c1", size = 56107, upload-time = "2025-07-29T07:41:57.07Z" }, - { url = "https://files.pythonhosted.org/packages/c3/09/830af30adf8678955b247d97d3d9543dd2fd95684f3cd41c0cd9d291da9f/mmh3-5.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4a5f5536b1cbfa72318ab3bfc8a8188b949260baed186b75f0abc75b95d8c051", size = 40635, upload-time = "2025-07-29T07:41:57.903Z" }, - { url = "https://files.pythonhosted.org/packages/07/14/eaba79eef55b40d653321765ac5e8f6c9ac38780b8a7c2a2f8df8ee0fb72/mmh3-5.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:cedac4f4054b8f7859e5aed41aaa31ad03fce6851901a7fdc2af0275ac533c10", size = 40078, upload-time = "2025-07-29T07:41:58.772Z" }, - { url = "https://files.pythonhosted.org/packages/bb/26/83a0f852e763f81b2265d446b13ed6d49ee49e1fc0c47b9655977e6f3d81/mmh3-5.2.0-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:eb756caf8975882630ce4e9fbbeb9d3401242a72528230422c9ab3a0d278e60c", size = 97262, upload-time = "2025-07-29T07:41:59.678Z" }, - { url = "https://files.pythonhosted.org/packages/00/7d/b7133b10d12239aeaebf6878d7eaf0bf7d3738c44b4aba3c564588f6d802/mmh3-5.2.0-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:097e13c8b8a66c5753c6968b7640faefe85d8e38992703c1f666eda6ef4c3762", size = 103118, upload-time = "2025-07-29T07:42:01.197Z" }, - { url = "https://files.pythonhosted.org/packages/7b/3e/62f0b5dce2e22fd5b7d092aba285abd7959ea2b17148641e029f2eab1ffa/mmh3-5.2.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a7c0c7845566b9686480e6a7e9044db4afb60038d5fabd19227443f0104eeee4", size = 106072, upload-time = "2025-07-29T07:42:02.601Z" }, - { url = "https://files.pythonhosted.org/packages/66/84/ea88bb816edfe65052c757a1c3408d65c4201ddbd769d4a287b0f1a628b2/mmh3-5.2.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:61ac226af521a572700f863d6ecddc6ece97220ce7174e311948ff8c8919a363", size = 112925, upload-time = "2025-07-29T07:42:03.632Z" }, - { url = "https://files.pythonhosted.org/packages/2e/13/c9b1c022807db575fe4db806f442d5b5784547e2e82cff36133e58ea31c7/mmh3-5.2.0-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:582f9dbeefe15c32a5fa528b79b088b599a1dfe290a4436351c6090f90ddebb8", size = 120583, upload-time = "2025-07-29T07:42:04.991Z" }, - { url = "https://files.pythonhosted.org/packages/8a/5f/0e2dfe1a38f6a78788b7eb2b23432cee24623aeabbc907fed07fc17d6935/mmh3-5.2.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2ebfc46b39168ab1cd44670a32ea5489bcbc74a25795c61b6d888c5c2cf654ed", size = 99127, upload-time = "2025-07-29T07:42:05.929Z" }, - { url = "https://files.pythonhosted.org/packages/77/27/aefb7d663b67e6a0c4d61a513c83e39ba2237e8e4557fa7122a742a23de5/mmh3-5.2.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:1556e31e4bd0ac0c17eaf220be17a09c171d7396919c3794274cb3415a9d3646", size = 98544, upload-time = "2025-07-29T07:42:06.87Z" }, - { url = "https://files.pythonhosted.org/packages/ab/97/a21cc9b1a7c6e92205a1b5fa030cdf62277d177570c06a239eca7bd6dd32/mmh3-5.2.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:81df0dae22cd0da87f1c978602750f33d17fb3d21fb0f326c89dc89834fea79b", size = 106262, upload-time 
= "2025-07-29T07:42:07.804Z" }, - { url = "https://files.pythonhosted.org/packages/43/18/db19ae82ea63c8922a880e1498a75342311f8aa0c581c4dd07711473b5f7/mmh3-5.2.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:eba01ec3bd4a49b9ac5ca2bc6a73ff5f3af53374b8556fcc2966dd2af9eb7779", size = 109824, upload-time = "2025-07-29T07:42:08.735Z" }, - { url = "https://files.pythonhosted.org/packages/9f/f5/41dcf0d1969125fc6f61d8618b107c79130b5af50b18a4651210ea52ab40/mmh3-5.2.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e9a011469b47b752e7d20de296bb34591cdfcbe76c99c2e863ceaa2aa61113d2", size = 97255, upload-time = "2025-07-29T07:42:09.706Z" }, - { url = "https://files.pythonhosted.org/packages/32/b3/cce9eaa0efac1f0e735bb178ef9d1d2887b4927fe0ec16609d5acd492dda/mmh3-5.2.0-cp311-cp311-win32.whl", hash = "sha256:bc44fc2b886243d7c0d8daeb37864e16f232e5b56aaec27cc781d848264cfd28", size = 40779, upload-time = "2025-07-29T07:42:10.546Z" }, - { url = "https://files.pythonhosted.org/packages/7c/e9/3fa0290122e6d5a7041b50ae500b8a9f4932478a51e48f209a3879fe0b9b/mmh3-5.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:8ebf241072cf2777a492d0e09252f8cc2b3edd07dfdb9404b9757bffeb4f2cee", size = 41549, upload-time = "2025-07-29T07:42:11.399Z" }, - { url = "https://files.pythonhosted.org/packages/3a/54/c277475b4102588e6f06b2e9095ee758dfe31a149312cdbf62d39a9f5c30/mmh3-5.2.0-cp311-cp311-win_arm64.whl", hash = "sha256:b5f317a727bba0e633a12e71228bc6a4acb4f471a98b1c003163b917311ea9a9", size = 39336, upload-time = "2025-07-29T07:42:12.209Z" }, - { url = "https://files.pythonhosted.org/packages/bf/6a/d5aa7edb5c08e0bd24286c7d08341a0446f9a2fbbb97d96a8a6dd81935ee/mmh3-5.2.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:384eda9361a7bf83a85e09447e1feafe081034af9dd428893701b959230d84be", size = 56141, upload-time = "2025-07-29T07:42:13.456Z" }, - { url = "https://files.pythonhosted.org/packages/08/49/131d0fae6447bc4a7299ebdb1a6fb9d08c9f8dcf97d75ea93e8152ddf7ab/mmh3-5.2.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:2c9da0d568569cc87315cb063486d761e38458b8ad513fedd3dc9263e1b81bcd", size = 40681, upload-time = "2025-07-29T07:42:14.306Z" }, - { url = "https://files.pythonhosted.org/packages/8f/6f/9221445a6bcc962b7f5ff3ba18ad55bba624bacdc7aa3fc0a518db7da8ec/mmh3-5.2.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:86d1be5d63232e6eb93c50881aea55ff06eb86d8e08f9b5417c8c9b10db9db96", size = 40062, upload-time = "2025-07-29T07:42:15.08Z" }, - { url = "https://files.pythonhosted.org/packages/1e/d4/6bb2d0fef81401e0bb4c297d1eb568b767de4ce6fc00890bc14d7b51ecc4/mmh3-5.2.0-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:bf7bee43e17e81671c447e9c83499f53d99bf440bc6d9dc26a841e21acfbe094", size = 97333, upload-time = "2025-07-29T07:42:16.436Z" }, - { url = "https://files.pythonhosted.org/packages/44/e0/ccf0daff8134efbb4fbc10a945ab53302e358c4b016ada9bf97a6bdd50c1/mmh3-5.2.0-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:7aa18cdb58983ee660c9c400b46272e14fa253c675ed963d3812487f8ca42037", size = 103310, upload-time = "2025-07-29T07:42:17.796Z" }, - { url = "https://files.pythonhosted.org/packages/02/63/1965cb08a46533faca0e420e06aff8bbaf9690a6f0ac6ae6e5b2e4544687/mmh3-5.2.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ae9d032488fcec32d22be6542d1a836f00247f40f320844dbb361393b5b22773", size = 106178, upload-time = "2025-07-29T07:42:19.281Z" }, - { url = 
"https://files.pythonhosted.org/packages/c2/41/c883ad8e2c234013f27f92061200afc11554ea55edd1bcf5e1accd803a85/mmh3-5.2.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:e1861fb6b1d0453ed7293200139c0a9011eeb1376632e048e3766945b13313c5", size = 113035, upload-time = "2025-07-29T07:42:20.356Z" }, - { url = "https://files.pythonhosted.org/packages/df/b5/1ccade8b1fa625d634a18bab7bf08a87457e09d5ec8cf83ca07cbea9d400/mmh3-5.2.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:99bb6a4d809aa4e528ddfe2c85dd5239b78b9dd14be62cca0329db78505e7b50", size = 120784, upload-time = "2025-07-29T07:42:21.377Z" }, - { url = "https://files.pythonhosted.org/packages/77/1c/919d9171fcbdcdab242e06394464ccf546f7d0f3b31e0d1e3a630398782e/mmh3-5.2.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1f8d8b627799f4e2fcc7c034fed8f5f24dc7724ff52f69838a3d6d15f1ad4765", size = 99137, upload-time = "2025-07-29T07:42:22.344Z" }, - { url = "https://files.pythonhosted.org/packages/66/8a/1eebef5bd6633d36281d9fc83cf2e9ba1ba0e1a77dff92aacab83001cee4/mmh3-5.2.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b5995088dd7023d2d9f310a0c67de5a2b2e06a570ecfd00f9ff4ab94a67cde43", size = 98664, upload-time = "2025-07-29T07:42:23.269Z" }, - { url = "https://files.pythonhosted.org/packages/13/41/a5d981563e2ee682b21fb65e29cc0f517a6734a02b581359edd67f9d0360/mmh3-5.2.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:1a5f4d2e59d6bba8ef01b013c472741835ad961e7c28f50c82b27c57748744a4", size = 106459, upload-time = "2025-07-29T07:42:24.238Z" }, - { url = "https://files.pythonhosted.org/packages/24/31/342494cd6ab792d81e083680875a2c50fa0c5df475ebf0b67784f13e4647/mmh3-5.2.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:fd6e6c3d90660d085f7e73710eab6f5545d4854b81b0135a3526e797009dbda3", size = 110038, upload-time = "2025-07-29T07:42:25.629Z" }, - { url = "https://files.pythonhosted.org/packages/28/44/efda282170a46bb4f19c3e2b90536513b1d821c414c28469a227ca5a1789/mmh3-5.2.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c4a2f3d83879e3de2eb8cbf562e71563a8ed15ee9b9c2e77ca5d9f73072ac15c", size = 97545, upload-time = "2025-07-29T07:42:27.04Z" }, - { url = "https://files.pythonhosted.org/packages/68/8f/534ae319c6e05d714f437e7206f78c17e66daca88164dff70286b0e8ea0c/mmh3-5.2.0-cp312-cp312-win32.whl", hash = "sha256:2421b9d665a0b1ad724ec7332fb5a98d075f50bc51a6ff854f3a1882bd650d49", size = 40805, upload-time = "2025-07-29T07:42:28.032Z" }, - { url = "https://files.pythonhosted.org/packages/b8/f6/f6abdcfefcedab3c964868048cfe472764ed358c2bf6819a70dd4ed4ed3a/mmh3-5.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:72d80005b7634a3a2220f81fbeb94775ebd12794623bb2e1451701ea732b4aa3", size = 41597, upload-time = "2025-07-29T07:42:28.894Z" }, - { url = "https://files.pythonhosted.org/packages/15/fd/f7420e8cbce45c259c770cac5718badf907b302d3a99ec587ba5ce030237/mmh3-5.2.0-cp312-cp312-win_arm64.whl", hash = "sha256:3d6bfd9662a20c054bc216f861fa330c2dac7c81e7fb8307b5e32ab5b9b4d2e0", size = 39350, upload-time = "2025-07-29T07:42:29.794Z" }, - { url = "https://files.pythonhosted.org/packages/d8/fa/27f6ab93995ef6ad9f940e96593c5dd24744d61a7389532b0fec03745607/mmh3-5.2.0-cp313-cp313-android_21_arm64_v8a.whl", hash = "sha256:e79c00eba78f7258e5b354eccd4d7907d60317ced924ea4a5f2e9d83f5453065", size = 40874, upload-time = "2025-07-29T07:42:30.662Z" }, - { url = 
"https://files.pythonhosted.org/packages/11/9c/03d13bcb6a03438bc8cac3d2e50f80908d159b31a4367c2e1a7a077ded32/mmh3-5.2.0-cp313-cp313-android_21_x86_64.whl", hash = "sha256:956127e663d05edbeec54df38885d943dfa27406594c411139690485128525de", size = 42012, upload-time = "2025-07-29T07:42:31.539Z" }, - { url = "https://files.pythonhosted.org/packages/4e/78/0865d9765408a7d504f1789944e678f74e0888b96a766d578cb80b040999/mmh3-5.2.0-cp313-cp313-ios_13_0_arm64_iphoneos.whl", hash = "sha256:c3dca4cb5b946ee91b3d6bb700d137b1cd85c20827f89fdf9c16258253489044", size = 39197, upload-time = "2025-07-29T07:42:32.374Z" }, - { url = "https://files.pythonhosted.org/packages/3e/12/76c3207bd186f98b908b6706c2317abb73756d23a4e68ea2bc94825b9015/mmh3-5.2.0-cp313-cp313-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:e651e17bfde5840e9e4174b01e9e080ce49277b70d424308b36a7969d0d1af73", size = 39840, upload-time = "2025-07-29T07:42:33.227Z" }, - { url = "https://files.pythonhosted.org/packages/5d/0d/574b6cce5555c9f2b31ea189ad44986755eb14e8862db28c8b834b8b64dc/mmh3-5.2.0-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:9f64bf06f4bf623325fda3a6d02d36cd69199b9ace99b04bb2d7fd9f89688504", size = 40644, upload-time = "2025-07-29T07:42:34.099Z" }, - { url = "https://files.pythonhosted.org/packages/52/82/3731f8640b79c46707f53ed72034a58baad400be908c87b0088f1f89f986/mmh3-5.2.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ddc63328889bcaee77b743309e5c7d2d52cee0d7d577837c91b6e7cc9e755e0b", size = 56153, upload-time = "2025-07-29T07:42:35.031Z" }, - { url = "https://files.pythonhosted.org/packages/4f/34/e02dca1d4727fd9fdeaff9e2ad6983e1552804ce1d92cc796e5b052159bb/mmh3-5.2.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:bb0fdc451fb6d86d81ab8f23d881b8d6e37fc373a2deae1c02d27002d2ad7a05", size = 40684, upload-time = "2025-07-29T07:42:35.914Z" }, - { url = "https://files.pythonhosted.org/packages/8f/36/3dee40767356e104967e6ed6d102ba47b0b1ce2a89432239b95a94de1b89/mmh3-5.2.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:b29044e1ffdb84fe164d0a7ea05c7316afea93c00f8ed9449cf357c36fc4f814", size = 40057, upload-time = "2025-07-29T07:42:36.755Z" }, - { url = "https://files.pythonhosted.org/packages/31/58/228c402fccf76eb39a0a01b8fc470fecf21965584e66453b477050ee0e99/mmh3-5.2.0-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:58981d6ea9646dbbf9e59a30890cbf9f610df0e4a57dbfe09215116fd90b0093", size = 97344, upload-time = "2025-07-29T07:42:37.675Z" }, - { url = "https://files.pythonhosted.org/packages/34/82/fc5ce89006389a6426ef28e326fc065b0fbaaed230373b62d14c889f47ea/mmh3-5.2.0-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:7e5634565367b6d98dc4aa2983703526ef556b3688ba3065edb4b9b90ede1c54", size = 103325, upload-time = "2025-07-29T07:42:38.591Z" }, - { url = "https://files.pythonhosted.org/packages/09/8c/261e85777c6aee1ebd53f2f17e210e7481d5b0846cd0b4a5c45f1e3761b8/mmh3-5.2.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b0271ac12415afd3171ab9a3c7cbfc71dee2c68760a7dc9d05bf8ed6ddfa3a7a", size = 106240, upload-time = "2025-07-29T07:42:39.563Z" }, - { url = "https://files.pythonhosted.org/packages/70/73/2f76b3ad8a3d431824e9934403df36c0ddacc7831acf82114bce3c4309c8/mmh3-5.2.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:45b590e31bc552c6f8e2150ff1ad0c28dd151e9f87589e7eaf508fbdd8e8e908", size = 113060, upload-time = 
"2025-07-29T07:42:40.585Z" }, - { url = "https://files.pythonhosted.org/packages/9f/b9/7ea61a34e90e50a79a9d87aa1c0b8139a7eaf4125782b34b7d7383472633/mmh3-5.2.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:bdde97310d59604f2a9119322f61b31546748499a21b44f6715e8ced9308a6c5", size = 120781, upload-time = "2025-07-29T07:42:41.618Z" }, - { url = "https://files.pythonhosted.org/packages/0f/5b/ae1a717db98c7894a37aeedbd94b3f99e6472a836488f36b6849d003485b/mmh3-5.2.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:fc9c5f280438cf1c1a8f9abb87dc8ce9630a964120cfb5dd50d1e7ce79690c7a", size = 99174, upload-time = "2025-07-29T07:42:42.587Z" }, - { url = "https://files.pythonhosted.org/packages/e3/de/000cce1d799fceebb6d4487ae29175dd8e81b48e314cba7b4da90bcf55d7/mmh3-5.2.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:c903e71fd8debb35ad2a4184c1316b3cb22f64ce517b4e6747f25b0a34e41266", size = 98734, upload-time = "2025-07-29T07:42:43.996Z" }, - { url = "https://files.pythonhosted.org/packages/79/19/0dc364391a792b72fbb22becfdeacc5add85cc043cd16986e82152141883/mmh3-5.2.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:eed4bba7ff8a0d37106ba931ab03bdd3915fbb025bcf4e1f0aa02bc8114960c5", size = 106493, upload-time = "2025-07-29T07:42:45.07Z" }, - { url = "https://files.pythonhosted.org/packages/3c/b1/bc8c28e4d6e807bbb051fefe78e1156d7f104b89948742ad310612ce240d/mmh3-5.2.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:1fdb36b940e9261aff0b5177c5b74a36936b902f473180f6c15bde26143681a9", size = 110089, upload-time = "2025-07-29T07:42:46.122Z" }, - { url = "https://files.pythonhosted.org/packages/3b/a2/d20f3f5c95e9c511806686c70d0a15479cc3941c5f322061697af1c1ff70/mmh3-5.2.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:7303aab41e97adcf010a09efd8f1403e719e59b7705d5e3cfed3dd7571589290", size = 97571, upload-time = "2025-07-29T07:42:47.18Z" }, - { url = "https://files.pythonhosted.org/packages/7b/23/665296fce4f33488deec39a750ffd245cfc07aafb0e3ef37835f91775d14/mmh3-5.2.0-cp313-cp313-win32.whl", hash = "sha256:03e08c6ebaf666ec1e3d6ea657a2d363bb01effd1a9acfe41f9197decaef0051", size = 40806, upload-time = "2025-07-29T07:42:48.166Z" }, - { url = "https://files.pythonhosted.org/packages/59/b0/92e7103f3b20646e255b699e2d0327ce53a3f250e44367a99dc8be0b7c7a/mmh3-5.2.0-cp313-cp313-win_amd64.whl", hash = "sha256:7fddccd4113e7b736706e17a239a696332360cbaddf25ae75b57ba1acce65081", size = 41600, upload-time = "2025-07-29T07:42:49.371Z" }, - { url = "https://files.pythonhosted.org/packages/99/22/0b2bd679a84574647de538c5b07ccaa435dbccc37815067fe15b90fe8dad/mmh3-5.2.0-cp313-cp313-win_arm64.whl", hash = "sha256:fa0c966ee727aad5406d516375593c5f058c766b21236ab8985693934bb5085b", size = 39349, upload-time = "2025-07-29T07:42:50.268Z" }, - { url = "https://files.pythonhosted.org/packages/f7/ca/a20db059a8a47048aaf550da14a145b56e9c7386fb8280d3ce2962dcebf7/mmh3-5.2.0-cp314-cp314-ios_13_0_arm64_iphoneos.whl", hash = "sha256:e5015f0bb6eb50008bed2d4b1ce0f2a294698a926111e4bb202c0987b4f89078", size = 39209, upload-time = "2025-07-29T07:42:51.559Z" }, - { url = "https://files.pythonhosted.org/packages/98/dd/e5094799d55c7482d814b979a0fd608027d0af1b274bfb4c3ea3e950bfd5/mmh3-5.2.0-cp314-cp314-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:e0f3ed828d709f5b82d8bfe14f8856120718ec4bd44a5b26102c3030a1e12501", size = 39843, upload-time = "2025-07-29T07:42:52.536Z" }, - { url = 
"https://files.pythonhosted.org/packages/f4/6b/7844d7f832c85400e7cc89a1348e4e1fdd38c5a38415bb5726bbb8fcdb6c/mmh3-5.2.0-cp314-cp314-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:f35727c5118aba95f0397e18a1a5b8405425581bfe53e821f0fb444cbdc2bc9b", size = 40648, upload-time = "2025-07-29T07:42:53.392Z" }, - { url = "https://files.pythonhosted.org/packages/1f/bf/71f791f48a21ff3190ba5225807cbe4f7223360e96862c376e6e3fb7efa7/mmh3-5.2.0-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:3bc244802ccab5220008cb712ca1508cb6a12f0eb64ad62997156410579a1770", size = 56164, upload-time = "2025-07-29T07:42:54.267Z" }, - { url = "https://files.pythonhosted.org/packages/70/1f/f87e3d34d83032b4f3f0f528c6d95a98290fcacf019da61343a49dccfd51/mmh3-5.2.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:ff3d50dc3fe8a98059f99b445dfb62792b5d006c5e0b8f03c6de2813b8376110", size = 40692, upload-time = "2025-07-29T07:42:55.234Z" }, - { url = "https://files.pythonhosted.org/packages/a6/e2/db849eaed07117086f3452feca8c839d30d38b830ac59fe1ce65af8be5ad/mmh3-5.2.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:37a358cc881fe796e099c1db6ce07ff757f088827b4e8467ac52b7a7ffdca647", size = 40068, upload-time = "2025-07-29T07:42:56.158Z" }, - { url = "https://files.pythonhosted.org/packages/df/6b/209af927207af77425b044e32f77f49105a0b05d82ff88af6971d8da4e19/mmh3-5.2.0-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:b9a87025121d1c448f24f27ff53a5fe7b6ef980574b4a4f11acaabe702420d63", size = 97367, upload-time = "2025-07-29T07:42:57.037Z" }, - { url = "https://files.pythonhosted.org/packages/ca/e0/78adf4104c425606a9ce33fb351f790c76a6c2314969c4a517d1ffc92196/mmh3-5.2.0-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:1ba55d6ca32eeef8b2625e1e4bfc3b3db52bc63014bd7e5df8cc11bf2b036b12", size = 103306, upload-time = "2025-07-29T07:42:58.522Z" }, - { url = "https://files.pythonhosted.org/packages/a3/79/c2b89f91b962658b890104745b1b6c9ce38d50a889f000b469b91eeb1b9e/mmh3-5.2.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c9ff37ba9f15637e424c2ab57a1a590c52897c845b768e4e0a4958084ec87f22", size = 106312, upload-time = "2025-07-29T07:42:59.552Z" }, - { url = "https://files.pythonhosted.org/packages/4b/14/659d4095528b1a209be90934778c5ffe312177d51e365ddcbca2cac2ec7c/mmh3-5.2.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a094319ec0db52a04af9fdc391b4d39a1bc72bc8424b47c4411afb05413a44b5", size = 113135, upload-time = "2025-07-29T07:43:00.745Z" }, - { url = "https://files.pythonhosted.org/packages/8d/6f/cd7734a779389a8a467b5c89a48ff476d6f2576e78216a37551a97e9e42a/mmh3-5.2.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:c5584061fd3da584659b13587f26c6cad25a096246a481636d64375d0c1f6c07", size = 120775, upload-time = "2025-07-29T07:43:02.124Z" }, - { url = "https://files.pythonhosted.org/packages/1d/ca/8256e3b96944408940de3f9291d7e38a283b5761fe9614d4808fcf27bd62/mmh3-5.2.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:ecbfc0437ddfdced5e7822d1ce4855c9c64f46819d0fdc4482c53f56c707b935", size = 99178, upload-time = "2025-07-29T07:43:03.182Z" }, - { url = "https://files.pythonhosted.org/packages/8a/32/39e2b3cf06b6e2eb042c984dab8680841ac2a0d3ca6e0bea30db1f27b565/mmh3-5.2.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:7b986d506a8e8ea345791897ba5d8ba0d9d8820cd4fc3e52dbe6de19388de2e7", size = 
98738, upload-time = "2025-07-29T07:43:04.207Z" }, - { url = "https://files.pythonhosted.org/packages/61/d3/7bbc8e0e8cf65ebbe1b893ffa0467b7ecd1bd07c3bbf6c9db4308ada22ec/mmh3-5.2.0-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:38d899a156549da8ef6a9f1d6f7ef231228d29f8f69bce2ee12f5fba6d6fd7c5", size = 106510, upload-time = "2025-07-29T07:43:05.656Z" }, - { url = "https://files.pythonhosted.org/packages/10/99/b97e53724b52374e2f3859046f0eb2425192da356cb19784d64bc17bb1cf/mmh3-5.2.0-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:d86651fa45799530885ba4dab3d21144486ed15285e8784181a0ab37a4552384", size = 110053, upload-time = "2025-07-29T07:43:07.204Z" }, - { url = "https://files.pythonhosted.org/packages/ac/62/3688c7d975ed195155671df68788c83fed6f7909b6ec4951724c6860cb97/mmh3-5.2.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:c463d7c1c4cfc9d751efeaadd936bbba07b5b0ed81a012b3a9f5a12f0872bd6e", size = 97546, upload-time = "2025-07-29T07:43:08.226Z" }, - { url = "https://files.pythonhosted.org/packages/ca/3b/c6153250f03f71a8b7634cded82939546cdfba02e32f124ff51d52c6f991/mmh3-5.2.0-cp314-cp314-win32.whl", hash = "sha256:bb4fe46bdc6104fbc28db7a6bacb115ee6368ff993366bbd8a2a7f0076e6f0c0", size = 41422, upload-time = "2025-07-29T07:43:09.216Z" }, - { url = "https://files.pythonhosted.org/packages/74/01/a27d98bab083a435c4c07e9d1d720d4c8a578bf4c270bae373760b1022be/mmh3-5.2.0-cp314-cp314-win_amd64.whl", hash = "sha256:7c7f0b342fd06044bedd0b6e72177ddc0076f54fd89ee239447f8b271d919d9b", size = 42135, upload-time = "2025-07-29T07:43:10.183Z" }, - { url = "https://files.pythonhosted.org/packages/cb/c9/dbba5507e95429b8b380e2ba091eff5c20a70a59560934dff0ad8392b8c8/mmh3-5.2.0-cp314-cp314-win_arm64.whl", hash = "sha256:3193752fc05ea72366c2b63ff24b9a190f422e32d75fdeae71087c08fff26115", size = 39879, upload-time = "2025-07-29T07:43:11.106Z" }, - { url = "https://files.pythonhosted.org/packages/b5/d1/c8c0ef839c17258b9de41b84f663574fabcf8ac2007b7416575e0f65ff6e/mmh3-5.2.0-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:69fc339d7202bea69ef9bd7c39bfdf9fdabc8e6822a01eba62fb43233c1b3932", size = 57696, upload-time = "2025-07-29T07:43:11.989Z" }, - { url = "https://files.pythonhosted.org/packages/2f/55/95e2b9ff201e89f9fe37036037ab61a6c941942b25cdb7b6a9df9b931993/mmh3-5.2.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:12da42c0a55c9d86ab566395324213c319c73ecb0c239fad4726324212b9441c", size = 41421, upload-time = "2025-07-29T07:43:13.269Z" }, - { url = "https://files.pythonhosted.org/packages/77/79/9be23ad0b7001a4b22752e7693be232428ecc0a35068a4ff5c2f14ef8b20/mmh3-5.2.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:f7f9034c7cf05ddfaac8d7a2e63a3c97a840d4615d0a0e65ba8bdf6f8576e3be", size = 40853, upload-time = "2025-07-29T07:43:14.888Z" }, - { url = "https://files.pythonhosted.org/packages/ac/1b/96b32058eda1c1dee8264900c37c359a7325c1f11f5ff14fd2be8e24eff9/mmh3-5.2.0-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:11730eeb16dfcf9674fdea9bb6b8e6dd9b40813b7eb839bc35113649eef38aeb", size = 109694, upload-time = "2025-07-29T07:43:15.816Z" }, - { url = "https://files.pythonhosted.org/packages/8d/6f/a2ae44cd7dad697b6dea48390cbc977b1e5ca58fda09628cbcb2275af064/mmh3-5.2.0-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:932a6eec1d2e2c3c9e630d10f7128d80e70e2d47fe6b8c7ea5e1afbd98733e65", size = 117438, upload-time = "2025-07-29T07:43:16.865Z" }, - { url = 
"https://files.pythonhosted.org/packages/a0/08/bfb75451c83f05224a28afeaf3950c7b793c0b71440d571f8e819cfb149a/mmh3-5.2.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3ca975c51c5028947bbcfc24966517aac06a01d6c921e30f7c5383c195f87991", size = 120409, upload-time = "2025-07-29T07:43:18.207Z" }, - { url = "https://files.pythonhosted.org/packages/9f/ea/8b118b69b2ff8df568f742387d1a159bc654a0f78741b31437dd047ea28e/mmh3-5.2.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:5b0b58215befe0f0e120b828f7645e97719bbba9f23b69e268ed0ac7adde8645", size = 125909, upload-time = "2025-07-29T07:43:19.39Z" }, - { url = "https://files.pythonhosted.org/packages/3e/11/168cc0b6a30650032e351a3b89b8a47382da541993a03af91e1ba2501234/mmh3-5.2.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:29c2b9ce61886809d0492a274a5a53047742dea0f703f9c4d5d223c3ea6377d3", size = 135331, upload-time = "2025-07-29T07:43:20.435Z" }, - { url = "https://files.pythonhosted.org/packages/31/05/e3a9849b1c18a7934c64e831492c99e67daebe84a8c2f2c39a7096a830e3/mmh3-5.2.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:a367d4741ac0103f8198c82f429bccb9359f543ca542b06a51f4f0332e8de279", size = 110085, upload-time = "2025-07-29T07:43:21.92Z" }, - { url = "https://files.pythonhosted.org/packages/d9/d5/a96bcc306e3404601418b2a9a370baec92af84204528ba659fdfe34c242f/mmh3-5.2.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:5a5dba98e514fb26241868f6eb90a7f7ca0e039aed779342965ce24ea32ba513", size = 111195, upload-time = "2025-07-29T07:43:23.066Z" }, - { url = "https://files.pythonhosted.org/packages/af/29/0fd49801fec5bff37198684e0849b58e0dab3a2a68382a357cfffb0fafc3/mmh3-5.2.0-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:941603bfd75a46023807511c1ac2f1b0f39cccc393c15039969806063b27e6db", size = 116919, upload-time = "2025-07-29T07:43:24.178Z" }, - { url = "https://files.pythonhosted.org/packages/2d/04/4f3c32b0a2ed762edca45d8b46568fc3668e34f00fb1e0a3b5451ec1281c/mmh3-5.2.0-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:132dd943451a7c7546978863d2f5a64977928410782e1a87d583cb60eb89e667", size = 123160, upload-time = "2025-07-29T07:43:25.26Z" }, - { url = "https://files.pythonhosted.org/packages/91/76/3d29eaa38821730633d6a240d36fa8ad2807e9dfd432c12e1a472ed211eb/mmh3-5.2.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:f698733a8a494466432d611a8f0d1e026f5286dee051beea4b3c3146817e35d5", size = 110206, upload-time = "2025-07-29T07:43:26.699Z" }, - { url = "https://files.pythonhosted.org/packages/44/1c/ccf35892684d3a408202e296e56843743e0b4fb1629e59432ea88cdb3909/mmh3-5.2.0-cp314-cp314t-win32.whl", hash = "sha256:6d541038b3fc360ec538fc116de87462627944765a6750308118f8b509a8eec7", size = 41970, upload-time = "2025-07-29T07:43:27.666Z" }, - { url = "https://files.pythonhosted.org/packages/75/b2/b9e4f1e5adb5e21eb104588fcee2cd1eaa8308255173481427d5ecc4284e/mmh3-5.2.0-cp314-cp314t-win_amd64.whl", hash = "sha256:e912b19cf2378f2967d0c08e86ff4c6c360129887f678e27e4dde970d21b3f4d", size = 43063, upload-time = "2025-07-29T07:43:28.582Z" }, - { url = "https://files.pythonhosted.org/packages/6a/fc/0e61d9a4e29c8679356795a40e48f647b4aad58d71bfc969f0f8f56fb912/mmh3-5.2.0-cp314-cp314t-win_arm64.whl", hash = "sha256:e7884931fe5e788163e7b3c511614130c2c59feffdc21112290a194487efb2e9", size = 40455, upload-time = "2025-07-29T07:43:29.563Z" }, -] - [[package]] name = "model-hosting-container-standards" 
version = "0.1.12" @@ -4648,15 +4198,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/a0/c4/c2971a3ba4c6103a3d10c4b0f24f461ddc027f0f09763220cf35ca1401b3/nest_asyncio-1.6.0-py3-none-any.whl", hash = "sha256:87af6efd6b5e897c81050477ef65c62e2b2f35d51703cae01aff2905b1852e1c", size = 5195, upload-time = "2024-01-21T14:25:17.223Z" }, ] -[[package]] -name = "nest-asyncio2" -version = "1.7.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/2d/eb/ecf8bbf9d22a4e8f7be1628336fe0202da7660790053aa28abeb6c15eb14/nest_asyncio2-1.7.1.tar.gz", hash = "sha256:a1fe5bbbd20894dcceb1842322d74992c5834d5ab692af2c4f59a9a4fcf75fe8", size = 13797, upload-time = "2025-11-20T20:46:07.085Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/8c/48/c1f1ddcfd04bba60470235c2f83733ecff43ebe068dc7715aab60bc92ad8/nest_asyncio2-1.7.1-py3-none-any.whl", hash = "sha256:f83bc1744c3cfa7d47fd29431e5e168db6cb76eda1bb20108955c32f60d7eddf", size = 7504, upload-time = "2025-11-20T20:46:05.704Z" }, -] - [[package]] name = "networkx" version = "3.6.1" @@ -4692,21 +4233,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/df/93/a7b983643d1253bb223234b5b226e69de6cda02b76cdca7770f684b795f5/ninja-1.13.0-py3-none-win_arm64.whl", hash = "sha256:3c0b40b1f0bba764644385319028650087b4c1b18cdfa6f45cb39a3669b81aa9", size = 290806, upload-time = "2025-08-11T15:10:18.018Z" }, ] -[[package]] -name = "nltk" -version = "3.9.2" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "click" }, - { name = "joblib" }, - { name = "regex" }, - { name = "tqdm" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/f9/76/3a5e4312c19a028770f86fd7c058cf9f4ec4321c6cf7526bab998a5b683c/nltk-3.9.2.tar.gz", hash = "sha256:0f409e9b069ca4177c1903c3e843eef90c7e92992fa4931ae607da6de49e1419", size = 2887629, upload-time = "2025-10-01T07:19:23.764Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/60/90/81ac364ef94209c100e12579629dc92bf7a709a84af32f8c551b02c07e94/nltk-3.9.2-py3-none-any.whl", hash = "sha256:1e209d2b3009110635ed9709a67a1a3e33a10f799490fa71cf4bec218c11c88a", size = 1513404, upload-time = "2025-10-01T07:19:21.648Z" }, -] - [[package]] name = "numba" version = "0.61.2" @@ -5078,6 +4604,17 @@ skypilot = [ { name = "semver" }, { name = "skypilot", extra = ["cudo", "do", "fluidstack", "gcp", "kubernetes", "lambda", "paperspace", "runpod"] }, ] +tinker = [ + { name = "fastapi" }, + { name = "huggingface-hub" }, + { name = "numpy" }, + { name = "pillow" }, + { name = "pydantic" }, + { name = "tinker" }, + { name = "torch" }, + { name = "transformers" }, + { name = "uvicorn" }, +] [package.dev-dependencies] dev = [ @@ -5095,12 +4632,6 @@ dev = [ { name = "ruff" }, { name = "ty" }, ] -tinker = [ - { name = "fastapi" }, - { name = "tinker" }, - { name = "tinker-cookbook" }, - { name = "uvicorn" }, -] [package.metadata] requires-dist = [ @@ -5108,8 +4639,10 @@ requires-dist = [ { name = "awscli", marker = "extra == 'backend'", specifier = ">=1.38.1" }, { name = "bitsandbytes", marker = "extra == 'backend'", specifier = ">=0.45.2" }, { name = "duckdb", marker = "extra == 'backend'", specifier = ">=1.0.0" }, + { name = "fastapi", marker = "extra == 'tinker'", specifier = ">=0.128.0" }, { name = "gql", marker = "extra == 'backend'", specifier = "<4" }, { name = "hf-xet", marker = "extra == 'backend'", specifier = ">=1.1.0" }, + { name = "huggingface-hub", marker = "extra == 'tinker'" }, { name = "langchain-core", marker = 
"extra == 'langgraph'", specifier = ">=0.3.51" }, { name = "langchain-openai", marker = "extra == 'langgraph'", specifier = ">=0.3.27" }, { name = "langgraph", marker = "extra == 'langgraph'", specifier = ">=0.6.2" }, @@ -5118,10 +4651,13 @@ requires-dist = [ { name = "nbclient", marker = "extra == 'backend'", specifier = ">=0.10.1" }, { name = "nbmake", marker = "extra == 'backend'", specifier = ">=1.5.5" }, { name = "nest-asyncio", specifier = ">=1.6.0" }, + { name = "numpy", marker = "extra == 'tinker'" }, { name = "openai", specifier = ">=2.14.0" }, { name = "peft", marker = "extra == 'backend'", specifier = ">=0.14.0" }, + { name = "pillow", marker = "extra == 'tinker'" }, { name = "polars", specifier = ">=1.26.0" }, { name = "pyarrow", marker = "extra == 'backend'", specifier = ">=15.0.0" }, + { name = "pydantic", marker = "extra == 'tinker'", specifier = ">=2.12.5" }, { name = "pytest", marker = "extra == 'backend'", specifier = ">=8.4.1" }, { name = "seaborn", marker = "extra == 'plotting'", specifier = ">=0.13.2" }, { name = "semver", marker = "extra == 'skypilot'", specifier = ">=3.0.4" }, @@ -5129,18 +4665,22 @@ requires-dist = [ { name = "setuptools", marker = "extra == 'backend'", specifier = ">=78.1.0" }, { name = "skypilot", extras = ["cudo", "do", "fluidstack", "gcp", "lambda", "kubernetes", "paperspace", "runpod"], marker = "extra == 'skypilot'", specifier = "==0.10.5" }, { name = "tblib", specifier = ">=3.0.0" }, + { name = "tinker", marker = "extra == 'tinker'", specifier = ">=0.8.1" }, { name = "torch", marker = "extra == 'backend'", specifier = ">=2.8.0" }, + { name = "torch", marker = "extra == 'tinker'", specifier = ">=2.8.0" }, { name = "torchao", marker = "extra == 'backend'", specifier = "==0.14.1" }, { name = "transformers", marker = "extra == 'backend'", specifier = ">=4.55.2,<=4.57.3" }, + { name = "transformers", marker = "extra == 'tinker'", specifier = ">=4.55.2,<=4.57.3" }, { name = "trl", marker = "extra == 'backend'", specifier = "==0.20.0" }, { name = "typer", specifier = ">=0.15.2" }, { name = "unsloth", marker = "extra == 'backend'", specifier = "==2025.12.9" }, { name = "unsloth-zoo", marker = "extra == 'backend'", specifier = "==2025.12.7" }, + { name = "uvicorn", marker = "extra == 'tinker'", specifier = ">=0.35.0" }, { name = "vllm", marker = "extra == 'backend'", specifier = "==0.13.0" }, { name = "wandb", marker = "extra == 'backend'", specifier = "==0.23.1" }, { name = "weave", specifier = ">=0.52.23" }, ] -provides-extras = ["plotting", "backend", "skypilot", "langgraph"] +provides-extras = ["plotting", "backend", "skypilot", "langgraph", "tinker"] [package.metadata.requires-dev] dev = [ @@ -5158,12 +4698,6 @@ dev = [ { name = "ruff", specifier = ">=0.12.1" }, { name = "ty", specifier = ">=0.0.14" }, ] -tinker = [ - { name = "fastapi", specifier = ">=0.128.0" }, - { name = "tinker", specifier = ">=0.8.1" }, - { name = "tinker-cookbook", specifier = ">=0.1.0" }, - { name = "uvicorn", specifier = ">=0.35.0" }, -] [[package]] name = "orjson" @@ -5417,15 +4951,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/3b/a4/ab6b7589382ca3df236e03faa71deac88cae040af60c071a78d254a62172/passlib-1.7.4-py2.py3-none-any.whl", hash = "sha256:aa6bca462b8d8bda89c70b382f0c298a20b5560af6cbfa2dce410c0a2fb669f1", size = 525554, upload-time = "2020-10-08T19:00:49.856Z" }, ] -[[package]] -name = "pathlib-abc" -version = "0.5.2" -source = { registry = "https://pypi.org/simple" } -sdist = { url = 
"https://files.pythonhosted.org/packages/d6/cb/448649d7f25d228bf0be3a04590ab7afa77f15e056f8fa976ed05ec9a78f/pathlib_abc-0.5.2.tar.gz", hash = "sha256:fcd56f147234645e2c59c7ae22808b34c364bb231f685ddd9f96885aed78a94c", size = 33342, upload-time = "2025-10-10T18:37:20.524Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/b1/29/c028a0731e202035f0e2e0bfbf1a3e46ad6c628cbb17f6f1cc9eea5d9ff1/pathlib_abc-0.5.2-py3-none-any.whl", hash = "sha256:4c9d94cf1b23af417ce7c0417b43333b06a106c01000b286c99de230d95eefbb", size = 19070, upload-time = "2025-10-10T18:37:19.437Z" }, -] - [[package]] name = "pathspec" version = "0.12.1" @@ -5647,15 +5172,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" }, ] -[[package]] -name = "ply" -version = "3.11" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/e5/69/882ee5c9d017149285cab114ebeab373308ef0f874fcdac9beb90e0ac4da/ply-3.11.tar.gz", hash = "sha256:00c7c1aaa88358b9c765b6d3000c6eec0ba42abca5351b095321aef446081da3", size = 159130, upload-time = "2018-02-15T19:01:31.097Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/a3/58/35da89ee790598a0700ea49b2a66594140f44dec458c07e8e3d4979137fc/ply-3.11-py2.py3-none-any.whl", hash = "sha256:096f9b8350b65ebd2fd1346b12452efe5b9607f7482813ffca50c22722a807ce", size = 49567, upload-time = "2018-02-15T19:01:27.172Z" }, -] - [[package]] name = "polars" version = "1.36.1" @@ -6345,36 +5861,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/a0/e3/59cd50310fc9b59512193629e1984c1f95e5c8ae6e5d8c69532ccc65a7fe/pycparser-2.23-py3-none-any.whl", hash = "sha256:e5c6e8d3fbad53479cab09ac03729e0a9faf2bee3db8208a550daf5af81a5934", size = 118140, upload-time = "2025-09-09T13:23:46.651Z" }, ] -[[package]] -name = "pycryptodomex" -version = "3.23.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/c9/85/e24bf90972a30b0fcd16c73009add1d7d7cd9140c2498a68252028899e41/pycryptodomex-3.23.0.tar.gz", hash = "sha256:71909758f010c82bc99b0abf4ea12012c98962fbf0583c2164f8b84533c2e4da", size = 4922157, upload-time = "2025-05-17T17:23:41.434Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/2e/00/10edb04777069a42490a38c137099d4b17ba6e36a4e6e28bdc7470e9e853/pycryptodomex-3.23.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:7b37e08e3871efe2187bc1fd9320cc81d87caf19816c648f24443483005ff886", size = 2498764, upload-time = "2025-05-17T17:22:21.453Z" }, - { url = "https://files.pythonhosted.org/packages/6b/3f/2872a9c2d3a27eac094f9ceaa5a8a483b774ae69018040ea3240d5b11154/pycryptodomex-3.23.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:91979028227543010d7b2ba2471cf1d1e398b3f183cb105ac584df0c36dac28d", size = 1643012, upload-time = "2025-05-17T17:22:23.702Z" }, - { url = "https://files.pythonhosted.org/packages/70/af/774c2e2b4f6570fbf6a4972161adbb183aeeaa1863bde31e8706f123bf92/pycryptodomex-3.23.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b8962204c47464d5c1c4038abeadd4514a133b28748bcd9fa5b6d62e3cec6fa", size = 2187643, upload-time = "2025-05-17T17:22:26.37Z" }, - { url = 
"https://files.pythonhosted.org/packages/de/a3/71065b24cb889d537954cedc3ae5466af00a2cabcff8e29b73be047e9a19/pycryptodomex-3.23.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a33986a0066860f7fcf7c7bd2bc804fa90e434183645595ae7b33d01f3c91ed8", size = 2273762, upload-time = "2025-05-17T17:22:28.313Z" }, - { url = "https://files.pythonhosted.org/packages/c9/0b/ff6f43b7fbef4d302c8b981fe58467b8871902cdc3eb28896b52421422cc/pycryptodomex-3.23.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7947ab8d589e3178da3d7cdeabe14f841b391e17046954f2fbcd941705762b5", size = 2313012, upload-time = "2025-05-17T17:22:30.57Z" }, - { url = "https://files.pythonhosted.org/packages/02/de/9d4772c0506ab6da10b41159493657105d3f8bb5c53615d19452afc6b315/pycryptodomex-3.23.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:c25e30a20e1b426e1f0fa00131c516f16e474204eee1139d1603e132acffc314", size = 2186856, upload-time = "2025-05-17T17:22:32.819Z" }, - { url = "https://files.pythonhosted.org/packages/28/ad/8b30efcd6341707a234e5eba5493700a17852ca1ac7a75daa7945fcf6427/pycryptodomex-3.23.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:da4fa650cef02db88c2b98acc5434461e027dce0ae8c22dd5a69013eaf510006", size = 2347523, upload-time = "2025-05-17T17:22:35.386Z" }, - { url = "https://files.pythonhosted.org/packages/0f/02/16868e9f655b7670dbb0ac4f2844145cbc42251f916fc35c414ad2359849/pycryptodomex-3.23.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:58b851b9effd0d072d4ca2e4542bf2a4abcf13c82a29fd2c93ce27ee2a2e9462", size = 2272825, upload-time = "2025-05-17T17:22:37.632Z" }, - { url = "https://files.pythonhosted.org/packages/ca/18/4ca89ac737230b52ac8ffaca42f9c6f1fd07c81a6cd821e91af79db60632/pycryptodomex-3.23.0-cp313-cp313t-win32.whl", hash = "sha256:a9d446e844f08299236780f2efa9898c818fe7e02f17263866b8550c7d5fb328", size = 1772078, upload-time = "2025-05-17T17:22:40Z" }, - { url = "https://files.pythonhosted.org/packages/73/34/13e01c322db027682e00986873eca803f11c56ade9ba5bbf3225841ea2d4/pycryptodomex-3.23.0-cp313-cp313t-win_amd64.whl", hash = "sha256:bc65bdd9fc8de7a35a74cab1c898cab391a4add33a8fe740bda00f5976ca4708", size = 1803656, upload-time = "2025-05-17T17:22:42.139Z" }, - { url = "https://files.pythonhosted.org/packages/54/68/9504c8796b1805d58f4425002bcca20f12880e6fa4dc2fc9a668705c7a08/pycryptodomex-3.23.0-cp313-cp313t-win_arm64.whl", hash = "sha256:c885da45e70139464f082018ac527fdaad26f1657a99ee13eecdce0f0ca24ab4", size = 1707172, upload-time = "2025-05-17T17:22:44.704Z" }, - { url = "https://files.pythonhosted.org/packages/dd/9c/1a8f35daa39784ed8adf93a694e7e5dc15c23c741bbda06e1d45f8979e9e/pycryptodomex-3.23.0-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:06698f957fe1ab229a99ba2defeeae1c09af185baa909a31a5d1f9d42b1aaed6", size = 2499240, upload-time = "2025-05-17T17:22:46.953Z" }, - { url = "https://files.pythonhosted.org/packages/7a/62/f5221a191a97157d240cf6643747558759126c76ee92f29a3f4aee3197a5/pycryptodomex-3.23.0-cp37-abi3-macosx_10_9_x86_64.whl", hash = "sha256:b2c2537863eccef2d41061e82a881dcabb04944c5c06c5aa7110b577cc487545", size = 1644042, upload-time = "2025-05-17T17:22:49.098Z" }, - { url = "https://files.pythonhosted.org/packages/8c/fd/5a054543c8988d4ed7b612721d7e78a4b9bf36bc3c5ad45ef45c22d0060e/pycryptodomex-3.23.0-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:43c446e2ba8df8889e0e16f02211c25b4934898384c1ec1ec04d7889c0333587", size = 2186227, upload-time = 
"2025-05-17T17:22:51.139Z" }, - { url = "https://files.pythonhosted.org/packages/c8/a9/8862616a85cf450d2822dbd4fff1fcaba90877907a6ff5bc2672cafe42f8/pycryptodomex-3.23.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f489c4765093fb60e2edafdf223397bc716491b2b69fe74367b70d6999257a5c", size = 2272578, upload-time = "2025-05-17T17:22:53.676Z" }, - { url = "https://files.pythonhosted.org/packages/46/9f/bda9c49a7c1842820de674ab36c79f4fbeeee03f8ff0e4f3546c3889076b/pycryptodomex-3.23.0-cp37-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bdc69d0d3d989a1029df0eed67cc5e8e5d968f3724f4519bd03e0ec68df7543c", size = 2312166, upload-time = "2025-05-17T17:22:56.585Z" }, - { url = "https://files.pythonhosted.org/packages/03/cc/870b9bf8ca92866ca0186534801cf8d20554ad2a76ca959538041b7a7cf4/pycryptodomex-3.23.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:6bbcb1dd0f646484939e142462d9e532482bc74475cecf9c4903d4e1cd21f003", size = 2185467, upload-time = "2025-05-17T17:22:59.237Z" }, - { url = "https://files.pythonhosted.org/packages/96/e3/ce9348236d8e669fea5dd82a90e86be48b9c341210f44e25443162aba187/pycryptodomex-3.23.0-cp37-abi3-musllinux_1_2_i686.whl", hash = "sha256:8a4fcd42ccb04c31268d1efeecfccfd1249612b4de6374205376b8f280321744", size = 2346104, upload-time = "2025-05-17T17:23:02.112Z" }, - { url = "https://files.pythonhosted.org/packages/a5/e9/e869bcee87beb89040263c416a8a50204f7f7a83ac11897646c9e71e0daf/pycryptodomex-3.23.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:55ccbe27f049743a4caf4f4221b166560d3438d0b1e5ab929e07ae1702a4d6fd", size = 2271038, upload-time = "2025-05-17T17:23:04.872Z" }, - { url = "https://files.pythonhosted.org/packages/8d/67/09ee8500dd22614af5fbaa51a4aee6e342b5fa8aecf0a6cb9cbf52fa6d45/pycryptodomex-3.23.0-cp37-abi3-win32.whl", hash = "sha256:189afbc87f0b9f158386bf051f720e20fa6145975f1e76369303d0f31d1a8d7c", size = 1771969, upload-time = "2025-05-17T17:23:07.115Z" }, - { url = "https://files.pythonhosted.org/packages/69/96/11f36f71a865dd6df03716d33bd07a67e9d20f6b8d39820470b766af323c/pycryptodomex-3.23.0-cp37-abi3-win_amd64.whl", hash = "sha256:52e5ca58c3a0b0bd5e100a9fbc8015059b05cffc6c66ce9d98b4b45e023443b9", size = 1803124, upload-time = "2025-05-17T17:23:09.267Z" }, - { url = "https://files.pythonhosted.org/packages/f9/93/45c1cdcbeb182ccd2e144c693eaa097763b08b38cded279f0053ed53c553/pycryptodomex-3.23.0-cp37-abi3-win_arm64.whl", hash = "sha256:02d87b80778c171445d67e23d1caef279bf4b25c3597050ccd2e13970b57fd51", size = 1707161, upload-time = "2025-05-17T17:23:11.414Z" }, -] - [[package]] name = "pydantic" version = "2.12.5" @@ -6563,12 +6049,6 @@ crypto = [ { name = "cryptography" }, ] -[[package]] -name = "pylatexenc" -version = "2.10" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/5d/ab/34ec41718af73c00119d0351b7a2531d2ebddb51833a36448fc7b862be60/pylatexenc-2.10.tar.gz", hash = "sha256:3dd8fd84eb46dc30bee1e23eaab8d8fb5a7f507347b23e5f38ad9675c84f40d3", size = 162597, upload-time = "2021-04-06T07:56:07.854Z" } - [[package]] name = "pynacl" version = "1.6.1" @@ -7422,20 +6902,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/88/5c/ce583cfbba69f4f989658c7e984b1175d4e1f5f19132d9554a5ff7031647/runpod-1.8.1-py3-none-any.whl", hash = "sha256:2cc36ce80c02b7b6f54216154345e5064bfa510718acfc684cd9f56ac506d518", size = 157526, upload-time = "2025-11-19T22:54:06.968Z" }, ] -[[package]] -name = "s3fs" -version = "2025.9.0" -source = 
{ registry = "https://pypi.org/simple" } -dependencies = [ - { name = "aiobotocore" }, - { name = "aiohttp" }, - { name = "fsspec" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/ee/f3/8e6371436666aedfd16e63ff68a51b8a8fcf5f33a0eee33c35e0b2476b27/s3fs-2025.9.0.tar.gz", hash = "sha256:6d44257ef19ea64968d0720744c4af7a063a05f5c1be0e17ce943bef7302bc30", size = 77823, upload-time = "2025-09-02T19:18:21.781Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/37/b3/ca7d58ca25b1bb6df57e6cbd0ca8d6437a4b9ce1cd35adc8a6b2949c113b/s3fs-2025.9.0-py3-none-any.whl", hash = "sha256:c33c93d48f66ed440dbaf6600be149cdf8beae4b6f8f0201a209c5801aeb7e30", size = 30319, upload-time = "2025-09-02T19:18:20.563Z" }, -] - [[package]] name = "s3transfer" version = "0.14.0" @@ -7735,15 +7201,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/e0/f9/0595336914c5619e5f28a1fb793285925a8cd4b432c9da0a987836c7f822/shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686", size = 9755, upload-time = "2023-10-24T04:13:38.866Z" }, ] -[[package]] -name = "shortuuid" -version = "1.0.13" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/8c/e2/bcf761f3bff95856203f9559baf3741c416071dd200c0fc19fad7f078f86/shortuuid-1.0.13.tar.gz", hash = "sha256:3bb9cf07f606260584b1df46399c0b87dd84773e7b25912b7e391e30797c5e72", size = 9662, upload-time = "2024-03-11T20:11:06.879Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/c0/44/21d6bf170bf40b41396480d8d49ad640bca3f2b02139cd52aa1e272830a5/shortuuid-1.0.13-py3-none-any.whl", hash = "sha256:a482a497300b49b4953e15108a7913244e1bb0d41f9d332f5e9925dba33a3c5a", size = 10529, upload-time = "2024-03-11T20:11:04.807Z" }, -] - [[package]] name = "simpleeval" version = "1.0.3" @@ -7956,15 +7413,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/32/46/9cb0e58b2deb7f82b84065f37f3bffeb12413f947f9388e4cac22c4621ce/sortedcontainers-2.4.0-py2.py3-none-any.whl", hash = "sha256:a163dcaede0f1c021485e957a39245190e74249897e2ae4b2aa38595db237ee0", size = 29575, upload-time = "2021-05-16T22:03:41.177Z" }, ] -[[package]] -name = "soupsieve" -version = "2.8.3" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/7b/ae/2d9c981590ed9999a0d91755b47fc74f74de286b0f5cee14c9269041e6c4/soupsieve-2.8.3.tar.gz", hash = "sha256:3267f1eeea4251fb42728b6dfb746edc9acaffc4a45b27e19450b676586e8349", size = 118627, upload-time = "2026-01-20T04:27:02.457Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/46/2c/1462b1d0a634697ae9e55b3cecdcb64788e8b7d63f54d923fcd0bb140aed/soupsieve-2.8.3-py3-none-any.whl", hash = "sha256:ed64f2ba4eebeab06cc4962affce381647455978ffc1e36bb79a545b91f45a95", size = 37016, upload-time = "2026-01-20T04:27:01.012Z" }, -] - [[package]] name = "sqlalchemy" version = "2.0.45" @@ -8117,50 +7565,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/e5/30/643397144bfbfec6f6ef821f36f33e57d35946c44a2352d3c9f0ae847619/tenacity-9.1.2-py3-none-any.whl", hash = "sha256:f77bf36710d8b73a50b2dd155c97b870017ad21afe6ab300326b0371b3b05138", size = 28248, upload-time = "2025-04-02T08:25:07.678Z" }, ] -[[package]] -name = "termcolor" -version = "3.3.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/46/79/cf31d7a93a8fdc6aa0fbb665be84426a8c5a557d9240b6239e9e11e35fc5/termcolor-3.3.0.tar.gz", hash = 
"sha256:348871ca648ec6a9a983a13ab626c0acce02f515b9e1983332b17af7979521c5", size = 14434, upload-time = "2025-12-29T12:55:21.882Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/33/d1/8bb87d21e9aeb323cc03034f5eaf2c8f69841e40e4853c2627edf8111ed3/termcolor-3.3.0-py3-none-any.whl", hash = "sha256:cf642efadaf0a8ebbbf4bc7a31cec2f9b5f21a9f726f4ccbb08192c9c26f43a5", size = 7734, upload-time = "2025-12-29T12:55:20.718Z" }, -] - -[[package]] -name = "textarena" -version = "0.7.4" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "chess" }, - { name = "nltk" }, - { name = "openai" }, - { name = "python-dotenv" }, - { name = "requests" }, - { name = "rich" }, - { name = "websockets" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/ba/04/4a3ca42093d0be2a9c377ae3335a6c6baac1d278ae932562ec69f339d172/textarena-0.7.4.tar.gz", hash = "sha256:28bb9170d7718f2ae05e4515bea82262422731e563fc7318a9e7983de0cadd4f", size = 954969, upload-time = "2025-10-16T14:41:55.981Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/26/b4/9a9ba65154aff853c75b3d7324319d168ad9c69c6097f4aa3c16da7d9ef3/textarena-0.7.4-py3-none-any.whl", hash = "sha256:684784e78278e518066f67557ee93b47c238d16cbbd15d3abdaa3147562d3024", size = 1073570, upload-time = "2025-10-16T14:41:53.965Z" }, -] - -[[package]] -name = "textual" -version = "7.3.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "markdown-it-py", extra = ["linkify"] }, - { name = "mdit-py-plugins" }, - { name = "platformdirs" }, - { name = "pygments" }, - { name = "rich" }, - { name = "typing-extensions" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/6f/ee/620c887bfad9d6eba062dfa3b6b0e735e0259102e2667b19f21625ef598d/textual-7.3.0.tar.gz", hash = "sha256:3169e8ba5518a979b0771e60be380ab1a6c344f30a2126e360e6f38d009a3de4", size = 1590692, upload-time = "2026-01-15T16:32:02.342Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/c3/1f/abeb4e5cb36b99dd37db72beb2a74d58598ccb35aaadf14624ee967d4a6b/textual-7.3.0-py3-none-any.whl", hash = "sha256:db235cecf969c87fe5a9c04d83595f506affc9db81f3a53ab849534d726d330a", size = 716374, upload-time = "2026-01-15T16:31:58.233Z" }, -] - [[package]] name = "tiktoken" version = "0.12.0" @@ -8236,34 +7640,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/e7/00/0282156cf66331e3f2dc0f8cb7020886fdbe6843771d3afac810c94f2638/tinker-0.9.0-py3-none-any.whl", hash = "sha256:e7c4a476a3c68799654021807cd9e1a4b3954f664b30f60fe613caeb774d7f94", size = 168536, upload-time = "2026-01-26T22:33:57.478Z" }, ] -[[package]] -name = "tinker-cookbook" -version = "0.1.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "anyio" }, - { name = "blobfile" }, - { name = "chz" }, - { name = "cloudpickle" }, - { name = "datasets" }, - { name = "inspect-ai" }, - { name = "math-verify" }, - { name = "numpy" }, - { name = "pylatexenc" }, - { name = "rich" }, - { name = "scipy" }, - { name = "sympy" }, - { name = "termcolor" }, - { name = "textarena" }, - { name = "tinker" }, - { name = "torch" }, - { name = "transformers" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/3b/ca/99cf38c9e2b5abaadeeb522d1018aa37de76920bc3359eab07cf614a68e7/tinker_cookbook-0.1.0.tar.gz", hash = "sha256:0e60f934e73b7de64b6f54b0dcaec0d7fa4f0025525ddb5dfb47e1605fd3979f", size = 668978, upload-time = "2025-12-04T20:59:11.346Z" } -wheels = [ - { url = 
"https://files.pythonhosted.org/packages/2b/e2/71220ef68b2c7ddec23701ca3ff9312ec4b9f6e5603b5dfc5c1b7ab53814/tinker_cookbook-0.1.0-py3-none-any.whl", hash = "sha256:47b3f71e831758452be7950a132454314cdf3b688eba262403dc84365cf9b417", size = 379260, upload-time = "2025-12-04T20:59:09.421Z" }, -] - [[package]] name = "tokenizers" version = "0.22.1" @@ -8724,15 +8100,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/c7/b0/003792df09decd6849a5e39c28b513c06e84436a54440380862b5aeff25d/tzdata-2025.3-py2.py3-none-any.whl", hash = "sha256:06a47e5700f3081aab02b2e513160914ff0694bce9947d6b76ebd6bf57cfc5d1", size = 348521, upload-time = "2025-12-13T17:45:33.889Z" }, ] -[[package]] -name = "uc-micro-py" -version = "1.0.3" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/91/7a/146a99696aee0609e3712f2b44c6274566bc368dfe8375191278045186b8/uc-micro-py-1.0.3.tar.gz", hash = "sha256:d321b92cff673ec58027c04015fcaa8bb1e005478643ff4a500882eaab88c48a", size = 6043, upload-time = "2024-02-09T16:52:01.654Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/37/87/1f677586e8ac487e29672e4b17455758fce261de06a0d086167bb760361a/uc_micro_py-1.0.3-py3-none-any.whl", hash = "sha256:db1dffff340817673d7b466ec86114a9dc0e9d4d9b5ba229d9d60e5c12600cd5", size = 6229, upload-time = "2024-02-09T16:52:00.371Z" }, -] - [[package]] name = "ujson" version = "5.11.0" @@ -8802,19 +8169,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/52/5b/8c5e33228f7f83f05719964db59f3f9f276d272dc43752fa3bbf0df53e7b/ujson-5.11.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:416389ec19ef5f2013592f791486bef712ebce0cd59299bf9df1ba40bb2f6e04", size = 43835, upload-time = "2025-08-20T11:56:55.237Z" }, ] -[[package]] -name = "universal-pathlib" -version = "0.3.8" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "fsspec" }, - { name = "pathlib-abc" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/6e/ec/764b0d4593c6a8f5f66b347a19b5db9486dd0024b5e3339d468064a90c76/universal_pathlib-0.3.8.tar.gz", hash = "sha256:ead2b65bca3df6e11c3b7cb36fc9846340bc3c2db4ef57131550260422b0a3e8", size = 258837, upload-time = "2026-01-11T22:13:53.328Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/86/2c/fc9416619a418e94576aef84ef263906a24f76a21a1c3e96ddae25c82df9/universal_pathlib-0.3.8-py3-none-any.whl", hash = "sha256:dac4fd9a3df918d85bb6da678e794b5dfa9ecdb5ff74675b497553dbe50134b8", size = 82608, upload-time = "2026-01-11T22:13:51.313Z" }, -] - [[package]] name = "unsloth" version = "2025.12.9" @@ -9381,65 +8735,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/e1/07/c6fe3ad3e685340704d314d765b7912993bcb8dc198f0e7a89382d37974b/win32_setctime-1.2.0-py3-none-any.whl", hash = "sha256:95d644c4e708aba81dc3704a116d8cbc974d70b3bdb8be1d150e36be6e9d1390", size = 4083, upload-time = "2024-12-07T15:28:26.465Z" }, ] -[[package]] -name = "wrapt" -version = "1.17.3" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/95/8f/aeb76c5b46e273670962298c23e7ddde79916cb74db802131d49a85e4b7d/wrapt-1.17.3.tar.gz", hash = "sha256:f66eb08feaa410fe4eebd17f2a2c8e2e46d3476e9f8c783daa8e09e0faa666d0", size = 55547, upload-time = "2025-08-12T05:53:21.714Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/52/db/00e2a219213856074a213503fdac0511203dceefff26e1daa15250cc01a0/wrapt-1.17.3-cp311-cp311-macosx_10_9_universal2.whl", hash = 
"sha256:273a736c4645e63ac582c60a56b0acb529ef07f78e08dc6bfadf6a46b19c0da7", size = 53482, upload-time = "2025-08-12T05:51:45.79Z" }, - { url = "https://files.pythonhosted.org/packages/5e/30/ca3c4a5eba478408572096fe9ce36e6e915994dd26a4e9e98b4f729c06d9/wrapt-1.17.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5531d911795e3f935a9c23eb1c8c03c211661a5060aab167065896bbf62a5f85", size = 38674, upload-time = "2025-08-12T05:51:34.629Z" }, - { url = "https://files.pythonhosted.org/packages/31/25/3e8cc2c46b5329c5957cec959cb76a10718e1a513309c31399a4dad07eb3/wrapt-1.17.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0610b46293c59a3adbae3dee552b648b984176f8562ee0dba099a56cfbe4df1f", size = 38959, upload-time = "2025-08-12T05:51:56.074Z" }, - { url = "https://files.pythonhosted.org/packages/5d/8f/a32a99fc03e4b37e31b57cb9cefc65050ea08147a8ce12f288616b05ef54/wrapt-1.17.3-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:b32888aad8b6e68f83a8fdccbf3165f5469702a7544472bdf41f582970ed3311", size = 82376, upload-time = "2025-08-12T05:52:32.134Z" }, - { url = "https://files.pythonhosted.org/packages/31/57/4930cb8d9d70d59c27ee1332a318c20291749b4fba31f113c2f8ac49a72e/wrapt-1.17.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8cccf4f81371f257440c88faed6b74f1053eef90807b77e31ca057b2db74edb1", size = 83604, upload-time = "2025-08-12T05:52:11.663Z" }, - { url = "https://files.pythonhosted.org/packages/a8/f3/1afd48de81d63dd66e01b263a6fbb86e1b5053b419b9b33d13e1f6d0f7d0/wrapt-1.17.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d8a210b158a34164de8bb68b0e7780041a903d7b00c87e906fb69928bf7890d5", size = 82782, upload-time = "2025-08-12T05:52:12.626Z" }, - { url = "https://files.pythonhosted.org/packages/1e/d7/4ad5327612173b144998232f98a85bb24b60c352afb73bc48e3e0d2bdc4e/wrapt-1.17.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:79573c24a46ce11aab457b472efd8d125e5a51da2d1d24387666cd85f54c05b2", size = 82076, upload-time = "2025-08-12T05:52:33.168Z" }, - { url = "https://files.pythonhosted.org/packages/bb/59/e0adfc831674a65694f18ea6dc821f9fcb9ec82c2ce7e3d73a88ba2e8718/wrapt-1.17.3-cp311-cp311-win32.whl", hash = "sha256:c31eebe420a9a5d2887b13000b043ff6ca27c452a9a22fa71f35f118e8d4bf89", size = 36457, upload-time = "2025-08-12T05:53:03.936Z" }, - { url = "https://files.pythonhosted.org/packages/83/88/16b7231ba49861b6f75fc309b11012ede4d6b0a9c90969d9e0db8d991aeb/wrapt-1.17.3-cp311-cp311-win_amd64.whl", hash = "sha256:0b1831115c97f0663cb77aa27d381237e73ad4f721391a9bfb2fe8bc25fa6e77", size = 38745, upload-time = "2025-08-12T05:53:02.885Z" }, - { url = "https://files.pythonhosted.org/packages/9a/1e/c4d4f3398ec073012c51d1c8d87f715f56765444e1a4b11e5180577b7e6e/wrapt-1.17.3-cp311-cp311-win_arm64.whl", hash = "sha256:5a7b3c1ee8265eb4c8f1b7d29943f195c00673f5ab60c192eba2d4a7eae5f46a", size = 36806, upload-time = "2025-08-12T05:52:53.368Z" }, - { url = "https://files.pythonhosted.org/packages/9f/41/cad1aba93e752f1f9268c77270da3c469883d56e2798e7df6240dcb2287b/wrapt-1.17.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:ab232e7fdb44cdfbf55fc3afa31bcdb0d8980b9b95c38b6405df2acb672af0e0", size = 53998, upload-time = "2025-08-12T05:51:47.138Z" }, - { url = "https://files.pythonhosted.org/packages/60/f8/096a7cc13097a1869fe44efe68dace40d2a16ecb853141394047f0780b96/wrapt-1.17.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:9baa544e6acc91130e926e8c802a17f3b16fbea0fd441b5a60f5cf2cc5c3deba", size = 39020, 
upload-time = "2025-08-12T05:51:35.906Z" }, - { url = "https://files.pythonhosted.org/packages/33/df/bdf864b8997aab4febb96a9ae5c124f700a5abd9b5e13d2a3214ec4be705/wrapt-1.17.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6b538e31eca1a7ea4605e44f81a48aa24c4632a277431a6ed3f328835901f4fd", size = 39098, upload-time = "2025-08-12T05:51:57.474Z" }, - { url = "https://files.pythonhosted.org/packages/9f/81/5d931d78d0eb732b95dc3ddaeeb71c8bb572fb01356e9133916cd729ecdd/wrapt-1.17.3-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:042ec3bb8f319c147b1301f2393bc19dba6e176b7da446853406d041c36c7828", size = 88036, upload-time = "2025-08-12T05:52:34.784Z" }, - { url = "https://files.pythonhosted.org/packages/ca/38/2e1785df03b3d72d34fc6252d91d9d12dc27a5c89caef3335a1bbb8908ca/wrapt-1.17.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3af60380ba0b7b5aeb329bc4e402acd25bd877e98b3727b0135cb5c2efdaefe9", size = 88156, upload-time = "2025-08-12T05:52:13.599Z" }, - { url = "https://files.pythonhosted.org/packages/b3/8b/48cdb60fe0603e34e05cffda0b2a4adab81fd43718e11111a4b0100fd7c1/wrapt-1.17.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:0b02e424deef65c9f7326d8c19220a2c9040c51dc165cddb732f16198c168396", size = 87102, upload-time = "2025-08-12T05:52:14.56Z" }, - { url = "https://files.pythonhosted.org/packages/3c/51/d81abca783b58f40a154f1b2c56db1d2d9e0d04fa2d4224e357529f57a57/wrapt-1.17.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:74afa28374a3c3a11b3b5e5fca0ae03bef8450d6aa3ab3a1e2c30e3a75d023dc", size = 87732, upload-time = "2025-08-12T05:52:36.165Z" }, - { url = "https://files.pythonhosted.org/packages/9e/b1/43b286ca1392a006d5336412d41663eeef1ad57485f3e52c767376ba7e5a/wrapt-1.17.3-cp312-cp312-win32.whl", hash = "sha256:4da9f45279fff3543c371d5ababc57a0384f70be244de7759c85a7f989cb4ebe", size = 36705, upload-time = "2025-08-12T05:53:07.123Z" }, - { url = "https://files.pythonhosted.org/packages/28/de/49493f962bd3c586ab4b88066e967aa2e0703d6ef2c43aa28cb83bf7b507/wrapt-1.17.3-cp312-cp312-win_amd64.whl", hash = "sha256:e71d5c6ebac14875668a1e90baf2ea0ef5b7ac7918355850c0908ae82bcb297c", size = 38877, upload-time = "2025-08-12T05:53:05.436Z" }, - { url = "https://files.pythonhosted.org/packages/f1/48/0f7102fe9cb1e8a5a77f80d4f0956d62d97034bbe88d33e94699f99d181d/wrapt-1.17.3-cp312-cp312-win_arm64.whl", hash = "sha256:604d076c55e2fdd4c1c03d06dc1a31b95130010517b5019db15365ec4a405fc6", size = 36885, upload-time = "2025-08-12T05:52:54.367Z" }, - { url = "https://files.pythonhosted.org/packages/fc/f6/759ece88472157acb55fc195e5b116e06730f1b651b5b314c66291729193/wrapt-1.17.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a47681378a0439215912ef542c45a783484d4dd82bac412b71e59cf9c0e1cea0", size = 54003, upload-time = "2025-08-12T05:51:48.627Z" }, - { url = "https://files.pythonhosted.org/packages/4f/a9/49940b9dc6d47027dc850c116d79b4155f15c08547d04db0f07121499347/wrapt-1.17.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:54a30837587c6ee3cd1a4d1c2ec5d24e77984d44e2f34547e2323ddb4e22eb77", size = 39025, upload-time = "2025-08-12T05:51:37.156Z" }, - { url = "https://files.pythonhosted.org/packages/45/35/6a08de0f2c96dcdd7fe464d7420ddb9a7655a6561150e5fc4da9356aeaab/wrapt-1.17.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:16ecf15d6af39246fe33e507105d67e4b81d8f8d2c6598ff7e3ca1b8a37213f7", size = 39108, upload-time = "2025-08-12T05:51:58.425Z" }, - { url = 
"https://files.pythonhosted.org/packages/0c/37/6faf15cfa41bf1f3dba80cd3f5ccc6622dfccb660ab26ed79f0178c7497f/wrapt-1.17.3-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:6fd1ad24dc235e4ab88cda009e19bf347aabb975e44fd5c2fb22a3f6e4141277", size = 88072, upload-time = "2025-08-12T05:52:37.53Z" }, - { url = "https://files.pythonhosted.org/packages/78/f2/efe19ada4a38e4e15b6dff39c3e3f3f73f5decf901f66e6f72fe79623a06/wrapt-1.17.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0ed61b7c2d49cee3c027372df5809a59d60cf1b6c2f81ee980a091f3afed6a2d", size = 88214, upload-time = "2025-08-12T05:52:15.886Z" }, - { url = "https://files.pythonhosted.org/packages/40/90/ca86701e9de1622b16e09689fc24b76f69b06bb0150990f6f4e8b0eeb576/wrapt-1.17.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:423ed5420ad5f5529db9ce89eac09c8a2f97da18eb1c870237e84c5a5c2d60aa", size = 87105, upload-time = "2025-08-12T05:52:17.914Z" }, - { url = "https://files.pythonhosted.org/packages/fd/e0/d10bd257c9a3e15cbf5523025252cc14d77468e8ed644aafb2d6f54cb95d/wrapt-1.17.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e01375f275f010fcbf7f643b4279896d04e571889b8a5b3f848423d91bf07050", size = 87766, upload-time = "2025-08-12T05:52:39.243Z" }, - { url = "https://files.pythonhosted.org/packages/e8/cf/7d848740203c7b4b27eb55dbfede11aca974a51c3d894f6cc4b865f42f58/wrapt-1.17.3-cp313-cp313-win32.whl", hash = "sha256:53e5e39ff71b3fc484df8a522c933ea2b7cdd0d5d15ae82e5b23fde87d44cbd8", size = 36711, upload-time = "2025-08-12T05:53:10.074Z" }, - { url = "https://files.pythonhosted.org/packages/57/54/35a84d0a4d23ea675994104e667ceff49227ce473ba6a59ba2c84f250b74/wrapt-1.17.3-cp313-cp313-win_amd64.whl", hash = "sha256:1f0b2f40cf341ee8cc1a97d51ff50dddb9fcc73241b9143ec74b30fc4f44f6cb", size = 38885, upload-time = "2025-08-12T05:53:08.695Z" }, - { url = "https://files.pythonhosted.org/packages/01/77/66e54407c59d7b02a3c4e0af3783168fff8e5d61def52cda8728439d86bc/wrapt-1.17.3-cp313-cp313-win_arm64.whl", hash = "sha256:7425ac3c54430f5fc5e7b6f41d41e704db073309acfc09305816bc6a0b26bb16", size = 36896, upload-time = "2025-08-12T05:52:55.34Z" }, - { url = "https://files.pythonhosted.org/packages/02/a2/cd864b2a14f20d14f4c496fab97802001560f9f41554eef6df201cd7f76c/wrapt-1.17.3-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:cf30f6e3c077c8e6a9a7809c94551203c8843e74ba0c960f4a98cd80d4665d39", size = 54132, upload-time = "2025-08-12T05:51:49.864Z" }, - { url = "https://files.pythonhosted.org/packages/d5/46/d011725b0c89e853dc44cceb738a307cde5d240d023d6d40a82d1b4e1182/wrapt-1.17.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:e228514a06843cae89621384cfe3a80418f3c04aadf8a3b14e46a7be704e4235", size = 39091, upload-time = "2025-08-12T05:51:38.935Z" }, - { url = "https://files.pythonhosted.org/packages/2e/9e/3ad852d77c35aae7ddebdbc3b6d35ec8013af7d7dddad0ad911f3d891dae/wrapt-1.17.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:5ea5eb3c0c071862997d6f3e02af1d055f381b1d25b286b9d6644b79db77657c", size = 39172, upload-time = "2025-08-12T05:51:59.365Z" }, - { url = "https://files.pythonhosted.org/packages/c3/f7/c983d2762bcce2326c317c26a6a1e7016f7eb039c27cdf5c4e30f4160f31/wrapt-1.17.3-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:281262213373b6d5e4bb4353bc36d1ba4084e6d6b5d242863721ef2bf2c2930b", size = 87163, upload-time = "2025-08-12T05:52:40.965Z" }, - { url = 
"https://files.pythonhosted.org/packages/e4/0f/f673f75d489c7f22d17fe0193e84b41540d962f75fce579cf6873167c29b/wrapt-1.17.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:dc4a8d2b25efb6681ecacad42fca8859f88092d8732b170de6a5dddd80a1c8fa", size = 87963, upload-time = "2025-08-12T05:52:20.326Z" }, - { url = "https://files.pythonhosted.org/packages/df/61/515ad6caca68995da2fac7a6af97faab8f78ebe3bf4f761e1b77efbc47b5/wrapt-1.17.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:373342dd05b1d07d752cecbec0c41817231f29f3a89aa8b8843f7b95992ed0c7", size = 86945, upload-time = "2025-08-12T05:52:21.581Z" }, - { url = "https://files.pythonhosted.org/packages/d3/bd/4e70162ce398462a467bc09e768bee112f1412e563620adc353de9055d33/wrapt-1.17.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d40770d7c0fd5cbed9d84b2c3f2e156431a12c9a37dc6284060fb4bec0b7ffd4", size = 86857, upload-time = "2025-08-12T05:52:43.043Z" }, - { url = "https://files.pythonhosted.org/packages/2b/b8/da8560695e9284810b8d3df8a19396a6e40e7518059584a1a394a2b35e0a/wrapt-1.17.3-cp314-cp314-win32.whl", hash = "sha256:fbd3c8319de8e1dc79d346929cd71d523622da527cca14e0c1d257e31c2b8b10", size = 37178, upload-time = "2025-08-12T05:53:12.605Z" }, - { url = "https://files.pythonhosted.org/packages/db/c8/b71eeb192c440d67a5a0449aaee2310a1a1e8eca41676046f99ed2487e9f/wrapt-1.17.3-cp314-cp314-win_amd64.whl", hash = "sha256:e1a4120ae5705f673727d3253de3ed0e016f7cd78dc463db1b31e2463e1f3cf6", size = 39310, upload-time = "2025-08-12T05:53:11.106Z" }, - { url = "https://files.pythonhosted.org/packages/45/20/2cda20fd4865fa40f86f6c46ed37a2a8356a7a2fde0773269311f2af56c7/wrapt-1.17.3-cp314-cp314-win_arm64.whl", hash = "sha256:507553480670cab08a800b9463bdb881b2edeed77dc677b0a5915e6106e91a58", size = 37266, upload-time = "2025-08-12T05:52:56.531Z" }, - { url = "https://files.pythonhosted.org/packages/77/ed/dd5cf21aec36c80443c6f900449260b80e2a65cf963668eaef3b9accce36/wrapt-1.17.3-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:ed7c635ae45cfbc1a7371f708727bf74690daedc49b4dba310590ca0bd28aa8a", size = 56544, upload-time = "2025-08-12T05:51:51.109Z" }, - { url = "https://files.pythonhosted.org/packages/8d/96/450c651cc753877ad100c7949ab4d2e2ecc4d97157e00fa8f45df682456a/wrapt-1.17.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:249f88ed15503f6492a71f01442abddd73856a0032ae860de6d75ca62eed8067", size = 40283, upload-time = "2025-08-12T05:51:39.912Z" }, - { url = "https://files.pythonhosted.org/packages/d1/86/2fcad95994d9b572db57632acb6f900695a648c3e063f2cd344b3f5c5a37/wrapt-1.17.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:5a03a38adec8066d5a37bea22f2ba6bbf39fcdefbe2d91419ab864c3fb515454", size = 40366, upload-time = "2025-08-12T05:52:00.693Z" }, - { url = "https://files.pythonhosted.org/packages/64/0e/f4472f2fdde2d4617975144311f8800ef73677a159be7fe61fa50997d6c0/wrapt-1.17.3-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:5d4478d72eb61c36e5b446e375bbc49ed002430d17cdec3cecb36993398e1a9e", size = 108571, upload-time = "2025-08-12T05:52:44.521Z" }, - { url = "https://files.pythonhosted.org/packages/cc/01/9b85a99996b0a97c8a17484684f206cbb6ba73c1ce6890ac668bcf3838fb/wrapt-1.17.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:223db574bb38637e8230eb14b185565023ab624474df94d2af18f1cdb625216f", size = 113094, upload-time = "2025-08-12T05:52:22.618Z" }, - { url = 
"https://files.pythonhosted.org/packages/25/02/78926c1efddcc7b3aa0bc3d6b33a822f7d898059f7cd9ace8c8318e559ef/wrapt-1.17.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:e405adefb53a435f01efa7ccdec012c016b5a1d3f35459990afc39b6be4d5056", size = 110659, upload-time = "2025-08-12T05:52:24.057Z" }, - { url = "https://files.pythonhosted.org/packages/dc/ee/c414501ad518ac3e6fe184753632fe5e5ecacdcf0effc23f31c1e4f7bfcf/wrapt-1.17.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:88547535b787a6c9ce4086917b6e1d291aa8ed914fdd3a838b3539dc95c12804", size = 106946, upload-time = "2025-08-12T05:52:45.976Z" }, - { url = "https://files.pythonhosted.org/packages/be/44/a1bd64b723d13bb151d6cc91b986146a1952385e0392a78567e12149c7b4/wrapt-1.17.3-cp314-cp314t-win32.whl", hash = "sha256:41b1d2bc74c2cac6f9074df52b2efbef2b30bdfe5f40cb78f8ca22963bc62977", size = 38717, upload-time = "2025-08-12T05:53:15.214Z" }, - { url = "https://files.pythonhosted.org/packages/79/d9/7cfd5a312760ac4dd8bf0184a6ee9e43c33e47f3dadc303032ce012b8fa3/wrapt-1.17.3-cp314-cp314t-win_amd64.whl", hash = "sha256:73d496de46cd2cdbdbcce4ae4bcdb4afb6a11234a1df9c085249d55166b95116", size = 41334, upload-time = "2025-08-12T05:53:14.178Z" }, - { url = "https://files.pythonhosted.org/packages/46/78/10ad9781128ed2f99dbc474f43283b13fea8ba58723e98844367531c18e9/wrapt-1.17.3-cp314-cp314t-win_arm64.whl", hash = "sha256:f38e60678850c42461d4202739f9bf1e3a737c7ad283638251e79cc49effb6b6", size = 38471, upload-time = "2025-08-12T05:52:57.784Z" }, - { url = "https://files.pythonhosted.org/packages/1f/f6/a933bd70f98e9cf3e08167fc5cd7aaaca49147e48411c0bd5ae701bb2194/wrapt-1.17.3-py3-none-any.whl", hash = "sha256:7171ae35d2c33d326ac19dd8facb1e82e5fd04ef8c6c0e394d7af55a55051c22", size = 23591, upload-time = "2025-08-12T05:53:20.674Z" }, -] - [[package]] name = "xformers" version = "0.0.33.post1"