diff --git a/.gitattributes b/.gitattributes index 9bdda16718..8d8133faa2 100644 --- a/.gitattributes +++ b/.gitattributes @@ -15,3 +15,4 @@ *.mov filter=lfs diff=lfs merge=lfs -text binary *.gif filter=lfs diff=lfs merge=lfs -text binary *.foxe filter=lfs diff=lfs merge=lfs -text binary +docs/**/*.png filter=lfs diff=lfs merge=lfs -text diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 01b23c8606..0d520f20be 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -76,3 +76,11 @@ repos: pass_filenames: false entry: bin/lfs_check language: script + + - id: doclinks + name: Doclinks + always_run: true + pass_filenames: false + entry: python -m dimos.utils.docs.doclinks docs/ + language: system + files: ^docs/.*\.md$ diff --git a/dimos/agents/__init__.py b/dimos/agents/__init__.py index 8e099a21b4..9e1dd2df77 100644 --- a/dimos/agents/__init__.py +++ b/dimos/agents/__init__.py @@ -11,3 +11,5 @@ from dimos.agents.spec import AgentSpec from dimos.protocol.skill.skill import skill from dimos.protocol.skill.type import Output, Reducer, Stream + +__all__ = ["Agent", "AgentSpec", "Output", "Reducer", "Stream", "deploy", "skill"] diff --git a/dimos/agents/skills/gps_nav_skill.py b/dimos/agents/skills/gps_nav_skill.py index 42333b9b90..c7325a5b64 100644 --- a/dimos/agents/skills/gps_nav_skill.py +++ b/dimos/agents/skills/gps_nav_skill.py @@ -84,7 +84,7 @@ def set_gps_travel_points(self, *points: dict[str, float]) -> str: logger.info(f"Set travel points: {new_points}") if self.gps_goal._transport is not None: - self.gps_goal.publish(new_points) + self.gps_goal.publish(new_points) # type: ignore[arg-type] if self._set_gps_travel_goal_points: self._set_gps_travel_goal_points(new_points) diff --git a/dimos/core/introspection/__init__.py b/dimos/core/introspection/__init__.py new file mode 100644 index 0000000000..c40c3d49e6 --- /dev/null +++ b/dimos/core/introspection/__init__.py @@ -0,0 +1,20 @@ +# Copyright 2025 Dimensional Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Module and blueprint introspection utilities.""" + +from dimos.core.introspection.module import INTERNAL_RPCS, render_module_io +from dimos.core.introspection.svg import to_svg + +__all__ = ["INTERNAL_RPCS", "render_module_io", "to_svg"] diff --git a/dimos/core/introspection/blueprint/__init__.py b/dimos/core/introspection/blueprint/__init__.py new file mode 100644 index 0000000000..6545b39dfa --- /dev/null +++ b/dimos/core/introspection/blueprint/__init__.py @@ -0,0 +1,24 @@ +# Copyright 2025 Dimensional Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Blueprint introspection and rendering. 
+ +Renderers: + - dot: Graphviz DOT format (hub-style with type nodes as intermediate hubs) +""" + +from dimos.core.introspection.blueprint import dot +from dimos.core.introspection.blueprint.dot import LayoutAlgo, render_svg + +__all__ = ["LayoutAlgo", "dot", "render_svg"] diff --git a/dimos/core/introspection/blueprint/dot.py b/dimos/core/introspection/blueprint/dot.py new file mode 100644 index 0000000000..4c27c6282d --- /dev/null +++ b/dimos/core/introspection/blueprint/dot.py @@ -0,0 +1,253 @@ +# Copyright 2025-2026 Dimensional Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Hub-style Graphviz DOT renderer for blueprint visualization. 
# Copyright 2025-2026 Dimensional Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Hub-style Graphviz DOT renderer for blueprint visualization.

This renderer creates intermediate "type nodes" for data flow, making it clearer
when one output fans out to multiple consumers:

    ModuleA --> [name:Type] --> ModuleB
                            --> ModuleC
"""

from collections import defaultdict
from enum import Enum, auto

from dimos.core.blueprints import ModuleBlueprintSet
from dimos.core.introspection.utils import (
    GROUP_COLORS,
    TYPE_COLORS,
    color_for_string,
    sanitize_id,
)
from dimos.core.module import Module
from dimos.utils.cli import theme


class LayoutAlgo(Enum):
    """Layout algorithms for controlling graph structure."""

    STACK_CLUSTERS = auto()  # Stack clusters vertically (invisible edges between clusters)
    STACK_NODES = auto()  # Stack nodes within clusters vertically
    FDP = auto()  # Use fdp (force-directed) layout engine instead of dot


# Connections to ignore (too noisy/common)
DEFAULT_IGNORED_CONNECTIONS = {("odom", "PoseStamped")}

DEFAULT_IGNORED_MODULES = {
    "WebsocketVisModule",
    "UtilizationModule",
    # "FoxgloveBridge",
}


def render(
    blueprint_set: ModuleBlueprintSet,
    *,
    layout: set[LayoutAlgo] | None = None,
    ignored_connections: set[tuple[str, str]] | None = None,
    ignored_modules: set[str] | None = None,
) -> str:
    """Generate a hub-style DOT graph from a ModuleBlueprintSet.

    This creates intermediate "type nodes" that represent data channels,
    connecting producers to consumers through a central hub node.

    Args:
        blueprint_set: The blueprint set to visualize.
        layout: Set of layout algorithms to apply. Default is none (let graphviz decide).
        ignored_connections: Set of (name, type_name) tuples to ignore.
        ignored_modules: Set of module names to ignore.

    Returns:
        A string in DOT format showing modules as nodes, type nodes as
        small colored hubs, and edges connecting them.
    """
    if layout is None:
        layout = set()
    if ignored_connections is None:
        ignored_connections = DEFAULT_IGNORED_CONNECTIONS
    if ignored_modules is None:
        ignored_modules = DEFAULT_IGNORED_MODULES

    # (name, type) -> producing / consuming module classes, after remapping.
    producers: dict[tuple[str, type], list[type[Module]]] = defaultdict(list)
    consumers: dict[tuple[str, type], list[type[Module]]] = defaultdict(list)
    # Module class name -> module class (for package/group lookup).
    module_classes: dict[str, type[Module]] = {}

    for blueprint in blueprint_set.blueprints:
        module_cls = blueprint.module
        module_classes[module_cls.__name__] = module_cls
        for connection in blueprint.connections:
            # Apply stream remapping before grouping connections into channels.
            remapped = blueprint_set.remapping_map.get(
                (module_cls, connection.name), connection.name
            )
            bucket = producers if connection.direction == "out" else consumers
            bucket[(remapped, connection.type)].append(module_cls)

    # A channel is "active" when it has at least one non-ignored producer
    # AND at least one non-ignored consumer, and is not explicitly ignored.
    active_channels: dict[tuple[str, type], str] = {}  # key -> edge color
    for key, producer_mods in producers.items():
        name, type_ = key
        type_name = type_.__name__
        if key not in consumers:
            continue
        if (name, type_name) in ignored_connections:
            continue
        if all(m.__name__ in ignored_modules for m in producer_mods):
            continue
        if all(m.__name__ in ignored_modules for m in consumers[key]):
            continue
        label = f"{name}:{type_name}"
        active_channels[key] = color_for_string(TYPE_COLORS, label)

    def get_group(mod_class: type[Module]) -> str:
        # Group by the first package level under "dimos" (e.g. dimos.robot -> "robot").
        parts = mod_class.__module__.split(".")
        if len(parts) >= 2 and parts[0] == "dimos":
            return parts[1]
        return "other"

    by_group: dict[str, list[str]] = defaultdict(list)
    for mod_name, mod_class in module_classes.items():
        if mod_name not in ignored_modules:
            by_group[get_group(mod_class)].append(mod_name)

    # Build DOT output
    lines = [
        "digraph modules {",
        "    bgcolor=transparent;",
        "    rankdir=LR;",
        # "    nodesep=1;",  # horizontal spacing between nodes
        # "    ranksep=1.5;",  # vertical spacing between ranks
        "    splines=true;",
        f'    node [shape=box, style=filled, fillcolor="{theme.BACKGROUND}", fontcolor="{theme.FOREGROUND}", color="{theme.BLUE}", fontname=fixed, fontsize=12, margin="0.1,0.1"];',
        "    edge [fontname=fixed, fontsize=10];",
        "",
    ]

    # One dashed cluster per module group
    sorted_groups = sorted(by_group)
    for group in sorted_groups:
        members = sorted(by_group[group])
        color = color_for_string(GROUP_COLORS, group)
        lines.extend(
            [
                f"    subgraph cluster_{group} {{",
                f'        label="{group}";',
                "        labeljust=r;",
                "        fontname=fixed;",
                "        fontsize=14;",
                f'        fontcolor="{theme.FOREGROUND}";',
                '        style="filled,dashed";',
                f'        color="{color}";',
                "        penwidth=1;",
                f'        fillcolor="{color}10";',
            ]
        )
        lines.extend(f"        {member};" for member in members)
        # Chain members with invisible edges to force a vertical stack.
        if LayoutAlgo.STACK_NODES in layout and len(members) > 1:
            lines.extend(
                f"        {a} -> {b} [style=invis];" for a, b in zip(members, members[1:])
            )
        lines.append("    }")
        lines.append("")

    # Add invisible edges between clusters to force vertical stacking
    if LayoutAlgo.STACK_CLUSTERS in layout and len(sorted_groups) > 1:
        lines.append("    // Force vertical cluster layout")
        for group_a, group_b in zip(sorted_groups, sorted_groups[1:]):
            # Pick the first node from each cluster as the anchor.
            node_a = min(by_group[group_a])
            node_b = min(by_group[group_b])
            lines.append(f"    {node_a} -> {node_b} [style=invis, weight=10];")
        lines.append("")

    def channel_order(item: tuple[tuple[str, type], str]) -> str:
        # Stable ordering by "name:TypeName" so output is deterministic.
        (name, type_), _ = item
        return f"{name}:{type_.__name__}"

    # Type nodes (outside all clusters)
    lines.append("    // Type nodes (data channels)")
    for (name, type_), color in sorted(active_channels.items(), key=channel_order):
        type_name = type_.__name__
        node_id = sanitize_id(f"chan_{name}_{type_name}")
        label = f"{name}:{type_name}"
        lines.append(
            f'    {node_id} [label="{label}", shape=note, style=filled, '
            f'fillcolor="{color}35", color="{color}", fontcolor="{theme.FOREGROUND}", '
            f'width=0, height=0, margin="0.1,0.05", fontsize=10];'
        )

    lines.append("")

    # Edges: producer -> type node -> consumer
    lines.append("    // Edges")
    for (name, type_), color in sorted(active_channels.items(), key=channel_order):
        type_name = type_.__name__
        node_id = sanitize_id(f"chan_{name}_{type_name}")
        key = (name, type_)

        # Producer -> type node (no arrowhead, kept close)
        for producer in producers[key]:
            if producer.__name__ in ignored_modules:
                continue
            lines.append(
                f'    {producer.__name__} -> {node_id} [color="{color}", arrowhead=none];'
            )

        # Type node -> consumer (with arrowhead)
        for consumer in consumers[key]:
            if consumer.__name__ in ignored_modules:
                continue
            lines.append(f'    {node_id} -> {consumer.__name__} [color="{color}"];')

    lines.append("}")
    return "\n".join(lines)


def render_svg(
    blueprint_set: ModuleBlueprintSet,
    output_path: str,
    *,
    layout: set[LayoutAlgo] | None = None,
) -> None:
    """Generate an SVG file from a ModuleBlueprintSet using graphviz.

    Args:
        blueprint_set: The blueprint set to visualize.
        output_path: Path to write the SVG file.
        layout: Set of layout algorithms to apply.

    Raises:
        RuntimeError: If the graphviz process exits non-zero.
    """
    import subprocess

    if layout is None:
        layout = set()

    dot_code = render(blueprint_set, layout=layout)
    # fdp is force-directed; everything else uses the default dot engine.
    engine = "fdp" if LayoutAlgo.FDP in layout else "dot"
    result = subprocess.run(
        [engine, "-Tsvg", "-o", output_path],
        input=dot_code,
        text=True,
        capture_output=True,
    )
    if result.returncode != 0:
        raise RuntimeError(f"graphviz failed: {result.stderr}")
# ---- dimos/core/introspection/module/__init__.py ----

"""Module introspection and rendering.

Renderers:
    - ansi: ANSI terminal output (default)
    - dot: Graphviz DOT format
"""

from dimos.core.introspection.module import ansi, dot
from dimos.core.introspection.module.info import (
    INTERNAL_RPCS,
    ModuleInfo,
    ParamInfo,
    RpcInfo,
    SkillInfo,
    StreamInfo,
    extract_module_info,
)
from dimos.core.introspection.module.render import render_module_io

__all__ = [
    "INTERNAL_RPCS",
    "ModuleInfo",
    "ParamInfo",
    "RpcInfo",
    "SkillInfo",
    "StreamInfo",
    "ansi",
    "dot",
    "extract_module_info",
    "render_module_io",
]

# ---- dimos/core/introspection/module/ansi.py ----

# Copyright 2025-2026 Dimensional Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""ANSI terminal renderer for module IO diagrams."""

from dimos.core import colors
from dimos.core.introspection.module.info import (
    ModuleInfo,
    ParamInfo,
    RpcInfo,
    SkillInfo,
    StreamInfo,
)


def render(info: ModuleInfo, color: bool = True) -> str:
    """Render module info as an ANSI terminal diagram.

    Args:
        info: ModuleInfo structure to render.
        color: Whether to include ANSI color codes.

    Returns:
        ASCII/Unicode diagram with optional ANSI colors.
    """

    def tint(fn):
        # Color helpers become the identity function when color is disabled.
        return fn if color else (lambda text: text)

    _green = tint(colors.green)
    _blue = tint(colors.blue)
    _yellow = tint(colors.yellow)
    _cyan = tint(colors.cyan)

    def _box(name: str) -> list[str]:
        # Box with ┴/┬ connectors so the ├─ branches line up underneath.
        bar = "─" * (len(name) + 1)
        return ["┌┴" + bar + "┐", f"│ {name} │", "└┬" + bar + "┘"]

    def format_stream(stream: StreamInfo) -> str:
        return f"{_yellow(stream.name)}: {_green(stream.type_name)}"

    def format_param(param: ParamInfo) -> str:
        text = param.name
        if param.type_name:
            text += ": " + _green(param.type_name)
        if param.default:
            text += f" = {param.default}"
        return text

    def format_rpc(rpc: RpcInfo) -> str:
        args = ", ".join(format_param(p) for p in rpc.params)
        text = _blue(rpc.name) + f"({args})"
        if rpc.return_type:
            text += " -> " + _green(rpc.return_type)
        return text

    def format_skill(skill: SkillInfo) -> str:
        # Only non-default skill settings are shown in the parenthesized suffix.
        details = []
        if skill.stream:
            details.append(f"stream={skill.stream}")
        if skill.reducer:
            details.append(f"reducer={skill.reducer}")
        if skill.output:
            details.append(f"output={skill.output}")
        suffix = f" ({', '.join(details)})" if details else ""
        return _cyan(skill.name) + suffix

    # Inputs above the box, outputs below, then RPC and skill listings.
    lines = [
        *(f" ├─ {format_stream(s)}" for s in info.inputs),
        *_box(info.name),
        *(f" ├─ {format_stream(s)}" for s in info.outputs),
    ]

    if info.rpcs:
        lines.append(" │")
        lines.extend(f" ├─ RPC {format_rpc(rpc)}" for rpc in info.rpcs)

    if info.skills:
        lines.append(" │")
        lines.extend(f" ├─ Skill {format_skill(skill)}" for skill in info.skills)

    return "\n".join(lines)
# Copyright 2025-2026 Dimensional Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Graphviz DOT renderer for module IO diagrams."""

from dimos.core.introspection.module.info import ModuleInfo
from dimos.core.introspection.utils import (
    RPC_COLOR,
    SKILL_COLOR,
    TYPE_COLORS,
    color_for_string,
    sanitize_id,
)
from dimos.utils.cli import theme


def render(info: ModuleInfo) -> str:
    """Render module info as a DOT graph.

    Shows the module as a central node with input streams as nodes
    pointing in and output streams as nodes pointing out.

    Args:
        info: ModuleInfo structure to render.

    Returns:
        DOT format string.
    """
    lines = [
        "digraph module {",
        "    bgcolor=transparent;",
        "    rankdir=LR;",
        "    compound=true;",
        "    splines=true;",
        f'    node [shape=box, style=filled, fillcolor="{theme.BACKGROUND}", fontcolor="{theme.FOREGROUND}", color="{theme.BLUE}", fontname=fixed, fontsize=12, margin="0.1,0.1"];',
        "    edge [fontname=fixed, fontsize=10, penwidth=1];",
        "",
    ]

    # Module node (central, larger)
    module_id = sanitize_id(info.name)
    lines.append(f'    {module_id} [label="{info.name}", width=2, height=0.8];')
    lines.append("")

    # Input stream nodes (on the left)
    if info.inputs:
        lines.append("    // Input streams")
        lines.append("    subgraph cluster_inputs {")
        lines.append('        label="";')
        lines.append("        style=invis;")
        lines.append('        rank="same";')
        for stream in info.inputs:
            label = f"{stream.name}:{stream.type_name}"
            color = color_for_string(TYPE_COLORS, label)
            node_id = sanitize_id(f"in_{stream.name}")
            lines.append(
                f'        {node_id} [label="{label}", shape=note, style=filled, '
                f'fillcolor="{color}35", color="{color}", '
                f'width=0, height=0, margin="0.1,0.05", fontsize=10];'
            )
        lines.append("    }")
        lines.append("")

    # Output stream nodes (on the right)
    if info.outputs:
        lines.append("    // Output streams")
        lines.append("    subgraph cluster_outputs {")
        lines.append('        label="";')
        lines.append("        style=invis;")
        lines.append('        rank="same";')
        for stream in info.outputs:
            label = f"{stream.name}:{stream.type_name}"
            color = color_for_string(TYPE_COLORS, label)
            node_id = sanitize_id(f"out_{stream.name}")
            lines.append(
                f'        {node_id} [label="{label}", shape=note, style=filled, '
                f'fillcolor="{color}35", color="{color}", '
                f'width=0, height=0, margin="0.1,0.05", fontsize=10];'
            )
        lines.append("    }")
        lines.append("")

    # RPC nodes (in subgraph)
    if info.rpcs:
        lines.append("    // RPCs")
        lines.append("    subgraph cluster_rpcs {")
        lines.append('        label="RPCs";')
        lines.append("        labeljust=l;")
        lines.append("        fontname=fixed;")
        lines.append("        fontsize=14;")
        lines.append(f'        fontcolor="{theme.FOREGROUND}";')
        lines.append('        style="filled,dashed";')
        lines.append(f'        color="{RPC_COLOR}";')
        lines.append("        penwidth=1;")
        lines.append(f'        fillcolor="{RPC_COLOR}10";')
        for rpc in info.rpcs:
            params = ", ".join(
                f"{p.name}: {p.type_name}" if p.type_name else p.name for p in rpc.params
            )
            ret = f" -> {rpc.return_type}" if rpc.return_type else ""
            label = f"{rpc.name}({params}){ret}"
            node_id = sanitize_id(f"rpc_{rpc.name}")
            lines.append(
                f'        {node_id} [label="{label}", shape=cds, style=filled, '
                f'fillcolor="{RPC_COLOR}35", color="{RPC_COLOR}", '
                f'width=0, height=0, margin="0.1,0.05", fontsize=9];'
            )
        lines.append("    }")
        lines.append("")

    # Skill nodes (in subgraph)
    if info.skills:
        lines.append("    // Skills")
        lines.append("    subgraph cluster_skills {")
        lines.append('        label="Skills";')
        lines.append("        labeljust=l;")
        lines.append("        fontname=fixed;")
        lines.append("        fontsize=14;")
        lines.append(f'        fontcolor="{theme.FOREGROUND}";')
        lines.append('        style="filled,dashed";')
        lines.append(f'        color="{SKILL_COLOR}";')
        lines.append("        penwidth=1;")
        lines.append(f'        fillcolor="{SKILL_COLOR}20";')
        for skill in info.skills:
            parts = [skill.name]
            if skill.stream:
                parts.append(f"stream={skill.stream}")
            if skill.reducer:
                parts.append(f"reducer={skill.reducer}")
            label = " ".join(parts)
            node_id = sanitize_id(f"skill_{skill.name}")
            lines.append(
                f'        {node_id} [label="{label}", shape=cds, style=filled, '
                f'fillcolor="{SKILL_COLOR}35", color="{SKILL_COLOR}", '
                f'width=0, height=0, margin="0.1,0.05", fontsize=9];'
            )
        lines.append("    }")
        lines.append("")

    # Edges: inputs -> module
    lines.append("    // Edges")
    for stream in info.inputs:
        label = f"{stream.name}:{stream.type_name}"
        color = color_for_string(TYPE_COLORS, label)
        node_id = sanitize_id(f"in_{stream.name}")
        lines.append(f'    {node_id} -> {module_id} [color="{color}"];')

    # Edges: module -> outputs
    for stream in info.outputs:
        label = f"{stream.name}:{stream.type_name}"
        color = color_for_string(TYPE_COLORS, label)
        node_id = sanitize_id(f"out_{stream.name}")
        lines.append(f'    {module_id} -> {node_id} [color="{color}"];')

    # Edge: module -> RPCs cluster (dashed, no arrow)
    # BUG FIX: the two f-string fragments used to concatenate as
    # `weight=3color="..."` (missing ", " separator), which is not a valid
    # DOT attribute list; also use style=dashed to match the stated intent
    # (style=filled is not meaningful for edges).
    if info.rpcs:
        first_rpc_id = sanitize_id(f"rpc_{info.rpcs[0].name}")
        lines.append(
            f"    {module_id} -> {first_rpc_id} [lhead=cluster_rpcs, style=dashed, weight=3, "
            f'color="{RPC_COLOR}", arrowhead=none];'
        )

    # Edge: module -> Skills cluster (dashed, no arrow); same separator fix.
    if info.skills:
        first_skill_id = sanitize_id(f"skill_{info.skills[0].name}")
        lines.append(
            f"    {module_id} -> {first_skill_id} [lhead=cluster_skills, style=dashed, weight=3, "
            f'color="{SKILL_COLOR}", arrowhead=none];'
        )

    lines.append("}")
    return "\n".join(lines)


def render_svg(info: ModuleInfo, output_path: str) -> None:
    """Generate an SVG file from ModuleInfo using graphviz.

    Args:
        info: ModuleInfo structure to render.
        output_path: Path to write the SVG file.

    Raises:
        RuntimeError: If the graphviz process exits non-zero.
    """
    import subprocess

    dot_code = render(info)
    result = subprocess.run(
        ["dot", "-Tsvg", "-o", output_path],
        input=dot_code,
        text=True,
        capture_output=True,
    )
    if result.returncode != 0:
        raise RuntimeError(f"graphviz failed: {result.stderr}")
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Module introspection data structures.""" + +from collections.abc import Callable +from dataclasses import dataclass, field +import inspect +from typing import Any + +# Internal RPCs to hide from io() output +INTERNAL_RPCS = { + "dynamic_skills", + "get_rpc_method_names", + "set_rpc_method", + "skills", + "_io_instance", +} + + +@dataclass +class StreamInfo: + """Information about a module stream (input or output).""" + + name: str + type_name: str + + +@dataclass +class ParamInfo: + """Information about an RPC parameter.""" + + name: str + type_name: str | None = None + default: str | None = None + + +@dataclass +class RpcInfo: + """Information about an RPC method.""" + + name: str + params: list[ParamInfo] = field(default_factory=list) + return_type: str | None = None + + +@dataclass +class SkillInfo: + """Information about a skill.""" + + name: str + stream: str | None = None # None means "none" + reducer: str | None = None # None means "latest" + output: str | None = None # None means "standard" + + +@dataclass +class ModuleInfo: + """Extracted information about a module's IO interface.""" + + name: str + inputs: list[StreamInfo] = field(default_factory=list) + outputs: list[StreamInfo] = field(default_factory=list) + rpcs: list[RpcInfo] = field(default_factory=list) + skills: list[SkillInfo] = field(default_factory=list) + + +def extract_rpc_info(fn: Callable) -> RpcInfo: # type: ignore[type-arg] + """Extract RPC information from a callable.""" + sig = inspect.signature(fn) + params = [] + + for pname, p in sig.parameters.items(): + if pname == 
"self": + continue + type_name = None + if p.annotation != inspect.Parameter.empty: + type_name = getattr(p.annotation, "__name__", str(p.annotation)) + default = None + if p.default != inspect.Parameter.empty: + default = str(p.default) + params.append(ParamInfo(name=pname, type_name=type_name, default=default)) + + return_type = None + if sig.return_annotation != inspect.Signature.empty: + return_type = getattr(sig.return_annotation, "__name__", str(sig.return_annotation)) + + return RpcInfo(name=fn.__name__, params=params, return_type=return_type) + + +def extract_skill_info(fn: Callable) -> SkillInfo: # type: ignore[type-arg] + """Extract skill information from a skill-decorated callable.""" + cfg = fn._skill_config # type: ignore[attr-defined] + + stream = cfg.stream.name if cfg.stream.name != "none" else None + reducer_name = getattr(cfg.reducer, "__name__", str(cfg.reducer)) + reducer = reducer_name if reducer_name != "latest" else None + output = cfg.output.name if cfg.output.name != "standard" else None + + return SkillInfo(name=fn.__name__, stream=stream, reducer=reducer, output=output) + + +def extract_module_info( + name: str, + inputs: dict[str, Any], + outputs: dict[str, Any], + rpcs: dict[str, Callable], # type: ignore[type-arg] +) -> ModuleInfo: + """Extract module information into a ModuleInfo structure. + + Args: + name: Module class name. + inputs: Dict of input stream name -> stream object or formatted string. + outputs: Dict of output stream name -> stream object or formatted string. + rpcs: Dict of RPC method name -> callable. + + Returns: + ModuleInfo with extracted data. 
+ """ + + # Extract stream info + def stream_info(stream: Any, stream_name: str) -> StreamInfo: + if isinstance(stream, str): + # Pre-formatted string like "name: Type" - parse it + # Strip ANSI codes for parsing + import re + + clean = re.sub(r"\x1b\[[0-9;]*m", "", stream) + if ": " in clean: + parts = clean.split(": ", 1) + return StreamInfo(name=parts[0], type_name=parts[1]) + return StreamInfo(name=stream_name, type_name=clean) + # Instance stream object + return StreamInfo(name=stream.name, type_name=stream.type.__name__) + + input_infos = [stream_info(s, n) for n, s in inputs.items()] + output_infos = [stream_info(s, n) for n, s in outputs.items()] + + # Separate skills from regular RPCs, filtering internal ones + rpc_infos = [] + skill_infos = [] + + for rpc_name, rpc_fn in rpcs.items(): + if rpc_name in INTERNAL_RPCS: + continue + if hasattr(rpc_fn, "_skill_config"): + skill_infos.append(extract_skill_info(rpc_fn)) + else: + rpc_infos.append(extract_rpc_info(rpc_fn)) + + return ModuleInfo( + name=name, + inputs=input_infos, + outputs=output_infos, + rpcs=rpc_infos, + skills=skill_infos, + ) diff --git a/dimos/core/introspection/module/render.py b/dimos/core/introspection/module/render.py new file mode 100644 index 0000000000..8e87a5b202 --- /dev/null +++ b/dimos/core/introspection/module/render.py @@ -0,0 +1,44 @@ +# Copyright 2025-2026 Dimensional Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Convenience rendering functions for module introspection."""

from collections.abc import Callable
from typing import Any

from dimos.core.introspection.module import ansi
from dimos.core.introspection.module.info import extract_module_info


def render_module_io(
    name: str,
    inputs: dict[str, Any],
    outputs: dict[str, Any],
    rpcs: dict[str, Callable],  # type: ignore[type-arg]
    color: bool = True,
) -> str:
    """Render module IO diagram using the default (ANSI) renderer.

    Args:
        name: Module class name.
        inputs: Dict of input stream name -> stream object or formatted string.
        outputs: Dict of output stream name -> stream object or formatted string.
        rpcs: Dict of RPC method name -> callable.
        color: Whether to include ANSI color codes.

    Returns:
        ASCII diagram showing module inputs, outputs, RPCs, and skills.
    """
    module_info = extract_module_info(name, inputs, outputs, rpcs)
    return ansi.render(module_info, color=color)
+ +"""Unified SVG rendering for modules and blueprints.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from dimos.core.blueprints import ModuleBlueprintSet + from dimos.core.introspection.blueprint.dot import LayoutAlgo + from dimos.core.introspection.module.info import ModuleInfo + + +def to_svg( + target: ModuleInfo | ModuleBlueprintSet, + output_path: str, + *, + layout: set[LayoutAlgo] | None = None, +) -> None: + """Render a module or blueprint to SVG. + + Dispatches to the appropriate renderer based on input type: + - ModuleInfo -> module/dot.render_svg + - ModuleBlueprintSet -> blueprint/dot.render_svg + + Args: + target: Either a ModuleInfo (single module) or ModuleBlueprintSet (blueprint graph). + output_path: Path to write the SVG file. + layout: Layout algorithms (only used for blueprints). + """ + # Avoid circular imports by importing here + from dimos.core.blueprints import ModuleBlueprintSet + from dimos.core.introspection.module.info import ModuleInfo + + if isinstance(target, ModuleInfo): + from dimos.core.introspection.module import dot as module_dot + + module_dot.render_svg(target, output_path) + elif isinstance(target, ModuleBlueprintSet): + from dimos.core.introspection.blueprint import dot as blueprint_dot + + blueprint_dot.render_svg(target, output_path, layout=layout) + else: + raise TypeError(f"Expected ModuleInfo or ModuleBlueprintSet, got {type(target).__name__}") diff --git a/dimos/core/introspection/utils.py b/dimos/core/introspection/utils.py new file mode 100644 index 0000000000..166933b80c --- /dev/null +++ b/dimos/core/introspection/utils.py @@ -0,0 +1,86 @@ +# Copyright 2025-2026 Dimensional Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
# Pre-compiled pattern matching every character graphviz node IDs disallow.
_INVALID_ID_CHARS = re.compile(r"[^a-zA-Z0-9_]")


def color_for_string(colors: list[str], s: str) -> str:
    """Deterministically map *s* onto one entry of *colors*.

    Hashes the string (MD5, non-cryptographic use) so the same string always
    receives the same color from the palette.
    """
    digest = hashlib.md5(s.encode()).hexdigest()
    return colors[int(digest, 16) % len(colors)]


def sanitize_id(s: str) -> str:
    """Replace every character that is not a valid graphviz node-ID character with '_'."""
    return _INVALID_ID_CHARS.sub("_", s)
    @rpc
    def _io_instance(self, color: bool = True) -> str:
        """Instance-level io() - shows actual running streams.

        Renders this instance's live inputs, outputs, and RPCs as an ASCII
        diagram via the shared introspection renderer.

        Args:
            color: Whether to include ANSI color codes in the diagram.

        Returns:
            ASCII diagram built from the instance's actual stream objects.
        """
        return render_module_io(
            name=self.__class__.__name__,
            inputs=self.inputs,
            outputs=self.outputs,
            rpcs=self.rpcs,
            color=color,
        )
+ return f"{_yellow(name)}: {_green(type_name)}" + + inputs = { + name: format_stream(name, hint) for name, hint in hints.items() if is_stream(hint, In) + } + outputs = { + name: format_stream(name, hint) for name, hint in hints.items() if is_stream(hint, Out) + } + + return render_module_io( + name=cls.__name__, + inputs=inputs, + outputs=outputs, + rpcs=cls.rpcs, + color=color, + ) + + class _io_descriptor: + """Descriptor that makes io() work on both class and instance.""" + + def __get__( + self, obj: "ModuleBase | None", objtype: type["ModuleBase"] + ) -> Callable[[bool], str]: + if obj is None: + return objtype._io_class + return obj._io_instance - ret = [ - *(f" ├─ {stream}" for stream in self.inputs.values()), - *_box(self.__class__.__name__), - *(f" ├─ {stream}" for stream in self.outputs.values()), - " │", - *(f" ├─ {repr_rpc(rpc)}" for rpc in self.rpcs.values()), - ] + io = _io_descriptor() + + @classmethod + def _module_info_class(cls) -> "ModuleInfo": + """Class-level module_info() - returns ModuleInfo from annotations.""" + from dimos.core.introspection.module import ModuleInfo + + hints = get_type_hints(cls) + + def is_stream(hint: type, stream_type: type) -> bool: + origin = get_origin(hint) + if origin is stream_type: + return True + if isinstance(hint, type) and issubclass(hint, stream_type): + return True + return False + + def format_stream(name: str, hint: type) -> str: + args = get_args(hint) + type_name = args[0].__name__ if args else "?" 
+ return f"{name}: {type_name}" + + inputs = { + name: format_stream(name, hint) for name, hint in hints.items() if is_stream(hint, In) + } + outputs = { + name: format_stream(name, hint) for name, hint in hints.items() if is_stream(hint, Out) + } + + return extract_module_info( + name=cls.__name__, + inputs=inputs, + outputs=outputs, + rpcs=cls.rpcs, + ) + + class _module_info_descriptor: + """Descriptor that makes module_info() work on both class and instance.""" + + def __get__( + self, obj: "ModuleBase | None", objtype: type["ModuleBase"] + ) -> Callable[[], "ModuleInfo"]: + if obj is None: + return objtype._module_info_class + # For instances, extract from actual streams + return lambda: extract_module_info( + name=obj.__class__.__name__, + inputs=obj.inputs, + outputs=obj.outputs, + rpcs=obj.rpcs, + ) - return "\n".join(ret) + module_info = _module_info_descriptor() @classproperty def blueprint(self): # type: ignore[no-untyped-def] diff --git a/dimos/core/stream.py b/dimos/core/stream.py index 29db16c655..9530ab7c32 100644 --- a/dimos/core/stream.py +++ b/dimos/core/stream.py @@ -82,14 +82,16 @@ class State(enum.Enum): class Transport(Resource, ObservableMixin[T]): # used by local Output - def broadcast(self, selfstream: Out[T], value: T) -> None: ... + def broadcast(self, selfstream: Out[T], value: T) -> None: + raise NotImplementedError + + # used by local Input + def subscribe(self, callback: Callable[[T], Any], selfstream: Stream[T]) -> Callable[[], None]: + raise NotImplementedError def publish(self, msg: T) -> None: self.broadcast(None, msg) # type: ignore[arg-type] - # used by local Input - def subscribe(self, selfstream: In[T], callback: Callable[[T], any]) -> None: ... 
# type: ignore[valid-type] - class Stream(Generic[T]): _transport: Transport | None # type: ignore[type-arg] @@ -139,9 +141,11 @@ def __str__(self) -> str: class Out(Stream[T], ObservableMixin[T]): _transport: Transport # type: ignore[type-arg] + _subscribers: list[Callable[[T], Any]] def __init__(self, *argv, **kwargs) -> None: # type: ignore[no-untyped-def] super().__init__(*argv, **kwargs) + self._subscribers = [] @property def transport(self) -> Transport[T]: @@ -168,22 +172,19 @@ def __reduce__(self): # type: ignore[no-untyped-def] ), ) - def publish(self, msg) -> None: # type: ignore[no-untyped-def] - if not hasattr(self, "_transport") or self._transport is None: - logger.warning(f"Trying to publish on Out {self} without a transport") - return - self._transport.broadcast(self, msg) + def publish(self, msg: T) -> None: + if hasattr(self, "_transport") and self._transport is not None: + self._transport.broadcast(self, msg) + for cb in self._subscribers: + cb(msg) - def subscribe(self, cb) -> Callable[[], None]: # type: ignore[no-untyped-def] - """Subscribe to this output stream. 
    def subscribe(self, cb: Callable[[T], Any]) -> Callable[[], None]:
        """Register *cb* to receive every message passed to publish().

        Args:
            cb: Callback invoked with each published message.

        Returns:
            A zero-argument function that removes *cb* from the subscriber list.
        """
        self._subscribers.append(cb)

        def unsubscribe() -> None:
            self._subscribers.remove(cb)

        return unsubscribe
+ value.subscribe(self.transport.publish) # type: ignore[arg-type] @property def state(self) -> State: return State.UNBOUND if self.owner is None else State.READY # returns unsubscribe function - def subscribe(self, cb) -> Callable[[], None]: # type: ignore[no-untyped-def] - return self.transport.subscribe(cb, self) # type: ignore[arg-type, func-returns-value, no-any-return] + def subscribe(self, cb: Callable[[T], Any]) -> Callable[[], None]: + return self.transport.subscribe(cb, self) # representation of input outside of module diff --git a/dimos/core/test_core.py b/dimos/core/test_core.py index 60dc98d13a..597b580c5c 100644 --- a/dimos/core/test_core.py +++ b/dimos/core/test_core.py @@ -87,7 +87,7 @@ def test_classmethods() -> None: # Check that we have the expected RPC methods assert "navigate_to" in class_rpcs, "navigate_to should be in rpcs" assert "start" in class_rpcs, "start should be in rpcs" - assert len(class_rpcs) == 9 + assert len(class_rpcs) == 8 # Check that the values are callable assert callable(class_rpcs["navigate_to"]), "navigate_to should be callable" diff --git a/dimos/core/transport.py b/dimos/core/transport.py index ec6be5ed41..8ffbfc91f4 100644 --- a/dimos/core/transport.py +++ b/dimos/core/transport.py @@ -25,7 +25,7 @@ TypeVar, ) -from dimos.core.stream import In, Transport +from dimos.core.stream import In, Out, Stream, Transport from dimos.protocol.pubsub.jpeg_shm import JpegSharedMemory from dimos.protocol.pubsub.lcmpubsub import LCM, JpegLCM, PickleLCM, Topic as LCMTopic from dimos.protocol.pubsub.shmpubsub import PickleSharedMemory, SharedMemory @@ -60,18 +60,20 @@ def __init__(self, topic: str, **kwargs) -> None: # type: ignore[no-untyped-def def __reduce__(self): # type: ignore[no-untyped-def] return (pLCMTransport, (self.topic,)) - def broadcast(self, _, msg) -> None: # type: ignore[no-untyped-def] + def broadcast(self, _: Out[T] | None, msg: T) -> None: if not self._started: self.lcm.start() self._started = True 
    def subscribe(
        self, callback: Callable[[T], Any], selfstream: Stream[T] | None = None
    ) -> Callable[[], None]:
        """Subscribe *callback* to this transport's LCM topic.

        Args:
            callback: Invoked with each message received on the topic.
            selfstream: Accepted for Transport interface compatibility; unused here.

        Returns:
            The handle returned by the underlying LCM subscription — per the
            declared return type, presumably an unsubscribe function; verify
            against the LCM wrapper.
        """
        # Lazily start the LCM loop on first use, mirroring broadcast().
        if not self._started:
            self.lcm.start()
            self._started = True
        # The LCM handler receives (msg, topic); drop the topic for the Transport API.
        return self.lcm.subscribe(self.topic, lambda msg, topic: callback(msg))
0000000000..29198aea24 --- /dev/null +++ b/dimos/hardware/sensors/camera/gstreamer/readme.md @@ -0,0 +1 @@ +This gstreamer stuff is obsoleted but could be adopted as an alternative hardware for camera module if needed diff --git a/dimos/hardware/sensors/camera/module.py b/dimos/hardware/sensors/camera/module.py index 4232fe1f49..de2c3b8c78 100644 --- a/dimos/hardware/sensors/camera/module.py +++ b/dimos/hardware/sensors/camera/module.py @@ -12,29 +12,27 @@ # See the License for the specific language governing permissions and # limitations under the License. -from collections.abc import Callable +from collections.abc import Callable, Generator from dataclasses import dataclass, field -import queue import time +from typing import Any import reactivex as rx from reactivex import operators as ops -from reactivex.disposable import Disposable from reactivex.observable import Observable -from dimos import spec -from dimos.agents import Output, Reducer, Stream, skill # type: ignore[attr-defined] +from dimos.agents import Output, Reducer, Stream, skill from dimos.core import Module, ModuleConfig, Out, rpc from dimos.hardware.sensors.camera.spec import CameraHardware from dimos.hardware.sensors.camera.webcam import Webcam from dimos.msgs.geometry_msgs import Quaternion, Transform, Vector3 -from dimos.msgs.sensor_msgs import Image from dimos.msgs.sensor_msgs.CameraInfo import CameraInfo from dimos.msgs.sensor_msgs.Image import Image, sharpness_barrier -from dimos.spec import perception as spec # type: ignore[no-redef] +from dimos.spec import perception +from dimos.utils.reactive import iter_observable -def default_transform(): # type: ignore[no-untyped-def] +def default_transform() -> Transform: return Transform( translation=Vector3(0.0, 0.0, 0.0), rotation=Quaternion(0.0, 0.0, 0.0, 1.0), @@ -47,81 +45,52 @@ def default_transform(): # type: ignore[no-untyped-def] class CameraModuleConfig(ModuleConfig): frame_id: str = "camera_link" transform: Transform | None = 
field(default_factory=default_transform) - hardware: Callable[[], CameraHardware] | CameraHardware = Webcam # type: ignore[type-arg] - frequency: float = 5.0 + hardware: Callable[[], CameraHardware[Any]] | CameraHardware[Any] = Webcam + frequency: float = 0.0 # Hz, 0 means no limit -class CameraModule(Module[CameraModuleConfig], spec.Camera): +class CameraModule(Module[CameraModuleConfig], perception.Camera): color_image: Out[Image] camera_info: Out[CameraInfo] - hardware: CameraHardware = None # type: ignore[assignment, type-arg] - _module_subscription: Disposable | None = None - _camera_info_subscription: Disposable | None = None - _skill_stream: Observable[Image] | None = None + hardware: CameraHardware[Any] config: CameraModuleConfig default_config = CameraModuleConfig - def __init__(self, *args, **kwargs): # type: ignore[no-untyped-def] + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) - @property - def hardware_camera_info(self) -> CameraInfo: - return self.hardware.camera_info - @rpc - def start(self): # type: ignore[no-untyped-def] + def start(self) -> None: if callable(self.config.hardware): self.hardware = self.config.hardware() else: self.hardware = self.config.hardware - if self._module_subscription: - return "already started" - - stream = self.hardware.image_stream().pipe(sharpness_barrier(self.config.frequency)) # type: ignore[attr-defined] - self._disposables.add(stream.subscribe(self.color_image.publish)) - - # camera_info_stream = self.camera_info_stream(frequency=5.0) - - def publish_info(camera_info: CameraInfo) -> None: - self.camera_info.publish(camera_info) - - if self.config.transform is None: - return - - camera_link = self.config.transform - camera_link.ts = camera_info.ts - camera_optical = Transform( - translation=Vector3(0.0, 0.0, 0.0), - rotation=Quaternion(-0.5, 0.5, -0.5, 0.5), - frame_id="camera_link", - child_frame_id="camera_optical", - ts=camera_link.ts, - ) - - self.tf.publish(camera_link, 
camera_optical) + stream = self.hardware.image_stream() - self._camera_info_subscription = self.camera_info_stream().subscribe(publish_info) # type: ignore[assignment] - self._module_subscription = stream.subscribe(self.color_image.publish) # type: ignore[attr-defined] + if self.config.frequency > 0: + stream = stream.pipe(sharpness_barrier(self.config.frequency)) - @skill(stream=Stream.passive, output=Output.image, reducer=Reducer.latest) # type: ignore[arg-type] - def video_stream(self) -> Image: # type: ignore[misc] - """implicit video stream skill""" - _queue = queue.Queue(maxsize=1) # type: ignore[var-annotated] - self.hardware.image_stream().subscribe(_queue.put) + self._disposables.add( + stream.subscribe(self.color_image.publish), + ) - yield from iter(_queue.get, None) + self._disposables.add( + rx.interval(1.0).subscribe(lambda _: self.publish_metadata()), + ) - def publish_info(self, camera_info: CameraInfo) -> None: + def publish_metadata(self) -> None: + camera_info = self.hardware.camera_info.with_ts(time.time()) self.camera_info.publish(camera_info) - if self.config.transform is None: + if not self.config.transform: return camera_link = self.config.transform camera_link.ts = camera_info.ts + camera_optical = Transform( translation=Vector3(0.0, 0.0, 0.0), rotation=Quaternion(-0.5, 0.5, -0.5, 0.5), @@ -132,21 +101,13 @@ def publish_info(self, camera_info: CameraInfo) -> None: self.tf.publish(camera_link, camera_optical) - def camera_info_stream(self, frequency: float = 1.0) -> Observable[CameraInfo]: - def camera_info(_) -> CameraInfo: # type: ignore[no-untyped-def] - self.hardware.camera_info.ts = time.time() - return self.hardware.camera_info - - return rx.interval(1.0 / frequency).pipe(ops.map(camera_info)) - - def stop(self): # type: ignore[no-untyped-def] - if self._module_subscription: - self._module_subscription.dispose() - self._module_subscription = None - if self._camera_info_subscription: - self._camera_info_subscription.dispose() - 
self._camera_info_subscription = None - # Also stop the hardware if it has a stop method + # actually skills should support on_demand passive skills so we don't emit this periodically + # but just provide the latest frame on demand + @skill(stream=Stream.passive, output=Output.image, reducer=Reducer.latest) # type: ignore[arg-type] + def video_stream(self) -> Generator[Image, None, None]: + yield from iter_observable(self.hardware.image_stream().pipe(ops.sample(1.0))) + + def stop(self) -> None: if self.hardware and hasattr(self.hardware, "stop"): self.hardware.stop() super().stop() diff --git a/dimos/hardware/sensors/camera/test_webcam.py b/dimos/hardware/sensors/camera/test_webcam.py index e9f544e791..0d1a1d0040 100644 --- a/dimos/hardware/sensors/camera/test_webcam.py +++ b/dimos/hardware/sensors/camera/test_webcam.py @@ -24,10 +24,15 @@ from dimos.msgs.sensor_msgs import CameraInfo, Image -@pytest.mark.tool -def test_streaming_single() -> None: - dimos = core.start(1) +@pytest.fixture +def dimos(): + dimos_instance = core.start(1) + yield dimos_instance + dimos_instance.stop() + +@pytest.mark.tool +def test_streaming_single(dimos) -> None: camera = dimos.deploy( CameraModule, transform=Transform( @@ -37,15 +42,14 @@ def test_streaming_single() -> None: child_frame_id="camera_link", ), hardware=lambda: Webcam( - stereo_slice="left", camera_index=0, - frequency=15, + frequency=0.0, # full speed but set something to test sharpness barrier camera_info=zed.CameraInfo.SingleWebcam, ), ) - camera.image.transport = core.LCMTransport("/image1", Image) - camera.camera_info.transport = core.LCMTransport("/image1/camera_info", CameraInfo) + camera.color_image.transport = core.LCMTransport("/color_image", Image) + camera.camera_info.transport = core.LCMTransport("/camera_info", CameraInfo) camera.start() try: @@ -54,55 +58,3 @@ def test_streaming_single() -> None: except KeyboardInterrupt: camera.stop() dimos.stop() - - -@pytest.mark.tool -def test_streaming_double() -> 
None: - dimos = core.start(2) - - camera1 = dimos.deploy( - CameraModule, - transform=Transform( - translation=Vector3(0.05, 0.0, 0.0), - rotation=Quaternion(0.0, 0.0, 0.0, 1.0), - frame_id="sensor", - child_frame_id="camera_link", - ), - hardware=lambda: Webcam( - stereo_slice="left", - camera_index=0, - frequency=15, - camera_info=zed.CameraInfo.SingleWebcam, - ), - ) - - camera2 = dimos.deploy( - CameraModule, - transform=Transform( - translation=Vector3(0.05, 0.0, 0.0), - rotation=Quaternion(0.0, 0.0, 0.0, 1.0), - frame_id="sensor", - child_frame_id="camera_link", - ), - hardware=lambda: Webcam( - camera_index=4, - frequency=15, - stereo_slice="left", - camera_info=zed.CameraInfo.SingleWebcam, - ), - ) - - camera1.image.transport = core.LCMTransport("/image1", Image) - camera1.camera_info.transport = core.LCMTransport("/image1/camera_info", CameraInfo) - camera1.start() - camera2.image.transport = core.LCMTransport("/image2", Image) - camera2.camera_info.transport = core.LCMTransport("/image2/camera_info", CameraInfo) - camera2.start() - - try: - while True: - time.sleep(1) - except KeyboardInterrupt: - camera1.stop() - camera2.stop() - dimos.stop() diff --git a/dimos/hardware/sensors/sensor.py b/dimos/hardware/sensors/sensor.py deleted file mode 100644 index dc86d93e56..0000000000 --- a/dimos/hardware/sensors/sensor.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright 2025-2026 Dimensional Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from abc import ABC, abstractmethod - - -class AbstractSensor(ABC): - def __init__(self, sensor_type=None) -> None: # type: ignore[no-untyped-def] - self.sensor_type = sensor_type - - @abstractmethod - def get_sensor_type(self): # type: ignore[no-untyped-def] - """Return the type of sensor.""" - pass - - @abstractmethod - def calculate_intrinsics(self): # type: ignore[no-untyped-def] - """Calculate the sensor's intrinsics.""" - pass - - @abstractmethod - def get_intrinsics(self): # type: ignore[no-untyped-def] - """Return the sensor's intrinsics.""" - pass diff --git a/dimos/msgs/geometry_msgs/Transform.py b/dimos/msgs/geometry_msgs/Transform.py index cb112ee5c9..3a52f5a8c0 100644 --- a/dimos/msgs/geometry_msgs/Transform.py +++ b/dimos/msgs/geometry_msgs/Transform.py @@ -79,7 +79,7 @@ def __repr__(self) -> str: return f"Transform(translation={self.translation!r}, rotation={self.rotation!r})" def __str__(self) -> str: - return f"Transform:\n {self.frame_id} -> {self.child_frame_id} Translation: {self.translation}\n Rotation: {self.rotation}" + return f"{self.frame_id} -> {self.child_frame_id}\n Translation: {self.translation}\n Rotation: {self.rotation}" def __eq__(self, other) -> bool: # type: ignore[no-untyped-def] """Check if two transforms are equal.""" diff --git a/dimos/msgs/geometry_msgs/test_Transform.py b/dimos/msgs/geometry_msgs/test_Transform.py index be3baee6cb..2a1daff684 100644 --- a/dimos/msgs/geometry_msgs/test_Transform.py +++ b/dimos/msgs/geometry_msgs/test_Transform.py @@ -114,7 +114,6 @@ def test_transform_string_representations() -> None: # Test str str_str = str(tf) - assert "Transform:" in str_str assert "Translation:" in str_str assert "Rotation:" in str_str diff --git a/dimos/msgs/sensor_msgs/CameraInfo.py b/dimos/msgs/sensor_msgs/CameraInfo.py index c54b6565fa..b6f85dbaca 100644 --- a/dimos/msgs/sensor_msgs/CameraInfo.py +++ b/dimos/msgs/sensor_msgs/CameraInfo.py @@ -99,6 +99,29 @@ def __init__( self.roi_width = 0 self.roi_do_rectify = 
    def with_ts(self, ts: float) -> CameraInfo:
        """Return a copy of this CameraInfo with the given timestamp.

        Args:
            ts: New timestamp

        Returns:
            New CameraInfo instance with updated timestamp
        """
        # Calibration arrays are copied so the new instance does not alias
        # this instance's mutable state.
        # NOTE(review): the ROI fields (roi_*) are reset to constructor
        # defaults (0 / False) in the copy — confirm this is intentional.
        return CameraInfo(
            height=self.height,
            width=self.width,
            distortion_model=self.distortion_model,
            D=self.D.copy(),
            K=self.K.copy(),
            R=self.R.copy(),
            P=self.P.copy(),
            binning_x=self.binning_x,
            binning_y=self.binning_y,
            frame_id=self.frame_id,
            ts=ts,
        )
@@ -0,0 +1,96 @@ +# doclinks + +A Markdown link resolver that automatically fills in correct file paths for code references in documentation. + +## What it does + +When writing docs, you can use placeholder links like: + + +```markdown +See [`service/spec.py`]() for the implementation. +``` + + +Running `doclinks` resolves these to actual paths: + + +```markdown +See [`service/spec.py`](/dimos/protocol/service/spec.py) for the implementation. +``` + + +## Features + + +- **Code file links**: `[`filename.py`]()` resolves to the file's path +- **Symbol line linking**: If another backticked term appears on the same line, it finds that symbol in the file and adds `#L`: + ```markdown + See `Configurable` in [`config.py`]() + → [`config.py`](/path/config.py#L42) + ``` +- **Doc-to-doc links**: `[Modules](.md)` resolves to `modules.md` or `modules/index.md` + +- **Multiple link modes**: absolute, relative, or GitHub URLs +- **Watch mode**: Automatically re-process on file changes +- **Ignore regions**: Skip sections with `` comments + +## Usage + +```bash +# Process a single file +doclinks docs/guide.md + +# Process a directory recursively +doclinks docs/ + +# Relative links (from doc location) +doclinks --link-mode relative docs/ + +# GitHub links +doclinks --link-mode github \ + --github-url https://github.com/org/repo docs/ + +# Dry run (preview changes) +doclinks --dry-run docs/ + +# CI check (exit 1 if changes needed) +doclinks --check docs/ + +# Watch mode (auto-update on changes) +doclinks --watch docs/ +``` + +## Options + +| Option | Description | +|--------------------|-------------------------------------------------| +| `--root PATH` | Repository root (default: auto-detect git root) | +| `--link-mode MODE` | `absolute` (default), `relative`, or `github` | +| `--github-url URL` | Base GitHub URL (required for github mode) | +| `--github-ref REF` | Branch/ref for GitHub links (default: `main`) | +| `--dry-run` | Show changes without modifying files | +| `--check` 
| Exit with error if changes needed (for CI) | +| `--watch` | Watch for changes and re-process | + +## Link patterns + + +| Pattern | Description | +|----------------------|------------------------------------------------| +| `[`file.py`]()` | Code file reference (empty or any link) | +| `[`path/file.py`]()` | Code file with partial path for disambiguation | +| `[`file.py`](#L42)` | Preserves existing line fragments | +| `[Doc Name](.md)` | Doc-to-doc link (resolves by name) | + + +## How resolution works + +The tool builds an index of all files in the repo. For `/dimos/protocol/service/spec.py`, it creates lookup entries for: + +- `spec.py` +- `service/spec.py` +- `protocol/service/spec.py` +- `dimos/protocol/service/spec.py` + +Use longer paths when multiple files share the same name. diff --git a/dimos/utils/docs/doclinks.py b/dimos/utils/docs/doclinks.py new file mode 100644 index 0000000000..eae5e01287 --- /dev/null +++ b/dimos/utils/docs/doclinks.py @@ -0,0 +1,628 @@ +#!/usr/bin/env python3 +# Copyright 2025-2026 Dimensional Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Markdown reference lookup tool. + +Finds markdown links like [`service/spec.py`](...) and fills in the correct +file path from the codebase. 
+ +Usage: + python reference_lookup.py --root /repo/root [options] markdownfile.md +""" + +import argparse +from collections import defaultdict +import os +from pathlib import Path +import re +import subprocess +import sys +from typing import Any + + +def find_git_root() -> Path | None: + """Find the git repository root from current directory.""" + try: + result = subprocess.run( + ["git", "rev-parse", "--show-toplevel"], + capture_output=True, + text=True, + check=True, + ) + return Path(result.stdout.strip()) + except (subprocess.CalledProcessError, FileNotFoundError): + return None + + +def load_gitignore_patterns(root: Path) -> list[str]: + """Load patterns from .gitignore file.""" + gitignore = root / ".gitignore" + if not gitignore.exists(): + return [] + + patterns = [] + with open(gitignore) as f: + for line in f: + line = line.strip() + if line and not line.startswith("#"): + patterns.append(line) + return patterns + + +def should_ignore(path: Path, root: Path, patterns: list[str]) -> bool: + """Check if path should be ignored based on gitignore patterns.""" + rel_path = path.relative_to(root) + path_str = str(rel_path) + name = path.name + + # Always ignore these + if name in {".git", ".venv", "venv", "node_modules", "__pycache__", ".mypy_cache", "generated"}: + return True + + # Skip directories that contain a .git subdir (submodules, nested repos) + if path.is_dir() and (path / ".git").exists(): + return True + + for pattern in patterns: + # Handle directory patterns (ending with /) + if pattern.endswith("/"): + dir_pattern = pattern[:-1] + if name == dir_pattern or path_str.startswith(dir_pattern + "/"): + return True + # Handle glob patterns + elif "*" in pattern: + import fnmatch + + if fnmatch.fnmatch(name, pattern) or fnmatch.fnmatch(path_str, pattern): + return True + # Simple name match + elif name == pattern or path_str == pattern or path_str.startswith(pattern + "/"): + return True + + return False + + +def build_file_index(root: Path) -> 
dict[str, list[Path]]: + """ + Build an index mapping filename suffixes to full paths. + + For /dimos/protocol/service/spec.py, creates entries for: + - spec.py + - service/spec.py + - protocol/service/spec.py + - dimos/protocol/service/spec.py + """ + index: dict[str, list[Path]] = defaultdict(list) + patterns = load_gitignore_patterns(root) + + for dirpath, dirnames, filenames in os.walk(root): + current = Path(dirpath) + + # Filter out ignored directories + dirnames[:] = [d for d in dirnames if not should_ignore(current / d, root, patterns)] + + for filename in filenames: + filepath = current / filename + if should_ignore(filepath, root, patterns): + continue + + rel_path = filepath.relative_to(root) + parts = rel_path.parts + + # Add all suffix combinations + for i in range(len(parts)): + suffix = "/".join(parts[i:]) + index[suffix].append(rel_path) + + return index + + +def build_doc_index(root: Path) -> dict[str, list[Path]]: + """ + Build an index mapping lowercase doc names to .md file paths. 
+ + For docs/concepts/modules.md, creates entry: + - "modules" -> [Path("docs/concepts/modules.md")] + + Also indexes directory index files: + - "modules" -> [Path("docs/modules/index.md")] (if modules/index.md exists) + """ + index: dict[str, list[Path]] = defaultdict(list) + patterns = load_gitignore_patterns(root) + + for dirpath, dirnames, filenames in os.walk(root): + current = Path(dirpath) + + # Filter out ignored directories + dirnames[:] = [d for d in dirnames if not should_ignore(current / d, root, patterns)] + + for filename in filenames: + if not filename.endswith(".md"): + continue + + filepath = current / filename + if should_ignore(filepath, root, patterns): + continue + + rel_path = filepath.relative_to(root) + stem = filepath.stem.lower() + + # For index.md files, also index by parent directory name + if stem == "index": + parent_name = filepath.parent.name.lower() + if parent_name: + index[parent_name].append(rel_path) + else: + index[stem].append(rel_path) + + return index + + +def find_symbol_line(file_path: Path, symbol: str) -> int | None: + """Find the first line number where symbol appears.""" + try: + with open(file_path, encoding="utf-8", errors="replace") as f: + for line_num, line in enumerate(f, start=1): + if symbol in line: + return line_num + except OSError: + pass + return None + + +def extract_other_backticks(line: str, file_ref: str) -> list[str]: + """Extract other backticked terms from a line, excluding the file reference.""" + pattern = r"`([^`]+)`" + matches = re.findall(pattern, line) + return [m for m in matches if m != file_ref and not m.endswith(".py") and "/" not in m] + + +def generate_link( + rel_path: Path, + root: Path, + doc_path: Path, + link_mode: str, + github_url: str | None, + github_ref: str, + line_fragment: str = "", +) -> str: + """Generate the appropriate link format.""" + if link_mode == "absolute": + return f"/{rel_path}{line_fragment}" + elif link_mode == "relative": + doc_dir = ( + 
doc_path.parent.relative_to(root) if doc_path.is_relative_to(root) else doc_path.parent + ) + target = root / rel_path + try: + rel_link = os.path.relpath(target, root / doc_dir) + except ValueError: + rel_link = str(rel_path) + return f"{rel_link}{line_fragment}" + elif link_mode == "github": + if not github_url: + raise ValueError("--github-url required when using --link-mode=github") + return f"{github_url.rstrip('/')}/blob/{github_ref}/{rel_path}{line_fragment}" + else: + raise ValueError(f"Unknown link mode: {link_mode}") + + +def split_by_ignore_regions(content: str) -> list[tuple[str, bool]]: + """ + Split content into regions, marking which should be processed. + + Returns list of (text, should_process) tuples. + Regions between and are skipped. + """ + ignore_start = re.compile(r"", re.IGNORECASE) + ignore_end = re.compile(r"", re.IGNORECASE) + + regions = [] + pos = 0 + in_ignore = False + + while pos < len(content): + if not in_ignore: + # Look for start of ignore region + match = ignore_start.search(content, pos) + if match: + # Add content before ignore marker (to be processed) + if match.start() > pos: + regions.append((content[pos : match.start()], True)) + # Add the marker itself (not processed) + regions.append((content[match.start() : match.end()], False)) + pos = match.end() + in_ignore = True + else: + # No more ignore regions, add rest of content + regions.append((content[pos:], True)) + break + else: + # Look for end of ignore region + match = ignore_end.search(content, pos) + if match: + # Add ignored content including end marker + regions.append((content[pos : match.end()], False)) + pos = match.end() + in_ignore = False + else: + # Unclosed ignore region, add rest as ignored + regions.append((content[pos:], False)) + break + + return regions + + +def process_markdown( + content: str, + root: Path, + doc_path: Path, + file_index: dict[str, list[Path]], + link_mode: str, + github_url: str | None, + github_ref: str, + doc_index: dict[str, 
list[Path]] | None = None, +) -> tuple[str, list[str], list[str]]: + """ + Process markdown content, replacing file and doc links. + + Regions between and + are skipped. + + Returns (new_content, changes, errors). + """ + changes = [] + errors = [] + + # Pattern 1: [`filename`](link) - code file links + code_pattern = r"\[`([^`]+)`\]\(([^)]*)\)" + + # Pattern 2: [Text](.md) - doc file links + doc_pattern = r"\[([^\]]+)\]\(\.md\)" + + def replace_code_match(match: re.Match[str]) -> str: + file_ref = match.group(1) + current_link = match.group(2) + full_match = match.group(0) + + # Skip anchor-only links (e.g., [`Symbol`](#section)) + if current_link.startswith("#"): + return full_match + + # Skip if the reference doesn't look like a file path (no extension or path separator) + if "." not in file_ref and "/" not in file_ref: + return full_match + + # Look up in index + candidates = file_index.get(file_ref, []) + + if len(candidates) == 0: + errors.append(f"No file matching '{file_ref}' found in codebase") + return full_match + elif len(candidates) > 1: + errors.append(f"'{file_ref}' matches multiple files: {[str(c) for c in candidates]}") + return full_match + + resolved_path = candidates[0] + + # Determine line fragment + line_fragment = "" + + # Check if current link has a line fragment to preserve + if "#" in current_link: + line_fragment = "#" + current_link.split("#", 1)[1] + else: + # Look for other backticked symbols on the same line + line_start = content.rfind("\n", 0, match.start()) + 1 + line_end = content.find("\n", match.end()) + if line_end == -1: + line_end = len(content) + line = content[line_start:line_end] + + symbols = extract_other_backticks(line, file_ref) + if symbols: + # Try to find the first symbol in the target file + full_file_path = root / resolved_path + for symbol in symbols: + line_num = find_symbol_line(full_file_path, symbol) + if line_num is not None: + line_fragment = f"#L{line_num}" + break + + new_link = generate_link( + 
resolved_path, root, doc_path, link_mode, github_url, github_ref, line_fragment + ) + new_match = f"[`{file_ref}`]({new_link})" + + if new_match != full_match: + changes.append(f" {file_ref}: {current_link} -> {new_link}") + + return new_match + + def replace_doc_match(match: re.Match[str]) -> str: + """Replace [Text](.md) with resolved doc path.""" + if doc_index is None: + return match.group(0) + + link_text = match.group(1) + full_match = match.group(0) + lookup_key = link_text.lower() + + # Look up in doc index + candidates = doc_index.get(lookup_key, []) + + if len(candidates) == 0: + errors.append(f"No doc matching '{link_text}' found") + return full_match + elif len(candidates) > 1: + errors.append(f"'{link_text}' matches multiple docs: {[str(c) for c in candidates]}") + return full_match + + resolved_path = candidates[0] + new_link = generate_link(resolved_path, root, doc_path, link_mode, github_url, github_ref) + new_match = f"[{link_text}]({new_link})" + + if new_match != full_match: + changes.append(f" {link_text}: .md -> {new_link}") + + return new_match + + # Split by ignore regions and only process non-ignored parts + regions = split_by_ignore_regions(content) + result_parts = [] + + for region_content, should_process in regions: + if should_process: + # Process code links first, then doc links + processed = re.sub(code_pattern, replace_code_match, region_content) + processed = re.sub(doc_pattern, replace_doc_match, processed) + result_parts.append(processed) + else: + result_parts.append(region_content) + + new_content = "".join(result_parts) + return new_content, changes, errors + + +def collect_markdown_files(paths: list[str]) -> list[Path]: + """Collect markdown files from paths, expanding directories recursively.""" + result: list[Path] = [] + for p in paths: + path = Path(p) + if path.is_dir(): + result.extend(path.rglob("*.md")) + elif path.exists(): + result.append(path) + return sorted(set(result)) + + +USAGE = """\ +doclinks - Update 
markdown file links to correct codebase paths + +Finds [`filename.py`](...) patterns and resolves them to actual file paths. +Also auto-links symbols: `Configurable` on same line adds #L fragment. + +Supports doc-to-doc linking: [Modules](.md) resolves to modules.md or modules/index.md. + +Usage: + doclinks [options] + +Examples: + # Single file (auto-detects git root) + doclinks docs/guide.md + + # Recursive directory + doclinks docs/ + + # GitHub links + doclinks --root . --link-mode github \\ + --github-url https://github.com/org/repo docs/ + + # Relative links (from doc location) + doclinks --root . --link-mode relative docs/ + + # CI check (exit 1 if changes needed) + doclinks --root . --check docs/ + + # Dry run (show changes without writing) + doclinks --root . --dry-run docs/ + +Options: + --root PATH Repository root (default: git root) + --link-mode MODE absolute (default), relative, or github + --github-url URL Base GitHub URL (for github mode) + --github-ref REF Branch/ref for GitHub links (default: main) + --dry-run Show changes without modifying files + --check Exit with error if changes needed + --watch Watch for changes and re-process (requires watchdog) + -h, --help Show this help +""" + + +def main() -> None: + if len(sys.argv) == 1: + print(USAGE) + sys.exit(0) + + parser = argparse.ArgumentParser( + description="Update markdown file links to correct codebase paths", + formatter_class=argparse.RawDescriptionHelpFormatter, + add_help=False, + ) + parser.add_argument("paths", nargs="*", help="Markdown files or directories to process") + parser.add_argument("--root", type=Path, help="Repository root path") + parser.add_argument("-h", "--help", action="store_true", help="Show help") + parser.add_argument( + "--link-mode", + choices=["absolute", "relative", "github"], + default="absolute", + help="Link format (default: absolute)", + ) + parser.add_argument("--github-url", help="Base GitHub URL (required for github mode)") + 
parser.add_argument("--github-ref", default="main", help="GitHub branch/ref (default: main)") + parser.add_argument( + "--dry-run", action="store_true", help="Show changes without modifying files" + ) + parser.add_argument( + "--check", action="store_true", help="Exit with error if changes needed (CI mode)" + ) + parser.add_argument("--watch", action="store_true", help="Watch for changes and re-process") + + args = parser.parse_args() + + if args.help: + print(USAGE) + sys.exit(0) + + # Auto-detect git root if --root not provided + if args.root: + root = args.root.resolve() + else: + root = find_git_root() + if root is None: + print("Error: --root not provided and not in a git repository\n", file=sys.stderr) + sys.exit(1) + + if not args.paths: + print("Error: at least one path is required\n", file=sys.stderr) + print(USAGE) + sys.exit(1) + + if args.link_mode == "github" and not args.github_url: + print("Error: --github-url is required when using --link-mode=github\n", file=sys.stderr) + sys.exit(1) + + if not root.is_dir(): + print(f"Error: {root} is not a directory", file=sys.stderr) + sys.exit(1) + + print(f"Building file index from {root}...") + file_index = build_file_index(root) + doc_index = build_doc_index(root) + print( + f"Indexed {sum(len(v) for v in file_index.values())} file paths, {len(doc_index)} doc names" + ) + + def process_file(md_path: Path, quiet: bool = False) -> tuple[bool, list[str]]: + """Process a single markdown file. 
Returns (changed, errors).""" + md_path = md_path.resolve() + if not quiet: + rel = md_path.relative_to(root) if md_path.is_relative_to(root) else md_path + print(f"\nProcessing {rel}...") + + content = md_path.read_text() + new_content, changes, errors = process_markdown( + content, + root, + md_path, + file_index, + args.link_mode, + args.github_url, + args.github_ref, + doc_index=doc_index, + ) + + if errors: + for err in errors: + print(f" Error: {err}", file=sys.stderr) + + if changes: + if not quiet: + print(" Changes:") + for change in changes: + print(change) + if not args.dry_run and not args.check: + md_path.write_text(new_content) + if not quiet: + print(" Updated") + return True, errors + else: + if not quiet: + print(" No changes needed") + return False, errors + + # Watch mode + if args.watch: + try: + from watchdog.events import FileSystemEventHandler + from watchdog.observers import Observer + except ImportError: + print( + "Error: --watch requires watchdog. Install with: pip install watchdog", + file=sys.stderr, + ) + sys.exit(1) + + watch_paths = args.paths if args.paths else [str(root / "docs")] + + class MarkdownHandler(FileSystemEventHandler): + def on_modified(self, event: Any) -> None: + if not event.is_directory and event.src_path.endswith(".md"): + process_file(Path(event.src_path)) + + def on_created(self, event: Any) -> None: + if not event.is_directory and event.src_path.endswith(".md"): + process_file(Path(event.src_path)) + + observer = Observer() + handler = MarkdownHandler() + + for watch_path in watch_paths: + p = Path(watch_path) + if p.is_file(): + p = p.parent + print(f"Watching {p} for changes...") + observer.schedule(handler, str(p), recursive=True) + + observer.start() + try: + while True: + import time + + time.sleep(1) + except KeyboardInterrupt: + observer.stop() + observer.join() + return + + # Normal mode + markdown_files = collect_markdown_files(args.paths) + if not markdown_files: + print("No markdown files found", 
file=sys.stderr) + sys.exit(1) + + print(f"Found {len(markdown_files)} markdown file(s)") + + all_errors = [] + any_changes = False + + for md_path in markdown_files: + changed, errors = process_file(md_path) + if changed: + any_changes = True + all_errors.extend(errors) + + if all_errors: + print(f"\n{len(all_errors)} error(s) encountered", file=sys.stderr) + sys.exit(1) + + if args.check and any_changes: + print("\nChanges needed (--check mode)", file=sys.stderr) + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/dimos/utils/docs/test_doclinks.py b/dimos/utils/docs/test_doclinks.py new file mode 100644 index 0000000000..48f4bbdc21 --- /dev/null +++ b/dimos/utils/docs/test_doclinks.py @@ -0,0 +1,524 @@ +# Copyright 2025-2026 Dimensional Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Tests for doclinks - using virtual markdown content against actual repo.""" + +from pathlib import Path + +from doclinks import ( + build_doc_index, + build_file_index, + extract_other_backticks, + find_symbol_line, + process_markdown, + split_by_ignore_regions, +) +import pytest + +# Use the actual repo root +REPO_ROOT = Path(__file__).parent.parent.parent.parent + + +@pytest.fixture(scope="module") +def file_index(): + """Build file index once for all tests.""" + return build_file_index(REPO_ROOT) + + +@pytest.fixture(scope="module") +def doc_index(): + """Build doc index once for all tests.""" + return build_doc_index(REPO_ROOT) + + +class TestFileIndex: + def test_finds_spec_files(self, file_index): + """Should find spec.py files with various path suffixes.""" + # Exact match with path + assert "protocol/service/spec.py" in file_index + candidates = file_index["protocol/service/spec.py"] + assert len(candidates) == 1 + assert candidates[0] == Path("dimos/protocol/service/spec.py") + + def test_service_spec_unique(self, file_index): + """service/spec.py should uniquely match one file.""" + candidates = file_index.get("service/spec.py", []) + assert len(candidates) == 1 + assert "protocol/service/spec.py" in str(candidates[0]) + + def test_spec_ambiguous(self, file_index): + """spec.py alone should match multiple files.""" + candidates = file_index.get("spec.py", []) + assert len(candidates) > 1 # Multiple spec.py files exist + + def test_excludes_venv(self, file_index): + """Should not include files from .venv directory.""" + for paths in file_index.values(): + for p in paths: + # Check for .venv as a path component, not just substring + assert ".venv" not in p.parts + + +class TestSymbolLookup: + def test_find_configurable_in_spec(self): + """Should find Configurable class in service/spec.py.""" + spec_path = REPO_ROOT / "dimos/protocol/service/spec.py" + line = find_symbol_line(spec_path, "Configurable") + assert line is not None + assert line > 0 + + # 
Verify it's the class definition line + with open(spec_path) as f: + lines = f.readlines() + assert "class Configurable" in lines[line - 1] + + def test_find_nonexistent_symbol(self): + """Should return None for symbols that don't exist.""" + spec_path = REPO_ROOT / "dimos/protocol/service/spec.py" + line = find_symbol_line(spec_path, "NonExistentSymbol12345") + assert line is None + + +class TestExtractBackticks: + def test_extracts_symbols(self): + """Should extract backticked terms excluding file refs.""" + line = "See [`service/spec.py`]() for `Configurable` and `Service`" + symbols = extract_other_backticks(line, "service/spec.py") + assert "Configurable" in symbols + assert "Service" in symbols + assert "service/spec.py" not in symbols + + def test_excludes_file_paths(self): + """Should exclude things that look like file paths.""" + line = "See [`foo.py`]() and `bar.py` and `Symbol`" + symbols = extract_other_backticks(line, "foo.py") + assert "Symbol" in symbols + assert "bar.py" not in symbols # Has .py extension + assert "foo.py" not in symbols + + +class TestProcessMarkdown: + def test_resolves_service_spec(self, file_index): + """Should resolve service/spec.py to full path.""" + content = "See [`service/spec.py`]() for details" + doc_path = REPO_ROOT / "docs/test.md" + + new_content, changes, errors = process_markdown( + content, + REPO_ROOT, + doc_path, + file_index, + link_mode="absolute", + github_url=None, + github_ref="main", + ) + + assert len(errors) == 0 + assert len(changes) == 1 + assert "/dimos/protocol/service/spec.py" in new_content + + def test_auto_links_symbol(self, file_index): + """Should auto-add line number for symbol on same line.""" + content = "The `Configurable` class is in [`service/spec.py`]()" + doc_path = REPO_ROOT / "docs/test.md" + + new_content, changes, errors = process_markdown( + content, + REPO_ROOT, + doc_path, + file_index, + link_mode="absolute", + github_url=None, + github_ref="main", + ) + + assert len(errors) == 0 
+ assert "#L" in new_content # Should have line number + + def test_preserves_existing_line_fragment(self, file_index): + """Should preserve existing #L fragments.""" + content = "See [`service/spec.py`](#L99)" + doc_path = REPO_ROOT / "docs/test.md" + + new_content, changes, errors = process_markdown( + content, + REPO_ROOT, + doc_path, + file_index, + link_mode="absolute", + github_url=None, + github_ref="main", + ) + + assert "#L99" in new_content + + def test_skips_anchor_links(self, file_index): + """Should skip anchor-only links like [`Symbol`](#section).""" + content = "See [`SomeClass`](#some-section) for details" + doc_path = REPO_ROOT / "docs/test.md" + + new_content, changes, errors = process_markdown( + content, + REPO_ROOT, + doc_path, + file_index, + link_mode="absolute", + github_url=None, + github_ref="main", + ) + + assert len(errors) == 0 + assert len(changes) == 0 + assert new_content == content # Unchanged + + def test_skips_non_file_refs(self, file_index): + """Should skip refs that don't look like files.""" + content = "The `MyClass` is documented at [`MyClass`]()" + doc_path = REPO_ROOT / "docs/test.md" + + new_content, changes, errors = process_markdown( + content, + REPO_ROOT, + doc_path, + file_index, + link_mode="absolute", + github_url=None, + github_ref="main", + ) + + assert len(errors) == 0 + assert len(changes) == 0 + + def test_errors_on_ambiguous(self, file_index): + """Should error when file reference is ambiguous.""" + content = "See [`spec.py`]() for details" # Multiple spec.py files + doc_path = REPO_ROOT / "docs/test.md" + + new_content, changes, errors = process_markdown( + content, + REPO_ROOT, + doc_path, + file_index, + link_mode="absolute", + github_url=None, + github_ref="main", + ) + + assert len(errors) == 1 + assert "matches multiple files" in errors[0] + + def test_errors_on_not_found(self, file_index): + """Should error when file doesn't exist.""" + content = "See [`nonexistent/file.py`]() for details" + doc_path = 
REPO_ROOT / "docs/test.md" + + new_content, changes, errors = process_markdown( + content, + REPO_ROOT, + doc_path, + file_index, + link_mode="absolute", + github_url=None, + github_ref="main", + ) + + assert len(errors) == 1 + assert "No file matching" in errors[0] + + def test_github_mode(self, file_index): + """Should generate GitHub URLs in github mode.""" + content = "See [`service/spec.py`]()" + doc_path = REPO_ROOT / "docs/test.md" + + new_content, changes, errors = process_markdown( + content, + REPO_ROOT, + doc_path, + file_index, + link_mode="github", + github_url="https://github.com/org/repo", + github_ref="main", + ) + + assert "https://github.com/org/repo/blob/main/dimos/protocol/service/spec.py" in new_content + + def test_relative_mode(self, file_index): + """Should generate relative paths in relative mode.""" + content = "See [`service/spec.py`]()" + doc_path = REPO_ROOT / "docs/concepts/test.md" + + new_content, changes, errors = process_markdown( + content, + REPO_ROOT, + doc_path, + file_index, + link_mode="relative", + github_url=None, + github_ref="main", + ) + + assert new_content.startswith("See [`service/spec.py`](../../") + assert "dimos/protocol/service/spec.py" in new_content + + +class TestDocIndex: + def test_indexes_by_stem(self, doc_index): + """Should index docs by lowercase stem.""" + assert "configuration" in doc_index + assert "modules" in doc_index + assert "development" in doc_index + + def test_case_insensitive(self, doc_index): + """Should use lowercase keys.""" + # All keys should be lowercase + for key in doc_index: + assert key == key.lower() + + +class TestDocLinking: + def test_resolves_doc_link(self, file_index, doc_index): + """Should resolve [Text](.md) to doc path.""" + content = "See [Configuration](.md) for details" + doc_path = REPO_ROOT / "docs/test.md" + + new_content, changes, errors = process_markdown( + content, + REPO_ROOT, + doc_path, + file_index, + link_mode="absolute", + github_url=None, + 
github_ref="main", + doc_index=doc_index, + ) + + assert len(errors) == 0 + assert len(changes) == 1 + assert "[Configuration](/docs/" in new_content + assert ".md)" in new_content + + def test_case_insensitive_lookup(self, file_index, doc_index): + """Should match case-insensitively.""" + content = "See [CONFIGURATION](.md) for details" + doc_path = REPO_ROOT / "docs/test.md" + + new_content, changes, errors = process_markdown( + content, + REPO_ROOT, + doc_path, + file_index, + link_mode="absolute", + github_url=None, + github_ref="main", + doc_index=doc_index, + ) + + assert len(errors) == 0 + assert "[CONFIGURATION](" in new_content # Preserves original text + assert ".md)" in new_content + + def test_doc_link_github_mode(self, file_index, doc_index): + """Should generate GitHub URLs for doc links.""" + content = "See [Configuration](.md)" + doc_path = REPO_ROOT / "docs/test.md" + + new_content, changes, errors = process_markdown( + content, + REPO_ROOT, + doc_path, + file_index, + link_mode="github", + github_url="https://github.com/org/repo", + github_ref="main", + doc_index=doc_index, + ) + + assert "https://github.com/org/repo/blob/main/docs/" in new_content + assert ".md)" in new_content + + def test_doc_link_relative_mode(self, file_index, doc_index): + """Should generate relative paths for doc links.""" + content = "See [Development](.md)" + doc_path = REPO_ROOT / "docs/concepts/test.md" + + new_content, changes, errors = process_markdown( + content, + REPO_ROOT, + doc_path, + file_index, + link_mode="relative", + github_url=None, + github_ref="main", + doc_index=doc_index, + ) + + assert len(errors) == 0 + # Should be relative path from docs/concepts/ to docs/ + assert "../" in new_content + + def test_doc_not_found_error(self, file_index, doc_index): + """Should error when doc doesn't exist.""" + content = "See [NonexistentDoc](.md)" + doc_path = REPO_ROOT / "docs/test.md" + + new_content, changes, errors = process_markdown( + content, + REPO_ROOT, + 
doc_path, + file_index, + link_mode="absolute", + github_url=None, + github_ref="main", + doc_index=doc_index, + ) + + assert len(errors) == 1 + assert "No doc matching" in errors[0] + + def test_skips_regular_links(self, file_index, doc_index): + """Should not affect regular markdown links.""" + content = "See [regular link](https://example.com) here" + doc_path = REPO_ROOT / "docs/test.md" + + new_content, changes, errors = process_markdown( + content, + REPO_ROOT, + doc_path, + file_index, + link_mode="absolute", + github_url=None, + github_ref="main", + doc_index=doc_index, + ) + + assert new_content == content # Unchanged + + +class TestIgnoreRegions: + def test_split_no_ignore(self): + """Content without ignore markers should be fully processed.""" + content = "Hello world" + regions = split_by_ignore_regions(content) + assert len(regions) == 1 + assert regions[0] == ("Hello world", True) + + def test_split_single_ignore(self): + """Should correctly split around a single ignore region.""" + content = "beforeignoredafter" + regions = split_by_ignore_regions(content) + + # Should have: before (process), marker (no), ignored+end (no), after (process) + assert len(regions) == 4 + assert regions[0] == ("before", True) + assert regions[1][1] is False # Start marker + assert regions[2][1] is False # Ignored content + end marker + assert regions[3] == ("after", True) + + def test_split_multiple_ignores(self): + """Should handle multiple ignore regions.""" + content = ( + "ax" + "byc" + ) + regions = split_by_ignore_regions(content) + + # Check that processable regions are correctly identified + processable = [r[0] for r in regions if r[1]] + assert "a" in processable + assert "b" in processable + assert "c" in processable + + def test_split_case_insensitive(self): + """Should handle different case in markers.""" + content = "beforeignoredafter" + regions = split_by_ignore_regions(content) + + processable = [r[0] for r in regions if r[1]] + assert "before" in 
processable + assert "after" in processable + assert "ignored" not in processable + + def test_split_unclosed_ignore(self): + """Unclosed ignore region should ignore rest of content.""" + content = "beforerest of file" + regions = split_by_ignore_regions(content) + + processable = [r[0] for r in regions if r[1]] + assert "before" in processable + assert "rest of file" not in processable + + def test_ignores_links_in_region(self, file_index): + """Links inside ignore region should not be processed.""" + content = ( + "Process [`service/spec.py`]() here\n" + "\n" + "Skip [`service/spec.py`]() here\n" + "\n" + "Process [`service/spec.py`]() again" + ) + doc_path = REPO_ROOT / "docs/test.md" + + new_content, changes, errors = process_markdown( + content, + REPO_ROOT, + doc_path, + file_index, + link_mode="absolute", + github_url=None, + github_ref="main", + ) + + assert len(errors) == 0 + # Should have 2 changes (before and after ignore region) + assert len(changes) == 2 + + # Verify the ignored region is untouched + assert "Skip [`service/spec.py`]() here" in new_content + + # Verify the processed regions have resolved links + lines = new_content.split("\n") + assert "/dimos/protocol/service/spec.py" in lines[0] + assert "/dimos/protocol/service/spec.py" in lines[-1] + + def test_ignores_doc_links_in_region(self, file_index, doc_index): + """Doc links inside ignore region should not be processed.""" + content = ( + "[Configuration](.md)\n" + "\n" + "[Configuration](.md) example\n" + "\n" + "[Configuration](.md)" + ) + doc_path = REPO_ROOT / "docs/test.md" + + new_content, changes, errors = process_markdown( + content, + REPO_ROOT, + doc_path, + file_index, + link_mode="absolute", + github_url=None, + github_ref="main", + doc_index=doc_index, + ) + + assert len(errors) == 0 + assert len(changes) == 2 # Only 2 links processed + + # Verify the ignored region still has .md placeholder + assert "[Configuration](.md) example" in new_content + + +if __name__ == "__main__": + 
pytest.main([__file__, "-v"]) diff --git a/dimos/utils/generic.py b/dimos/utils/generic.py index e53292f1b1..84168ce057 100644 --- a/dimos/utils/generic.py +++ b/dimos/utils/generic.py @@ -12,13 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. +from collections.abc import Callable import hashlib import json import os import string -from typing import Any +from typing import Any, Generic, TypeVar, overload import uuid +_T = TypeVar("_T") + def truncate_display_string(arg: Any, max: int | None = None) -> str: """ @@ -73,6 +76,13 @@ def short_id(from_string: str | None = None) -> str: return "".join(reversed(chars))[:min_chars] -class classproperty(property): - def __get__(self, obj, cls): # type: ignore[no-untyped-def, override] - return self.fget(cls) # type: ignore[misc] +class classproperty(Generic[_T]): + def __init__(self, fget: Callable[..., _T]) -> None: + self.fget = fget + + @overload + def __get__(self, obj: None, cls: type) -> _T: ... + @overload + def __get__(self, obj: object, cls: type) -> _T: ... + def __get__(self, obj: object | None, cls: type) -> _T: + return self.fget(cls) diff --git a/dimos/utils/reactive.py b/dimos/utils/reactive.py index c7dea45576..b5833a6348 100644 --- a/dimos/utils/reactive.py +++ b/dimos/utils/reactive.py @@ -12,7 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -from collections.abc import Callable +from collections.abc import Callable, Generator +from queue import Queue import threading from typing import Any, Generic, TypeVar @@ -228,3 +229,35 @@ def _quality_barrier(source: Observable[T]) -> Observable[T]: ) return _quality_barrier + + +def iter_observable(observable: Observable[T]) -> Generator[T, None, None]: + """Convert an Observable to a blocking iterator. + + Yields items as they arrive from the observable. Properly disposes + the subscription when the generator is closed. 
+ """ + q: Queue[T | None] = Queue() + done = threading.Event() + + def on_next(value: T) -> None: + q.put(value) + + def on_complete() -> None: + done.set() + q.put(None) + + def on_error(e: Exception) -> None: + done.set() + q.put(None) + + sub = observable.subscribe(on_next=on_next, on_completed=on_complete, on_error=on_error) + + try: + while not done.is_set() or not q.empty(): + item = q.get() + if item is None and done.is_set(): + break + yield item # type: ignore[misc] + finally: + sub.dispose() diff --git a/dimos/utils/test_reactive.py b/dimos/utils/test_reactive.py index 17b69ba0aa..a0f3fe42ef 100644 --- a/dimos/utils/test_reactive.py +++ b/dimos/utils/test_reactive.py @@ -28,6 +28,7 @@ callback_to_observable, getter_ondemand, getter_streaming, + iter_observable, ) @@ -283,3 +284,12 @@ def stop_fn(cb) -> None: # Dispose subscription and check that stop was called subscription.dispose() assert stop_called, "Stop function should be called on dispose" + + +def test_iter_observable() -> None: + source = dispose_spy(rx.of(1, 2, 3, 4, 5)) + + result = list(iter_observable(source)) + + assert result == [1, 2, 3, 4, 5] + assert source.is_disposed(), "Observable should be disposed after iteration" diff --git a/docs/VIEWER_BACKENDS.md b/docs/VIEWER_BACKENDS.md index d70e3fa824..5b069fea7c 100644 --- a/docs/VIEWER_BACKENDS.md +++ b/docs/VIEWER_BACKENDS.md @@ -62,7 +62,7 @@ This happens on lower-end hardware (NUC, older laptops) with large maps. 
### Increase Voxel Size -Edit [`dimos/robot/unitree_webrtc/unitree_go2_blueprints.py`](../dimos/robot/unitree_webrtc/unitree_go2_blueprints.py) line 82: +Edit [`dimos/robot/unitree_webrtc/unitree_go2_blueprints.py`](/dimos/robot/unitree_webrtc/unitree_go2_blueprints.py) line 82: ```python # Before (high detail, slower on large maps) diff --git a/docs/agents/docs/assets/codeblocks_example.svg b/docs/agents/docs/assets/codeblocks_example.svg new file mode 100644 index 0000000000..3ba6c37a4b --- /dev/null +++ b/docs/agents/docs/assets/codeblocks_example.svg @@ -0,0 +1,47 @@ + + + + + + + + +A + +A + + + +B + +B + + + +A->B + + + + + +C + +C + + + +A->C + + + + + +B->C + + + + + diff --git a/docs/agents/docs/assets/pikchr_basic.svg b/docs/agents/docs/assets/pikchr_basic.svg new file mode 100644 index 0000000000..5410d35577 --- /dev/null +++ b/docs/agents/docs/assets/pikchr_basic.svg @@ -0,0 +1,12 @@ + + +Step 1 + + + +Step 2 + + + +Step 3 + diff --git a/docs/agents/docs/assets/pikchr_branch.svg b/docs/agents/docs/assets/pikchr_branch.svg new file mode 100644 index 0000000000..e7b2b86596 --- /dev/null +++ b/docs/agents/docs/assets/pikchr_branch.svg @@ -0,0 +1,16 @@ + + +Input + + + +Process + + + +Path A + + + +Path B + diff --git a/docs/agents/docs/assets/pikchr_explicit.svg b/docs/agents/docs/assets/pikchr_explicit.svg new file mode 100644 index 0000000000..a6a913fcb4 --- /dev/null +++ b/docs/agents/docs/assets/pikchr_explicit.svg @@ -0,0 +1,8 @@ + + +Step 1 + + + +Step 2 + diff --git a/docs/agents/docs/assets/pikchr_labels.svg b/docs/agents/docs/assets/pikchr_labels.svg new file mode 100644 index 0000000000..b11fe64bca --- /dev/null +++ b/docs/agents/docs/assets/pikchr_labels.svg @@ -0,0 +1,5 @@ + + +Box +label below + diff --git a/docs/agents/docs/assets/pikchr_sizing.svg b/docs/agents/docs/assets/pikchr_sizing.svg new file mode 100644 index 0000000000..3a0c433cb1 --- /dev/null +++ b/docs/agents/docs/assets/pikchr_sizing.svg @@ -0,0 +1,13 @@ + + +short + + + 
+.subscribe() + + + +two lines +of text + diff --git a/docs/agents/docs/codeblocks.md b/docs/agents/docs/codeblocks.md new file mode 100644 index 0000000000..3f1f86f999 --- /dev/null +++ b/docs/agents/docs/codeblocks.md @@ -0,0 +1,314 @@ +# Executable Code Blocks + +We use [md-babel-py](https://github.com/leshy/md-babel-py/) to execute code blocks in markdown and insert results. + +## Golden Rule + +**All code blocks must be executable.** Never write illustrative/pseudo code blocks. If you're showing an API usage pattern, create a minimal working example that actually runs. This ensures documentation stays correct as the codebase evolves. + +## Running + +```sh skip +md-babel-py run document.md # edit in-place +md-babel-py run document.md --stdout # preview to stdout +md-babel-py run document.md --dry-run # show what would run +``` + +## Supported Languages + +Python, Shell (sh), Node.js, plus visualization: Matplotlib, Graphviz, Pikchr, Asymptote, OpenSCAD, Diagon. + +## Code Block Flags + +Add flags after the language identifier: + +| Flag | Effect | +|------|--------| +| `session=NAME` | Share state between blocks with same session name | +| `output=path.png` | Write output to file instead of inline | +| `no-result` | Execute but don't insert result | +| `skip` | Don't execute this block | +| `expected-error` | Block is expected to fail | + +## Examples + +# md-babel-py + +Execute code blocks in markdown files and insert the results. 
+ +![Demo](assets/screencast.gif) + +**Use cases:** +- Keep documentation examples up-to-date automatically +- Validate code snippets in docs actually work +- Generate diagrams and charts from code in markdown +- Literate programming with executable documentation + +## Languages + +### Shell + +```sh +echo "cwd: $(pwd)" +``` + + +``` +cwd: /work +``` + +### Python + +```python session=example +a = "hello world" +print(a) +``` + + +``` +hello world +``` + +Sessions preserve state between code blocks: + +```python session=example +print(a, "again") +``` + + +``` +hello world again +``` + +### Node.js + +```node +console.log("Hello from Node.js"); +console.log(`Node version: ${process.version}`); +``` + + +``` +Hello from Node.js +Node version: v22.21.1 +``` + +### Matplotlib + +```python output=assets/matplotlib-demo.svg +import matplotlib.pyplot as plt +import numpy as np +plt.style.use('dark_background') +x = np.linspace(0, 4 * np.pi, 200) +plt.figure(figsize=(8, 4)) +plt.plot(x, np.sin(x), label='sin(x)', linewidth=2) +plt.plot(x, np.cos(x), label='cos(x)', linewidth=2) +plt.xlabel('x') +plt.ylabel('y') +plt.legend() +plt.grid(alpha=0.3) +plt.savefig('{output}', transparent=True) +``` + + +![output](assets/matplotlib-demo.svg) + +### Pikchr + +SQLite's diagram language: + +
+diagram source + +```pikchr output=assets/pikchr-demo.svg +color = white +fill = none +linewid = 0.4in + +# Input file +In: file "README.md" fit +arrow + +# Processing +Parse: box "Parse" rad 5px fit +arrow +Exec: box "Execute" rad 5px fit + +# Fan out to languages +arrow from Exec.e right 0.3in then up 0.4in then right 0.3in +Sh: oval "Shell" fit +arrow from Exec.e right 0.3in then right 0.3in +Node: oval "Node" fit +arrow from Exec.e right 0.3in then down 0.4in then right 0.3in +Py: oval "Python" fit + +# Merge back +X: dot at (Py.e.x + 0.3in, Node.e.y) invisible +line from Sh.e right until even with X then down to X +line from Node.e to X +line from Py.e right until even with X then up to X +Out: file "README.md" fit with .w at (X.x + 0.3in, X.y) +arrow from X to Out.w +``` + +
+ + +![output](assets/pikchr-demo.svg) + +### Asymptote + +Vector graphics: + +```asymptote output=assets/histogram.svg +import graph; +import stats; + +size(400,200,IgnoreAspect); +defaultpen(white); + +int n=10000; +real[] a=new real[n]; +for(int i=0; i < n; ++i) a[i]=Gaussrand(); + +draw(graph(Gaussian,min(a),max(a)),orange); + +int N=bins(a); + +histogram(a,min(a),max(a),N,normalize=true,low=0,rgb(0.4,0.6,0.8),rgb(0.2,0.4,0.6),bars=true); + +xaxis("$x$",BottomTop,LeftTicks,p=white); +yaxis("$dP/dx$",LeftRight,RightTicks(trailingzero),p=white); +``` + + +![output](assets/histogram.svg) + +### Graphviz + +```dot output=assets/graph.svg +A -> B -> C +A -> C +``` + + +![output](assets/graph.svg) + +### OpenSCAD + +```openscad output=assets/cube-sphere.png +cube([10, 10, 10]); +sphere(r=7); +``` + + +![output](assets/cube-sphere.png) + +### Diagon + +ASCII art diagrams: + +```diagon mode=Math +1 + 1/2 + sum(i,0,10) +``` + + +``` + 10 + ___ + 1 ╲ +1 + ─ + ╱ i + 2 ‾‾‾ + 0 +``` + +```diagon mode=GraphDAG +A -> B -> C +A -> C +``` + + +``` +┌───┐ +│A │ +└┬─┬┘ + │┌▽┐ + ││B│ + │└┬┘ +┌▽─▽┐ +│C │ +└───┘ +``` + +## Install + +### Nix (recommended) + +```sh skip +# Run directly from GitHub +nix run github:leshy/md-babel-py -- run README.md --stdout + +# Or clone and run locally +nix run . 
-- run README.md --stdout +``` + +### Docker + +```sh skip +# Pull from Docker Hub +docker run -v $(pwd):/work lesh/md-babel-py:main run /work/README.md --stdout + +# Or build locally via Nix +nix build .#docker # builds tarball to ./result +docker load < result # loads image from tarball +docker run -v $(pwd):/work md-babel-py:latest run /work/file.md --stdout +``` + +### pipx + +```sh skip +pipx install md-babel-py +# or: uv pip install md-babel-py +md-babel-py run README.md --stdout +``` + +If not using nix or docker, evaluators require system dependencies: + +| Language | System packages | +|-----------|-----------------------------| +| python | python3 | +| node | nodejs | +| dot | graphviz | +| asymptote | asymptote, texlive, dvisvgm | +| pikchr | pikchr | +| openscad | openscad, xvfb, imagemagick | +| diagon | diagon | + +```sh skip +# Arch Linux +sudo pacman -S python nodejs graphviz asymptote texlive-basic openscad xorg-server-xvfb imagemagick + +# Debian/Ubuntu +sudo apt-get install python3 nodejs graphviz asymptote texlive xvfb imagemagick openscad +``` + +Note: pikchr and diagon may need to be built from source. Use Docker or Nix for full evaluator support. + +## Usage + +```sh skip +# Edit file in-place +md-babel-py run document.md + +# Output to separate file +md-babel-py run document.md --output result.md + +# Print to stdout +md-babel-py run document.md --stdout + +# Only run specific languages +md-babel-py run document.md --lang python,sh + +# Dry run - show what would execute +md-babel-py run document.md --dry-run +``` diff --git a/docs/agents/docs/doclinks.md b/docs/agents/docs/doclinks.md new file mode 100644 index 0000000000..d5533c5983 --- /dev/null +++ b/docs/agents/docs/doclinks.md @@ -0,0 +1,21 @@ +When writing or editing markdown documentation, use `doclinks` tool to resolve file references. 
+ +Full documentation if needed: [`utils/docs/doclinks.md`](/dimos/utils/docs/doclinks.md) + +## Syntax + + +| Pattern | Example | +|-------------|-----------------------------------------------------| +| Code file | `[`service/spec.py`]()` → resolves path | +| With symbol | `Configurable` in `[`spec.py`]()` → adds `#L` | +| Doc link | `[Configuration](.md)` → resolves to doc | + + +## Usage + +```bash +doclinks docs/guide.md # single file +doclinks docs/ # directory +doclinks --dry-run ... # preview only +``` diff --git a/docs/agents/docs/index.md b/docs/agents/docs/index.md new file mode 100644 index 0000000000..76fc344856 --- /dev/null +++ b/docs/agents/docs/index.md @@ -0,0 +1,192 @@ + +# Code Blocks + +**All code blocks must be executable.** +Never write illustrative/pseudo code blocks. +If you're showing an API usage pattern, create a minimal working example that actually runs. This ensures documentation stays correct as the codebase evolves. + +After writing a code block in your markdown file, you can run it by executing +`md-babel-py run document.md` + +more information on this tool is in [codeblocks](/docs/agents/docs_agent/codeblocks.md) + + +# Code or Docs Links + +After adding a link to a doc run + +`doclinks document.md` + +### Code file references +```markdown +See [`service/spec.py`](/dimos/protocol/service/spec.py) for the implementation. +``` + +After running doclinks, becomes: +```markdown +See [`service/spec.py`](/dimos/protocol/service/spec.py) for the implementation. +``` + +### Symbol auto-linking +Mention a symbol on the same line to auto-link to its line number: +```markdown +The `Configurable` class is defined in [`service/spec.py`](/dimos/protocol/service/spec.py#L22). +``` + +Becomes: +```markdown +The `Configurable` class is defined in [`service/spec.py`](/dimos/protocol/service/spec.py#L22). +``` +### Doc-to-doc references +Use `.md` as the link target: +```markdown +See [Configuration](/docs/api/configuration.md) for more details. 
+``` + +Becomes: +```markdown +See [Configuration](/docs/concepts/configuration.md) for more details. +``` + +More information on this in [doclinks](/docs/agents/docs_agent/doclinks.md) + + +# Pikchr + +[Pikchr](https://pikchr.org/) is a diagram language from SQLite. Use it for flowcharts and architecture diagrams. + +**Important:** Always wrap pikchr blocks in `
` tags so the source is collapsed by default on GitHub. The rendered SVG stays visible outside the fold. Code blocks (Python, etc.) should NOT be folded—they're meant to be read. + +## Basic syntax + +
+diagram source + +```pikchr output=assets/pikchr_basic.svg +color = white +fill = none + +A: box "Step 1" rad 5px fit wid 170% ht 170% +arrow right 0.3in +B: box "Step 2" rad 5px fit wid 170% ht 170% +arrow right 0.3in +C: box "Step 3" rad 5px fit wid 170% ht 170% +``` + +
+ + +![output](assets/pikchr_basic.svg) + +## Box sizing + +Use `fit` with percentage scaling to auto-size boxes with padding: + +
+diagram source + +```pikchr output=assets/pikchr_sizing.svg +color = white +fill = none + +# fit wid 170% ht 170% = auto-size + padding +A: box "short" rad 5px fit wid 170% ht 170% +arrow right 0.3in +B: box ".subscribe()" rad 5px fit wid 170% ht 170% +arrow right 0.3in +C: box "two lines" "of text" rad 5px fit wid 170% ht 170% +``` + +
+ + +![output](assets/pikchr_sizing.svg) + +The pattern `fit wid 170% ht 170%` means: auto-size to text, then scale width by 170% and height by 170%. + +For explicit sizing (when you need consistent box sizes): + +
+diagram source + +```pikchr output=assets/pikchr_explicit.svg +color = white +fill = none + +A: box "Step 1" rad 5px fit wid 170% ht 170% +arrow right 0.3in +B: box "Step 2" rad 5px fit wid 170% ht 170% +``` + +
+ + +![output](assets/pikchr_explicit.svg) + +## Common settings + +Always start with: + +``` +color = white # text color +fill = none # transparent box fill +``` + +## Branching paths + +
+diagram source + +```pikchr output=assets/pikchr_branch.svg +color = white +fill = none + +A: box "Input" rad 5px fit wid 170% ht 170% +arrow +B: box "Process" rad 5px fit wid 170% ht 170% + +# Branch up +arrow from B.e right 0.3in then up 0.35in then right 0.3in +C: box "Path A" rad 5px fit wid 170% ht 170% + +# Branch down +arrow from B.e right 0.3in then down 0.35in then right 0.3in +D: box "Path B" rad 5px fit wid 170% ht 170% +``` + +
+ + +![output](assets/pikchr_branch.svg) + +**Tip:** For tree/hierarchy diagrams, prefer left-to-right layout (root on left, children branching right). This reads more naturally and avoids awkward vertical stacking. + +## Adding labels + +
+diagram source + +```pikchr output=assets/pikchr_labels.svg +color = white +fill = none + +A: box "Box" rad 5px fit wid 170% ht 170% +text "label below" at (A.x, A.y - 0.4in) +``` + +
+ + +![output](assets/pikchr_labels.svg) + +## Reference + +| Element | Syntax | +|---------|--------| +| Box | `box "text" rad 5px wid Xin ht Yin` | +| Arrow | `arrow right 0.3in` | +| Oval | `oval "text" wid Xin ht Yin` | +| Text | `text "label" at (X, Y)` | +| Named point | `A: box ...` then reference `A.e`, `A.n`, `A.x`, `A.y` | + +See [pikchr.org/home/doc/trunk/doc/userman.md](https://pikchr.org/home/doc/trunk/doc/userman.md) for full documentation. diff --git a/docs/api/assets/backpressure.svg b/docs/api/assets/backpressure.svg new file mode 100644 index 0000000000..b3d69af6fb --- /dev/null +++ b/docs/api/assets/backpressure.svg @@ -0,0 +1,15 @@ + + +Camera +60 fps + + + +queue + + + +ML Model +2 fps +items pile up! + diff --git a/docs/api/assets/backpressure_solution.svg b/docs/api/assets/backpressure_solution.svg new file mode 100644 index 0000000000..64cea95ed8 --- /dev/null +++ b/docs/api/assets/backpressure_solution.svg @@ -0,0 +1,22 @@ + + +Camera +60 fps + + + +replay(1) +ref_count() + + + +Fast Sub + + + +LATEST + + + +Slow Sub + diff --git a/docs/api/assets/observable_flow.svg b/docs/api/assets/observable_flow.svg new file mode 100644 index 0000000000..d7e0e021d6 --- /dev/null +++ b/docs/api/assets/observable_flow.svg @@ -0,0 +1,16 @@ + + +observable + + + +.pipe(ops) + + + +.subscribe() + + + +callback + diff --git a/docs/api/assets/transforms.png b/docs/api/assets/transforms.png new file mode 100644 index 0000000000..49dba4ab9a --- /dev/null +++ b/docs/api/assets/transforms.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6597e0008197902e321a3ad3dfb1e838f860fa7ca1277c369ed6ff7da8bf757d +size 101102 diff --git a/docs/api/assets/transforms_chain.svg b/docs/api/assets/transforms_chain.svg new file mode 100644 index 0000000000..3f6c21741b --- /dev/null +++ b/docs/api/assets/transforms_chain.svg @@ -0,0 +1,12 @@ + + +base_link + + + +camera_link + + + +camera_optical + diff --git a/docs/api/assets/transforms_modules.svg 
b/docs/api/assets/transforms_modules.svg new file mode 100644 index 0000000000..08e7c309a5 --- /dev/null +++ b/docs/api/assets/transforms_modules.svg @@ -0,0 +1,20 @@ + + +world + + + +base_link + + + +camera_link + + + +camera_optical + +RobotBaseModule + +CameraModule + diff --git a/docs/api/assets/transforms_tree.svg b/docs/api/assets/transforms_tree.svg new file mode 100644 index 0000000000..f95f1a6621 --- /dev/null +++ b/docs/api/assets/transforms_tree.svg @@ -0,0 +1,26 @@ + + +world + + + +robot_base + + + +camera_link + + + +camera_optical +mug here + + + +arm_base + + + +gripper +target here + diff --git a/docs/api/configuration.md b/docs/api/configuration.md new file mode 100644 index 0000000000..2977e8c3c1 --- /dev/null +++ b/docs/api/configuration.md @@ -0,0 +1,90 @@ +# Configuration + +Dimos provides a `Configurable` base class, see [`service/spec.py`](/dimos/protocol/service/spec.py#L22). + +This allows using dataclasses to specify configuration structure and default values per module. + +```python +from dimos.protocol.service import Configurable +from rich import print +from dataclasses import dataclass + +@dataclass +class Config(): + x: int = 3 + hello: str = "world" + +class MyClass(Configurable): + default_config = Config + config: Config + def __init__(self, **kwargs) -> None: + super().__init__(**kwargs) + +myclass1 = MyClass() +print(myclass1.config) + +# can easily override +myclass2 = MyClass(hello="override") +print(myclass2.config) + +# we will raise an error for unspecified keys +try: + myclass3 = MyClass(something="else") +except TypeError as e: + print(f"Error: {e}") + + +``` + + +``` +Config(x=3, hello='world') +Config(x=3, hello='override') +Error: Config.__init__() got an unexpected keyword argument 'something' +``` + +# Configurable Modules + +[Modules]() inherit from `Configurable`, so all of the above applies. 
Module configs should inherit from `ModuleConfig` ([`core/module.py`](/dimos/core/module.py#L40)), which includes shared configuration for all modules like transport protocols, frame_ids etc + +```python +from dataclasses import dataclass +from dimos.core import In, Module, Out, rpc, ModuleConfig +from rich import print + +@dataclass +class Config(ModuleConfig): + frame_id: str = "world" + publish_interval: float = 0 + voxel_size: float = 0.05 + device: str = "CUDA:0" + +class MyModule(Module): + default_config = Config + config: Config + + def __init__(self, **kwargs) -> None: + super().__init__(**kwargs) + print(self.config) + + +myModule = MyModule(frame_id="frame_id_override", device="CPU") + +# In production, use dimos.deploy() instead: +# myModule = dimos.deploy(MyModule, frame_id="frame_id_override") + + +``` + + +``` +Config( + rpc_transport=, + tf_transport=, + frame_id_prefix=None, + frame_id='frame_id_override', + publish_interval=0, + voxel_size=0.05, + device='CPU' +) +``` diff --git a/docs/api/reactivex.md b/docs/api/reactivex.md new file mode 100644 index 0000000000..d395a0b123 --- /dev/null +++ b/docs/api/reactivex.md @@ -0,0 +1,533 @@ +# ReactiveX (RxPY) Quick Reference + +RxPY provides composable asynchronous data streams. This is a practical guide focused on common patterns in this codebase. 
+ +## Quick Start: Using an Observable + +Given a function that returns an `Observable`, here's how to use it: + +```python session=rx +import reactivex as rx +from reactivex import operators as ops + +# Create an observable that emits 0,1,2,3,4 +source = rx.of(0, 1, 2, 3, 4) + +# Subscribe and print each value +received = [] +source.subscribe(lambda x: received.append(x)) +print("received:", received) +``` + + +``` + +received: [0, 1, 2, 3, 4] +``` + +## The `.pipe()` Pattern + +Chain operators using `.pipe()`: + +```python session=rx +# Transform values: multiply by 2, then filter > 4 +result = [] +source.pipe( + ops.map(lambda x: x * 2), + ops.filter(lambda x: x > 4), +).subscribe(lambda x: result.append(x)) +print("transformed:", result) +``` + + +``` + +transformed: [6, 8] +``` + +## Common Operators + +### Transform: `map` + +```python session=rx +rx.of(1, 2, 3).pipe( + ops.map(lambda x: f"item_{x}") +).subscribe(print) +``` + + +``` +item_1 +item_2 +item_3 + +``` + +### Filter: `filter` + +```python session=rx +rx.of(1, 2, 3, 4, 5).pipe( + ops.filter(lambda x: x % 2 == 0) +).subscribe(print) +``` + + +``` +2 +4 + +``` + +### Limit emissions: `take` + +```python session=rx +rx.of(1, 2, 3, 4, 5).pipe( + ops.take(3) +).subscribe(print) +``` + + +``` +1 +2 +3 + +``` + +### Flatten nested observables: `flat_map` + +```python session=rx +# For each input, emit multiple values +rx.of(1, 2).pipe( + ops.flat_map(lambda x: rx.of(x, x * 10, x * 100)) +).subscribe(print) +``` + + +``` +1 +10 +100 +2 +20 +200 + +``` + +## Rate Limiting + +### `sample(interval)` - Emit latest value every N seconds + +Takes the most recent value at each interval. Good for continuous streams where you want the freshest data. 
+ +```python session=rx +# Use blocking .run() to collect results properly +results = rx.interval(0.05).pipe( + ops.take(10), + ops.sample(0.2), + ops.to_list(), +).run() +print("sample() got:", results) +``` + + +``` +sample() got: [2, 6, 9] +``` + +### `throttle_first(interval)` - Emit first, then block for N seconds + +Takes the first value then ignores subsequent values for the interval. Good for user input debouncing. + +```python session=rx +results = rx.interval(0.05).pipe( + ops.take(10), + ops.throttle_first(0.15), + ops.to_list(), +).run() +print("throttle_first() got:", results) +``` + + +``` +throttle_first() got: [0, 3, 6, 9] +``` + +### Difference between sample and throttle_first + +```python session=rx +# sample: takes LATEST value at each interval tick +# throttle_first: takes FIRST value then blocks + +# With fast emissions (0,1,2,3,4,5,6,7,8,9) every 50ms: +# sample(0.2s) -> gets value at 200ms, 400ms marks -> [2, 6, 9] +# throttle_first(0.15s) -> gets 0, blocks, then 3, blocks, then 6... -> [0,3,6,9] +print("sample: latest value at each tick") +print("throttle_first: first value, then block") +``` + + +``` +sample: latest value at each tick +throttle_first: first value, then block +``` + +## What is an Observable? + +An Observable is a **lazy push-based collection**: + +- **Lazy**: Does nothing until subscribed +- **Push-based**: Producer pushes values to consumers (vs pull where consumer requests) +- **Collection**: Represents 0 to infinite values over time + +
+diagram source + +```pikchr output=assets/observable_flow.svg +color = white +fill = none + +Obs: box "observable" rad 5px fit wid 170% ht 170% +arrow right 0.3in +Pipe: box ".pipe(ops)" rad 5px fit wid 170% ht 170% +arrow right 0.3in +Sub: box ".subscribe()" rad 5px fit wid 170% ht 170% +arrow right 0.3in +Handler: box "callback" rad 5px fit wid 170% ht 170% +``` + +
+ + +![output](assets/observable_flow.svg) + +Three event types: +- `on_next(value)` - A new value +- `on_error(error)` - An error occurred, stream terminates +- `on_completed()` - Stream finished normally + +```python session=rx +rx.of(1, 2, 3).subscribe( + on_next=lambda x: print(f"value: {x}"), + on_error=lambda e: print(f"error: {e}"), + on_completed=lambda: print("done") +) +``` + + +``` +value: 1 +value: 2 +value: 3 +done + +``` + +## Backpressure + +**Problem**: A fast producer can overwhelm a slow consumer, causing memory buildup or dropped frames. + +
+diagram source + +```pikchr output=assets/backpressure.svg +color = white +fill = none + +Fast: box "Camera" "60 fps" rad 5px fit wid 130% ht 130% +arrow right 0.4in +Queue: box "queue" rad 5px fit wid 170% ht 170% +arrow right 0.4in +Slow: box "ML Model" "2 fps" rad 5px fit wid 130% ht 130% + +text "items pile up!" at (Queue.x, Queue.y - 0.45in) +``` + +
+ + +![output](assets/backpressure.svg) + +**Solution**: The `backpressure()` wrapper. It: +1. Shares the source among subscribers (so camera runs once) +2. Each subscriber gets the **latest** value when ready (skips stale data) +3. Processing happens on thread pool (won't block source) + +```python session=bp +import time +import reactivex as rx +from reactivex import operators as ops +from reactivex.scheduler import ThreadPoolScheduler +from dimos.utils.reactive import backpressure + +scheduler = ThreadPoolScheduler(max_workers=4) + +# Simulate fast source +source = rx.interval(0.05).pipe(ops.take(20)) +safe = backpressure(source, scheduler=scheduler) + +fast_results = [] +slow_results = [] + +safe.subscribe(lambda x: fast_results.append(x)) + +def slow_handler(x): + time.sleep(0.15) + slow_results.append(x) + +safe.subscribe(slow_handler) + +time.sleep(1.5) +print(f"fast got {len(fast_results)} items: {fast_results[:5]}...") +print(f"slow got {len(slow_results)} items (skipped {len(fast_results) - len(slow_results)})") +scheduler.executor.shutdown(wait=True) +``` + + +``` + + +fast got 20 items: [0, 1, 2, 3, 4]... +slow got 7 items (skipped 13) +``` + +### How it works + +
+diagram source + +```pikchr output=assets/backpressure_solution.svg +color = white +fill = none +linewid = 0.3in + +Source: box "Camera" "60 fps" rad 5px fit wid 170% ht 170% +arrow +Core: box "replay(1)" "ref_count()" rad 5px fit wid 170% ht 170% +arrow from Core.e right 0.3in then up 0.35in then right 0.3in +Fast: box "Fast Sub" rad 5px fit wid 170% ht 170% +arrow from Core.e right 0.3in then down 0.35in then right 0.3in +SlowPre: box "LATEST" rad 5px fit wid 170% ht 170% +arrow +Slow: box "Slow Sub" rad 5px fit wid 170% ht 170% +``` + +
+ + +![output](assets/backpressure_solution.svg) + +The `LATEST` strategy means: when the slow subscriber finishes processing, it gets whatever the most recent value is, skipping any values that arrived while it was busy. + +### Usage in modules + +Most module streams return backpressured observables by default via `ObservableMixin`: + +```python session=bp +from dimos.core.stream import ObservableMixin + +# .observable() returns backpressured by default +# .pure_observable() returns raw stream without backpressure +print("ObservableMixin methods:", [m for m in dir(ObservableMixin) if not m.startswith('_')]) +``` + + +``` +ObservableMixin methods: ['get_next', 'hot_latest', 'observable', 'pure_observable'] +``` + +## Getting Values Synchronously + +### `getter_streaming()` - Continuously updated latest value + +Returns a callable that always returns the most recent value: + +```python session=sync +import time +import reactivex as rx +from reactivex import operators as ops +from dimos.utils.reactive import getter_streaming + +source = rx.interval(0.1).pipe(ops.take(10)) +get_val = getter_streaming(source, timeout=5.0) + +print("first call:", get_val()) +time.sleep(0.35) +print("after 350ms:", get_val()) +time.sleep(0.35) +print("after 700ms:", get_val()) + +get_val.dispose() +``` + + +``` +first call: 0 +after 350ms: 3 +after 700ms: 6 +``` + + +### `getter_ondemand()` - Fresh value each call + +Each call subscribes, waits for one value, and unsubscribes: + +```python session=sync +from dimos.utils.reactive import getter_ondemand + +source = rx.of(0, 1, 2, 3, 4) +get_val = getter_ondemand(source, timeout=5.0) + +# Each call re-subscribes to the cold observable +print("call 1:", get_val()) +print("call 2:", get_val()) +print("call 3:", get_val()) +``` + + +``` +call 1: 0 +call 2: 0 +call 3: 0 +``` + + +## Creating Observables + +### From callback-based APIs + +```python session=create +import reactivex as rx +from reactivex import operators as ops +from 
dimos.utils.reactive import callback_to_observable + +class MockSensor: + def __init__(self): + self._callbacks = [] + def register(self, cb): + self._callbacks.append(cb) + def unregister(self, cb): + self._callbacks.remove(cb) + def emit(self, value): + for cb in self._callbacks: + cb(value) + +sensor = MockSensor() + +obs = callback_to_observable( + start=sensor.register, + stop=sensor.unregister +) + +received = [] +sub = obs.subscribe(lambda x: received.append(x)) + +sensor.emit("reading_1") +sensor.emit("reading_2") +print("received:", received) + +sub.dispose() +print("callbacks after dispose:", len(sensor._callbacks)) +``` + + +``` +received: ['reading_1', 'reading_2'] +callbacks after dispose: 0 +``` + +### From scratch with `rx.create` + +```python session=create +from reactivex.disposable import Disposable + +def custom_subscribe(observer, scheduler=None): + observer.on_next("first") + observer.on_next("second") + observer.on_completed() + return Disposable(lambda: print("cleaned up")) + +obs = rx.create(custom_subscribe) + +results = [] +obs.subscribe( + on_next=lambda x: results.append(x), + on_completed=lambda: results.append("DONE") +) +print("results:", results) +``` + + +``` +cleaned up + +results: ['first', 'second', 'DONE'] +``` + +## Disposing Subscriptions + +Always dispose subscriptions when done to prevent leaks: + +```python session=dispose +import time +import reactivex as rx +from reactivex import operators as ops + +source = rx.interval(0.1).pipe(ops.take(100)) +received = [] + +subscription = source.subscribe(lambda x: received.append(x)) +time.sleep(0.25) +subscription.dispose() +time.sleep(0.2) + +print(f"received {len(received)} items before dispose") +``` + + +``` +received 2 items before dispose +``` + +For multiple subscriptions, use `CompositeDisposable`: + +```python session=dispose +from reactivex.disposable import CompositeDisposable + +disposables = CompositeDisposable() + +s1 = rx.of(1,2,3).subscribe(lambda x: None) +s2 = 
rx.of(4,5,6).subscribe(lambda x: None) + +disposables.add(s1) +disposables.add(s2) + +print("subscriptions:", len(disposables)) +disposables.dispose() +print("after dispose:", disposables.is_disposed) +``` + + +``` +subscriptions: 2 +after dispose: True +``` + +## Reference + +| Operator | Purpose | Example | +|----------|---------|---------| +| `map(fn)` | Transform each value | `ops.map(lambda x: x * 2)` | +| `filter(pred)` | Keep values matching predicate | `ops.filter(lambda x: x > 0)` | +| `take(n)` | Take first n values | `ops.take(10)` | +| `first()` | Take first value only | `ops.first()` | +| `sample(sec)` | Emit latest every interval | `ops.sample(0.5)` | +| `throttle_first(sec)` | Emit first, block for interval | `ops.throttle_first(0.5)` | +| `flat_map(fn)` | Map + flatten nested observables | `ops.flat_map(lambda x: rx.of(x, x))` | +| `observe_on(sched)` | Switch scheduler | `ops.observe_on(pool_scheduler)` | +| `replay(n)` | Cache last n values for late subscribers | `ops.replay(buffer_size=1)` | +| `ref_count()` | Auto-connect/disconnect shared observable | `ops.ref_count()` | +| `share()` | Shorthand for `publish().ref_count()` | `ops.share()` | +| `timeout(sec)` | Error if no value within timeout | `ops.timeout(5.0)` | + +See [RxPY documentation](https://rxpy.readthedocs.io/) for complete operator reference. diff --git a/docs/api/temporal_alignment.md b/docs/api/temporal_alignment.md new file mode 100644 index 0000000000..ed9208e05e --- /dev/null +++ b/docs/api/temporal_alignment.md @@ -0,0 +1,14 @@ +# Temporal Message Alignment + +We often have multiple sensors emitting data at different rates, with different latencies etc. 

+
+For perception we'd often like to align these datapoints temporally (for example, to project an RGB image into a lidar pointcloud).
+
+```python
+self.detection_stream_3d = align_timestamped(
+    backpressure(self.detection_stream_2d()),
+    self.pointcloud.observable(),
+    match_tolerance=0.25,
+    buffer_size=20.0,
+).pipe(ops.map(detection2d_to_3d))
+```
diff --git a/docs/api/transforms.md b/docs/api/transforms.md
new file mode 100644
index 0000000000..4065cabc01
--- /dev/null
+++ b/docs/api/transforms.md
@@ -0,0 +1,469 @@
+# Transforms
+
+## The Problem: Everything Measures from Its Own Perspective
+
+Imagine your robot has an RGB-D camera—a camera that captures both color images and depth (distance to each pixel). These are common in robotics: Intel RealSense, Microsoft Kinect, and similar sensors.
+
+The camera spots a coffee mug at pixel (320, 240), and the depth sensor says it's 1.2 meters away. You want the robot arm to pick it up—but the arm doesn't understand pixels or camera-relative distances. It needs coordinates in its own workspace: "move to position (0.8, 0.3, 0.1) meters from my base."
+
+To convert camera measurements to arm coordinates, you need to know:
+- The camera's intrinsic parameters (focal length, sensor size) to convert pixels to a 3D direction
+- The depth value to get the full 3D position relative to the camera
+- Where the camera is mounted relative to the arm, and at what angle
+
+This chain of conversions—(pixels + depth) → 3D point in camera frame → robot coordinates—is what **transforms** handle.
+
+diagram source + +```pikchr output=assets/transforms_tree.svg +color = white +fill = none + +# Root (left side) +W: box "world" rad 5px fit wid 170% ht 170% +arrow right 0.4in +RB: box "robot_base" rad 5px fit wid 170% ht 170% + +# Camera branch (top) +arrow from RB.e right 0.3in then up 0.4in then right 0.3in +CL: box "camera_link" rad 5px fit wid 170% ht 170% +arrow right 0.4in +CO: box "camera_optical" rad 5px fit wid 170% ht 170% +text "mug here" small italic at (CO.s.x, CO.s.y - 0.25in) + +# Arm branch (bottom) +arrow from RB.e right 0.3in then down 0.4in then right 0.3in +AB: box "arm_base" rad 5px fit wid 170% ht 170% +arrow right 0.4in +GR: box "gripper" rad 5px fit wid 170% ht 170% +text "target here" small italic at (GR.s.x, GR.s.y - 0.25in) +``` + +
+ + +![output](assets/transforms_tree.svg) + + +Each arrow in this tree is a transform. To get the mug's position in gripper coordinates, you chain transforms through their common parent: camera → robot_base → arm → gripper. + +## What's a Coordinate Frame? + +A **coordinate frame** is simply a point of view—an origin point and a set of axes (X, Y, Z) from which you measure positions and orientations. + +Think of it like giving directions: +- **GPS** says you're at 37.7749° N, 122.4194° W +- The **coffee shop floor plan** says "table 5 is 3 meters from the entrance" +- Your **friend** says "I'm two tables to your left" + +These all describe positions in the same physical space, but from different reference points. Each is a coordinate frame. + +In a robot: +- The **camera** measures in pixels, or in meters relative to its lens +- The **LIDAR** measures distances from its own mounting point +- The **robot arm** thinks in terms of its base or end-effector position +- The **world** has a fixed coordinate system everything lives in + +Each sensor, joint, and reference point has its own frame. + +## The Transform Class + +The `Transform` class at [`geometry_msgs/Transform.py`](/dimos/msgs/geometry_msgs/Transform.py#L21) represents a spatial transformation with: + +- `frame_id` - The parent frame name +- `child_frame_id` - The child frame name +- `translation` - A `Vector3` (x, y, z) offset +- `rotation` - A `Quaternion` (x, y, z, w) orientation +- `ts` - Timestamp for temporal lookups + +```python +from dimos.msgs.geometry_msgs import Transform, Vector3, Quaternion + +# Camera 0.5m forward and 0.3m up from base, no rotation +camera_transform = Transform( + translation=Vector3(0.5, 0.0, 0.3), + rotation=Quaternion(0.0, 0.0, 0.0, 1.0), # Identity rotation + frame_id="base_link", + child_frame_id="camera_link", +) +print(camera_transform) +``` + + +``` +base_link -> camera_link + Translation: → Vector Vector([0.5 0. 
0.3]) + Rotation: Quaternion(0.000000, 0.000000, 0.000000, 1.000000) +``` + + +### Transform Operations + +Transforms can be composed and inverted: + +```python +from dimos.msgs.geometry_msgs import Transform, Vector3, Quaternion + +# Create two transforms +t1 = Transform( + translation=Vector3(1.0, 0.0, 0.0), + rotation=Quaternion(0.0, 0.0, 0.0, 1.0), + frame_id="base_link", + child_frame_id="camera_link", +) +t2 = Transform( + translation=Vector3(0.0, 0.5, 0.0), + rotation=Quaternion(0.0, 0.0, 0.0, 1.0), + frame_id="camera_link", + child_frame_id="end_effector", +) + +# Compose: base_link -> camera -> end_effector +t3 = t1 + t2 +print(f"Composed: {t3.frame_id} -> {t3.child_frame_id}") +print(f"Translation: ({t3.translation.x}, {t3.translation.y}, {t3.translation.z})") + +# Inverse: if t goes A -> B, -t goes B -> A +t_inverse = -t1 +print(f"Inverse: {t_inverse.frame_id} -> {t_inverse.child_frame_id}") +``` + + +``` +Composed: base_link -> end_effector +Translation: (1.0, 0.5, 0.0) +Inverse: camera_link -> base_link +``` + + +### Converting to Matrix Form + +For integration with libraries like NumPy or OpenCV: + +```python +from dimos.msgs.geometry_msgs import Transform, Vector3, Quaternion + +t = Transform( + translation=Vector3(1.0, 2.0, 3.0), + rotation=Quaternion(0.0, 0.0, 0.0, 1.0), +) +matrix = t.to_matrix() +print("4x4 transformation matrix:") +print(matrix) +``` + + +``` +4x4 transformation matrix: +[[1. 0. 0. 1.] + [0. 1. 0. 2.] + [0. 0. 1. 3.] + [0. 0. 0. 1.]] +``` + + + +## Frame IDs in Modules + +Modules in DimOS automatically get a `frame_id` property. 
This is controlled by two config options in [`core/module.py`](/dimos/core/module.py#L78): + +- `frame_id` - The base frame name (defaults to the class name) +- `frame_id_prefix` - Optional prefix for namespacing + +```python +from dimos.core import Module, ModuleConfig +from dataclasses import dataclass + +@dataclass +class MyModuleConfig(ModuleConfig): + frame_id: str = "sensor_link" + frame_id_prefix: str | None = None + +class MySensorModule(Module[MyModuleConfig]): + default_config = MyModuleConfig + +# With default config: +sensor = MySensorModule() +print(f"Default frame_id: {sensor.frame_id}") + +# With prefix (useful for multi-robot scenarios): +sensor2 = MySensorModule(frame_id_prefix="robot1") +print(f"With prefix: {sensor2.frame_id}") +``` + + +``` +Default frame_id: sensor_link +With prefix: robot1/sensor_link +``` + + +## The TF Service + +Every module has access to `self.tf`, a transform service that: + +- **Publishes** transforms to the system +- **Looks up** transforms between any two frames +- **Buffers** historical transforms for temporal queries + +The TF service is implemented in [`tf.py`](/dimos/protocol/tf/tf.py) and is lazily initialized on first access. + +### Multi-Module Transform Example + +This example demonstrates how multiple modules publish and receive transforms. Three modules work together: + +1. **RobotBaseModule** - Publishes `world -> base_link` (robot's position in the world) +2. **CameraModule** - Publishes `base_link -> camera_link` (camera mounting position) and `camera_link -> camera_optical` (optical frame convention) +3. 
**PerceptionModule** - Looks up transforms between any frames + +```python ansi=false +import time +import reactivex as rx +from reactivex import operators as ops +from dimos.core import Module, rpc, start +from dimos.msgs.geometry_msgs import Quaternion, Transform, Vector3 + +class RobotBaseModule(Module): + """Publishes the robot's position in the world frame at 10Hz.""" + def __init__(self, **kwargs: object) -> None: + super().__init__(**kwargs) + + @rpc + def start(self) -> None: + super().start() + + def publish_pose(_): + robot_pose = Transform( + translation=Vector3(2.5, 3.0, 0.0), + rotation=Quaternion(0.0, 0.0, 0.0, 1.0), + frame_id="world", + child_frame_id="base_link", + ts=time.time(), + ) + self.tf.publish(robot_pose) + + self._disposables.add( + rx.interval(0.1).subscribe(publish_pose) + ) + +class CameraModule(Module): + """Publishes camera transforms at 10Hz.""" + @rpc + def start(self) -> None: + super().start() + + def publish_transforms(_): + camera_mount = Transform( + translation=Vector3(1.0, 0.0, 0.3), + rotation=Quaternion(0.0, 0.0, 0.0, 1.0), + frame_id="base_link", + child_frame_id="camera_link", + ts=time.time(), + ) + optical_frame = Transform( + translation=Vector3(0.0, 0.0, 0.0), + rotation=Quaternion(-0.5, 0.5, -0.5, 0.5), + frame_id="camera_link", + child_frame_id="camera_optical", + ts=time.time(), + ) + self.tf.publish(camera_mount, optical_frame) + + self._disposables.add( + rx.interval(0.1).subscribe(publish_transforms) + ) + + +class PerceptionModule(Module): + """Receives transforms and performs lookups.""" + + def start(self) -> None: + # this is just to init transforms system + # touching the property for the first time enables the system for this module. 
+ # transform lookups normally happen in fast loops in IRL modules + _ = self.tf + + @rpc + def lookup(self) -> None: + + # will pretty print information on transforms in the buffer + print(self.tf) + + direct = self.tf.get("world", "base_link") + print(f"Direct: robot is at ({direct.translation.x}, {direct.translation.y})m in world\n") + + # Chained lookup - automatically composes world->base->camera->optical + chained = self.tf.get("world", "camera_optical") + print(f"Chained: {chained}\n") + + # Inverse lookup - automatically inverts direction + inverse = self.tf.get("camera_optical", "world") + print(f"Inverse: {inverse}\n") + + print("Transform tree:") + print(self.tf.graph()) + + +if __name__ == "__main__": + dimos = start(3) + + # Deploy and start modules + robot = dimos.deploy(RobotBaseModule) + camera = dimos.deploy(CameraModule) + perception = dimos.deploy(PerceptionModule) + + robot.start() + camera.start() + perception.start() + + time.sleep(1.0) + + perception.lookup() + + dimos.stop() + +``` + + +``` +Initialized dimos local cluster with 3 workers, memory limit: auto +2025-12-29T12:47:01.433394Z [info ] Deployed module. [dimos/core/__init__.py] module=RobotBaseModule worker_id=1 +2025-12-29T12:47:01.603269Z [info ] Deployed module. [dimos/core/__init__.py] module=CameraModule worker_id=0 +2025-12-29T12:47:01.698970Z [info ] Deployed module. [dimos/core/__init__.py] module=PerceptionModule worker_id=2 +LCMTF(3 buffers): + TBuffer(world -> base_link, 10 msgs, 0.90s [2025-12-29 20:47:01 - 2025-12-29 20:47:02]) + TBuffer(base_link -> camera_link, 9 msgs, 0.80s [2025-12-29 20:47:01 - 2025-12-29 20:47:02]) + TBuffer(camera_link -> camera_optical, 9 msgs, 0.80s [2025-12-29 20:47:01 - 2025-12-29 20:47:02]) +Direct: robot is at (2.5, 3.0)m in world + +Chained: world -> camera_optical + Translation: → Vector Vector([3.5 3. 
0.3]) + Rotation: Quaternion(-0.500000, 0.500000, -0.500000, 0.500000) + +Inverse: camera_optical -> world + Translation: → Vector Vector([ 3. 0.3 -3.5]) + Rotation: Quaternion(0.500000, -0.500000, 0.500000, 0.500000) + +Transform tree: +┌─────┐ +│world│ +└┬────┘ +┌▽────────┐ +│base_link│ +└┬────────┘ +┌▽──────────┐ +│camera_link│ +└┬──────────┘ +┌▽─────────────┐ +│camera_optical│ +└──────────────┘ +``` + + +You can also run `foxglove-studio-bridge` in the next terminal (binary provided by dimos and should be in your py env) and `foxglove-studio` to view these transforms in 3D (TODO we need to update this for rerun) + +![transforms](assets/transforms.png) + +Key points: + +- **Automatic broadcasting**: `self.tf.publish()` broadcasts via LCM to all modules +- **Chained lookups**: TF finds paths through the tree automatically +- **Inverse lookups**: Request transforms in either direction +- **Temporal buffering**: Transforms are timestamped and buffered (default 10s) for sensor fusion + +The transform tree from the example above, showing which module publishes each transform: + +
+diagram source + +```pikchr output=assets/transforms_modules.svg +color = white +fill = none + +# Frame boxes +W: box "world" rad 5px fit wid 170% ht 170% +A1: arrow right 0.4in +BL: box "base_link" rad 5px fit wid 170% ht 170% +A2: arrow right 0.4in +CL: box "camera_link" rad 5px fit wid 170% ht 170% +A3: arrow right 0.4in +CO: box "camera_optical" rad 5px fit wid 170% ht 170% + +# RobotBaseModule box - encompasses world->base_link +box width (BL.e.x - W.w.x + 0.15in) height 0.7in \ + at ((W.w.x + BL.e.x)/2, W.y - 0.05in) \ + rad 10px color 0x6699cc fill none +text "RobotBaseModule" italic at ((W.x + BL.x)/2, W.n.y + 0.25in) + +# CameraModule box - encompasses camera_link->camera_optical (starts after base_link) +box width (CO.e.x - BL.e.x + 0.1in) height 0.7in \ + at ((BL.e.x + CO.e.x)/2, CL.y + 0.05in) \ + rad 10px color 0xcc9966 fill none +text "CameraModule" italic at ((CL.x + CO.x)/2, CL.s.y - 0.25in) +``` + + +
+ + +![output](assets/transforms_modules.svg) + + +# Internals + +## Transform Buffer + +`self.tf` on module is a transform buffer. This is a standalone class that maintains a temporal buffer of transforms (default 10 seconds) allowing queries at past timestamps, you can use it directly: + +```python +from dimos.protocol.tf import TF +from dimos.msgs.geometry_msgs import Transform, Vector3, Quaternion +import time + +tf = TF(autostart=False) + +# Simulate transforms at different times +for i in range(5): + t = Transform( + translation=Vector3(float(i), 0.0, 0.0), + rotation=Quaternion(0.0, 0.0, 0.0, 1.0), + frame_id="base_link", + child_frame_id="camera_link", + ts=time.time() + i * 0.1, + ) + tf.receive_transform(t) + +# Query the latest transform +result = tf.get("base_link", "camera_link") +print(f"Latest transform: x={result.translation.x}") +print(f"Buffer has {len(tf.buffers)} transform pair(s)") +print(tf) +``` + + +``` +Latest transform: x=4.0 +Buffer has 1 transform pair(s) +LCMTF(1 buffers): + TBuffer(base_link -> camera_link, 5 msgs, 0.40s [2025-12-29 18:19:18 - 2025-12-29 18:19:18]) +``` + + +This is essential for sensor fusion where you need to know where the camera was when an image was captured, not where it is now. 
+ + +## Further Reading + +For a visual introduction to transforms and coordinate frames: +- [Coordinate Transforms (YouTube)](https://www.youtube.com/watch?v=NGPn9nvLPmg) + +For the mathematical foundations, the ROS documentation provides detailed background: + +- [ROS tf2 Concepts](http://wiki.ros.org/tf2) +- [ROS REP 103 - Standard Units and Coordinate Conventions](https://www.ros.org/reps/rep-0103.html) +- [ROS REP 105 - Coordinate Frames for Mobile Platforms](https://www.ros.org/reps/rep-0105.html) + +See also: +- [Modules](/docs/concepts/modules/index.md) for understanding the module system +- [Configuration](/docs/concepts/configuration.md) for module configuration patterns diff --git a/docs/concepts/assets/camera_module.svg b/docs/concepts/assets/camera_module.svg new file mode 100644 index 0000000000..48cc4286db --- /dev/null +++ b/docs/concepts/assets/camera_module.svg @@ -0,0 +1,87 @@ + + + + + + +module + +cluster_outputs + + +cluster_rpcs + +RPCs + + +cluster_skills + +Skills + + + +CameraModule + +CameraModule + + + +out_color_image + + + +color_image:Image + + + +CameraModule->out_color_image + + + + + +out_camera_info + + + +camera_info:CameraInfo + + + +CameraModule->out_camera_info + + + + + +rpc_set_transport + +set_transport(stream_name: str, transport: Transport) -> bool + + + +CameraModule->rpc_set_transport + + + + +skill_video_stream + +video_stream stream=passive reducer=latest_reducer + + + +CameraModule->skill_video_stream + + + + +rpc_start + +start() + + + diff --git a/docs/concepts/assets/go2_agentic.svg b/docs/concepts/assets/go2_agentic.svg new file mode 100644 index 0000000000..f20c1b5ac5 --- /dev/null +++ b/docs/concepts/assets/go2_agentic.svg @@ -0,0 +1,260 @@ + + + + + + +modules + +cluster_agents + +agents + + +cluster_mapping + +mapping + + +cluster_navigation + +navigation + + +cluster_perception + +perception + + +cluster_robot + +robot + + + +HumanInput + +HumanInput + + + +LlmAgent + +LlmAgent + + + 
+NavigationSkillContainer + +NavigationSkillContainer + + + +SpeakSkill + +SpeakSkill + + + +WebInput + +WebInput + + + +CostMapper + +CostMapper + + + +chan_global_costmap_OccupancyGrid + + + +global_costmap:OccupancyGrid + + + +CostMapper->chan_global_costmap_OccupancyGrid + + + + +VoxelGridMapper + +VoxelGridMapper + + + +chan_global_map_LidarMessage + + + +global_map:LidarMessage + + + +VoxelGridMapper->chan_global_map_LidarMessage + + + + +ReplanningAStarPlanner + +ReplanningAStarPlanner + + + +chan_cmd_vel_Twist + + + +cmd_vel:Twist + + + +ReplanningAStarPlanner->chan_cmd_vel_Twist + + + + +chan_goal_reached_Bool + + + +goal_reached:Bool + + + +ReplanningAStarPlanner->chan_goal_reached_Bool + + + + +WavefrontFrontierExplorer + +WavefrontFrontierExplorer + + + +chan_goal_request_PoseStamped + + + +goal_request:PoseStamped + + + +WavefrontFrontierExplorer->chan_goal_request_PoseStamped + + + + +SpatialMemory + +SpatialMemory + + + +FoxgloveBridge + +FoxgloveBridge + + + +GO2Connection + +GO2Connection + + + +chan_color_image_Image + + + +color_image:Image + + + +GO2Connection->chan_color_image_Image + + + + +chan_lidar_LidarMessage + + + +lidar:LidarMessage + + + +GO2Connection->chan_lidar_LidarMessage + + + + +UnitreeSkillContainer + +UnitreeSkillContainer + + + +chan_cmd_vel_Twist->GO2Connection + + + + + +chan_color_image_Image->NavigationSkillContainer + + + + + +chan_color_image_Image->SpatialMemory + + + + + +chan_global_costmap_OccupancyGrid->ReplanningAStarPlanner + + + + + +chan_global_costmap_OccupancyGrid->WavefrontFrontierExplorer + + + + + +chan_global_map_LidarMessage->CostMapper + + + + + +chan_goal_reached_Bool->WavefrontFrontierExplorer + + + + + +chan_goal_request_PoseStamped->ReplanningAStarPlanner + + + + + +chan_lidar_LidarMessage->VoxelGridMapper + + + + + diff --git a/docs/concepts/assets/go2_nav.svg b/docs/concepts/assets/go2_nav.svg new file mode 100644 index 0000000000..25adae5264 --- /dev/null +++ b/docs/concepts/assets/go2_nav.svg @@ 
-0,0 +1,183 @@ + + + + + + +modules + +cluster_mapping + +mapping + + +cluster_navigation + +navigation + + +cluster_robot + +robot + + + +CostMapper + +CostMapper + + + +chan_global_costmap_OccupancyGrid + + + +global_costmap:OccupancyGrid + + + +CostMapper->chan_global_costmap_OccupancyGrid + + + + +VoxelGridMapper + +VoxelGridMapper + + + +chan_global_map_LidarMessage + + + +global_map:LidarMessage + + + +VoxelGridMapper->chan_global_map_LidarMessage + + + + +ReplanningAStarPlanner + +ReplanningAStarPlanner + + + +chan_cmd_vel_Twist + + + +cmd_vel:Twist + + + +ReplanningAStarPlanner->chan_cmd_vel_Twist + + + + +chan_goal_reached_Bool + + + +goal_reached:Bool + + + +ReplanningAStarPlanner->chan_goal_reached_Bool + + + + +WavefrontFrontierExplorer + +WavefrontFrontierExplorer + + + +chan_goal_request_PoseStamped + + + +goal_request:PoseStamped + + + +WavefrontFrontierExplorer->chan_goal_request_PoseStamped + + + + +FoxgloveBridge + +FoxgloveBridge + + + +GO2Connection + +GO2Connection + + + +chan_lidar_LidarMessage + + + +lidar:LidarMessage + + + +GO2Connection->chan_lidar_LidarMessage + + + + +chan_cmd_vel_Twist->GO2Connection + + + + + +chan_global_costmap_OccupancyGrid->ReplanningAStarPlanner + + + + + +chan_global_costmap_OccupancyGrid->WavefrontFrontierExplorer + + + + + +chan_global_map_LidarMessage->CostMapper + + + + + +chan_goal_reached_Bool->WavefrontFrontierExplorer + + + + + +chan_goal_request_PoseStamped->ReplanningAStarPlanner + + + + + +chan_lidar_LidarMessage->VoxelGridMapper + + + + + diff --git a/docs/concepts/lcm.md b/docs/concepts/lcm.md new file mode 100644 index 0000000000..345407e23a --- /dev/null +++ b/docs/concepts/lcm.md @@ -0,0 +1,160 @@ + +# LCM Messages + +[LCM (Lightweight Communications and Marshalling)](https://github.com/lcm-proj/lcm) is a message passing system with bindings for many languages (C, C++, Python, Java, Lua, Go). 
While LCM includes a UDP multicast transport, its real power is the message definition format - classes that can encode themselves to compact binary representation. + +Dimos uses LCM message definitions for all inter-module communication. Because messages serialize to binary, they can be sent over any transport - not just LCM's UDP multicast, but also shared memory, Redis, WebSockets, or any other channel. + +## dimos-lcm Package + +The `dimos-lcm` package provides base message types that mirror [ROS message definitions](https://docs.ros.org/en/melodic/api/sensor_msgs/html/index.html): + +```python session=lcm_demo ansi=false +from dimos_lcm.geometry_msgs import Vector3 as LCMVector3 +from dimos_lcm.sensor_msgs.PointCloud2 import PointCloud2 as LCMPointCloud2 + +# LCM messages can encode to binary +msg = LCMVector3() +msg.x, msg.y, msg.z = 1.0, 2.0, 3.0 + +binary = msg.lcm_encode() +print(f"Encoded to {len(binary)} bytes: {binary.hex()}") + +# And decode back +decoded = LCMVector3.lcm_decode(binary) +print(f"Decoded: x={decoded.x}, y={decoded.y}, z={decoded.z}") +``` + + +``` +Encoded to 24 bytes: 000000000000f03f00000000000000400000000000000840 +Decoded: x=1.0, y=2.0, z=3.0 +``` + +## Dimos Message Overlays + +Dimos subclasses the base LCM types to add Python-friendly features while preserving binary compatibility. For example, `dimos.msgs.geometry_msgs.Vector3` extends the LCM base with: + +- Multiple constructor overloads (from tuples, numpy arrays, etc.) +- Math operations (`+`, `-`, `*`, `/`, dot product, cross product) +- Conversions to numpy, quaternions, etc. 
+ +```python session=lcm_demo ansi=false +from dimos.msgs.geometry_msgs import Vector3 + +# Rich constructors +v1 = Vector3(1, 2, 3) +v2 = Vector3([4, 5, 6]) +v3 = Vector3(v1) # copy + +# Math operations +print(f"v1 + v2 = {(v1 + v2).to_tuple()}") +print(f"v1 dot v2 = {v1.dot(v2)}") +print(f"v1 x v2 = {v1.cross(v2).to_tuple()}") +print(f"|v1| = {v1.length():.3f}") + +# Still encodes to LCM binary +binary = v1.lcm_encode() +print(f"LCM encoded: {len(binary)} bytes") +``` + + +``` +v1 + v2 = (5.0, 7.0, 9.0) +v1 dot v2 = 32.0 +v1 x v2 = (-3.0, 6.0, -3.0) +|v1| = 3.742 +LCM encoded: 24 bytes +``` + +## PointCloud2 with Open3D + +A more complex example is `PointCloud2`, which wraps Open3D point clouds while maintaining LCM binary compatibility: + +```python session=lcm_demo ansi=false +import numpy as np +from dimos.msgs.sensor_msgs import PointCloud2 + +# Create from numpy +points = np.random.rand(100, 3).astype(np.float32) +pc = PointCloud2.from_numpy(points, frame_id="camera") + +print(f"PointCloud: {len(pc)} points, frame={pc.frame_id}") +print(f"Center: {pc.center}") + +# Access as Open3D (for visualization, processing) +o3d_cloud = pc.pointcloud +print(f"Open3D type: {type(o3d_cloud).__name__}") + +# Encode to LCM binary (for transport) +binary = pc.lcm_encode() +print(f"LCM encoded: {len(binary)} bytes") + +# Decode back +pc2 = PointCloud2.lcm_decode(binary) +print(f"Decoded: {len(pc2)} points") +``` + + +``` +PointCloud: 100 points, frame=camera +Center: ↗ Vector (Vector([0.49166839, 0.50896413, 0.48393918])) +Open3D type: PointCloud +LCM encoded: 1716 bytes +Decoded: 100 points +``` + +## Transport Independence + +Since LCM messages encode to bytes, you can use them over any transport: + +```python session=lcm_demo ansi=false +from dimos.msgs.geometry_msgs import Vector3 +from dimos.protocol.pubsub.memory import Memory +from dimos.protocol.pubsub.shmpubsub import PickleSharedMemory + +# Same message works with any transport +msg = Vector3(1, 2, 3) + +# 
In-memory (same process) +memory = Memory() +received = [] +memory.subscribe("velocity", lambda m, t: received.append(m)) +memory.publish("velocity", msg) +print(f"Memory transport: received {received[0]}") + +# The LCM binary can also be sent raw over any byte-oriented channel +binary = msg.lcm_encode() +# send over websocket, redis, tcp, file, etc. +decoded = Vector3.lcm_decode(binary) +print(f"Raw binary transport: decoded {decoded}") +``` + + +``` +Memory transport: received ↗ Vector (Vector([1. 2. 3.])) +Raw binary transport: decoded ↗ Vector (Vector([1. 2. 3.])) +``` + +## Available Message Types + +Dimos provides overlays for common message types: + +| Package | Messages | +|---------|----------| +| `geometry_msgs` | `Vector3`, `Quaternion`, `Pose`, `Twist`, `Transform` | +| `sensor_msgs` | `Image`, `PointCloud2`, `CameraInfo`, `LaserScan` | +| `nav_msgs` | `Odometry`, `Path`, `OccupancyGrid` | +| `vision_msgs` | `Detection2D`, `Detection3D`, `BoundingBox2D` | + +Base LCM types (without Dimos extensions) are available in `dimos_lcm.*`. + +## Creating Custom Message Types + +To create a new message type: + +1. Define the LCM message in `.lcm` format (or use existing `dimos_lcm` base) +2. Create a Python overlay that subclasses the LCM type +3. Add `lcm_encode()` and `lcm_decode()` methods if custom serialization is needed + +See [`PointCloud2.py`](/dimos/msgs/sensor_msgs/PointCloud2.py) and [`Vector3.py`](/dimos/msgs/geometry_msgs/Vector3.py) for examples. diff --git a/docs/concepts/modules.md b/docs/concepts/modules.md new file mode 100644 index 0000000000..aeaee8c9b9 --- /dev/null +++ b/docs/concepts/modules.md @@ -0,0 +1,176 @@ + +# Dimos Modules + +Modules are subsystems on a robot that operate autonomously and communicate to other subsystems using standardized messages. 
+ +Some examples of modules are: + +- Webcam (outputs image) +- Navigation (inputs a map and a target, outputs a path) +- Detection (takes an image and a vision model like YOLO, outputs a stream of detections) + +Below is an example of a structure for controlling a robot. Black blocks represent modules and colored lines are connections and message types. It's okay if this doesn't make sense now, it will by the end of this document. + +```python output=assets/go2_nav.svg +from dimos.core.introspection import to_svg +from dimos.robot.unitree_webrtc.unitree_go2_blueprints import nav +to_svg(nav, "assets/go2_nav.svg") +``` + + +![output](assets/go2_nav.svg) + +## Camera Module + +Let's learn how to build stuff like the above, starting with a simple camera module. + +```python session=camera_module_demo output=assets/camera_module.svg +from dimos.hardware.sensors.camera.module import CameraModule +from dimos.core.introspection import to_svg +to_svg(CameraModule.module_info(), "assets/camera_module.svg") +``` + + +![output](assets/camera_module.svg) + +We can always also print out Module I/O quickly into console via `.io()` call, we will do this from now on. 

+```python session=camera_module_demo ansi=false
+print(CameraModule.io())
+```
+
+
+```
+┌┴─────────────┐
+│ CameraModule │
+└┬─────────────┘
+ ├─ color_image: Image
+ ├─ camera_info: CameraInfo
+ │
+ ├─ RPC set_transport(stream_name: str, transport: Transport) -> bool
+ ├─ RPC start()
+ │
+ ├─ Skill video_stream (stream=passive, reducer=latest_reducer, output=image)
+```
+
+We can see that the camera module outputs two streams:
+
+- `color_image` with [sensor_msgs.Image](https://docs.ros.org/en/melodic/api/sensor_msgs/html/msg/Image.html) type
+- `camera_info` with [sensor_msgs.CameraInfo](https://docs.ros.org/en/melodic/api/sensor_msgs/html/msg/CameraInfo.html) type
+
+It offers two RPC calls: `set_transport()` and `start()`.
+
+As well as an agentic [Skill](skills.md) called `video_stream` (more about this later, in the [Skills Tutorial](skills.md)).
+
+We can start this module and explore the output of its streams in real time (this will use your webcam).
+
+```python session=camera_module_demo ansi=false
+import time
+
+camera = CameraModule()
+camera.start()
+# now this module runs in our main loop in a thread. 
we can observe its outputs
+
+print(camera.color_image)
+
+camera.color_image.subscribe(print)
+time.sleep(0.5)
+camera.stop()
+```
+
+
+```
+Out color_image[Image] @ CameraModule
+Image(shape=(480, 640, 3), format=RGB, dtype=uint8, dev=cpu, ts=2025-12-31 15:54:16)
+Image(shape=(480, 640, 3), format=RGB, dtype=uint8, dev=cpu, ts=2025-12-31 15:54:16)
+Image(shape=(480, 640, 3), format=RGB, dtype=uint8, dev=cpu, ts=2025-12-31 15:54:17)
+Image(shape=(480, 640, 3), format=RGB, dtype=uint8, dev=cpu, ts=2025-12-31 15:54:17)
+Image(shape=(480, 640, 3), format=RGB, dtype=uint8, dev=cpu, ts=2025-12-31 15:54:17)
+Image(shape=(480, 640, 3), format=RGB, dtype=uint8, dev=cpu, ts=2025-12-31 15:54:17)
+Image(shape=(480, 640, 3), format=RGB, dtype=uint8, dev=cpu, ts=2025-12-31 15:54:17)
+Image(shape=(480, 640, 3), format=RGB, dtype=uint8, dev=cpu, ts=2025-12-31 15:54:17)
+Image(shape=(480, 640, 3), format=RGB, dtype=uint8, dev=cpu, ts=2025-12-31 15:54:17)
+Image(shape=(480, 640, 3), format=RGB, dtype=uint8, dev=cpu, ts=2025-12-31 15:54:17)
+```
+
+
+## Connecting modules
+
+Let's load a standard 2D detector module and hook it up to a camera.
+
+```python ansi=false session=detection_module
+from dimos.perception.detection.module2D import Detection2DModule, Config
+print(Detection2DModule.io())
+```
+
+
+```
+ ├─ image: Image
+┌┴──────────────────┐
+│ Detection2DModule │
+└┬──────────────────┘
+ ├─ detections: Detection2DArray
+ ├─ annotations: ImageAnnotations
+ ├─ detected_image_0: Image
+ ├─ detected_image_1: Image
+ ├─ detected_image_2: Image
+ │
+ ├─ RPC set_transport(stream_name: str, transport: Transport) -> bool
+ ├─ RPC start() -> None
+ ├─ RPC stop() -> None
+```
+
+TODO: add easy way to print config
+
+Looks like the detector just needs an image input and outputs detection and annotation messages — let's connect it to a camera. 
+ +```pythonx ansi=false +import time +from dimos.perception.detection.module2D import Detection2DModule, Config +from dimos.hardware.sensors.camera.module import CameraModule + +camera = CameraModule() +detector = Detection2DModule() + +detector.image.connect(camera.color_image) + +camera.start() +detector.start() + +detector.detections.subscribe(print) +time.sleep(3) +detector.stop() +camera.stop() +``` + + +``` +Detection(Person(1)) +Detection(Person(1)) +Detection(Person(1)) +Detection(Person(1)) +``` + +## Distributed Execution + +As we build module structures, very quickly we'll want to utilize all cores on the machine (which python doesn't allow as a single process), and potentially distribute modules across machines or even internet. + +For this we use `dimos.core` and dimos transport protocols. + +Defining message exchange protocol and message types also gives us an ability to write models in faster languages. + +## Blueprints + +Blueprint is a pre-defined structure of interconnected modules. You can include blueprints or modules in new blueprints + +Basic unitree go2 blueprint looks like what we saw before, + +```python session=blueprints output=assets/go2_agentic.svg +from dimos.core.introspection import to_svg +from dimos.robot.unitree_webrtc.unitree_go2_blueprints import agentic + +to_svg(agentic, "assets/go2_agentic.svg") +``` + + +![output](assets/go2_agentic.svg) diff --git a/docs/concepts/transports.md b/docs/concepts/transports.md new file mode 100644 index 0000000000..fe06334fe9 --- /dev/null +++ b/docs/concepts/transports.md @@ -0,0 +1,368 @@ + +# Dimos Transports + +Transports enable communication between [modules](modules.md) across process boundaries and networks. When modules run in different processes or on different machines, they need a transport layer to exchange messages. + +While the interface is called "PubSub", transports aren't limited to traditional pub/sub services. 
A topic can be anything that identifies a communication channel - an IP address and port, a shared memory segment name, a file path, or a Redis channel. The abstraction is flexible enough to support any communication pattern that can publish and subscribe to named channels. + +## The PubSub Interface + +At the core of all transports is the `PubSub` abstract class. Any transport implementation must provide two methods: + +```python session=pubsub_demo ansi=false +from dimos.protocol.pubsub.spec import PubSub + +# The interface every transport must implement: +import inspect +print(inspect.getsource(PubSub.publish)) +print(inspect.getsource(PubSub.subscribe)) +``` + + +``` +Session process exited unexpectedly: +/home/lesh/coding/dimos/.venv/bin/python3: No module named md_babel_py.session_server + +``` + +Key points: +- `publish(topic, message)` - Send a message to all subscribers on a topic +- `subscribe(topic, callback)` - Register a callback, returns an unsubscribe function + +## Implementing a Simple Transport + +The simplest transport is `Memory`, which works within a single process: + +```python session=memory_demo ansi=false +from dimos.protocol.pubsub.memory import Memory + +# Create a memory transport +bus = Memory() + +# Track received messages +received = [] + +# Subscribe to a topic +unsubscribe = bus.subscribe("sensor/data", lambda msg, topic: received.append(msg)) + +# Publish messages +bus.publish("sensor/data", {"temperature": 22.5}) +bus.publish("sensor/data", {"temperature": 23.0}) + +print(f"Received {len(received)} messages:") +for msg in received: + print(f" {msg}") + +# Unsubscribe when done +unsubscribe() +``` + + +``` +Received 2 messages: + {'temperature': 22.5} + {'temperature': 23.0} +``` + +The full implementation is minimal - see [`memory.py`](/dimos/protocol/pubsub/memory.py) for the complete source. 
+ +## Available Transports + +Dimos includes several transport implementations: + +| Transport | Use Case | Process Boundary | Network | +|-----------|----------|------------------|---------| +| `Memory` | Testing, single process | No | No | +| `SharedMemory` | Multi-process on same machine | Yes | No | +| `LCM` | Network communication (UDP multicast) | Yes | Yes | +| `Redis` | Network communication via Redis server | Yes | Yes | + +### SharedMemory Transport + +For inter-process communication on the same machine, `SharedMemory` provides high-performance message passing: + +```python session=shm_demo ansi=false +from dimos.protocol.pubsub.shmpubsub import PickleSharedMemory + +shm = PickleSharedMemory(prefer="cpu") +shm.start() + +received = [] +shm.subscribe("test/topic", lambda msg, topic: received.append(msg)) +shm.publish("test/topic", {"data": [1, 2, 3]}) + +import time +time.sleep(0.1) # Allow message to propagate + +print(f"Received: {received}") +shm.stop() +``` + + +``` +Received: [{'data': [1, 2, 3]}] +``` + +### LCM Transport + +For network communication, LCM uses UDP multicast and supports typed messages: + +```python session=lcm_demo ansi=false +from dimos.protocol.pubsub.lcmpubsub import LCM, Topic +from dimos.msgs.geometry_msgs import Vector3 + +lcm = LCM(autoconf=True) +lcm.start() + +received = [] +topic = Topic(topic="/robot/velocity", lcm_type=Vector3) + +lcm.subscribe(topic, lambda msg, t: received.append(msg)) +lcm.publish(topic, Vector3(1.0, 0.0, 0.5)) + +import time +time.sleep(0.1) + +print(f"Received velocity: x={received[0].x}, y={received[0].y}, z={received[0].z}") +lcm.stop() +``` + + +``` +Received velocity: x=1.0, y=0.0, z=0.5 +``` + +## Encoder Mixins + +Transports can use encoder mixins to serialize messages. 
The `PubSubEncoderMixin` pattern wraps publish/subscribe to encode/decode automatically: + +```python session=encoder_demo ansi=false +from dimos.protocol.pubsub.spec import PubSubEncoderMixin, PickleEncoderMixin + +# PickleEncoderMixin provides: +# - encode(msg, topic) -> bytes (uses pickle.dumps) +# - decode(bytes, topic) -> msg (uses pickle.loads) + +# Create a transport with pickle encoding by mixing in: +from dimos.protocol.pubsub.memory import Memory + +class PickleMemory(PickleEncoderMixin, Memory): + pass + +bus = PickleMemory() +received = [] +bus.subscribe("data", lambda msg, t: received.append(msg)) +bus.publish("data", {"complex": [1, 2, 3], "nested": {"key": "value"}}) + +print(f"Received: {received[0]}") +``` + + +``` +Received: {'complex': [1, 2, 3], 'nested': {'key': 'value'}} +``` + +## Using Transports with Modules + +Modules use the `Transport` wrapper class which adapts `PubSub` to the stream interface. You can set a transport on any module stream: + +```python session=module_transport ansi=false +from dimos.core.transport import pLCMTransport, pSHMTransport + +# Transport wrappers for module streams: +# - pLCMTransport: Pickle-encoded LCM +# - LCMTransport: Native LCM encoding +# - pSHMTransport: Pickle-encoded SharedMemory +# - SHMTransport: Native SharedMemory +# - JpegShmTransport: JPEG-compressed images via SharedMemory +# - JpegLcmTransport: JPEG-compressed images via LCM + +# Example: Set a transport on a module output +# camera.set_transport("color_image", pSHMTransport("camera/color")) +print("Available transport wrappers in dimos.core.transport:") +from dimos.core import transport +print([name for name in dir(transport) if "Transport" in name]) +``` + + +``` +Available transport wrappers in dimos.core.transport: +['JpegLcmTransport', 'JpegShmTransport', 'LCMTransport', 'PubSubTransport', 'SHMTransport', 'ZenohTransport', 'pLCMTransport', 'pSHMTransport'] +``` + +## Testing Custom Transports + +The test suite in 
[`pubsub/test_spec.py`](/dimos/protocol/pubsub/test_spec.py) uses pytest parametrization to run the same tests against all transport implementations. To add your custom transport to the test grid: + +```python session=test_grid ansi=false +# The test grid pattern from test_spec.py: +test_pattern = """ +from contextlib import contextmanager + +@contextmanager +def my_transport_context(): + transport = MyCustomTransport() + transport.start() + yield transport + transport.stop() + +# Add to testdata list: +testdata.append( + (my_transport_context, "my_topic", ["value1", "value2", "value3"]) +) +""" +print(test_pattern) +``` + + +``` + +from contextlib import contextmanager + +@contextmanager +def my_transport_context(): + transport = MyCustomTransport() + transport.start() + yield transport + transport.stop() + +# Add to testdata list: +testdata.append( + (my_transport_context, "my_topic", ["value1", "value2", "value3"]) +) + +``` + +The test suite validates: +- Basic publish/subscribe +- Multiple subscribers receiving the same message +- Unsubscribe functionality +- Multiple messages in order +- Async iteration +- High-volume message handling (10,000 messages) + +Run the tests with: +```bash +pytest dimos/protocol/pubsub/test_spec.py -v +``` + +## Creating a Custom Transport + +To implement a new transport: + +1. **Subclass `PubSub`** and implement `publish()` and `subscribe()` +2. **Add encoding** if needed via `PubSubEncoderMixin` +3. **Create a `Transport` wrapper** by subclassing `PubSubTransport` +4. 
**Add to the test grid** in `test_spec.py` + +Here's a minimal template: + +```python session=custom_transport ansi=false +template = ''' +from dimos.protocol.pubsub.spec import PubSub, PickleEncoderMixin +from dimos.core.transport import PubSubTransport + +class MyPubSub(PubSub[str, bytes]): + """Custom pub/sub implementation.""" + + def __init__(self): + self._subscribers = {} + + def start(self): + # Initialize connection/resources + pass + + def stop(self): + # Cleanup + pass + + def publish(self, topic: str, message: bytes) -> None: + # Send message to all subscribers on topic + for cb in self._subscribers.get(topic, []): + cb(message, topic) + + def subscribe(self, topic, callback): + # Register callback, return unsubscribe function + if topic not in self._subscribers: + self._subscribers[topic] = [] + self._subscribers[topic].append(callback) + + def unsubscribe(): + self._subscribers[topic].remove(callback) + return unsubscribe + + +# With pickle encoding +class MyPicklePubSub(PickleEncoderMixin, MyPubSub): + pass + + +# Transport wrapper for use with modules +class MyTransport(PubSubTransport): + def __init__(self, topic: str): + super().__init__(topic) + self.pubsub = MyPicklePubSub() + + def broadcast(self, _, msg): + self.pubsub.publish(self.topic, msg) + + def subscribe(self, callback, selfstream=None): + return self.pubsub.subscribe(self.topic, lambda msg, t: callback(msg)) +''' +print(template) +``` + + +``` + +from dimos.protocol.pubsub.spec import PubSub, PickleEncoderMixin +from dimos.core.transport import PubSubTransport + +class MyPubSub(PubSub[str, bytes]): + """Custom pub/sub implementation.""" + + def __init__(self): + self._subscribers = {} + + def start(self): + # Initialize connection/resources + pass + + def stop(self): + # Cleanup + pass + + def publish(self, topic: str, message: bytes) -> None: + # Send message to all subscribers on topic + for cb in self._subscribers.get(topic, []): + cb(message, topic) + + def subscribe(self, topic, 
callback): + # Register callback, return unsubscribe function + if topic not in self._subscribers: + self._subscribers[topic] = [] + self._subscribers[topic].append(callback) + + def unsubscribe(): + self._subscribers[topic].remove(callback) + return unsubscribe + + +# With pickle encoding +class MyPicklePubSub(PickleEncoderMixin, MyPubSub): + pass + + +# Transport wrapper for use with modules +class MyTransport(PubSubTransport): + def __init__(self, topic: str): + super().__init__(topic) + self.pubsub = MyPicklePubSub() + + def broadcast(self, _, msg): + self.pubsub.publish(self.topic, msg) + + def subscribe(self, callback, selfstream=None): + return self.pubsub.subscribe(self.topic, lambda msg, t: callback(msg)) + +``` diff --git a/docs/ci.md b/docs/old/ci.md similarity index 100% rename from docs/ci.md rename to docs/old/ci.md diff --git a/docs/jetson.MD b/docs/old/jetson.MD similarity index 100% rename from docs/jetson.MD rename to docs/old/jetson.MD diff --git a/docs/modules.md b/docs/old/modules.md similarity index 100% rename from docs/modules.md rename to docs/old/modules.md diff --git a/docs/modules_CN.md b/docs/old/modules_CN.md similarity index 100% rename from docs/modules_CN.md rename to docs/old/modules_CN.md diff --git a/docs/ros_navigation.md b/docs/old/ros_navigation.md similarity index 100% rename from docs/ros_navigation.md rename to docs/old/ros_navigation.md diff --git a/docs/running_without_devcontainer.md b/docs/old/running_without_devcontainer.md similarity index 100% rename from docs/running_without_devcontainer.md rename to docs/old/running_without_devcontainer.md diff --git a/docs/testing_stream_reply.md b/docs/old/testing_stream_reply.md similarity index 100% rename from docs/testing_stream_reply.md rename to docs/old/testing_stream_reply.md diff --git a/flake.lock b/flake.lock index 5231ed9039..402f251030 100644 --- a/flake.lock +++ b/flake.lock @@ -1,5 +1,21 @@ { "nodes": { + "diagon": { + "locked": { + "lastModified": 1763299369, + 
"narHash": "sha256-z/q22EqZfF79vZQh6K/yCmt8iqDvUSkIVTH+Omhv1VE=", + "owner": "petertrotman", + "repo": "nixpkgs", + "rev": "dff059e25eee7aa958c606aeb6b5879ae1c674f0", + "type": "github" + }, + "original": { + "owner": "petertrotman", + "ref": "Diagon", + "repo": "nixpkgs", + "type": "github" + } + }, "flake-utils": { "inputs": { "systems": "systems" @@ -108,6 +124,7 @@ }, "root": { "inputs": { + "diagon": "diagon", "flake-utils": "flake-utils", "lib": "lib", "nixpkgs": "nixpkgs", diff --git a/flake.nix b/flake.nix index 5b3c7ad1dc..3a70a0bf2f 100644 --- a/flake.nix +++ b/flake.nix @@ -9,9 +9,10 @@ xome.url = "github:jeff-hykin/xome"; xome.inputs.nixpkgs.follows = "nixpkgs"; xome.inputs.flake-utils.follows = "flake-utils"; + diagon.url = "github:petertrotman/nixpkgs/Diagon"; }; - outputs = { self, nixpkgs, flake-utils, lib, xome, ... }: + outputs = { self, nixpkgs, flake-utils, lib, xome, diagon, ... }: flake-utils.lib.eachDefaultSystem (system: let pkgs = import nixpkgs { inherit system; }; @@ -128,6 +129,12 @@ { vals.pkg=pkgs.libjpeg_turbo; flags.ldLibraryGroup=true; } { vals.pkg=pkgs.libpng; flags={}; } + ### Docs generators + { vals.pkg=pkgs.pikchr; flags={}; } + { vals.pkg=pkgs.graphviz; flags={}; } + { vals.pkg=pkgs.imagemagick; flags={}; } + { vals.pkg=diagon.legacyPackages.${system}.diagon; flags={}; } + ### LCM (Lightweight Communications and Marshalling) { vals.pkg=pkgs.lcm; flags.ldLibraryGroup=true; onlyIf=pkgs.stdenv.isLinux; } # lcm works on darwin, but only after two fixes (1. pkg-config, 2. 
fsync) diff --git a/pyproject.toml b/pyproject.toml index 9cbe25dded..206263d209 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -127,7 +127,7 @@ dependencies = [ "dask[complete]==2025.5.1", # LCM / DimOS utilities - "dimos-lcm", + "dimos-lcm==0.1.0", # CLI "pydantic-settings>=2.11.0,<3", @@ -215,6 +215,7 @@ dev = [ "textual==3.7.1", "requests-mock==1.12.1", "terminaltexteffects==0.12.2", + "watchdog>=3.0.0", # Types "lxml-stubs>=0.5.1,<1", diff --git a/uv.lock b/uv.lock index 6e5e4d940b..6a89d211df 100644 --- a/uv.lock +++ b/uv.lock @@ -1585,6 +1585,7 @@ dev = [ { name = "types-tabulate" }, { name = "types-tensorflow" }, { name = "types-tqdm" }, + { name = "watchdog" }, ] drone = [ { name = "pymavlink" }, @@ -1628,7 +1629,7 @@ requires-dist = [ { name = "dask", extras = ["complete"], specifier = "==2025.5.1" }, { name = "dataclasses", marker = "extra == 'cuda'" }, { name = "detectron2", marker = "extra == 'cuda'", git = "https://github.com/facebookresearch/detectron2.git?tag=v0.6" }, - { name = "dimos-lcm" }, + { name = "dimos-lcm", specifier = "==0.1.0" }, { name = "einops", specifier = "==0.8.1" }, { name = "empy", specifier = "==3.3.4" }, { name = "fastapi", specifier = ">=0.115.6" }, @@ -1748,6 +1749,7 @@ requires-dist = [ { name = "unitree-webrtc-connect-leshy", specifier = ">=2.0.7" }, { name = "uvicorn", specifier = ">=0.34.0" }, { name = "wasmtime" }, + { name = "watchdog", marker = "extra == 'dev'", specifier = ">=3.0.0" }, { name = "xarm-python-sdk", specifier = ">=1.17.0" }, { name = "xformers", marker = "extra == 'cuda'", specifier = ">=0.0.20" }, { name = "yapf", specifier = "==0.40.2" }, @@ -9326,6 +9328,38 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/d2/87/35cbfdf9619c958a8b48f2ad083b88abc1521d771bfab668002e4405a1da/wasmtime-40.0.0-py3-none-win_arm64.whl", hash = "sha256:7667966236bba5e80a1c454553e566a1fa700328bc3e65b5ca970bee7e177e57", size = 5398931, upload-time = "2025-12-22T16:30:38.047Z" }, ] +[[package]] +name = 
"watchdog" +version = "6.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/db/7d/7f3d619e951c88ed75c6037b246ddcf2d322812ee8ea189be89511721d54/watchdog-6.0.0.tar.gz", hash = "sha256:9ddf7c82fda3ae8e24decda1338ede66e1c99883db93711d8fb941eaa2d8c282", size = 131220, upload-time = "2024-11-01T14:07:13.037Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0c/56/90994d789c61df619bfc5ce2ecdabd5eeff564e1eb47512bd01b5e019569/watchdog-6.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d1cdb490583ebd691c012b3d6dae011000fe42edb7a82ece80965b42abd61f26", size = 96390, upload-time = "2024-11-01T14:06:24.793Z" }, + { url = "https://files.pythonhosted.org/packages/55/46/9a67ee697342ddf3c6daa97e3a587a56d6c4052f881ed926a849fcf7371c/watchdog-6.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bc64ab3bdb6a04d69d4023b29422170b74681784ffb9463ed4870cf2f3e66112", size = 88389, upload-time = "2024-11-01T14:06:27.112Z" }, + { url = "https://files.pythonhosted.org/packages/44/65/91b0985747c52064d8701e1075eb96f8c40a79df889e59a399453adfb882/watchdog-6.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c897ac1b55c5a1461e16dae288d22bb2e412ba9807df8397a635d88f671d36c3", size = 89020, upload-time = "2024-11-01T14:06:29.876Z" }, + { url = "https://files.pythonhosted.org/packages/e0/24/d9be5cd6642a6aa68352ded4b4b10fb0d7889cb7f45814fb92cecd35f101/watchdog-6.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6eb11feb5a0d452ee41f824e271ca311a09e250441c262ca2fd7ebcf2461a06c", size = 96393, upload-time = "2024-11-01T14:06:31.756Z" }, + { url = "https://files.pythonhosted.org/packages/63/7a/6013b0d8dbc56adca7fdd4f0beed381c59f6752341b12fa0886fa7afc78b/watchdog-6.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ef810fbf7b781a5a593894e4f439773830bdecb885e6880d957d5b9382a960d2", size = 88392, upload-time = "2024-11-01T14:06:32.99Z" }, + { url = 
"https://files.pythonhosted.org/packages/d1/40/b75381494851556de56281e053700e46bff5b37bf4c7267e858640af5a7f/watchdog-6.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:afd0fe1b2270917c5e23c2a65ce50c2a4abb63daafb0d419fde368e272a76b7c", size = 89019, upload-time = "2024-11-01T14:06:34.963Z" }, + { url = "https://files.pythonhosted.org/packages/39/ea/3930d07dafc9e286ed356a679aa02d777c06e9bfd1164fa7c19c288a5483/watchdog-6.0.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:bdd4e6f14b8b18c334febb9c4425a878a2ac20efd1e0b231978e7b150f92a948", size = 96471, upload-time = "2024-11-01T14:06:37.745Z" }, + { url = "https://files.pythonhosted.org/packages/12/87/48361531f70b1f87928b045df868a9fd4e253d9ae087fa4cf3f7113be363/watchdog-6.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:c7c15dda13c4eb00d6fb6fc508b3c0ed88b9d5d374056b239c4ad1611125c860", size = 88449, upload-time = "2024-11-01T14:06:39.748Z" }, + { url = "https://files.pythonhosted.org/packages/5b/7e/8f322f5e600812e6f9a31b75d242631068ca8f4ef0582dd3ae6e72daecc8/watchdog-6.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6f10cb2d5902447c7d0da897e2c6768bca89174d0c6e1e30abec5421af97a5b0", size = 89054, upload-time = "2024-11-01T14:06:41.009Z" }, + { url = "https://files.pythonhosted.org/packages/68/98/b0345cabdce2041a01293ba483333582891a3bd5769b08eceb0d406056ef/watchdog-6.0.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:490ab2ef84f11129844c23fb14ecf30ef3d8a6abafd3754a6f75ca1e6654136c", size = 96480, upload-time = "2024-11-01T14:06:42.952Z" }, + { url = "https://files.pythonhosted.org/packages/85/83/cdf13902c626b28eedef7ec4f10745c52aad8a8fe7eb04ed7b1f111ca20e/watchdog-6.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:76aae96b00ae814b181bb25b1b98076d5fc84e8a53cd8885a318b42b6d3a5134", size = 88451, upload-time = "2024-11-01T14:06:45.084Z" }, + { url = 
"https://files.pythonhosted.org/packages/fe/c4/225c87bae08c8b9ec99030cd48ae9c4eca050a59bf5c2255853e18c87b50/watchdog-6.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a175f755fc2279e0b7312c0035d52e27211a5bc39719dd529625b1930917345b", size = 89057, upload-time = "2024-11-01T14:06:47.324Z" }, + { url = "https://files.pythonhosted.org/packages/30/ad/d17b5d42e28a8b91f8ed01cb949da092827afb9995d4559fd448d0472763/watchdog-6.0.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:c7ac31a19f4545dd92fc25d200694098f42c9a8e391bc00bdd362c5736dbf881", size = 87902, upload-time = "2024-11-01T14:06:53.119Z" }, + { url = "https://files.pythonhosted.org/packages/5c/ca/c3649991d140ff6ab67bfc85ab42b165ead119c9e12211e08089d763ece5/watchdog-6.0.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:9513f27a1a582d9808cf21a07dae516f0fab1cf2d7683a742c498b93eedabb11", size = 88380, upload-time = "2024-11-01T14:06:55.19Z" }, + { url = "https://files.pythonhosted.org/packages/a9/c7/ca4bf3e518cb57a686b2feb4f55a1892fd9a3dd13f470fca14e00f80ea36/watchdog-6.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:7607498efa04a3542ae3e05e64da8202e58159aa1fa4acddf7678d34a35d4f13", size = 79079, upload-time = "2024-11-01T14:06:59.472Z" }, + { url = "https://files.pythonhosted.org/packages/5c/51/d46dc9332f9a647593c947b4b88e2381c8dfc0942d15b8edc0310fa4abb1/watchdog-6.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:9041567ee8953024c83343288ccc458fd0a2d811d6a0fd68c4c22609e3490379", size = 79078, upload-time = "2024-11-01T14:07:01.431Z" }, + { url = "https://files.pythonhosted.org/packages/d4/57/04edbf5e169cd318d5f07b4766fee38e825d64b6913ca157ca32d1a42267/watchdog-6.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:82dc3e3143c7e38ec49d61af98d6558288c415eac98486a5c581726e0737c00e", size = 79076, upload-time = "2024-11-01T14:07:02.568Z" }, + { url = 
"https://files.pythonhosted.org/packages/ab/cc/da8422b300e13cb187d2203f20b9253e91058aaf7db65b74142013478e66/watchdog-6.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:212ac9b8bf1161dc91bd09c048048a95ca3a4c4f5e5d4a7d1b1a7d5752a7f96f", size = 79077, upload-time = "2024-11-01T14:07:03.893Z" }, + { url = "https://files.pythonhosted.org/packages/2c/3b/b8964e04ae1a025c44ba8e4291f86e97fac443bca31de8bd98d3263d2fcf/watchdog-6.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:e3df4cbb9a450c6d49318f6d14f4bbc80d763fa587ba46ec86f99f9e6876bb26", size = 79078, upload-time = "2024-11-01T14:07:05.189Z" }, + { url = "https://files.pythonhosted.org/packages/62/ae/a696eb424bedff7407801c257d4b1afda455fe40821a2be430e173660e81/watchdog-6.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:2cce7cfc2008eb51feb6aab51251fd79b85d9894e98ba847408f662b3395ca3c", size = 79077, upload-time = "2024-11-01T14:07:06.376Z" }, + { url = "https://files.pythonhosted.org/packages/b5/e8/dbf020b4d98251a9860752a094d09a65e1b436ad181faf929983f697048f/watchdog-6.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:20ffe5b202af80ab4266dcd3e91aae72bf2da48c0d33bdb15c66658e685e94e2", size = 79078, upload-time = "2024-11-01T14:07:07.547Z" }, + { url = "https://files.pythonhosted.org/packages/07/f6/d0e5b343768e8bcb4cda79f0f2f55051bf26177ecd5651f84c07567461cf/watchdog-6.0.0-py3-none-win32.whl", hash = "sha256:07df1fdd701c5d4c8e55ef6cf55b8f0120fe1aef7ef39a1c6fc6bc2e606d517a", size = 79065, upload-time = "2024-11-01T14:07:09.525Z" }, + { url = "https://files.pythonhosted.org/packages/db/d9/c495884c6e548fce18a8f40568ff120bc3a4b7b99813081c8ac0c936fa64/watchdog-6.0.0-py3-none-win_amd64.whl", hash = "sha256:cbafb470cf848d93b5d013e2ecb245d4aa1c8fd0504e863ccefa32445359d680", size = 79070, upload-time = "2024-11-01T14:07:10.686Z" }, + { url = "https://files.pythonhosted.org/packages/33/e8/e40370e6d74ddba47f002a32919d91310d6074130fe4e17dabcafc15cbf1/watchdog-6.0.0-py3-none-win_ia64.whl", hash = 
"sha256:a1914259fa9e1454315171103c6a30961236f508b9b623eae470268bbcc6a22f", size = 79067, upload-time = "2024-11-01T14:07:11.845Z" }, +] + [[package]] name = "watchfiles" version = "1.1.1"