Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
45 commits
Select commit Hold shift + click to select a range
98c3999
feat(sim): add MJLab G1 velocity policy profile
Nabla7 Jan 15, 2026
568ada4
CI code cleanup
Nabla7 Jan 15, 2026
7689f90
fix(sim): resolve meshdir/profile asset conflicts for GO2 and G1
Nabla7 Jan 15, 2026
ce66d1b
configure unitree go2 mapper to use 10 cm voxels (#1032)
leshy Jan 16, 2026
725ce98
feat(sim): add MuJoCo subprocess profiler for performance debugging
Nabla7 Jan 16, 2026
f845271
pre commit
Nabla7 Jan 16, 2026
d7366c3
small docs clarification (#1043)
leshy Jan 16, 2026
673bd52
Fix split view on wide monitors (#1048)
jeff-hykin Jan 17, 2026
7374786
Docs: Install & Develop (#1022)
jeff-hykin Jan 17, 2026
8af8f8f
Add uv to nix and fix resulting problems (#1021)
jeff-hykin Jan 18, 2026
f6a0b5f
v0.0.8 version update (#1050)
paul-nechifor Jan 18, 2026
af34d5f
Style changes in docs (#1051)
paul-nechifor Jan 18, 2026
41bc65d
Revert "Add uv to nix and fix resulting problems (#1021)" (#1053)
leshy Jan 18, 2026
55e4ed2
Transport benchmarks & Raw ROS transport (#1038)
leshy Jan 19, 2026
f8e1729
feat: default to rerun-web and auto-open browser on startup (#1019)
Nabla7 Jan 19, 2026
f18f818
chore: fix indentation in blueprints ambiguity check
Nabla7 Jan 19, 2026
07f8601
CI code cleanup
Nabla7 Jan 19, 2026
c277b62
use p controller to stop oscillations on unitree go2 (#1014)
paul-nechifor Jan 14, 2026
ea614dc
Dynamic session providers for onnxruntime (#983)
Kaweees Jan 15, 2026
bde3bb4
Perception Full Refactor and Cleanup, deprecated Manipulation AIO Pip…
alexlin2 Jan 15, 2026
0b6667b
feat(cli): type-free topic echo via /topic#pkg.Msg inference, this mi…
Nabla7 Jan 15, 2026
c0905a1
verify blueprints (#1018)
paul-nechifor Jan 15, 2026
b4acaad
Experimental Streamed Temporal Memory with SpatioTemporal & Entity ba…
ClaireBookworm Jan 15, 2026
903756c
Control Orchestrator - Unified Controller for multi-arm and full body…
mustafab0 Jan 15, 2026
aa9eb87
configure unitree go2 mapper to use 10 cm voxels (#1032)
leshy Jan 16, 2026
0791589
Create DDSPubSubBase, DDSTopic
Kaweees Jan 16, 2026
1b53b78
Create PickleDDS
Kaweees Jan 16, 2026
5c38c30
Fix hash/equality inconsistency in DDSTopic
Kaweees Jan 16, 2026
25ff09f
Add DDSMsg
Kaweees Jan 16, 2026
73ea711
Create DDSTransport
Kaweees Jan 16, 2026
400b514
Add broadcast and subscribe methods to DDSTransport
Kaweees Jan 16, 2026
6933da8
Create DDSService
Kaweees Jan 17, 2026
460a45e
Add CycloneDDS package
Kaweees Jan 17, 2026
d918407
Remove unnecessary attributes
Kaweees Jan 18, 2026
c59c1f6
Add threading and serialization methods to DDSService
Kaweees Jan 18, 2026
b601128
Ensure broadcast and subscribe methods initialize DDS if not started
Kaweees Jan 18, 2026
7327c04
Add Transport benchmarking capabilities to CycloneDDS (#1055)
Kaweees Jan 18, 2026
43c7903
Fix DDS segmentation fault using bytearray for binary data storage
Kaweees Jan 18, 2026
7464a84
Refactor DDS PubSub implementation to use CycloneDDS Topic
Kaweees Jan 19, 2026
0eec778
Remove DDS pickling
Kaweees Jan 19, 2026
b332082
CI code cleanup
Nabla7 Jan 19, 2026
77da449
merge: origin/dev
Nabla7 Jan 19, 2026
99ed7fd
Merge branch 'feat/mjlab-g1' of github.com:dimensionalOS/dimos into f…
Nabla7 Jan 19, 2026
c9beb7d
bugfix
Nabla7 Jan 19, 2026
bfa5008
CI code cleanup
Nabla7 Jan 19, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
606 changes: 163 additions & 443 deletions README.md

Large diffs are not rendered by default.

10 changes: 8 additions & 2 deletions bin/hooks/filter_commit_message.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,10 +28,16 @@ def main() -> int:

lines = commit_msg_file.read_text().splitlines(keepends=True)

# Find the first line containing "Generated with" and truncate there
# Patterns that trigger truncation (everything from this line onwards is removed)
truncate_patterns = [
"Generated with",
"Co-Authored-By",
]

# Find the first line containing any truncate pattern and truncate there
filtered_lines = []
for line in lines:
if "Generated with" in line:
if any(pattern in line for pattern in truncate_patterns):
break
filtered_lines.append(line)

Expand Down
4 changes: 2 additions & 2 deletions data/.lfs/mujoco_sim.tar.gz
Git LFS file not shown
2 changes: 2 additions & 0 deletions dimos/core/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
from dimos.core.rpc_client import RPCClient
from dimos.core.stream import In, Out, RemoteIn, RemoteOut, Transport
from dimos.core.transport import (
DDSTransport,
LCMTransport,
SHMTransport,
ZenohTransport,
Expand All @@ -31,6 +32,7 @@
"LCMRPC",
"LCMTF",
"TF",
"DDSTransport",
"DimosCluster",
"In",
"LCMTransport",
Expand Down
2 changes: 1 addition & 1 deletion dimos/core/blueprints.py
Original file line number Diff line number Diff line change
Expand Up @@ -392,7 +392,7 @@ def build(
module_coordinator.start_all_modules()

# Compose and send Rerun blueprint from module contributions
if global_config.viewer_backend.startswith("rerun"):
if global_config.rerun_enabled and global_config.viewer_backend.startswith("rerun"):
self._init_rerun_blueprint(module_coordinator)

return module_coordinator
Expand Down
10 changes: 9 additions & 1 deletion dimos/core/global_config.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ class GlobalConfig(BaseSettings):
replay: bool = False
rerun_enabled: bool = True
rerun_server_addr: str | None = None
viewer_backend: ViewerBackend = "rerun-native"
viewer_backend: ViewerBackend = "rerun-web"
n_dask_workers: int = 2
memory_limit: str = "auto"
mujoco_camera_position: str | None = None
Expand All @@ -44,6 +44,14 @@ class GlobalConfig(BaseSettings):
mujoco_start_pos: str = "-1.0, 1.0"
mujoco_steps_per_frame: int = 7
robot_model: str | None = None
# Optional: name of a MuJoCo "bundle" that selects the robot MJCF + policy together.
# If set, Dimos MuJoCo sim will prefer:
# - data/mujoco_sim/{mujoco_profile}.xml
# - data/mujoco_sim/{mujoco_profile}_policy.onnx
mujoco_profile: str | None = None
# Enable lightweight timing breakdown logs from the MuJoCo subprocess (physics/render/pcd/policy).
mujoco_profiler: bool = False
mujoco_profiler_interval_s: float = 2.0
robot_width: float = 0.3
robot_rotation_diameter: float = 0.6
planner_strategy: NavigationStrategy = "simple"
Expand Down
31 changes: 31 additions & 0 deletions dimos/core/transport.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@
)

from dimos.core.stream import In, Out, Stream, Transport
from dimos.protocol.pubsub.ddspubsub import DDS, Topic as DDSTopic
from dimos.protocol.pubsub.jpeg_shm import JpegSharedMemory
from dimos.protocol.pubsub.lcmpubsub import LCM, JpegLCM, PickleLCM, Topic as LCMTopic
from dimos.protocol.pubsub.shmpubsub import PickleSharedMemory, SharedMemory
Expand Down Expand Up @@ -212,4 +213,34 @@ def start(self) -> None: ...
def stop(self) -> None: ...


class DDSTransport(PubSubTransport[T]):
    """Pub/sub transport backed by CycloneDDS (dimos.protocol.pubsub.ddspubsub).

    The underlying DDS session is started lazily: broadcast() and subscribe()
    call start() on first use, so a transport can be constructed (or
    unpickled) without immediately opening DDS resources.
    """

    # True once start() has been called on the underlying DDS session.
    _started: bool = False

    def __init__(self, topic: str, type: type, **kwargs) -> None:  # type: ignore[no-untyped-def]
        # NOTE(review): parameter `type` shadows the builtin; kept as-is for
        # interface compatibility with existing callers.
        super().__init__(DDSTopic(topic, type))
        # Guard preserves a pre-existing `dds` session (e.g. one set up by a
        # subclass before calling this __init__) — presumably intentional;
        # confirm against subclasses/callers.
        if not hasattr(self, "dds"):
            self.dds = DDS(**kwargs)

    def start(self) -> None:
        """Start the DDS session and mark this transport as started."""
        self.dds.start()
        self._started = True

    def stop(self) -> None:
        """Stop the DDS session and mark this transport as stopped."""
        self.dds.stop()
        self._started = False

    def __reduce__(self):  # type: ignore[no-untyped-def]
        # Pickle as (callable, args): unpickling calls DDSTransport(topic,
        # dds_type), yielding a fresh, not-yet-started transport.
        return (DDSTransport, (self.topic.topic, self.topic.dds_type))

    def broadcast(self, _, msg) -> None:  # type: ignore[no-untyped-def]
        # Lazily start DDS on first publish.
        if not self._started:
            self.start()
        self.dds.publish(self.topic, msg)

    def subscribe(self, callback: Callable[[T], None], selfstream: In[T] = None) -> None:  # type: ignore[assignment, override]
        # Lazily start DDS on first subscribe. The DDS layer invokes
        # callbacks as (msg, topic); downstream callbacks expect only msg,
        # hence the adapter lambda.
        if not self._started:
            self.start()
        return self.dds.subscribe(self.topic, lambda msg, topic: callback(msg))  # type: ignore[return-value]


class ZenohTransport(PubSubTransport[T]): ...  # Placeholder: Zenoh backend not implemented here.
7 changes: 7 additions & 0 deletions dimos/dashboard/rerun_init.py
Original file line number Diff line number Diff line change
Expand Up @@ -137,6 +137,13 @@ def connect_rerun(
logger.debug("Already connected to Rerun server")
return

# Skip if Rerun is disabled globally (even if viewer_backend is still set to rerun-*).
if global_config and not global_config.rerun_enabled:
logger.debug(
"Rerun disabled; skipping connect", viewer_backend=global_config.viewer_backend
)
return

# Skip if foxglove backend selected
if global_config and not global_config.viewer_backend.startswith("rerun"):
logger.debug("Rerun connection skipped", viewer_backend=global_config.viewer_backend)
Expand Down
1 change: 1 addition & 0 deletions dimos/protocol/pubsub/__init__.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
import dimos.protocol.pubsub.ddspubsub as dds
import dimos.protocol.pubsub.lcmpubsub as lcm
from dimos.protocol.pubsub.memory import Memory
from dimos.protocol.pubsub.spec import PubSub
175 changes: 175 additions & 0 deletions dimos/protocol/pubsub/benchmark/test_benchmark.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,175 @@
#!/usr/bin/env python3

# Copyright 2025-2026 Dimensional Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from collections.abc import Generator
import threading
import time
from typing import Any

import pytest

from dimos.protocol.pubsub.benchmark.testdata import testdata
from dimos.protocol.pubsub.benchmark.type import (
BenchmarkResult,
BenchmarkResults,
MsgGen,
PubSubContext,
TestCase,
)

# Message sizes for throughput benchmarking, 64 B to 10 MB
# (powers of two up to 2 MB, then 5 MB and 10 MB).
MSG_SIZES = [
    64,
    256,
    1024,
    4096,
    16384,
    65536,
    262144,
    524288,
    1048576,
    1048576 * 2,
    1048576 * 5,
    1048576 * 10,
]

# Benchmark duration in seconds (per transport/size combination).
BENCH_DURATION = 1.0

# Max messages to send per test (prevents overwhelming slower transports)
MAX_MESSAGES = 5000

# Max time to wait for in-flight messages after publishing stops
RECEIVE_TIMEOUT = 1.0


def size_id(size: int) -> str:
    """Render a byte count as a short human-readable label for test IDs.

    Sizes of at least 1 MiB are shown as "<n>MB", at least 1 KiB as "<n>KB"
    (integer division), and anything smaller as "<n>B".
    """
    for threshold, suffix in ((1048576, "MB"), (1024, "KB")):
        if size >= threshold:
            return f"{size // threshold}{suffix}"
    return f"{size}B"


def pubsub_id(testcase: TestCase[Any, Any]) -> str:
    """Derive a pubsub implementation name from the context manager's name.

    E.g. "lcm_pubsub_channel" -> "LCM", "memory_pubsub_channel" -> "Memory":
    the "_pubsub_channel" suffix is dropped; short names (<= 3 chars) are
    upper-cased, longer ones are title-cased with underscores removed.
    """
    base = testcase.pubsub_context.__name__
    base = base.replace("_pubsub_channel", "").replace("_", " ")
    if len(base) <= 3:
        return base.upper()
    return base.title().replace(" ", "")


@pytest.fixture(scope="module")
def benchmark_results() -> Generator[BenchmarkResults, None, None]:
    """Module-scoped fixture that collects benchmark results across all tests.

    Yields a single shared BenchmarkResults accumulator; after the whole
    module has run, prints the summary table plus the throughput, bandwidth,
    and latency heatmaps.
    """
    results = BenchmarkResults()
    yield results
    # Teardown: render all report views once every parametrized case is done.
    results.print_summary()
    results.print_heatmap()
    results.print_bandwidth_heatmap()
    results.print_latency_heatmap()


@pytest.mark.tool
@pytest.mark.parametrize("msg_size", MSG_SIZES, ids=[size_id(s) for s in MSG_SIZES])
@pytest.mark.parametrize("pubsub_context, msggen", testdata, ids=[pubsub_id(t) for t in testdata])
def test_throughput(
    pubsub_context: PubSubContext[Any, Any],
    msggen: MsgGen[Any, Any],
    msg_size: int,
    benchmark_results: BenchmarkResults,
) -> None:
    """Measure throughput for publishing and receiving messages over a fixed duration.

    Publishes up to MAX_MESSAGES messages of `msg_size` bytes for at most
    BENCH_DURATION seconds, counts how many arrive via the subscription
    callback, and records a BenchmarkResult. Message loss does not fail the
    test; losses above 10% only emit a warning.
    """
    with pubsub_context() as pubsub:
        topic, msg = msggen(msg_size)
        received_count = 0
        target_count = [0]  # Use list to allow modification after publish loop
        lock = threading.Lock()
        all_received = threading.Event()

        def callback(message: Any, _topic: Any) -> None:
            # Runs on the transport's receive thread; lock guards the counter
            # against the main thread's reads below.
            nonlocal received_count
            with lock:
                received_count += 1
                # target_count[0] == 0 means the target isn't known yet
                # (publishing hasn't started), so don't signal early.
                if target_count[0] > 0 and received_count >= target_count[0]:
                    all_received.set()

        # Subscribe
        pubsub.subscribe(topic, callback)

        # Warmup: give DDS/ROS time to establish connection
        time.sleep(0.1)

        # Set target so callback can signal when all received
        target_count[0] = MAX_MESSAGES

        # Publish messages until time limit, max messages, or all received
        msgs_sent = 0
        start = time.perf_counter()
        end_time = start + BENCH_DURATION

        while time.perf_counter() < end_time and msgs_sent < MAX_MESSAGES:
            pubsub.publish(topic, msg)
            msgs_sent += 1
            # Check if all already received (fast transports)
            if all_received.is_set():
                break

        publish_end = time.perf_counter()
        target_count[0] = msgs_sent  # Update to actual sent count

        # Check if already done, otherwise wait up to RECEIVE_TIMEOUT
        with lock:
            if received_count >= msgs_sent:
                all_received.set()

        if not all_received.is_set():
            all_received.wait(timeout=RECEIVE_TIMEOUT)
        latency_end = time.perf_counter()

        with lock:
            final_received = received_count

        # Latency: how long we waited after publishing for messages to arrive
        # 0 = all arrived during publishing, 1000ms = hit timeout (loss occurred)
        latency = latency_end - publish_end

        # Record result (duration is publish time only for throughput calculation)
        # Extract transport name from context manager function name
        # (same normalization as pubsub_id above).
        ctx_name = pubsub_context.__name__
        prefix = ctx_name.replace("_pubsub_channel", "").replace("_", " ")
        transport_name = prefix.upper() if len(prefix) <= 3 else prefix.title().replace(" ", "")
        result = BenchmarkResult(
            transport=transport_name,
            duration=publish_end - start,
            msgs_sent=msgs_sent,
            msgs_received=final_received,
            msg_size_bytes=msg_size,
            receive_time=latency,
        )
        benchmark_results.add(result)

        # Warn if significant message loss (but don't fail - benchmark records the data)
        loss_pct = (1 - final_received / msgs_sent) * 100 if msgs_sent > 0 else 0
        if loss_pct > 10:
            import warnings

            warnings.warn(
                f"{transport_name} {msg_size}B: {loss_pct:.1f}% message loss "
                f"({final_received}/{msgs_sent})",
                stacklevel=2,
            )
Loading
Loading