diff --git a/README.md b/README.md index f8df86f5aa..e859c9924c 100644 --- a/README.md +++ b/README.md @@ -54,6 +54,22 @@ We are shipping a first look at the DIMOS x Unitree Go2 integration, allowing fo - **DimOS Interface / Development Tools** - Local development interface to control your robot, orchestrate agents, visualize camera/lidar streams, and debug your dimensional agentive application. +## MacOS Installation + +```sh +# Install Nix +curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install + +# clone the repository +git clone --branch dev --single-branch https://github.com/dimensionalOS/dimos.git + +# setup the environment (follow the prompts after nix develop) +cd dimos +nix develop + +# You should be able to follow the instructions below as well for a more manual installation +``` + --- ## Python Installation Tested on Ubuntu 22.04/24.04 @@ -83,10 +99,10 @@ pip install torch==2.0.1 torchvision torchaudio --index-url https://download.pyt #### Install dependencies ```bash # CPU only (reccomended to attempt first) -pip install -e .[cpu,dev] +pip install -e '.[cpu,dev]' # CUDA install -pip install -e .[cuda,dev] +pip install -e '.[cuda,dev]' # Copy and configure environment variables cp default.env .env @@ -99,27 +115,27 @@ pytest -s dimos/ #### Test Dimensional with a replay UnitreeGo2 stream (no robot required) ```bash -CONNECTION_TYPE=replay python dimos/robot/unitree_webrtc/unitree_go2.py +dimos --replay run unitree-go2 ``` #### Test Dimensional with a simulated UnitreeGo2 in MuJoCo (no robot required) ```bash -pip install -e .[sim] +pip install -e '.[sim]' export DISPLAY=:1 # Or DISPLAY=:0 if getting GLFW/OpenGL X11 errors -CONNECTION_TYPE=mujoco python dimos/robot/unitree_webrtc/unitree_go2.py +dimos --simulation run unitree-go2 ``` #### Test Dimensional with a real UnitreeGo2 over WebRTC ```bash export ROBOT_IP=192.168.X.XXX # Add the robot IP address -python dimos/robot/unitree_webrtc/unitree_go2.py +dimos 
run unitree-go2 ``` #### Test Dimensional with a real UnitreeGo2 running Agents *OpenAI / Alibaba keys required* ```bash export ROBOT_IP=192.168.X.XXX # Add the robot IP address -python dimos/robot/unitree_webrtc/run_agents2.py +dimos run unitree-go2-agentic ``` --- diff --git a/dimos/agents/memory/chroma_impl.py b/dimos/agents/memory/chroma_impl.py index d09d515292..96cc32aa54 100644 --- a/dimos/agents/memory/chroma_impl.py +++ b/dimos/agents/memory/chroma_impl.py @@ -145,10 +145,18 @@ def __init__( def create(self) -> None: """Create local embedding model and initialize the ChromaDB client.""" # Load the sentence transformer model - # Use CUDA if available, otherwise fall back to CPU - device = "cuda" if torch.cuda.is_available() else "cpu" - print(f"Using device: {device}") - self.model = SentenceTransformer(self.model_name, device=device) # type: ignore[name-defined] + + # Use GPU if available, otherwise fall back to CPU + if torch.cuda.is_available(): + self.device = "cuda" + # MacOS Metal performance shaders + elif torch.backends.mps.is_available() and torch.backends.mps.is_built(): + self.device = "mps" + else: + self.device = "cpu" + + print(f"Using device: {self.device}") + self.model = SentenceTransformer(self.model_name, device=self.device) # type: ignore[name-defined] # Create a custom embedding class that implements the embed_query method class SentenceTransformerEmbeddings: diff --git a/dimos/agents/memory/image_embedding.py b/dimos/agents/memory/image_embedding.py index 99ca3fcc3f..77e034a3e9 100644 --- a/dimos/agents/memory/image_embedding.py +++ b/dimos/agents/memory/image_embedding.py @@ -22,6 +22,7 @@ import base64 import io import os +import sys import cv2 import numpy as np @@ -76,6 +77,12 @@ def _initialize_model(self): # type: ignore[no-untyped-def] processor_id = "openai/clip-vit-base-patch32" providers = ["CUDAExecutionProvider", "CPUExecutionProvider"] + if sys.platform == "darwin": + # 2025-11-17 12:36:47.877215 [W:onnxruntime:, 
helper.cc:82 IsInputSupported] CoreML does not support input dim > 16384. Input:text_model.embeddings.token_embedding.weight, shape: {49408,512} + # 2025-11-17 12:36:47.878496 [W:onnxruntime:, coreml_execution_provider.cc:107 GetCapability] CoreMLExecutionProvider::GetCapability, number of partitions supported by CoreML: 88 number of nodes in the graph: 1504 number of nodes supported by CoreML: 933 + providers = ["CoreMLExecutionProvider"] + [ + each for each in providers if each != "CUDAExecutionProvider" + ] self.model = ort.InferenceSession(str(model_id), providers=providers) diff --git a/dimos/models/Detic/tools/dump_clip_features.py b/dimos/models/Detic/tools/dump_clip_features.py index 31be161f6d..f994710057 100644 --- a/dimos/models/Detic/tools/dump_clip_features.py +++ b/dimos/models/Detic/tools/dump_clip_features.py @@ -40,7 +40,14 @@ if args.use_underscore: cat_names = [x.strip().replace("/ ", "/").replace(" ", "_") for x in cat_names] print("cat_names", cat_names) - device = "cuda" if torch.cuda.is_available() else "cpu" + # Use GPU if available, otherwise fall back to CPU + if torch.cuda.is_available(): + device = "cuda" + # MacOS Metal performance shaders + elif torch.backends.mps.is_available() and torch.backends.mps.is_built(): + device = "mps" + else: + device = "cpu" if args.prompt == "a": sentences = ["a " + x for x in cat_names] diff --git a/dimos/models/embedding/clip.py b/dimos/models/embedding/clip.py index 6fd3f70009..246f782eee 100644 --- a/dimos/models/embedding/clip.py +++ b/dimos/models/embedding/clip.py @@ -43,7 +43,15 @@ def __init__( device: Device to run on (cuda/cpu), auto-detects if None normalize: Whether to L2 normalize embeddings """ - self.device = device or ("cuda" if torch.cuda.is_available() else "cpu") + # Use GPU if available, otherwise fall back to CPU + if torch.cuda.is_available(): + self.device = "cuda" + # MacOS Metal performance shaders + elif torch.backends.mps.is_available() and torch.backends.mps.is_built(): + 
self.device = "mps" + else: + self.device = "cpu" + self.normalize = normalize # Load model and processor diff --git a/dimos/models/embedding/embedding_models_disabled_tests.py b/dimos/models/embedding/embedding_models_disabled_tests.py index 6c80595571..18407f93fe 100644 --- a/dimos/models/embedding/embedding_models_disabled_tests.py +++ b/dimos/models/embedding/embedding_models_disabled_tests.py @@ -298,7 +298,7 @@ def test_gpu_query_performance(embedding_model, test_image) -> None: # type: ig assert len(results) == 5, "Should return top-5 results" # All should be high similarity (same image, allow some variation for image preprocessing) - for idx, sim in results: + for _, sim in results: assert sim > 0.90, f"Same images should have high similarity, got {sim}" diff --git a/dimos/models/embedding/mobileclip.py b/dimos/models/embedding/mobileclip.py index 0aa157c118..700eb3a354 100644 --- a/dimos/models/embedding/mobileclip.py +++ b/dimos/models/embedding/mobileclip.py @@ -51,7 +51,15 @@ def __init__( "Install it with: pip install open-clip-torch" ) - self.device = device or ("cuda" if torch.cuda.is_available() else "cpu") + # Use GPU if available, otherwise fall back to CPU + if torch.cuda.is_available(): + self.device = "cuda" + # MacOS Metal performance shaders + elif torch.backends.mps.is_available() and torch.backends.mps.is_built(): + self.device = "mps" + else: + self.device = "cpu" + self.normalize = normalize # Load model diff --git a/dimos/models/embedding/treid.py b/dimos/models/embedding/treid.py index db5db46a55..d6ade2d196 100644 --- a/dimos/models/embedding/treid.py +++ b/dimos/models/embedding/treid.py @@ -51,7 +51,15 @@ def __init__( "torchreid is required for TorchReIDModel. 
Install it with: pip install torchreid" ) - self.device = device or ("cuda" if torch.cuda.is_available() else "cpu") + # Use GPU if available, otherwise fall back to CPU + if torch.cuda.is_available(): + self.device = "cuda" + # MacOS Metal performance shaders + elif torch.backends.mps.is_available() and torch.backends.mps.is_built(): + self.device = "mps" + else: + self.device = "cpu" + self.normalize = normalize # Load model using torchreid's FeatureExtractor diff --git a/dimos/models/vl/moondream.py b/dimos/models/vl/moondream.py index 485377e305..4560be4702 100644 --- a/dimos/models/vl/moondream.py +++ b/dimos/models/vl/moondream.py @@ -23,7 +23,15 @@ def __init__( dtype: torch.dtype = torch.bfloat16, ) -> None: self._model_name = model_name - self._device = device or ("cuda" if torch.cuda.is_available() else "cpu") + # Use GPU if available, otherwise fall back to CPU + if torch.cuda.is_available(): + self._device = "cuda" + # MacOS Metal performance shaders + elif torch.backends.mps.is_available() and torch.backends.mps.is_built(): + self._device = "mps" + else: + self._device = "cpu" + self._dtype = dtype @cached_property diff --git a/dimos/perception/segmentation/sam_2d_seg.py b/dimos/perception/segmentation/sam_2d_seg.py index bc489147f4..2f81901bbc 100644 --- a/dimos/perception/segmentation/sam_2d_seg.py +++ b/dimos/perception/segmentation/sam_2d_seg.py @@ -31,7 +31,6 @@ plot_results, ) from dimos.utils.data import get_data -from dimos.utils.gpu_utils import is_cuda_available from dimos.utils.logging_config import setup_logger logger = setup_logger() @@ -48,14 +47,20 @@ def __init__( use_rich_labeling: bool = False, use_filtering: bool = True, ) -> None: - if is_cuda_available(): # type: ignore[no-untyped-call] + # Use GPU if available, otherwise fall back to CPU + if torch.cuda.is_available(): logger.info("Using CUDA for SAM 2d segmenter") if hasattr(onnxruntime, "preload_dlls"): # Handles CUDA 11 / onnxruntime-gpu<=1.18 
onnxruntime.preload_dlls(cuda=True, cudnn=True) self.device = "cuda" + # MacOS Metal performance shaders + elif torch.backends.mps.is_available() and torch.backends.mps.is_built(): + logger.info("Using Metal for SAM 2d segmenter") + self.device = "mps" else: logger.info("Using CPU for SAM 2d segmenter") self.device = "cpu" + # Core components self.model = FastSAM(get_data(model_path) / model_name) self.use_tracker = use_tracker diff --git a/dimos/protocol/pubsub/shmpubsub.py b/dimos/protocol/pubsub/shmpubsub.py index 1a03eddb20..db9004f5c3 100644 --- a/dimos/protocol/pubsub/shmpubsub.py +++ b/dimos/protocol/pubsub/shmpubsub.py @@ -233,8 +233,9 @@ def _ensure_topic(self, topic: str) -> _TopicState: cap = int(self.config.default_capacity) def _names_for_topic(topic: str, capacity: int) -> tuple[str, str]: - # Python’s SharedMemory requires names without a leading '/' - h = hashlib.blake2b(f"{topic}:{capacity}".encode(), digest_size=12).hexdigest() + # Python's SharedMemory requires names without a leading '/' + # Use shorter digest to avoid macOS shared memory name length limits + h = hashlib.blake2b(f"{topic}:{capacity}".encode(), digest_size=8).hexdigest() return f"psm_{h}_data", f"psm_{h}_ctrl" data_name, ctrl_name = _names_for_topic(topic, cap) diff --git a/dimos/protocol/service/lcmservice.py b/dimos/protocol/service/lcmservice.py index 23fb7407be..6b25ed1f5c 100644 --- a/dimos/protocol/service/lcmservice.py +++ b/dimos/protocol/service/lcmservice.py @@ -18,6 +18,7 @@ from dataclasses import dataclass from functools import cache import os +import platform import subprocess import sys import threading @@ -48,56 +49,103 @@ def check_multicast() -> list[str]: sudo = "" if check_root() else "sudo " - # Check if loopback interface has multicast enabled - try: - result = subprocess.run(["ip", "link", "show", "lo"], capture_output=True, text=True) - if "MULTICAST" not in result.stdout: - commands_needed.append(f"{sudo}ifconfig lo multicast") - except Exception: - 
commands_needed.append(f"{sudo}ifconfig lo multicast") + system = platform.system() - # Check if multicast route exists - try: - result = subprocess.run( - ["ip", "route", "show", "224.0.0.0/4"], capture_output=True, text=True - ) - if not result.stdout.strip(): - commands_needed.append(f"{sudo}route add -net 224.0.0.0 netmask 240.0.0.0 dev lo") - except Exception: - commands_needed.append(f"{sudo}route add -net 224.0.0.0 netmask 240.0.0.0 dev lo") + if system == "Linux": + # Linux commands + loopback_interface = "lo" + # Check if loopback interface has multicast enabled + try: + result = subprocess.run( + ["ip", "link", "show", loopback_interface], capture_output=True, text=True + ) + if "MULTICAST" not in result.stdout: + commands_needed.append(f"{sudo}ifconfig {loopback_interface} multicast") + except Exception: + commands_needed.append(f"{sudo}ifconfig {loopback_interface} multicast") + + # Check if multicast route exists + try: + result = subprocess.run( + ["ip", "route", "show", "224.0.0.0/4"], capture_output=True, text=True + ) + if not result.stdout.strip(): + commands_needed.append( + f"{sudo}route add -net 224.0.0.0 netmask 240.0.0.0 dev {loopback_interface}" + ) + except Exception: + commands_needed.append( + f"{sudo}route add -net 224.0.0.0 netmask 240.0.0.0 dev {loopback_interface}" + ) + + elif system == "Darwin": # macOS + loopback_interface = "lo0" + # Check if multicast route exists + try: + result = subprocess.run(["netstat", "-nr"], capture_output=True, text=True) + route_exists = "224.0.0.0/4" in result.stdout or "224.0.0/4" in result.stdout + if not route_exists: + commands_needed.append( + f"{sudo}route add -net 224.0.0.0/4 -interface {loopback_interface}" + ) + except Exception: + commands_needed.append( + f"{sudo}route add -net 224.0.0.0/4 -interface {loopback_interface}" + ) + + else: + # For other systems, skip multicast configuration + logger.warning(f"Multicast configuration not supported on {system}") return commands_needed +def 
_set_net_value(commands_needed: list[str], sudo: str, name: str, value: int) -> int | None: + try: + result = subprocess.run(["sysctl", name], capture_output=True, text=True) + if result.returncode == 0: + current = int(result.stdout.replace(":", "=").split("=")[1].strip()) + else: + current = None + if not current or current < value: + commands_needed.append(f"{sudo}sysctl -w {name}={value}") + return current + except Exception: + commands_needed.append(f"{sudo}sysctl -w {name}={value}") + return None + + +TARGET_RMEM_SIZE = 2097152 # prev was 67108864 +TARGET_MAX_SOCKET_BUFFER_SIZE_MACOS = 8388608 +TARGET_MAX_DGRAM_SIZE_MACOS = 65535 + + def check_buffers() -> tuple[list[str], int | None]: """Check if buffer configuration is needed and return required commands and current size. Returns: Tuple of (commands_needed, current_max_buffer_size) """ - commands_needed = [] + commands_needed: list[str] = [] current_max = None sudo = "" if check_root() else "sudo " - - # Check current buffer settings - try: - result = subprocess.run(["sysctl", "net.core.rmem_max"], capture_output=True, text=True) - current_max = int(result.stdout.split("=")[1].strip()) if result.returncode == 0 else None - if not current_max or current_max < 67108864: - commands_needed.append(f"{sudo}sysctl -w net.core.rmem_max=67108864") - except: - commands_needed.append(f"{sudo}sysctl -w net.core.rmem_max=67108864") - - try: - result = subprocess.run(["sysctl", "net.core.rmem_default"], capture_output=True, text=True) - current_default = ( - int(result.stdout.split("=")[1].strip()) if result.returncode == 0 else None + system = platform.system() + + if system == "Linux": + # Linux buffer configuration + current_max = _set_net_value(commands_needed, sudo, "net.core.rmem_max", TARGET_RMEM_SIZE) + _set_net_value(commands_needed, sudo, "net.core.rmem_default", TARGET_RMEM_SIZE) + elif system == "Darwin": # macOS + # macOS buffer configuration - check and set UDP buffer related sysctls + current_max = _set_net_value(
commands_needed, sudo, "kern.ipc.maxsockbuf", TARGET_MAX_SOCKET_BUFFER_SIZE_MACOS ) - if not current_default or current_default < 16777216: - commands_needed.append(f"{sudo}sysctl -w net.core.rmem_default=16777216") - except: - commands_needed.append(f"{sudo}sysctl -w net.core.rmem_default=16777216") + _set_net_value(commands_needed, sudo, "net.inet.udp.recvspace", TARGET_RMEM_SIZE) + _set_net_value(commands_needed, sudo, "net.inet.udp.maxdgram", TARGET_MAX_DGRAM_SIZE_MACOS) + else: + # For other systems, skip buffer configuration + logger.warning(f"Buffer configuration not supported on {system}") return commands_needed, current_max @@ -145,6 +193,8 @@ def autoconf() -> None: logger.info("CI environment detected: Skipping automatic system configuration.") return + platform.system() + commands_needed = [] # Check multicast configuration @@ -187,6 +237,9 @@ def autoconf() -> None: logger.info("System configuration completed.") +_DEFAULT_LCM_URL_MACOS = "udpm://239.255.76.67:7667?ttl=0" + + @dataclass class LCMConfig: ttl: int = 0 @@ -194,6 +247,11 @@ class LCMConfig: autoconf: bool = True lcm: lcm.LCM | None = None + def __post_init__(self) -> None: + if self.url is None and platform.system() == "Darwin": + # On macOS, use multicast with TTL=0 to keep traffic local + self.url = _DEFAULT_LCM_URL_MACOS + @runtime_checkable class LCMMsg(Protocol): @@ -220,6 +278,9 @@ def __str__(self) -> str: return f"{self.topic}#{self.lcm_type.msg_name}" +_LCM_LOOP_TIMEOUT = 50 + + class LCMService(Service[LCMConfig]): default_config = LCMConfig l: lcm.LCM | None @@ -234,13 +295,11 @@ def __init__(self, **kwargs) -> None: # type: ignore[no-untyped-def] # we support passing an existing LCM instance if self.config.lcm: - # TODO: If we pass LCM in, it's unsafe to use in this thread and the _loop thread. 
self.l = self.config.lcm else: self.l = lcm.LCM(self.config.url) if self.config.url else lcm.LCM() self._l_lock = threading.Lock() - self._stop_event = threading.Event() self._thread = None @@ -295,7 +354,7 @@ def _lcm_loop(self) -> None: with self._l_lock: if self.l is None: break - self.l.handle_timeout(50) + self.l.handle_timeout(_LCM_LOOP_TIMEOUT) except Exception as e: stack_trace = traceback.format_exc() print(f"Error in LCM handling: {e}\n{stack_trace}") diff --git a/dimos/protocol/service/test_lcmservice.py b/dimos/protocol/service/test_lcmservice.py index 8d3c77d3db..300552c9ed 100644 --- a/dimos/protocol/service/test_lcmservice.py +++ b/dimos/protocol/service/test_lcmservice.py @@ -19,6 +19,9 @@ import pytest from dimos.protocol.service.lcmservice import ( + TARGET_MAX_DGRAM_SIZE_MACOS, + TARGET_MAX_SOCKET_BUFFER_SIZE_MACOS, + TARGET_RMEM_SIZE, autoconf, check_buffers, check_multicast, @@ -33,394 +36,532 @@ def get_sudo_prefix() -> str: def test_check_multicast_all_configured() -> None: """Test check_multicast when system is properly configured.""" - with patch("dimos.protocol.service.lcmservice.subprocess.run") as mock_run: - # Mock successful checks with realistic output format - mock_run.side_effect = [ - type( - "MockResult", - (), - { - "stdout": "1: lo: mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000\n link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00", - "returncode": 0, - }, - )(), - type("MockResult", (), {"stdout": "224.0.0.0/4 dev lo scope link", "returncode": 0})(), - ] - - result = check_multicast() - assert result == [] + with patch("dimos.protocol.service.lcmservice.platform.system", return_value="Linux"): + with patch("dimos.protocol.service.lcmservice.subprocess.run") as mock_run: + # Mock successful checks with realistic output format + mock_run.side_effect = [ + type( + "MockResult", + (), + { + "stdout": "1: lo: mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000\n link/loopback 
00:00:00:00:00:00 brd 00:00:00:00:00:00", + "returncode": 0, + }, + )(), + type( + "MockResult", (), {"stdout": "224.0.0.0/4 dev lo scope link", "returncode": 0} + )(), + ] + + result = check_multicast() + assert result == [] def test_check_multicast_missing_multicast_flag() -> None: """Test check_multicast when loopback interface lacks multicast.""" - with patch("dimos.protocol.service.lcmservice.subprocess.run") as mock_run: - # Mock interface without MULTICAST flag (realistic current system state) - mock_run.side_effect = [ - type( - "MockResult", - (), - { - "stdout": "1: lo: mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000\n link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00", - "returncode": 0, - }, - )(), - type("MockResult", (), {"stdout": "224.0.0.0/4 dev lo scope link", "returncode": 0})(), - ] - - result = check_multicast() - sudo = get_sudo_prefix() - assert result == [f"{sudo}ifconfig lo multicast"] + with patch("dimos.protocol.service.lcmservice.platform.system", return_value="Linux"): + with patch("dimos.protocol.service.lcmservice.subprocess.run") as mock_run: + # Mock interface without MULTICAST flag (realistic current system state) + mock_run.side_effect = [ + type( + "MockResult", + (), + { + "stdout": "1: lo: mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000\n link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00", + "returncode": 0, + }, + )(), + type( + "MockResult", (), {"stdout": "224.0.0.0/4 dev lo scope link", "returncode": 0} + )(), + ] + + result = check_multicast() + sudo = get_sudo_prefix() + assert result == [f"{sudo}ifconfig lo multicast"] def test_check_multicast_missing_route() -> None: """Test check_multicast when multicast route is missing.""" - with patch("dimos.protocol.service.lcmservice.subprocess.run") as mock_run: - # Mock missing route - interface has multicast but no route - mock_run.side_effect = [ - type( - "MockResult", - (), - { - "stdout": "1: lo: mtu 65536 qdisc 
noqueue state UNKNOWN mode DEFAULT group default qlen 1000\n link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00", - "returncode": 0, - }, - )(), - type("MockResult", (), {"stdout": "", "returncode": 0})(), # Empty output - no route - ] - - result = check_multicast() - sudo = get_sudo_prefix() - assert result == [f"{sudo}route add -net 224.0.0.0 netmask 240.0.0.0 dev lo"] + with patch("dimos.protocol.service.lcmservice.platform.system", return_value="Linux"): + with patch("dimos.protocol.service.lcmservice.subprocess.run") as mock_run: + # Mock missing route - interface has multicast but no route + mock_run.side_effect = [ + type( + "MockResult", + (), + { + "stdout": "1: lo: mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000\n link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00", + "returncode": 0, + }, + )(), + type( + "MockResult", (), {"stdout": "", "returncode": 0} + )(), # Empty output - no route + ] + + result = check_multicast() + sudo = get_sudo_prefix() + assert result == [f"{sudo}route add -net 224.0.0.0 netmask 240.0.0.0 dev lo"] def test_check_multicast_all_missing() -> None: """Test check_multicast when both multicast flag and route are missing (current system state).""" - with patch("dimos.protocol.service.lcmservice.subprocess.run") as mock_run: - # Mock both missing - matches actual current system state - mock_run.side_effect = [ - type( - "MockResult", - (), - { - "stdout": "1: lo: mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000\n link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00", - "returncode": 0, - }, - )(), - type("MockResult", (), {"stdout": "", "returncode": 0})(), # Empty output - no route - ] - - result = check_multicast() - sudo = get_sudo_prefix() - expected = [ - f"{sudo}ifconfig lo multicast", - f"{sudo}route add -net 224.0.0.0 netmask 240.0.0.0 dev lo", - ] - assert result == expected + with patch("dimos.protocol.service.lcmservice.platform.system", return_value="Linux"): + 
with patch("dimos.protocol.service.lcmservice.subprocess.run") as mock_run: + # Mock both missing - matches actual current system state + mock_run.side_effect = [ + type( + "MockResult", + (), + { + "stdout": "1: lo: mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000\n link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00", + "returncode": 0, + }, + )(), + type( + "MockResult", (), {"stdout": "", "returncode": 0} + )(), # Empty output - no route + ] + + result = check_multicast() + sudo = get_sudo_prefix() + expected = [ + f"{sudo}ifconfig lo multicast", + f"{sudo}route add -net 224.0.0.0 netmask 240.0.0.0 dev lo", + ] + assert result == expected def test_check_multicast_subprocess_exception() -> None: """Test check_multicast when subprocess calls fail.""" - with patch("dimos.protocol.service.lcmservice.subprocess.run") as mock_run: - # Mock subprocess exceptions - mock_run.side_effect = Exception("Command failed") + with patch("dimos.protocol.service.lcmservice.platform.system", return_value="Linux"): + with patch("dimos.protocol.service.lcmservice.subprocess.run") as mock_run: + # Mock subprocess exceptions + mock_run.side_effect = Exception("Command failed") + + result = check_multicast() + sudo = get_sudo_prefix() + expected = [ + f"{sudo}ifconfig lo multicast", + f"{sudo}route add -net 224.0.0.0 netmask 240.0.0.0 dev lo", + ] + assert result == expected + + +def test_check_multicast_macos() -> None: + """Test check_multicast on macOS when configuration is needed.""" + with patch("dimos.protocol.service.lcmservice.platform.system", return_value="Darwin"): + with patch("dimos.protocol.service.lcmservice.subprocess.run") as mock_run: + # Mock netstat -nr to not contain the multicast route + mock_run.side_effect = [ + type( + "MockResult", + (), + { + "stdout": "default 192.168.1.1 UGScg en0", + "returncode": 0, + }, + )(), + ] - result = check_multicast() - sudo = get_sudo_prefix() - expected = [ - f"{sudo}ifconfig lo multicast", - 
f"{sudo}route add -net 224.0.0.0 netmask 240.0.0.0 dev lo", - ] - assert result == expected + result = check_multicast() + sudo = get_sudo_prefix() + expected = [f"{sudo}route add -net 224.0.0.0/4 -interface lo0"] + assert result == expected def test_check_buffers_all_configured() -> None: """Test check_buffers when system is properly configured.""" - with patch("dimos.protocol.service.lcmservice.subprocess.run") as mock_run: - # Mock sufficient buffer sizes (64MB for max, 16MB for default) - mock_run.side_effect = [ - type("MockResult", (), {"stdout": "net.core.rmem_max = 67108864", "returncode": 0})(), - type( - "MockResult", (), {"stdout": "net.core.rmem_default = 16777216", "returncode": 0} - )(), - ] + with patch("dimos.protocol.service.lcmservice.platform.system", return_value="Linux"): + with patch("dimos.protocol.service.lcmservice.subprocess.run") as mock_run: + # Mock sufficient buffer sizes + mock_run.side_effect = [ + type( + "MockResult", (), {"stdout": "net.core.rmem_max = 67108864", "returncode": 0} + )(), + type( + "MockResult", + (), + {"stdout": "net.core.rmem_default = 16777216", "returncode": 0}, + )(), + ] - commands, buffer_size = check_buffers() - assert commands == [] - assert buffer_size == 67108864 + commands, buffer_size = check_buffers() + assert commands == [] + assert buffer_size >= TARGET_RMEM_SIZE def test_check_buffers_low_max_buffer() -> None: """Test check_buffers when rmem_max is too low.""" - with patch("dimos.protocol.service.lcmservice.subprocess.run") as mock_run: - # Mock low rmem_max (below 64MB minimum) - mock_run.side_effect = [ - type("MockResult", (), {"stdout": "net.core.rmem_max = 1048576", "returncode": 0})(), - type( - "MockResult", (), {"stdout": "net.core.rmem_default = 16777216", "returncode": 0} - )(), - ] + with patch("dimos.protocol.service.lcmservice.platform.system", return_value="Linux"): + with patch("dimos.protocol.service.lcmservice.subprocess.run") as mock_run: + # Mock low rmem_max + 
mock_run.side_effect = [ + type( + "MockResult", (), {"stdout": "net.core.rmem_max = 1048576", "returncode": 0} + )(), + type( + "MockResult", + (), + {"stdout": f"net.core.rmem_default = {TARGET_RMEM_SIZE}", "returncode": 0}, + )(), + ] - commands, buffer_size = check_buffers() - sudo = get_sudo_prefix() - assert commands == [f"{sudo}sysctl -w net.core.rmem_max=67108864"] - assert buffer_size == 1048576 + commands, buffer_size = check_buffers() + sudo = get_sudo_prefix() + assert commands == [f"{sudo}sysctl -w net.core.rmem_max={TARGET_RMEM_SIZE}"] + assert buffer_size == 1048576 def test_check_buffers_low_default_buffer() -> None: """Test check_buffers when rmem_default is too low.""" - with patch("dimos.protocol.service.lcmservice.subprocess.run") as mock_run: - # Mock low rmem_default (below 16MB minimum) - mock_run.side_effect = [ - type("MockResult", (), {"stdout": "net.core.rmem_max = 67108864", "returncode": 0})(), - type( - "MockResult", (), {"stdout": "net.core.rmem_default = 1048576", "returncode": 0} - )(), - ] + with patch("dimos.protocol.service.lcmservice.platform.system", return_value="Linux"): + with patch("dimos.protocol.service.lcmservice.subprocess.run") as mock_run: + # Mock low rmem_default + mock_run.side_effect = [ + type( + "MockResult", + (), + {"stdout": f"net.core.rmem_max = {TARGET_RMEM_SIZE}", "returncode": 0}, + )(), + type( + "MockResult", (), {"stdout": "net.core.rmem_default = 1048576", "returncode": 0} + )(), + ] - commands, buffer_size = check_buffers() - sudo = get_sudo_prefix() - assert commands == [f"{sudo}sysctl -w net.core.rmem_default=16777216"] - assert buffer_size == 67108864 + commands, buffer_size = check_buffers() + sudo = get_sudo_prefix() + assert commands == [f"{sudo}sysctl -w net.core.rmem_default={TARGET_RMEM_SIZE}"] + assert buffer_size == TARGET_RMEM_SIZE def test_check_buffers_both_low() -> None: """Test check_buffers when both buffer sizes are too low.""" - with 
patch("dimos.protocol.service.lcmservice.subprocess.run") as mock_run: - # Mock both low (below minimums) - mock_run.side_effect = [ - type("MockResult", (), {"stdout": "net.core.rmem_max = 1048576", "returncode": 0})(), - type( - "MockResult", (), {"stdout": "net.core.rmem_default = 1048576", "returncode": 0} - )(), - ] - - commands, buffer_size = check_buffers() - sudo = get_sudo_prefix() - expected = [ - f"{sudo}sysctl -w net.core.rmem_max=67108864", - f"{sudo}sysctl -w net.core.rmem_default=16777216", - ] - assert commands == expected - assert buffer_size == 1048576 + with patch("dimos.protocol.service.lcmservice.platform.system", return_value="Linux"): + with patch("dimos.protocol.service.lcmservice.subprocess.run") as mock_run: + # Mock both low + mock_run.side_effect = [ + type( + "MockResult", (), {"stdout": "net.core.rmem_max = 1048576", "returncode": 0} + )(), + type( + "MockResult", (), {"stdout": "net.core.rmem_default = 1048576", "returncode": 0} + )(), + ] + + commands, buffer_size = check_buffers() + sudo = get_sudo_prefix() + expected = [ + f"{sudo}sysctl -w net.core.rmem_max={TARGET_RMEM_SIZE}", + f"{sudo}sysctl -w net.core.rmem_default={TARGET_RMEM_SIZE}", + ] + assert commands == expected + assert buffer_size == 1048576 def test_check_buffers_subprocess_exception() -> None: """Test check_buffers when subprocess calls fail.""" - with patch("dimos.protocol.service.lcmservice.subprocess.run") as mock_run: - # Mock subprocess exceptions - mock_run.side_effect = Exception("Command failed") - - commands, buffer_size = check_buffers() - sudo = get_sudo_prefix() - expected = [ - f"{sudo}sysctl -w net.core.rmem_max=67108864", - f"{sudo}sysctl -w net.core.rmem_default=16777216", - ] - assert commands == expected - assert buffer_size is None + with patch("dimos.protocol.service.lcmservice.platform.system", return_value="Linux"): + with patch("dimos.protocol.service.lcmservice.subprocess.run") as mock_run: + # Mock subprocess exceptions + 
mock_run.side_effect = Exception("Command failed") + + commands, buffer_size = check_buffers() + sudo = get_sudo_prefix() + expected = [ + f"{sudo}sysctl -w net.core.rmem_max={TARGET_RMEM_SIZE}", + f"{sudo}sysctl -w net.core.rmem_default={TARGET_RMEM_SIZE}", + ] + assert commands == expected + assert buffer_size is None def test_check_buffers_parsing_error() -> None: """Test check_buffers when output parsing fails.""" - with patch("dimos.protocol.service.lcmservice.subprocess.run") as mock_run: - # Mock malformed output - mock_run.side_effect = [ - type("MockResult", (), {"stdout": "invalid output", "returncode": 0})(), - type("MockResult", (), {"stdout": "also invalid", "returncode": 0})(), - ] - - commands, buffer_size = check_buffers() - sudo = get_sudo_prefix() - expected = [ - f"{sudo}sysctl -w net.core.rmem_max=67108864", - f"{sudo}sysctl -w net.core.rmem_default=16777216", - ] - assert commands == expected - assert buffer_size is None + with patch("dimos.protocol.service.lcmservice.platform.system", return_value="Linux"): + with patch("dimos.protocol.service.lcmservice.subprocess.run") as mock_run: + # Mock malformed output + mock_run.side_effect = [ + type("MockResult", (), {"stdout": "invalid output", "returncode": 0})(), + type("MockResult", (), {"stdout": "also invalid", "returncode": 0})(), + ] + + commands, buffer_size = check_buffers() + sudo = get_sudo_prefix() + expected = [ + f"{sudo}sysctl -w net.core.rmem_max={TARGET_RMEM_SIZE}", + f"{sudo}sysctl -w net.core.rmem_default={TARGET_RMEM_SIZE}", + ] + assert commands == expected + assert buffer_size is None def test_check_buffers_dev_container() -> None: """Test check_buffers in dev container where sysctl fails.""" - with patch("dimos.protocol.service.lcmservice.subprocess.run") as mock_run: - # Mock dev container behavior - sysctl returns non-zero - mock_run.side_effect = [ - type( - "MockResult", - (), - { - "stdout": "sysctl: cannot stat /proc/sys/net/core/rmem_max: No such file or directory", - 
"returncode": 255, - }, - )(), - type( - "MockResult", - (), - { - "stdout": "sysctl: cannot stat /proc/sys/net/core/rmem_default: No such file or directory", - "returncode": 255, - }, - )(), - ] - - commands, buffer_size = check_buffers() - sudo = get_sudo_prefix() - expected = [ - f"{sudo}sysctl -w net.core.rmem_max=67108864", - f"{sudo}sysctl -w net.core.rmem_default=16777216", - ] - assert commands == expected - assert buffer_size is None - - -def test_autoconf_no_config_needed() -> None: - """Test autoconf when no configuration is needed.""" - # Clear CI environment variable for this test - with patch.dict(os.environ, {"CI": ""}, clear=False): + with patch("dimos.protocol.service.lcmservice.platform.system", return_value="Linux"): with patch("dimos.protocol.service.lcmservice.subprocess.run") as mock_run: - # Mock all checks passing with new buffer sizes (64MB and 16MB) + # Mock dev container behavior - sysctl returns non-zero mock_run.side_effect = [ - # check_multicast calls type( "MockResult", (), { - "stdout": "1: lo: mtu 65536", - "returncode": 0, + "stdout": "sysctl: cannot stat /proc/sys/net/core/rmem_max: No such file or directory", + "returncode": 255, }, )(), - type( - "MockResult", (), {"stdout": "224.0.0.0/4 dev lo scope link", "returncode": 0} - )(), - # check_buffers calls - type( - "MockResult", (), {"stdout": "net.core.rmem_max = 67108864", "returncode": 0} - )(), type( "MockResult", (), - {"stdout": "net.core.rmem_default = 16777216", "returncode": 0}, + { + "stdout": "sysctl: cannot stat /proc/sys/net/core/rmem_default: No such file or directory", + "returncode": 255, + }, )(), ] - with patch("dimos.protocol.service.lcmservice.logger") as mock_logger: - autoconf() - # Should not log anything when no config is needed - mock_logger.info.assert_not_called() - mock_logger.error.assert_not_called() - mock_logger.warning.assert_not_called() + commands, buffer_size = check_buffers() + sudo = get_sudo_prefix() + expected = [ + f"{sudo}sysctl -w 
net.core.rmem_max={TARGET_RMEM_SIZE}", + f"{sudo}sysctl -w net.core.rmem_default={TARGET_RMEM_SIZE}", + ] + assert commands == expected + assert buffer_size is None -def test_autoconf_with_config_needed_success() -> None: - """Test autoconf when configuration is needed and commands succeed.""" - # Clear CI environment variable for this test - with patch.dict(os.environ, {"CI": ""}, clear=False): +def test_check_buffers_macos_all_configured() -> None: + """Test check_buffers on macOS when system is properly configured.""" + with patch("dimos.protocol.service.lcmservice.platform.system", return_value="Darwin"): with patch("dimos.protocol.service.lcmservice.subprocess.run") as mock_run: - # Mock checks failing, then mock the execution succeeding + # Mock sufficient buffer sizes for macOS mock_run.side_effect = [ - # check_multicast calls type( "MockResult", (), - {"stdout": "1: lo: mtu 65536", "returncode": 0}, + { + "stdout": f"kern.ipc.maxsockbuf: {TARGET_MAX_SOCKET_BUFFER_SIZE_MACOS}", + "returncode": 0, + }, )(), - type("MockResult", (), {"stdout": "", "returncode": 0})(), - # check_buffers calls (low buffer sizes) type( - "MockResult", (), {"stdout": "net.core.rmem_max = 1048576", "returncode": 0} + "MockResult", + (), + {"stdout": f"net.inet.udp.recvspace: {TARGET_RMEM_SIZE}", "returncode": 0}, )(), type( - "MockResult", (), {"stdout": "net.core.rmem_default = 1048576", "returncode": 0} + "MockResult", + (), + { + "stdout": f"net.inet.udp.maxdgram: {TARGET_MAX_DGRAM_SIZE_MACOS}", + "returncode": 0, + }, )(), - # Command execution calls - type( - "MockResult", (), {"stdout": "success", "returncode": 0} - )(), # ifconfig lo multicast - type("MockResult", (), {"stdout": "success", "returncode": 0})(), # route add... 
- type("MockResult", (), {"stdout": "success", "returncode": 0})(), # sysctl rmem_max - type( - "MockResult", (), {"stdout": "success", "returncode": 0} - )(), # sysctl rmem_default ] - from unittest.mock import call - - with patch("dimos.protocol.service.lcmservice.logger") as mock_logger: - autoconf() - - sudo = get_sudo_prefix() - # Verify the expected log calls with new buffer sizes - expected_info_calls = [ - call("System configuration required. Executing commands..."), - call(f" Running: {sudo}ifconfig lo multicast"), - call(" ✓ Success"), - call(f" Running: {sudo}route add -net 224.0.0.0 netmask 240.0.0.0 dev lo"), - call(" ✓ Success"), - call(f" Running: {sudo}sysctl -w net.core.rmem_max=67108864"), - call(" ✓ Success"), - call(f" Running: {sudo}sysctl -w net.core.rmem_default=16777216"), - call(" ✓ Success"), - call("System configuration completed."), - ] - - mock_logger.info.assert_has_calls(expected_info_calls) + commands, buffer_size = check_buffers() + assert commands == [] + assert buffer_size == TARGET_MAX_SOCKET_BUFFER_SIZE_MACOS -def test_autoconf_with_command_failures() -> None: - """Test autoconf when some commands fail.""" - # Clear CI environment variable for this test - with patch.dict(os.environ, {"CI": ""}, clear=False): +def test_check_buffers_macos_needs_config() -> None: + """Test check_buffers on macOS when configuration is needed.""" + with patch("dimos.protocol.service.lcmservice.platform.system", return_value="Darwin"): with patch("dimos.protocol.service.lcmservice.subprocess.run") as mock_run: - # Mock checks failing, then mock some commands failing + mock_max_sock_buf_size = 4194304 + # Mock low buffer sizes for macOS mock_run.side_effect = [ - # check_multicast calls type( "MockResult", (), - {"stdout": "1: lo: mtu 65536", "returncode": 0}, + {"stdout": f"kern.ipc.maxsockbuf: {mock_max_sock_buf_size}", "returncode": 0}, )(), - type("MockResult", (), {"stdout": "", "returncode": 0})(), - # check_buffers calls (no buffer issues for 
simpler test, use new minimums) type( - "MockResult", (), {"stdout": "net.core.rmem_max = 67108864", "returncode": 0} + "MockResult", (), {"stdout": "net.inet.udp.recvspace: 1048576", "returncode": 0} )(), type( - "MockResult", - (), - {"stdout": "net.core.rmem_default = 16777216", "returncode": 0}, + "MockResult", (), {"stdout": "net.inet.udp.maxdgram: 32768", "returncode": 0} )(), - # Command execution calls - first succeeds, second fails - type( - "MockResult", (), {"stdout": "success", "returncode": 0} - )(), # ifconfig lo multicast - subprocess.CalledProcessError( - 1, - [ - *get_sudo_prefix().split(), - "route", - "add", - "-net", - "224.0.0.0", - "netmask", - "240.0.0.0", - "dev", - "lo", - ], - "Permission denied", - "Operation not permitted", - ), ] - with patch("dimos.protocol.service.lcmservice.logger") as mock_logger: - # The function should raise on multicast/route failures - with pytest.raises(subprocess.CalledProcessError): + commands, buffer_size = check_buffers() + sudo = get_sudo_prefix() + expected = [ + f"{sudo}sysctl -w kern.ipc.maxsockbuf={TARGET_MAX_SOCKET_BUFFER_SIZE_MACOS}", + f"{sudo}sysctl -w net.inet.udp.recvspace={TARGET_RMEM_SIZE}", + f"{sudo}sysctl -w net.inet.udp.maxdgram={TARGET_MAX_DGRAM_SIZE_MACOS}", + ] + assert commands == expected + assert buffer_size == mock_max_sock_buf_size + + +def test_autoconf_no_config_needed() -> None: + """Test autoconf when no configuration is needed.""" + # Clear CI environment variable for this test + with patch.dict(os.environ, {"CI": ""}, clear=False): + with patch("dimos.protocol.service.lcmservice.platform.system", return_value="Linux"): + with patch("dimos.protocol.service.lcmservice.subprocess.run") as mock_run: + # Mock all checks passing + mock_run.side_effect = [ + # check_multicast calls + type( + "MockResult", + (), + { + "stdout": "1: lo: mtu 65536", + "returncode": 0, + }, + )(), + type( + "MockResult", + (), + {"stdout": "224.0.0.0/4 dev lo scope link", "returncode": 0}, + )(), + # 
check_buffers calls + type( + "MockResult", + (), + {"stdout": f"net.core.rmem_max = {TARGET_RMEM_SIZE}", "returncode": 0}, + )(), + type( + "MockResult", + (), + {"stdout": f"net.core.rmem_default = {TARGET_RMEM_SIZE}", "returncode": 0}, + )(), + ] + + with patch("dimos.protocol.service.lcmservice.logger") as mock_logger: autoconf() + # Should not log anything when no config is needed + mock_logger.info.assert_not_called() + mock_logger.error.assert_not_called() + mock_logger.warning.assert_not_called() + + +def test_autoconf_with_config_needed_success() -> None: + """Test autoconf when configuration is needed and commands succeed.""" + # Clear CI environment variable for this test + with patch.dict(os.environ, {"CI": ""}, clear=False): + with patch("dimos.protocol.service.lcmservice.platform.system", return_value="Linux"): + with patch("dimos.protocol.service.lcmservice.subprocess.run") as mock_run: + # Mock checks failing, then mock the execution succeeding + mock_run.side_effect = [ + # check_multicast calls + type( + "MockResult", + (), + {"stdout": "1: lo: mtu 65536", "returncode": 0}, + )(), + type("MockResult", (), {"stdout": "", "returncode": 0})(), + # check_buffers calls + type( + "MockResult", (), {"stdout": "net.core.rmem_max = 1048576", "returncode": 0} + )(), + type( + "MockResult", + (), + {"stdout": "net.core.rmem_default = 1048576", "returncode": 0}, + )(), + # Command execution calls + type( + "MockResult", (), {"stdout": "success", "returncode": 0} + )(), # ifconfig lo multicast + type( + "MockResult", (), {"stdout": "success", "returncode": 0} + )(), # route add... 
+ type( + "MockResult", (), {"stdout": "success", "returncode": 0} + )(), # sysctl rmem_max + type( + "MockResult", (), {"stdout": "success", "returncode": 0} + )(), # sysctl rmem_default + ] + + from unittest.mock import call + + with patch("dimos.protocol.service.lcmservice.logger") as mock_logger: + autoconf() + + sudo = get_sudo_prefix() + # Verify the expected log calls + expected_info_calls = [ + call("System configuration required. Executing commands..."), + call(f" Running: {sudo}ifconfig lo multicast"), + call(" ✓ Success"), + call(f" Running: {sudo}route add -net 224.0.0.0 netmask 240.0.0.0 dev lo"), + call(" ✓ Success"), + call(f" Running: {sudo}sysctl -w net.core.rmem_max={TARGET_RMEM_SIZE}"), + call(" ✓ Success"), + call( + f" Running: {sudo}sysctl -w net.core.rmem_default={TARGET_RMEM_SIZE}" + ), + call(" ✓ Success"), + call("System configuration completed."), + ] + + mock_logger.info.assert_has_calls(expected_info_calls) + + +def test_autoconf_with_command_failures() -> None: + """Test autoconf when some commands fail.""" + # Clear CI environment variable for this test + with patch.dict(os.environ, {"CI": ""}, clear=False): + with patch("dimos.protocol.service.lcmservice.platform.system", return_value="Linux"): + with patch("dimos.protocol.service.lcmservice.subprocess.run") as mock_run: + # Mock checks failing, then mock some commands failing + mock_run.side_effect = [ + # check_multicast calls + type( + "MockResult", + (), + {"stdout": "1: lo: mtu 65536", "returncode": 0}, + )(), + type("MockResult", (), {"stdout": "", "returncode": 0})(), + # check_buffers calls (no buffer issues for simpler test) + type( + "MockResult", + (), + {"stdout": f"net.core.rmem_max = {TARGET_RMEM_SIZE}", "returncode": 0}, + )(), + type( + "MockResult", + (), + {"stdout": f"net.core.rmem_default = {TARGET_RMEM_SIZE}", "returncode": 0}, + )(), + # Command execution calls - first succeeds, second fails + type( + "MockResult", (), {"stdout": "success", "returncode": 0} + 
)(), # ifconfig lo multicast + subprocess.CalledProcessError( + 1, + [ + *get_sudo_prefix().split(), + "route", + "add", + "-net", + "224.0.0.0", + "netmask", + "240.0.0.0", + "dev", + "lo", + ], + "Permission denied", + "Operation not permitted", + ), + ] + + with patch("dimos.protocol.service.lcmservice.logger") as mock_logger: + # The function should raise on multicast/route failures + with pytest.raises(subprocess.CalledProcessError): + autoconf() - # Verify it logged the failure before raising - info_calls = [call[0][0] for call in mock_logger.info.call_args_list] - error_calls = [call[0][0] for call in mock_logger.error.call_args_list] + # Verify it logged the failure before raising + info_calls = [call[0][0] for call in mock_logger.info.call_args_list] + error_calls = [call[0][0] for call in mock_logger.error.call_args_list] - assert "System configuration required. Executing commands..." in info_calls - assert " ✓ Success" in info_calls # First command succeeded - assert any( - "✗ Failed to configure multicast" in call for call in error_calls - ) # Second command failed + assert "System configuration required. Executing commands..." 
in info_calls + assert " ✓ Success" in info_calls # First command succeeded + assert any( + "✗ Failed to configure multicast" in call for call in error_calls + ) # Second command failed diff --git a/dimos/robot/unitree_webrtc/mujoco_connection.py b/dimos/robot/unitree_webrtc/mujoco_connection.py index a214182925..053c6d52aa 100644 --- a/dimos/robot/unitree_webrtc/mujoco_connection.py +++ b/dimos/robot/unitree_webrtc/mujoco_connection.py @@ -80,14 +80,28 @@ def start(self) -> None: # Launch the subprocess try: + # mjpython must be used macOS (because of launch_passive inside mujoco_process.py) + executable = sys.executable if sys.platform != "darwin" else "mjpython" self.process = subprocess.Popen( - [sys.executable, str(LAUNCHER_PATH), config_pickle, shm_names_json], + [executable, str(LAUNCHER_PATH), config_pickle, shm_names_json], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, + bufsize=1, ) except Exception as e: self.shm_data.cleanup() raise RuntimeError(f"Failed to start MuJoCo subprocess: {e}") from e + def get_stderr() -> str: + text = "" + if self.process and self.process.stderr: + text = ( + "\n" + self.process.stderr.read().replace("\n", "\n[mujoco_process.py] ") + "\n" + ) + return text + # Wait for process to be ready ready_timeout = 10 start_time = time.time() @@ -95,16 +109,20 @@ def start(self) -> None: while time.time() - start_time < ready_timeout: if self.process.poll() is not None: exit_code = self.process.returncode + stderr_string = get_stderr() self.stop() - raise RuntimeError(f"MuJoCo process failed to start (exit code {exit_code})") + raise RuntimeError( + f"{stderr_string}MuJoCo process failed to start (exit code {exit_code})" + ) if self.shm_data.is_ready(): logger.info("MuJoCo process started successfully") return time.sleep(0.1) # Timeout + stderr_string = get_stderr() self.stop() - raise RuntimeError("MuJoCo process failed to start (timeout)") + raise RuntimeError(f"{stderr_string}MuJoCo process failed to start 
(timeout)") def stop(self) -> None: if self._is_cleaned_up: @@ -112,6 +130,13 @@ def stop(self) -> None: self._is_cleaned_up = True + # clean up open file descriptors + if self.process: + if self.process.stderr: + self.process.stderr.close() + if self.process.stdout: + self.process.stdout.close() + # Cancel any pending timers if self._stop_timer: self._stop_timer.cancel() diff --git a/dimos/robot/unitree_webrtc/unitree_go2_blueprints.py b/dimos/robot/unitree_webrtc/unitree_go2_blueprints.py index 17f25e8560..77be9797ad 100644 --- a/dimos/robot/unitree_webrtc/unitree_go2_blueprints.py +++ b/dimos/robot/unitree_webrtc/unitree_go2_blueprints.py @@ -56,14 +56,20 @@ behavior_tree_navigator(), wavefront_frontier_explorer(), websocket_vis(), - foxglove_bridge(), + foxglove_bridge( + shm_channels=[ + "/go2/color_image#sensor_msgs.Image", + ] + ), ) .global_config(n_dask_workers=4, robot_model="unitree_go2") .transports( # These are kept the same so that we don't have to change foxglove configs. # Although we probably should. 
{ - ("color_image", Image): LCMTransport("/go2/color_image", Image), + ("color_image", Image): pSHMTransport( + "/go2/color_image", default_capacity=DEFAULT_CAPACITY_COLOR_IMAGE + ), ("camera_pose", PoseStamped): LCMTransport("/go2/camera_pose", PoseStamped), ("camera_info", CameraInfo): LCMTransport("/go2/camera_info", CameraInfo), } @@ -77,21 +83,6 @@ utilization(), ).global_config(n_dask_workers=8) -standard_with_shm = autoconnect( - standard.transports( - { - ("color_image", Image): pSHMTransport( - "/go2/color_image", default_capacity=DEFAULT_CAPACITY_COLOR_IMAGE - ), - } - ), - foxglove_bridge( - shm_channels=[ - "/go2/color_image#sensor_msgs.Image", - ] - ), -) - standard_with_jpeglcm = standard.transports( { ("color_image", Image): JpegLcmTransport("/go2/color_image", Image), diff --git a/flake.lock b/flake.lock index e6d920a293..1934527896 100644 --- a/flake.lock +++ b/flake.lock @@ -18,6 +18,78 @@ "type": "github" } }, + "home-manager": { + "inputs": { + "nixpkgs": [ + "xome", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1753983724, + "narHash": "sha256-2vlAOJv4lBrE+P1uOGhZ1symyjXTRdn/mz0tZ6faQcg=", + "owner": "nix-community", + "repo": "home-manager", + "rev": "7035020a507ed616e2b20c61491ae3eaa8e5462c", + "type": "github" + }, + "original": { + "owner": "nix-community", + "repo": "home-manager", + "type": "github" + } + }, + "lib": { + "inputs": { + "flakeUtils": [ + "flake-utils" + ], + "libSource": "libSource" + }, + "locked": { + "lastModified": 1763164848, + "narHash": "sha256-OlnnK3Iepi4As1onBrNfIiiQ0xIGzEWsJ16/TrLFcpY=", + "owner": "jeff-hykin", + "repo": "quick-nix-toolkits", + "rev": "3c820d33a0c4c8480a771484f99490243b3c6b5f", + "type": "github" + }, + "original": { + "owner": "jeff-hykin", + "repo": "quick-nix-toolkits", + "type": "github" + } + }, + "libSource": { + "locked": { + "lastModified": 1763255503, + "narHash": "sha256-7AL5rgcGVjhYgZFbZQt1IndGcY27h5B5xi9OWtLlm6c=", + "owner": "nix-community", + "repo": "nixpkgs.lib", + "rev": 
"56f74a2d6cd236c0ea3097b3df2e053fbb374b26", + "type": "github" + }, + "original": { + "owner": "nix-community", + "repo": "nixpkgs.lib", + "type": "github" + } + }, + "libSource_2": { + "locked": { + "lastModified": 1753579242, + "narHash": "sha256-zvaMGVn14/Zz8hnp4VWT9xVnhc8vuL3TStRqwk22biA=", + "owner": "divnix", + "repo": "nixpkgs.lib", + "rev": "0f36c44e01a6129be94e3ade315a5883f0228a6e", + "type": "github" + }, + "original": { + "owner": "divnix", + "repo": "nixpkgs.lib", + "type": "github" + } + }, "nixpkgs": { "locked": { "lastModified": 1748929857, @@ -37,7 +109,9 @@ "root": { "inputs": { "flake-utils": "flake-utils", - "nixpkgs": "nixpkgs" + "lib": "lib", + "nixpkgs": "nixpkgs", + "xome": "xome" } }, "systems": { @@ -54,6 +128,31 @@ "repo": "default", "type": "github" } + }, + "xome": { + "inputs": { + "flake-utils": [ + "flake-utils" + ], + "home-manager": "home-manager", + "libSource": "libSource_2", + "nixpkgs": [ + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1765320026, + "narHash": "sha256-yPvFElT1PG4ENioIn+ctf682E8y6jPodeosM8One680=", + "owner": "jeff-hykin", + "repo": "xome", + "rev": "bae4f441d3e1ebe3b2da51a3ecdbdccddda67444", + "type": "github" + }, + "original": { + "owner": "jeff-hykin", + "repo": "xome", + "type": "github" + } } }, "root": "root", diff --git a/flake.nix b/flake.nix index 75e68f595e..78c21c0710 100644 --- a/flake.nix +++ b/flake.nix @@ -4,9 +4,14 @@ inputs = { nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable"; flake-utils.url = "github:numtide/flake-utils"; + lib.url = "github:jeff-hykin/quick-nix-toolkits"; + lib.inputs.flakeUtils.follows = "flake-utils"; + xome.url = "github:jeff-hykin/xome"; + xome.inputs.nixpkgs.follows = "nixpkgs"; + xome.inputs.flake-utils.follows = "flake-utils"; }; - outputs = { self, nixpkgs, flake-utils, ... }: + outputs = { self, nixpkgs, flake-utils, lib, xome, ... 
}: flake-utils.lib.eachDefaultSystem (system: let pkgs = import nixpkgs { inherit system; }; @@ -14,71 +19,331 @@ # ------------------------------------------------------------ # 1. Shared package list (tool-chain + project deps) # ------------------------------------------------------------ - devPackages = with pkgs; [ + # we "flag" each package with what we need it for (e.g. LD_LIBRARY_PATH, nativeBuildInputs vs buildInputs, etc) + aggregation = lib.aggregator [ ### Core shell & utils - bashInteractive coreutils gh - stdenv.cc.cc.lib pcre2 + { vals.pkg=pkgs.bashInteractive; flags={}; } + { vals.pkg=pkgs.coreutils; flags={}; } + { vals.pkg=pkgs.gh; flags={}; } + { vals.pkg=pkgs.stdenv.cc.cc.lib; flags.ldLibraryGroup=true; } + { vals.pkg=pkgs.stdenv.cc; flags.ldLibraryGroup=true; } + { vals.pkg=pkgs.cctools; flags={}; onlyIf=pkgs.stdenv.isDarwin; } # for pip install opencv-python + { vals.pkg=pkgs.pcre2; flags={ ldLibraryGroup=true; flags.packageConfGroup=pkgs.stdenv.isDarwin; }; } + { vals.pkg=pkgs.libsysprof-capture; flags.packageConfGroup=true; onlyIf=pkgs.stdenv.isDarwin; } + { vals.pkg=pkgs.xcbuild; flags={}; } + { vals.pkg=pkgs.git-lfs; flags={}; } + { vals.pkg=pkgs.gnugrep; flags={}; } + { vals.pkg=pkgs.pkg-config; flags={}; } + { vals.pkg=pkgs.git; flags={}; } + { vals.pkg=pkgs.unixtools.ifconfig; flags={}; } + { vals.pkg=pkgs.unixtools.netstat; flags={}; } ### Python + static analysis - python312 python312Packages.pip python312Packages.setuptools - python312Packages.virtualenv pre-commit + { vals.pkg=pkgs.python312; flags={}; vals.pythonMinorVersion="12";} + { vals.pkg=pkgs.python312Packages.pip; flags={}; } + { vals.pkg=pkgs.python312Packages.setuptools; flags={}; } + { vals.pkg=pkgs.python312Packages.virtualenv; flags={}; } + { vals.pkg=pkgs.pre-commit; flags={}; } ### Runtime deps - python312Packages.pyaudio portaudio ffmpeg_6 ffmpeg_6.dev + { vals.pkg=pkgs.python312Packages.pyaudio; flags={}; } + { vals.pkg=pkgs.python312Packages.opencv4; flags={}; } + 
{ vals.pkg=pkgs.portaudio; flags={ldLibraryGroup=true; packageConfGroup=true;}; } + { vals.pkg=pkgs.ffmpeg_6; flags={}; } + { vals.pkg=pkgs.ffmpeg_6.dev; flags={}; } ### Graphics / X11 stack - libGL libGLU mesa glfw - xorg.libX11 xorg.libXi xorg.libXext xorg.libXrandr xorg.libXinerama - xorg.libXcursor xorg.libXfixes xorg.libXrender xorg.libXdamage - xorg.libXcomposite xorg.libxcb xorg.libXScrnSaver xorg.libXxf86vm - - udev SDL2 SDL2.dev zlib + { vals.pkg=pkgs.libGL; flags.ldLibraryGroup=true; onlyIf=pkgs.stdenv.isLinux; } + { vals.pkg=pkgs.libGLU; flags.ldLibraryGroup=true; onlyIf=pkgs.stdenv.isLinux; } + { vals.pkg=pkgs.mesa; flags.ldLibraryGroup=true; onlyIf=pkgs.stdenv.isLinux; } + { vals.pkg=pkgs.glfw; flags.ldLibraryGroup=true; onlyIf=pkgs.stdenv.isLinux; } + { vals.pkg=pkgs.xorg.libX11; flags.ldLibraryGroup=true; onlyIf=pkgs.stdenv.isLinux; } + { vals.pkg=pkgs.xorg.libXi; flags.ldLibraryGroup=true; onlyIf=pkgs.stdenv.isLinux; } + { vals.pkg=pkgs.xorg.libXext; flags.ldLibraryGroup=true; onlyIf=pkgs.stdenv.isLinux; } + { vals.pkg=pkgs.xorg.libXrandr; flags.ldLibraryGroup=true; onlyIf=pkgs.stdenv.isLinux; } + { vals.pkg=pkgs.xorg.libXinerama; flags.ldLibraryGroup=true; onlyIf=pkgs.stdenv.isLinux; } + { vals.pkg=pkgs.xorg.libXcursor; flags.ldLibraryGroup=true; onlyIf=pkgs.stdenv.isLinux; } + { vals.pkg=pkgs.xorg.libXfixes; flags.ldLibraryGroup=true; onlyIf=pkgs.stdenv.isLinux; } + { vals.pkg=pkgs.xorg.libXrender; flags.ldLibraryGroup=true; onlyIf=pkgs.stdenv.isLinux; } + { vals.pkg=pkgs.xorg.libXdamage; flags.ldLibraryGroup=true; onlyIf=pkgs.stdenv.isLinux; } + { vals.pkg=pkgs.xorg.libXcomposite; flags.ldLibraryGroup=true; onlyIf=pkgs.stdenv.isLinux; } + { vals.pkg=pkgs.xorg.libxcb; flags.ldLibraryGroup=true; onlyIf=pkgs.stdenv.isLinux; } + { vals.pkg=pkgs.xorg.libXScrnSaver; flags.ldLibraryGroup=true; onlyIf=pkgs.stdenv.isLinux; } + { vals.pkg=pkgs.xorg.libXxf86vm; flags.ldLibraryGroup=true; onlyIf=pkgs.stdenv.isLinux; } + { vals.pkg=pkgs.udev; 
flags.ldLibraryGroup=true; onlyIf=pkgs.stdenv.isLinux; } + { vals.pkg=pkgs.SDL2; flags.ldLibraryGroup=true; onlyIf=pkgs.stdenv.isLinux; } + { vals.pkg=pkgs.SDL2.dev; flags.ldLibraryGroup=true; onlyIf=pkgs.stdenv.isLinux; } + { vals.pkg=pkgs.zlib; flags.ldLibraryGroup=true; onlyIf=pkgs.stdenv.isLinux; } ### GTK / OpenCV helpers - glib gtk3 gdk-pixbuf gobject-introspection + { vals.pkg=pkgs.glib; flags.ldLibraryGroup=true; onlyIf=pkgs.stdenv.isLinux; } + { vals.pkg=pkgs.gtk3; flags.ldLibraryGroup=true; onlyIf=pkgs.stdenv.isLinux; } + { vals.pkg=pkgs.gdk-pixbuf; flags.ldLibraryGroup=true; onlyIf=pkgs.stdenv.isLinux; } + { vals.pkg=pkgs.gobject-introspection; flags.ldLibraryGroup=true; onlyIf=pkgs.stdenv.isLinux; } ### GStreamer - gst_all_1.gstreamer gst_all_1.gst-plugins-base gst_all_1.gst-plugins-good - gst_all_1.gst-plugins-bad gst_all_1.gst-plugins-ugly - python312Packages.gst-python + { vals.pkg=pkgs.gst_all_1.gstreamer; flags.ldLibraryGroup=true; flags.giTypelibGroup=true; } + { vals.pkg=pkgs.gst_all_1.gst-plugins-base; flags.ldLibraryGroup=true; flags.giTypelibGroup=true; } + { vals.pkg=pkgs.gst_all_1.gst-plugins-good; flags={}; onlyIf=pkgs.stdenv.isLinux; } + { vals.pkg=pkgs.gst_all_1.gst-plugins-bad; flags={}; onlyIf=pkgs.stdenv.isLinux; } + { vals.pkg=pkgs.gst_all_1.gst-plugins-ugly; flags={}; onlyIf=pkgs.stdenv.isLinux; } + { vals.pkg=pkgs.python312Packages.gst-python; flags={}; onlyIf=pkgs.stdenv.isLinux; } ### Open3D & build-time - eigen cmake ninja jsoncpp libjpeg libjpeg_turbo libpng + { vals.pkg=pkgs.eigen; flags={}; } + { vals.pkg=pkgs.cmake; flags={}; } + { vals.pkg=pkgs.ninja; flags={}; } + { vals.pkg=pkgs.jsoncpp; flags={}; } + { vals.pkg=pkgs.libjpeg; flags.ldLibraryGroup=true; } + { vals.pkg=pkgs.libjpeg_turbo; flags.ldLibraryGroup=true; } + { vals.pkg=pkgs.libpng; flags={}; } + ### LCM (Lightweight Communications and Marshalling) - lcm + { vals.pkg=pkgs.lcm; flags.ldLibraryGroup=true; onlyIf=pkgs.stdenv.isLinux; } + # lcm works on darwin, but 
only after two fixes (1. pkg-config, 2. fsync) + { + onlyIf=pkgs.stdenv.isDarwin; + flags.ldLibraryGroup=true; + flags.manualPythonPackages=true; + vals.pkg=pkgs.lcm.overrideAttrs (old: + let + # 1. fix pkg-config on darwin + pkgConfPackages = aggregation.getAll { hasAllFlags=[ "packageConfGroup" ]; attrPath=[ "pkg" ]; }; + packageConfPackagesString = (aggregation.getAll { + hasAllFlags=[ "packageConfGroup" ]; + attrPath=[ "pkg" ]; + strAppend="/lib/pkgconfig"; + strJoin=":"; + }); + in + { + buildInputs = (old.buildInputs or []) ++ pkgConfPackages; + nativeBuildInputs = (old.nativeBuildInputs or []) ++ [ pkgs.pkg-config pkgs.python312 ]; + # 1. fix pkg-config on darwin + env.PKG_CONFIG_PATH = packageConfPackagesString; + # 2. Fix fsync on darwin + patches = [ + (pkgs.writeText "lcm-darwin-fsync.patch" "--- ./lcm-logger/lcm_logger.c 2025-11-14 09:46:01.000000000 -0600\n+++ ./lcm-logger/lcm_logger.c 2025-11-14 09:47:05.000000000 -0600\n@@ -428,9 +428,13 @@\n if (needs_flushed) {\n fflush(logger->log->f);\n #ifndef WIN32\n+#ifdef __APPLE__\n+ fsync(fileno(logger->log->f));\n+#else\n // Perform a full fsync operation after flush\n fdatasync(fileno(logger->log->f));\n #endif\n+#endif\n logger->last_fflush_time = log_event->timestamp;\n }\n") + ]; + } + ); + } ]; # ------------------------------------------------------------ - # 2. Host interactive shell → `nix develop` + # 2. 
group / aggregate our packages # ------------------------------------------------------------ - devShell = pkgs.mkShell { - packages = devPackages; - shellHook = '' - export LD_LIBRARY_PATH="${pkgs.lib.makeLibraryPath [ - pkgs.stdenv.cc.cc.lib pkgs.libGL pkgs.libGLU pkgs.mesa pkgs.glfw - pkgs.xorg.libX11 pkgs.xorg.libXi pkgs.xorg.libXext pkgs.xorg.libXrandr - pkgs.xorg.libXinerama pkgs.xorg.libXcursor pkgs.xorg.libXfixes - pkgs.xorg.libXrender pkgs.xorg.libXdamage pkgs.xorg.libXcomposite - pkgs.xorg.libxcb pkgs.xorg.libXScrnSaver pkgs.xorg.libXxf86vm - pkgs.udev pkgs.portaudio pkgs.SDL2.dev pkgs.zlib pkgs.glib pkgs.gtk3 - pkgs.gdk-pixbuf pkgs.gobject-introspection pkgs.lcm pkgs.pcre2 - pkgs.gst_all_1.gstreamer pkgs.gst_all_1.gst-plugins-base pkgs.libjpeg_turbo]}:$LD_LIBRARY_PATH" - - export DISPLAY=:0 - export GI_TYPELIB_PATH="${pkgs.gst_all_1.gstreamer}/lib/girepository-1.0:${pkgs.gst_all_1.gst-plugins-base}/lib/girepository-1.0:$GI_TYPELIB_PATH" - - PROJECT_ROOT=$(git rev-parse --show-toplevel 2>/dev/null || echo "$PWD") - if [ -f "$PROJECT_ROOT/env/bin/activate" ]; then - . 
"$PROJECT_ROOT/env/bin/activate" - fi - - [ -f "$PROJECT_ROOT/motd" ] && cat "$PROJECT_ROOT/motd" - [ -f "$PROJECT_ROOT/.pre-commit-config.yaml" ] && pre-commit install --install-hooks - ''; + devPackages = aggregation.getAll { attrPath=[ "pkg" ]; }; + ldLibraryPackages = aggregation.getAll { hasAllFlags=[ "ldLibraryGroup" ]; attrPath=[ "pkg" ]; }; + giTypelibPackagesString = aggregation.getAll { + hasAllFlags=[ "giTypelibGroup" ]; + attrPath=[ "pkg" ]; + strAppend="/lib/girepository-1.0"; + strJoin=":"; }; + packageConfPackagesString = (aggregation.getAll { + hasAllFlags=[ "packageConfGroup" ]; + attrPath=[ "pkg" ]; + strAppend="/lib/pkgconfig"; + strJoin=":"; + }); + manualPythonPackages = (aggregation.getAll { + hasAllFlags=[ "manualPythonPackages" ]; + attrPath=[ "pkg" ]; + strAppend="/lib/python3.${aggregation.mergedVals.pythonMinorVersion}/site-packages"; + strJoin=":"; + }); + + # ------------------------------------------------------------ + # 3. Host interactive shell → `nix develop` + # ------------------------------------------------------------ + devShell = (xome.simpleMakeHomeFor { + inherit pkgs; + pure = true; + commandPassthrough = [ "sudo" "nvim" "code" "sysctl" "sw_vers" "git" "vim" "emacs" ]; # e.g. 
use external nvim instead of nix's + # commonly needed for MacOS: [ "osascript" "otool" "hidutil" "logger" "codesign" ] + homeSubpathPassthrough = [ "cache/nix/" ]; # share nix cache between projects + homeModule = { + # for home-manager examples, see: + # https://deepwiki.com/nix-community/home-manager/5-configuration-examples + # all home-manager options: + # https://nix-community.github.io/home-manager/options.xhtml + home.homeDirectory = "/tmp/virtual_homes/dimos"; + home.stateVersion = "25.11"; + home.packages = devPackages; + + programs = { + home-manager = { + enable = true; + }; + zsh = { + enable = true; + enableCompletion = true; + autosuggestion.enable = true; + syntaxHighlighting.enable = true; + shellAliases.ll = "ls -la"; + history.size = 100000; + # this is kinda like .zshrc + initContent = '' + export LD_LIBRARY_PATH="${pkgs.lib.makeLibraryPath ldLibraryPackages}:$LD_LIBRARY_PATH" + export DISPLAY=:0 + export GI_TYPELIB_PATH="${giTypelibPackagesString}:$GI_TYPELIB_PATH" + export PKG_CONFIG_PATH=${lib.escapeShellArg packageConfPackagesString} + export PYTHONPATH="$PYTHONPATH:"${lib.escapeShellArg manualPythonPackages} + # for getting passed `pip install pyaudio` on macos + export CFLAGS="$(pkg-config --cflags portaudio-2.0) $CFLAGS" + export LDFLAGS="-L$(pkg-config --variable=libdir portaudio-2.0) $LDFLAGS" + + # without this alias, the pytest uses the non-venv python and fails + alias pytest="python -m pytest" + + PROJECT_ROOT=$(git rev-parse --show-toplevel 2>/dev/null || echo "$PWD") + [ -f "$PROJECT_ROOT/motd" ] && cat "$PROJECT_ROOT/motd" + [ -f "$PROJECT_ROOT/.pre-commit-config.yaml" ] && pre-commit install --install-hooks + cd "$PROJECT_ROOT" + + # + # python & setup + # + if [ -f "$PROJECT_ROOT/venv/bin/activate" ]; then + # if there is a venv, load it + _nix_python_path="$(realpath "$(which python)")" + . 
"$PROJECT_ROOT/venv/bin/activate" + # check the venv to make sure it wasn't created with a different (non nix) python + if [ "$_nix_python_path" != "$(realpath "$(which python)")" ] + then + echo + echo + echo "WARNING:" + echo " Your venv was created with something other than the current nix python" + echo " This could happen if you made the venv before doing `nix develop`" + echo " It could also happen if the nix-python was updated but the venv wasn't" + echo " WHAT YOU NEED TO DO:" + echo " - If you're about to make/test a PR, delete/rename your venv and run `nix develop` again" + echo " - If you're just trying to get the code working, you can continue but you might get bugs FYI" + echo + echo + echo "Got it? (press enter)"; read _ + echo + fi + else + # + # automate the readme + # + + # helper + confirm_ask() { + echo + question="$1";answer="" + while true; do + echo "$question"; read response + if [ -z "$response" ]; then + echo + return 0 # success + break + fi + case "$response" in + [Yy]* ) answer='yes'; break;; + [Nn]* ) answer='no'; break;; + * ) echo "Please answer yes or no.";; + esac + done + if [ "$answer" = "yes" ] + then + echo + return 0 # success + fi + echo + return 1 # failure + } + + macos_version="$(sw_vers -productVersion 2>/dev/null || echo "0.0")" + macos_major_version="''${macos_version%%.*}" + if confirm_ask "Would you like me to set up the environment for you? [y/n]"; then + echo "Making sure git lfs is installed..." + git lfs install || true + + if confirm_ask "Should I donwload the models and data? (around 17Gb) this will be needed to run the simulation [y/n]"; then + echo "Downloading the models and data..." + git lfs fetch --all + git lfs pull + echo "Done!" + fi + + # check if no .env + if ! [ -f ".env" ] + then + echo "Setting up .env file..." + cp default.env .env + echo + echo "note: you might want to edit the .env file with your own settings" + echo + fi + + echo "Setting up virtualenv..." 
+ python3 -m venv venv + echo "Activating virtualenv..." + . venv/bin/activate + echo "Installing python dependencies..." + pip install -e . + + # if really old MacOS then ignore the lcm dependency (it'll be supplied by nix) + if [ "$(uname)" = "Darwin" ] && [ "$macos_major_version" -le 13 ]; then + echo "You're on a really old MacOS version. Ignore the errors above (and probably later below) about LCM" + echo "Got it? (press enter)";read _ + rm -f pyproject.original.toml + cp pyproject.toml pyproject.original.toml + # install dimos-lcm without installing lcm + pip install --no-deps 'git+https://github.com/dimensionalOS/dimos-lcm.git' + # manually install dependencies of dimos-lcm + pip install foxglove-websocket numpy + # remove dimos-lcm from pyproject.toml for a moment + grep -v '^\s*#' pyproject.original.toml | grep -v "dimos-lcm @ .*" | grep -v "opencv-python" > pyproject.toml + pip install -e '.[cpu,dev,sim]' 2>&1 | grep -v -E "Could not find a version that satisfies the requirement lcm |ERROR: No matching distribution found for lcm" + # restore pyproject.toml + rm -f pyproject.toml + mv pyproject.original.toml pyproject.toml + fi + + # CUDA/CPU dependencies + if ! [ "$(uname)" = "Darwin" ] && confirm_ask "Want me to install the cuda dependencies? [y/n]"; then + pip install -e '.[cuda,dev]' + else + pip install -e '.[cpu,dev]' + fi + + # Mujoco/Simulation dependencies + if confirm_ask "Want me to install the optional simulation (mujoco) dependencies? [y/n]"; then + pip install -e '.[sim]' + fi + + if confirm_ask "Would you like me to run the tests to make sure everything is working? [y/n]"; then + echo "Running tests..." 
+ python -m pytest -s "$PROJECT_ROOT/dimos/" && echo "tests finished" + fi + + echo "here's the main command to run:" + echo CONNECTION_TYPE=replay python dimos/robot/unitree_webrtc/unitree_go2.py + fi + fi + ''; + }; + starship = { + enable = true; + enableZshIntegration = true; + settings = { + character = { + success_symbol = "[▣](bold green)"; + error_symbol = "[▣](bold red)"; + }; + }; + }; + }; + }; + }).default; # ------------------------------------------------------------ - # 3. Closure copied into the OCI image rootfs + # 4. Closure copied into the OCI image rootfs # ------------------------------------------------------------ imageRoot = pkgs.buildEnv { name = "dimos-image-root"; diff --git a/pyproject.toml b/pyproject.toml index 1b3e879ce0..de7497681d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -61,7 +61,7 @@ dependencies = [ "langchain-text-splitters>=0.3.11,<1", "langchain-huggingface>=0.3.1,<1", "langchain-ollama>=0.3.10,<1", - "bitsandbytes>=0.48.2,<1.0", + "bitsandbytes>=0.48.2,<1.0; sys_platform == 'linux'", "ollama>=0.6.0", # Class Extraction