From 28504e81e9928295e01a6c007de9c44dbf8fd1d5 Mon Sep 17 00:00:00 2001 From: Yajat Date: Tue, 17 Mar 2026 20:49:35 -0700 Subject: [PATCH 1/3] feat: add setup-apple command for Apple Silicon Macs Adds a 'nemoclaw setup-apple' command that validates and configures macOS Apple Silicon environments: - Validates macOS + arm64 architecture - Checks Docker Desktop socket (Desktop, Colima, Podman) - Installs and configures Ollama for local inference - Reports GPU cores and unified memory - Validates OpenShell CLI availability - Prints clear next steps and known limitations Closes part of #260 (macOS support tracking) Co-authored-by: Yajat Singh --- bin/nemoclaw.js | 8 +- scripts/setup-apple.sh | 188 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 195 insertions(+), 1 deletion(-) create mode 100755 scripts/setup-apple.sh diff --git a/bin/nemoclaw.js b/bin/nemoclaw.js index 07bb3d5b5a..2deb33105c 100755 --- a/bin/nemoclaw.js +++ b/bin/nemoclaw.js @@ -21,7 +21,7 @@ const policies = require("./lib/policies"); // ── Global commands ────────────────────────────────────────────── const GLOBAL_COMMANDS = new Set([ - "onboard", "list", "deploy", "setup", "setup-spark", + "onboard", "list", "deploy", "setup", "setup-spark", "setup-apple", "start", "stop", "status", "help", "--help", "-h", ]); @@ -42,6 +42,10 @@ async function setup() { run(`bash "${SCRIPTS}/setup.sh"`); } +async function setupApple() { + run(`bash "${SCRIPTS}/setup-apple.sh"`); +} + async function setupSpark() { await ensureApiKey(); run(`sudo -E NVIDIA_API_KEY="${process.env.NVIDIA_API_KEY}" bash "${SCRIPTS}/setup-spark.sh"`); @@ -276,6 +280,7 @@ function help() { nemoclaw onboard Interactive setup wizard (recommended) nemoclaw setup Legacy setup (deprecated, use onboard) nemoclaw setup-spark Set up on DGX Spark (fixes cgroup v2 + Docker) + nemoclaw setup-apple Set up on Apple Silicon Mac (Docker + Ollama) Sandbox Management: nemoclaw list List all sandboxes @@ -318,6 +323,7 @@ const [cmd, ...args] = 
process.argv.slice(2); case "onboard": await onboard(); break; case "setup": await setup(); break; case "setup-spark": await setupSpark(); break; + case "setup-apple": await setupApple(); break; case "deploy": await deploy(args[0]); break; case "start": await start(); break; case "stop": stop(); break; diff --git a/scripts/setup-apple.sh b/scripts/setup-apple.sh new file mode 100755 index 0000000000..f6f1688a2f --- /dev/null +++ b/scripts/setup-apple.sh @@ -0,0 +1,188 @@ +#!/usr/bin/env bash +# SPDX-FileCopyrightText: Copyright (c) 2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# NemoClaw setup for Apple Silicon Macs (M-series). +# +# macOS cannot run k3s natively — OpenShell runs inside Docker Desktop's +# Linux VM. This script validates the Docker environment, configures +# Ollama for local inference, and ensures the sandbox can reach it. +# +# Usage: +# nemoclaw setup-apple +# # or directly: +# bash scripts/setup-apple.sh +# +# What it does: +# 1. Validates macOS + Apple Silicon environment +# 2. Checks Docker Desktop is running and socket is reachable +# 3. Installs and configures Ollama for local inference +# 4. Validates OpenShell gateway can start +# 5. Prints recommended next steps + +set -euo pipefail + +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[1;34m' +NC='\033[0m' + +info() { echo -e "${GREEN}>>>${NC} $1"; } +warn() { echo -e "${YELLOW}>>>${NC} $1"; } +fail() { echo -e "${RED}>>>${NC} $1"; exit 1; } +step() { echo -e "\n${BLUE}━━━ $1 ━━━${NC}"; } + +# ── Pre-flight checks ───────────────────────────────────────────── + +if [ "$(uname -s)" != "Darwin" ]; then + fail "This script is for macOS. Use 'nemoclaw setup-spark' for DGX Spark or 'nemoclaw onboard' for Linux." +fi + +ARCH="$(uname -m)" +if [ "$ARCH" != "arm64" ] && [ "$ARCH" != "aarch64" ]; then + warn "Expected Apple Silicon (arm64) but detected ${ARCH}. Proceeding anyway." 
+fi + +step "1/5 Checking macOS environment" + +# Report hardware +GPU_CORES="" +UNIFIED_MEM="" +if command -v system_profiler &>/dev/null; then + GPU_CORES=$(system_profiler SPDisplaysDataType 2>/dev/null | grep "Total Number of Cores" | awk -F': ' '{print $2}' | head -1) + UNIFIED_MEM=$(sysctl -n hw.memsize 2>/dev/null | awk '{printf "%.0f", $1/1024/1024/1024}') +fi +info "macOS $(sw_vers -productVersion 2>/dev/null || echo 'unknown') on ${ARCH}" +[ -n "$GPU_CORES" ] && info "GPU: ${GPU_CORES} cores, ${UNIFIED_MEM}GB unified memory" + +# ── Docker Desktop ───────────────────────────────────────────────── + +step "2/5 Checking Docker Desktop" + +if ! command -v docker &>/dev/null; then + fail "Docker not found. Install Docker Desktop for Mac: https://www.docker.com/products/docker-desktop/" +fi + +if ! docker info &>/dev/null 2>&1; then + fail "Docker daemon is not running. Start Docker Desktop and try again." +fi + +# Find Docker socket (Docker Desktop, Colima, or Podman) +DOCKER_SOCKET="" +for _sock in \ + "$HOME/.docker/run/docker.sock" \ + "/var/run/docker.sock" \ + "$HOME/.colima/default/docker.sock" \ + "$HOME/.config/colima/default/docker.sock" \ + "$HOME/.local/share/containers/podman/machine/podman.sock"; do + if [ -S "$_sock" ]; then + DOCKER_SOCKET="$_sock" + break + fi +done + +if [ -n "$DOCKER_SOCKET" ]; then + info "Docker socket: ${DOCKER_SOCKET}" +else + warn "Could not locate Docker socket (Docker is running but socket path unknown)" +fi + +DOCKER_VERSION=$(docker version --format '{{.Server.Version}}' 2>/dev/null || echo "unknown") +info "Docker ${DOCKER_VERSION}" + +# ── Ollama ───────────────────────────────────────────────────────── + +step "3/5 Configuring local inference (Ollama)" + +OLLAMA_INSTALLED=false +OLLAMA_RUNNING=false + +if command -v ollama &>/dev/null; then + OLLAMA_INSTALLED=true + info "Ollama found: $(ollama --version 2>/dev/null || echo 'installed')" +else + info "Ollama not installed. Installing via Homebrew..." 
+ if command -v brew &>/dev/null; then + brew install ollama 2>/dev/null && OLLAMA_INSTALLED=true || warn "Ollama install failed. Install manually: https://ollama.com" + else + warn "Homebrew not found. Install Ollama manually: https://ollama.com" + fi +fi + +if [ "$OLLAMA_INSTALLED" = true ]; then + # Check if Ollama is serving + if curl -s --max-time 3 http://localhost:11434/api/tags &>/dev/null; then + OLLAMA_RUNNING=true + info "Ollama is running on port 11434" + else + info "Starting Ollama service..." + OLLAMA_HOST=0.0.0.0:11434 ollama serve &>/dev/null & + sleep 3 + if curl -s --max-time 3 http://localhost:11434/api/tags &>/dev/null; then + OLLAMA_RUNNING=true + info "Ollama started successfully" + else + warn "Ollama failed to start. You can start it manually: OLLAMA_HOST=0.0.0.0:11434 ollama serve" + fi + fi + + # Suggest pulling a model if none present + if [ "$OLLAMA_RUNNING" = true ]; then + MODEL_COUNT=$(curl -s http://localhost:11434/api/tags 2>/dev/null | python3 -c "import json,sys; print(len(json.load(sys.stdin).get('models',[])))" 2>/dev/null || echo "0") + if [ "$MODEL_COUNT" = "0" ]; then + info "No models found. For local inference, pull a model:" + echo " ollama pull llama3.1:8b # 4.7GB, good balance" + echo " ollama pull qwen3:8b # 4.9GB, strong reasoning" + else + info "Ollama has ${MODEL_COUNT} model(s) available" + fi + fi +fi + +# ── OpenShell gateway ────────────────────────────────────────────── + +step "4/5 Validating OpenShell" + +if command -v openshell &>/dev/null; then + info "OpenShell CLI found: $(openshell --version 2>/dev/null || echo 'installed')" +else + warn "OpenShell CLI not found. Run 'nemoclaw onboard' to install it." 
+fi + +# ── Summary ──────────────────────────────────────────────────────── + +step "5/5 Apple Silicon setup complete" + +echo "" +echo " ┌─────────────────────────────────────────────────┐" +echo " │ NemoClaw — Apple Silicon Environment Summary │" +echo " ├─────────────────────────────────────────────────┤" +printf " │ %-20s %-27s │\n" "macOS:" "$(sw_vers -productVersion 2>/dev/null || echo 'unknown') (${ARCH})" +printf " │ %-20s %-27s │\n" "Docker:" "${DOCKER_VERSION}" +if [ "$OLLAMA_INSTALLED" = true ]; then + if [ "$OLLAMA_RUNNING" = true ]; then + printf " │ %-20s %-27s │\n" "Ollama:" "✅ Running (port 11434)" + else + printf " │ %-20s %-27s │\n" "Ollama:" "⚠️ Installed (not running)" + fi +else + printf " │ %-20s %-27s │\n" "Ollama:" "❌ Not installed" +fi +if command -v openshell &>/dev/null; then + printf " │ %-20s %-27s │\n" "OpenShell:" "✅ Installed" +else + printf " │ %-20s %-27s │\n" "OpenShell:" "❌ Not found" +fi +echo " └─────────────────────────────────────────────────┘" +echo "" + +info "Next steps:" +echo " 1. Run 'nemoclaw onboard' to create your first sandbox" +echo " 2. For local inference: ollama pull llama3.1:8b" +echo " 3. 
For cloud inference: get an API key at https://build.nvidia.com" +echo "" +info "Known macOS limitations:" +echo " • Ollama local inference may need DNS fix (inference.local → sandbox)" +echo " • Docker Model Runner bridge not yet supported" +echo " • See: https://github.com/NVIDIA/NemoClaw/issues/260" From 1b9eb87de43dc3b7d9456ddb812e87189a5585d6 Mon Sep 17 00:00:00 2001 From: Yajat Date: Tue, 17 Mar 2026 20:59:09 -0700 Subject: [PATCH 2/3] =?UTF-8?q?fix:=20address=20review=20feedback=20?= =?UTF-8?q?=E2=80=94=20Intel=20hard-fail,=20no=20python3=20dep,=20Ollama?= =?UTF-8?q?=20cleanup?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Intel Macs now fail with clear message instead of warning - Replaced python3 JSON parsing with grep (macOS 12.3+ has no Python) - Ollama startup prefers brew services (managed, survives reboots) - Added cleanup trap for background Ollama process - No orphaned processes on script failure/Ctrl-C --- scripts/setup-apple.sh | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/scripts/setup-apple.sh b/scripts/setup-apple.sh index f6f1688a2f..504ae6af55 100755 --- a/scripts/setup-apple.sh +++ b/scripts/setup-apple.sh @@ -41,7 +41,7 @@ fi ARCH="$(uname -m)" if [ "$ARCH" != "arm64" ] && [ "$ARCH" != "aarch64" ]; then - warn "Expected Apple Silicon (arm64) but detected ${ARCH}. Proceeding anyway." + fail "This script is for Apple Silicon Macs (arm64). Detected: ${ARCH}. Use 'nemoclaw onboard' for other platforms." fi step "1/5 Checking macOS environment" @@ -116,8 +116,21 @@ if [ "$OLLAMA_INSTALLED" = true ]; then OLLAMA_RUNNING=true info "Ollama is running on port 11434" else - info "Starting Ollama service..." 
- OLLAMA_HOST=0.0.0.0:11434 ollama serve &>/dev/null & + # Prefer brew services (managed, survives reboots) + if brew services list 2>/dev/null | grep -q "ollama.*started"; then + OLLAMA_RUNNING=true + info "Ollama managed by brew services (already running)" + else + info "Starting Ollama via brew services..." + if brew services start ollama &>/dev/null 2>&1; then + info "Ollama started via brew services" + else + info "brew services unavailable — starting directly..." + OLLAMA_HOST=0.0.0.0:11434 ollama serve &>/dev/null & + OLLAMA_BG_PID=$! + trap 'kill $OLLAMA_BG_PID 2>/dev/null || true' EXIT INT TERM + fi + fi sleep 3 if curl -s --max-time 3 http://localhost:11434/api/tags &>/dev/null; then OLLAMA_RUNNING=true @@ -129,7 +142,7 @@ if [ "$OLLAMA_INSTALLED" = true ]; then # Suggest pulling a model if none present if [ "$OLLAMA_RUNNING" = true ]; then - MODEL_COUNT=$(curl -s http://localhost:11434/api/tags 2>/dev/null | python3 -c "import json,sys; print(len(json.load(sys.stdin).get('models',[])))" 2>/dev/null || echo "0") + MODEL_COUNT=$(curl -s http://localhost:11434/api/tags 2>/dev/null | grep -co '"name"' 2>/dev/null || echo "0") if [ "$MODEL_COUNT" = "0" ]; then info "No models found. 
For local inference, pull a model:" echo " ollama pull llama3.1:8b # 4.7GB, good balance" From 37927817a0999c9a8565685e51685b7b8cf4c158 Mon Sep 17 00:00:00 2001 From: Yajat Date: Wed, 18 Mar 2026 11:52:25 -0700 Subject: [PATCH 3/3] =?UTF-8?q?fix:=20address=20CodeRabbit=20review=20?= =?UTF-8?q?=E2=80=94=20pipefail=20safety,=20trap=20cleanup,=20grep=20fallb?= =?UTF-8?q?ack?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Comment 2: Add || true to GPU core detection pipeline to prevent pipefail abort on VMs/headless Macs - Comment 3: Remove EXIT from trap — only kill Ollama on INT/TERM, not normal script completion - Comment 4: Disable pipefail for MODEL_COUNT grep to handle empty JSON gracefully --- scripts/setup-apple.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/setup-apple.sh b/scripts/setup-apple.sh index 504ae6af55..d3bc9e1ee1 100755 --- a/scripts/setup-apple.sh +++ b/scripts/setup-apple.sh @@ -50,7 +50,7 @@ step "1/5 Checking macOS environment" GPU_CORES="" UNIFIED_MEM="" if command -v system_profiler &>/dev/null; then - GPU_CORES=$(system_profiler SPDisplaysDataType 2>/dev/null | grep "Total Number of Cores" | awk -F': ' '{print $2}' | head -1) + GPU_CORES=$(system_profiler SPDisplaysDataType 2>/dev/null | grep "Total Number of Cores" | awk -F': ' '{print $2}' | head -1 || true) UNIFIED_MEM=$(sysctl -n hw.memsize 2>/dev/null | awk '{printf "%.0f", $1/1024/1024/1024}') fi info "macOS $(sw_vers -productVersion 2>/dev/null || echo 'unknown') on ${ARCH}" @@ -128,7 +128,7 @@ if [ "$OLLAMA_INSTALLED" = true ]; then info "brew services unavailable — starting directly..." OLLAMA_HOST=0.0.0.0:11434 ollama serve &>/dev/null & OLLAMA_BG_PID=$! 
- trap 'kill $OLLAMA_BG_PID 2>/dev/null || true' EXIT INT TERM + trap 'kill $OLLAMA_BG_PID 2>/dev/null || true' INT TERM fi fi sleep 3 @@ -142,7 +142,7 @@ if [ "$OLLAMA_INSTALLED" = true ]; then # Suggest pulling a model if none present if [ "$OLLAMA_RUNNING" = true ]; then - MODEL_COUNT=$(curl -s http://localhost:11434/api/tags 2>/dev/null | grep -co '"name"' 2>/dev/null || echo "0") + MODEL_COUNT=$(set +o pipefail; curl -s http://localhost:11434/api/tags 2>/dev/null | grep -o '"name"' 2>/dev/null | wc -l | tr -d ' ') if [ "$MODEL_COUNT" = "0" ]; then info "No models found. For local inference, pull a model:" echo " ollama pull llama3.1:8b # 4.7GB, good balance"